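"""Postprocess evaluation-agent chain-of-thought logs into Alpaca-format SFT records.

Each per-step record pairs the previous observation (or the initial instruction at
step 1) with the agent's next <think>/<tool> or <think>/<summary> turn, and carries
the full multi-turn history together with the system prompt.
"""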
import os
import sys
import json
import glob
import argparse
from datasets import load_dataset, concatenate_datasets
import pandas as pd
import shutil
import chardet
import ast
import transformers
# Add parent directory to Python path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from eval_agent.system_prompts import sys_prompts
vbench_dimention_df = pd.read_csv("eval_agent/vbench_dimension_scores.tsv", sep="\t")
t2i_dimention_df = pd.read_csv("eval_agent/t2i_dimension_scores.tsv", sep="\t")
# Templates for different components
alpaca_template = {
    "instruction": "{instruction}",
    "input": "{input}",
    "output": "{output}",
    "system": "{system}"
}
thinking_template = "<think>{thinking}</think>"
tool_template = "<tool>{tool}</tool>"
observation_template = "<information>{information}</information>"
analysis_template = "<analysis>{analysis}</analysis>"
summary_template = "<summary>{summary}</summary>"
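# Illustrative shape of the assembled turns (the tool name "subject_consistency" is a
# hypothetical example, not taken from this file):
#   intermediate step : "<think>...reasoning... I will evaluate the model's sub-aspect: X.</think><tool>subject_consistency</tool>"
#   final step        : "<think>...reasoning...</think><summary>Analysis: ...\nSummary: ...</summary>"
#   observation turn  : "<information>Scoring Reference Table of '...': ...\n\nResults:\n- Overall score: ...</information>"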
# Global counter for tracking data
global_counter = 0
length_counter = 0
def format_subaspect(sub_aspect):
    """Format sub-aspect for output."""
    import random
    template_list = [
        "I will evaluate the model's sub-aspect: {sub_aspect}.",
        "I will focus on the {sub_aspect} sub-aspect of the model.",
        "Let me assess the {sub_aspect} sub-aspect of this model.",
        "I need to examine the model's {sub_aspect} sub-aspect.",
        "Now I will analyze the {sub_aspect} sub-aspect dimension.",
        "I'll investigate the {sub_aspect} sub-aspect quality of the model.",
        "Time to evaluate the {sub_aspect} sub-aspect performance.",
        "I should check the model's {sub_aspect} sub-aspect capabilities."
    ]
    selected_template = random.choice(template_list)
    return selected_template.format(sub_aspect=sub_aspect)
def format_summary(analysis, summary):
    """Format summary for output."""
    return f"Analysis: {analysis}\nSummary: {summary}"
def load_data(file_path):
    """Load JSON data from a file."""
    with open(file_path, "r", encoding="utf-8") as f:
        return json.load(f)
def format_template(template, **kwargs):
    """Format a template with provided values."""
    if isinstance(template, dict):
        result = {}
        for key, value in template.items():
            if isinstance(value, str):
                result[key] = value.format(**kwargs)
            else:
                result[key] = value
        return result
    return template.format(**kwargs)
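# Minimal usage sketch (values are hypothetical; only string fields are formatted):
#   format_template(alpaca_template, instruction="Evaluate this T2V model.", input="",
#                   output="<think>...</think><tool>...</tool>", system="...")
#   -> {"instruction": "Evaluate this T2V model.", "input": "",
#       "output": "<think>...</think><tool>...</tool>", "system": "..."}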
def extract_obs(tool_name, obs):
    """Extract observation information for a tool."""
    return f"Observation: {obs}"
def check_data(data):
    """Check if data is valid."""
    if len(data["cot"]) > 8:
        return False
    if data["cot"][-1]["answer"] != data["ground_truth"]:
        return False
    return True
# def format_eval_results(eval_results: dict) -> list:
# """Format eval results for output."""
# score = eval_results['score'][0]
# video_results_list = eval_results['score'][1] # list of dict
# # remove the video path
# for video_result in video_results_list:
# video_result.pop('video_path')
# return score, video_results_list
# format the reference table
def format_dimension_as_string(df, dimension_name):
    row = df.loc[df['Dimension'] == dimension_name]
    if row.empty:
        return f"No data found for dimension: {dimension_name}"
    formatted_string = (
        f"{row['Dimension'].values[0]}: "
        f"Very High -> {row['Very High'].values[0]}, "
        f"High -> {row['High'].values[0]}, "
        f"Moderate -> {row['Moderate'].values[0]}, "
        f"Low -> {row['Low'].values[0]}, "
        f"Very Low -> {row['Very Low'].values[0]}"
    )
    return formatted_string
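# Example of the rendered reference row (the dimension name and score ranges are
# hypothetical; the column names mirror the TSV header used above):
#   "subject_consistency: Very High -> >0.95, High -> 0.90-0.95, Moderate -> 0.80-0.90, Low -> 0.70-0.80, Very Low -> <0.70"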
def format_eval_results(results, reference_table):
    tool_name = results["Tool"]
    average_score = results["eval_results"]["score"][0]
    video_results = results["eval_results"]["score"][1]
    # More concise and structured format for SFT
    output = f"Scoring Reference Table of '{tool_name}': {reference_table}\n\n"
    output += "Results:\n"
    output += f"- Overall score: {average_score:.4f}\n"
    output += "- Per-prompt scores:\n"
    for video in video_results:
        prompt = video["prompt"]
        score = video["video_results"]
        output += f" • \"{prompt}\": {score:.4f}\n"
    return output
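# Example of the rendered observation block (tool name, prompts, and scores are hypothetical):
#   Scoring Reference Table of 'subject_consistency': subject_consistency: Very High -> ..., ...
#
#   Results:
#   - Overall score: 0.9123
#   - Per-prompt scores:
#    • "a cat playing piano": 0.9054
#    • "a dog surfing at sunset": 0.9192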
# Main function to convert the data to the Alpaca format
def convert_to_alpaca(json_path, output_dir, return_data=False):
    """Convert data to Alpaca format for training."""
    global global_counter
    data_list = []
    # Process each file
    with open(json_path, "r", encoding="utf-8") as in_f:
        data = json.load(in_f)
    # remove the last element
    data.pop()
    # data["ID"] = global_counter
    ops = []
    obs = []
    # Generate the history
    for i in range(1, len(data)):
        # Prepare the output
        try:
            if i == len(data) - 1:  # last step
                op = f"{thinking_template.format(thinking=data[i]['Thought'])}{summary_template.format(summary=format_summary(data[i]['Analysis'], data[i]['Summary']))}"
            else:
                op = f"{thinking_template.format(thinking=data[i]['Thought'] + ' ' + format_subaspect(data[i]['Sub-aspect']))}{tool_template.format(tool=data[i]['Tool'])}"
            # only n-1 steps have observation
            # obs.append(observation_template.format(information=extract_obs(data["cot"][i]["tool"]["name"], data["cot"][i]["observation"])))
            # score, video_results_list = format_eval_results(data[i]['eval_results'])
            # obs.append(observation_template.format(info0=score, info1=video_results_list))  # Current observation is the eval_results
            reference_table = format_dimension_as_string(vbench_dimention_df, data[i]['Tool'])
            obs.append(observation_template.format(information=format_eval_results(data[i], reference_table)))
        except Exception as e:
            print(f"Error in processing data {json_path} at step {i}: {e}")
            continue
        ops.append(op)
        # Build history for this step
        history = []
        for j in range(1, i):  # Start from 1 since we process from step 1
            if j == 1:
                traj = [
                    data[0],
                    ops[j-1]  # ops is 0-indexed but we start processing from step 1
                ]
            else:
                traj = [
                    obs[j-2],  # obs is built as we go
                    ops[j-1]
                ]
            history.append(traj)
        # Convert the data to the Alpaca format at the n-th step
        if i == 1:  # First step after initial instruction
            data_n = format_template(alpaca_template, **{
                "instruction": data[0],
                "input": "",
                "output": op,
                "system": sys_prompts["eval-agent-vbench-training-sys_v1"] + sys_prompts["eval-agent-format-sys"]
            })
        else:
            data_n = format_template(alpaca_template, **{
                "instruction": obs[i-2],  # Previous observation
                "input": "",
                "output": op,
                "system": sys_prompts["eval-agent-vbench-training-sys_v1"] + sys_prompts["eval-agent-format-sys"]
            })
        data_n["history"] = history
        # # filter the tokens > 8096
        # tokenizer = transformers.AutoTokenizer.from_pretrained('Ego-R1/qwen-sft-epoch3-len16192-20250511-3b-inst')
        # tokens = tokenizer(f"{data_n['instruction']} {data_n['output']} {data_n['system']} {str(data_n['history'])}")
        # if len(tokens['input_ids']) > 8096:
        #     print(f"Skipping data with tokens > 8096: {data['ID']}")
        #     continue
        data_list.append(data_n)
        global_counter += 1
    if return_data:
        return data_list
    print(f"Size of the sft dataset: {len(data_list)}")
    # Create output directory if it doesn't exist
    os.makedirs(output_dir, exist_ok=True)
    # Save the processed data
    with open(os.path.join(output_dir, "processed_data.json"), "w", encoding="utf-8") as out_f:
        json.dump(data_list, out_f, ensure_ascii=False, indent=4)
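# Shape of one resulting training record (content is illustrative):
#   {
#       "instruction": "<information>...previous observation...</information>",  # or the initial prompt data[0] at step 1
#       "input": "",
#       "output": "<think>...</think><tool>...</tool>",                          # or <summary>...</summary> at the last step
#       "system": sys_prompts["eval-agent-vbench-training-sys_v1"] + sys_prompts["eval-agent-format-sys"],
#       "history": [[<initial prompt>, <step-1 output>], [<observation 1>, <step-2 output>], ...]
#   }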
def arg_parse():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(description="Process evaluation-agent CoT data for training")
    parser.add_argument("--home_dir", type=str, default="/home/data2/sltian/code/evaluation_agent_dev")
    parser.add_argument("--data_dir", type=str, default="ea-data")  # for test set, the data_dir should be "/home/data2/sltian/code/Ego-R1_dev/egor1-bench/QA-egolife/benchmark/benchmark_shuffle_new"
    parser.add_argument("--output_dir", type=str, default=None)
    parser.add_argument("--format", type=str, choices=["alpaca", "glaive"], default="alpaca")
    return parser.parse_args()
def main():
    """Main function to process data."""
    args = arg_parse()
    if args.output_dir is None:
        import datetime
        args.output_dir = os.path.join("data", f"postprocess_{datetime.datetime.now().strftime('%Y%m%d')}")
    # Get all JSON files from the preprocess directory
    # NOTE: this path is hardcoded; --home_dir and --data_dir are not used here
    preprocess_dir = "/home/data2/sltian/code/evaluation_agent_dev/data/preprocess"
    json_files = glob.glob(os.path.join(preprocess_dir, "*.json"))
    # Filter out mapping files and only keep chat history files
    chat_files = [f for f in json_files if not f.endswith("mapping.txt") and not f.endswith("summary_by_model.txt") and f.endswith(".json")]
    print(f"Found {len(chat_files)} chat history files to process")
    # Create a combined dataset from all files
    all_data = []
    for i, json_path in enumerate(chat_files):
        print(f"Processing file {i+1}/{len(chat_files)}: {os.path.basename(json_path)}")
        try:
            file_data = convert_to_alpaca(json_path, args.output_dir, return_data=True)
            all_data.extend(file_data)
        except Exception as e:
            print(f"Error processing {json_path}: {e}")
            continue
    print(f"\nTotal training examples created: {len(all_data)}")
    # Save the combined dataset
    os.makedirs(args.output_dir, exist_ok=True)
    output_path = os.path.join(args.output_dir, "evaluation_agent_cot_dataset.json")
    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(all_data, f, ensure_ascii=False, indent=2)
    print(f"Combined dataset saved to: {output_path}")
if __name__ == "__main__":
main()
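# Example invocation (illustrative; the script name is a placeholder for this file):
#   python postprocess_to_alpaca.py --output_dir data/postprocess_20250101
# This writes data/postprocess_20250101/evaluation_agent_cot_dataset.json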