import os
import sys
import json
import glob
import random
import argparse
import datetime

import pandas as pd

# Add parent directory to Python path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from eval_agent.system_prompts import sys_prompts


vbench_dimension_df = pd.read_csv("eval_agent/vbench_dimension_scores.tsv", sep="\t")
t2i_dimension_df = pd.read_csv("eval_agent/t2i_dimension_scores.tsv", sep="\t")

# Templates for different components
alpaca_template = {
    "instruction": "{instruction}",
    "input": "{input}",
    "output": "{output}",
    "system": "{system}"
}


thinking_template = "<think>{thinking}</think>"
tool_template = "<tool>{tool}</tool>"

observation_template = "<information>{information}</information>"

analysis_template = "<analysis>{analysis}</analysis>"
summary_template = "<summary>{summary}</summary>"
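# Illustrative shapes of the composed turns (values are made up, not from real data):
#   intermediate step -> "<think>... I will evaluate the model's sub-aspect: X.</think><tool>some_dimension</tool>"
#   final step        -> "<think>...</think><summary>Analysis: ...\nSummary: ...</summary>"
#   tool feedback     -> "<information>Scoring Reference Table of 'some_dimension': ...</information>"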

# Global counter for tracking the number of generated examples
global_counter = 0

def format_subaspect(sub_aspect):
    """Announce the sub-aspect under evaluation using a randomly chosen template."""
    template_list = [
        "I will evaluate the model's sub-aspect: {sub_aspect}.",
        "I will focus on the {sub_aspect} sub-aspect of the model.",
        "Let me assess the {sub_aspect} sub-aspect of this model.",
        "I need to examine the model's {sub_aspect} sub-aspect.",
        "Now I will analyze the {sub_aspect} sub-aspect dimension.",
        "I'll investigate the {sub_aspect} sub-aspect quality of the model.",
        "Time to evaluate the {sub_aspect} sub-aspect performance.",
        "I should check the model's {sub_aspect} sub-aspect capabilities."
    ]
    selected_template = random.choice(template_list)
    return selected_template.format(sub_aspect=sub_aspect)

def format_summary(analysis, summary):
    """Format summary for output."""
    return f"Analysis: {analysis}\nSummary: {summary}"
    
def load_data(file_path):
    """Load JSON data from a file."""
    with open(file_path, "r", encoding="utf-8") as f:
        return json.load(f)
    
def format_template(template, **kwargs):
    """Format a template with provided values."""
    if isinstance(template, dict):
        result = {}
        for key, value in template.items():
            if isinstance(value, str):
                result[key] = value.format(**kwargs)
            else:
                result[key] = value
        return result
    return template.format(**kwargs)
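# Hypothetical usage sketch (values are placeholders):
#   format_template(alpaca_template, instruction="Evaluate model X", input="",
#                   output="<think>...</think>", system="...")
#   -> {"instruction": "Evaluate model X", "input": "",
#       "output": "<think>...</think>", "system": "..."}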

def extract_obs(tool_name, obs):
    """Wrap an observation in a standard prefix (tool_name is currently unused)."""
    return f"Observation: {obs}"

def check_data(data):
    """Validate a CoT record: at most 8 steps and final answer matching the ground truth."""
    if len(data["cot"]) > 8:
        return False
    if data["cot"][-1]["answer"] != data["ground_truth"]:
        return False
    return True


def format_dimension_as_string(df, dimension_name):
    """Format one row of the dimension-score reference table as a single string."""
    row = df.loc[df['Dimension'] == dimension_name]
    if row.empty:
        return f"No data found for dimension: {dimension_name}"
    
    formatted_string = (
        f"{row['Dimension'].values[0]}: "
        f"Very High -> {row['Very High'].values[0]}, "
        f"High -> {row['High'].values[0]}, "
        f"Moderate -> {row['Moderate'].values[0]}, "
        f"Low -> {row['Low'].values[0]}, "
        f"Very Low -> {row['Very Low'].values[0]}"
    )
    
    return formatted_string
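# Example output, assuming the TSV has the columns read above (scores are hypothetical):
#   "some_dimension: Very High -> 0.95, High -> 0.90, Moderate -> 0.85, Low -> 0.80, Very Low -> 0.70"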

def format_eval_results(results, reference_table):
    """Format a tool's evaluation results (overall and per-prompt scores) for SFT."""
    tool_name = results["Tool"]
    average_score = results["eval_results"]["score"][0]
    video_results = results["eval_results"]["score"][1]

    # Concise, structured format for SFT
    output = f"Scoring Reference Table of '{tool_name}': {reference_table}\n\n"
    output += "Results:\n"
    output += f"- Overall score: {average_score:.4f}\n"
    output += "- Per-prompt scores:\n"

    for video in video_results:
        prompt = video["prompt"]
        score = video["video_results"]
        output += f"  • \"{prompt}\": {score:.4f}\n"

    return output
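# Shape of `results` as inferred from the key accesses above (values are illustrative):
#   {
#       "Tool": "some_dimension",
#       "eval_results": {
#           "score": [0.91,                       # overall score
#                     [{"prompt": "a cat ...",    # per-prompt entries
#                       "video_results": 0.93}]],
#       },
#   }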

# Main function to convert the data to the Alpaca format
def convert_to_alpaca(json_path, output_dir, return_data=False):
    """Convert data to Alpaca format for training."""
    global global_counter
    data_list = []
    # Load a single chat-history file
    with open(json_path, "r", encoding="utf-8") as in_f:
        data = json.load(in_f)
        
        # remove the last element
        data.pop()
        
        # data["ID"] = global_counter
        ops = []
        obs = []
        

        
        # Generate the history
        for i in range(1, len(data)):
            # Prepare the output
            try:
                if i == len(data) - 1:  # last step
                    op = f"{thinking_template.format(thinking=data[i]['Thought'])}{summary_template.format(summary=format_summary(data[i]['Analysis'], data[i]['Summary']))}"
                else:
                    op = f"{thinking_template.format(thinking=data[i]['Thought'] + ' ' + format_subaspect(data[i]['Sub-aspect']))}{tool_template.format(tool=data[i]['Tool'])}"
                    
                    # only the first n-1 steps have an observation
                    reference_table = format_dimension_as_string(vbench_dimension_df, data[i]['Tool'])
                    obs.append(observation_template.format(information=format_eval_results(data[i], reference_table)))

            except Exception as e:
                print(f"Error in processing data {json_path} at step {i}: {e}")
                continue
            
            ops.append(op)

            # Build the (user turn, assistant turn) history for this step
            history = []
            for j in range(1, i):  # processing starts from step 1
                if j == 1:
                    # first turn: the initial instruction paired with the first output
                    traj = [data[0], ops[j-1]]
                else:
                    # later turns: the previous observation paired with the j-th output
                    traj = [obs[j-2], ops[j-1]]
                history.append(traj)

            # Convert this step into an Alpaca-format record
            system_prompt = sys_prompts["eval-agent-vbench-training-sys_v1"] + sys_prompts["eval-agent-format-sys"]
            if i == 1:
                instruction = data[0]   # first step: the initial instruction
            else:
                instruction = obs[i-2]  # later steps: the previous observation
            data_n = format_template(
                alpaca_template,
                instruction=instruction,
                input="",
                output=op,
                system=system_prompt,
            )
            data_n["history"] = history

            # Optional length filter, disabled by default (requires `import transformers`):
            # tokenizer = transformers.AutoTokenizer.from_pretrained('Ego-R1/qwen-sft-epoch3-len16192-20250511-3b-inst')
            # tokens = tokenizer(f"{data_n['instruction']} {data_n['output']} {data_n['system']} {str(data_n['history'])}")
            # if len(tokens['input_ids']) > 8096:
            #     print("Skipping example with more than 8096 tokens")
            #     continue
            
            data_list.append(data_n)
            global_counter += 1
                
    if return_data:
        return data_list
    
    print(f"Size of the sft dataset: {len(data_list)}")
    
    # Create output directory if it doesn't exist
    os.makedirs(output_dir, exist_ok=True)
    
    # Save the processed data
    with open(os.path.join(output_dir, "processed_data.json"), "w", encoding="utf-8") as out_f:
        json.dump(data_list, out_f, ensure_ascii=False, indent=4)
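# Each saved record follows the multi-turn Alpaca layout (field values are illustrative):
#   {
#       "instruction": "<information>...</information>",  # previous observation (or the initial task)
#       "input": "",
#       "output": "<think>...</think><tool>...</tool>",
#       "system": "<eval-agent system prompt>",
#       "history": [["user turn", "assistant turn"], ...]
#   }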

    
    
def arg_parse():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(description="Convert evaluation-agent chat histories into SFT training data")
    parser.add_argument("--home_dir", type=str, default="/home/data2/sltian/code/evaluation_agent_dev")
    # For the test set, data_dir should instead point at the benchmark directory, e.g.
    # /home/data2/sltian/code/Ego-R1_dev/egor1-bench/QA-egolife/benchmark/benchmark_shuffle_new
    parser.add_argument("--data_dir", type=str, default="ea-data")
    parser.add_argument("--output_dir", type=str, default=None)
    parser.add_argument("--format", type=str, choices=["alpaca", "glaive"], default="alpaca")
    return parser.parse_args()
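# Typical invocation (the script name is a placeholder, not confirmed by the repo):
#   python build_sft_dataset.py --home_dir /path/to/evaluation_agent_dev \
#       --data_dir ea-data --format alpaca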

def main():
    """Main function to process data."""
    args = arg_parse()

    if args.output_dir is None:
        args.output_dir = os.path.join("data", f"postprocess_{datetime.datetime.now().strftime('%Y%m%d')}")

    # Gather all JSON files from the preprocess directory under --home_dir
    preprocess_dir = os.path.join(args.home_dir, "data", "preprocess")
    json_files = glob.glob(os.path.join(preprocess_dir, "*.json"))

    # The *.json glob already excludes side files such as mapping.txt and
    # summary_by_model.txt, so every match is a chat-history file
    chat_files = json_files
    
    print(f"Found {len(chat_files)} chat history files to process")
    
    # Create a combined dataset from all files
    all_data = []
    for i, json_path in enumerate(chat_files):
        print(f"Processing file {i+1}/{len(chat_files)}: {os.path.basename(json_path)}")
        try:
            file_data = convert_to_alpaca(json_path, args.output_dir, return_data=True)
            all_data.extend(file_data)
        except Exception as e:
            print(f"Error processing {json_path}: {e}")
            continue
    
    print(f"\nTotal training examples created: {len(all_data)}")
    
    # Save the combined dataset
    os.makedirs(args.output_dir, exist_ok=True)
    output_path = os.path.join(args.output_dir, "evaluation_agent_cot_dataset.json")
    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(all_data, f, ensure_ascii=False, indent=2)
    
    print(f"Combined dataset saved to: {output_path}")

if __name__ == "__main__":
    main()