Farouk committed on
Commit 017644c · 1 Parent(s): a37f25c

Training in progress, step 8800

adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:446bfa0c2ad7a8e354b5cf24c58e1978dfa01865e17dbf1fe9cf087beb45fdc1
+ oid sha256:ccc5aed67f5b95706ecbe947d1bd0a4311cd3f755c37d86f51d0ba6916dcc226
  size 319977229
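
The pointer above stores only a SHA-256 digest (the `oid`) and the byte size; the weights themselves live in LFS storage. After downloading, the digest can be recomputed and compared against the pointer as a quick integrity check. A minimal sketch in Python, assuming the file has been fetched locally as `adapter_model.bin` (hypothetical path):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file in 1 MiB chunks and return its hex SHA-256 digest."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# Expected digest taken from the new LFS pointer above; the local path is an assumption.
EXPECTED_OID = "ccc5aed67f5b95706ecbe947d1bd0a4311cd3f755c37d86f51d0ba6916dcc226"
assert sha256_of("adapter_model.bin") == EXPECTED_OID
```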
{checkpoint-6600 β†’ checkpoint-8600/adapter_model/adapter_model}/README.md RENAMED
File without changes
{checkpoint-6600 β†’ checkpoint-8600/adapter_model/adapter_model}/adapter_config.json RENAMED
File without changes
{checkpoint-6600 β†’ checkpoint-8600/adapter_model/adapter_model}/adapter_model.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:11ad69e2bca9d9fe76b7ccd9f9752f1bd8172d0d07765c4336aa13caa5da3639
+ oid sha256:446bfa0c2ad7a8e354b5cf24c58e1978dfa01865e17dbf1fe9cf087beb45fdc1
  size 319977229
checkpoint-8800/README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+ ### Framework versions
+
+
+ - PEFT 0.4.0
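
The card above lists the quantization settings only as key/value pairs. For reference, a minimal sketch of the equivalent `transformers` `BitsAndBytesConfig` is shown below; the values mirror the card, and the `llm_int8_*` fields are left at their library defaults, which match the listed values:

```python
import torch
from transformers import BitsAndBytesConfig

# 4-bit NF4 quantization with double quantization and bfloat16 compute,
# matching the settings recorded in this checkpoint's README.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    load_in_8bit=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
```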
checkpoint-8800/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "pankajmathur/orca_mini_v3_7b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16.0,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 64,
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "up_proj",
+ "k_proj",
+ "down_proj",
+ "o_proj",
+ "v_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
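
The config above describes a rank-64 LoRA adapter over the attention and MLP projections of `pankajmathur/orca_mini_v3_7b`. A minimal sketch of loading it for inference with PEFT, assuming the checkpoint has been downloaded to a local `checkpoint-8800` directory (hypothetical path), might look like this:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Base model quantized with the same settings the adapter was trained against.
base_model = AutoModelForCausalLM.from_pretrained(
    "pankajmathur/orca_mini_v3_7b",
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
    ),
    device_map="auto",  # assumption: automatic single-host placement
)
tokenizer = AutoTokenizer.from_pretrained("pankajmathur/orca_mini_v3_7b")

# "checkpoint-8800" is a hypothetical local path holding the adapter_config.json
# and adapter_model.bin added in this commit.
model = PeftModel.from_pretrained(base_model, "checkpoint-8800")
model.eval()
```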
checkpoint-8800/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ccc5aed67f5b95706ecbe947d1bd0a4311cd3f755c37d86f51d0ba6916dcc226
+ size 319977229
{checkpoint-6600 β†’ checkpoint-8800}/added_tokens.json RENAMED
File without changes
{checkpoint-6600 β†’ checkpoint-8800}/optimizer.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ab43853311af964432f71e0e6a4ff9b249ef56d15257e89dab5883d9e6df9849
+ oid sha256:c0729f2f95db904668d3245d8c1b07d1a574383d737563986cb7e7e074520ea7
  size 1279539973
{checkpoint-6600 β†’ checkpoint-8800}/rng_state.pth RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b5aecac0679f0dd14f05b6a5680d16b92fa392984e27712f245f079b2e827da7
+ oid sha256:e3230649c6c4fd3fa9889efc221536e89faaaa2d1394dec5eb5fd9c408c13fcb
  size 14511
{checkpoint-6600 β†’ checkpoint-8800}/scheduler.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:aa19c433c8c029403e57118df2ab52631b3fc535294c01cab201bdeb198ed0f4
+ oid sha256:6c21e5e690d8e0cade5418c2fcaefb2d060d01ccdb64d9d6411a6d2dbbe882b0
  size 627
{checkpoint-6600 β†’ checkpoint-8800}/special_tokens_map.json RENAMED
File without changes
{checkpoint-6600 β†’ checkpoint-8800}/tokenizer.model RENAMED
File without changes
{checkpoint-6600 β†’ checkpoint-8800}/tokenizer_config.json RENAMED
File without changes
{checkpoint-6600 β†’ checkpoint-8800}/trainer_state.json RENAMED
@@ -1,8 +1,8 @@
  {
- "best_metric": 0.44189587235450745,
- "best_model_checkpoint": "experts/expert-3/checkpoint-5000",
- "epoch": 1.3030602171767027,
- "global_step": 6600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -6309,11 +6309,2112 @@
  "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
  "mmlu_loss": 1.178827084542566,
  "step": 6600
  }
  ],
  "max_steps": 10000,
  "num_train_epochs": 2,
- "total_flos": 6.112398584251023e+17,
  "trial_name": null,
  "trial_params": null
  }
 
  {
+ "best_metric": 0.4287540912628174,
+ "best_model_checkpoint": "experts/expert-3/checkpoint-8800",
+ "epoch": 1.7374136229022705,
+ "global_step": 8800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,

  "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
  "mmlu_loss": 1.178827084542566,
  "step": 6600
6312
+ },
6313
+ {
6314
+ "epoch": 1.31,
6315
+ "learning_rate": 0.0002,
6316
+ "loss": 0.3946,
6317
+ "step": 6610
6318
+ },
6319
+ {
6320
+ "epoch": 1.31,
6321
+ "learning_rate": 0.0002,
6322
+ "loss": 0.3958,
6323
+ "step": 6620
6324
+ },
6325
+ {
6326
+ "epoch": 1.31,
6327
+ "learning_rate": 0.0002,
6328
+ "loss": 0.3441,
6329
+ "step": 6630
6330
+ },
6331
+ {
6332
+ "epoch": 1.31,
6333
+ "learning_rate": 0.0002,
6334
+ "loss": 0.3368,
6335
+ "step": 6640
6336
+ },
6337
+ {
6338
+ "epoch": 1.31,
6339
+ "learning_rate": 0.0002,
6340
+ "loss": 0.3992,
6341
+ "step": 6650
6342
+ },
6343
+ {
6344
+ "epoch": 1.31,
6345
+ "learning_rate": 0.0002,
6346
+ "loss": 0.359,
6347
+ "step": 6660
6348
+ },
6349
+ {
6350
+ "epoch": 1.32,
6351
+ "learning_rate": 0.0002,
6352
+ "loss": 0.4192,
6353
+ "step": 6670
6354
+ },
6355
+ {
6356
+ "epoch": 1.32,
6357
+ "learning_rate": 0.0002,
6358
+ "loss": 0.3531,
6359
+ "step": 6680
6360
+ },
6361
+ {
6362
+ "epoch": 1.32,
6363
+ "learning_rate": 0.0002,
6364
+ "loss": 0.3698,
6365
+ "step": 6690
6366
+ },
6367
+ {
6368
+ "epoch": 1.32,
6369
+ "learning_rate": 0.0002,
6370
+ "loss": 0.4178,
6371
+ "step": 6700
6372
+ },
6373
+ {
6374
+ "epoch": 1.32,
6375
+ "learning_rate": 0.0002,
6376
+ "loss": 0.3839,
6377
+ "step": 6710
6378
+ },
6379
+ {
6380
+ "epoch": 1.33,
6381
+ "learning_rate": 0.0002,
6382
+ "loss": 0.3901,
6383
+ "step": 6720
6384
+ },
6385
+ {
6386
+ "epoch": 1.33,
6387
+ "learning_rate": 0.0002,
6388
+ "loss": 0.4016,
6389
+ "step": 6730
6390
+ },
6391
+ {
6392
+ "epoch": 1.33,
6393
+ "learning_rate": 0.0002,
6394
+ "loss": 0.4134,
6395
+ "step": 6740
6396
+ },
6397
+ {
6398
+ "epoch": 1.33,
6399
+ "learning_rate": 0.0002,
6400
+ "loss": 0.3701,
6401
+ "step": 6750
6402
+ },
6403
+ {
6404
+ "epoch": 1.33,
6405
+ "learning_rate": 0.0002,
6406
+ "loss": 0.3915,
6407
+ "step": 6760
6408
+ },
6409
+ {
6410
+ "epoch": 1.34,
6411
+ "learning_rate": 0.0002,
6412
+ "loss": 0.312,
6413
+ "step": 6770
6414
+ },
6415
+ {
6416
+ "epoch": 1.34,
6417
+ "learning_rate": 0.0002,
6418
+ "loss": 0.4279,
6419
+ "step": 6780
6420
+ },
6421
+ {
6422
+ "epoch": 1.34,
6423
+ "learning_rate": 0.0002,
6424
+ "loss": 0.4226,
6425
+ "step": 6790
6426
+ },
6427
+ {
6428
+ "epoch": 1.34,
6429
+ "learning_rate": 0.0002,
6430
+ "loss": 0.415,
6431
+ "step": 6800
6432
+ },
6433
+ {
6434
+ "epoch": 1.34,
6435
+ "eval_loss": 0.44151541590690613,
6436
+ "eval_runtime": 120.9028,
6437
+ "eval_samples_per_second": 8.271,
6438
+ "eval_steps_per_second": 4.136,
6439
+ "step": 6800
6440
+ },
6441
+ {
6442
+ "epoch": 1.34,
6443
+ "mmlu_eval_accuracy": 0.5084804815045342,
6444
+ "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
6445
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
6446
+ "mmlu_eval_accuracy_astronomy": 0.5,
6447
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
6448
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5517241379310345,
6449
+ "mmlu_eval_accuracy_college_biology": 0.3125,
6450
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
6451
+ "mmlu_eval_accuracy_college_computer_science": 0.45454545454545453,
6452
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
6453
+ "mmlu_eval_accuracy_college_medicine": 0.5,
6454
+ "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
6455
+ "mmlu_eval_accuracy_computer_security": 0.2727272727272727,
6456
+ "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
6457
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
6458
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
6459
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
6460
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
6461
+ "mmlu_eval_accuracy_global_facts": 0.4,
6462
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
6463
+ "mmlu_eval_accuracy_high_school_chemistry": 0.45454545454545453,
6464
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6465
+ "mmlu_eval_accuracy_high_school_european_history": 0.7777777777777778,
6466
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
6467
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
6468
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.37209302325581395,
6469
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
6470
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5,
6471
+ "mmlu_eval_accuracy_high_school_physics": 0.35294117647058826,
6472
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
6473
+ "mmlu_eval_accuracy_high_school_statistics": 0.43478260869565216,
6474
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
6475
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
6476
+ "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
6477
+ "mmlu_eval_accuracy_human_sexuality": 0.5,
6478
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
6479
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
6480
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
6481
+ "mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
6482
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
6483
+ "mmlu_eval_accuracy_marketing": 0.8,
6484
+ "mmlu_eval_accuracy_medical_genetics": 1.0,
6485
+ "mmlu_eval_accuracy_miscellaneous": 0.686046511627907,
6486
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
6487
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
6488
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
6489
+ "mmlu_eval_accuracy_philosophy": 0.5294117647058824,
6490
+ "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
6491
+ "mmlu_eval_accuracy_professional_accounting": 0.45161290322580644,
6492
+ "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
6493
+ "mmlu_eval_accuracy_professional_medicine": 0.4838709677419355,
6494
+ "mmlu_eval_accuracy_professional_psychology": 0.4492753623188406,
6495
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
6496
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
6497
+ "mmlu_eval_accuracy_sociology": 0.8181818181818182,
6498
+ "mmlu_eval_accuracy_us_foreign_policy": 0.8181818181818182,
6499
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
6500
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
6501
+ "mmlu_loss": 1.193184396266626,
6502
+ "step": 6800
6503
+ },
6504
+ {
6505
+ "epoch": 1.34,
6506
+ "learning_rate": 0.0002,
6507
+ "loss": 0.4017,
6508
+ "step": 6810
6509
+ },
6510
+ {
6511
+ "epoch": 1.35,
6512
+ "learning_rate": 0.0002,
6513
+ "loss": 0.3976,
6514
+ "step": 6820
6515
+ },
6516
+ {
6517
+ "epoch": 1.35,
6518
+ "learning_rate": 0.0002,
6519
+ "loss": 0.3451,
6520
+ "step": 6830
6521
+ },
6522
+ {
6523
+ "epoch": 1.35,
6524
+ "learning_rate": 0.0002,
6525
+ "loss": 0.3789,
6526
+ "step": 6840
6527
+ },
6528
+ {
6529
+ "epoch": 1.35,
6530
+ "learning_rate": 0.0002,
6531
+ "loss": 0.3654,
6532
+ "step": 6850
6533
+ },
6534
+ {
6535
+ "epoch": 1.35,
6536
+ "learning_rate": 0.0002,
6537
+ "loss": 0.4088,
6538
+ "step": 6860
6539
+ },
6540
+ {
6541
+ "epoch": 1.36,
6542
+ "learning_rate": 0.0002,
6543
+ "loss": 0.3614,
6544
+ "step": 6870
6545
+ },
6546
+ {
6547
+ "epoch": 1.36,
6548
+ "learning_rate": 0.0002,
6549
+ "loss": 0.4376,
6550
+ "step": 6880
6551
+ },
6552
+ {
6553
+ "epoch": 1.36,
6554
+ "learning_rate": 0.0002,
6555
+ "loss": 0.4113,
6556
+ "step": 6890
6557
+ },
6558
+ {
6559
+ "epoch": 1.36,
6560
+ "learning_rate": 0.0002,
6561
+ "loss": 0.384,
6562
+ "step": 6900
6563
+ },
6564
+ {
6565
+ "epoch": 1.36,
6566
+ "learning_rate": 0.0002,
6567
+ "loss": 0.3689,
6568
+ "step": 6910
6569
+ },
6570
+ {
6571
+ "epoch": 1.37,
6572
+ "learning_rate": 0.0002,
6573
+ "loss": 0.3565,
6574
+ "step": 6920
6575
+ },
6576
+ {
6577
+ "epoch": 1.37,
6578
+ "learning_rate": 0.0002,
6579
+ "loss": 0.3899,
6580
+ "step": 6930
6581
+ },
6582
+ {
6583
+ "epoch": 1.37,
6584
+ "learning_rate": 0.0002,
6585
+ "loss": 0.392,
6586
+ "step": 6940
6587
+ },
6588
+ {
6589
+ "epoch": 1.37,
6590
+ "learning_rate": 0.0002,
6591
+ "loss": 0.3805,
6592
+ "step": 6950
6593
+ },
6594
+ {
6595
+ "epoch": 1.37,
6596
+ "learning_rate": 0.0002,
6597
+ "loss": 0.3245,
6598
+ "step": 6960
6599
+ },
6600
+ {
6601
+ "epoch": 1.38,
6602
+ "learning_rate": 0.0002,
6603
+ "loss": 0.3815,
6604
+ "step": 6970
6605
+ },
6606
+ {
6607
+ "epoch": 1.38,
6608
+ "learning_rate": 0.0002,
6609
+ "loss": 0.353,
6610
+ "step": 6980
6611
+ },
6612
+ {
6613
+ "epoch": 1.38,
6614
+ "learning_rate": 0.0002,
6615
+ "loss": 0.3542,
6616
+ "step": 6990
6617
+ },
6618
+ {
6619
+ "epoch": 1.38,
6620
+ "learning_rate": 0.0002,
6621
+ "loss": 0.4175,
6622
+ "step": 7000
6623
+ },
6624
+ {
6625
+ "epoch": 1.38,
6626
+ "eval_loss": 0.44231361150741577,
6627
+ "eval_runtime": 120.8967,
6628
+ "eval_samples_per_second": 8.272,
6629
+ "eval_steps_per_second": 4.136,
6630
+ "step": 7000
6631
+ },
6632
+ {
6633
+ "epoch": 1.38,
6634
+ "mmlu_eval_accuracy": 0.5054198185434623,
6635
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
6636
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
6637
+ "mmlu_eval_accuracy_astronomy": 0.625,
6638
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
6639
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
6640
+ "mmlu_eval_accuracy_college_biology": 0.3125,
6641
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
6642
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
6643
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
6644
+ "mmlu_eval_accuracy_college_medicine": 0.5,
6645
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
6646
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
6647
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
6648
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
6649
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
6650
+ "mmlu_eval_accuracy_elementary_mathematics": 0.43902439024390244,
6651
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
6652
+ "mmlu_eval_accuracy_global_facts": 0.4,
6653
+ "mmlu_eval_accuracy_high_school_biology": 0.4375,
6654
+ "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
6655
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6656
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
6657
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
6658
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
6659
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256,
6660
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
6661
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
6662
+ "mmlu_eval_accuracy_high_school_physics": 0.35294117647058826,
6663
+ "mmlu_eval_accuracy_high_school_psychology": 0.8166666666666667,
6664
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
6665
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
6666
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
6667
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
6668
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
6669
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
6670
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
6671
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
6672
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
6673
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
6674
+ "mmlu_eval_accuracy_marketing": 0.84,
6675
+ "mmlu_eval_accuracy_medical_genetics": 1.0,
6676
+ "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
6677
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
6678
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
6679
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
6680
+ "mmlu_eval_accuracy_philosophy": 0.5,
6681
+ "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
6682
+ "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
6683
+ "mmlu_eval_accuracy_professional_law": 0.32941176470588235,
6684
+ "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
6685
+ "mmlu_eval_accuracy_professional_psychology": 0.43478260869565216,
6686
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
6687
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
6688
+ "mmlu_eval_accuracy_sociology": 0.7727272727272727,
6689
+ "mmlu_eval_accuracy_us_foreign_policy": 0.9090909090909091,
6690
+ "mmlu_eval_accuracy_virology": 0.5,
6691
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
6692
+ "mmlu_loss": 1.1865613444200072,
6693
+ "step": 7000
6694
+ },
6695
+ {
6696
+ "epoch": 1.38,
6697
+ "learning_rate": 0.0002,
6698
+ "loss": 0.4169,
6699
+ "step": 7010
6700
+ },
6701
+ {
6702
+ "epoch": 1.39,
6703
+ "learning_rate": 0.0002,
6704
+ "loss": 0.3836,
6705
+ "step": 7020
6706
+ },
6707
+ {
6708
+ "epoch": 1.39,
6709
+ "learning_rate": 0.0002,
6710
+ "loss": 0.3449,
6711
+ "step": 7030
6712
+ },
6713
+ {
6714
+ "epoch": 1.39,
6715
+ "learning_rate": 0.0002,
6716
+ "loss": 0.4059,
6717
+ "step": 7040
6718
+ },
6719
+ {
6720
+ "epoch": 1.39,
6721
+ "learning_rate": 0.0002,
6722
+ "loss": 0.3668,
6723
+ "step": 7050
6724
+ },
6725
+ {
6726
+ "epoch": 1.39,
6727
+ "learning_rate": 0.0002,
6728
+ "loss": 0.4043,
6729
+ "step": 7060
6730
+ },
6731
+ {
6732
+ "epoch": 1.4,
6733
+ "learning_rate": 0.0002,
6734
+ "loss": 0.3529,
6735
+ "step": 7070
6736
+ },
6737
+ {
6738
+ "epoch": 1.4,
6739
+ "learning_rate": 0.0002,
6740
+ "loss": 0.3659,
6741
+ "step": 7080
6742
+ },
6743
+ {
6744
+ "epoch": 1.4,
6745
+ "learning_rate": 0.0002,
6746
+ "loss": 0.4007,
6747
+ "step": 7090
6748
+ },
6749
+ {
6750
+ "epoch": 1.4,
6751
+ "learning_rate": 0.0002,
6752
+ "loss": 0.4162,
6753
+ "step": 7100
6754
+ },
6755
+ {
6756
+ "epoch": 1.4,
6757
+ "learning_rate": 0.0002,
6758
+ "loss": 0.3846,
6759
+ "step": 7110
6760
+ },
6761
+ {
6762
+ "epoch": 1.41,
6763
+ "learning_rate": 0.0002,
6764
+ "loss": 0.4277,
6765
+ "step": 7120
6766
+ },
6767
+ {
6768
+ "epoch": 1.41,
6769
+ "learning_rate": 0.0002,
6770
+ "loss": 0.4338,
6771
+ "step": 7130
6772
+ },
6773
+ {
6774
+ "epoch": 1.41,
6775
+ "learning_rate": 0.0002,
6776
+ "loss": 0.3412,
6777
+ "step": 7140
6778
+ },
6779
+ {
6780
+ "epoch": 1.41,
6781
+ "learning_rate": 0.0002,
6782
+ "loss": 0.4108,
6783
+ "step": 7150
6784
+ },
6785
+ {
6786
+ "epoch": 1.41,
6787
+ "learning_rate": 0.0002,
6788
+ "loss": 0.4078,
6789
+ "step": 7160
6790
+ },
6791
+ {
6792
+ "epoch": 1.42,
6793
+ "learning_rate": 0.0002,
6794
+ "loss": 0.3698,
6795
+ "step": 7170
6796
+ },
6797
+ {
6798
+ "epoch": 1.42,
6799
+ "learning_rate": 0.0002,
6800
+ "loss": 0.4155,
6801
+ "step": 7180
6802
+ },
6803
+ {
6804
+ "epoch": 1.42,
6805
+ "learning_rate": 0.0002,
6806
+ "loss": 0.3653,
6807
+ "step": 7190
6808
+ },
6809
+ {
6810
+ "epoch": 1.42,
6811
+ "learning_rate": 0.0002,
6812
+ "loss": 0.3598,
6813
+ "step": 7200
6814
+ },
6815
+ {
6816
+ "epoch": 1.42,
6817
+ "eval_loss": 0.43925899267196655,
6818
+ "eval_runtime": 120.9998,
6819
+ "eval_samples_per_second": 8.264,
6820
+ "eval_steps_per_second": 4.132,
6821
+ "step": 7200
6822
+ },
6823
+ {
6824
+ "epoch": 1.42,
6825
+ "mmlu_eval_accuracy": 0.48394951792577023,
6826
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
6827
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
6828
+ "mmlu_eval_accuracy_astronomy": 0.5625,
6829
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
6830
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5862068965517241,
6831
+ "mmlu_eval_accuracy_college_biology": 0.375,
6832
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
6833
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
6834
+ "mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
6835
+ "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
6836
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
6837
+ "mmlu_eval_accuracy_computer_security": 0.2727272727272727,
6838
+ "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
6839
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
6840
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
6841
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
6842
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
6843
+ "mmlu_eval_accuracy_global_facts": 0.3,
6844
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
6845
+ "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
6846
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6847
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
6848
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
6849
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
6850
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256,
6851
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
6852
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077,
6853
+ "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
6854
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
6855
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
6856
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
6857
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
6858
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
6859
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
6860
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
6861
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
6862
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
6863
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
6864
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
6865
+ "mmlu_eval_accuracy_marketing": 0.8,
6866
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
6867
+ "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
6868
+ "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
6869
+ "mmlu_eval_accuracy_moral_scenarios": 0.26,
6870
+ "mmlu_eval_accuracy_nutrition": 0.5454545454545454,
6871
+ "mmlu_eval_accuracy_philosophy": 0.5,
6872
+ "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
6873
+ "mmlu_eval_accuracy_professional_accounting": 0.45161290322580644,
6874
+ "mmlu_eval_accuracy_professional_law": 0.35294117647058826,
6875
+ "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
6876
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
6877
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
6878
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
6879
+ "mmlu_eval_accuracy_sociology": 0.7727272727272727,
6880
+ "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
6881
+ "mmlu_eval_accuracy_virology": 0.5,
6882
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
6883
+ "mmlu_loss": 1.127036721486958,
6884
+ "step": 7200
6885
+ },
6886
+ {
6887
+ "epoch": 1.42,
6888
+ "learning_rate": 0.0002,
6889
+ "loss": 0.3588,
6890
+ "step": 7210
6891
+ },
6892
+ {
6893
+ "epoch": 1.43,
6894
+ "learning_rate": 0.0002,
6895
+ "loss": 0.3902,
6896
+ "step": 7220
6897
+ },
6898
+ {
6899
+ "epoch": 1.43,
6900
+ "learning_rate": 0.0002,
6901
+ "loss": 0.3806,
6902
+ "step": 7230
6903
+ },
6904
+ {
6905
+ "epoch": 1.43,
6906
+ "learning_rate": 0.0002,
6907
+ "loss": 0.3985,
6908
+ "step": 7240
6909
+ },
6910
+ {
6911
+ "epoch": 1.43,
6912
+ "learning_rate": 0.0002,
6913
+ "loss": 0.3945,
6914
+ "step": 7250
6915
+ },
6916
+ {
6917
+ "epoch": 1.43,
6918
+ "learning_rate": 0.0002,
6919
+ "loss": 0.4605,
6920
+ "step": 7260
6921
+ },
6922
+ {
6923
+ "epoch": 1.44,
6924
+ "learning_rate": 0.0002,
6925
+ "loss": 0.3761,
6926
+ "step": 7270
6927
+ },
6928
+ {
6929
+ "epoch": 1.44,
6930
+ "learning_rate": 0.0002,
6931
+ "loss": 0.3667,
6932
+ "step": 7280
6933
+ },
6934
+ {
6935
+ "epoch": 1.44,
6936
+ "learning_rate": 0.0002,
6937
+ "loss": 0.3682,
6938
+ "step": 7290
6939
+ },
6940
+ {
6941
+ "epoch": 1.44,
6942
+ "learning_rate": 0.0002,
6943
+ "loss": 0.3361,
6944
+ "step": 7300
6945
+ },
6946
+ {
6947
+ "epoch": 1.44,
6948
+ "learning_rate": 0.0002,
6949
+ "loss": 0.3685,
6950
+ "step": 7310
6951
+ },
6952
+ {
6953
+ "epoch": 1.45,
6954
+ "learning_rate": 0.0002,
6955
+ "loss": 0.3448,
6956
+ "step": 7320
6957
+ },
6958
+ {
6959
+ "epoch": 1.45,
6960
+ "learning_rate": 0.0002,
6961
+ "loss": 0.3498,
6962
+ "step": 7330
6963
+ },
6964
+ {
6965
+ "epoch": 1.45,
6966
+ "learning_rate": 0.0002,
6967
+ "loss": 0.3714,
6968
+ "step": 7340
6969
+ },
6970
+ {
6971
+ "epoch": 1.45,
6972
+ "learning_rate": 0.0002,
6973
+ "loss": 0.3915,
6974
+ "step": 7350
6975
+ },
6976
+ {
6977
+ "epoch": 1.45,
6978
+ "learning_rate": 0.0002,
6979
+ "loss": 0.3867,
6980
+ "step": 7360
6981
+ },
6982
+ {
6983
+ "epoch": 1.46,
6984
+ "learning_rate": 0.0002,
6985
+ "loss": 0.3838,
6986
+ "step": 7370
6987
+ },
6988
+ {
6989
+ "epoch": 1.46,
6990
+ "learning_rate": 0.0002,
6991
+ "loss": 0.3923,
6992
+ "step": 7380
6993
+ },
6994
+ {
6995
+ "epoch": 1.46,
6996
+ "learning_rate": 0.0002,
6997
+ "loss": 0.3739,
6998
+ "step": 7390
6999
+ },
7000
+ {
7001
+ "epoch": 1.46,
7002
+ "learning_rate": 0.0002,
7003
+ "loss": 0.4029,
7004
+ "step": 7400
7005
+ },
7006
+ {
7007
+ "epoch": 1.46,
7008
+ "eval_loss": 0.43981724977493286,
7009
+ "eval_runtime": 121.0098,
7010
+ "eval_samples_per_second": 8.264,
7011
+ "eval_steps_per_second": 4.132,
7012
+ "step": 7400
7013
+ },
7014
+ {
7015
+ "epoch": 1.46,
7016
+ "mmlu_eval_accuracy": 0.47466190250303497,
7017
+ "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
7018
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
7019
+ "mmlu_eval_accuracy_astronomy": 0.5625,
7020
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
7021
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
7022
+ "mmlu_eval_accuracy_college_biology": 0.25,
7023
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
7024
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
7025
+ "mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
7026
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
7027
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
7028
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
7029
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
7030
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
7031
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
7032
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
7033
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
7034
+ "mmlu_eval_accuracy_global_facts": 0.3,
7035
+ "mmlu_eval_accuracy_high_school_biology": 0.34375,
7036
+ "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
7037
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7038
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
7039
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
7040
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
7041
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256,
7042
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
7043
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.34615384615384615,
7044
+ "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
7045
+ "mmlu_eval_accuracy_high_school_psychology": 0.8,
7046
+ "mmlu_eval_accuracy_high_school_statistics": 0.43478260869565216,
7047
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7048
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
7049
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
7050
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
7051
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
7052
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7053
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
7054
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
7055
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7056
+ "mmlu_eval_accuracy_marketing": 0.8,
7057
+ "mmlu_eval_accuracy_medical_genetics": 1.0,
7058
+ "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
7059
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
7060
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
7061
+ "mmlu_eval_accuracy_nutrition": 0.5757575757575758,
7062
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
7063
+ "mmlu_eval_accuracy_prehistory": 0.34285714285714286,
7064
+ "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
7065
+ "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
7066
+ "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
7067
+ "mmlu_eval_accuracy_professional_psychology": 0.4492753623188406,
7068
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
7069
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
7070
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
7071
+ "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
7072
+ "mmlu_eval_accuracy_virology": 0.5,
7073
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
7074
+ "mmlu_loss": 1.1439847480846137,
7075
+ "step": 7400
7076
+ },
7077
+ {
7078
+ "epoch": 1.46,
7079
+ "learning_rate": 0.0002,
7080
+ "loss": 0.3562,
7081
+ "step": 7410
7082
+ },
7083
+ {
7084
+ "epoch": 1.46,
7085
+ "learning_rate": 0.0002,
7086
+ "loss": 0.3582,
7087
+ "step": 7420
7088
+ },
7089
+ {
7090
+ "epoch": 1.47,
7091
+ "learning_rate": 0.0002,
7092
+ "loss": 0.4188,
7093
+ "step": 7430
7094
+ },
7095
+ {
7096
+ "epoch": 1.47,
7097
+ "learning_rate": 0.0002,
7098
+ "loss": 0.3889,
7099
+ "step": 7440
7100
+ },
7101
+ {
7102
+ "epoch": 1.47,
7103
+ "learning_rate": 0.0002,
7104
+ "loss": 0.3712,
7105
+ "step": 7450
7106
+ },
7107
+ {
7108
+ "epoch": 1.47,
7109
+ "learning_rate": 0.0002,
7110
+ "loss": 0.4247,
7111
+ "step": 7460
7112
+ },
7113
+ {
7114
+ "epoch": 1.47,
7115
+ "learning_rate": 0.0002,
7116
+ "loss": 0.3805,
7117
+ "step": 7470
7118
+ },
7119
+ {
7120
+ "epoch": 1.48,
7121
+ "learning_rate": 0.0002,
7122
+ "loss": 0.3322,
7123
+ "step": 7480
7124
+ },
7125
+ {
7126
+ "epoch": 1.48,
7127
+ "learning_rate": 0.0002,
7128
+ "loss": 0.3859,
7129
+ "step": 7490
7130
+ },
7131
+ {
7132
+ "epoch": 1.48,
7133
+ "learning_rate": 0.0002,
7134
+ "loss": 0.3529,
7135
+ "step": 7500
7136
+ },
7137
+ {
7138
+ "epoch": 1.48,
7139
+ "learning_rate": 0.0002,
7140
+ "loss": 0.3412,
7141
+ "step": 7510
7142
+ },
7143
+ {
7144
+ "epoch": 1.48,
7145
+ "learning_rate": 0.0002,
7146
+ "loss": 0.4411,
7147
+ "step": 7520
7148
+ },
7149
+ {
7150
+ "epoch": 1.49,
7151
+ "learning_rate": 0.0002,
7152
+ "loss": 0.3807,
7153
+ "step": 7530
7154
+ },
7155
+ {
7156
+ "epoch": 1.49,
7157
+ "learning_rate": 0.0002,
7158
+ "loss": 0.3794,
7159
+ "step": 7540
7160
+ },
7161
+ {
7162
+ "epoch": 1.49,
7163
+ "learning_rate": 0.0002,
7164
+ "loss": 0.355,
7165
+ "step": 7550
7166
+ },
7167
+ {
7168
+ "epoch": 1.49,
7169
+ "learning_rate": 0.0002,
7170
+ "loss": 0.404,
7171
+ "step": 7560
7172
+ },
7173
+ {
7174
+ "epoch": 1.49,
7175
+ "learning_rate": 0.0002,
7176
+ "loss": 0.4042,
7177
+ "step": 7570
7178
+ },
7179
+ {
7180
+ "epoch": 1.5,
7181
+ "learning_rate": 0.0002,
7182
+ "loss": 0.3696,
7183
+ "step": 7580
7184
+ },
7185
+ {
7186
+ "epoch": 1.5,
7187
+ "learning_rate": 0.0002,
7188
+ "loss": 0.3807,
7189
+ "step": 7590
7190
+ },
7191
+ {
7192
+ "epoch": 1.5,
7193
+ "learning_rate": 0.0002,
7194
+ "loss": 0.4191,
7195
+ "step": 7600
7196
+ },
7197
+ {
7198
+ "epoch": 1.5,
7199
+ "eval_loss": 0.43704837560653687,
7200
+ "eval_runtime": 120.9943,
7201
+ "eval_samples_per_second": 8.265,
7202
+ "eval_steps_per_second": 4.132,
7203
+ "step": 7600
7204
+ },
7205
+ {
7206
+ "epoch": 1.5,
7207
+ "mmlu_eval_accuracy": 0.4864390912539841,
7208
+ "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
7209
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
7210
+ "mmlu_eval_accuracy_astronomy": 0.625,
7211
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
7212
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5862068965517241,
7213
+ "mmlu_eval_accuracy_college_biology": 0.25,
7214
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
7215
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
7216
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
7217
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
7218
+ "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
7219
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
7220
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
7221
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
7222
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
7223
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
7224
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
7225
+ "mmlu_eval_accuracy_global_facts": 0.3,
7226
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
7227
+ "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
7228
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7229
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
7230
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
7231
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
7232
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.3953488372093023,
7233
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
7234
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.34615384615384615,
7235
+ "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
7236
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
7237
+ "mmlu_eval_accuracy_high_school_statistics": 0.43478260869565216,
7238
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
7239
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
7240
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
7241
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
7242
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
7243
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
7244
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
7245
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
7246
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7247
+ "mmlu_eval_accuracy_marketing": 0.8,
7248
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
7249
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
7250
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
7251
+ "mmlu_eval_accuracy_moral_scenarios": 0.27,
7252
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
7253
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
7254
+ "mmlu_eval_accuracy_prehistory": 0.37142857142857144,
7255
+ "mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
7256
+ "mmlu_eval_accuracy_professional_law": 0.34705882352941175,
7257
+ "mmlu_eval_accuracy_professional_medicine": 0.4838709677419355,
7258
+ "mmlu_eval_accuracy_professional_psychology": 0.42028985507246375,
7259
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
7260
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
7261
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
7262
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
7263
+ "mmlu_eval_accuracy_virology": 0.4444444444444444,
7264
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
7265
+ "mmlu_loss": 1.202333774018848,
7266
+ "step": 7600
7267
+ },
7268
+ {
7269
+ "epoch": 1.5,
7270
+ "learning_rate": 0.0002,
7271
+ "loss": 0.4169,
7272
+ "step": 7610
7273
+ },
7274
+ {
7275
+ "epoch": 1.5,
7276
+ "learning_rate": 0.0002,
7277
+ "loss": 0.4004,
7278
+ "step": 7620
7279
+ },
7280
+ {
7281
+ "epoch": 1.51,
7282
+ "learning_rate": 0.0002,
7283
+ "loss": 0.3925,
7284
+ "step": 7630
7285
+ },
7286
+ {
7287
+ "epoch": 1.51,
7288
+ "learning_rate": 0.0002,
7289
+ "loss": 0.3838,
7290
+ "step": 7640
7291
+ },
7292
+ {
7293
+ "epoch": 1.51,
7294
+ "learning_rate": 0.0002,
7295
+ "loss": 0.339,
7296
+ "step": 7650
7297
+ },
7298
+ {
7299
+ "epoch": 1.51,
7300
+ "learning_rate": 0.0002,
7301
+ "loss": 0.3929,
7302
+ "step": 7660
7303
+ },
7304
+ {
7305
+ "epoch": 1.51,
7306
+ "learning_rate": 0.0002,
7307
+ "loss": 0.4526,
7308
+ "step": 7670
7309
+ },
7310
+ {
7311
+ "epoch": 1.52,
7312
+ "learning_rate": 0.0002,
7313
+ "loss": 0.3797,
7314
+ "step": 7680
7315
+ },
7316
+ {
7317
+ "epoch": 1.52,
7318
+ "learning_rate": 0.0002,
7319
+ "loss": 0.4072,
7320
+ "step": 7690
7321
+ },
7322
+ {
7323
+ "epoch": 1.52,
7324
+ "learning_rate": 0.0002,
7325
+ "loss": 0.3355,
7326
+ "step": 7700
7327
+ },
7328
+ {
7329
+ "epoch": 1.52,
7330
+ "learning_rate": 0.0002,
7331
+ "loss": 0.3736,
7332
+ "step": 7710
7333
+ },
7334
+ {
7335
+ "epoch": 1.52,
7336
+ "learning_rate": 0.0002,
7337
+ "loss": 0.3589,
7338
+ "step": 7720
7339
+ },
7340
+ {
7341
+ "epoch": 1.53,
7342
+ "learning_rate": 0.0002,
7343
+ "loss": 0.3284,
7344
+ "step": 7730
7345
+ },
7346
+ {
7347
+ "epoch": 1.53,
7348
+ "learning_rate": 0.0002,
7349
+ "loss": 0.3473,
7350
+ "step": 7740
7351
+ },
7352
+ {
7353
+ "epoch": 1.53,
7354
+ "learning_rate": 0.0002,
7355
+ "loss": 0.3735,
7356
+ "step": 7750
7357
+ },
7358
+ {
7359
+ "epoch": 1.53,
7360
+ "learning_rate": 0.0002,
7361
+ "loss": 0.3869,
7362
+ "step": 7760
7363
+ },
7364
+ {
7365
+ "epoch": 1.53,
7366
+ "learning_rate": 0.0002,
7367
+ "loss": 0.367,
7368
+ "step": 7770
7369
+ },
7370
+ {
7371
+ "epoch": 1.54,
7372
+ "learning_rate": 0.0002,
7373
+ "loss": 0.4084,
7374
+ "step": 7780
7375
+ },
7376
+ {
7377
+ "epoch": 1.54,
7378
+ "learning_rate": 0.0002,
7379
+ "loss": 0.3827,
7380
+ "step": 7790
7381
+ },
7382
+ {
7383
+ "epoch": 1.54,
7384
+ "learning_rate": 0.0002,
7385
+ "loss": 0.4602,
7386
+ "step": 7800
7387
+ },
7388
+ {
7389
+ "epoch": 1.54,
7390
+ "eval_loss": 0.4351891577243805,
7391
+ "eval_runtime": 121.0193,
7392
+ "eval_samples_per_second": 8.263,
7393
+ "eval_steps_per_second": 4.132,
7394
+ "step": 7800
7395
+ },
7396
+ {
7397
+ "epoch": 1.54,
7398
+ "mmlu_eval_accuracy": 0.48877828979099663,
7399
+ "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
7400
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
7401
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7402
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
7403
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
7404
+ "mmlu_eval_accuracy_college_biology": 0.3125,
7405
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
7406
+ "mmlu_eval_accuracy_college_computer_science": 0.45454545454545453,
7407
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
7408
+ "mmlu_eval_accuracy_college_medicine": 0.5,
7409
+ "mmlu_eval_accuracy_college_physics": 0.5454545454545454,
7410
+ "mmlu_eval_accuracy_computer_security": 0.2727272727272727,
7411
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
7412
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
7413
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
7414
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
7415
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
7416
+ "mmlu_eval_accuracy_global_facts": 0.3,
7417
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
7418
+ "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
7419
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7420
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
7421
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
7422
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
7423
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.27906976744186046,
7424
+ "mmlu_eval_accuracy_high_school_mathematics": 0.13793103448275862,
7425
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
7426
+ "mmlu_eval_accuracy_high_school_physics": 0.4117647058823529,
7427
+ "mmlu_eval_accuracy_high_school_psychology": 0.8166666666666667,
7428
+ "mmlu_eval_accuracy_high_school_statistics": 0.43478260869565216,
7429
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7430
+ "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
7431
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
7432
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
7433
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
7434
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
7435
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
7436
+ "mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
7437
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7438
+ "mmlu_eval_accuracy_marketing": 0.8,
7439
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
7440
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
7441
+ "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
7442
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
7443
+ "mmlu_eval_accuracy_nutrition": 0.5757575757575758,
7444
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
7445
+ "mmlu_eval_accuracy_prehistory": 0.45714285714285713,
7446
+ "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
7447
+ "mmlu_eval_accuracy_professional_law": 0.3235294117647059,
7448
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
7449
+ "mmlu_eval_accuracy_professional_psychology": 0.391304347826087,
7450
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
7451
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
7452
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
7453
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
7454
+ "mmlu_eval_accuracy_virology": 0.5,
7455
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
7456
+ "mmlu_loss": 1.0244236340838686,
7457
+ "step": 7800
7458
+ },
7459
+ {
7460
+ "epoch": 1.54,
7461
+ "learning_rate": 0.0002,
7462
+ "loss": 0.3771,
7463
+ "step": 7810
7464
+ },
7465
+ {
7466
+ "epoch": 1.54,
7467
+ "learning_rate": 0.0002,
7468
+ "loss": 0.4082,
7469
+ "step": 7820
7470
+ },
7471
+ {
7472
+ "epoch": 1.55,
7473
+ "learning_rate": 0.0002,
7474
+ "loss": 0.4122,
7475
+ "step": 7830
7476
+ },
7477
+ {
7478
+ "epoch": 1.55,
7479
+ "learning_rate": 0.0002,
7480
+ "loss": 0.4006,
7481
+ "step": 7840
7482
+ },
7483
+ {
7484
+ "epoch": 1.55,
7485
+ "learning_rate": 0.0002,
7486
+ "loss": 0.4035,
7487
+ "step": 7850
7488
+ },
7489
+ {
7490
+ "epoch": 1.55,
7491
+ "learning_rate": 0.0002,
7492
+ "loss": 0.3887,
7493
+ "step": 7860
7494
+ },
7495
+ {
7496
+ "epoch": 1.55,
7497
+ "learning_rate": 0.0002,
7498
+ "loss": 0.3624,
7499
+ "step": 7870
7500
+ },
7501
+ {
7502
+ "epoch": 1.56,
7503
+ "learning_rate": 0.0002,
7504
+ "loss": 0.3508,
7505
+ "step": 7880
7506
+ },
7507
+ {
7508
+ "epoch": 1.56,
7509
+ "learning_rate": 0.0002,
7510
+ "loss": 0.3463,
7511
+ "step": 7890
7512
+ },
7513
+ {
7514
+ "epoch": 1.56,
7515
+ "learning_rate": 0.0002,
7516
+ "loss": 0.3644,
7517
+ "step": 7900
7518
+ },
7519
+ {
7520
+ "epoch": 1.56,
7521
+ "learning_rate": 0.0002,
7522
+ "loss": 0.428,
7523
+ "step": 7910
7524
+ },
7525
+ {
7526
+ "epoch": 1.56,
7527
+ "learning_rate": 0.0002,
7528
+ "loss": 0.3583,
7529
+ "step": 7920
7530
+ },
7531
+ {
7532
+ "epoch": 1.57,
7533
+ "learning_rate": 0.0002,
7534
+ "loss": 0.3895,
7535
+ "step": 7930
7536
+ },
7537
+ {
7538
+ "epoch": 1.57,
7539
+ "learning_rate": 0.0002,
7540
+ "loss": 0.379,
7541
+ "step": 7940
7542
+ },
7543
+ {
7544
+ "epoch": 1.57,
7545
+ "learning_rate": 0.0002,
7546
+ "loss": 0.3231,
7547
+ "step": 7950
7548
+ },
7549
+ {
7550
+ "epoch": 1.57,
7551
+ "learning_rate": 0.0002,
7552
+ "loss": 0.3399,
7553
+ "step": 7960
7554
+ },
7555
+ {
7556
+ "epoch": 1.57,
7557
+ "learning_rate": 0.0002,
7558
+ "loss": 0.4171,
7559
+ "step": 7970
7560
+ },
7561
+ {
7562
+ "epoch": 1.58,
7563
+ "learning_rate": 0.0002,
7564
+ "loss": 0.4399,
7565
+ "step": 7980
7566
+ },
7567
+ {
7568
+ "epoch": 1.58,
7569
+ "learning_rate": 0.0002,
7570
+ "loss": 0.3888,
7571
+ "step": 7990
7572
+ },
7573
+ {
7574
+ "epoch": 1.58,
7575
+ "learning_rate": 0.0002,
7576
+ "loss": 0.3381,
7577
+ "step": 8000
7578
+ },
7579
+ {
7580
+ "epoch": 1.58,
7581
+ "eval_loss": 0.43523940443992615,
7582
+ "eval_runtime": 120.9711,
7583
+ "eval_samples_per_second": 8.266,
7584
+ "eval_steps_per_second": 4.133,
7585
+ "step": 8000
7586
+ },
7587
+ {
7588
+ "epoch": 1.58,
7589
+ "mmlu_eval_accuracy": 0.5000652378894127,
7590
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7591
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
7592
+ "mmlu_eval_accuracy_astronomy": 0.5,
7593
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
7594
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
7595
+ "mmlu_eval_accuracy_college_biology": 0.3125,
7596
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
7597
+ "mmlu_eval_accuracy_college_computer_science": 0.45454545454545453,
7598
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
7599
+ "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
7600
+ "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
7601
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
7602
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
7603
+ "mmlu_eval_accuracy_econometrics": 0.08333333333333333,
7604
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
7605
+ "mmlu_eval_accuracy_elementary_mathematics": 0.4146341463414634,
7606
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
7607
+ "mmlu_eval_accuracy_global_facts": 0.3,
7608
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
7609
+ "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
7610
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7611
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
7612
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
7613
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
7614
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723,
7615
+ "mmlu_eval_accuracy_high_school_mathematics": 0.3103448275862069,
7616
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.34615384615384615,
7617
+ "mmlu_eval_accuracy_high_school_physics": 0.35294117647058826,
7618
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
7619
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
7620
+ "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
7621
+ "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
7622
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
7623
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
7624
+ "mmlu_eval_accuracy_international_law": 0.7692307692307693,
7625
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
7626
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
7627
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
7628
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
7629
+ "mmlu_eval_accuracy_marketing": 0.8,
7630
+ "mmlu_eval_accuracy_medical_genetics": 1.0,
7631
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
7632
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
7633
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
7634
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
7635
+ "mmlu_eval_accuracy_philosophy": 0.4117647058823529,
7636
+ "mmlu_eval_accuracy_prehistory": 0.45714285714285713,
7637
+ "mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
7638
+ "mmlu_eval_accuracy_professional_law": 0.3176470588235294,
7639
+ "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
7640
+ "mmlu_eval_accuracy_professional_psychology": 0.43478260869565216,
7641
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
7642
+ "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
7643
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
7644
+ "mmlu_eval_accuracy_us_foreign_policy": 0.8181818181818182,
7645
+ "mmlu_eval_accuracy_virology": 0.5,
7646
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
7647
+ "mmlu_loss": 1.1098531644120229,
7648
+ "step": 8000
7649
+ },
7650
+ {
7651
+ "epoch": 1.58,
7652
+ "learning_rate": 0.0002,
7653
+ "loss": 0.3125,
7654
+ "step": 8010
7655
+ },
7656
+ {
7657
+ "epoch": 1.58,
7658
+ "learning_rate": 0.0002,
7659
+ "loss": 0.312,
7660
+ "step": 8020
7661
+ },
7662
+ {
7663
+ "epoch": 1.59,
7664
+ "learning_rate": 0.0002,
7665
+ "loss": 0.3938,
7666
+ "step": 8030
7667
+ },
7668
+ {
7669
+ "epoch": 1.59,
7670
+ "learning_rate": 0.0002,
7671
+ "loss": 0.363,
7672
+ "step": 8040
7673
+ },
7674
+ {
7675
+ "epoch": 1.59,
7676
+ "learning_rate": 0.0002,
7677
+ "loss": 0.3951,
7678
+ "step": 8050
7679
+ },
7680
+ {
7681
+ "epoch": 1.59,
7682
+ "learning_rate": 0.0002,
7683
+ "loss": 0.3938,
7684
+ "step": 8060
7685
+ },
7686
+ {
7687
+ "epoch": 1.59,
7688
+ "learning_rate": 0.0002,
7689
+ "loss": 0.3905,
7690
+ "step": 8070
7691
+ },
7692
+ {
7693
+ "epoch": 1.6,
7694
+ "learning_rate": 0.0002,
7695
+ "loss": 0.3564,
7696
+ "step": 8080
7697
+ },
7698
+ {
7699
+ "epoch": 1.6,
7700
+ "learning_rate": 0.0002,
7701
+ "loss": 0.4336,
7702
+ "step": 8090
7703
+ },
7704
+ {
7705
+ "epoch": 1.6,
7706
+ "learning_rate": 0.0002,
7707
+ "loss": 0.3662,
7708
+ "step": 8100
7709
+ },
7710
+ {
7711
+ "epoch": 1.6,
7712
+ "learning_rate": 0.0002,
7713
+ "loss": 0.3913,
7714
+ "step": 8110
7715
+ },
7716
+ {
7717
+ "epoch": 1.6,
7718
+ "learning_rate": 0.0002,
7719
+ "loss": 0.3552,
7720
+ "step": 8120
7721
+ },
7722
+ {
7723
+ "epoch": 1.61,
7724
+ "learning_rate": 0.0002,
7725
+ "loss": 0.3672,
7726
+ "step": 8130
7727
+ },
7728
+ {
7729
+ "epoch": 1.61,
7730
+ "learning_rate": 0.0002,
7731
+ "loss": 0.4189,
7732
+ "step": 8140
7733
+ },
7734
+ {
7735
+ "epoch": 1.61,
7736
+ "learning_rate": 0.0002,
7737
+ "loss": 0.4258,
7738
+ "step": 8150
7739
+ },
7740
+ {
7741
+ "epoch": 1.61,
7742
+ "learning_rate": 0.0002,
7743
+ "loss": 0.3944,
7744
+ "step": 8160
7745
+ },
7746
+ {
7747
+ "epoch": 1.61,
7748
+ "learning_rate": 0.0002,
7749
+ "loss": 0.3819,
7750
+ "step": 8170
7751
+ },
7752
+ {
7753
+ "epoch": 1.62,
7754
+ "learning_rate": 0.0002,
7755
+ "loss": 0.326,
7756
+ "step": 8180
7757
+ },
7758
+ {
7759
+ "epoch": 1.62,
7760
+ "learning_rate": 0.0002,
7761
+ "loss": 0.3583,
7762
+ "step": 8190
7763
+ },
7764
+ {
7765
+ "epoch": 1.62,
7766
+ "learning_rate": 0.0002,
7767
+ "loss": 0.3877,
7768
+ "step": 8200
7769
+ },
7770
+ {
7771
+ "epoch": 1.62,
7772
+ "eval_loss": 0.43449267745018005,
7773
+ "eval_runtime": 120.9418,
7774
+ "eval_samples_per_second": 8.268,
7775
+ "eval_steps_per_second": 4.134,
7776
+ "step": 8200
7777
+ },
7778
+ {
7779
+ "epoch": 1.62,
7780
+ "mmlu_eval_accuracy": 0.49162575918435,
7781
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
7782
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
7783
+ "mmlu_eval_accuracy_astronomy": 0.5,
7784
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
7785
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
7786
+ "mmlu_eval_accuracy_college_biology": 0.25,
7787
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
7788
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
7789
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
7790
+ "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
7791
+ "mmlu_eval_accuracy_college_physics": 0.5454545454545454,
7792
+ "mmlu_eval_accuracy_computer_security": 0.2727272727272727,
7793
+ "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
7794
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
7795
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
7796
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
7797
+ "mmlu_eval_accuracy_formal_logic": 0.35714285714285715,
7798
+ "mmlu_eval_accuracy_global_facts": 0.3,
7799
+ "mmlu_eval_accuracy_high_school_biology": 0.25,
7800
+ "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
7801
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7802
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
7803
+ "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
7804
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
7805
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.3953488372093023,
7806
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
7807
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.2692307692307692,
7808
+ "mmlu_eval_accuracy_high_school_physics": 0.4117647058823529,
7809
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
7810
+ "mmlu_eval_accuracy_high_school_statistics": 0.5217391304347826,
7811
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7812
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
7813
+ "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
7814
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
7815
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
7816
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
7817
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
7818
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
7819
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7820
+ "mmlu_eval_accuracy_marketing": 0.84,
7821
+ "mmlu_eval_accuracy_medical_genetics": 1.0,
7822
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
7823
+ "mmlu_eval_accuracy_moral_disputes": 0.42105263157894735,
7824
+ "mmlu_eval_accuracy_moral_scenarios": 0.26,
7825
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
7826
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
7827
+ "mmlu_eval_accuracy_prehistory": 0.4,
7828
+ "mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
7829
+ "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
7830
+ "mmlu_eval_accuracy_professional_medicine": 0.5161290322580645,
7831
+ "mmlu_eval_accuracy_professional_psychology": 0.4492753623188406,
7832
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
7833
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
7834
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
7835
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
7836
+ "mmlu_eval_accuracy_virology": 0.5,
7837
+ "mmlu_eval_accuracy_world_religions": 0.7894736842105263,
7838
+ "mmlu_loss": 1.1440130493970826,
7839
+ "step": 8200
7840
+ },
7841
+ {
7842
+ "epoch": 1.62,
7843
+ "learning_rate": 0.0002,
7844
+ "loss": 0.399,
7845
+ "step": 8210
7846
+ },
7847
+ {
7848
+ "epoch": 1.62,
7849
+ "learning_rate": 0.0002,
7850
+ "loss": 0.3728,
7851
+ "step": 8220
7852
+ },
7853
+ {
7854
+ "epoch": 1.62,
7855
+ "learning_rate": 0.0002,
7856
+ "loss": 0.3855,
7857
+ "step": 8230
7858
+ },
7859
+ {
7860
+ "epoch": 1.63,
7861
+ "learning_rate": 0.0002,
7862
+ "loss": 0.3794,
7863
+ "step": 8240
7864
+ },
7865
+ {
7866
+ "epoch": 1.63,
7867
+ "learning_rate": 0.0002,
7868
+ "loss": 0.3679,
7869
+ "step": 8250
7870
+ },
7871
+ {
7872
+ "epoch": 1.63,
7873
+ "learning_rate": 0.0002,
7874
+ "loss": 0.3555,
7875
+ "step": 8260
7876
+ },
7877
+ {
7878
+ "epoch": 1.63,
7879
+ "learning_rate": 0.0002,
7880
+ "loss": 0.33,
7881
+ "step": 8270
7882
+ },
7883
+ {
7884
+ "epoch": 1.63,
7885
+ "learning_rate": 0.0002,
7886
+ "loss": 0.3658,
7887
+ "step": 8280
7888
+ },
7889
+ {
7890
+ "epoch": 1.64,
7891
+ "learning_rate": 0.0002,
7892
+ "loss": 0.3872,
7893
+ "step": 8290
7894
+ },
7895
+ {
7896
+ "epoch": 1.64,
7897
+ "learning_rate": 0.0002,
7898
+ "loss": 0.3699,
7899
+ "step": 8300
7900
+ },
7901
+ {
7902
+ "epoch": 1.64,
7903
+ "learning_rate": 0.0002,
7904
+ "loss": 0.3732,
7905
+ "step": 8310
7906
+ },
7907
+ {
7908
+ "epoch": 1.64,
7909
+ "learning_rate": 0.0002,
7910
+ "loss": 0.3534,
7911
+ "step": 8320
7912
+ },
7913
+ {
7914
+ "epoch": 1.64,
7915
+ "learning_rate": 0.0002,
7916
+ "loss": 0.3955,
7917
+ "step": 8330
7918
+ },
7919
+ {
7920
+ "epoch": 1.65,
7921
+ "learning_rate": 0.0002,
7922
+ "loss": 0.3624,
7923
+ "step": 8340
7924
+ },
7925
+ {
7926
+ "epoch": 1.65,
7927
+ "learning_rate": 0.0002,
7928
+ "loss": 0.3391,
7929
+ "step": 8350
7930
+ },
7931
+ {
7932
+ "epoch": 1.65,
7933
+ "learning_rate": 0.0002,
7934
+ "loss": 0.3551,
7935
+ "step": 8360
7936
+ },
7937
+ {
7938
+ "epoch": 1.65,
7939
+ "learning_rate": 0.0002,
7940
+ "loss": 0.3488,
7941
+ "step": 8370
7942
+ },
7943
+ {
7944
+ "epoch": 1.65,
7945
+ "learning_rate": 0.0002,
7946
+ "loss": 0.39,
7947
+ "step": 8380
7948
+ },
7949
+ {
7950
+ "epoch": 1.66,
7951
+ "learning_rate": 0.0002,
7952
+ "loss": 0.4008,
7953
+ "step": 8390
7954
+ },
7955
+ {
7956
+ "epoch": 1.66,
7957
+ "learning_rate": 0.0002,
7958
+ "loss": 0.3572,
7959
+ "step": 8400
7960
+ },
7961
+ {
7962
+ "epoch": 1.66,
7963
+ "eval_loss": 0.43166613578796387,
7964
+ "eval_runtime": 121.0703,
7965
+ "eval_samples_per_second": 8.26,
7966
+ "eval_steps_per_second": 4.13,
7967
+ "step": 8400
7968
+ },
7969
+ {
7970
+ "epoch": 1.66,
7971
+ "mmlu_eval_accuracy": 0.4921472652780345,
7972
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7973
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
7974
+ "mmlu_eval_accuracy_astronomy": 0.5,
7975
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
7976
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
7977
+ "mmlu_eval_accuracy_college_biology": 0.3125,
7978
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
7979
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
7980
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
7981
+ "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
7982
+ "mmlu_eval_accuracy_college_physics": 0.5454545454545454,
7983
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
7984
+ "mmlu_eval_accuracy_conceptual_physics": 0.3076923076923077,
7985
+ "mmlu_eval_accuracy_econometrics": 0.08333333333333333,
7986
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
7987
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
7988
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
7989
+ "mmlu_eval_accuracy_global_facts": 0.3,
7990
+ "mmlu_eval_accuracy_high_school_biology": 0.34375,
7991
+ "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
7992
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7993
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
7994
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
7995
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
7996
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
7997
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
7998
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077,
7999
+ "mmlu_eval_accuracy_high_school_physics": 0.4117647058823529,
8000
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
8001
+ "mmlu_eval_accuracy_high_school_statistics": 0.4782608695652174,
8002
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
8003
+ "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
8004
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
8005
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
8006
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
8007
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
8008
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
8009
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
8010
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
8011
+ "mmlu_eval_accuracy_marketing": 0.8,
8012
+ "mmlu_eval_accuracy_medical_genetics": 1.0,
8013
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
8014
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
8015
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
8016
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
8017
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
8018
+ "mmlu_eval_accuracy_prehistory": 0.45714285714285713,
8019
+ "mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
8020
+ "mmlu_eval_accuracy_professional_law": 0.3176470588235294,
8021
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
8022
+ "mmlu_eval_accuracy_professional_psychology": 0.391304347826087,
8023
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
8024
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
8025
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
8026
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
8027
+ "mmlu_eval_accuracy_virology": 0.5,
8028
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
8029
+ "mmlu_loss": 1.201037272071714,
8030
+ "step": 8400
8031
+ },
8032
+ {
8033
+ "epoch": 1.66,
8034
+ "learning_rate": 0.0002,
8035
+ "loss": 0.3819,
8036
+ "step": 8410
8037
+ },
8038
+ {
8039
+ "epoch": 1.66,
8040
+ "learning_rate": 0.0002,
8041
+ "loss": 0.4056,
8042
+ "step": 8420
8043
+ },
8044
+ {
8045
+ "epoch": 1.66,
8046
+ "learning_rate": 0.0002,
8047
+ "loss": 0.4169,
8048
+ "step": 8430
8049
+ },
8050
+ {
8051
+ "epoch": 1.67,
8052
+ "learning_rate": 0.0002,
8053
+ "loss": 0.4646,
8054
+ "step": 8440
8055
+ },
8056
+ {
8057
+ "epoch": 1.67,
8058
+ "learning_rate": 0.0002,
8059
+ "loss": 0.3713,
8060
+ "step": 8450
8061
+ },
8062
+ {
8063
+ "epoch": 1.67,
8064
+ "learning_rate": 0.0002,
8065
+ "loss": 0.3924,
8066
+ "step": 8460
8067
+ },
8068
+ {
8069
+ "epoch": 1.67,
8070
+ "learning_rate": 0.0002,
8071
+ "loss": 0.3988,
8072
+ "step": 8470
8073
+ },
8074
+ {
8075
+ "epoch": 1.67,
8076
+ "learning_rate": 0.0002,
8077
+ "loss": 0.4092,
8078
+ "step": 8480
8079
+ },
8080
+ {
8081
+ "epoch": 1.68,
8082
+ "learning_rate": 0.0002,
8083
+ "loss": 0.3528,
8084
+ "step": 8490
8085
+ },
8086
+ {
8087
+ "epoch": 1.68,
8088
+ "learning_rate": 0.0002,
8089
+ "loss": 0.3653,
8090
+ "step": 8500
8091
+ },
8092
+ {
8093
+ "epoch": 1.68,
8094
+ "learning_rate": 0.0002,
8095
+ "loss": 0.3953,
8096
+ "step": 8510
8097
+ },
8098
+ {
8099
+ "epoch": 1.68,
8100
+ "learning_rate": 0.0002,
8101
+ "loss": 0.3782,
8102
+ "step": 8520
8103
+ },
8104
+ {
8105
+ "epoch": 1.68,
8106
+ "learning_rate": 0.0002,
8107
+ "loss": 0.3816,
8108
+ "step": 8530
8109
+ },
8110
+ {
8111
+ "epoch": 1.69,
8112
+ "learning_rate": 0.0002,
8113
+ "loss": 0.4093,
8114
+ "step": 8540
8115
+ },
8116
+ {
8117
+ "epoch": 1.69,
8118
+ "learning_rate": 0.0002,
8119
+ "loss": 0.3487,
8120
+ "step": 8550
8121
+ },
8122
+ {
8123
+ "epoch": 1.69,
8124
+ "learning_rate": 0.0002,
8125
+ "loss": 0.4031,
8126
+ "step": 8560
8127
+ },
8128
+ {
8129
+ "epoch": 1.69,
8130
+ "learning_rate": 0.0002,
8131
+ "loss": 0.3905,
8132
+ "step": 8570
8133
+ },
8134
+ {
8135
+ "epoch": 1.69,
8136
+ "learning_rate": 0.0002,
8137
+ "loss": 0.3379,
8138
+ "step": 8580
8139
+ },
8140
+ {
8141
+ "epoch": 1.7,
8142
+ "learning_rate": 0.0002,
8143
+ "loss": 0.3251,
8144
+ "step": 8590
8145
+ },
8146
+ {
8147
+ "epoch": 1.7,
8148
+ "learning_rate": 0.0002,
8149
+ "loss": 0.3807,
8150
+ "step": 8600
8151
+ },
8152
+ {
8153
+ "epoch": 1.7,
8154
+ "eval_loss": 0.4307052195072174,
8155
+ "eval_runtime": 120.921,
8156
+ "eval_samples_per_second": 8.27,
8157
+ "eval_steps_per_second": 4.135,
8158
+ "step": 8600
8159
+ },
8160
+ {
8161
+ "epoch": 1.7,
8162
+ "mmlu_eval_accuracy": 0.48902984209003103,
8163
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
8164
+ "mmlu_eval_accuracy_anatomy": 0.5,
8165
+ "mmlu_eval_accuracy_astronomy": 0.5625,
8166
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
8167
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
8168
+ "mmlu_eval_accuracy_college_biology": 0.3125,
8169
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
8170
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
8171
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
8172
+ "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
8173
+ "mmlu_eval_accuracy_college_physics": 0.5454545454545454,
8174
+ "mmlu_eval_accuracy_computer_security": 0.2727272727272727,
8175
+ "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
8176
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
8177
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
8178
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
8179
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
8180
+ "mmlu_eval_accuracy_global_facts": 0.3,
8181
+ "mmlu_eval_accuracy_high_school_biology": 0.4375,
8182
+ "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
8183
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
8184
+ "mmlu_eval_accuracy_high_school_european_history": 0.7222222222222222,
8185
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
8186
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
8187
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.3953488372093023,
8188
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
8189
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.23076923076923078,
8190
+ "mmlu_eval_accuracy_high_school_physics": 0.4117647058823529,
8191
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
8192
+ "mmlu_eval_accuracy_high_school_statistics": 0.4782608695652174,
8193
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
8194
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
8195
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
8196
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
8197
+ "mmlu_eval_accuracy_international_law": 0.7692307692307693,
8198
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
8199
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
8200
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
8201
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
8202
+ "mmlu_eval_accuracy_marketing": 0.8,
8203
+ "mmlu_eval_accuracy_medical_genetics": 1.0,
8204
+ "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
8205
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
8206
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
8207
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
8208
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
8209
+ "mmlu_eval_accuracy_prehistory": 0.45714285714285713,
8210
+ "mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
8211
+ "mmlu_eval_accuracy_professional_law": 0.3,
8212
+ "mmlu_eval_accuracy_professional_medicine": 0.4838709677419355,
8213
+ "mmlu_eval_accuracy_professional_psychology": 0.4057971014492754,
8214
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
8215
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
8216
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
8217
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
8218
+ "mmlu_eval_accuracy_virology": 0.4444444444444444,
8219
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
8220
+ "mmlu_loss": 1.1535750755908907,
8221
+ "step": 8600
8222
+ },
8223
+ {
8224
+ "epoch": 1.7,
8225
+ "learning_rate": 0.0002,
8226
+ "loss": 0.3005,
8227
+ "step": 8610
8228
+ },
8229
+ {
8230
+ "epoch": 1.7,
8231
+ "learning_rate": 0.0002,
8232
+ "loss": 0.3911,
8233
+ "step": 8620
8234
+ },
8235
+ {
8236
+ "epoch": 1.7,
8237
+ "learning_rate": 0.0002,
8238
+ "loss": 0.3333,
8239
+ "step": 8630
8240
+ },
8241
+ {
8242
+ "epoch": 1.71,
8243
+ "learning_rate": 0.0002,
8244
+ "loss": 0.3275,
8245
+ "step": 8640
8246
+ },
8247
+ {
8248
+ "epoch": 1.71,
8249
+ "learning_rate": 0.0002,
8250
+ "loss": 0.3681,
8251
+ "step": 8650
8252
+ },
8253
+ {
8254
+ "epoch": 1.71,
8255
+ "learning_rate": 0.0002,
8256
+ "loss": 0.365,
8257
+ "step": 8660
8258
+ },
8259
+ {
8260
+ "epoch": 1.71,
8261
+ "learning_rate": 0.0002,
8262
+ "loss": 0.3958,
8263
+ "step": 8670
8264
+ },
8265
+ {
8266
+ "epoch": 1.71,
8267
+ "learning_rate": 0.0002,
8268
+ "loss": 0.3902,
8269
+ "step": 8680
8270
+ },
8271
+ {
8272
+ "epoch": 1.72,
8273
+ "learning_rate": 0.0002,
8274
+ "loss": 0.3519,
8275
+ "step": 8690
8276
+ },
8277
+ {
8278
+ "epoch": 1.72,
8279
+ "learning_rate": 0.0002,
8280
+ "loss": 0.3752,
8281
+ "step": 8700
8282
+ },
8283
+ {
8284
+ "epoch": 1.72,
8285
+ "learning_rate": 0.0002,
8286
+ "loss": 0.4066,
8287
+ "step": 8710
8288
+ },
8289
+ {
8290
+ "epoch": 1.72,
8291
+ "learning_rate": 0.0002,
8292
+ "loss": 0.407,
8293
+ "step": 8720
8294
+ },
8295
+ {
8296
+ "epoch": 1.72,
8297
+ "learning_rate": 0.0002,
8298
+ "loss": 0.3423,
8299
+ "step": 8730
8300
+ },
8301
+ {
8302
+ "epoch": 1.73,
8303
+ "learning_rate": 0.0002,
8304
+ "loss": 0.3839,
8305
+ "step": 8740
8306
+ },
8307
+ {
8308
+ "epoch": 1.73,
8309
+ "learning_rate": 0.0002,
8310
+ "loss": 0.4293,
8311
+ "step": 8750
8312
+ },
8313
+ {
8314
+ "epoch": 1.73,
8315
+ "learning_rate": 0.0002,
8316
+ "loss": 0.3772,
8317
+ "step": 8760
8318
+ },
8319
+ {
8320
+ "epoch": 1.73,
8321
+ "learning_rate": 0.0002,
8322
+ "loss": 0.3927,
8323
+ "step": 8770
8324
+ },
8325
+ {
8326
+ "epoch": 1.73,
8327
+ "learning_rate": 0.0002,
8328
+ "loss": 0.3952,
8329
+ "step": 8780
8330
+ },
8331
+ {
8332
+ "epoch": 1.74,
8333
+ "learning_rate": 0.0002,
8334
+ "loss": 0.3434,
8335
+ "step": 8790
8336
+ },
8337
+ {
8338
+ "epoch": 1.74,
8339
+ "learning_rate": 0.0002,
8340
+ "loss": 0.3012,
8341
+ "step": 8800
8342
+ },
8343
+ {
8344
+ "epoch": 1.74,
8345
+ "eval_loss": 0.4287540912628174,
8346
+ "eval_runtime": 120.9927,
8347
+ "eval_samples_per_second": 8.265,
8348
+ "eval_steps_per_second": 4.132,
8349
+ "step": 8800
8350
+ },
8351
+ {
8352
+ "epoch": 1.74,
8353
+ "mmlu_eval_accuracy": 0.47671443145279063,
8354
+ "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
8355
+ "mmlu_eval_accuracy_anatomy": 0.5,
8356
+ "mmlu_eval_accuracy_astronomy": 0.4375,
8357
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
8358
+ "mmlu_eval_accuracy_clinical_knowledge": 0.6206896551724138,
8359
+ "mmlu_eval_accuracy_college_biology": 0.375,
8360
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
8361
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
8362
+ "mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
8363
+ "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
8364
+ "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
8365
+ "mmlu_eval_accuracy_computer_security": 0.2727272727272727,
8366
+ "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
8367
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
8368
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
8369
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
8370
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
8371
+ "mmlu_eval_accuracy_global_facts": 0.3,
8372
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
8373
+ "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
8374
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
8375
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
8376
+ "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
8377
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
8378
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.3953488372093023,
8379
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
8380
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077,
8381
+ "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
8382
+ "mmlu_eval_accuracy_high_school_psychology": 0.7833333333333333,
8383
+ "mmlu_eval_accuracy_high_school_statistics": 0.43478260869565216,
8384
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
8385
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
8386
+ "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
8387
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
8388
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
8389
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
8390
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
8391
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
8392
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
8393
+ "mmlu_eval_accuracy_marketing": 0.8,
8394
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
8395
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
8396
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
8397
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
8398
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
8399
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
8400
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
8401
+ "mmlu_eval_accuracy_professional_accounting": 0.4838709677419355,
8402
+ "mmlu_eval_accuracy_professional_law": 0.3,
8403
+ "mmlu_eval_accuracy_professional_medicine": 0.4838709677419355,
8404
+ "mmlu_eval_accuracy_professional_psychology": 0.4057971014492754,
8405
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
8406
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
8407
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
8408
+ "mmlu_eval_accuracy_us_foreign_policy": 0.9090909090909091,
8409
+ "mmlu_eval_accuracy_virology": 0.5,
8410
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
8411
+ "mmlu_loss": 1.2309164613407524,
8412
+ "step": 8800
8413
  }
8414
  ],
8415
  "max_steps": 10000,
8416
  "num_train_epochs": 2,
8417
+ "total_flos": 8.13769492693549e+17,
8418
  "trial_name": null,
8419
  "trial_params": null
8420
  }
{checkpoint-6600 β†’ checkpoint-8800}/training_args.bin RENAMED
File without changes