ppo-Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.1504896581172943,
"min": 0.1504896581172943,
"max": 1.4357991218566895,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4452.0859375,
"min": 4452.0859375,
"max": 43556.40234375,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999974.0,
"min": 29952.0,
"max": 2999974.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999974.0,
"min": 29952.0,
"max": 2999974.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.779034435749054,
"min": -0.11108969897031784,
"max": 0.8666342496871948,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 229.81515502929688,
"min": -26.328258514404297,
"max": 262.52069091796875,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.004198036156594753,
"min": -0.009613278321921825,
"max": 0.5368284583091736,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.2384207248687744,
"min": -2.6917178630828857,
"max": 127.22834777832031,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0686938612746589,
"min": 0.06531853751714423,
"max": 0.07446926394394333,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0304079191198836,
"min": 0.5026327675931361,
"max": 1.0570045030896613,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013597396059757632,
"min": 0.00014659730315891563,
"max": 0.01678392351410973,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2039609408963645,
"min": 0.0019057649410659033,
"max": 0.25175885271164594,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5043794985733313e-06,
"min": 1.5043794985733313e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.256569247859997e-05,
"min": 2.256569247859997e-05,
"max": 0.0039693666768778,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10050142666666669,
"min": 0.10050142666666669,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5075214000000003,
"min": 1.3962282666666668,
"max": 2.752421266666667,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.009252399999992e-05,
"min": 6.009252399999992e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0009013878599999988,
"min": 0.0009013878599999988,
"max": 0.13231990778,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.006880967877805233,
"min": 0.006717723328620195,
"max": 0.4850473999977112,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10321451723575592,
"min": 0.09404812753200531,
"max": 3.395331859588623,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 220.27272727272728,
"min": 208.3404255319149,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 26653.0,
"min": 15984.0,
"max": 32860.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.747552442868225,
"min": -1.0000000521540642,
"max": 1.7916478720349325,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 213.20139802992344,
"min": -30.99400159716606,
"max": 256.75639855861664,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.747552442868225,
"min": -1.0000000521540642,
"max": 1.7916478720349325,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 213.20139802992344,
"min": -30.99400159716606,
"max": 256.75639855861664,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.015674081003662202,
"min": 0.015227210674202612,
"max": 10.178515158593655,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.9122378824467887,
"min": 1.9122378824467887,
"max": 162.85624253749847,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1770572759",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1770584506"
},
"total": 11747.432379294,
"count": 1,
"self": 1.0737334519981232,
"children": {
"run_training.setup": {
"total": 0.05354335700030788,
"count": 1,
"self": 0.05354335700030788
},
"TrainerController.start_learning": {
"total": 11746.305102485001,
"count": 1,
"self": 8.594326183589146,
"children": {
"TrainerController._reset_env": {
"total": 4.063957200000004,
"count": 1,
"self": 4.063957200000004
},
"TrainerController.advance": {
"total": 11733.570480002412,
"count": 194966,
"self": 8.485412097752487,
"children": {
"env_step": {
"total": 8340.818109782338,
"count": 194966,
"self": 7798.123360157972,
"children": {
"SubprocessEnvManager._take_step": {
"total": 537.6905320323222,
"count": 194966,
"self": 24.533524908132677,
"children": {
"TorchPolicy.evaluate": {
"total": 513.1570071241895,
"count": 187548,
"self": 513.1570071241895
}
}
},
"workers": {
"total": 5.00421759204437,
"count": 194966,
"self": 0.0,
"children": {
"worker_root": {
"total": 11715.60580070906,
"count": 194966,
"is_parallel": true,
"self": 4545.461138751034,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004828226999961771,
"count": 1,
"is_parallel": true,
"self": 0.0017048810022970429,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003123345997664728,
"count": 8,
"is_parallel": true,
"self": 0.003123345997664728
}
}
},
"UnityEnvironment.step": {
"total": 0.08799931999965338,
"count": 1,
"is_parallel": true,
"self": 0.0007377660003839992,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006140969999250956,
"count": 1,
"is_parallel": true,
"self": 0.0006140969999250956
},
"communicator.exchange": {
"total": 0.08397158399930049,
"count": 1,
"is_parallel": true,
"self": 0.08397158399930049
},
"steps_from_proto": {
"total": 0.0026758730000437936,
"count": 1,
"is_parallel": true,
"self": 0.00048493000213056803,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021909429979132256,
"count": 8,
"is_parallel": true,
"self": 0.0021909429979132256
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 7170.144661958026,
"count": 194965,
"is_parallel": true,
"self": 153.48109574496266,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 103.26651790306096,
"count": 194965,
"is_parallel": true,
"self": 103.26651790306096
},
"communicator.exchange": {
"total": 6454.302466546684,
"count": 194965,
"is_parallel": true,
"self": 6454.302466546684
},
"steps_from_proto": {
"total": 459.0945817633183,
"count": 194965,
"is_parallel": true,
"self": 96.8688043904458,
"children": {
"_process_rank_one_or_two_observation": {
"total": 362.22577737287247,
"count": 1559720,
"is_parallel": true,
"self": 362.22577737287247
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3384.2669581223217,
"count": 194966,
"self": 17.079250931630668,
"children": {
"process_trajectory": {
"total": 533.4337689046697,
"count": 194966,
"self": 532.6649221486678,
"children": {
"RLTrainer._checkpoint": {
"total": 0.768846756001949,
"count": 6,
"self": 0.768846756001949
}
}
},
"_update_policy": {
"total": 2833.7539382860214,
"count": 1395,
"self": 1118.0166832759205,
"children": {
"TorchPPOOptimizer.update": {
"total": 1715.7372550101009,
"count": 68349,
"self": 1715.7372550101009
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0429976100567728e-06,
"count": 1,
"self": 1.0429976100567728e-06
},
"TrainerController._save_models": {
"total": 0.07633805600198684,
"count": 1,
"self": 0.002092820002872031,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07424523599911481,
"count": 1,
"self": 0.07424523599911481
}
}
}
}
}
}
}