{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3810027241706848,
"min": 0.3810027241706848,
"max": 1.4327415227890015,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11259.392578125,
"min": 11259.392578125,
"max": 43463.6484375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989894.0,
"min": 29952.0,
"max": 989894.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989894.0,
"min": 29952.0,
"max": 989894.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5958008170127869,
"min": -0.2856314480304718,
"max": 0.7400102615356445,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 157.29141235351562,
"min": -67.69465637207031,
"max": 198.32275390625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02725687250494957,
"min": 0.026031309738755226,
"max": 0.7800797820091248,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.19581413269043,
"min": 6.71607780456543,
"max": 184.87890625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06790267418462982,
"min": 0.06736714873955969,
"max": 0.07418762125765041,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9506374385848175,
"min": 0.5110900348313314,
"max": 1.058998348203022,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0105950219674144,
"min": 0.00011257775114183562,
"max": 0.011147382700015408,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1483303075438016,
"min": 0.000900622009134685,
"max": 0.15984516440067262,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.4537240403428558e-05,
"min": 2.4537240403428558e-05,
"max": 0.0009838354301878857,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0003435213656479998,
"min": 0.0003435213656479998,
"max": 0.010020050197995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245371428571429,
"min": 0.10245371428571429,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.434352,
"min": 1.3691136000000002,
"max": 2.4004248,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 1.2208342857142858e-05,
"min": 1.2208342857142858e-05,
"max": 9.854518857142857e-05,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00017091680000000002,
"min": 0.00017091680000000002,
"max": 0.00104038232,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012177111580967903,
"min": 0.011694434098899364,
"max": 0.35659000277519226,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17047956585884094,
"min": 0.16372208297252655,
"max": 2.4961299896240234,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 482.96666666666664,
"min": 441.6764705882353,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28978.0,
"min": 15984.0,
"max": 33324.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.35520980617062,
"min": -1.0000000521540642,
"max": 1.4382806196808815,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 82.66779817640781,
"min": -32.000001668930054,
"max": 93.96219828724861,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.35520980617062,
"min": -1.0000000521540642,
"max": 1.4382806196808815,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 82.66779817640781,
"min": -32.000001668930054,
"max": 93.96219828724861,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06479253911997238,
"min": 0.05611740758938728,
"max": 9.845519872382283,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.952344886318315,
"min": 3.5462809289165307,
"max": 157.52831795811653,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1746457361",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1746459899"
},
"total": 2537.458616328,
"count": 1,
"self": 0.47547662900069554,
"children": {
"run_training.setup": {
"total": 0.020622181999897293,
"count": 1,
"self": 0.020622181999897293
},
"TrainerController.start_learning": {
"total": 2536.9625175169995,
"count": 1,
"self": 1.3124354622286774,
"children": {
"TrainerController._reset_env": {
"total": 2.219502709000153,
"count": 1,
"self": 2.219502709000153
},
"TrainerController.advance": {
"total": 2533.3347599917715,
"count": 63484,
"self": 1.4098596733283557,
"children": {
"env_step": {
"total": 1501.5122837727222,
"count": 63484,
"self": 1350.8759863185287,
"children": {
"SubprocessEnvManager._take_step": {
"total": 149.85782354919138,
"count": 63484,
"self": 4.705159901363004,
"children": {
"TorchPolicy.evaluate": {
"total": 145.15266364782838,
"count": 62565,
"self": 145.15266364782838
}
}
},
"workers": {
"total": 0.7784739050021017,
"count": 63484,
"self": 0.0,
"children": {
"worker_root": {
"total": 2532.0665197640374,
"count": 63484,
"is_parallel": true,
"self": 1295.8601934498693,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022961549993851804,
"count": 1,
"is_parallel": true,
"self": 0.0006994099985604407,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015967450008247397,
"count": 8,
"is_parallel": true,
"self": 0.0015967450008247397
}
}
},
"UnityEnvironment.step": {
"total": 0.047689295999589376,
"count": 1,
"is_parallel": true,
"self": 0.000533751000148186,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005162659999768948,
"count": 1,
"is_parallel": true,
"self": 0.0005162659999768948
},
"communicator.exchange": {
"total": 0.04507649199968,
"count": 1,
"is_parallel": true,
"self": 0.04507649199968
},
"steps_from_proto": {
"total": 0.0015627869997842936,
"count": 1,
"is_parallel": true,
"self": 0.00033443399934185436,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012283530004424392,
"count": 8,
"is_parallel": true,
"self": 0.0012283530004424392
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1236.206326314168,
"count": 63483,
"is_parallel": true,
"self": 31.635081907414133,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.60558687480807,
"count": 63483,
"is_parallel": true,
"self": 23.60558687480807
},
"communicator.exchange": {
"total": 1084.1305624629204,
"count": 63483,
"is_parallel": true,
"self": 1084.1305624629204
},
"steps_from_proto": {
"total": 96.83509506902556,
"count": 63483,
"is_parallel": true,
"self": 19.54165666966037,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.29343839936519,
"count": 507864,
"is_parallel": true,
"self": 77.29343839936519
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1030.412616545721,
"count": 63484,
"self": 2.525724707905283,
"children": {
"process_trajectory": {
"total": 128.39281419481722,
"count": 63484,
"self": 128.1965452408176,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1962689539996063,
"count": 2,
"self": 0.1962689539996063
}
}
},
"_update_policy": {
"total": 899.4940776429985,
"count": 433,
"self": 498.0586412210314,
"children": {
"TorchPPOOptimizer.update": {
"total": 401.4354364219671,
"count": 38035,
"self": 401.4354364219671
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.079994924832135e-07,
"count": 1,
"self": 9.079994924832135e-07
},
"TrainerController._save_models": {
"total": 0.09581844599961187,
"count": 1,
"self": 0.001811391000956064,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0940070549986558,
"count": 1,
"self": 0.0940070549986558
}
}
}
}
}
}
}