{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.43163731694221497,
"min": 0.4153560400009155,
"max": 1.4269607067108154,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12942.212890625,
"min": 12440.744140625,
"max": 43288.28125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989956.0,
"min": 29952.0,
"max": 989956.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989956.0,
"min": 29952.0,
"max": 989956.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4822089374065399,
"min": -0.10434004664421082,
"max": 0.5244603157043457,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 129.71420288085938,
"min": -25.354631423950195,
"max": 142.65321350097656,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.055451054126024246,
"min": -0.013870149850845337,
"max": 0.2779541611671448,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 14.916333198547363,
"min": -3.7726807594299316,
"max": 67.26490783691406,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06502017837354085,
"min": 0.06478529941458772,
"max": 0.07438894039519248,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9753026756031128,
"min": 0.5207225827663473,
"max": 1.0372913038202873,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01691120349892622,
"min": 0.000710129963435088,
"max": 0.01691120349892622,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2536680524838933,
"min": 0.007811429597785968,
"max": 0.2536680524838933,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.560517479859996e-06,
"min": 7.560517479859996e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011340776219789995,
"min": 0.00011340776219789995,
"max": 0.0036333373888875996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252014,
"min": 0.10252014,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5378021,
"min": 1.3886848,
"max": 2.6111124000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002617619859999999,
"min": 0.0002617619859999999,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003926429789999999,
"min": 0.003926429789999999,
"max": 0.12113012876000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007886173203587532,
"min": 0.007886173203587532,
"max": 0.4306356906890869,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11829259991645813,
"min": 0.11122598499059677,
"max": 3.0144498348236084,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 375.4625,
"min": 341.685393258427,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30037.0,
"min": 15984.0,
"max": 33700.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4244774755090475,
"min": -1.0000000521540642,
"max": 1.568388743012139,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 113.9581980407238,
"min": -29.70800169557333,
"max": 139.58659812808037,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4244774755090475,
"min": -1.0000000521540642,
"max": 1.568388743012139,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 113.9581980407238,
"min": -29.70800169557333,
"max": 139.58659812808037,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.030213625391115783,
"min": 0.030213625391115783,
"max": 9.195544684305787,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.4170900312892627,
"min": 2.3112042422872037,
"max": 147.1287149488926,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1722576107",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1722578308"
},
"total": 2200.491875445,
"count": 1,
"self": 1.0929356330007067,
"children": {
"run_training.setup": {
"total": 0.051451882000037585,
"count": 1,
"self": 0.051451882000037585
},
"TrainerController.start_learning": {
"total": 2199.3474879299993,
"count": 1,
"self": 1.5632833710096747,
"children": {
"TrainerController._reset_env": {
"total": 1.8111348999998427,
"count": 1,
"self": 1.8111348999998427
},
"TrainerController.advance": {
"total": 2195.84831694499,
"count": 63811,
"self": 1.3546107928441415,
"children": {
"env_step": {
"total": 1569.1203841080549,
"count": 63811,
"self": 1439.6724325361063,
"children": {
"SubprocessEnvManager._take_step": {
"total": 128.6550199599501,
"count": 63811,
"self": 4.655809844893611,
"children": {
"TorchPolicy.evaluate": {
"total": 123.99921011505648,
"count": 62564,
"self": 123.99921011505648
}
}
},
"workers": {
"total": 0.7929316119984833,
"count": 63811,
"self": 0.0,
"children": {
"worker_root": {
"total": 2194.162218524977,
"count": 63811,
"is_parallel": true,
"self": 873.4329671159853,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020412250000845233,
"count": 1,
"is_parallel": true,
"self": 0.0006687819995931932,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013724430004913302,
"count": 8,
"is_parallel": true,
"self": 0.0013724430004913302
}
}
},
"UnityEnvironment.step": {
"total": 0.048146164000172575,
"count": 1,
"is_parallel": true,
"self": 0.0006165910001527664,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044886499995300255,
"count": 1,
"is_parallel": true,
"self": 0.00044886499995300255
},
"communicator.exchange": {
"total": 0.04546441799993772,
"count": 1,
"is_parallel": true,
"self": 0.04546441799993772
},
"steps_from_proto": {
"total": 0.0016162900001290836,
"count": 1,
"is_parallel": true,
"self": 0.0003537170002800849,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012625729998489987,
"count": 8,
"is_parallel": true,
"self": 0.0012625729998489987
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1320.7292514089918,
"count": 63810,
"is_parallel": true,
"self": 33.13686204685723,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.47245547602256,
"count": 63810,
"is_parallel": true,
"self": 23.47245547602256
},
"communicator.exchange": {
"total": 1165.7749696280557,
"count": 63810,
"is_parallel": true,
"self": 1165.7749696280557
},
"steps_from_proto": {
"total": 98.34496425805628,
"count": 63810,
"is_parallel": true,
"self": 20.01244563117075,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.33251862688553,
"count": 510480,
"is_parallel": true,
"self": 78.33251862688553
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 625.3733220440911,
"count": 63811,
"self": 2.494954543097265,
"children": {
"process_trajectory": {
"total": 128.83818862299495,
"count": 63811,
"self": 128.59726045299476,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2409281700001884,
"count": 2,
"self": 0.2409281700001884
}
}
},
"_update_policy": {
"total": 494.04017887799887,
"count": 451,
"self": 292.51333045900583,
"children": {
"TorchPPOOptimizer.update": {
"total": 201.52684841899304,
"count": 22782,
"self": 201.52684841899304
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.491000148234889e-06,
"count": 1,
"self": 1.491000148234889e-06
},
"TrainerController._save_models": {
"total": 0.12475122299929353,
"count": 1,
"self": 0.0019286069991721888,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12282261600012134,
"count": 1,
"self": 0.12282261600012134
}
}
}
}
}
}
}