{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.38626715540885925,
"min": 0.38626715540885925,
"max": 1.5069020986557007,
"count": 29
},
"Pyramids.Policy.Entropy.sum": {
"value": 11588.0146484375,
"min": 11588.0146484375,
"max": 45713.3828125,
"count": 29
},
"Pyramids.Step.mean": {
"value": 869971.0,
"min": 29952.0,
"max": 869971.0,
"count": 29
},
"Pyramids.Step.sum": {
"value": 869971.0,
"min": 29952.0,
"max": 869971.0,
"count": 29
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6553356647491455,
"min": -0.09985408186912537,
"max": 0.6553356647491455,
"count": 29
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 186.77066040039062,
"min": -24.064834594726562,
"max": 186.77066040039062,
"count": 29
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.025371676310896873,
"min": -6.2439967223326676e-06,
"max": 0.23632660508155823,
"count": 29
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.23092794418335,
"min": -0.0017295870929956436,
"max": 56.00940704345703,
"count": 29
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06743653473477382,
"min": 0.06743653473477382,
"max": 0.07443838153202605,
"count": 29
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9441114862868335,
"min": 0.49479189453995787,
"max": 1.1165757229803908,
"count": 29
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015381722852591938,
"min": 0.0005439771740100148,
"max": 0.015477805970456553,
"count": 29
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21534411993628713,
"min": 0.005983748914110162,
"max": 0.23216708955684828,
"count": 29
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00021444189994794763,
"min": 0.00021444189994794763,
"max": 0.00029838354339596195,
"count": 29
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0030021865992712667,
"min": 0.0020886848037717336,
"max": 0.0038473109175630998,
"count": 29
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.17148062380952384,
"min": 0.17148062380952384,
"max": 0.19946118095238097,
"count": 29
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.4007287333333336,
"min": 1.3962282666666668,
"max": 2.7824369000000004,
"count": 29
},
"Pyramids.Policy.Beta.mean": {
"value": 0.007150914318571428,
"min": 0.007150914318571428,
"max": 0.009946171977142856,
"count": 29
},
"Pyramids.Policy.Beta.sum": {
"value": 0.10011280046,
"min": 0.06962320384,
"max": 0.12826544631,
"count": 29
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009586741216480732,
"min": 0.00904719065874815,
"max": 0.32112836837768555,
"count": 29
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1342143714427948,
"min": 0.12666067481040955,
"max": 2.247898578643799,
"count": 29
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 294.37,
"min": 294.37,
"max": 999.0,
"count": 29
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29437.0,
"min": 15984.0,
"max": 33401.0,
"count": 29
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6856219828128816,
"min": -1.0000000521540642,
"max": 1.6856219828128816,
"count": 29
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 168.56219828128815,
"min": -31.998801663517952,
"max": 168.56219828128815,
"count": 29
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6856219828128816,
"min": -1.0000000521540642,
"max": 1.6856219828128816,
"count": 29
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 168.56219828128815,
"min": -31.998801663517952,
"max": 168.56219828128815,
"count": 29
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.029283408086339478,
"min": 0.02887456903408747,
"max": 6.432943071238697,
"count": 29
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.9283408086339477,
"min": 2.8297077653405722,
"max": 102.92708913981915,
"count": 29
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 29
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 29
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1757606395",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1757609094"
},
"total": 2699.454068872,
"count": 1,
"self": 0.6948407459999544,
"children": {
"run_training.setup": {
"total": 0.06233486500013896,
"count": 1,
"self": 0.06233486500013896
},
"TrainerController.start_learning": {
"total": 2698.696893261,
"count": 1,
"self": 1.9353385320509915,
"children": {
"TrainerController._reset_env": {
"total": 3.260545957999966,
"count": 1,
"self": 3.260545957999966
},
"TrainerController.advance": {
"total": 2693.347818645949,
"count": 55668,
"self": 1.9575989249724444,
"children": {
"env_step": {
"total": 1799.3365510830217,
"count": 55668,
"self": 1668.0998923020022,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.06639266502498,
"count": 55668,
"self": 5.808971339108439,
"children": {
"TorchPolicy.evaluate": {
"total": 124.25742132591654,
"count": 54595,
"self": 124.25742132591654
}
}
},
"workers": {
"total": 1.1702661159945364,
"count": 55667,
"self": 0.0,
"children": {
"worker_root": {
"total": 2691.4152504759822,
"count": 55667,
"is_parallel": true,
"self": 1172.2270647219575,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021171729999878153,
"count": 1,
"is_parallel": true,
"self": 0.0006883040002776397,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014288689997101756,
"count": 8,
"is_parallel": true,
"self": 0.0014288689997101756
}
}
},
"UnityEnvironment.step": {
"total": 0.07336673400004656,
"count": 1,
"is_parallel": true,
"self": 0.0007333340001878241,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005099969998809684,
"count": 1,
"is_parallel": true,
"self": 0.0005099969998809684
},
"communicator.exchange": {
"total": 0.07016588700003012,
"count": 1,
"is_parallel": true,
"self": 0.07016588700003012
},
"steps_from_proto": {
"total": 0.0019575159999476455,
"count": 1,
"is_parallel": true,
"self": 0.00046407199988607317,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014934440000615723,
"count": 8,
"is_parallel": true,
"self": 0.0014934440000615723
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1519.1881857540247,
"count": 55666,
"is_parallel": true,
"self": 38.59723676396811,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.501670212015597,
"count": 55666,
"is_parallel": true,
"self": 26.501670212015597
},
"communicator.exchange": {
"total": 1344.5806588120308,
"count": 55666,
"is_parallel": true,
"self": 1344.5806588120308
},
"steps_from_proto": {
"total": 109.5086199660102,
"count": 55666,
"is_parallel": true,
"self": 23.29415191800058,
"children": {
"_process_rank_one_or_two_observation": {
"total": 86.21446804800962,
"count": 445328,
"is_parallel": true,
"self": 86.21446804800962
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 892.0536686379544,
"count": 55667,
"self": 3.4968224908930097,
"children": {
"process_trajectory": {
"total": 134.7211725200616,
"count": 55667,
"self": 134.46368460406143,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25748791600017285,
"count": 1,
"self": 0.25748791600017285
}
}
},
"_update_policy": {
"total": 753.8356736269998,
"count": 385,
"self": 305.78395610399184,
"children": {
"TorchPPOOptimizer.update": {
"total": 448.05171752300794,
"count": 19959,
"self": 448.05171752300794
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5120003808988258e-06,
"count": 1,
"self": 1.5120003808988258e-06
},
"TrainerController._save_models": {
"total": 0.15318861299965647,
"count": 1,
"self": 0.0024000469993552542,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15078856600030122,
"count": 1,
"self": 0.15078856600030122
}
}
}
}
}
}
}