{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5111764669418335,
"min": 0.5111764669418335,
"max": 1.3763642311096191,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15302.578125,
"min": 15302.578125,
"max": 41753.38671875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989909.0,
"min": 29952.0,
"max": 989909.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989909.0,
"min": 29952.0,
"max": 989909.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4948144853115082,
"min": -0.09361627697944641,
"max": 0.5173130035400391,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 133.59991455078125,
"min": -22.655139923095703,
"max": 140.1918182373047,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.014236483722925186,
"min": -0.07928085327148438,
"max": 0.49544256925582886,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.843850612640381,
"min": -21.485111236572266,
"max": 117.41989135742188,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06646509808100139,
"min": 0.06598758987439353,
"max": 0.07447238116167552,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9305113731340194,
"min": 0.5023804677649664,
"max": 1.0787041993559492,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014028587367474322,
"min": 0.0008511170174885417,
"max": 0.014028587367474322,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1964002231446405,
"min": 0.011915638244839584,
"max": 0.1964002231446405,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.682540296328575e-06,
"min": 7.682540296328575e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010755556414860006,
"min": 0.00010755556414860006,
"max": 0.0036335107888298,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025608142857143,
"min": 0.1025608142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4358514000000002,
"min": 1.3886848,
"max": 2.611170200000001,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026582534714285734,
"min": 0.00026582534714285734,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003721554860000003,
"min": 0.003721554860000003,
"max": 0.12113590298000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008870408870279789,
"min": 0.008870408870279789,
"max": 0.4211749732494354,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1241857260465622,
"min": 0.1241857260465622,
"max": 2.9482247829437256,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 366.9746835443038,
"min": 366.9746835443038,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28991.0,
"min": 15984.0,
"max": 33686.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5570531441252442,
"min": -1.0000000521540642,
"max": 1.5907721280863014,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 123.0071983858943,
"min": -29.83800170570612,
"max": 125.6709981188178,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5570531441252442,
"min": -1.0000000521540642,
"max": 1.5907721280863014,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 123.0071983858943,
"min": -29.83800170570612,
"max": 125.6709981188178,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03341580830712263,
"min": 0.03341580830712263,
"max": 7.4758562706410885,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.639848856262688,
"min": 2.639848856262688,
"max": 119.61370033025742,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679669163",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679671293"
},
"total": 2129.684761696,
"count": 1,
"self": 0.48157280399982483,
"children": {
"run_training.setup": {
"total": 0.17355113100006747,
"count": 1,
"self": 0.17355113100006747
},
"TrainerController.start_learning": {
"total": 2129.0296377610002,
"count": 1,
"self": 1.539033614994878,
"children": {
"TrainerController._reset_env": {
"total": 7.300176039999997,
"count": 1,
"self": 7.300176039999997
},
"TrainerController.advance": {
"total": 2120.0984501800053,
"count": 63660,
"self": 1.5850444859966046,
"children": {
"env_step": {
"total": 1494.924199659031,
"count": 63660,
"self": 1380.9081529441573,
"children": {
"SubprocessEnvManager._take_step": {
"total": 113.10353976295096,
"count": 63660,
"self": 4.7840848619835015,
"children": {
"TorchPolicy.evaluate": {
"total": 108.31945490096746,
"count": 62555,
"self": 108.31945490096746
}
}
},
"workers": {
"total": 0.9125069519227509,
"count": 63660,
"self": 0.0,
"children": {
"worker_root": {
"total": 2124.1015011089235,
"count": 63660,
"is_parallel": true,
"self": 863.9014780878895,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023266220000550675,
"count": 1,
"is_parallel": true,
"self": 0.0006462729995746486,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001680349000480419,
"count": 8,
"is_parallel": true,
"self": 0.001680349000480419
}
}
},
"UnityEnvironment.step": {
"total": 0.045389075000002777,
"count": 1,
"is_parallel": true,
"self": 0.0005103779997170932,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005086470000605914,
"count": 1,
"is_parallel": true,
"self": 0.0005086470000605914
},
"communicator.exchange": {
"total": 0.04280294300019705,
"count": 1,
"is_parallel": true,
"self": 0.04280294300019705
},
"steps_from_proto": {
"total": 0.0015671070000280451,
"count": 1,
"is_parallel": true,
"self": 0.000337766999791711,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012293400002363342,
"count": 8,
"is_parallel": true,
"self": 0.0012293400002363342
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1260.200023021034,
"count": 63659,
"is_parallel": true,
"self": 31.420505986036915,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.781952241014096,
"count": 63659,
"is_parallel": true,
"self": 22.781952241014096
},
"communicator.exchange": {
"total": 1113.7038721479755,
"count": 63659,
"is_parallel": true,
"self": 1113.7038721479755
},
"steps_from_proto": {
"total": 92.29369264600746,
"count": 63659,
"is_parallel": true,
"self": 20.097022200014862,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.1966704459926,
"count": 509272,
"is_parallel": true,
"self": 72.1966704459926
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 623.5892060349777,
"count": 63660,
"self": 2.872228233972237,
"children": {
"process_trajectory": {
"total": 119.39238245900333,
"count": 63660,
"self": 119.19196807200296,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2004143870003645,
"count": 2,
"self": 0.2004143870003645
}
}
},
"_update_policy": {
"total": 501.3245953420021,
"count": 452,
"self": 319.95333977299856,
"children": {
"TorchPPOOptimizer.update": {
"total": 181.37125556900355,
"count": 22758,
"self": 181.37125556900355
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.870001693139784e-07,
"count": 1,
"self": 8.870001693139784e-07
},
"TrainerController._save_models": {
"total": 0.09197703900008491,
"count": 1,
"self": 0.0013599760004581185,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0906170629996268,
"count": 1,
"self": 0.0906170629996268
}
}
}
}
}
}
}