{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.21981509029865265,
"min": 0.18506763875484467,
"max": 1.411207914352417,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 6619.07177734375,
"min": 5549.06787109375,
"max": 42810.40234375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989881.0,
"min": 29952.0,
"max": 989881.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989881.0,
"min": 29952.0,
"max": 989881.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.16111139953136444,
"min": -0.1166430115699768,
"max": 0.16111139953136444,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 40.27785110473633,
"min": -28.110965728759766,
"max": 40.27785110473633,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.16578516364097595,
"min": 0.16079334914684296,
"max": 0.6670405864715576,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 41.4462890625,
"min": 40.359130859375,
"max": 160.75677490234375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.04677092236363225,
"min": 0.04265627311265451,
"max": 0.053192347935543456,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.7015638354544838,
"min": 0.3723464355488042,
"max": 0.7249526728388715,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.00936165225187627,
"min": 0.0005959668250766082,
"max": 0.009585602481214059,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.14042478377814405,
"min": 0.008343535551072515,
"max": 0.14042478377814405,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.527297490933331e-06,
"min": 7.527297490933331e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011290946236399996,
"min": 0.00011290946236399996,
"max": 0.0036326935891021995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250906666666666,
"min": 0.10250906666666666,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.537636,
"min": 1.3886848,
"max": 2.6108978,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026065575999999994,
"min": 0.00026065575999999994,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003909836399999999,
"min": 0.003909836399999999,
"max": 0.12110869022,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.17659518122673035,
"min": 0.1558270901441574,
"max": 0.9753618836402893,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 2.648927688598633,
"min": 2.181579351425171,
"max": 6.82753324508667,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 645.4347826086956,
"min": 645.4347826086956,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29690.0,
"min": 15984.0,
"max": 33287.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.6587086663297985,
"min": -1.0000000521540642,
"max": 0.6587086663297985,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 30.30059865117073,
"min": -27.60760159045458,
"max": 30.30059865117073,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.6587086663297985,
"min": -1.0000000521540642,
"max": 0.6587086663297985,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 30.30059865117073,
"min": -27.60760159045458,
"max": 30.30059865117073,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 1.1744339446861134,
"min": 1.1144351417188654,
"max": 14.709853190928698,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 54.02396145556122,
"min": 49.053154496476054,
"max": 235.35765105485916,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1744641195",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1744643132"
},
"total": 1937.667725711,
"count": 1,
"self": 0.7555339259999982,
"children": {
"run_training.setup": {
"total": 0.028994232000059128,
"count": 1,
"self": 0.028994232000059128
},
"TrainerController.start_learning": {
"total": 1936.883197553,
"count": 1,
"self": 1.445675336995464,
"children": {
"TrainerController._reset_env": {
"total": 2.61513846999992,
"count": 1,
"self": 2.61513846999992
},
"TrainerController.advance": {
"total": 1932.701053325004,
"count": 63417,
"self": 1.4852948799846217,
"children": {
"env_step": {
"total": 1395.795611395002,
"count": 63417,
"self": 1228.1472580229915,
"children": {
"SubprocessEnvManager._take_step": {
"total": 166.76848318400994,
"count": 63417,
"self": 4.972066104022133,
"children": {
"TorchPolicy.evaluate": {
"total": 161.7964170799878,
"count": 62562,
"self": 161.7964170799878
}
}
},
"workers": {
"total": 0.8798701880004955,
"count": 63417,
"self": 0.0,
"children": {
"worker_root": {
"total": 1931.5961652650974,
"count": 63417,
"is_parallel": true,
"self": 820.0762642761347,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002836247999994157,
"count": 1,
"is_parallel": true,
"self": 0.0008682260001933173,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019680219998008397,
"count": 8,
"is_parallel": true,
"self": 0.0019680219998008397
}
}
},
"UnityEnvironment.step": {
"total": 0.04660052200006248,
"count": 1,
"is_parallel": true,
"self": 0.0006330189999061986,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044072900004721305,
"count": 1,
"is_parallel": true,
"self": 0.00044072900004721305
},
"communicator.exchange": {
"total": 0.04390232800005833,
"count": 1,
"is_parallel": true,
"self": 0.04390232800005833
},
"steps_from_proto": {
"total": 0.0016244460000507388,
"count": 1,
"is_parallel": true,
"self": 0.0003671000001759239,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012573459998748149,
"count": 8,
"is_parallel": true,
"self": 0.0012573459998748149
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1111.5199009889627,
"count": 63416,
"is_parallel": true,
"self": 32.12871161892508,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.588566452026043,
"count": 63416,
"is_parallel": true,
"self": 23.588566452026043
},
"communicator.exchange": {
"total": 956.2558198550253,
"count": 63416,
"is_parallel": true,
"self": 956.2558198550253
},
"steps_from_proto": {
"total": 99.54680306298633,
"count": 63416,
"is_parallel": true,
"self": 20.36828647789821,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.17851658508812,
"count": 507328,
"is_parallel": true,
"self": 79.17851658508812
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 535.4201470500175,
"count": 63417,
"self": 2.839436560981767,
"children": {
"process_trajectory": {
"total": 131.3611685350345,
"count": 63417,
"self": 131.07835224903442,
"children": {
"RLTrainer._checkpoint": {
"total": 0.28281628600007025,
"count": 2,
"self": 0.28281628600007025
}
}
},
"_update_policy": {
"total": 401.2195419540012,
"count": 455,
"self": 257.8619154770297,
"children": {
"TorchPPOOptimizer.update": {
"total": 143.35762647697152,
"count": 11298,
"self": 143.35762647697152
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.700003490957897e-07,
"count": 1,
"self": 8.700003490957897e-07
},
"TrainerController._save_models": {
"total": 0.12132955100014442,
"count": 1,
"self": 0.0021194979999563657,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11921005300018805,
"count": 1,
"self": 0.11921005300018805
}
}
}
}
}
}
}