{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.0211870670318604,
"min": 2.0211870670318604,
"max": 2.890355110168457,
"count": 30
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 20632.27734375,
"min": 19609.34765625,
"max": 31793.90625,
"count": 30
},
"SnowballTarget.Step.mean": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Step.sum": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 6.206444263458252,
"min": 0.0072152577340602875,
"max": 6.206444263458252,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 620.6444091796875,
"min": 0.7359563112258911,
"max": 620.6444091796875,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 18.04,
"min": 2.6136363636363638,
"max": 18.04,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 902.0,
"min": 115.0,
"max": 940.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 18.04,
"min": 2.6136363636363638,
"max": 18.04,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 902.0,
"min": 115.0,
"max": 940.0,
"count": 30
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.021776044233160975,
"min": 0.021257978291635025,
"max": 0.028197265916969628,
"count": 14
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.021776044233160975,
"min": 0.021257978291635025,
"max": 0.028197265916969628,
"count": 14
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2791651332750916,
"min": 0.09541434375569224,
"max": 0.3243330538272858,
"count": 14
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.2791651332750916,
"min": 0.09541434375569224,
"max": 0.3243330538272858,
"count": 14
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.400097533333317e-06,
"min": 7.400097533333317e-06,
"max": 0.00027879200706933333,
"count": 14
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 7.400097533333317e-06,
"min": 7.400097533333317e-06,
"max": 0.00027879200706933333,
"count": 14
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10246666666666668,
"min": 0.10246666666666668,
"max": 0.19293066666666672,
"count": 14
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.10246666666666668,
"min": 0.10246666666666668,
"max": 0.19293066666666672,
"count": 14
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00013308666666666643,
"min": 0.00013308666666666643,
"max": 0.004647240266666667,
"count": 14
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00013308666666666643,
"min": 0.00013308666666666643,
"max": 0.004647240266666667,
"count": 14
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1742421722",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --force --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1742422279"
},
"total": 557.1427988669998,
"count": 1,
"self": 0.32937241699983133,
"children": {
"run_training.setup": {
"total": 0.023361000000022614,
"count": 1,
"self": 0.023361000000022614
},
"TrainerController.start_learning": {
"total": 556.7900654499999,
"count": 1,
"self": 0.4980271520144015,
"children": {
"TrainerController._reset_env": {
"total": 2.033088264000071,
"count": 1,
"self": 2.033088264000071
},
"TrainerController.advance": {
"total": 554.1067161589854,
"count": 27328,
"self": 0.5131484539442681,
"children": {
"env_step": {
"total": 423.3628580420036,
"count": 27328,
"self": 312.00436787992794,
"children": {
"SubprocessEnvManager._take_step": {
"total": 111.04953330206399,
"count": 27328,
"self": 1.803791116098182,
"children": {
"TorchPolicy.evaluate": {
"total": 109.24574218596581,
"count": 27328,
"self": 109.24574218596581
}
}
},
"workers": {
"total": 0.3089568600116763,
"count": 27328,
"self": 0.0,
"children": {
"worker_root": {
"total": 555.4250960720055,
"count": 27328,
"is_parallel": true,
"self": 280.4062517719817,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002166887999919709,
"count": 1,
"is_parallel": true,
"self": 0.0006821630004196777,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014847249995000311,
"count": 10,
"is_parallel": true,
"self": 0.0014847249995000311
}
}
},
"UnityEnvironment.step": {
"total": 0.039336656000159564,
"count": 1,
"is_parallel": true,
"self": 0.00040775800016490393,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004708569999820611,
"count": 1,
"is_parallel": true,
"self": 0.0004708569999820611
},
"communicator.exchange": {
"total": 0.0371828639999876,
"count": 1,
"is_parallel": true,
"self": 0.0371828639999876
},
"steps_from_proto": {
"total": 0.0012751770000249962,
"count": 1,
"is_parallel": true,
"self": 0.00029860299900974496,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009765740010152513,
"count": 10,
"is_parallel": true,
"self": 0.0009765740010152513
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 275.01884430002383,
"count": 27327,
"is_parallel": true,
"self": 11.418572458020208,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.42339337799649,
"count": 27327,
"is_parallel": true,
"self": 6.42339337799649
},
"communicator.exchange": {
"total": 218.12262301996043,
"count": 27327,
"is_parallel": true,
"self": 218.12262301996043
},
"steps_from_proto": {
"total": 39.0542554440467,
"count": 27327,
"is_parallel": true,
"self": 7.3725552941059505,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.681700149940752,
"count": 273270,
"is_parallel": true,
"self": 31.681700149940752
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 130.2307096630375,
"count": 27328,
"self": 0.6010766931067337,
"children": {
"process_trajectory": {
"total": 31.85877447093071,
"count": 27328,
"self": 30.862882198930947,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9958922719997645,
"count": 6,
"self": 0.9958922719997645
}
}
},
"_update_policy": {
"total": 97.77085849900004,
"count": 14,
"self": 72.12570073999541,
"children": {
"TorchPPOOptimizer.update": {
"total": 25.645157759004633,
"count": 1120,
"self": 25.645157759004633
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.649999472254422e-07,
"count": 1,
"self": 8.649999472254422e-07
},
"TrainerController._save_models": {
"total": 0.15223301000014544,
"count": 1,
"self": 0.0033827040001597197,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14885030599998572,
"count": 1,
"self": 0.14885030599998572
}
}
}
}
}
}
}