{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6364408731460571,
"min": 1.2111297845840454,
"max": 3.295790433883667,
"count": 29261
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 1256.78662109375,
"min": 38.75615310668945,
"max": 75934.9140625,
"count": 29261
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 107.5,
"min": 1.5,
"max": 999.0,
"count": 29815
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 860.0,
"min": 12.0,
"max": 19980.0,
"count": 29815
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1681.6275089035726,
"min": 1189.5147432814815,
"max": 1744.6113248627523,
"count": 29560
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 6726.51003561429,
"min": 2379.029486562963,
"max": 23797.706080004307,
"count": 29560
},
"SoccerTwos.Step.mean": {
"value": 7999830.0,
"min": 66.0,
"max": 7999830.0,
"count": 35757
},
"SoccerTwos.Step.sum": {
"value": 7999830.0,
"min": 66.0,
"max": 7999830.0,
"count": 35757
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.2291768193244934,
"min": -0.6357744336128235,
"max": 0.5942150354385376,
"count": 35757
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.2291768193244934,
"min": -2.943234920501709,
"max": 4.191102027893066,
"count": 35757
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.2442202866077423,
"min": -0.6416802406311035,
"max": 0.6011806726455688,
"count": 35757
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.2442202866077423,
"min": -2.953758955001831,
"max": 4.2166595458984375,
"count": 35757
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 35757
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 35757
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -1.0,
"min": -1.0,
"max": 0.9932000041007996,
"count": 35757
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -1.0,
"min": -9.0,
"max": 8.780399978160858,
"count": 35757
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -1.0,
"min": -1.0,
"max": 0.9932000041007996,
"count": 35757
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -1.0,
"min": -9.0,
"max": 8.780399978160858,
"count": 35757
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 35757
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 35757
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.023088003506321304,
"min": 0.012560889627153907,
"max": 0.027914463468672088,
"count": 386
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.023088003506321304,
"min": 0.012560889627153907,
"max": 0.027914463468672088,
"count": 386
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08402285973230998,
"min": 5.7798829341966966e-05,
"max": 0.10703929290175437,
"count": 386
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08402285973230998,
"min": 5.7798829341966966e-05,
"max": 0.10703929290175437,
"count": 386
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0861522302031517,
"min": 5.083439458151891e-05,
"max": 0.1108598418533802,
"count": 386
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0861522302031517,
"min": 5.083439458151891e-05,
"max": 0.1108598418533802,
"count": 386
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 386
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 386
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 386
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 386
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 386
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 386
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681975243",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\andre\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos_2 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.0+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1682024082"
},
"total": 48839.2381127,
"count": 1,
"self": 1.5360641999941436,
"children": {
"run_training.setup": {
"total": 0.23951239999999885,
"count": 1,
"self": 0.23951239999999885
},
"TrainerController.start_learning": {
"total": 48837.4625361,
"count": 1,
"self": 18.071318898437312,
"children": {
"TrainerController._reset_env": {
"total": 22.469729600012784,
"count": 40,
"self": 22.469729600012784
},
"TrainerController.advance": {
"total": 48796.55171660154,
"count": 544485,
"self": 18.84643090087775,
"children": {
"env_step": {
"total": 13480.940880298956,
"count": 544485,
"self": 9624.141251096935,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3845.350842202946,
"count": 544485,
"self": 125.70891010125706,
"children": {
"TorchPolicy.evaluate": {
"total": 3719.641932101689,
"count": 1006372,
"self": 3719.641932101689
}
}
},
"workers": {
"total": 11.44878699907548,
"count": 544485,
"self": 0.0,
"children": {
"worker_root": {
"total": 48790.7222916014,
"count": 544485,
"is_parallel": true,
"self": 41243.46022880326,
"children": {
"steps_from_proto": {
"total": 0.10577209997159187,
"count": 80,
"is_parallel": true,
"self": 0.021406299991507893,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.08436579998008398,
"count": 320,
"is_parallel": true,
"self": 0.08436579998008398
}
}
},
"UnityEnvironment.step": {
"total": 7547.156290698165,
"count": 544485,
"is_parallel": true,
"self": 433.1522372964082,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 388.46125400078466,
"count": 544485,
"is_parallel": true,
"self": 388.46125400078466
},
"communicator.exchange": {
"total": 5352.378265900363,
"count": 544485,
"is_parallel": true,
"self": 5352.378265900363
},
"steps_from_proto": {
"total": 1373.1645335006097,
"count": 1088970,
"is_parallel": true,
"self": 279.40359070710474,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1093.760942793505,
"count": 4355880,
"is_parallel": true,
"self": 1093.760942793505
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 35296.76440540171,
"count": 544485,
"self": 131.58449890200427,
"children": {
"process_trajectory": {
"total": 6924.779084599799,
"count": 544485,
"self": 6919.675474399804,
"children": {
"RLTrainer._checkpoint": {
"total": 5.103610199994364,
"count": 16,
"self": 5.103610199994364
}
}
},
"_update_policy": {
"total": 28240.400821899908,
"count": 386,
"self": 1867.136166400087,
"children": {
"TorchPOCAOptimizer.update": {
"total": 26373.26465549982,
"count": 11598,
"self": 26373.26465549982
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.500004145782441e-06,
"count": 1,
"self": 1.500004145782441e-06
},
"TrainerController._save_models": {
"total": 0.3697695000009844,
"count": 1,
"self": 0.07219520000217017,
"children": {
"RLTrainer._checkpoint": {
"total": 0.29757429999881424,
"count": 1,
"self": 0.29757429999881424
}
}
}
}
}
}
}