{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6879401803016663,
"min": 0.654887855052948,
"max": 1.4360133409500122,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 20792.3046875,
"min": 19531.375,
"max": 43562.90234375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989925.0,
"min": 29952.0,
"max": 989925.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989925.0,
"min": 29952.0,
"max": 989925.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2505187690258026,
"min": -0.22013568878173828,
"max": 0.2505187690258026,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 63.381248474121094,
"min": -52.172157287597656,
"max": 63.381248474121094,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.013546476140618324,
"min": -0.013546476140618324,
"max": 0.31149423122406006,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.4272584915161133,
"min": -3.4272584915161133,
"max": 75.07010650634766,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.061661323940543473,
"min": 0.061661323940543473,
"max": 0.07331444574391412,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9249198591081521,
"min": 0.47261095472096987,
"max": 1.0590531569317794,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.009954051265873098,
"min": 0.00019516668503997064,
"max": 0.010586337453114996,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.14931076898809648,
"min": 0.0025371669055196185,
"max": 0.14931076898809648,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.514397495233332e-06,
"min": 7.514397495233332e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011271596242849998,
"min": 0.00011271596242849998,
"max": 0.0036332182889272993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250476666666669,
"min": 0.10250476666666669,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5375715000000003,
"min": 1.3886848,
"max": 2.6110727,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026022618999999996,
"min": 0.00026022618999999996,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0039033928499999995,
"min": 0.0039033928499999995,
"max": 0.12112616273000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014551150612533092,
"min": 0.014551150612533092,
"max": 0.3621668219566345,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2182672619819641,
"min": 0.2143244445323944,
"max": 2.535167694091797,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 632.8,
"min": 566.1698113207547,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31640.0,
"min": 15984.0,
"max": 32563.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.8712162830087603,
"min": -1.0000000521540642,
"max": 0.9533333048500396,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 42.689597867429256,
"min": -31.99320164322853,
"max": 51.47999846190214,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.8712162830087603,
"min": -1.0000000521540642,
"max": 0.9533333048500396,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 42.689597867429256,
"min": -31.99320164322853,
"max": 51.47999846190214,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.09578146394912382,
"min": 0.09165765505814408,
"max": 6.9449106473475695,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.693291733507067,
"min": 4.630503593507456,
"max": 111.11857035756111,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1721916431",
"python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1721919612"
},
"total": 3180.73370772,
"count": 1,
"self": 0.7947104579998268,
"children": {
"run_training.setup": {
"total": 0.07077057200001491,
"count": 1,
"self": 0.07077057200001491
},
"TrainerController.start_learning": {
"total": 3179.86822669,
"count": 1,
"self": 2.3857583149770107,
"children": {
"TrainerController._reset_env": {
"total": 3.1439796650000176,
"count": 1,
"self": 3.1439796650000176
},
"TrainerController.advance": {
"total": 3174.2534468930226,
"count": 63283,
"self": 2.524634227065235,
"children": {
"env_step": {
"total": 2069.3605310120124,
"count": 63283,
"self": 1904.740085341946,
"children": {
"SubprocessEnvManager._take_step": {
"total": 163.2051896600156,
"count": 63283,
"self": 7.3917117130922065,
"children": {
"TorchPolicy.evaluate": {
"total": 155.8134779469234,
"count": 62574,
"self": 155.8134779469234
}
}
},
"workers": {
"total": 1.4152560100507117,
"count": 63283,
"self": 0.0,
"children": {
"worker_root": {
"total": 3172.3573158280037,
"count": 63283,
"is_parallel": true,
"self": 1454.8350297329985,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005466948999981014,
"count": 1,
"is_parallel": true,
"self": 0.00358252299980677,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018844260001742441,
"count": 8,
"is_parallel": true,
"self": 0.0018844260001742441
}
}
},
"UnityEnvironment.step": {
"total": 0.05730140000002848,
"count": 1,
"is_parallel": true,
"self": 0.0007552509999868562,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005298600000287479,
"count": 1,
"is_parallel": true,
"self": 0.0005298600000287479
},
"communicator.exchange": {
"total": 0.054153686000006473,
"count": 1,
"is_parallel": true,
"self": 0.054153686000006473
},
"steps_from_proto": {
"total": 0.001862603000006402,
"count": 1,
"is_parallel": true,
"self": 0.00039181000011012657,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014707929998962754,
"count": 8,
"is_parallel": true,
"self": 0.0014707929998962754
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1717.5222860950053,
"count": 63282,
"is_parallel": true,
"self": 52.566312589007794,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 32.14106916497167,
"count": 63282,
"is_parallel": true,
"self": 32.14106916497167
},
"communicator.exchange": {
"total": 1498.2908997040313,
"count": 63282,
"is_parallel": true,
"self": 1498.2908997040313
},
"steps_from_proto": {
"total": 134.5240046369944,
"count": 63282,
"is_parallel": true,
"self": 29.237643085947695,
"children": {
"_process_rank_one_or_two_observation": {
"total": 105.2863615510467,
"count": 506256,
"is_parallel": true,
"self": 105.2863615510467
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1102.368281653945,
"count": 63283,
"self": 4.5428839849682845,
"children": {
"process_trajectory": {
"total": 163.8804928939735,
"count": 63283,
"self": 163.5855752229736,
"children": {
"RLTrainer._checkpoint": {
"total": 0.29491767099989374,
"count": 2,
"self": 0.29491767099989374
}
}
},
"_update_policy": {
"total": 933.9449047750031,
"count": 450,
"self": 388.4577855530065,
"children": {
"TorchPPOOptimizer.update": {
"total": 545.4871192219966,
"count": 22851,
"self": 545.4871192219966
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3660001059179194e-06,
"count": 1,
"self": 1.3660001059179194e-06
},
"TrainerController._save_models": {
"total": 0.0850404510001681,
"count": 1,
"self": 0.0023008130001471727,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08273963800002093,
"count": 1,
"self": 0.08273963800002093
}
}
}
}
}
}
}