{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 0.8137915730476379,
"min": 0.637660026550293,
"max": 1.0954152345657349,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 32564.68359375,
"min": 25511.501953125,
"max": 43913.00390625,
"count": 200
},
"Agent.Environment.LessonNumber.pattern.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.pattern.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Step.mean": {
"value": 7999096.0,
"min": 39096.0,
"max": 7999096.0,
"count": 200
},
"Agent.Step.sum": {
"value": 7999096.0,
"min": 39096.0,
"max": 7999096.0,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 9.501697540283203,
"min": 2.3389432430267334,
"max": 9.816634178161621,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 228.04074096679688,
"min": 53.79569625854492,
"max": 235.59921264648438,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.04955274910441534,
"min": 0.04477203898718543,
"max": 0.05425709840785694,
"count": 200
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.148658247313246,
"min": 0.10508813887524109,
"max": 0.1627712952235708,
"count": 200
},
"Agent.Losses.ValueLoss.mean": {
"value": 0.009523128849058782,
"min": 0.002932505446306239,
"max": 3.669854566144447,
"count": 200
},
"Agent.Losses.ValueLoss.sum": {
"value": 0.028569386547176345,
"min": 0.008797516338918718,
"max": 10.79426912023198,
"count": 200
},
"Agent.Policy.LearningRate.mean": {
"value": 8.856997047999973e-07,
"min": 8.856997047999973e-07,
"max": 0.00029907840030719997,
"count": 200
},
"Agent.Policy.LearningRate.sum": {
"value": 2.657099114399992e-06,
"min": 2.657099114399992e-06,
"max": 0.0008936568021144,
"count": 200
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10029520000000001,
"min": 0.10029520000000001,
"max": 0.1996928,
"count": 200
},
"Agent.Policy.Epsilon.sum": {
"value": 0.30088560000000003,
"min": 0.30088560000000003,
"max": 0.5978856,
"count": 200
},
"Agent.Policy.Beta.mean": {
"value": 2.4730479999999958e-05,
"min": 2.4730479999999958e-05,
"max": 0.004984670720000001,
"count": 200
},
"Agent.Policy.Beta.sum": {
"value": 7.419143999999988e-05,
"min": 7.419143999999988e-05,
"max": 0.014894491440000001,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 4999.0,
"min": 4999.0,
"max": 4999.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 39992.0,
"min": 39992.0,
"max": 39992.0,
"count": 200
},
"Agent.WindFarmControl.IndividualPerformance.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.WindFarmControl.IndividualPerformance.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.WindFarmControl.AvoidDamageReward.mean": {
"value": 4709.554931640625,
"min": 1613.9169616699219,
"max": 4892.742919921875,
"count": 200
},
"Agent.WindFarmControl.AvoidDamageReward.sum": {
"value": 37676.439453125,
"min": 12911.335693359375,
"max": 39141.943359375,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 4726.380996704102,
"min": 1765.8604888916016,
"max": 4889.912284851074,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 37811.04797363281,
"min": 14126.883911132812,
"max": 39119.298278808594,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 4726.380996704102,
"min": 1765.8604888916016,
"max": 4889.912284851074,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 37811.04797363281,
"min": 14126.883911132812,
"max": 39119.298278808594,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1715171861",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/WindFarmControl_pattern_2_task_1_run_id_0_train.yaml --run-id=WindFarmControl/train/WindFarmControl_pattern_2_task_1_run_id_0_train",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.2",
"end_time_seconds": "1715175583"
},
"total": 3722.4016714,
"count": 1,
"self": 0.16862429999991946,
"children": {
"run_training.setup": {
"total": 0.04616050000000005,
"count": 1,
"self": 0.04616050000000005
},
"TrainerController.start_learning": {
"total": 3722.1868866,
"count": 1,
"self": 10.338838699990902,
"children": {
"TrainerController._reset_env": {
"total": 1.619233,
"count": 1,
"self": 1.619233
},
"TrainerController.advance": {
"total": 3710.201611300009,
"count": 1002052,
"self": 9.984701399917412,
"children": {
"env_step": {
"total": 3700.2169099000917,
"count": 1002052,
"self": 1570.7886952000463,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2123.541292500062,
"count": 1002052,
"self": 26.114850999963437,
"children": {
"TorchPolicy.evaluate": {
"total": 2097.4264415000985,
"count": 1002052,
"self": 2097.4264415000985
}
}
},
"workers": {
"total": 5.88692219998325,
"count": 1002052,
"self": 0.0,
"children": {
"worker_root": {
"total": 3708.38345829974,
"count": 1002052,
"is_parallel": true,
"self": 2791.9995448996156,
"children": {
"steps_from_proto": {
"total": 0.00025439999999998797,
"count": 1,
"is_parallel": true,
"self": 0.00012109999999998511,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00013330000000000286,
"count": 2,
"is_parallel": true,
"self": 0.00013330000000000286
}
}
},
"UnityEnvironment.step": {
"total": 916.3836590001247,
"count": 1002052,
"is_parallel": true,
"self": 45.25587559998246,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 68.96804120012082,
"count": 1002052,
"is_parallel": true,
"self": 68.96804120012082
},
"communicator.exchange": {
"total": 678.288798699905,
"count": 1002052,
"is_parallel": true,
"self": 678.288798699905
},
"steps_from_proto": {
"total": 123.87094350011637,
"count": 1002052,
"is_parallel": true,
"self": 69.59912830032582,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.27181519979056,
"count": 2004104,
"is_parallel": true,
"self": 54.27181519979056
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.210000002378365e-05,
"count": 1,
"self": 2.210000002378365e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 3719.5350889999454,
"count": 109540,
"is_parallel": true,
"self": 4.837310999988404,
"children": {
"process_trajectory": {
"total": 1914.3180694999548,
"count": 109540,
"is_parallel": true,
"self": 1913.9023417999551,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4157276999997066,
"count": 16,
"is_parallel": true,
"self": 0.4157276999997066
}
}
},
"_update_policy": {
"total": 1800.379708500002,
"count": 600,
"is_parallel": true,
"self": 575.0998159000906,
"children": {
"TorchPPOOptimizer.update": {
"total": 1225.2798925999114,
"count": 93600,
"is_parallel": true,
"self": 1225.2798925999114
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.027181500000097003,
"count": 1,
"self": 0.006050400000276568,
"children": {
"RLTrainer._checkpoint": {
"total": 0.021131099999820435,
"count": 1,
"self": 0.021131099999820435
}
}
}
}
}
}
}