{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.0342463254928589,
"min": 0.8567436933517456,
"max": 1.0977368354797363,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 41378.12890625,
"min": 34276.6015625,
"max": 43979.73046875,
"count": 200
},
"Agent.Environment.LessonNumber.pattern.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.pattern.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Step.mean": {
"value": 7999096.0,
"min": 39096.0,
"max": 7999096.0,
"count": 200
},
"Agent.Step.sum": {
"value": 7999096.0,
"min": 39096.0,
"max": 7999096.0,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 9.325874328613281,
"min": 0.7655065655708313,
"max": 9.509753227233887,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 223.82098388671875,
"min": 18.37215805053711,
"max": 228.23406982421875,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.04881577084447527,
"min": 0.04492947267089409,
"max": 0.05480822551688771,
"count": 200
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.1464473125334258,
"min": 0.09814778697909787,
"max": 0.16442467655066312,
"count": 200
},
"Agent.Losses.ValueLoss.mean": {
"value": 0.29578603130103726,
"min": 0.0682248831814384,
"max": 1.462727802572772,
"count": 200
},
"Agent.Losses.ValueLoss.sum": {
"value": 0.8873580939031117,
"min": 0.20467464954431522,
"max": 2.925455605145544,
"count": 200
},
"Agent.Policy.LearningRate.mean": {
"value": 8.856997047999973e-07,
"min": 8.856997047999973e-07,
"max": 0.00029907840030719997,
"count": 200
},
"Agent.Policy.LearningRate.sum": {
"value": 2.657099114399992e-06,
"min": 2.657099114399992e-06,
"max": 0.0008936568021144,
"count": 200
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10029520000000001,
"min": 0.10029520000000001,
"max": 0.1996928,
"count": 200
},
"Agent.Policy.Epsilon.sum": {
"value": 0.30088560000000003,
"min": 0.30088560000000003,
"max": 0.5978856,
"count": 200
},
"Agent.Policy.Beta.mean": {
"value": 2.4730479999999958e-05,
"min": 2.4730479999999958e-05,
"max": 0.004984670720000001,
"count": 200
},
"Agent.Policy.Beta.sum": {
"value": 7.419143999999988e-05,
"min": 7.419143999999988e-05,
"max": 0.014894491440000001,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 4999.0,
"min": 4999.0,
"max": 4999.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 39992.0,
"min": 39992.0,
"max": 39992.0,
"count": 200
},
"Agent.WindFarmControl.IndividualPerformance.mean": {
"value": 4625.7371826171875,
"min": 618.1424263715744,
"max": 4678.6807861328125,
"count": 200
},
"Agent.WindFarmControl.IndividualPerformance.sum": {
"value": 37005.8974609375,
"min": 4945.139410972595,
"max": 37429.4462890625,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 4625.749465942383,
"min": 802.4631873253398,
"max": 4682.931671142578,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 37005.99572753906,
"min": 6419.705498602719,
"max": 37463.453369140625,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 4625.749465942383,
"min": 802.4631873253398,
"max": 4682.931671142578,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 37005.99572753906,
"min": 6419.705498602719,
"max": 37463.453369140625,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716278148",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/WindFarmControl_pattern_8_task_0_run_id_0_train.yaml --run-id=WindFarmControl/train/WindFarmControl_pattern_8_task_0_run_id_0_train --base-port 5009",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.2",
"end_time_seconds": "1716285124"
},
"total": 6975.7818057,
"count": 1,
"self": 0.34803290000036213,
"children": {
"run_training.setup": {
"total": 0.06823010000000007,
"count": 1,
"self": 0.06823010000000007
},
"TrainerController.start_learning": {
"total": 6975.3655427,
"count": 1,
"self": 21.392211200079146,
"children": {
"TrainerController._reset_env": {
"total": 4.3076970999999995,
"count": 1,
"self": 4.3076970999999995
},
"TrainerController.advance": {
"total": 6949.629124099921,
"count": 1002051,
"self": 19.631331999895338,
"children": {
"env_step": {
"total": 6929.997792100025,
"count": 1002051,
"self": 2292.9432863998,
"children": {
"SubprocessEnvManager._take_step": {
"total": 4627.3965150002705,
"count": 1002051,
"self": 38.25333070077522,
"children": {
"TorchPolicy.evaluate": {
"total": 4589.143184299495,
"count": 1002051,
"self": 4589.143184299495
}
}
},
"workers": {
"total": 9.657990699955352,
"count": 1002051,
"self": 0.0,
"children": {
"worker_root": {
"total": 6951.450070599854,
"count": 1002051,
"is_parallel": true,
"self": 5560.053164699688,
"children": {
"steps_from_proto": {
"total": 0.00048599999999998644,
"count": 1,
"is_parallel": true,
"self": 0.00033770000000066247,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00014829999999932397,
"count": 2,
"is_parallel": true,
"self": 0.00014829999999932397
}
}
},
"UnityEnvironment.step": {
"total": 1391.3964199001666,
"count": 1002051,
"is_parallel": true,
"self": 66.13279789980515,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 96.4390646001216,
"count": 1002051,
"is_parallel": true,
"self": 96.4390646001216
},
"communicator.exchange": {
"total": 1037.158965500481,
"count": 1002051,
"is_parallel": true,
"self": 1037.158965500481
},
"steps_from_proto": {
"total": 191.66559189975908,
"count": 1002051,
"is_parallel": true,
"self": 105.45197919906975,
"children": {
"_process_rank_one_or_two_observation": {
"total": 86.21361270068932,
"count": 2004102,
"is_parallel": true,
"self": 86.21361270068932
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.200000057608122e-05,
"count": 1,
"self": 4.200000057608122e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 6968.055332399885,
"count": 224650,
"is_parallel": true,
"self": 11.720379300204513,
"children": {
"process_trajectory": {
"total": 3783.882188399685,
"count": 224650,
"is_parallel": true,
"self": 3783.2448023996863,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6373859999991396,
"count": 16,
"is_parallel": true,
"self": 0.6373859999991396
}
}
},
"_update_policy": {
"total": 3172.4527646999954,
"count": 600,
"is_parallel": true,
"self": 828.331355099876,
"children": {
"TorchPPOOptimizer.update": {
"total": 2344.1214096001195,
"count": 93600,
"is_parallel": true,
"self": 2344.1214096001195
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.03646829999979673,
"count": 1,
"self": 0.004892799999652198,
"children": {
"RLTrainer._checkpoint": {
"total": 0.03157550000014453,
"count": 1,
"self": 0.03157550000014453
}
}
}
}
}
}
}