{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4123179912567139,
"min": 1.4123179912567139,
"max": 1.4196950197219849,
"count": 21
},
"Huggy.Policy.Entropy.sum": {
"value": 70793.8515625,
"min": 969.6517333984375,
"max": 71571.0390625,
"count": 21
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 74.19277108433735,
"min": 25.2,
"max": 92.07262569832402,
"count": 21
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49264.0,
"min": 126.0,
"max": 49498.0,
"count": 21
},
"Huggy.Step.mean": {
"value": 1999965.0,
"min": 999972.0,
"max": 1999965.0,
"count": 21
},
"Huggy.Step.sum": {
"value": 1999965.0,
"min": 999972.0,
"max": 1999965.0,
"count": 21
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.415548324584961,
"min": 1.5017623901367188,
"max": 2.467564821243286,
"count": 21
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1603.924072265625,
"min": 6.007049560546875,
"max": 1619.984130859375,
"count": 21
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7888835135353616,
"min": 1.8218119591474533,
"max": 4.048805369212564,
"count": 21
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2515.81865298748,
"min": 7.287247836589813,
"max": 2569.396039247513,
"count": 21
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7888835135353616,
"min": 1.8218119591474533,
"max": 4.048805369212564,
"count": 21
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2515.81865298748,
"min": 7.287247836589813,
"max": 2569.396039247513,
"count": 21
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 21
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 21
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017869885503387195,
"min": 0.0126288044732064,
"max": 0.020615114750883852,
"count": 20
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03573977100677439,
"min": 0.02958455478074029,
"max": 0.05838273290670865,
"count": 20
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05985790298630794,
"min": 0.05222394429147243,
"max": 0.06110340439611012,
"count": 20
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.11971580597261589,
"min": 0.10775570546587307,
"max": 0.18331021318833035,
"count": 20
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.628223790624988e-06,
"min": 3.628223790624988e-06,
"max": 0.00014539107653632503,
"count": 20
},
"Huggy.Policy.LearningRate.sum": {
"value": 7.256447581249976e-06,
"min": 7.256447581249976e-06,
"max": 0.00039457786847410005,
"count": 20
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10120937500000002,
"min": 0.10120937500000002,
"max": 0.14846367500000002,
"count": 20
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20241875000000004,
"min": 0.20241875000000004,
"max": 0.4315259,
"count": 20
},
"Huggy.Policy.Beta.mean": {
"value": 7.03478124999998e-05,
"min": 7.03478124999998e-05,
"max": 0.0024283373825,
"count": 20
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001406956249999996,
"min": 0.0001406956249999996,
"max": 0.006593142410000001,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672809345",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --resume",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672810453"
},
"total": 1107.285346216,
"count": 1,
"self": 0.4310495880004055,
"children": {
"run_training.setup": {
"total": 0.1090892869997333,
"count": 1,
"self": 0.1090892869997333
},
"TrainerController.start_learning": {
"total": 1106.745207341,
"count": 1,
"self": 1.9975928960620877,
"children": {
"TrainerController._reset_env": {
"total": 6.195654363999893,
"count": 1,
"self": 6.195654363999893
},
"TrainerController.advance": {
"total": 1098.4286861079381,
"count": 117451,
"self": 2.0024845349753377,
"children": {
"env_step": {
"total": 859.4346182239101,
"count": 117451,
"self": 722.6047130087491,
"children": {
"SubprocessEnvManager._take_step": {
"total": 135.54600578506097,
"count": 117451,
"self": 7.161195659075929,
"children": {
"TorchPolicy.evaluate": {
"total": 128.38481012598504,
"count": 111573,
"self": 32.27217096798131,
"children": {
"TorchPolicy.sample_actions": {
"total": 96.11263915800373,
"count": 111573,
"self": 96.11263915800373
}
}
}
}
},
"workers": {
"total": 1.283899430100064,
"count": 117451,
"self": 0.0,
"children": {
"worker_root": {
"total": 1102.8192365929858,
"count": 117451,
"is_parallel": true,
"self": 506.06247822991736,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008333160003530793,
"count": 1,
"is_parallel": true,
"self": 0.000301233000755019,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005320829995980603,
"count": 2,
"is_parallel": true,
"self": 0.0005320829995980603
}
}
},
"UnityEnvironment.step": {
"total": 0.03202065599998605,
"count": 1,
"is_parallel": true,
"self": 0.0002682289996300824,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020359200016173418,
"count": 1,
"is_parallel": true,
"self": 0.00020359200016173418
},
"communicator.exchange": {
"total": 0.030833610000172484,
"count": 1,
"is_parallel": true,
"self": 0.030833610000172484
},
"steps_from_proto": {
"total": 0.0007152250000217464,
"count": 1,
"is_parallel": true,
"self": 0.0002347340000596887,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004804909999620577,
"count": 2,
"is_parallel": true,
"self": 0.0004804909999620577
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 596.7567583630685,
"count": 117450,
"is_parallel": true,
"self": 16.929733276003844,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 36.78619466304235,
"count": 117450,
"is_parallel": true,
"self": 36.78619466304235
},
"communicator.exchange": {
"total": 495.5046715110434,
"count": 117450,
"is_parallel": true,
"self": 495.5046715110434
},
"steps_from_proto": {
"total": 47.53615891297886,
"count": 117450,
"is_parallel": true,
"self": 19.088825570921927,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.447333342056936,
"count": 234900,
"is_parallel": true,
"self": 28.447333342056936
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 236.9915833490527,
"count": 117451,
"self": 2.8219069561514516,
"children": {
"process_trajectory": {
"total": 79.95032203090204,
"count": 117451,
"self": 79.23413333390135,
"children": {
"RLTrainer._checkpoint": {
"total": 0.716188697000689,
"count": 6,
"self": 0.716188697000689
}
}
},
"_update_policy": {
"total": 154.2193543619992,
"count": 48,
"self": 128.09066687400355,
"children": {
"TorchPPOOptimizer.update": {
"total": 26.12868748799565,
"count": 1440,
"self": 26.12868748799565
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.639999578008428e-07,
"count": 1,
"self": 8.639999578008428e-07
},
"TrainerController._save_models": {
"total": 0.12327310899991062,
"count": 1,
"self": 0.0026039230001515534,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12066918599975907,
"count": 1,
"self": 0.12066918599975907
}
}
}
}
}
}
}