{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.591925323009491,
"min": 0.5579382181167603,
"max": 1.4636337757110596,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17625.16796875,
"min": 16666.73046875,
"max": 44400.79296875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989985.0,
"min": 29925.0,
"max": 989985.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989985.0,
"min": 29925.0,
"max": 989985.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.19230064749717712,
"min": -0.11907154321670532,
"max": 0.29784902930259705,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 48.45976257324219,
"min": -28.219955444335938,
"max": 75.65365600585938,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 9.964550971984863,
"min": -0.036371611058712006,
"max": 10.328259468078613,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2511.06689453125,
"min": -9.056530952453613,
"max": 2633.7060546875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06901576416057446,
"min": 0.0656256905358911,
"max": 0.0747430377915791,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9662206982480426,
"min": 0.5018770361983889,
"max": 1.0802840924094388,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 13.93250840087356,
"min": 0.000169167281646334,
"max": 13.93250840087356,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 195.05511761222985,
"min": 0.002199174661402342,
"max": 195.05511761222985,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.601261751992852e-06,
"min": 7.601261751992852e-06,
"max": 0.0002952340730172143,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010641766452789992,
"min": 0.00010641766452789992,
"max": 0.0036325180891607,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253372142857142,
"min": 0.10253372142857142,
"max": 0.19841135714285715,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4354721,
"min": 1.3888795,
"max": 2.6108393,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026311877071428553,
"min": 0.00026311877071428553,
"max": 0.009841294578571427,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036836627899999976,
"min": 0.0036836627899999976,
"max": 0.12110284606999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012186178006231785,
"min": 0.012027187272906303,
"max": 0.5746474862098694,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17060649394989014,
"min": 0.16838061809539795,
"max": 4.0225324630737305,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 702.3720930232558,
"min": 530.4464285714286,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30202.0,
"min": 16724.0,
"max": 33142.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.6927999587253083,
"min": -0.9999125525355339,
"max": 1.1480285501373666,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 29.790398225188255,
"min": -31.997201681137085,
"max": 64.28959880769253,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.6927999587253083,
"min": -0.9999125525355339,
"max": 1.1480285501373666,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 29.790398225188255,
"min": -31.997201681137085,
"max": 64.28959880769253,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08960242874021533,
"min": 0.0654441894221236,
"max": 10.533837996861514,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8529044358292595,
"min": 3.6648746076389216,
"max": 179.07524594664574,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1715735784",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1715738788"
},
"total": 3003.536194075,
"count": 1,
"self": 0.7113225059997603,
"children": {
"run_training.setup": {
"total": 0.08085487199997488,
"count": 1,
"self": 0.08085487199997488
},
"TrainerController.start_learning": {
"total": 3002.744016697,
"count": 1,
"self": 2.2361933619581578,
"children": {
"TrainerController._reset_env": {
"total": 3.325425516999985,
"count": 1,
"self": 3.325425516999985
},
"TrainerController.advance": {
"total": 2997.0926982200417,
"count": 63284,
"self": 2.4201370240871256,
"children": {
"env_step": {
"total": 1945.229080328996,
"count": 63284,
"self": 1788.983911837956,
"children": {
"SubprocessEnvManager._take_step": {
"total": 154.84697404104708,
"count": 63284,
"self": 6.962436044008655,
"children": {
"TorchPolicy.evaluate": {
"total": 147.88453799703842,
"count": 62544,
"self": 147.88453799703842
}
}
},
"workers": {
"total": 1.3981944499927863,
"count": 63284,
"self": 0.0,
"children": {
"worker_root": {
"total": 2995.703734744967,
"count": 63284,
"is_parallel": true,
"self": 1391.7363884019835,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006754505000003519,
"count": 1,
"is_parallel": true,
"self": 0.004397775000029469,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00235672999997405,
"count": 8,
"is_parallel": true,
"self": 0.00235672999997405
}
}
},
"UnityEnvironment.step": {
"total": 0.061622747000001254,
"count": 1,
"is_parallel": true,
"self": 0.0008043980000138617,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005601830000045993,
"count": 1,
"is_parallel": true,
"self": 0.0005601830000045993
},
"communicator.exchange": {
"total": 0.058225463999974636,
"count": 1,
"is_parallel": true,
"self": 0.058225463999974636
},
"steps_from_proto": {
"total": 0.0020327020000081575,
"count": 1,
"is_parallel": true,
"self": 0.00042591700008642874,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016067849999217287,
"count": 8,
"is_parallel": true,
"self": 0.0016067849999217287
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1603.9673463429833,
"count": 63283,
"is_parallel": true,
"self": 50.32165760195926,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 31.36030527501731,
"count": 63283,
"is_parallel": true,
"self": 31.36030527501731
},
"communicator.exchange": {
"total": 1392.9675663019573,
"count": 63283,
"is_parallel": true,
"self": 1392.9675663019573
},
"steps_from_proto": {
"total": 129.31781716404953,
"count": 63283,
"is_parallel": true,
"self": 27.895371161086928,
"children": {
"_process_rank_one_or_two_observation": {
"total": 101.4224460029626,
"count": 506264,
"is_parallel": true,
"self": 101.4224460029626
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1049.4434808669587,
"count": 63284,
"self": 4.3154006939571445,
"children": {
"process_trajectory": {
"total": 162.3012131800033,
"count": 63284,
"self": 161.9919894070033,
"children": {
"RLTrainer._checkpoint": {
"total": 0.30922377299998516,
"count": 2,
"self": 0.30922377299998516
}
}
},
"_update_policy": {
"total": 882.8268669929982,
"count": 450,
"self": 367.5926787890123,
"children": {
"TorchPPOOptimizer.update": {
"total": 515.2341882039859,
"count": 22713,
"self": 515.2341882039859
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0580001799098682e-06,
"count": 1,
"self": 1.0580001799098682e-06
},
"TrainerController._save_models": {
"total": 0.08969853999997213,
"count": 1,
"self": 0.00209417999985817,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08760436000011396,
"count": 1,
"self": 0.08760436000011396
}
}
}
}
}
}
}