Initial commit
dqn-SpaceInvadersNoFrameskip-v4.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:11f8df309f9bff907d3633c9142a54890a2170b783c23f28710f8f7e78a0e4c8
 size 27224794
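The `.zip` entry is a Git LFS pointer, not the archive itself: the repository tracks only the `version`/`oid`/`size` triplet, while the 27 MB model archive lives in LFS storage. A minimal sketch of fetching and loading it locally (assumes `git-lfs` is installed and the repository is already cloned; the snippet is illustrative, not part of this commit):

# Replace the LFS pointer with the real 27 MB archive, then load the model.
# Assumes git-lfs is installed; DQN.load is the standard SB3 loading entry point.
import subprocess

from stable_baselines3 import DQN

subprocess.run(["git", "lfs", "pull"], check=True)
model = DQN.load("dqn-SpaceInvadersNoFrameskip-v4.zip")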
dqn-SpaceInvadersNoFrameskip-v4/data CHANGED
@@ -4,9 +4,9 @@
 ":serialized:": "gAWVMAAAAAAAAACMHnN0YWJsZV9iYXNlbGluZXMzLmRxbi5wb2xpY2llc5SMCUNublBvbGljeZSTlC4=",
 "__module__": "stable_baselines3.dqn.policies",
 "__doc__": "\n Policy class for DQN when using images as input.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param features_extractor_class: Features extractor to use.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
-"__init__": "<function CnnPolicy.__init__ at
+"__init__": "<function CnnPolicy.__init__ at 0x7f21b8a22f70>",
 "__abstractmethods__": "frozenset()",
-"_abc_impl": "<_abc_data object at
+"_abc_impl": "<_abc_data object at 0x7f21b8a266f0>"
 },
 "verbose": 1,
 "policy_kwargs": {},
@@ -83,12 +83,12 @@
 ":serialized:": "gAWVNQAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwMUmVwbGF5QnVmZmVylJOULg==",
 "__module__": "stable_baselines3.common.buffers",
 "__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ",
-"__init__": "<function ReplayBuffer.__init__ at
-"add": "<function ReplayBuffer.add at
-"sample": "<function ReplayBuffer.sample at
-"_get_samples": "<function ReplayBuffer._get_samples at
+"__init__": "<function ReplayBuffer.__init__ at 0x7f21b8e8dee0>",
+"add": "<function ReplayBuffer.add at 0x7f21b8e8df70>",
+"sample": "<function ReplayBuffer.sample at 0x7f21b8e8f040>",
+"_get_samples": "<function ReplayBuffer._get_samples at 0x7f21b8e8f0d0>",
 "__abstractmethods__": "frozenset()",
-"_abc_impl": "<_abc_data object at
+"_abc_impl": "<_abc_data object at 0x7f21b8f18b40>"
 },
 "replay_buffer_kwargs": {},
 "train_freq": {
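The `data` file holds the saved model's pickled metadata; the only change in this commit is that the function and `_abc_data` reprs now carry in-memory addresses, which are run-dependent and have no semantic meaning. As a rough guide to how the serialized pieces fit together, here is a minimal sketch of constructing a DQN with the `CnnPolicy` and default `ReplayBuffer` described in the docstrings above (hyperparameters and environment setup are illustrative, not read from this file):

from stable_baselines3 import DQN
from stable_baselines3.common.env_util import make_atari_env
from stable_baselines3.common.vec_env import VecFrameStack

# Standard Atari preprocessing: wrapped env plus 4-frame stacking for the CNN input.
env = VecFrameStack(make_atari_env("SpaceInvadersNoFrameskip-v4", n_envs=1), n_stack=4)

model = DQN(
    "CnnPolicy",          # stable_baselines3.dqn.policies.CnnPolicy, as serialized above
    env,
    buffer_size=100_000,  # illustrative ReplayBuffer size, not taken from the data file
    verbose=1,            # matches "verbose": 1 in the data file
)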
results.json CHANGED
@@ -1 +1 @@
-{"mean_reward": 202.0, "std_reward": 166.66133324799728, "is_deterministic": false, "n_eval_episodes": 10, "eval_datetime": "2022-12-24T15:
+{"mean_reward": 202.0, "std_reward": 166.66133324799728, "is_deterministic": false, "n_eval_episodes": 10, "eval_datetime": "2022-12-24T15:24:57.288856"}
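`results.json` stores the evaluation summary; this commit completes the previously truncated `eval_datetime` timestamp. A sketch of how such a file is typically produced with SB3's `evaluate_policy` helper (the file-writing convention here is an assumption; only `evaluate_policy` itself is the real API):

import json
from datetime import datetime

from stable_baselines3 import DQN
from stable_baselines3.common.env_util import make_atari_env
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.vec_env import VecFrameStack

env = VecFrameStack(make_atari_env("SpaceInvadersNoFrameskip-v4", n_envs=1), n_stack=4)
model = DQN.load("dqn-SpaceInvadersNoFrameskip-v4.zip")

# deterministic=False matches "is_deterministic": false in the file above.
mean_reward, std_reward = evaluate_policy(
    model, env, n_eval_episodes=10, deterministic=False
)

with open("results.json", "w") as f:
    json.dump(
        {
            "mean_reward": mean_reward,
            "std_reward": std_reward,
            "is_deterministic": False,
            "n_eval_episodes": 10,
            "eval_datetime": datetime.now().isoformat(),
        },
        f,
    )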