ZTWHHH committed on
Commit f9ca870 · verified · 1 Parent(s): 2f92078

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/appo/__pycache__/appo_torch_policy.cpython-310.pyc +0 -0
  2. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/appo/__pycache__/utils.cpython-310.pyc +0 -0
  3. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/appo/torch/appo_torch_learner.py +260 -0
  4. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/__init__.py +8 -0
  5. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/__pycache__/cql.cpython-310.pyc +0 -0
  6. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/__pycache__/cql_torch_policy.cpython-310.pyc +0 -0
  7. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/cql.py +389 -0
  8. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/cql_tf_policy.py +426 -0
  9. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/torch/__pycache__/cql_torch_learner.cpython-310.pyc +0 -0
  10. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/torch/__pycache__/cql_torch_rl_module.cpython-310.pyc +0 -0
  11. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__init__.py +10 -0
  12. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/dqn_rainbow_catalog.cpython-310.pyc +0 -0
  13. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/dqn_rainbow_learner.cpython-310.pyc +0 -0
  14. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/dqn_torch_policy.cpython-310.pyc +0 -0
  15. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/learner_thread.cpython-310.pyc +0 -0
  16. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn.py +862 -0
  17. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_rainbow_learner.py +109 -0
  18. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_rainbow_noisy_net_configs.py +60 -0
  19. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_tf_policy.py +500 -0
  20. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_torch_model.py +173 -0
  21. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__init__.py +0 -0
  22. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__pycache__/__init__.cpython-310.pyc +0 -0
  23. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__pycache__/dqn_rainbow_torch_learner.cpython-310.pyc +0 -0
  24. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__pycache__/dqn_rainbow_torch_rl_module.cpython-310.pyc +0 -0
  25. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/dqn_rainbow_torch_learner.py +264 -0
  26. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/dqn_rainbow_torch_noisy_net.py +276 -0
  27. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/dqn_rainbow_torch_rl_module.py +337 -0
  28. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/torch_noisy_linear.py +147 -0
  29. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__init__.py +15 -0
  30. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/__init__.cpython-310.pyc +0 -0
  31. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3.cpython-310.pyc +0 -0
  32. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3_catalog.cpython-310.pyc +0 -0
  33. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3_learner.cpython-310.pyc +0 -0
  34. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3_rl_module.cpython-310.pyc +0 -0
  35. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/dreamerv3_catalog.py +80 -0
  36. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/dreamerv3_learner.py +31 -0
  37. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/__init__.py +0 -0
  38. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/__pycache__/dreamerv3_tf_learner.cpython-310.pyc +0 -0
  39. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/__pycache__/critic_network.cpython-310.pyc +0 -0
  40. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/actor_network.py +203 -0
  41. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__init__.py +0 -0
  42. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__pycache__/dynamics_predictor.cpython-310.pyc +0 -0
  43. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__pycache__/representation_layer.cpython-310.pyc +0 -0
  44. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__pycache__/sequence_model.cpython-310.pyc +0 -0
  45. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/cnn_atari.py +112 -0
  46. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/reward_predictor.py +112 -0
  47. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/reward_predictor_layer.py +110 -0
  48. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/sequence_model.py +144 -0
  49. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/disagree_networks.py +94 -0
  50. deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/dreamer_model.py +606 -0
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/appo/__pycache__/appo_torch_policy.cpython-310.pyc ADDED
Binary file (9.95 kB).
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/appo/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.27 kB).
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/appo/torch/appo_torch_learner.py ADDED
@@ -0,0 +1,260 @@
1
+ """Asynchronous Proximal Policy Optimization (APPO)
2
+
3
+ The algorithm is described in [1] (under the name of "IMPACT"):
4
+
5
+ Detailed documentation:
6
+ https://docs.ray.io/en/master/rllib-algorithms.html#appo
7
+
8
+ [1] IMPACT: Importance Weighted Asynchronous Architectures with Clipped Target Networks.
9
+ Luo et al. 2020
10
+ https://arxiv.org/pdf/1912.00167
11
+ """
12
+ from typing import Dict
13
+
14
+ from ray.rllib.algorithms.appo.appo import (
15
+ APPOConfig,
16
+ LEARNER_RESULTS_CURR_KL_COEFF_KEY,
17
+ LEARNER_RESULTS_KL_KEY,
18
+ TARGET_ACTION_DIST_LOGITS_KEY,
19
+ )
20
+ from ray.rllib.algorithms.appo.appo_learner import APPOLearner
21
+ from ray.rllib.algorithms.impala.torch.impala_torch_learner import IMPALATorchLearner
22
+ from ray.rllib.algorithms.impala.torch.vtrace_torch_v2 import (
23
+ make_time_major,
24
+ vtrace_torch,
25
+ )
26
+ from ray.rllib.core.columns import Columns
27
+ from ray.rllib.core.learner.learner import POLICY_LOSS_KEY, VF_LOSS_KEY, ENTROPY_KEY
28
+ from ray.rllib.core.rl_module.apis import TargetNetworkAPI, ValueFunctionAPI
29
+ from ray.rllib.utils.annotations import override
30
+ from ray.rllib.utils.framework import try_import_torch
31
+ from ray.rllib.utils.numpy import convert_to_numpy
32
+ from ray.rllib.utils.typing import ModuleID, TensorType
33
+
34
+ torch, nn = try_import_torch()
35
+
36
+
37
+ class APPOTorchLearner(APPOLearner, IMPALATorchLearner):
38
+ """Implements APPO loss / update logic on top of IMPALATorchLearner."""
39
+
40
+ @override(IMPALATorchLearner)
41
+ def compute_loss_for_module(
42
+ self,
43
+ *,
44
+ module_id: ModuleID,
45
+ config: APPOConfig,
46
+ batch: Dict,
47
+ fwd_out: Dict[str, TensorType],
48
+ ) -> TensorType:
49
+ module = self.module[module_id].unwrapped()
50
+ assert isinstance(module, TargetNetworkAPI)
51
+ assert isinstance(module, ValueFunctionAPI)
52
+
53
+ # TODO (sven): Now that we do the +1ts trick to be less vulnerable to
54
+ # bootstrap values at the end of rollouts in the new stack, we might make
55
+ # this a more flexible, configurable parameter for users, e.g.
56
+ # `v_trace_seq_len` (independent of `rollout_fragment_length`). Separation
57
+ # of concerns (sampling vs learning).
58
+ rollout_frag_or_episode_len = config.get_rollout_fragment_length()
59
+ recurrent_seq_len = batch.get("seq_lens")
60
+
61
+ loss_mask = batch[Columns.LOSS_MASK].float()
62
+ loss_mask_time_major = make_time_major(
63
+ loss_mask,
64
+ trajectory_len=rollout_frag_or_episode_len,
65
+ recurrent_seq_len=recurrent_seq_len,
66
+ )
67
+ size_loss_mask = torch.sum(loss_mask)
68
+
69
+ values = module.compute_values(
70
+ batch, embeddings=fwd_out.get(Columns.EMBEDDINGS)
71
+ )
72
+
73
+ action_dist_cls_train = module.get_train_action_dist_cls()
74
+
75
+ # Policy being trained (current).
76
+ current_action_dist = action_dist_cls_train.from_logits(
77
+ fwd_out[Columns.ACTION_DIST_INPUTS]
78
+ )
79
+ current_actions_logp = current_action_dist.logp(batch[Columns.ACTIONS])
80
+ current_actions_logp_time_major = make_time_major(
81
+ current_actions_logp,
82
+ trajectory_len=rollout_frag_or_episode_len,
83
+ recurrent_seq_len=recurrent_seq_len,
84
+ )
85
+
86
+ # Target policy.
87
+ target_action_dist = action_dist_cls_train.from_logits(
88
+ module.forward_target(batch)[TARGET_ACTION_DIST_LOGITS_KEY]
89
+ )
90
+ target_actions_logp = target_action_dist.logp(batch[Columns.ACTIONS])
91
+ target_actions_logp_time_major = make_time_major(
92
+ target_actions_logp,
93
+ trajectory_len=rollout_frag_or_episode_len,
94
+ recurrent_seq_len=recurrent_seq_len,
95
+ )
96
+
97
+ # EnvRunner's policy (behavior).
98
+ behavior_actions_logp = batch[Columns.ACTION_LOGP]
99
+ behavior_actions_logp_time_major = make_time_major(
100
+ behavior_actions_logp,
101
+ trajectory_len=rollout_frag_or_episode_len,
102
+ recurrent_seq_len=recurrent_seq_len,
103
+ )
104
+
105
+ rewards_time_major = make_time_major(
106
+ batch[Columns.REWARDS],
107
+ trajectory_len=rollout_frag_or_episode_len,
108
+ recurrent_seq_len=recurrent_seq_len,
109
+ )
110
+
111
+ assert Columns.VALUES_BOOTSTRAPPED not in batch
112
+ values_time_major = make_time_major(
113
+ values,
114
+ trajectory_len=rollout_frag_or_episode_len,
115
+ recurrent_seq_len=recurrent_seq_len,
116
+ )
117
+ # Use as bootstrap values the vf-preds in the next "batch row", except
118
+ # for the very last row (which doesn't have a next row), for which the
119
+ # bootstrap value does not matter b/c it has a +1ts value at its end
120
+ # anyways. So we chose an arbitrary item (for simplicity of not having to
121
+ # move new data to the device).
122
+ bootstrap_values = torch.cat(
123
+ [
124
+ values_time_major[0][1:], # 0th ts values from "next row"
125
+ values_time_major[0][0:1], # <- can use any arbitrary value here
126
+ ],
127
+ dim=0,
128
+ )
129
+
130
+ # The discount factor that is used should be `gamma * lambda_`, except for
131
+ # termination timesteps, in which case the discount factor should be 0.
132
+ discounts_time_major = (
133
+ (
134
+ 1.0
135
+ - make_time_major(
136
+ batch[Columns.TERMINATEDS],
137
+ trajectory_len=rollout_frag_or_episode_len,
138
+ recurrent_seq_len=recurrent_seq_len,
139
+ ).float()
140
+ # See [1] 3.1: Discounts must contain the GAE lambda_ parameter as well.
141
+ )
142
+ * config.gamma
143
+ * config.lambda_
144
+ )
145
+
146
+ # Note that vtrace will compute the main loop on the CPU for better performance.
147
+ vtrace_adjusted_target_values, pg_advantages = vtrace_torch(
148
+ # See [1] 3.1: For AˆV-GAE, the ratios used are: min(c¯, π(target)/π(i))
149
+ # π(target)
150
+ target_action_log_probs=target_actions_logp_time_major,
151
+ # π(i)
152
+ behaviour_action_log_probs=behavior_actions_logp_time_major,
153
+ # See [1] 3.1: Discounts must contain the GAE lambda_ parameter as well.
154
+ discounts=discounts_time_major,
155
+ rewards=rewards_time_major,
156
+ values=values_time_major,
157
+ bootstrap_values=bootstrap_values,
158
+ # c¯
159
+ clip_rho_threshold=config.vtrace_clip_rho_threshold,
160
+ # c¯ (but we allow users to distinguish between c¯ used for
161
+ # value estimates and c¯ used for the advantages).
162
+ clip_pg_rho_threshold=config.vtrace_clip_pg_rho_threshold,
163
+ )
164
+ pg_advantages = pg_advantages * loss_mask_time_major
165
+
166
+ # The policy gradient loss.
167
+ # As described in [1], use a logp-ratio of:
168
+ # min(π(i) / π(target), ρ) * (π / π(i)), where ..
169
+ # - π are the action probs from the current (learner) policy
170
+ # - π(i) are the action probs from the ith EnvRunner
171
+ # - π(target) are the action probs from the target network
172
+ # - ρ is the "target-worker clipping" (2.0 in the paper)
173
+ target_worker_is_ratio = torch.clip(
174
+ torch.exp(
175
+ behavior_actions_logp_time_major - target_actions_logp_time_major
176
+ ),
177
+ 0.0,
178
+ config.target_worker_clipping,
179
+ )
180
+ target_worker_logp_ratio = target_worker_is_ratio * torch.exp(
181
+ current_actions_logp_time_major - behavior_actions_logp_time_major
182
+ )
183
+ surrogate_loss = torch.minimum(
184
+ pg_advantages * target_worker_logp_ratio,
185
+ pg_advantages
186
+ * torch.clip(
187
+ target_worker_logp_ratio,
188
+ 1 - config.clip_param,
189
+ 1 + config.clip_param,
190
+ ),
191
+ )
192
+ mean_pi_loss = -(torch.sum(surrogate_loss) / size_loss_mask)
193
+
194
+ # Compute KL-loss (if required): KL divergence between current action dist.
195
+ # and target action dist.
196
+ if config.use_kl_loss:
197
+ action_kl = target_action_dist.kl(current_action_dist) * loss_mask
198
+ mean_kl_loss = torch.sum(action_kl) / size_loss_mask
199
+ else:
200
+ mean_kl_loss = 0.0
201
+
202
+ # Compute value function loss.
203
+ delta = values_time_major - vtrace_adjusted_target_values
204
+ vf_loss = 0.5 * torch.sum(torch.pow(delta, 2.0) * loss_mask_time_major)
205
+ mean_vf_loss = vf_loss / size_loss_mask
206
+
207
+ # Compute entropy loss.
208
+ mean_entropy_loss = (
209
+ -torch.sum(current_action_dist.entropy() * loss_mask) / size_loss_mask
210
+ )
211
+
212
+ # The summed weighted loss.
213
+ total_loss = (
214
+ mean_pi_loss
215
+ + (mean_vf_loss * config.vf_loss_coeff)
216
+ + (
217
+ mean_entropy_loss
218
+ * self.entropy_coeff_schedulers_per_module[
219
+ module_id
220
+ ].get_current_value()
221
+ )
222
+ + (mean_kl_loss * self.curr_kl_coeffs_per_module[module_id])
223
+ )
224
+
225
+ # Log important loss stats.
226
+ self.metrics.log_dict(
227
+ {
228
+ POLICY_LOSS_KEY: mean_pi_loss,
229
+ VF_LOSS_KEY: mean_vf_loss,
230
+ ENTROPY_KEY: -mean_entropy_loss,
231
+ LEARNER_RESULTS_KL_KEY: mean_kl_loss,
232
+ LEARNER_RESULTS_CURR_KL_COEFF_KEY: (
233
+ self.curr_kl_coeffs_per_module[module_id]
234
+ ),
235
+ },
236
+ key=module_id,
237
+ window=1, # <- single items (should not be mean/ema-reduced over time).
238
+ )
239
+ # Return the total loss.
240
+ return total_loss
241
+
242
+ @override(APPOLearner)
243
+ def _update_module_kl_coeff(self, module_id: ModuleID, config: APPOConfig) -> None:
244
+ # Update the current KL value based on the recently measured value.
245
+ # Increase.
246
+ kl = convert_to_numpy(self.metrics.peek((module_id, LEARNER_RESULTS_KL_KEY)))
247
+ kl_coeff_var = self.curr_kl_coeffs_per_module[module_id]
248
+
249
+ if kl > 2.0 * config.kl_target:
250
+ # TODO (Kourosh) why not *2.0?
251
+ kl_coeff_var.data *= 1.5
252
+ # Decrease.
253
+ elif kl < 0.5 * config.kl_target:
254
+ kl_coeff_var.data *= 0.5
255
+
256
+ self.metrics.log_value(
257
+ (module_id, LEARNER_RESULTS_CURR_KL_COEFF_KEY),
258
+ kl_coeff_var.item(),
259
+ window=1,
260
+ )
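
The surrogate objective computed above combines IMPACT's target-worker clipping with the standard PPO clip. As a minimal, self-contained sketch of that ratio construction, assuming plain PyTorch tensors in place of RLlib's time-major batch (the function and argument names below are illustrative, not RLlib API):

import torch

def impact_surrogate_loss(
    advantages,                  # v-trace PG advantages (already loss-masked)
    logp_current,                # log pi(a|s) of the policy being trained
    logp_behavior,               # log pi_i(a|s) of the EnvRunner that sampled
    logp_target,                 # log pi_target(a|s) of the lagging target net
    clip_param=0.2,              # PPO clip epsilon
    target_worker_clipping=2.0,  # rho from the IMPACT paper
):
    # min(pi_i / pi_target, rho): clipped behavior-to-target IS ratio.
    is_ratio = torch.clamp(
        torch.exp(logp_behavior - logp_target), 0.0, target_worker_clipping
    )
    # Combined ratio min(pi_i / pi_target, rho) * (pi / pi_i).
    ratio = is_ratio * torch.exp(logp_current - logp_behavior)
    # PPO-style clipped surrogate on that ratio, then mean over valid steps.
    surrogate = torch.minimum(
        advantages * ratio,
        advantages * torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param),
    )
    return -surrogate.mean()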
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/__init__.py ADDED
@@ -0,0 +1,8 @@
1
+ from ray.rllib.algorithms.cql.cql import CQL, CQLConfig
2
+ from ray.rllib.algorithms.cql.cql_torch_policy import CQLTorchPolicy
3
+
4
+ __all__ = [
5
+ "CQL",
6
+ "CQLTorchPolicy",
7
+ "CQLConfig",
8
+ ]
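
For reference, a minimal usage sketch of these exports through the config API (the environment name and offline-data path are placeholders, not taken from this commit):

from ray.rllib.algorithms.cql import CQL, CQLConfig

# Hypothetical setup: env name and offline data path are placeholders.
config = (
    CQLConfig()
    .environment("Pendulum-v1")
    .offline_data(input_="path/to/offline_data")
    .training(bc_iters=200, min_q_weight=5.0)
)
algo = config.build()
assert isinstance(algo, CQL)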
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/__pycache__/cql.cpython-310.pyc ADDED
Binary file (10.8 kB).
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/__pycache__/cql_torch_policy.cpython-310.pyc ADDED
Binary file (9.4 kB).
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/cql.py ADDED
@@ -0,0 +1,389 @@
1
+ import logging
2
+ from typing import Optional, Type, Union
3
+
4
+ from ray.rllib.algorithms.algorithm_config import AlgorithmConfig, NotProvided
5
+ from ray.rllib.algorithms.cql.cql_tf_policy import CQLTFPolicy
6
+ from ray.rllib.algorithms.cql.cql_torch_policy import CQLTorchPolicy
7
+ from ray.rllib.algorithms.sac.sac import (
8
+ SAC,
9
+ SACConfig,
10
+ )
11
+ from ray.rllib.connectors.common.add_observations_from_episodes_to_batch import (
12
+ AddObservationsFromEpisodesToBatch,
13
+ )
14
+ from ray.rllib.connectors.learner.add_next_observations_from_episodes_to_train_batch import ( # noqa
15
+ AddNextObservationsFromEpisodesToTrainBatch,
16
+ )
17
+ from ray.rllib.core.learner.learner import Learner
18
+ from ray.rllib.core.rl_module.rl_module import RLModuleSpec
19
+ from ray.rllib.execution.rollout_ops import (
20
+ synchronous_parallel_sample,
21
+ )
22
+ from ray.rllib.execution.train_ops import (
23
+ multi_gpu_train_one_step,
24
+ train_one_step,
25
+ )
26
+ from ray.rllib.policy.policy import Policy
27
+ from ray.rllib.utils.annotations import OldAPIStack, override
28
+ from ray.rllib.utils.deprecation import (
29
+ DEPRECATED_VALUE,
30
+ deprecation_warning,
31
+ )
32
+ from ray.rllib.utils.framework import try_import_tf, try_import_tfp
33
+ from ray.rllib.utils.metrics import (
34
+ ALL_MODULES,
35
+ LEARNER_RESULTS,
36
+ LEARNER_UPDATE_TIMER,
37
+ LAST_TARGET_UPDATE_TS,
38
+ NUM_AGENT_STEPS_SAMPLED,
39
+ NUM_AGENT_STEPS_TRAINED,
40
+ NUM_ENV_STEPS_SAMPLED,
41
+ NUM_ENV_STEPS_TRAINED,
42
+ NUM_TARGET_UPDATES,
43
+ OFFLINE_SAMPLING_TIMER,
44
+ TARGET_NET_UPDATE_TIMER,
45
+ SYNCH_WORKER_WEIGHTS_TIMER,
46
+ SAMPLE_TIMER,
47
+ TIMERS,
48
+ )
49
+ from ray.rllib.utils.typing import ResultDict, RLModuleSpecType
50
+
51
+ tf1, tf, tfv = try_import_tf()
52
+ tfp = try_import_tfp()
53
+ logger = logging.getLogger(__name__)
54
+
55
+
56
+ class CQLConfig(SACConfig):
57
+ """Defines a configuration class from which a CQL can be built.
58
+
59
+ .. testcode::
60
+ :skipif: True
61
+
62
+ from ray.rllib.algorithms.cql import CQLConfig
63
+ config = CQLConfig().training(gamma=0.9, lr=0.01)
64
+ config = config.resources(num_gpus=0)
65
+ config = config.env_runners(num_env_runners=4)
66
+ print(config.to_dict())
67
+ # Build a Algorithm object from the config and run 1 training iteration.
68
+ algo = config.build(env="CartPole-v1")
69
+ algo.train()
70
+ """
71
+
72
+ def __init__(self, algo_class=None):
73
+ super().__init__(algo_class=algo_class or CQL)
74
+
75
+ # fmt: off
76
+ # __sphinx_doc_begin__
77
+ # CQL-specific config settings:
78
+ self.bc_iters = 20000
79
+ self.temperature = 1.0
80
+ self.num_actions = 10
81
+ self.lagrangian = False
82
+ self.lagrangian_thresh = 5.0
83
+ self.min_q_weight = 5.0
84
+ self.deterministic_backup = True
85
+ self.lr = 3e-4
86
+ # Note, the new stack defines learning rates for each component.
87
+ # The base learning rate `lr` has to be set to `None`, if using
88
+ # the new stack.
89
+ self.actor_lr = 1e-4
90
+ self.critic_lr = 1e-3
91
+ self.alpha_lr = 1e-3
92
+
93
+ self.replay_buffer_config = {
94
+ "_enable_replay_buffer_api": True,
95
+ "type": "MultiAgentPrioritizedReplayBuffer",
96
+ "capacity": int(1e6),
97
+ # If True prioritized replay buffer will be used.
98
+ "prioritized_replay": False,
99
+ "prioritized_replay_alpha": 0.6,
100
+ "prioritized_replay_beta": 0.4,
101
+ "prioritized_replay_eps": 1e-6,
102
+ # Whether to compute priorities already on the remote worker side.
103
+ "worker_side_prioritization": False,
104
+ }
105
+
106
+ # Changes to Algorithm's/SACConfig's default:
107
+
108
+ # .reporting()
109
+ self.min_sample_timesteps_per_iteration = 0
110
+ self.min_train_timesteps_per_iteration = 100
111
+ # fmt: on
112
+ # __sphinx_doc_end__
113
+
114
+ self.timesteps_per_iteration = DEPRECATED_VALUE
115
+
116
+ @override(SACConfig)
117
+ def training(
118
+ self,
119
+ *,
120
+ bc_iters: Optional[int] = NotProvided,
121
+ temperature: Optional[float] = NotProvided,
122
+ num_actions: Optional[int] = NotProvided,
123
+ lagrangian: Optional[bool] = NotProvided,
124
+ lagrangian_thresh: Optional[float] = NotProvided,
125
+ min_q_weight: Optional[float] = NotProvided,
126
+ deterministic_backup: Optional[bool] = NotProvided,
127
+ **kwargs,
128
+ ) -> "CQLConfig":
129
+ """Sets the training-related configuration.
130
+
131
+ Args:
132
+ bc_iters: Number of iterations with Behavior Cloning pretraining.
133
+ temperature: CQL loss temperature.
134
+ num_actions: Number of actions to sample for CQL loss
135
+ lagrangian: Whether to use the Lagrangian for Alpha Prime (in CQL loss).
136
+ lagrangian_thresh: Lagrangian threshold.
137
+ min_q_weight: in Q weight multiplier.
138
+ deterministic_backup: If the target in the Bellman update should have an
139
+ entropy backup. Defaults to `True`.
140
+
141
+ Returns:
142
+ This updated AlgorithmConfig object.
143
+ """
144
+ # Pass kwargs onto super's `training()` method.
145
+ super().training(**kwargs)
146
+
147
+ if bc_iters is not NotProvided:
148
+ self.bc_iters = bc_iters
149
+ if temperature is not NotProvided:
150
+ self.temperature = temperature
151
+ if num_actions is not NotProvided:
152
+ self.num_actions = num_actions
153
+ if lagrangian is not NotProvided:
154
+ self.lagrangian = lagrangian
155
+ if lagrangian_thresh is not NotProvided:
156
+ self.lagrangian_thresh = lagrangian_thresh
157
+ if min_q_weight is not NotProvided:
158
+ self.min_q_weight = min_q_weight
159
+ if deterministic_backup is not NotProvided:
160
+ self.deterministic_backup = deterministic_backup
161
+
162
+ return self
163
+
164
+ @override(AlgorithmConfig)
165
+ def offline_data(self, **kwargs) -> "CQLConfig":
166
+
167
+ super().offline_data(**kwargs)
168
+
169
+ # Check, if the passed in class incorporates the `OfflinePreLearner`
170
+ # interface.
171
+ if "prelearner_class" in kwargs:
172
+ from ray.rllib.offline.offline_data import OfflinePreLearner
173
+
174
+ if not issubclass(kwargs.get("prelearner_class"), OfflinePreLearner):
175
+ raise ValueError(
176
+ f"`prelearner_class` {kwargs.get('prelearner_class')} is not a "
177
+ "subclass of `OfflinePreLearner`. Any class passed to "
178
+ "`prelearner_class` needs to implement the interface given by "
179
+ "`OfflinePreLearner`."
180
+ )
181
+
182
+ return self
183
+
184
+ @override(SACConfig)
185
+ def get_default_learner_class(self) -> Union[Type["Learner"], str]:
186
+ if self.framework_str == "torch":
187
+ from ray.rllib.algorithms.cql.torch.cql_torch_learner import CQLTorchLearner
188
+
189
+ return CQLTorchLearner
190
+ else:
191
+ raise ValueError(
192
+ f"The framework {self.framework_str} is not supported. "
193
+ "Use `'torch'` instead."
194
+ )
195
+
196
+ @override(AlgorithmConfig)
197
+ def build_learner_connector(
198
+ self,
199
+ input_observation_space,
200
+ input_action_space,
201
+ device=None,
202
+ ):
203
+ pipeline = super().build_learner_connector(
204
+ input_observation_space=input_observation_space,
205
+ input_action_space=input_action_space,
206
+ device=device,
207
+ )
208
+
209
+ # Prepend the "add-NEXT_OBS-from-episodes-to-train-batch" connector piece (right
210
+ # after the corresponding "add-OBS-..." default piece).
211
+ pipeline.insert_after(
212
+ AddObservationsFromEpisodesToBatch,
213
+ AddNextObservationsFromEpisodesToTrainBatch(),
214
+ )
215
+
216
+ return pipeline
217
+
218
+ @override(SACConfig)
219
+ def validate(self) -> None:
220
+ # First check, whether old `timesteps_per_iteration` is used.
221
+ if self.timesteps_per_iteration != DEPRECATED_VALUE:
222
+ deprecation_warning(
223
+ old="timesteps_per_iteration",
224
+ new="min_train_timesteps_per_iteration",
225
+ error=True,
226
+ )
227
+
228
+ # Call super's validation method.
229
+ super().validate()
230
+
231
+ # CQL-torch performs the optimizer steps inside the loss function.
232
+ # Using the multi-GPU optimizer will therefore not work (see multi-GPU
233
+ # check above) and we must use the simple optimizer for now.
234
+ if self.simple_optimizer is not True and self.framework_str == "torch":
235
+ self.simple_optimizer = True
236
+
237
+ if self.framework_str in ["tf", "tf2"] and tfp is None:
238
+ logger.warning(
239
+ "You need `tensorflow_probability` in order to run CQL! "
240
+ "Install it via `pip install tensorflow_probability`. Your "
241
+ f"tf.__version__={tf.__version__ if tf else None}."
242
+ "Trying to import tfp results in the following error:"
243
+ )
244
+ try_import_tfp(error=True)
245
+
246
+ # Assert that for a local learner the number of iterations is 1. Note,
247
+ # this is needed because we have no iterators, but instead a single
248
+ # batch returned directly from the `OfflineData.sample` method.
249
+ if (
250
+ self.num_learners == 0
251
+ and not self.dataset_num_iters_per_learner
252
+ and self.enable_rl_module_and_learner
253
+ ):
254
+ raise ValueError(
255
+ "When using a single local learner the number of iterations "
256
+ "per learner, `dataset_num_iters_per_learner` has to be defined. "
257
+ "Set this hyperparameter in the `AlgorithmConfig.offline_data`."
258
+ )
259
+
260
+ @override(SACConfig)
261
+ def get_default_rl_module_spec(self) -> RLModuleSpecType:
262
+ from ray.rllib.algorithms.sac.sac_catalog import SACCatalog
263
+
264
+ if self.framework_str == "torch":
265
+ from ray.rllib.algorithms.cql.torch.cql_torch_rl_module import (
266
+ CQLTorchRLModule,
267
+ )
268
+
269
+ return RLModuleSpec(module_class=CQLTorchRLModule, catalog_class=SACCatalog)
270
+ else:
271
+ raise ValueError(
272
+ f"The framework {self.framework_str} is not supported. " "Use `torch`."
273
+ )
274
+
275
+ @property
276
+ def _model_config_auto_includes(self):
277
+ return super()._model_config_auto_includes | {
278
+ "num_actions": self.num_actions,
279
+ }
280
+
281
+
282
+ class CQL(SAC):
283
+ """CQL (derived from SAC)."""
284
+
285
+ @classmethod
286
+ @override(SAC)
287
+ def get_default_config(cls) -> AlgorithmConfig:
288
+ return CQLConfig()
289
+
290
+ @classmethod
291
+ @override(SAC)
292
+ def get_default_policy_class(
293
+ cls, config: AlgorithmConfig
294
+ ) -> Optional[Type[Policy]]:
295
+ if config["framework"] == "torch":
296
+ return CQLTorchPolicy
297
+ else:
298
+ return CQLTFPolicy
299
+
300
+ @override(SAC)
301
+ def training_step(self) -> None:
302
+ # Old API stack (Policy, RolloutWorker, Connector).
303
+ if not self.config.enable_env_runner_and_connector_v2:
304
+ return self._training_step_old_api_stack()
305
+
306
+ # Sampling from offline data.
307
+ with self.metrics.log_time((TIMERS, OFFLINE_SAMPLING_TIMER)):
308
+ # Return an iterator in case we are using remote learners.
309
+ batch_or_iterator = self.offline_data.sample(
310
+ num_samples=self.config.train_batch_size_per_learner,
311
+ num_shards=self.config.num_learners,
312
+ return_iterator=self.config.num_learners > 1,
313
+ )
314
+
315
+ # Updating the policy.
316
+ with self.metrics.log_time((TIMERS, LEARNER_UPDATE_TIMER)):
317
+ # TODO (simon, sven): Check, if we should execute directly s.th. like
318
+ # `LearnerGroup.update_from_iterator()`.
319
+ learner_results = self.learner_group._update(
320
+ batch=batch_or_iterator,
321
+ minibatch_size=self.config.train_batch_size_per_learner,
322
+ num_iters=self.config.dataset_num_iters_per_learner,
323
+ )
324
+
325
+ # Log training results.
326
+ self.metrics.merge_and_log_n_dicts(learner_results, key=LEARNER_RESULTS)
327
+
328
+ # Synchronize weights.
329
+ # As the results contain for each policy the loss and in addition the
330
+ # total loss over all policies is returned, this total loss has to be
331
+ # removed.
332
+ modules_to_update = set(learner_results[0].keys()) - {ALL_MODULES}
333
+
334
+ # Update weights - after learning on the local worker -
335
+ # on all remote workers. Note, we only have the local `EnvRunner`,
336
+ # but from this `EnvRunner` the evaulation `EnvRunner`s get updated.
337
+ with self.metrics.log_time((TIMERS, SYNCH_WORKER_WEIGHTS_TIMER)):
338
+ self.env_runner_group.sync_weights(
339
+ # Sync weights from learner_group to all EnvRunners.
340
+ from_worker_or_learner_group=self.learner_group,
341
+ policies=modules_to_update,
342
+ inference_only=True,
343
+ )
344
+
345
+ @OldAPIStack
346
+ def _training_step_old_api_stack(self) -> ResultDict:
347
+ # Collect SampleBatches from sample workers.
348
+ with self._timers[SAMPLE_TIMER]:
349
+ train_batch = synchronous_parallel_sample(worker_set=self.env_runner_group)
350
+ train_batch = train_batch.as_multi_agent()
351
+ self._counters[NUM_AGENT_STEPS_SAMPLED] += train_batch.agent_steps()
352
+ self._counters[NUM_ENV_STEPS_SAMPLED] += train_batch.env_steps()
353
+
354
+ # Postprocess batch before we learn on it.
355
+ post_fn = self.config.get("before_learn_on_batch") or (lambda b, *a: b)
356
+ train_batch = post_fn(train_batch, self.env_runner_group, self.config)
357
+
358
+ # Learn on training batch.
359
+ # Use simple optimizer (only for multi-agent or tf-eager; all other
360
+ # cases should use the multi-GPU optimizer, even if only using 1 GPU)
361
+ if self.config.get("simple_optimizer") is True:
362
+ train_results = train_one_step(self, train_batch)
363
+ else:
364
+ train_results = multi_gpu_train_one_step(self, train_batch)
365
+
366
+ # Update target network every `target_network_update_freq` training steps.
367
+ cur_ts = self._counters[
368
+ NUM_AGENT_STEPS_TRAINED
369
+ if self.config.count_steps_by == "agent_steps"
370
+ else NUM_ENV_STEPS_TRAINED
371
+ ]
372
+ last_update = self._counters[LAST_TARGET_UPDATE_TS]
373
+ if cur_ts - last_update >= self.config.target_network_update_freq:
374
+ with self._timers[TARGET_NET_UPDATE_TIMER]:
375
+ to_update = self.env_runner.get_policies_to_train()
376
+ self.env_runner.foreach_policy_to_train(
377
+ lambda p, pid: pid in to_update and p.update_target()
378
+ )
379
+ self._counters[NUM_TARGET_UPDATES] += 1
380
+ self._counters[LAST_TARGET_UPDATE_TS] = cur_ts
381
+
382
+ # Update remote workers's weights after learning on local worker
383
+ # (only those policies that were actually trained).
384
+ if self.env_runner_group.num_remote_workers() > 0:
385
+ with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]:
386
+ self.env_runner_group.sync_weights(policies=list(train_results.keys()))
387
+
388
+ # Return all collected metrics for the iteration.
389
+ return train_results
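
As the `validate()` check above enforces, a new-API-stack CQL run with a single local Learner must set `dataset_num_iters_per_learner` explicitly. A hedged configuration sketch (paths and sizes are placeholders; the config methods used here exist in recent RLlib versions, but exact defaults may differ):

from ray.rllib.algorithms.cql import CQLConfig

config = (
    CQLConfig()
    .environment("Pendulum-v1")
    .api_stack(
        enable_rl_module_and_learner=True,
        enable_env_runner_and_connector_v2=True,
    )
    .learners(num_learners=0)  # single local Learner
    .offline_data(
        input_="path/to/offline_data",    # placeholder path
        dataset_num_iters_per_learner=1,  # required when num_learners == 0
    )
    .training(train_batch_size_per_learner=1024)
)
algo = config.build()
print(algo.train())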
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/cql_tf_policy.py ADDED
@@ -0,0 +1,426 @@
1
+ """
2
+ TensorFlow policy class used for CQL.
3
+ """
4
+ from functools import partial
5
+ import numpy as np
6
+ import gymnasium as gym
7
+ import logging
8
+ import tree
9
+ from typing import Dict, List, Type, Union
10
+
11
+ import ray
12
+ import ray.experimental.tf_utils
13
+ from ray.rllib.algorithms.sac.sac_tf_policy import (
14
+ apply_gradients as sac_apply_gradients,
15
+ compute_and_clip_gradients as sac_compute_and_clip_gradients,
16
+ get_distribution_inputs_and_class,
17
+ _get_dist_class,
18
+ build_sac_model,
19
+ postprocess_trajectory,
20
+ setup_late_mixins,
21
+ stats,
22
+ validate_spaces,
23
+ ActorCriticOptimizerMixin as SACActorCriticOptimizerMixin,
24
+ ComputeTDErrorMixin,
25
+ )
26
+ from ray.rllib.models.modelv2 import ModelV2
27
+ from ray.rllib.models.tf.tf_action_dist import TFActionDistribution
28
+ from ray.rllib.policy.tf_mixins import TargetNetworkMixin
29
+ from ray.rllib.policy.tf_policy_template import build_tf_policy
30
+ from ray.rllib.policy.policy import Policy
31
+ from ray.rllib.policy.sample_batch import SampleBatch
32
+ from ray.rllib.utils.exploration.random import Random
33
+ from ray.rllib.utils.framework import get_variable, try_import_tf, try_import_tfp
34
+ from ray.rllib.utils.typing import (
35
+ LocalOptimizer,
36
+ ModelGradients,
37
+ TensorType,
38
+ AlgorithmConfigDict,
39
+ )
40
+
41
+ tf1, tf, tfv = try_import_tf()
42
+ tfp = try_import_tfp()
43
+
44
+ logger = logging.getLogger(__name__)
45
+
46
+ MEAN_MIN = -9.0
47
+ MEAN_MAX = 9.0
48
+
49
+
50
+ def _repeat_tensor(t: TensorType, n: int):
51
+ # Insert new axis at position 1 into tensor t
52
+ t_rep = tf.expand_dims(t, 1)
53
+ # Repeat tensor t_rep along new axis n times
54
+ multiples = tf.concat([[1, n], tf.tile([1], tf.expand_dims(tf.rank(t) - 1, 0))], 0)
55
+ t_rep = tf.tile(t_rep, multiples)
56
+ # Merge new axis into batch axis
57
+ t_rep = tf.reshape(t_rep, tf.concat([[-1], tf.shape(t)[1:]], 0))
58
+ return t_rep
59
+
60
+
61
+ # Returns policy tiled actions and log probabilities for CQL Loss
62
+ def policy_actions_repeat(model, action_dist, obs, num_repeat=1):
63
+ batch_size = tf.shape(tree.flatten(obs)[0])[0]
64
+ obs_temp = tree.map_structure(lambda t: _repeat_tensor(t, num_repeat), obs)
65
+ logits, _ = model.get_action_model_outputs(obs_temp)
66
+ policy_dist = action_dist(logits, model)
67
+ actions, logp_ = policy_dist.sample_logp()
68
+ logp = tf.expand_dims(logp_, -1)
69
+ return actions, tf.reshape(logp, [batch_size, num_repeat, 1])
70
+
71
+
72
+ def q_values_repeat(model, obs, actions, twin=False):
73
+ action_shape = tf.shape(actions)[0]
74
+ obs_shape = tf.shape(tree.flatten(obs)[0])[0]
75
+ num_repeat = action_shape // obs_shape
76
+ obs_temp = tree.map_structure(lambda t: _repeat_tensor(t, num_repeat), obs)
77
+ if not twin:
78
+ preds_, _ = model.get_q_values(obs_temp, actions)
79
+ else:
80
+ preds_, _ = model.get_twin_q_values(obs_temp, actions)
81
+ preds = tf.reshape(preds_, [obs_shape, num_repeat, 1])
82
+ return preds
83
+
84
+
85
+ def cql_loss(
86
+ policy: Policy,
87
+ model: ModelV2,
88
+ dist_class: Type[TFActionDistribution],
89
+ train_batch: SampleBatch,
90
+ ) -> Union[TensorType, List[TensorType]]:
91
+ logger.info(f"Current iteration = {policy.cur_iter}")
92
+ policy.cur_iter += 1
93
+
94
+ # For best performance, turn deterministic off
95
+ deterministic = policy.config["_deterministic_loss"]
96
+ assert not deterministic
97
+ twin_q = policy.config["twin_q"]
98
+ discount = policy.config["gamma"]
99
+
100
+ # CQL Parameters
101
+ bc_iters = policy.config["bc_iters"]
102
+ cql_temp = policy.config["temperature"]
103
+ num_actions = policy.config["num_actions"]
104
+ min_q_weight = policy.config["min_q_weight"]
105
+ use_lagrange = policy.config["lagrangian"]
106
+ target_action_gap = policy.config["lagrangian_thresh"]
107
+
108
+ obs = train_batch[SampleBatch.CUR_OBS]
109
+ actions = tf.cast(train_batch[SampleBatch.ACTIONS], tf.float32)
110
+ rewards = tf.cast(train_batch[SampleBatch.REWARDS], tf.float32)
111
+ next_obs = train_batch[SampleBatch.NEXT_OBS]
112
+ terminals = train_batch[SampleBatch.TERMINATEDS]
113
+
114
+ model_out_t, _ = model(SampleBatch(obs=obs, _is_training=True), [], None)
115
+
116
+ model_out_tp1, _ = model(SampleBatch(obs=next_obs, _is_training=True), [], None)
117
+
118
+ target_model_out_tp1, _ = policy.target_model(
119
+ SampleBatch(obs=next_obs, _is_training=True), [], None
120
+ )
121
+
122
+ action_dist_class = _get_dist_class(policy, policy.config, policy.action_space)
123
+ action_dist_inputs_t, _ = model.get_action_model_outputs(model_out_t)
124
+ action_dist_t = action_dist_class(action_dist_inputs_t, model)
125
+ policy_t, log_pis_t = action_dist_t.sample_logp()
126
+ log_pis_t = tf.expand_dims(log_pis_t, -1)
127
+
128
+ # Unlike original SAC, Alpha and Actor Loss are computed first.
129
+ # Alpha Loss
130
+ alpha_loss = -tf.reduce_mean(
131
+ model.log_alpha * tf.stop_gradient(log_pis_t + model.target_entropy)
132
+ )
133
+
134
+ # Policy Loss (Either Behavior Clone Loss or SAC Loss)
135
+ alpha = tf.math.exp(model.log_alpha)
136
+ if policy.cur_iter >= bc_iters:
137
+ min_q, _ = model.get_q_values(model_out_t, policy_t)
138
+ if twin_q:
139
+ twin_q_, _ = model.get_twin_q_values(model_out_t, policy_t)
140
+ min_q = tf.math.minimum(min_q, twin_q_)
141
+ actor_loss = tf.reduce_mean(tf.stop_gradient(alpha) * log_pis_t - min_q)
142
+ else:
143
+ bc_logp = action_dist_t.logp(actions)
144
+ actor_loss = tf.reduce_mean(tf.stop_gradient(alpha) * log_pis_t - bc_logp)
145
+ # actor_loss = -tf.reduce_mean(bc_logp)
146
+
147
+ # Critic Loss (Standard SAC Critic L2 Loss + CQL Entropy Loss)
148
+ # SAC Loss:
149
+ # Q-values for the batched actions.
150
+ action_dist_inputs_tp1, _ = model.get_action_model_outputs(model_out_tp1)
151
+ action_dist_tp1 = action_dist_class(action_dist_inputs_tp1, model)
152
+ policy_tp1, _ = action_dist_tp1.sample_logp()
153
+
154
+ q_t, _ = model.get_q_values(model_out_t, actions)
155
+ q_t_selected = tf.squeeze(q_t, axis=-1)
156
+ if twin_q:
157
+ twin_q_t, _ = model.get_twin_q_values(model_out_t, actions)
158
+ twin_q_t_selected = tf.squeeze(twin_q_t, axis=-1)
159
+
160
+ # Target q network evaluation.
161
+ q_tp1, _ = policy.target_model.get_q_values(target_model_out_tp1, policy_tp1)
162
+ if twin_q:
163
+ twin_q_tp1, _ = policy.target_model.get_twin_q_values(
164
+ target_model_out_tp1, policy_tp1
165
+ )
166
+ # Take min over both twin-NNs.
167
+ q_tp1 = tf.math.minimum(q_tp1, twin_q_tp1)
168
+
169
+ q_tp1_best = tf.squeeze(input=q_tp1, axis=-1)
170
+ q_tp1_best_masked = (1.0 - tf.cast(terminals, tf.float32)) * q_tp1_best
171
+
172
+ # compute RHS of bellman equation
173
+ q_t_target = tf.stop_gradient(
174
+ rewards + (discount ** policy.config["n_step"]) * q_tp1_best_masked
175
+ )
176
+
177
+ # Compute the TD-error (potentially clipped), for priority replay buffer
178
+ base_td_error = tf.math.abs(q_t_selected - q_t_target)
179
+ if twin_q:
180
+ twin_td_error = tf.math.abs(twin_q_t_selected - q_t_target)
181
+ td_error = 0.5 * (base_td_error + twin_td_error)
182
+ else:
183
+ td_error = base_td_error
184
+
185
+ critic_loss_1 = tf.keras.losses.MSE(q_t_selected, q_t_target)
186
+ if twin_q:
187
+ critic_loss_2 = tf.keras.losses.MSE(twin_q_t_selected, q_t_target)
188
+
189
+ # CQL Loss (We are using Entropy version of CQL (the best version))
190
+ rand_actions, _ = policy._random_action_generator.get_exploration_action(
191
+ action_distribution=action_dist_class(
192
+ tf.tile(action_dist_tp1.inputs, (num_actions, 1)), model
193
+ ),
194
+ timestep=0,
195
+ explore=True,
196
+ )
197
+ curr_actions, curr_logp = policy_actions_repeat(
198
+ model, action_dist_class, model_out_t, num_actions
199
+ )
200
+ next_actions, next_logp = policy_actions_repeat(
201
+ model, action_dist_class, model_out_tp1, num_actions
202
+ )
203
+
204
+ q1_rand = q_values_repeat(model, model_out_t, rand_actions)
205
+ q1_curr_actions = q_values_repeat(model, model_out_t, curr_actions)
206
+ q1_next_actions = q_values_repeat(model, model_out_t, next_actions)
207
+
208
+ if twin_q:
209
+ q2_rand = q_values_repeat(model, model_out_t, rand_actions, twin=True)
210
+ q2_curr_actions = q_values_repeat(model, model_out_t, curr_actions, twin=True)
211
+ q2_next_actions = q_values_repeat(model, model_out_t, next_actions, twin=True)
212
+
213
+ random_density = np.log(0.5 ** int(curr_actions.shape[-1]))
214
+ cat_q1 = tf.concat(
215
+ [
216
+ q1_rand - random_density,
217
+ q1_next_actions - tf.stop_gradient(next_logp),
218
+ q1_curr_actions - tf.stop_gradient(curr_logp),
219
+ ],
220
+ 1,
221
+ )
222
+ if twin_q:
223
+ cat_q2 = tf.concat(
224
+ [
225
+ q2_rand - random_density,
226
+ q2_next_actions - tf.stop_gradient(next_logp),
227
+ q2_curr_actions - tf.stop_gradient(curr_logp),
228
+ ],
229
+ 1,
230
+ )
231
+
232
+ min_qf1_loss_ = (
233
+ tf.reduce_mean(tf.reduce_logsumexp(cat_q1 / cql_temp, axis=1))
234
+ * min_q_weight
235
+ * cql_temp
236
+ )
237
+ min_qf1_loss = min_qf1_loss_ - (tf.reduce_mean(q_t) * min_q_weight)
238
+ if twin_q:
239
+ min_qf2_loss_ = (
240
+ tf.reduce_mean(tf.reduce_logsumexp(cat_q2 / cql_temp, axis=1))
241
+ * min_q_weight
242
+ * cql_temp
243
+ )
244
+ min_qf2_loss = min_qf2_loss_ - (tf.reduce_mean(twin_q_t) * min_q_weight)
245
+
246
+ if use_lagrange:
247
+ alpha_prime = tf.clip_by_value(model.log_alpha_prime.exp(), 0.0, 1000000.0)[0]
248
+ min_qf1_loss = alpha_prime * (min_qf1_loss - target_action_gap)
249
+ if twin_q:
250
+ min_qf2_loss = alpha_prime * (min_qf2_loss - target_action_gap)
251
+ alpha_prime_loss = 0.5 * (-min_qf1_loss - min_qf2_loss)
252
+ else:
253
+ alpha_prime_loss = -min_qf1_loss
254
+
255
+ cql_loss = [min_qf1_loss]
256
+ if twin_q:
257
+ cql_loss.append(min_qf2_loss)
258
+
259
+ critic_loss = [critic_loss_1 + min_qf1_loss]
260
+ if twin_q:
261
+ critic_loss.append(critic_loss_2 + min_qf2_loss)
262
+
263
+ # Save for stats function.
264
+ policy.q_t = q_t_selected
265
+ policy.policy_t = policy_t
266
+ policy.log_pis_t = log_pis_t
267
+ policy.td_error = td_error
268
+ policy.actor_loss = actor_loss
269
+ policy.critic_loss = critic_loss
270
+ policy.alpha_loss = alpha_loss
271
+ policy.log_alpha_value = model.log_alpha
272
+ policy.alpha_value = alpha
273
+ policy.target_entropy = model.target_entropy
274
+ # CQL Stats
275
+ policy.cql_loss = cql_loss
276
+ if use_lagrange:
277
+ policy.log_alpha_prime_value = model.log_alpha_prime[0]
278
+ policy.alpha_prime_value = alpha_prime
279
+ policy.alpha_prime_loss = alpha_prime_loss
280
+
281
+ # Return all loss terms corresponding to our optimizers.
282
+ if use_lagrange:
283
+ return actor_loss + tf.math.add_n(critic_loss) + alpha_loss + alpha_prime_loss
284
+ return actor_loss + tf.math.add_n(critic_loss) + alpha_loss
285
+
286
+
287
+ def cql_stats(policy: Policy, train_batch: SampleBatch) -> Dict[str, TensorType]:
288
+ sac_dict = stats(policy, train_batch)
289
+ sac_dict["cql_loss"] = tf.reduce_mean(tf.stack(policy.cql_loss))
290
+ if policy.config["lagrangian"]:
291
+ sac_dict["log_alpha_prime_value"] = policy.log_alpha_prime_value
292
+ sac_dict["alpha_prime_value"] = policy.alpha_prime_value
293
+ sac_dict["alpha_prime_loss"] = policy.alpha_prime_loss
294
+ return sac_dict
295
+
296
+
297
+ class ActorCriticOptimizerMixin(SACActorCriticOptimizerMixin):
298
+ def __init__(self, config):
299
+ super().__init__(config)
300
+ if config["lagrangian"]:
301
+ # Eager mode.
302
+ if config["framework"] == "tf2":
303
+ self._alpha_prime_optimizer = tf.keras.optimizers.Adam(
304
+ learning_rate=config["optimization"]["critic_learning_rate"]
305
+ )
306
+ # Static graph mode.
307
+ else:
308
+ self._alpha_prime_optimizer = tf1.train.AdamOptimizer(
309
+ learning_rate=config["optimization"]["critic_learning_rate"]
310
+ )
311
+
312
+
313
+ def setup_early_mixins(
314
+ policy: Policy,
315
+ obs_space: gym.spaces.Space,
316
+ action_space: gym.spaces.Space,
317
+ config: AlgorithmConfigDict,
318
+ ) -> None:
319
+ """Call mixin classes' constructors before Policy's initialization.
320
+
321
+ Adds the necessary optimizers to the given Policy.
322
+
323
+ Args:
324
+ policy: The Policy object.
325
+ obs_space (gym.spaces.Space): The Policy's observation space.
326
+ action_space (gym.spaces.Space): The Policy's action space.
327
+ config: The Policy's config.
328
+ """
329
+ policy.cur_iter = 0
330
+ ActorCriticOptimizerMixin.__init__(policy, config)
331
+ if config["lagrangian"]:
332
+ policy.model.log_alpha_prime = get_variable(
333
+ 0.0, framework="tf", trainable=True, tf_name="log_alpha_prime"
334
+ )
335
+ policy.alpha_prime_optim = tf.keras.optimizers.Adam(
336
+ learning_rate=config["optimization"]["critic_learning_rate"],
337
+ )
338
+ # Generic random action generator for calculating CQL-loss.
339
+ policy._random_action_generator = Random(
340
+ action_space,
341
+ model=None,
342
+ framework="tf2",
343
+ policy_config=config,
344
+ num_workers=0,
345
+ worker_index=0,
346
+ )
347
+
348
+
349
+ def compute_gradients_fn(
350
+ policy: Policy, optimizer: LocalOptimizer, loss: TensorType
351
+ ) -> ModelGradients:
352
+ grads_and_vars = sac_compute_and_clip_gradients(policy, optimizer, loss)
353
+
354
+ if policy.config["lagrangian"]:
355
+ # Eager: Use GradientTape (which is a property of the `optimizer`
356
+ # object (an OptimizerWrapper): see rllib/policy/eager_tf_policy.py).
357
+ if policy.config["framework"] == "tf2":
358
+ tape = optimizer.tape
359
+ log_alpha_prime = [policy.model.log_alpha_prime]
360
+ alpha_prime_grads_and_vars = list(
361
+ zip(
362
+ tape.gradient(policy.alpha_prime_loss, log_alpha_prime),
363
+ log_alpha_prime,
364
+ )
365
+ )
366
+ # Tf1.x: Use optimizer.compute_gradients()
367
+ else:
368
+ alpha_prime_grads_and_vars = (
369
+ policy._alpha_prime_optimizer.compute_gradients(
370
+ policy.alpha_prime_loss, var_list=[policy.model.log_alpha_prime]
371
+ )
372
+ )
373
+
374
+ # Clip if necessary.
375
+ if policy.config["grad_clip"]:
376
+ clip_func = partial(tf.clip_by_norm, clip_norm=policy.config["grad_clip"])
377
+ else:
378
+ clip_func = tf.identity
379
+
380
+ # Save grads and vars for later use in `build_apply_op`.
381
+ policy._alpha_prime_grads_and_vars = [
382
+ (clip_func(g), v) for (g, v) in alpha_prime_grads_and_vars if g is not None
383
+ ]
384
+
385
+ grads_and_vars += policy._alpha_prime_grads_and_vars
386
+ return grads_and_vars
387
+
388
+
389
+ def apply_gradients_fn(policy, optimizer, grads_and_vars):
390
+ sac_results = sac_apply_gradients(policy, optimizer, grads_and_vars)
391
+
392
+ if policy.config["lagrangian"]:
393
+ # Eager mode -> Just apply and return None.
394
+ if policy.config["framework"] == "tf2":
395
+ policy._alpha_prime_optimizer.apply_gradients(
396
+ policy._alpha_prime_grads_and_vars
397
+ )
398
+ return
399
+ # Tf static graph -> Return grouped op.
400
+ else:
401
+ alpha_prime_apply_op = policy._alpha_prime_optimizer.apply_gradients(
402
+ policy._alpha_prime_grads_and_vars,
403
+ global_step=tf1.train.get_or_create_global_step(),
404
+ )
405
+ return tf.group([sac_results, alpha_prime_apply_op])
406
+ return sac_results
407
+
408
+
409
+ # Build a child class of `TFPolicy`, given the custom functions defined
410
+ # above.
411
+ CQLTFPolicy = build_tf_policy(
412
+ name="CQLTFPolicy",
413
+ loss_fn=cql_loss,
414
+ get_default_config=lambda: ray.rllib.algorithms.cql.cql.CQLConfig(),
415
+ validate_spaces=validate_spaces,
416
+ stats_fn=cql_stats,
417
+ postprocess_fn=postprocess_trajectory,
418
+ before_init=setup_early_mixins,
419
+ after_init=setup_late_mixins,
420
+ make_model=build_sac_model,
421
+ extra_learn_fetches_fn=lambda policy: {"td_error": policy.td_error},
422
+ mixins=[ActorCriticOptimizerMixin, TargetNetworkMixin, ComputeTDErrorMixin],
423
+ action_distribution_fn=get_distribution_inputs_and_class,
424
+ compute_gradients_fn=compute_gradients_fn,
425
+ apply_gradients_fn=apply_gradients_fn,
426
+ )
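
The heart of the critic regularizer in `cql_loss` above is a temperature-scaled log-sum-exp over Q-values of random, current-policy, and next-step actions, minus the mean Q-value of the dataset actions. A framework-agnostic NumPy sketch of that term (shapes and names are illustrative only; a numerically stabilized logsumexp would be used in practice):

import numpy as np

def cql_conservative_penalty(
    q_rand, q_curr, q_next,   # [batch, num_sampled_actions] Q-values
    logp_curr, logp_next,     # matching log-probs of the sampled actions
    q_data,                   # [batch] Q-values of the dataset actions
    action_dim,               # dimensionality of the (squashed) action space
    temperature=1.0,
    min_q_weight=5.0,
):
    # Uniform random actions in [-1, 1]^action_dim have density 0.5^action_dim.
    random_density = np.log(0.5 ** action_dim)
    cat_q = np.concatenate(
        [q_rand - random_density, q_next - logp_next, q_curr - logp_curr],
        axis=1,
    )
    # Temperature-scaled logsumexp over all sampled actions, per batch item.
    lse = temperature * np.log(np.sum(np.exp(cat_q / temperature), axis=1))
    # Push down Q on out-of-distribution actions, push up Q on dataset actions.
    return min_q_weight * (lse.mean() - q_data.mean())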
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/torch/__pycache__/cql_torch_learner.cpython-310.pyc ADDED
Binary file (4.68 kB).
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/cql/torch/__pycache__/cql_torch_rl_module.cpython-310.pyc ADDED
Binary file (4.58 kB).
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__init__.py ADDED
@@ -0,0 +1,10 @@
1
+ from ray.rllib.algorithms.dqn.dqn import DQN, DQNConfig
2
+ from ray.rllib.algorithms.dqn.dqn_tf_policy import DQNTFPolicy
3
+ from ray.rllib.algorithms.dqn.dqn_torch_policy import DQNTorchPolicy
4
+
5
+ __all__ = [
6
+ "DQN",
7
+ "DQNConfig",
8
+ "DQNTFPolicy",
9
+ "DQNTorchPolicy",
10
+ ]
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/dqn_rainbow_catalog.cpython-310.pyc ADDED
Binary file (8.21 kB).
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/dqn_rainbow_learner.cpython-310.pyc ADDED
Binary file (3.66 kB).
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/dqn_torch_policy.cpython-310.pyc ADDED
Binary file (10.7 kB).
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/__pycache__/learner_thread.cpython-310.pyc ADDED
Binary file (2.69 kB).
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn.py ADDED
@@ -0,0 +1,862 @@
1
+ """
2
+ Deep Q-Networks (DQN, Rainbow, Parametric DQN)
3
+ ==============================================
4
+
5
+ This file defines the distributed Algorithm class for the Deep Q-Networks
6
+ algorithm. See `dqn_[tf|torch]_policy.py` for the definition of the policies.
7
+
8
+ Detailed documentation:
9
+ https://docs.ray.io/en/master/rllib-algorithms.html#deep-q-networks-dqn-rainbow-parametric-dqn
10
+ """ # noqa: E501
11
+
12
+ from collections import defaultdict
13
+ import logging
14
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
15
+ import numpy as np
16
+
17
+ from ray.rllib.algorithms.algorithm import Algorithm
18
+ from ray.rllib.algorithms.algorithm_config import AlgorithmConfig, NotProvided
19
+ from ray.rllib.algorithms.dqn.dqn_tf_policy import DQNTFPolicy
20
+ from ray.rllib.algorithms.dqn.dqn_torch_policy import DQNTorchPolicy
21
+ from ray.rllib.connectors.common.add_observations_from_episodes_to_batch import (
22
+ AddObservationsFromEpisodesToBatch,
23
+ )
24
+ from ray.rllib.connectors.learner.add_next_observations_from_episodes_to_train_batch import ( # noqa
25
+ AddNextObservationsFromEpisodesToTrainBatch,
26
+ )
27
+ from ray.rllib.core.learner import Learner
28
+ from ray.rllib.core.rl_module.rl_module import RLModuleSpec
29
+ from ray.rllib.execution.rollout_ops import (
30
+ synchronous_parallel_sample,
31
+ )
32
+ from ray.rllib.policy.sample_batch import MultiAgentBatch
33
+ from ray.rllib.execution.train_ops import (
34
+ train_one_step,
35
+ multi_gpu_train_one_step,
36
+ )
37
+ from ray.rllib.policy.policy import Policy
38
+ from ray.rllib.utils import deep_update
39
+ from ray.rllib.utils.annotations import override
40
+ from ray.rllib.utils.numpy import convert_to_numpy
41
+ from ray.rllib.utils.replay_buffers.utils import (
42
+ update_priorities_in_episode_replay_buffer,
43
+ update_priorities_in_replay_buffer,
44
+ validate_buffer_config,
45
+ )
46
+ from ray.rllib.utils.typing import ResultDict
47
+ from ray.rllib.utils.metrics import (
48
+ ALL_MODULES,
49
+ ENV_RUNNER_RESULTS,
50
+ ENV_RUNNER_SAMPLING_TIMER,
51
+ LAST_TARGET_UPDATE_TS,
52
+ LEARNER_RESULTS,
53
+ LEARNER_UPDATE_TIMER,
54
+ NUM_AGENT_STEPS_SAMPLED,
55
+ NUM_AGENT_STEPS_SAMPLED_LIFETIME,
56
+ NUM_ENV_STEPS_SAMPLED,
57
+ NUM_ENV_STEPS_SAMPLED_LIFETIME,
58
+ NUM_TARGET_UPDATES,
59
+ REPLAY_BUFFER_ADD_DATA_TIMER,
60
+ REPLAY_BUFFER_SAMPLE_TIMER,
61
+ REPLAY_BUFFER_UPDATE_PRIOS_TIMER,
62
+ SAMPLE_TIMER,
63
+ SYNCH_WORKER_WEIGHTS_TIMER,
64
+ TD_ERROR_KEY,
65
+ TIMERS,
66
+ )
67
+ from ray.rllib.utils.deprecation import DEPRECATED_VALUE
68
+ from ray.rllib.utils.replay_buffers.utils import sample_min_n_steps_from_buffer
69
+ from ray.rllib.utils.typing import (
70
+ LearningRateOrSchedule,
71
+ RLModuleSpecType,
72
+ SampleBatchType,
73
+ )
74
+
75
+ logger = logging.getLogger(__name__)
76
+
77
+
78
+ class DQNConfig(AlgorithmConfig):
79
+ r"""Defines a configuration class from which a DQN Algorithm can be built.
80
+
81
+ .. testcode::
82
+
83
+ from ray.rllib.algorithms.dqn.dqn import DQNConfig
84
+
85
+ config = (
86
+ DQNConfig()
87
+ .environment("CartPole-v1")
88
+ .training(replay_buffer_config={
89
+ "type": "PrioritizedEpisodeReplayBuffer",
90
+ "capacity": 60000,
91
+ "alpha": 0.5,
92
+ "beta": 0.5,
93
+ })
94
+ .env_runners(num_env_runners=1)
95
+ )
96
+ algo = config.build()
97
+ algo.train()
98
+ algo.stop()
99
+
100
+ .. testcode::
101
+
102
+ from ray.rllib.algorithms.dqn.dqn import DQNConfig
103
+ from ray import air
104
+ from ray import tune
105
+
106
+ config = (
107
+ DQNConfig()
108
+ .environment("CartPole-v1")
109
+ .training(
110
+ num_atoms=tune.grid_search([1,])
111
+ )
112
+ )
113
+ tune.Tuner(
114
+ "DQN",
115
+ run_config=air.RunConfig(stop={"training_iteration":1}),
116
+ param_space=config,
117
+ ).fit()
118
+
119
+ .. testoutput::
120
+ :hide:
121
+
122
+ ...
123
+
124
+
125
+ """
126
+
127
+ def __init__(self, algo_class=None):
128
+ """Initializes a DQNConfig instance."""
129
+ self.exploration_config = {
130
+ "type": "EpsilonGreedy",
131
+ "initial_epsilon": 1.0,
132
+ "final_epsilon": 0.02,
133
+ "epsilon_timesteps": 10000,
134
+ }
135
+
136
+ super().__init__(algo_class=algo_class or DQN)
137
+
138
+ # Overrides of AlgorithmConfig defaults
139
+ # `env_runners()`
140
+ # Set to `self.n_step`, if 'auto'.
141
+ self.rollout_fragment_length: Union[int, str] = "auto"
142
+ # New stack uses `epsilon` as either a constant value or a scheduler
143
+ # defined like this.
144
+ # TODO (simon): Ensure that users can understand how to provide epsilon.
145
+ # (sven): Should we add this to `self.env_runners(epsilon=..)`?
146
+ self.epsilon = [(0, 1.0), (10000, 0.05)]
147
+
148
+ # `training()`
149
+ self.grad_clip = 40.0
150
+ # Note: Only when using enable_rl_module_and_learner=True can the clipping mode
151
+ # be configured by the user. On the old API stack, RLlib will always clip by
152
+ # global_norm, no matter the value of `grad_clip_by`.
153
+ self.grad_clip_by = "global_norm"
154
+ self.lr = 5e-4
155
+ self.train_batch_size = 32
156
+
157
+ # `evaluation()`
158
+ self.evaluation(evaluation_config=AlgorithmConfig.overrides(explore=False))
159
+
160
+ # `reporting()`
161
+ self.min_time_s_per_iteration = None
162
+ self.min_sample_timesteps_per_iteration = 1000
163
+
164
+ # DQN specific config settings.
165
+ # fmt: off
166
+ # __sphinx_doc_begin__
167
+ self.target_network_update_freq = 500
168
+ self.num_steps_sampled_before_learning_starts = 1000
169
+ self.store_buffer_in_checkpoints = False
170
+ self.adam_epsilon = 1e-8
171
+
172
+ self.tau = 1.0
173
+
174
+ self.num_atoms = 1
175
+ self.v_min = -10.0
176
+ self.v_max = 10.0
177
+ self.noisy = False
178
+ self.sigma0 = 0.5
179
+ self.dueling = True
180
+ self.hiddens = [256]
181
+ self.double_q = True
182
+ self.n_step = 1
183
+ self.before_learn_on_batch = None
184
+ self.training_intensity = None
185
+ self.td_error_loss_fn = "huber"
186
+ self.categorical_distribution_temperature = 1.0
187
+
188
+ # Replay buffer configuration.
189
+ self.replay_buffer_config = {
190
+ "type": "PrioritizedEpisodeReplayBuffer",
191
+ # Size of the replay buffer. Note that if async_updates is set,
192
+ # then each worker will have a replay buffer of this size.
193
+ "capacity": 50000,
194
+ "alpha": 0.6,
195
+ # Beta parameter for sampling from prioritized replay buffer.
196
+ "beta": 0.4,
197
+ }
198
+ # fmt: on
199
+ # __sphinx_doc_end__
200
+
201
+ self.lr_schedule = None # @OldAPIStack
202
+
203
+ # Deprecated
204
+ self.buffer_size = DEPRECATED_VALUE
205
+ self.prioritized_replay = DEPRECATED_VALUE
206
+ self.learning_starts = DEPRECATED_VALUE
207
+ self.replay_batch_size = DEPRECATED_VALUE
208
+ # Can not use DEPRECATED_VALUE here because -1 is a common config value
209
+ self.replay_sequence_length = None
210
+ self.prioritized_replay_alpha = DEPRECATED_VALUE
211
+ self.prioritized_replay_beta = DEPRECATED_VALUE
212
+ self.prioritized_replay_eps = DEPRECATED_VALUE
213
+
214
+ @override(AlgorithmConfig)
215
+ def training(
216
+ self,
217
+ *,
218
+ target_network_update_freq: Optional[int] = NotProvided,
219
+ replay_buffer_config: Optional[dict] = NotProvided,
220
+ store_buffer_in_checkpoints: Optional[bool] = NotProvided,
221
+ lr_schedule: Optional[List[List[Union[int, float]]]] = NotProvided,
222
+ epsilon: Optional[LearningRateOrSchedule] = NotProvided,
223
+ adam_epsilon: Optional[float] = NotProvided,
224
+ grad_clip: Optional[int] = NotProvided,
225
+ num_steps_sampled_before_learning_starts: Optional[int] = NotProvided,
226
+ tau: Optional[float] = NotProvided,
227
+ num_atoms: Optional[int] = NotProvided,
228
+ v_min: Optional[float] = NotProvided,
229
+ v_max: Optional[float] = NotProvided,
230
+ noisy: Optional[bool] = NotProvided,
231
+ sigma0: Optional[float] = NotProvided,
232
+ dueling: Optional[bool] = NotProvided,
233
+ hiddens: Optional[int] = NotProvided,
234
+ double_q: Optional[bool] = NotProvided,
235
+ n_step: Optional[Union[int, Tuple[int, int]]] = NotProvided,
236
+ before_learn_on_batch: Callable[
237
+ [Type[MultiAgentBatch], List[Type[Policy]], Type[int]],
238
+ Type[MultiAgentBatch],
239
+ ] = NotProvided,
240
+ training_intensity: Optional[float] = NotProvided,
241
+ td_error_loss_fn: Optional[str] = NotProvided,
242
+ categorical_distribution_temperature: Optional[float] = NotProvided,
243
+ **kwargs,
244
+ ) -> "DQNConfig":
245
+ """Sets the training related configuration.
246
+
247
+ Args:
248
+ target_network_update_freq: Update the target network every
249
+ `target_network_update_freq` sample steps.
250
+ replay_buffer_config: Replay buffer config.
251
+ Examples:
252
+ {
253
+ "_enable_replay_buffer_api": True,
254
+ "type": "MultiAgentReplayBuffer",
255
+ "capacity": 50000,
256
+ "replay_sequence_length": 1,
257
+ }
258
+ - OR -
259
+ {
260
+ "_enable_replay_buffer_api": True,
261
+ "type": "MultiAgentPrioritizedReplayBuffer",
262
+ "capacity": 50000,
263
+ "prioritized_replay_alpha": 0.6,
264
+ "prioritized_replay_beta": 0.4,
265
+ "prioritized_replay_eps": 1e-6,
266
+ "replay_sequence_length": 1,
267
+ }
268
+ - Where -
269
+ prioritized_replay_alpha: Alpha parameter controls the degree of
270
+ prioritization in the buffer. In other words, when a buffer sample has
271
+ a higher temporal-difference error, with how much more probability
272
+ should it be drawn to update the parametrized Q-network. 0.0
273
+ corresponds to uniform probability. Setting it much above 1.0 may quickly
274
+ result in the sampling distribution becoming heavily “pointy” with
275
+ low entropy.
276
+ prioritized_replay_beta: Beta parameter controls the degree of
277
+ importance sampling which suppresses the influence of gradient updates
278
+ from samples that have a higher probability of being sampled via the alpha
279
+ parameter and the temporal-difference error.
280
+ prioritized_replay_eps: Epsilon parameter sets the baseline probability
281
+ for sampling so that when the temporal-difference error of a sample is
282
+ zero, there is still a chance of drawing the sample.
283
+ store_buffer_in_checkpoints: Set this to True, if you want the contents of
284
+ your buffer(s) to be stored in any saved checkpoints as well.
285
+ Warnings will be created if:
286
+ - This is True AND restoring from a checkpoint that contains no buffer
287
+ data.
288
+ - This is False AND restoring from a checkpoint that does contain
289
+ buffer data.
290
+ epsilon: Epsilon exploration schedule. In the format of [[timestep, value],
291
+ [timestep, value], ...]. A schedule must start from
292
+ timestep 0.
293
+ adam_epsilon: Adam optimizer's epsilon hyperparameter.
294
+ grad_clip: If not None, clip gradients during optimization at this value.
295
+ num_steps_sampled_before_learning_starts: Number of timesteps to collect
296
+ from rollout workers before we start sampling from replay buffers for
297
+ learning. Whether we count this in agent steps or environment steps
298
+ depends on config.multi_agent(count_steps_by=..).
299
+ tau: Update the target by \tau * policy + (1-\tau) * target_policy.
300
+ num_atoms: Number of atoms for representing the distribution of return.
301
+ When this is greater than 1, distributional Q-learning is used.
302
+ v_min: Minimum value of the return-distribution support (used when num_atoms > 1).
303
+ v_max: Maximum value of the return-distribution support (used when num_atoms > 1).
304
+ noisy: Whether to use a noisy network to aid exploration. This adds parametric
305
+ noise to the model weights.
306
+ sigma0: Controls the initial parameter noise for noisy nets.
307
+ dueling: Whether to use dueling DQN.
308
+ hiddens: Dense-layer setup for each of the advantage branch and the value
309
+ branch
310
+ double_q: Whether to use double DQN.
311
+ n_step: N-step target updates. If >1, sars' tuples in trajectories will be
312
+ postprocessed to become sa[discounted sum of R][s t+n] tuples. An
313
+ integer will be interpreted as a fixed n-step value. If a tuple of 2
314
+ ints is provided here, the n-step value will be drawn for each sample(!)
315
+ in the train batch from a uniform distribution over the closed interval
316
+ defined by `[n_step[0], n_step[1]]`.
317
+ before_learn_on_batch: Callback to run before learning on a multi-agent
318
+ batch of experiences.
319
+ training_intensity: The intensity with which to update the model (vs
320
+ collecting samples from the env).
321
+ If None, uses "natural" values of:
322
+ `train_batch_size` / (`rollout_fragment_length` x `num_env_runners` x
323
+ `num_envs_per_env_runner`).
324
+ If not None, will make sure that the ratio between timesteps inserted
325
+ into and sampled from the buffer matches the given values.
326
+ Example:
327
+ training_intensity=1000.0
328
+ train_batch_size=250
329
+ rollout_fragment_length=1
330
+ num_env_runners=1 (or 0)
331
+ num_envs_per_env_runner=1
332
+ -> natural value = 250 / 1 = 250.0
333
+ -> will make sure that replay+train op will be executed 4x as often as
334
+ rollout+insert op (4 * 250 = 1000).
335
+ See: rllib/algorithms/dqn/dqn.py::calculate_rr_weights for further
336
+ details.
337
+ td_error_loss_fn: "huber" or "mse". Loss function for calculating the TD error
338
+ when num_atoms is 1. Note that if num_atoms is > 1, this parameter
339
+ is simply ignored, and softmax cross entropy loss will be used.
340
+ categorical_distribution_temperature: Set the temperature parameter used
341
+ by Categorical action distribution. A valid temperature is in the range
342
+ of [0, 1]. Note that this mostly affects evaluation since TD error uses
343
+ argmax for return calculation.
344
+
345
+ Returns:
346
+ This updated AlgorithmConfig object.
347
+ """
348
+ # Pass kwargs onto super's `training()` method.
349
+ super().training(**kwargs)
350
+
351
+ if target_network_update_freq is not NotProvided:
352
+ self.target_network_update_freq = target_network_update_freq
353
+ if replay_buffer_config is not NotProvided:
354
+ # Override entire `replay_buffer_config` if `type` key changes.
355
+ # Update, if `type` key remains the same or is not specified.
356
+ new_replay_buffer_config = deep_update(
357
+ {"replay_buffer_config": self.replay_buffer_config},
358
+ {"replay_buffer_config": replay_buffer_config},
359
+ False,
360
+ ["replay_buffer_config"],
361
+ ["replay_buffer_config"],
362
+ )
363
+ self.replay_buffer_config = new_replay_buffer_config["replay_buffer_config"]
364
+ if store_buffer_in_checkpoints is not NotProvided:
365
+ self.store_buffer_in_checkpoints = store_buffer_in_checkpoints
366
+ if lr_schedule is not NotProvided:
367
+ self.lr_schedule = lr_schedule
368
+ if epsilon is not NotProvided:
369
+ self.epsilon = epsilon
370
+ if adam_epsilon is not NotProvided:
371
+ self.adam_epsilon = adam_epsilon
372
+ if grad_clip is not NotProvided:
373
+ self.grad_clip = grad_clip
374
+ if num_steps_sampled_before_learning_starts is not NotProvided:
375
+ self.num_steps_sampled_before_learning_starts = (
376
+ num_steps_sampled_before_learning_starts
377
+ )
378
+ if tau is not NotProvided:
379
+ self.tau = tau
380
+ if num_atoms is not NotProvided:
381
+ self.num_atoms = num_atoms
382
+ if v_min is not NotProvided:
383
+ self.v_min = v_min
384
+ if v_max is not NotProvided:
385
+ self.v_max = v_max
386
+ if noisy is not NotProvided:
387
+ self.noisy = noisy
388
+ if sigma0 is not NotProvided:
389
+ self.sigma0 = sigma0
390
+ if dueling is not NotProvided:
391
+ self.dueling = dueling
392
+ if hiddens is not NotProvided:
393
+ self.hiddens = hiddens
394
+ if double_q is not NotProvided:
395
+ self.double_q = double_q
396
+ if n_step is not NotProvided:
397
+ self.n_step = n_step
398
+ if before_learn_on_batch is not NotProvided:
399
+ self.before_learn_on_batch = before_learn_on_batch
400
+ if training_intensity is not NotProvided:
401
+ self.training_intensity = training_intensity
402
+ if td_error_loss_fn is not NotProvided:
403
+ self.td_error_loss_fn = td_error_loss_fn
404
+ if categorical_distribution_temperature is not NotProvided:
405
+ self.categorical_distribution_temperature = (
406
+ categorical_distribution_temperature
407
+ )
408
+
409
+ return self
410
+
411
+ @override(AlgorithmConfig)
412
+ def validate(self) -> None:
413
+ # Call super's validation method.
414
+ super().validate()
415
+
416
+ # Warn about new API stack on by default.
417
+ if self.enable_rl_module_and_learner:
418
+ logger.warning(
419
+ f"You are running {self.algo_class.__name__} on the new API stack! "
420
+ "This is the new default behavior for this algorithm. If you don't "
421
+ "want to use the new API stack, set `config.api_stack("
422
+ "enable_rl_module_and_learner=False,"
423
+ "enable_env_runner_and_connector_v2=False)`. For a detailed migration "
424
+ "guide, see here: https://docs.ray.io/en/master/rllib/new-api-stack-migration-guide.html" # noqa
425
+ )
426
+
427
+ if (
428
+ not self.enable_rl_module_and_learner
429
+ and self.exploration_config["type"] == "ParameterNoise"
430
+ ):
431
+ if self.batch_mode != "complete_episodes":
432
+ raise ValueError(
433
+ "ParameterNoise Exploration requires `batch_mode` to be "
434
+ "'complete_episodes'. Try setting `config.env_runners("
435
+ "batch_mode='complete_episodes')`."
436
+ )
437
+
438
+ if not self.enable_env_runner_and_connector_v2 and not self.in_evaluation:
439
+ validate_buffer_config(self)
440
+
441
+ if self.td_error_loss_fn not in ["huber", "mse"]:
442
+ raise ValueError("`td_error_loss_fn` must be 'huber' or 'mse'!")
443
+
444
+ # Check rollout_fragment_length to be compatible with n_step.
445
+ if (
446
+ not self.in_evaluation
447
+ and self.rollout_fragment_length != "auto"
448
+ and self.rollout_fragment_length < self.n_step
449
+ ):
450
+ raise ValueError(
451
+ f"Your `rollout_fragment_length` ({self.rollout_fragment_length}) is "
452
+ f"smaller than `n_step` ({self.n_step})! "
453
+ "Try setting config.env_runners(rollout_fragment_length="
454
+ f"{self.n_step})."
455
+ )
456
+
457
+ # TODO (simon): Find a clean solution to deal with
458
+ # exploration configs when using the new API stack.
459
+ if (
460
+ not self.enable_rl_module_and_learner
461
+ and self.exploration_config["type"] == "ParameterNoise"
462
+ ):
463
+ if self.batch_mode != "complete_episodes":
464
+ raise ValueError(
465
+ "ParameterNoise Exploration requires `batch_mode` to be "
466
+ "'complete_episodes'. Try setting `config.env_runners("
467
+ "batch_mode='complete_episodes')`."
468
+ )
469
+ if self.noisy:
470
+ raise ValueError(
471
+ "ParameterNoise Exploration and `noisy` network cannot be"
472
+ " used at the same time!"
473
+ )
474
+
475
+ # Validate that we use the corresponding `EpisodeReplayBuffer` when using
476
+ # episodes.
477
+ # TODO (sven, simon): Implement the multi-agent case for replay buffers.
478
+ from ray.rllib.utils.replay_buffers.episode_replay_buffer import (
479
+ EpisodeReplayBuffer,
480
+ )
481
+
482
+ if (
483
+ self.enable_env_runner_and_connector_v2
484
+ and not isinstance(self.replay_buffer_config["type"], str)
485
+ and not issubclass(self.replay_buffer_config["type"], EpisodeReplayBuffer)
486
+ ):
487
+ raise ValueError(
488
+ "When using the new `EnvRunner API` the replay buffer must be of type "
489
+ "`EpisodeReplayBuffer`."
490
+ )
491
+ elif not self.enable_env_runner_and_connector_v2 and (
492
+ (
493
+ isinstance(self.replay_buffer_config["type"], str)
494
+ and "Episode" in self.replay_buffer_config["type"]
495
+ )
496
+ or issubclass(self.replay_buffer_config["type"], EpisodeReplayBuffer)
497
+ ):
498
+ raise ValueError(
499
+ "When using the old API stack the replay buffer must not be of type "
500
+ "`EpisodeReplayBuffer`! We suggest you use the following config to run "
501
+ "DQN on the old API stack: `config.training(replay_buffer_config={"
502
+ "'type': 'MultiAgentPrioritizedReplayBuffer', "
503
+ "'prioritized_replay_alpha': [alpha], "
504
+ "'prioritized_replay_beta': [beta], "
505
+ "'prioritized_replay_eps': [eps], "
506
+ "})`."
507
+ )
508
+
509
+ @override(AlgorithmConfig)
510
+ def get_rollout_fragment_length(self, worker_index: int = 0) -> int:
511
+ if self.rollout_fragment_length == "auto":
512
+ return (
513
+ self.n_step[1]
514
+ if isinstance(self.n_step, (tuple, list))
515
+ else self.n_step
516
+ )
517
+ else:
518
+ return self.rollout_fragment_length
519
+
520
+ @override(AlgorithmConfig)
521
+ def get_default_rl_module_spec(self) -> RLModuleSpecType:
522
+ from ray.rllib.algorithms.dqn.dqn_rainbow_catalog import DQNRainbowCatalog
523
+
524
+ if self.framework_str == "torch":
525
+ from ray.rllib.algorithms.dqn.torch.dqn_rainbow_torch_rl_module import (
526
+ DQNRainbowTorchRLModule,
527
+ )
528
+
529
+ return RLModuleSpec(
530
+ module_class=DQNRainbowTorchRLModule,
531
+ catalog_class=DQNRainbowCatalog,
532
+ model_config=self.model_config,
533
+ )
534
+ else:
535
+ raise ValueError(
536
+ f"The framework {self.framework_str} is not supported! "
537
+ "Use `config.framework('torch')` instead."
538
+ )
539
+
540
+ @property
541
+ @override(AlgorithmConfig)
542
+ def _model_config_auto_includes(self) -> Dict[str, Any]:
543
+ return super()._model_config_auto_includes | {
544
+ "double_q": self.double_q,
545
+ "dueling": self.dueling,
546
+ "epsilon": self.epsilon,
547
+ "noisy": self.noisy,
548
+ "num_atoms": self.num_atoms,
549
+ "std_init": self.sigma0,
550
+ "v_max": self.v_max,
551
+ "v_min": self.v_min,
552
+ }
553
+
554
+ @override(AlgorithmConfig)
555
+ def get_default_learner_class(self) -> Union[Type["Learner"], str]:
556
+ if self.framework_str == "torch":
557
+ from ray.rllib.algorithms.dqn.torch.dqn_rainbow_torch_learner import (
558
+ DQNRainbowTorchLearner,
559
+ )
560
+
561
+ return DQNRainbowTorchLearner
562
+ else:
563
+ raise ValueError(
564
+ f"The framework {self.framework_str} is not supported! "
565
+ "Use `config.framework('torch')` instead."
566
+ )
567
+
568
+ @override(AlgorithmConfig)
569
+ def build_learner_connector(
570
+ self,
571
+ input_observation_space,
572
+ input_action_space,
573
+ device=None,
574
+ ):
575
+ pipeline = super().build_learner_connector(
576
+ input_observation_space=input_observation_space,
577
+ input_action_space=input_action_space,
578
+ device=device,
579
+ )
580
+
581
+ # Insert the "add-NEXT_OBS-from-episodes-to-train-batch" connector piece right
582
+ # after the corresponding "add-OBS-..." default piece.
583
+ pipeline.insert_after(
584
+ AddObservationsFromEpisodesToBatch,
585
+ AddNextObservationsFromEpisodesToTrainBatch(),
586
+ )
587
+
588
+ return pipeline
589
+
590
+
591
+ def calculate_rr_weights(config: AlgorithmConfig) -> List[float]:
592
+ """Calculate the round robin weights for the rollout and train steps"""
593
+ if not config.training_intensity:
594
+ return [1, 1]
595
+
596
+ # Calculate the "native ratio" as:
597
+ # [train-batch-size] / [size of env-rolled-out sampled data]
598
+ # This is to set freshly rollout-collected data in relation to
599
+ # the data we pull from the replay buffer (which also contains old
600
+ # samples).
601
+ native_ratio = config.total_train_batch_size / (
602
+ config.get_rollout_fragment_length()
603
+ * config.num_envs_per_env_runner
604
+ # Add one to workers because the local
605
+ # worker usually collects experiences as well, and we avoid division by zero.
606
+ * max(config.num_env_runners + 1, 1)
607
+ )
608
+
609
+ # Training intensity is specified in terms of
610
+ # (steps_replayed / steps_sampled), so adjust for the native ratio.
611
+ sample_and_train_weight = config.training_intensity / native_ratio
612
+ if sample_and_train_weight < 1:
613
+ return [int(np.round(1 / sample_and_train_weight)), 1]
614
+ else:
615
+ return [1, int(np.round(sample_and_train_weight))]
616
+
617
+
618
+ class DQN(Algorithm):
619
+ @classmethod
620
+ @override(Algorithm)
621
+ def get_default_config(cls) -> AlgorithmConfig:
622
+ return DQNConfig()
623
+
624
+ @classmethod
625
+ @override(Algorithm)
626
+ def get_default_policy_class(
627
+ cls, config: AlgorithmConfig
628
+ ) -> Optional[Type[Policy]]:
629
+ if config["framework"] == "torch":
630
+ return DQNTorchPolicy
631
+ else:
632
+ return DQNTFPolicy
633
+
634
+ @override(Algorithm)
635
+ def training_step(self) -> None:
636
+ """DQN training iteration function.
637
+
638
+ Each training iteration, we:
639
+ - Sample (MultiAgentBatch) from workers.
640
+ - Store new samples in replay buffer.
641
+ - Sample training batch (MultiAgentBatch) from replay buffer.
642
+ - Learn on training batch.
643
+ - Update remote workers' new policy weights.
644
+ - Update target network every `target_network_update_freq` sample steps.
645
+ - Return all collected metrics for the iteration.
646
+
647
+ Returns:
648
+ The results dict from executing the training iteration.
649
+ """
650
+ # Old API stack (Policy, RolloutWorker, Connector).
651
+ if not self.config.enable_env_runner_and_connector_v2:
652
+ return self._training_step_old_api_stack()
653
+
654
+ # New API stack (RLModule, Learner, EnvRunner, ConnectorV2).
655
+ return self._training_step_new_api_stack(with_noise_reset=True)
656
+
657
+ def _training_step_new_api_stack(self, *, with_noise_reset):
658
+ # Alternate between storing and sampling and training.
659
+ store_weight, sample_and_train_weight = calculate_rr_weights(self.config)
660
+
661
+ # Run multiple sampling + storing to buffer iterations.
662
+ for _ in range(store_weight):
663
+ with self.metrics.log_time((TIMERS, ENV_RUNNER_SAMPLING_TIMER)):
664
+ # Sample in parallel from workers.
665
+ episodes, env_runner_results = synchronous_parallel_sample(
666
+ worker_set=self.env_runner_group,
667
+ concat=True,
668
+ sample_timeout_s=self.config.sample_timeout_s,
669
+ _uses_new_env_runners=True,
670
+ _return_metrics=True,
671
+ )
672
+ # Reduce EnvRunner metrics over the n EnvRunners.
673
+ self.metrics.merge_and_log_n_dicts(
674
+ env_runner_results, key=ENV_RUNNER_RESULTS
675
+ )
676
+
677
+ # Add the sampled experiences to the replay buffer.
678
+ with self.metrics.log_time((TIMERS, REPLAY_BUFFER_ADD_DATA_TIMER)):
679
+ self.local_replay_buffer.add(episodes)
680
+
681
+ if self.config.count_steps_by == "agent_steps":
682
+ current_ts = sum(
683
+ self.metrics.peek(
684
+ (ENV_RUNNER_RESULTS, NUM_AGENT_STEPS_SAMPLED_LIFETIME), default={}
685
+ ).values()
686
+ )
687
+ else:
688
+ current_ts = self.metrics.peek(
689
+ (ENV_RUNNER_RESULTS, NUM_ENV_STEPS_SAMPLED_LIFETIME), default=0
690
+ )
691
+
692
+ # If enough experiences have been sampled start training.
693
+ if current_ts >= self.config.num_steps_sampled_before_learning_starts:
694
+ # Resample noise for noisy networks, if necessary. Note, this
695
+ # is proposed in the "Noisy Networks for Exploration" paper
696
+ # (https://arxiv.org/abs/1706.10295) in Algorithm 1. The noise
697
+ # gets sampled once for each training loop.
698
+ if with_noise_reset:
699
+ self.learner_group.foreach_learner(
700
+ func=lambda lrnr: lrnr._reset_noise(),
701
+ timeout_seconds=0.0, # fire-and-forget
702
+ )
703
+ # Run multiple sample-from-buffer and update iterations.
704
+ for _ in range(sample_and_train_weight):
705
+ # Sample a list of episodes used for learning from the replay buffer.
706
+ with self.metrics.log_time((TIMERS, REPLAY_BUFFER_SAMPLE_TIMER)):
707
+ episodes = self.local_replay_buffer.sample(
708
+ num_items=self.config.total_train_batch_size,
709
+ n_step=self.config.n_step,
710
+ gamma=self.config.gamma,
711
+ beta=self.config.replay_buffer_config.get("beta"),
712
+ sample_episodes=True,
713
+ )
714
+
715
+ # Perform an update on the buffer-sampled train batch.
716
+ with self.metrics.log_time((TIMERS, LEARNER_UPDATE_TIMER)):
717
+ learner_results = self.learner_group.update_from_episodes(
718
+ episodes=episodes,
719
+ timesteps={
720
+ NUM_ENV_STEPS_SAMPLED_LIFETIME: (
721
+ self.metrics.peek(
722
+ (ENV_RUNNER_RESULTS, NUM_ENV_STEPS_SAMPLED_LIFETIME)
723
+ )
724
+ ),
725
+ NUM_AGENT_STEPS_SAMPLED_LIFETIME: (
726
+ self.metrics.peek(
727
+ (
728
+ ENV_RUNNER_RESULTS,
729
+ NUM_AGENT_STEPS_SAMPLED_LIFETIME,
730
+ )
731
+ )
732
+ ),
733
+ },
734
+ )
735
+ # Isolate TD-errors from result dicts (we should not log these to
736
+ # disk or WandB, they might be very large).
737
+ td_errors = defaultdict(list)
738
+ for res in learner_results:
739
+ for module_id, module_results in res.items():
740
+ if TD_ERROR_KEY in module_results:
741
+ td_errors[module_id].extend(
742
+ convert_to_numpy(
743
+ module_results.pop(TD_ERROR_KEY).peek()
744
+ )
745
+ )
746
+ td_errors = {
747
+ module_id: {TD_ERROR_KEY: np.concatenate(s, axis=0)}
748
+ for module_id, s in td_errors.items()
749
+ }
750
+ self.metrics.merge_and_log_n_dicts(
751
+ learner_results, key=LEARNER_RESULTS
752
+ )
753
+
754
+ # Update replay buffer priorities.
755
+ with self.metrics.log_time((TIMERS, REPLAY_BUFFER_UPDATE_PRIOS_TIMER)):
756
+ update_priorities_in_episode_replay_buffer(
757
+ replay_buffer=self.local_replay_buffer,
758
+ td_errors=td_errors,
759
+ )
760
+
761
+ # Update weights and global_vars - after learning on the local worker -
762
+ # on all remote workers.
763
+ with self.metrics.log_time((TIMERS, SYNCH_WORKER_WEIGHTS_TIMER)):
764
+ modules_to_update = set(learner_results[0].keys()) - {ALL_MODULES}
765
+ # NOTE: the new API stack does not use global vars.
766
+ self.env_runner_group.sync_weights(
767
+ from_worker_or_learner_group=self.learner_group,
768
+ policies=modules_to_update,
769
+ global_vars=None,
770
+ inference_only=True,
771
+ )
772
+
773
+ def _training_step_old_api_stack(self) -> ResultDict:
774
+ """Training step for the old API stack.
775
+
776
+ More specifically this training step relies on `RolloutWorker`.
777
+ """
778
+ train_results = {}
779
+
780
+ # We alternate between storing new samples and sampling and training
781
+ store_weight, sample_and_train_weight = calculate_rr_weights(self.config)
782
+
783
+ for _ in range(store_weight):
784
+ # Sample (MultiAgentBatch) from workers.
785
+ with self._timers[SAMPLE_TIMER]:
786
+ new_sample_batch: SampleBatchType = synchronous_parallel_sample(
787
+ worker_set=self.env_runner_group,
788
+ concat=True,
789
+ sample_timeout_s=self.config.sample_timeout_s,
790
+ )
791
+
792
+ # Return early if all our workers failed.
793
+ if not new_sample_batch:
794
+ return {}
795
+
796
+ # Update counters
797
+ self._counters[NUM_AGENT_STEPS_SAMPLED] += new_sample_batch.agent_steps()
798
+ self._counters[NUM_ENV_STEPS_SAMPLED] += new_sample_batch.env_steps()
799
+
800
+ # Store new samples in replay buffer.
801
+ self.local_replay_buffer.add(new_sample_batch)
802
+
803
+ global_vars = {
804
+ "timestep": self._counters[NUM_ENV_STEPS_SAMPLED],
805
+ }
806
+
807
+ # Update target network every `target_network_update_freq` sample steps.
808
+ cur_ts = self._counters[
809
+ (
810
+ NUM_AGENT_STEPS_SAMPLED
811
+ if self.config.count_steps_by == "agent_steps"
812
+ else NUM_ENV_STEPS_SAMPLED
813
+ )
814
+ ]
815
+
816
+ if cur_ts > self.config.num_steps_sampled_before_learning_starts:
817
+ for _ in range(sample_and_train_weight):
818
+ # Sample training batch (MultiAgentBatch) from replay buffer.
819
+ train_batch = sample_min_n_steps_from_buffer(
820
+ self.local_replay_buffer,
821
+ self.config.total_train_batch_size,
822
+ count_by_agent_steps=self.config.count_steps_by == "agent_steps",
823
+ )
824
+
825
+ # Postprocess batch before we learn on it
826
+ post_fn = self.config.get("before_learn_on_batch") or (lambda b, *a: b)
827
+ train_batch = post_fn(train_batch, self.env_runner_group, self.config)
828
+
829
+ # Learn on training batch.
830
+ # Use simple optimizer (only for multi-agent or tf-eager; all other
831
+ # cases should use the multi-GPU optimizer, even if only using 1 GPU)
832
+ if self.config.get("simple_optimizer") is True:
833
+ train_results = train_one_step(self, train_batch)
834
+ else:
835
+ train_results = multi_gpu_train_one_step(self, train_batch)
836
+
837
+ # Update replay buffer priorities.
838
+ update_priorities_in_replay_buffer(
839
+ self.local_replay_buffer,
840
+ self.config,
841
+ train_batch,
842
+ train_results,
843
+ )
844
+
845
+ last_update = self._counters[LAST_TARGET_UPDATE_TS]
846
+ if cur_ts - last_update >= self.config.target_network_update_freq:
847
+ to_update = self.env_runner.get_policies_to_train()
848
+ self.env_runner.foreach_policy_to_train(
849
+ lambda p, pid, to_update=to_update: (
850
+ pid in to_update and p.update_target()
851
+ )
852
+ )
853
+ self._counters[NUM_TARGET_UPDATES] += 1
854
+ self._counters[LAST_TARGET_UPDATE_TS] = cur_ts
855
+
856
+ # Update weights and global_vars - after learning on the local worker -
857
+ # on all remote workers.
858
+ with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]:
859
+ self.env_runner_group.sync_weights(global_vars=global_vars)
860
+
861
+ # Return all collected metrics for the iteration.
862
+ return train_results
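
A minimal sketch (illustration only; the helper name `rr_weights` and its plain numeric arguments are hypothetical stand-ins for the config object) of the round-robin math that `calculate_rr_weights` above implements. The worked numbers follow the `training_intensity` example in the `training()` docstring, with `num_env_runners=0` so only the local worker samples:

    import numpy as np

    def rr_weights(training_intensity, train_batch_size, rollout_fragment_length,
                   num_env_runners, num_envs_per_env_runner):
        # How many "store" vs. "sample + train" sub-iterations to run per training step.
        if not training_intensity:
            return [1, 1]
        native_ratio = train_batch_size / (
            rollout_fragment_length
            * num_envs_per_env_runner
            * max(num_env_runners + 1, 1)
        )
        weight = training_intensity / native_ratio
        if weight < 1:
            return [int(np.round(1 / weight)), 1]
        return [1, int(np.round(weight))]

    # Docstring example: native ratio = 250 / 1 = 250, 1000 / 250 = 4 -> train 4x per store.
    print(rr_weights(1000.0, 250, 1, 0, 1))  # [1, 4]
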
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_rainbow_learner.py ADDED
@@ -0,0 +1,109 @@
1
+ import abc
2
+ from typing import Any, Dict, Optional
3
+
4
+ from ray.rllib.core.learner.learner import Learner
5
+ from ray.rllib.core.learner.utils import update_target_network
6
+ from ray.rllib.core.rl_module.apis.target_network_api import TargetNetworkAPI
7
+ from ray.rllib.core.rl_module.multi_rl_module import MultiRLModuleSpec
8
+ from ray.rllib.core.rl_module.rl_module import RLModuleSpec
9
+ from ray.rllib.utils.annotations import (
10
+ override,
11
+ OverrideToImplementCustomLogic_CallToSuperRecommended,
12
+ )
13
+ from ray.rllib.utils.metrics import (
14
+ LAST_TARGET_UPDATE_TS,
15
+ NUM_ENV_STEPS_SAMPLED_LIFETIME,
16
+ NUM_TARGET_UPDATES,
17
+ )
18
+ from ray.rllib.utils.typing import ModuleID, ShouldModuleBeUpdatedFn
19
+
20
+
21
+ # Note: These keys are also defined in `SACRLModule`. They could be kept here or
22
+ # pushed into the `Learner`, as they are recurring keys in RL.
23
+ ATOMS = "atoms"
24
+ QF_LOSS_KEY = "qf_loss"
25
+ QF_LOGITS = "qf_logits"
26
+ QF_MEAN_KEY = "qf_mean"
27
+ QF_MAX_KEY = "qf_max"
28
+ QF_MIN_KEY = "qf_min"
29
+ QF_NEXT_PREDS = "qf_next_preds"
30
+ QF_TARGET_NEXT_PREDS = "qf_target_next_preds"
31
+ QF_TARGET_NEXT_PROBS = "qf_target_next_probs"
32
+ QF_PREDS = "qf_preds"
33
+ QF_PROBS = "qf_probs"
34
+ TD_ERROR_MEAN_KEY = "td_error_mean"
35
+
36
+
37
+ class DQNRainbowLearner(Learner):
38
+ @OverrideToImplementCustomLogic_CallToSuperRecommended
39
+ @override(Learner)
40
+ def build(self) -> None:
41
+ super().build()
42
+
43
+ # Make target networks.
44
+ self.module.foreach_module(
45
+ lambda mid, mod: (
46
+ mod.make_target_networks()
47
+ if isinstance(mod, TargetNetworkAPI)
48
+ else None
49
+ )
50
+ )
51
+
52
+ @override(Learner)
53
+ def add_module(
54
+ self,
55
+ *,
56
+ module_id: ModuleID,
57
+ module_spec: RLModuleSpec,
58
+ config_overrides: Optional[Dict] = None,
59
+ new_should_module_be_updated: Optional[ShouldModuleBeUpdatedFn] = None,
60
+ ) -> MultiRLModuleSpec:
61
+ marl_spec = super().add_module(
62
+ module_id=module_id,
63
+ module_spec=module_spec,
64
+ config_overrides=config_overrides,
65
+ new_should_module_be_updated=new_should_module_be_updated,
66
+ )
67
+ # Create target networks for added Module, if applicable.
68
+ if isinstance(self.module[module_id].unwrapped(), TargetNetworkAPI):
69
+ self.module[module_id].unwrapped().make_target_networks()
70
+ return marl_spec
71
+
72
+ @override(Learner)
73
+ def after_gradient_based_update(self, *, timesteps: Dict[str, Any]) -> None:
74
+ """Updates the target Q Networks."""
75
+ super().after_gradient_based_update(timesteps=timesteps)
76
+
77
+ timestep = timesteps.get(NUM_ENV_STEPS_SAMPLED_LIFETIME, 0)
78
+
79
+ # TODO (sven): Maybe we should have a `after_gradient_based_update`
80
+ # method per module?
81
+ for module_id, module in self.module._rl_modules.items():
82
+ config = self.config.get_config_for_module(module_id)
83
+ last_update_ts_key = (module_id, LAST_TARGET_UPDATE_TS)
84
+ if timestep - self.metrics.peek(
85
+ last_update_ts_key, default=0
86
+ ) >= config.target_network_update_freq and isinstance(
87
+ module.unwrapped(), TargetNetworkAPI
88
+ ):
89
+ for (
90
+ main_net,
91
+ target_net,
92
+ ) in module.unwrapped().get_target_network_pairs():
93
+ update_target_network(
94
+ main_net=main_net,
95
+ target_net=target_net,
96
+ tau=config.tau,
97
+ )
98
+ # Increase lifetime target network update counter by one.
99
+ self.metrics.log_value((module_id, NUM_TARGET_UPDATES), 1, reduce="sum")
100
+ # Update the (single-value -> window=1) last updated timestep metric.
101
+ self.metrics.log_value(last_update_ts_key, timestep, window=1)
102
+
103
+ @abc.abstractmethod
104
+ def _reset_noise(self) -> None:
105
+ """Resets the noise in the `Algorithm.training_step()`
106
+
107
+ Note, this can be overridden by the user to reset the noise at different
108
+ points in the training loop.
109
+ """
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_rainbow_noisy_net_configs.py ADDED
@@ -0,0 +1,60 @@
1
+ from dataclasses import dataclass
2
+
3
+ from ray.rllib.core.models.base import Encoder
4
+ from ray.rllib.core.models.configs import _framework_implemented, _MLPConfig
5
+ from ray.rllib.utils.annotations import ExperimentalAPI, override
6
+
7
+
8
+ @ExperimentalAPI
9
+ @dataclass
10
+ class NoisyMLPConfig(_MLPConfig):
11
+ std_init: float = 0.1
12
+
13
+ @override(_MLPConfig)
14
+ def _validate(self, framework: str = "torch"):
15
+ """Makes sure that standard deviation is positive."""
16
+ super()._validate(framework=framework)
17
+
18
+ if self.std_init < 0.0:
19
+ raise ValueError(
20
+ f"`std_init` ({self.std_init}) of `NoisyMLPConfig must be "
21
+ "non-negative."
22
+ )
23
+
24
+
25
+ @ExperimentalAPI
26
+ @dataclass
27
+ class NoisyMLPEncoderConfig(NoisyMLPConfig):
28
+ @_framework_implemented()
29
+ def build(self, framework: str = "torch") -> "Encoder":
30
+ self._validate(framework)
31
+
32
+ if framework == "torch":
33
+ from ray.rllib.algorithms.dqn.torch.dqn_rainbow_torch_noisy_net import (
34
+ TorchNoisyMLPEncoder,
35
+ )
36
+
37
+ return TorchNoisyMLPEncoder(self)
38
+ else:
39
+ raise ValueError(
40
+ "`NoisyMLPEncoder` is not implemented for framework " f"{framework}. "
41
+ )
42
+
43
+
44
+ @ExperimentalAPI
45
+ @dataclass
46
+ class NoisyMLPHeadConfig(NoisyMLPConfig):
47
+ @_framework_implemented()
48
+ def build(self, framework: str = "torch") -> "Encoder":
49
+ self._validate(framework)
50
+
51
+ if framework == "torch":
52
+ from ray.rllib.algorithms.dqn.torch.dqn_rainbow_torch_noisy_net import (
53
+ TorchNoisyMLPHead,
54
+ )
55
+
56
+ return TorchNoisyMLPHead(self)
57
+ else:
58
+ raise ValueError(
59
+ "`NoisyMLPHead` is not implemented for framework " f"{framework}. "
60
+ )
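
`std_init` above sets the initial noise scale for the noisy layers used when `noisy=True` (the "Noisy Networks for Exploration" approach, arXiv:1706.10295). Below is a minimal, self-contained sketch of a factorized-Gaussian noisy linear layer for orientation only; the class name `TinyNoisyLinear` is made up and this is not RLlib's `torch_noisy_linear.py`:

    import math
    import torch
    from torch import nn

    class TinyNoisyLinear(nn.Module):
        """Illustrative noisy layer: weights = mu + sigma * eps with factorized noise."""

        def __init__(self, in_features: int, out_features: int, std_init: float = 0.5):
            super().__init__()
            bound = 1.0 / math.sqrt(in_features)
            self.w_mu = nn.Parameter(
                torch.empty(out_features, in_features).uniform_(-bound, bound))
            self.w_sigma = nn.Parameter(
                torch.full((out_features, in_features), std_init / math.sqrt(in_features)))
            self.b_mu = nn.Parameter(torch.zeros(out_features))
            self.b_sigma = nn.Parameter(
                torch.full((out_features,), std_init / math.sqrt(in_features)))
            self.in_features, self.out_features = in_features, out_features

        @staticmethod
        def _f(x: torch.Tensor) -> torch.Tensor:
            # Noise transformation f(x) = sign(x) * sqrt(|x|) from the paper.
            return x.sign() * x.abs().sqrt()

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            eps_in = self._f(torch.randn(self.in_features))
            eps_out = self._f(torch.randn(self.out_features))
            w = self.w_mu + self.w_sigma * torch.outer(eps_out, eps_in)
            b = self.b_mu + self.b_sigma * eps_out
            return nn.functional.linear(x, w, b)
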
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_tf_policy.py ADDED
@@ -0,0 +1,500 @@
1
+ """TensorFlow policy class used for DQN"""
2
+
3
+ from typing import Dict
4
+
5
+ import gymnasium as gym
6
+ import numpy as np
7
+
8
+ import ray
9
+ from ray.rllib.algorithms.dqn.distributional_q_tf_model import DistributionalQTFModel
10
+ from ray.rllib.evaluation.postprocessing import adjust_nstep
11
+ from ray.rllib.models import ModelCatalog
12
+ from ray.rllib.models.modelv2 import ModelV2
13
+ from ray.rllib.models.tf.tf_action_dist import get_categorical_class_with_temperature
14
+ from ray.rllib.policy.policy import Policy
15
+ from ray.rllib.policy.sample_batch import SampleBatch
16
+ from ray.rllib.policy.tf_mixins import LearningRateSchedule, TargetNetworkMixin
17
+ from ray.rllib.policy.tf_policy_template import build_tf_policy
18
+ from ray.rllib.utils.error import UnsupportedSpaceException
19
+ from ray.rllib.utils.exploration import ParameterNoise
20
+ from ray.rllib.utils.framework import try_import_tf
21
+ from ray.rllib.utils.numpy import convert_to_numpy
22
+ from ray.rllib.utils.tf_utils import (
23
+ huber_loss,
24
+ l2_loss,
25
+ make_tf_callable,
26
+ minimize_and_clip,
27
+ reduce_mean_ignore_inf,
28
+ )
29
+ from ray.rllib.utils.typing import AlgorithmConfigDict, ModelGradients, TensorType
30
+
31
+ tf1, tf, tfv = try_import_tf()
32
+
33
+ # Importance sampling weights for prioritized replay
34
+ PRIO_WEIGHTS = "weights"
35
+ Q_SCOPE = "q_func"
36
+ Q_TARGET_SCOPE = "target_q_func"
37
+
38
+
39
+ class QLoss:
40
+ def __init__(
41
+ self,
42
+ q_t_selected: TensorType,
43
+ q_logits_t_selected: TensorType,
44
+ q_tp1_best: TensorType,
45
+ q_dist_tp1_best: TensorType,
46
+ importance_weights: TensorType,
47
+ rewards: TensorType,
48
+ done_mask: TensorType,
49
+ gamma: float = 0.99,
50
+ n_step: int = 1,
51
+ num_atoms: int = 1,
52
+ v_min: float = -10.0,
53
+ v_max: float = 10.0,
54
+ loss_fn=huber_loss,
55
+ ):
56
+
57
+ if num_atoms > 1:
58
+ # Distributional Q-learning, which corresponds to a cross-entropy loss
59
+
60
+ z = tf.range(num_atoms, dtype=tf.float32)
61
+ z = v_min + z * (v_max - v_min) / float(num_atoms - 1)
62
+
63
+ # (batch_size, 1) * (1, num_atoms) = (batch_size, num_atoms)
64
+ r_tau = tf.expand_dims(rewards, -1) + gamma**n_step * tf.expand_dims(
65
+ 1.0 - done_mask, -1
66
+ ) * tf.expand_dims(z, 0)
67
+ r_tau = tf.clip_by_value(r_tau, v_min, v_max)
68
+ b = (r_tau - v_min) / ((v_max - v_min) / float(num_atoms - 1))
69
+ lb = tf.floor(b)
70
+ ub = tf.math.ceil(b)
71
+ # Indispensable check that is missing in most implementations:
72
+ # when b happens to be an integer, lb == ub, so pr_j(s', a*) will
73
+ # be discarded because (ub-b) == (b-lb) == 0
74
+ floor_equal_ceil = tf.cast(tf.less(ub - lb, 0.5), tf.float32)
75
+
76
+ l_project = tf.one_hot(
77
+ tf.cast(lb, dtype=tf.int32), num_atoms
78
+ ) # (batch_size, num_atoms, num_atoms)
79
+ u_project = tf.one_hot(
80
+ tf.cast(ub, dtype=tf.int32), num_atoms
81
+ ) # (batch_size, num_atoms, num_atoms)
82
+ ml_delta = q_dist_tp1_best * (ub - b + floor_equal_ceil)
83
+ mu_delta = q_dist_tp1_best * (b - lb)
84
+ ml_delta = tf.reduce_sum(l_project * tf.expand_dims(ml_delta, -1), axis=1)
85
+ mu_delta = tf.reduce_sum(u_project * tf.expand_dims(mu_delta, -1), axis=1)
86
+ m = ml_delta + mu_delta
87
+
88
+ # Rainbow paper claims that using this cross entropy loss for
89
+ # priority is robust and insensitive to `prioritized_replay_alpha`
90
+ self.td_error = tf.nn.softmax_cross_entropy_with_logits(
91
+ labels=m, logits=q_logits_t_selected
92
+ )
93
+ self.loss = tf.reduce_mean(
94
+ self.td_error * tf.cast(importance_weights, tf.float32)
95
+ )
96
+ self.stats = {
97
+ # TODO: better Q stats for dist dqn
98
+ "mean_td_error": tf.reduce_mean(self.td_error),
99
+ }
100
+ else:
101
+ q_tp1_best_masked = (1.0 - done_mask) * q_tp1_best
102
+
103
+ # compute RHS of bellman equation
104
+ q_t_selected_target = rewards + gamma**n_step * q_tp1_best_masked
105
+
106
+ # compute the error (potentially clipped)
107
+ self.td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
108
+ self.loss = tf.reduce_mean(
109
+ tf.cast(importance_weights, tf.float32) * loss_fn(self.td_error)
110
+ )
111
+ self.stats = {
112
+ "mean_q": tf.reduce_mean(q_t_selected),
113
+ "min_q": tf.reduce_min(q_t_selected),
114
+ "max_q": tf.reduce_max(q_t_selected),
115
+ "mean_td_error": tf.reduce_mean(self.td_error),
116
+ }
117
+
118
+
119
+ class ComputeTDErrorMixin:
120
+ """Assign the `compute_td_error` method to the DQNTFPolicy
121
+
122
+ This allows us to prioritize on the worker side.
123
+ """
124
+
125
+ def __init__(self):
126
+ @make_tf_callable(self.get_session(), dynamic_shape=True)
127
+ def compute_td_error(
128
+ obs_t, act_t, rew_t, obs_tp1, terminateds_mask, importance_weights
129
+ ):
130
+ # Do forward pass on loss to update td error attribute
131
+ build_q_losses(
132
+ self,
133
+ self.model,
134
+ None,
135
+ {
136
+ SampleBatch.CUR_OBS: tf.convert_to_tensor(obs_t),
137
+ SampleBatch.ACTIONS: tf.convert_to_tensor(act_t),
138
+ SampleBatch.REWARDS: tf.convert_to_tensor(rew_t),
139
+ SampleBatch.NEXT_OBS: tf.convert_to_tensor(obs_tp1),
140
+ SampleBatch.TERMINATEDS: tf.convert_to_tensor(terminateds_mask),
141
+ PRIO_WEIGHTS: tf.convert_to_tensor(importance_weights),
142
+ },
143
+ )
144
+
145
+ return self.q_loss.td_error
146
+
147
+ self.compute_td_error = compute_td_error
148
+
149
+
150
+ def build_q_model(
151
+ policy: Policy,
152
+ obs_space: gym.spaces.Space,
153
+ action_space: gym.spaces.Space,
154
+ config: AlgorithmConfigDict,
155
+ ) -> ModelV2:
156
+ """Build q_model and target_model for DQN
157
+
158
+ Args:
159
+ policy: The Policy, which will use the model for optimization.
160
+ obs_space (gym.spaces.Space): The policy's observation space.
161
+ action_space (gym.spaces.Space): The policy's action space.
162
+ config (AlgorithmConfigDict): The Algorithm's config dict.
163
+
164
+ Returns:
165
+ ModelV2: The Model for the Policy to use.
166
+ Note: The target q model will not be returned, just assigned to
167
+ `policy.target_model`.
168
+ """
169
+ if not isinstance(action_space, gym.spaces.Discrete):
170
+ raise UnsupportedSpaceException(
171
+ "Action space {} is not supported for DQN.".format(action_space)
172
+ )
173
+
174
+ if config["hiddens"]:
175
+ # try to infer the last layer size, otherwise fall back to 256
176
+ num_outputs = ([256] + list(config["model"]["fcnet_hiddens"]))[-1]
177
+ config["model"]["no_final_linear"] = True
178
+ else:
179
+ num_outputs = action_space.n
180
+
181
+ q_model = ModelCatalog.get_model_v2(
182
+ obs_space=obs_space,
183
+ action_space=action_space,
184
+ num_outputs=num_outputs,
185
+ model_config=config["model"],
186
+ framework="tf",
187
+ model_interface=DistributionalQTFModel,
188
+ name=Q_SCOPE,
189
+ num_atoms=config["num_atoms"],
190
+ dueling=config["dueling"],
191
+ q_hiddens=config["hiddens"],
192
+ use_noisy=config["noisy"],
193
+ v_min=config["v_min"],
194
+ v_max=config["v_max"],
195
+ sigma0=config["sigma0"],
196
+ # TODO(sven): Move option to add LayerNorm after each Dense
197
+ # generically into ModelCatalog.
198
+ add_layer_norm=isinstance(getattr(policy, "exploration", None), ParameterNoise)
199
+ or config["exploration_config"]["type"] == "ParameterNoise",
200
+ )
201
+
202
+ policy.target_model = ModelCatalog.get_model_v2(
203
+ obs_space=obs_space,
204
+ action_space=action_space,
205
+ num_outputs=num_outputs,
206
+ model_config=config["model"],
207
+ framework="tf",
208
+ model_interface=DistributionalQTFModel,
209
+ name=Q_TARGET_SCOPE,
210
+ num_atoms=config["num_atoms"],
211
+ dueling=config["dueling"],
212
+ q_hiddens=config["hiddens"],
213
+ use_noisy=config["noisy"],
214
+ v_min=config["v_min"],
215
+ v_max=config["v_max"],
216
+ sigma0=config["sigma0"],
217
+ # TODO(sven): Move option to add LayerNorm after each Dense
218
+ # generically into ModelCatalog.
219
+ add_layer_norm=isinstance(getattr(policy, "exploration", None), ParameterNoise)
220
+ or config["exploration_config"]["type"] == "ParameterNoise",
221
+ )
222
+
223
+ return q_model
224
+
225
+
226
+ def get_distribution_inputs_and_class(
227
+ policy: Policy, model: ModelV2, input_dict: SampleBatch, *, explore=True, **kwargs
228
+ ):
229
+ q_vals = compute_q_values(
230
+ policy, model, input_dict, state_batches=None, explore=explore
231
+ )
232
+ q_vals = q_vals[0] if isinstance(q_vals, tuple) else q_vals
233
+
234
+ policy.q_values = q_vals
235
+
236
+ # Return a Categorical distribution class where the temperature
237
+ # parameter is partially bound to the configured value.
238
+ temperature = policy.config["categorical_distribution_temperature"]
239
+
240
+ return (
241
+ policy.q_values,
242
+ get_categorical_class_with_temperature(temperature),
243
+ [],
244
+ ) # state-out
245
+
246
+
247
+ def build_q_losses(policy: Policy, model, _, train_batch: SampleBatch) -> TensorType:
248
+ """Constructs the loss for DQNTFPolicy.
249
+
250
+ Args:
251
+ policy: The Policy to calculate the loss for.
252
+ model (ModelV2): The Model to calculate the loss for.
253
+ train_batch: The training data.
254
+
255
+ Returns:
256
+ TensorType: A single loss tensor.
257
+ """
258
+ config = policy.config
259
+ # q network evaluation
260
+ q_t, q_logits_t, q_dist_t, _ = compute_q_values(
261
+ policy,
262
+ model,
263
+ SampleBatch({"obs": train_batch[SampleBatch.CUR_OBS]}),
264
+ state_batches=None,
265
+ explore=False,
266
+ )
267
+
268
+ # target q network evaluation
269
+ q_tp1, q_logits_tp1, q_dist_tp1, _ = compute_q_values(
270
+ policy,
271
+ policy.target_model,
272
+ SampleBatch({"obs": train_batch[SampleBatch.NEXT_OBS]}),
273
+ state_batches=None,
274
+ explore=False,
275
+ )
276
+ if not hasattr(policy, "target_q_func_vars"):
277
+ policy.target_q_func_vars = policy.target_model.variables()
278
+
279
+ # q scores for actions which we know were selected in the given state.
280
+ one_hot_selection = tf.one_hot(
281
+ tf.cast(train_batch[SampleBatch.ACTIONS], tf.int32), policy.action_space.n
282
+ )
283
+ q_t_selected = tf.reduce_sum(q_t * one_hot_selection, 1)
284
+ q_logits_t_selected = tf.reduce_sum(
285
+ q_logits_t * tf.expand_dims(one_hot_selection, -1), 1
286
+ )
287
+
288
+ # compute estimate of best possible value starting from state at t + 1
289
+ if config["double_q"]:
290
+ (
291
+ q_tp1_using_online_net,
292
+ q_logits_tp1_using_online_net,
293
+ q_dist_tp1_using_online_net,
294
+ _,
295
+ ) = compute_q_values(
296
+ policy,
297
+ model,
298
+ SampleBatch({"obs": train_batch[SampleBatch.NEXT_OBS]}),
299
+ state_batches=None,
300
+ explore=False,
301
+ )
302
+ q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
303
+ q_tp1_best_one_hot_selection = tf.one_hot(
304
+ q_tp1_best_using_online_net, policy.action_space.n
305
+ )
306
+ q_tp1_best = tf.reduce_sum(q_tp1 * q_tp1_best_one_hot_selection, 1)
307
+ q_dist_tp1_best = tf.reduce_sum(
308
+ q_dist_tp1 * tf.expand_dims(q_tp1_best_one_hot_selection, -1), 1
309
+ )
310
+ else:
311
+ q_tp1_best_one_hot_selection = tf.one_hot(
312
+ tf.argmax(q_tp1, 1), policy.action_space.n
313
+ )
314
+ q_tp1_best = tf.reduce_sum(q_tp1 * q_tp1_best_one_hot_selection, 1)
315
+ q_dist_tp1_best = tf.reduce_sum(
316
+ q_dist_tp1 * tf.expand_dims(q_tp1_best_one_hot_selection, -1), 1
317
+ )
318
+
319
+ loss_fn = huber_loss if policy.config["td_error_loss_fn"] == "huber" else l2_loss
320
+
321
+ policy.q_loss = QLoss(
322
+ q_t_selected,
323
+ q_logits_t_selected,
324
+ q_tp1_best,
325
+ q_dist_tp1_best,
326
+ train_batch[PRIO_WEIGHTS],
327
+ tf.cast(train_batch[SampleBatch.REWARDS], tf.float32),
328
+ tf.cast(train_batch[SampleBatch.TERMINATEDS], tf.float32),
329
+ config["gamma"],
330
+ config["n_step"],
331
+ config["num_atoms"],
332
+ config["v_min"],
333
+ config["v_max"],
334
+ loss_fn,
335
+ )
336
+
337
+ return policy.q_loss.loss
338
+
339
+
340
+ def adam_optimizer(
341
+ policy: Policy, config: AlgorithmConfigDict
342
+ ) -> "tf.keras.optimizers.Optimizer":
343
+ if policy.config["framework"] == "tf2":
344
+ return tf.keras.optimizers.Adam(
345
+ learning_rate=policy.cur_lr, epsilon=config["adam_epsilon"]
346
+ )
347
+ else:
348
+ return tf1.train.AdamOptimizer(
349
+ learning_rate=policy.cur_lr, epsilon=config["adam_epsilon"]
350
+ )
351
+
352
+
353
+ def clip_gradients(
354
+ policy: Policy, optimizer: "tf.keras.optimizers.Optimizer", loss: TensorType
355
+ ) -> ModelGradients:
356
+ if not hasattr(policy, "q_func_vars"):
357
+ policy.q_func_vars = policy.model.variables()
358
+
359
+ return minimize_and_clip(
360
+ optimizer,
361
+ loss,
362
+ var_list=policy.q_func_vars,
363
+ clip_val=policy.config["grad_clip"],
364
+ )
365
+
366
+
367
+ def build_q_stats(policy: Policy, batch) -> Dict[str, TensorType]:
368
+ return dict(
369
+ {
370
+ "cur_lr": tf.cast(policy.cur_lr, tf.float64),
371
+ },
372
+ **policy.q_loss.stats
373
+ )
374
+
375
+
376
+ def setup_mid_mixins(policy: Policy, obs_space, action_space, config) -> None:
377
+ LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
378
+ ComputeTDErrorMixin.__init__(policy)
379
+
380
+
381
+ def setup_late_mixins(
382
+ policy: Policy,
383
+ obs_space: gym.spaces.Space,
384
+ action_space: gym.spaces.Space,
385
+ config: AlgorithmConfigDict,
386
+ ) -> None:
387
+ TargetNetworkMixin.__init__(policy)
388
+
389
+
390
+ def compute_q_values(
391
+ policy: Policy,
392
+ model: ModelV2,
393
+ input_batch: SampleBatch,
394
+ state_batches=None,
395
+ seq_lens=None,
396
+ explore=None,
397
+ is_training: bool = False,
398
+ ):
399
+
400
+ config = policy.config
401
+
402
+ model_out, state = model(input_batch, state_batches or [], seq_lens)
403
+
404
+ if config["num_atoms"] > 1:
405
+ (
406
+ action_scores,
407
+ z,
408
+ support_logits_per_action,
409
+ logits,
410
+ dist,
411
+ ) = model.get_q_value_distributions(model_out)
412
+ else:
413
+ (action_scores, logits, dist) = model.get_q_value_distributions(model_out)
414
+
415
+ if config["dueling"]:
416
+ state_score = model.get_state_value(model_out)
417
+ if config["num_atoms"] > 1:
418
+ support_logits_per_action_mean = tf.reduce_mean(
419
+ support_logits_per_action, 1
420
+ )
421
+ support_logits_per_action_centered = (
422
+ support_logits_per_action
423
+ - tf.expand_dims(support_logits_per_action_mean, 1)
424
+ )
425
+ support_logits_per_action = (
426
+ tf.expand_dims(state_score, 1) + support_logits_per_action_centered
427
+ )
428
+ support_prob_per_action = tf.nn.softmax(logits=support_logits_per_action)
429
+ value = tf.reduce_sum(input_tensor=z * support_prob_per_action, axis=-1)
430
+ logits = support_logits_per_action
431
+ dist = support_prob_per_action
432
+ else:
433
+ action_scores_mean = reduce_mean_ignore_inf(action_scores, 1)
434
+ action_scores_centered = action_scores - tf.expand_dims(
435
+ action_scores_mean, 1
436
+ )
437
+ value = state_score + action_scores_centered
438
+ else:
439
+ value = action_scores
440
+
441
+ return value, logits, dist, state
442
+
443
+
444
+ def postprocess_nstep_and_prio(
445
+ policy: Policy, batch: SampleBatch, other_agent=None, episode=None
446
+ ) -> SampleBatch:
447
+ # N-step Q adjustments.
448
+ if policy.config["n_step"] > 1:
449
+ adjust_nstep(policy.config["n_step"], policy.config["gamma"], batch)
450
+
451
+ # Create dummy prio-weights (1.0) in case we don't have any in
452
+ # the batch.
453
+ if PRIO_WEIGHTS not in batch:
454
+ batch[PRIO_WEIGHTS] = np.ones_like(batch[SampleBatch.REWARDS])
455
+
456
+ # Prioritize on the worker side.
457
+ if batch.count > 0 and policy.config["replay_buffer_config"].get(
458
+ "worker_side_prioritization", False
459
+ ):
460
+ td_errors = policy.compute_td_error(
461
+ batch[SampleBatch.OBS],
462
+ batch[SampleBatch.ACTIONS],
463
+ batch[SampleBatch.REWARDS],
464
+ batch[SampleBatch.NEXT_OBS],
465
+ batch[SampleBatch.TERMINATEDS],
466
+ batch[PRIO_WEIGHTS],
467
+ )
468
+ # Retain compatibility with old-style Replay args
469
+ epsilon = policy.config.get("replay_buffer_config", {}).get(
470
+ "prioritized_replay_eps"
471
+ ) or policy.config.get("prioritized_replay_eps")
472
+ if epsilon is None:
473
+ raise ValueError("prioritized_replay_eps not defined in config.")
474
+
475
+ new_priorities = np.abs(convert_to_numpy(td_errors)) + epsilon
476
+ batch[PRIO_WEIGHTS] = new_priorities
477
+
478
+ return batch
479
+
480
+
481
+ DQNTFPolicy = build_tf_policy(
482
+ name="DQNTFPolicy",
483
+ get_default_config=lambda: ray.rllib.algorithms.dqn.dqn.DQNConfig(),
484
+ make_model=build_q_model,
485
+ action_distribution_fn=get_distribution_inputs_and_class,
486
+ loss_fn=build_q_losses,
487
+ stats_fn=build_q_stats,
488
+ postprocess_fn=postprocess_nstep_and_prio,
489
+ optimizer_fn=adam_optimizer,
490
+ compute_gradients_fn=clip_gradients,
491
+ extra_action_out_fn=lambda policy: {"q_values": policy.q_values},
492
+ extra_learn_fetches_fn=lambda policy: {"td_error": policy.q_loss.td_error},
493
+ before_loss_init=setup_mid_mixins,
494
+ after_init=setup_late_mixins,
495
+ mixins=[
496
+ TargetNetworkMixin,
497
+ ComputeTDErrorMixin,
498
+ LearningRateSchedule,
499
+ ],
500
+ )
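
For `num_atoms > 1`, the `QLoss` above projects the n-step target distribution r + gamma^n * z onto the fixed support {z_i} before taking a softmax cross-entropy. A NumPy sketch of that projection follows; the function name `project_distribution` and the explicit per-sample loop are illustrative only, while the TF graph above computes the same thing vectorized:

    import numpy as np

    def project_distribution(rewards, dones, next_probs, gamma, n_step, v_min, v_max):
        # rewards, dones: [batch] float arrays; next_probs: [batch, num_atoms] dist p(z | s', a*).
        num_atoms = next_probs.shape[-1]
        z = v_min + np.arange(num_atoms) * (v_max - v_min) / (num_atoms - 1)
        r_tau = rewards[:, None] + (gamma ** n_step) * (1.0 - dones[:, None]) * z[None, :]
        r_tau = np.clip(r_tau, v_min, v_max)
        b = (r_tau - v_min) / ((v_max - v_min) / (num_atoms - 1))
        lb, ub = np.floor(b).astype(int), np.ceil(b).astype(int)
        m = np.zeros_like(next_probs)
        for i in range(next_probs.shape[0]):
            for j in range(num_atoms):
                # If b lands exactly on an atom (lb == ub), assign the full mass to it.
                eq = 1.0 if lb[i, j] == ub[i, j] else 0.0
                m[i, lb[i, j]] += next_probs[i, j] * (ub[i, j] - b[i, j] + eq)
                m[i, ub[i, j]] += next_probs[i, j] * (b[i, j] - lb[i, j])
        return m  # cross-entropy(m, q_logits_t_selected) then yields the per-sample TD error
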
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/dqn_torch_model.py ADDED
@@ -0,0 +1,173 @@
1
+ """PyTorch model for DQN"""
2
+
3
+ from typing import Sequence
4
+ import gymnasium as gym
5
+ from ray.rllib.models.torch.misc import SlimFC
6
+ from ray.rllib.models.torch.modules.noisy_layer import NoisyLayer
7
+ from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
8
+ from ray.rllib.utils.framework import try_import_torch
9
+ from ray.rllib.utils.typing import ModelConfigDict
10
+
11
+ torch, nn = try_import_torch()
12
+
13
+
14
+ class DQNTorchModel(TorchModelV2, nn.Module):
15
+ """Extension of standard TorchModelV2 to provide dueling-Q functionality."""
16
+
17
+ def __init__(
18
+ self,
19
+ obs_space: gym.spaces.Space,
20
+ action_space: gym.spaces.Space,
21
+ num_outputs: int,
22
+ model_config: ModelConfigDict,
23
+ name: str,
24
+ *,
25
+ q_hiddens: Sequence[int] = (256,),
26
+ dueling: bool = False,
27
+ dueling_activation: str = "relu",
28
+ num_atoms: int = 1,
29
+ use_noisy: bool = False,
30
+ v_min: float = -10.0,
31
+ v_max: float = 10.0,
32
+ sigma0: float = 0.5,
33
+ # TODO(sven): Move `add_layer_norm` into ModelCatalog as
34
+ # generic option, then error if we use ParameterNoise as
35
+ # Exploration type and do not have any LayerNorm layers in
36
+ # the net.
37
+ add_layer_norm: bool = False
38
+ ):
39
+ """Initialize variables of this model.
40
+
41
+ Extra model kwargs:
42
+ q_hiddens (Sequence[int]): List of layer-sizes after(!) the
43
+ Advantages(A)/Value(V)-split. Hence, each of the A- and V-
44
+ branches will have this structure of Dense layers. To define
45
+ the NN before this A/V-split, use - as always -
46
+ config["model"]["fcnet_hiddens"].
47
+ dueling: Whether to build the advantage(A)/value(V) heads
48
+ for dueling DQN. If True, Q-values are calculated as:
49
+ Q = (A - mean[A]) + V. If False, raw NN output is interpreted
50
+ as Q-values.
51
+ dueling_activation: The activation to use for all dueling
52
+ layers (A- and V-branch). One of "relu", "tanh", "linear".
53
+ num_atoms: If >1, enables distributional DQN.
54
+ use_noisy: Use noisy layers.
55
+ v_min: Min value support for distributional DQN.
56
+ v_max: Max value support for distributional DQN.
57
+ sigma0 (float): Initial value of noisy layers.
58
+ add_layer_norm: Enable layer norm (for param noise).
59
+ """
60
+ nn.Module.__init__(self)
61
+ super(DQNTorchModel, self).__init__(
62
+ obs_space, action_space, num_outputs, model_config, name
63
+ )
64
+
65
+ self.dueling = dueling
66
+ self.num_atoms = num_atoms
67
+ self.v_min = v_min
68
+ self.v_max = v_max
69
+ self.sigma0 = sigma0
70
+ ins = num_outputs
71
+
72
+ advantage_module = nn.Sequential()
73
+ value_module = nn.Sequential()
74
+
75
+ # Dueling case: Build the shared (advantages and value) fc-network.
76
+ for i, n in enumerate(q_hiddens):
77
+ if use_noisy:
78
+ advantage_module.add_module(
79
+ "dueling_A_{}".format(i),
80
+ NoisyLayer(
81
+ ins, n, sigma0=self.sigma0, activation=dueling_activation
82
+ ),
83
+ )
84
+ value_module.add_module(
85
+ "dueling_V_{}".format(i),
86
+ NoisyLayer(
87
+ ins, n, sigma0=self.sigma0, activation=dueling_activation
88
+ ),
89
+ )
90
+ else:
91
+ advantage_module.add_module(
92
+ "dueling_A_{}".format(i),
93
+ SlimFC(ins, n, activation_fn=dueling_activation),
94
+ )
95
+ value_module.add_module(
96
+ "dueling_V_{}".format(i),
97
+ SlimFC(ins, n, activation_fn=dueling_activation),
98
+ )
99
+ # Add LayerNorm after each Dense.
100
+ if add_layer_norm:
101
+ advantage_module.add_module(
102
+ "LayerNorm_A_{}".format(i), nn.LayerNorm(n)
103
+ )
104
+ value_module.add_module("LayerNorm_V_{}".format(i), nn.LayerNorm(n))
105
+ ins = n
106
+
107
+ # Actual Advantages layer (nodes=num-actions).
108
+ if use_noisy:
109
+ advantage_module.add_module(
110
+ "A",
111
+ NoisyLayer(
112
+ ins, self.action_space.n * self.num_atoms, sigma0, activation=None
113
+ ),
114
+ )
115
+ elif q_hiddens:
116
+ advantage_module.add_module(
117
+ "A", SlimFC(ins, action_space.n * self.num_atoms, activation_fn=None)
118
+ )
119
+
120
+ self.advantage_module = advantage_module
121
+
122
+ # Value layer (nodes=1).
123
+ if self.dueling:
124
+ if use_noisy:
125
+ value_module.add_module(
126
+ "V", NoisyLayer(ins, self.num_atoms, sigma0, activation=None)
127
+ )
128
+ elif q_hiddens:
129
+ value_module.add_module(
130
+ "V", SlimFC(ins, self.num_atoms, activation_fn=None)
131
+ )
132
+ self.value_module = value_module
133
+
134
+ def get_q_value_distributions(self, model_out):
135
+ """Returns distributional values for Q(s, a) given a state embedding.
136
+
137
+ Override this in your custom model to customize the Q output head.
138
+
139
+ Args:
140
+ model_out: Embedding from the model layers.
141
+
142
+ Returns:
143
+ (action_scores, logits, dist) if num_atoms == 1, otherwise
144
+ (action_scores, z, support_logits_per_action, logits, dist)
145
+ """
146
+ action_scores = self.advantage_module(model_out)
147
+
148
+ if self.num_atoms > 1:
149
+ # Distributional Q-learning uses a discrete support z
150
+ # to represent the action value distribution
151
+ z = torch.arange(0.0, self.num_atoms, dtype=torch.float32).to(
152
+ action_scores.device
153
+ )
154
+ z = self.v_min + z * (self.v_max - self.v_min) / float(self.num_atoms - 1)
155
+
156
+ support_logits_per_action = torch.reshape(
157
+ action_scores, shape=(-1, self.action_space.n, self.num_atoms)
158
+ )
159
+ support_prob_per_action = nn.functional.softmax(
160
+ support_logits_per_action, dim=-1
161
+ )
162
+ action_scores = torch.sum(z * support_prob_per_action, dim=-1)
163
+ logits = support_logits_per_action
164
+ probs = support_prob_per_action
165
+ return action_scores, z, support_logits_per_action, logits, probs
166
+ else:
167
+ logits = torch.unsqueeze(torch.ones_like(action_scores), -1)
168
+ return action_scores, logits, logits
169
+
170
+ def get_state_value(self, model_out):
171
+ """Returns the state value prediction for the given state embedding."""
172
+
173
+ return self.value_module(model_out)
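
The dueling docstring above defines Q-values as Q = (A - mean[A]) + V. As a minimal illustration of that combination (plain torch, toy numbers; the tensor names are not part of RLlib's API), the split can be sketched as:

    import torch

    # Hypothetical head outputs for a batch of 2 states and 3 actions.
    advantages = torch.tensor([[1.0, 2.0, 3.0],
                               [0.5, 0.0, -0.5]])   # A(s, a)
    state_values = torch.tensor([[10.0], [5.0]])    # V(s)

    # Dueling combination as described above: Q = V + (A - mean_a[A]).
    q_values = state_values + (advantages - advantages.mean(dim=1, keepdim=True))
    print(q_values)  # tensor([[ 9., 10., 11.], [5.5, 5.0, 4.5]])
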
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__init__.py ADDED
File without changes
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__pycache__/dqn_rainbow_torch_learner.cpython-310.pyc ADDED
Binary file (4.49 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/__pycache__/dqn_rainbow_torch_rl_module.cpython-310.pyc ADDED
Binary file (7.51 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/dqn_rainbow_torch_learner.py ADDED
@@ -0,0 +1,264 @@
1
+ from typing import Dict
2
+
3
+ from ray.rllib.algorithms.dqn.dqn import DQNConfig
4
+ from ray.rllib.algorithms.dqn.dqn_rainbow_learner import (
5
+ ATOMS,
6
+ DQNRainbowLearner,
7
+ QF_LOSS_KEY,
8
+ QF_LOGITS,
9
+ QF_MEAN_KEY,
10
+ QF_MAX_KEY,
11
+ QF_MIN_KEY,
12
+ QF_NEXT_PREDS,
13
+ QF_TARGET_NEXT_PREDS,
14
+ QF_TARGET_NEXT_PROBS,
15
+ QF_PREDS,
16
+ QF_PROBS,
17
+ TD_ERROR_MEAN_KEY,
18
+ )
19
+ from ray.rllib.core.columns import Columns
20
+ from ray.rllib.core.learner.torch.torch_learner import TorchLearner
21
+ from ray.rllib.utils.annotations import override
22
+ from ray.rllib.utils.framework import try_import_torch
23
+ from ray.rllib.utils.metrics import TD_ERROR_KEY
24
+ from ray.rllib.utils.typing import ModuleID, TensorType
25
+
26
+
27
+ torch, nn = try_import_torch()
28
+
29
+
30
+ class DQNRainbowTorchLearner(DQNRainbowLearner, TorchLearner):
31
+ """Implements `torch`-specific DQN Rainbow loss logic on top of `DQNRainbowLearner`
32
+
33
+ This ' Learner' class implements the loss in its
34
+ `self.compute_loss_for_module()` method.
35
+ """
36
+
37
+ @override(TorchLearner)
38
+ def compute_loss_for_module(
39
+ self,
40
+ *,
41
+ module_id: ModuleID,
42
+ config: DQNConfig,
43
+ batch: Dict,
44
+ fwd_out: Dict[str, TensorType]
45
+ ) -> TensorType:
46
+
47
+ q_curr = fwd_out[QF_PREDS]
48
+ q_target_next = fwd_out[QF_TARGET_NEXT_PREDS]
49
+
50
+ # Get the Q-values for the selected actions in the rollout.
51
+ # TODO (simon, sven): Check, if we can use `gather` with a complex action
52
+ # space - we might need the one_hot_selection. Also test performance.
53
+ q_selected = torch.nan_to_num(
54
+ torch.gather(
55
+ q_curr,
56
+ dim=1,
57
+ index=batch[Columns.ACTIONS].view(-1, 1).expand(-1, 1).long(),
58
+ ),
59
+ neginf=0.0,
60
+ ).squeeze()
61
+
62
+ # Use double Q learning.
63
+ if config.double_q:
64
+ # Then we evaluate the target Q-function at the best action (greedy action)
65
+ # over the online Q-function.
66
+ # Mark the best online Q-value of the next state.
67
+ q_next_best_idx = (
68
+ torch.argmax(fwd_out[QF_NEXT_PREDS], dim=1).unsqueeze(dim=-1).long()
69
+ )
70
+ # Get the Q-value of the target network at maximum of the online network
71
+ # (bootstrap action).
72
+ q_next_best = torch.nan_to_num(
73
+ torch.gather(q_target_next, dim=1, index=q_next_best_idx),
74
+ neginf=0.0,
75
+ ).squeeze()
76
+ else:
77
+ # Mark the maximum Q-value(s).
78
+ q_next_best_idx = (
79
+ torch.argmax(q_target_next, dim=1).unsqueeze(dim=-1).long()
80
+ )
81
+ # Get the maximum Q-value(s).
82
+ q_next_best = torch.nan_to_num(
83
+ torch.gather(q_target_next, dim=1, index=q_next_best_idx),
84
+ neginf=0.0,
85
+ ).squeeze()
86
+
87
+ # If we learn a Q-distribution.
88
+ if config.num_atoms > 1:
89
+ # Extract the Q-logits evaluated at the selected actions.
90
+ # (Note, `torch.gather` should be faster than multiplication
91
+ # with a one-hot tensor.)
92
+ # (32, 2, 10) -> (32, 10)
93
+ q_logits_selected = torch.gather(
94
+ fwd_out[QF_LOGITS],
95
+ dim=1,
96
+ # Note, the Q-logits are of shape (B, action_space.n, num_atoms)
97
+ # while the actions have shape (B, 1). We reshape actions to
98
+ # (B, 1, num_atoms).
99
+ index=batch[Columns.ACTIONS]
100
+ .view(-1, 1, 1)
101
+ .expand(-1, 1, config.num_atoms)
102
+ .long(),
103
+ ).squeeze(dim=1)
104
+ # Get the probabilities for the maximum Q-value(s).
105
+ q_probs_next_best = torch.gather(
106
+ fwd_out[QF_TARGET_NEXT_PROBS],
107
+ dim=1,
108
+ # Change the view and then expand to get to the dimensions
109
+ # of the probabilities (dims 0 and 2, 1 should be reduced
110
+ # from 2 -> 1).
111
+ index=q_next_best_idx.view(-1, 1, 1).expand(-1, 1, config.num_atoms),
112
+ ).squeeze(dim=1)
113
+
114
+ # For distributional Q-learning we use an entropy loss.
115
+
116
+ # Extract the support grid for the Q distribution.
117
+ z = fwd_out[ATOMS]
118
+ # TODO (simon): Enable computing on GPU.
119
+ # (batch_size, 1) * (1, num_atoms) = (batch_size, num_atoms)
120
+ r_tau = torch.clamp(
121
+ batch[Columns.REWARDS].unsqueeze(dim=-1)
122
+ + (
123
+ config.gamma ** batch["n_step"]
124
+ * (1.0 - batch[Columns.TERMINATEDS].float())
125
+ ).unsqueeze(dim=-1)
126
+ * z,
127
+ config.v_min,
128
+ config.v_max,
129
+ ).squeeze(dim=1)
130
+ # (32, 10)
131
+ b = (r_tau - config.v_min) / (
132
+ (config.v_max - config.v_min) / float(config.num_atoms - 1.0)
133
+ )
134
+ lower_bound = torch.floor(b)
135
+ upper_bound = torch.ceil(b)
136
+
137
+ floor_equal_ceil = ((upper_bound - lower_bound) < 0.5).float()
138
+
139
+ # (B, num_atoms, num_atoms).
140
+ lower_projection = nn.functional.one_hot(
141
+ lower_bound.long(), config.num_atoms
142
+ )
143
+ upper_projection = nn.functional.one_hot(
144
+ upper_bound.long(), config.num_atoms
145
+ )
146
+ # (32, 10)
147
+ ml_delta = q_probs_next_best * (upper_bound - b + floor_equal_ceil)
148
+ mu_delta = q_probs_next_best * (b - lower_bound)
149
+ # (32, 10)
150
+ ml_delta = torch.sum(lower_projection * ml_delta.unsqueeze(dim=-1), dim=1)
151
+ mu_delta = torch.sum(upper_projection * mu_delta.unsqueeze(dim=-1), dim=1)
152
+ # We do not want to propagate through the distributional targets.
153
+ # (32, 10)
154
+ m = (ml_delta + mu_delta).detach()
155
+
156
+ # The Rainbow paper uses a KL-divergence loss. For gradient-based optimization
157
+ # this is equivalent to the cross-entropy loss, since the two differ only by
158
+ # an entropy term that is constant w.r.t. the parameters (identical gradients).
159
+ td_error = nn.CrossEntropyLoss(reduction="none")(q_logits_selected, m)
160
+ # Compute the weighted loss (importance sampling weights).
161
+ total_loss = torch.mean(batch["weights"] * td_error)
162
+ else:
163
+ # Mask out Q-values of terminated next states in the targets.
164
+ q_next_best_masked = (
165
+ 1.0 - batch[Columns.TERMINATEDS].float()
166
+ ) * q_next_best
167
+
168
+ # Compute the RHS of the Bellman equation.
169
+ # Detach this node from the computation graph as we do not want to
170
+ # backpropagate through the target network when optimizing the Q loss.
171
+ q_selected_target = (
172
+ batch[Columns.REWARDS]
173
+ + (config.gamma ** batch["n_step"]) * q_next_best_masked
174
+ ).detach()
175
+
176
+ # Choose the requested loss function. Note, in case of the Huber loss
177
+ # we fall back to the default of `delta=1.0`.
178
+ loss_fn = nn.HuberLoss if config.td_error_loss_fn == "huber" else nn.MSELoss
179
+ # Compute the TD error.
180
+ td_error = torch.abs(q_selected - q_selected_target)
181
+ # Compute the weighted loss (importance sampling weights).
182
+ total_loss = torch.mean(
183
+ batch["weights"]
184
+ * loss_fn(reduction="none")(q_selected, q_selected_target)
185
+ )
186
+
187
+ # Log the TD-error with reduce=None, such that - in case we have n parallel
188
+ # Learners - we will re-concatenate the produced TD-error tensors to yield
189
+ # a 1:1 representation of the original batch.
190
+ self.metrics.log_value(
191
+ key=(module_id, TD_ERROR_KEY),
192
+ value=td_error,
193
+ reduce=None,
194
+ clear_on_reduce=True,
195
+ )
196
+ # Log other important loss stats (reduce=mean (default), but with window=1
197
+ # in order to keep them history free).
198
+ self.metrics.log_dict(
199
+ {
200
+ QF_LOSS_KEY: total_loss,
201
+ QF_MEAN_KEY: torch.mean(q_selected),
202
+ QF_MAX_KEY: torch.max(q_selected),
203
+ QF_MIN_KEY: torch.min(q_selected),
204
+ TD_ERROR_MEAN_KEY: torch.mean(td_error),
205
+ },
206
+ key=module_id,
207
+ window=1, # <- single items (should not be mean/ema-reduced over time).
208
+ )
209
+ # If we learn a Q-value distribution store the support and average
210
+ # probabilities.
211
+ if config.num_atoms > 1:
212
+ # Log important loss stats.
213
+ self.metrics.log_dict(
214
+ {
215
+ ATOMS: z,
216
+ # The absolute difference in expectation between the actions
217
+ # should (at least mildly) rise.
218
+ "expectations_abs_diff": torch.mean(
219
+ torch.abs(
220
+ torch.diff(
221
+ torch.sum(fwd_out[QF_PROBS].mean(dim=0) * z, dim=1)
222
+ ).mean(dim=0)
223
+ )
224
+ ),
225
+ # The total variation distance should measure the distance between
226
+ # return distributions of different actions. This should (at least
227
+ # mildly) increase during training when the agent differentiates
228
+ # more between actions.
229
+ "dist_total_variation_dist": torch.diff(
230
+ fwd_out[QF_PROBS].mean(dim=0), dim=0
231
+ )
232
+ .abs()
233
+ .sum()
234
+ * 0.5,
235
+ # The maximum distance between the action distributions. This metric
236
+ # should increase over the course of training.
237
+ "dist_max_abs_distance": torch.max(
238
+ torch.diff(fwd_out[QF_PROBS].mean(dim=0), dim=0).abs()
239
+ ),
240
+ # Mean shannon entropy of action distributions. This should decrease
241
+ # over the course of training.
242
+ "action_dist_mean_entropy": torch.mean(
243
+ (
244
+ fwd_out[QF_PROBS].mean(dim=0)
245
+ * torch.log(fwd_out[QF_PROBS].mean(dim=0))
246
+ ).sum(dim=1),
247
+ dim=0,
248
+ ),
249
+ },
250
+ key=module_id,
251
+ window=1, # <- single items (should not be mean/ema-reduced over time).
252
+ )
253
+
254
+ return total_loss
255
+
256
+ def _reset_noise(self) -> None:
257
+ # Reset the noise for all noisy modules, if necessary.
258
+ self.module.foreach_module(
259
+ lambda mid, module: (
260
+ module._reset_noise(target=True)
261
+ if hasattr(module, "_reset_noise")
262
+ else None
263
+ )
264
+ )
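
For readers of the distributional branch above: the target distribution m is built by pushing the fixed support z through the n-step Bellman backup and then splitting each shifted atom's mass between its lower- and upper-neighbor atoms. The following minimal sketch reproduces that projection for a single toy transition with v_min=-1.0, v_max=1.0, num_atoms=5; it uses scatter_add_ instead of the one-hot projection used in the loss above, but computes the same target (all numbers and names are illustrative):

    import torch

    v_min, v_max, num_atoms, gamma = -1.0, 1.0, 5, 0.99
    z = torch.linspace(v_min, v_max, num_atoms)              # fixed support atoms
    probs_next = torch.full((num_atoms,), 1.0 / num_atoms)   # target probs at next state

    reward, terminated, n_step = 0.5, 0.0, 1.0
    # Bellman backup applied to every atom, clamped back into [v_min, v_max].
    r_tau = torch.clamp(reward + (gamma ** n_step) * (1.0 - terminated) * z, v_min, v_max)
    b = (r_tau - v_min) / ((v_max - v_min) / (num_atoms - 1))  # fractional atom index
    lo, up = torch.floor(b), torch.ceil(b)

    # Split each shifted atom's probability mass between its two neighboring atoms.
    m = torch.zeros(num_atoms)
    m.scatter_add_(0, lo.long(), probs_next * (up - b + (up == lo).float()))
    m.scatter_add_(0, up.long(), probs_next * (b - lo))
    print(m, m.sum())  # projected target distribution; sums to 1.0
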
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/dqn_rainbow_torch_noisy_net.py ADDED
@@ -0,0 +1,276 @@
1
+ from typing import Callable, Dict, List, Optional, Union
2
+
3
+ from ray.rllib.algorithms.dqn.dqn_rainbow_noisy_net_configs import (
4
+ NoisyMLPEncoderConfig,
5
+ NoisyMLPHeadConfig,
6
+ )
7
+ from ray.rllib.algorithms.dqn.torch.torch_noisy_linear import NoisyLinear
8
+ from ray.rllib.core.columns import Columns
9
+ from ray.rllib.core.models.base import Encoder, ENCODER_OUT, Model
10
+ from ray.rllib.core.models.torch.base import TorchModel
11
+ from ray.rllib.models.utils import get_activation_fn, get_initializer_fn
12
+ from ray.rllib.utils.annotations import override
13
+ from ray.rllib.utils.framework import try_import_torch
14
+
15
+ torch, nn = try_import_torch()
16
+
17
+
18
+ class TorchNoisyMLPEncoder(TorchModel, Encoder):
19
+ def __init__(self, config: NoisyMLPEncoderConfig) -> None:
20
+ TorchModel.__init__(self, config)
21
+ Encoder.__init__(self, config)
22
+
23
+ # Create the noisy network.
24
+ self.net = TorchNoisyMLP(
25
+ input_dim=config.input_dims[0],
26
+ hidden_layer_dims=config.hidden_layer_dims,
27
+ hidden_layer_activation=config.hidden_layer_activation,
28
+ hidden_layer_use_layernorm=config.hidden_layer_use_layernorm,
29
+ hidden_layer_use_bias=config.hidden_layer_use_bias,
30
+ hidden_layer_weights_initializer=config.hidden_layer_weights_initializer,
31
+ hidden_layer_weights_initializer_config=(
32
+ config.hidden_layer_weights_initializer_config
33
+ ),
34
+ hidden_layer_bias_initializer=config.hidden_layer_bias_initializer,
35
+ hidden_layer_bias_initializer_config=(
36
+ config.hidden_layer_bias_initializer_config
37
+ ),
38
+ output_dim=config.output_layer_dim,
39
+ output_activation=config.output_layer_activation,
40
+ output_use_bias=config.output_layer_use_bias,
41
+ output_weights_initializer=config.output_layer_weights_initializer,
42
+ output_weights_initializer_config=(
43
+ config.output_layer_weights_initializer_config
44
+ ),
45
+ output_bias_initializer=config.output_layer_bias_initializer,
46
+ output_bias_initializer_config=config.output_layer_bias_initializer_config,
47
+ # Note, this is the only additional parameter compared to a regular MLP.
48
+ std_init=config.std_init,
49
+ )
50
+
51
+ @override(Model)
52
+ def _forward(self, inputs: dict, **kwargs) -> dict:
53
+ return {ENCODER_OUT: self.net(inputs[Columns.OBS])}
54
+
55
+ def _reset_noise(self):
56
+ # Reset the noise in the complete network.
57
+ self.net._reset_noise()
58
+
59
+
60
+ class TorchNoisyMLPHead(TorchModel):
61
+ def __init__(self, config: NoisyMLPHeadConfig) -> None:
62
+ super().__init__(config)
63
+
64
+ self.net = TorchNoisyMLP(
65
+ input_dim=config.input_dims[0],
66
+ hidden_layer_dims=config.hidden_layer_dims,
67
+ hidden_layer_activation=config.hidden_layer_activation,
68
+ hidden_layer_use_layernorm=config.hidden_layer_use_layernorm,
69
+ hidden_layer_use_bias=config.hidden_layer_use_bias,
70
+ hidden_layer_weights_initializer=config.hidden_layer_weights_initializer,
71
+ hidden_layer_weights_initializer_config=(
72
+ config.hidden_layer_weights_initializer_config
73
+ ),
74
+ hidden_layer_bias_initializer=config.hidden_layer_bias_initializer,
75
+ hidden_layer_bias_initializer_config=(
76
+ config.hidden_layer_bias_initializer_config
77
+ ),
78
+ output_dim=config.output_layer_dim,
79
+ output_activation=config.output_layer_activation,
80
+ output_use_bias=config.output_layer_use_bias,
81
+ output_weights_initializer=config.output_layer_weights_initializer,
82
+ output_weights_initializer_config=(
83
+ config.output_layer_weights_initializer_config
84
+ ),
85
+ output_bias_initializer=config.output_layer_bias_initializer,
86
+ output_bias_initializer_config=config.output_layer_bias_initializer_config,
87
+ # Note, this is the only additional parameter compared to a regular MLP.
88
+ std_init=config.std_init,
89
+ )
90
+
91
+ @override(Model)
92
+ def _forward(self, inputs: torch.Tensor, **kwargs) -> torch.Tensor:
93
+ return self.net(inputs)
94
+
95
+ def _reset_noise(self) -> None:
96
+ # Reset the noise in the complete network.
97
+ self.net._reset_noise()
98
+
99
+
100
+ class TorchNoisyMLP(nn.Module):
101
+ """A multi-layer perceptron with N dense layers.
102
+
103
+ All layers (except for an optional additional extra output layer) share the same
104
+ activation function, bias setup (use bias or not), and LayerNorm setup
105
+ (use layer normalization or not).
106
+
107
+ If `output_dim` (int) is not None, an additional, extra output dense layer is added,
108
+ which might have its own activation function (e.g. "linear"). However, the output
109
+ layer does NOT use layer normalization.
110
+ """
111
+
112
+ def __init__(
113
+ self,
114
+ *,
115
+ input_dim: int,
116
+ hidden_layer_dims: List[int],
117
+ hidden_layer_activation: Union[str, Callable] = "relu",
118
+ hidden_layer_use_bias: bool = True,
119
+ hidden_layer_use_layernorm: bool = False,
120
+ hidden_layer_weights_initializer: Optional[Union[str, Callable]] = None,
121
+ hidden_layer_weights_initializer_config: Optional[Union[str, Callable]] = None,
122
+ hidden_layer_bias_initializer: Optional[Union[str, Callable]] = None,
123
+ hidden_layer_bias_initializer_config: Optional[Dict] = None,
124
+ output_dim: Optional[int] = None,
125
+ output_use_bias: bool = True,
126
+ output_activation: Union[str, Callable] = "linear",
127
+ output_weights_initializer: Optional[Union[str, Callable]] = None,
128
+ output_weights_initializer_config: Optional[Dict] = None,
129
+ output_bias_initializer: Optional[Union[str, Callable]] = None,
130
+ output_bias_initializer_config: Optional[Dict] = None,
131
+ std_init: Optional[float] = 0.1,
132
+ ):
133
+ """Initialize a TorchMLP object.
134
+
135
+ Args:
136
+ input_dim: The input dimension of the network. Must not be None.
137
+ hidden_layer_dims: The sizes of the hidden layers. If an empty list, only a
138
+ single layer will be built of size `output_dim`.
139
+ hidden_layer_use_layernorm: Whether to insert a LayerNormalization
140
+ functionality in between each hidden layer's output and its activation.
141
+ hidden_layer_use_bias: Whether to use bias on all dense layers (excluding
142
+ the possible separate output layer).
143
+ hidden_layer_activation: The activation function to use after each layer
144
+ (except for the output). Either a torch.nn.[activation fn] callable or
145
+ the name thereof, or an RLlib recognized activation name,
146
+ e.g. "ReLU", "relu", "tanh", "SiLU", or "linear".
147
+ hidden_layer_weights_initializer: The initializer function or class to use
148
+ for weights initialization in the hidden layers. If `None` the default
149
+ initializer of the respective dense layer is used. Note, only the
150
+ in-place initializers, i.e. ending with an underscore "_" are allowed.
151
+ hidden_layer_weights_initializer_config: Configuration to pass into the
152
+ initializer defined in `hidden_layer_weights_initializer`.
153
+ hidden_layer_bias_initializer: The initializer function or class to use for
154
+ bias initialization in the hidden layers. If `None` the default
155
+ initializer of the respective dense layer is used. Note, only the
156
+ in-place initializers, i.e. ending with an underscore "_" are allowed.
157
+ hidden_layer_bias_initializer_config: Configuration to pass into the
158
+ initializer defined in `hidden_layer_bias_initializer`.
159
+ output_dim: The output dimension of the network. If None, no specific output
160
+ layer will be added and the last layer in the stack will have
161
+ size=`hidden_layer_dims[-1]`.
162
+ output_use_bias: Whether to use bias on the separate output layer,
163
+ if any.
164
+ output_activation: The activation function to use for the output layer
165
+ (if any). Either a torch.nn.[activation fn] callable or
166
+ the name thereof, or an RLlib recognized activation name,
167
+ e.g. "ReLU", "relu", "tanh", "SiLU", or "linear".
168
+ output_weights_initializer: The initializer function or class to use
169
+ for weights initialization in the output layers. If `None` the default
170
+ initializer of the respective dense layer is used. Note, only the
171
+ in-place initializers, i.e. ending with an underscore "_" are allowed.
172
+ output_weights_initializer_config: Configuration to pass into the
173
+ initializer defined in `output_weights_initializer`.
174
+ output_bias_initializer: The initializer function or class to use for
175
+ bias initialization in the output layers. If `None` the default
176
+ initializer of the respective dense layer is used. Note, only the
177
+ in-place initializers, i.e. ending with an underscore "_" are allowed.
178
+ output_bias_initializer_config: Configuration to pass into the
179
+ initializer defined in `output_bias_initializer`.
180
+ std_init: Initial value of the Gaussian standard deviation before
181
+ optimization. Defaults to `0.1`.
182
+ """
183
+ super().__init__()
184
+ assert input_dim > 0
185
+
186
+ self.input_dim = input_dim
187
+
188
+ hidden_activation = get_activation_fn(
189
+ hidden_layer_activation, framework="torch"
190
+ )
191
+ hidden_weights_initializer = get_initializer_fn(
192
+ hidden_layer_weights_initializer, framework="torch"
193
+ )
194
+ hidden_bias_initializer = get_initializer_fn(
195
+ hidden_layer_bias_initializer, framework="torch"
196
+ )
197
+ output_weights_initializer = get_initializer_fn(
198
+ output_weights_initializer, framework="torch"
199
+ )
200
+ output_bias_initializer = get_initializer_fn(
201
+ output_bias_initializer, framework="torch"
202
+ )
203
+
204
+ layers = []
205
+
206
+ dims = (
207
+ [self.input_dim]
208
+ + list(hidden_layer_dims)
209
+ + ([output_dim] if output_dim else [])
210
+ )
211
+ for i in range(0, len(dims) - 1):
212
+ # Whether we are already processing the last (special) output layer.
213
+ is_output_layer = output_dim is not None and i == len(dims) - 2
214
+
215
+ layer = NoisyLinear(
216
+ dims[i],
217
+ dims[i + 1],
218
+ bias=output_use_bias if is_output_layer else hidden_layer_use_bias,
219
+ std_init=std_init,
220
+ )
221
+
222
+ # Initialize layers, if necessary.
223
+ if is_output_layer:
224
+ # Initialize output layer weights if necessary.
225
+ if output_weights_initializer:
226
+ output_weights_initializer(
227
+ layer.weight, **output_weights_initializer_config or {}
228
+ )
229
+ # Initialize output layer bias if necessary.
230
+ if output_bias_initializer:
231
+ output_bias_initializer(
232
+ layer.bias, **output_bias_initializer_config or {}
233
+ )
234
+ # Must be hidden.
235
+ else:
236
+ # Initialize hidden layer weights if necessary.
237
+ if hidden_layer_weights_initializer:
238
+ hidden_weights_initializer(
239
+ layer.weight, **hidden_layer_weights_initializer_config or {}
240
+ )
241
+ # Initialize hidden layer bias if necessary.
242
+ if hidden_layer_bias_initializer:
243
+ hidden_bias_initializer(
244
+ layer.bias, **hidden_layer_bias_initializer_config or {}
245
+ )
246
+
247
+ layers.append(layer)
248
+
249
+ # We are still in the hidden layer section: Possibly add layernorm and
250
+ # hidden activation.
251
+ if not is_output_layer:
252
+ # Insert a layer normalization in between layer's output and
253
+ # the activation.
254
+ if hidden_layer_use_layernorm:
255
+ layers.append(nn.LayerNorm(dims[i + 1]))
256
+ # Add the activation function.
257
+ if hidden_activation is not None:
258
+ layers.append(hidden_activation())
259
+
260
+ # Add output layer's (if any) activation.
261
+ output_activation = get_activation_fn(output_activation, framework="torch")
262
+ if output_dim is not None and output_activation is not None:
263
+ layers.append(output_activation())
264
+
265
+ self.mlp = nn.Sequential(*layers)
266
+
267
+ self.expected_input_dtype = torch.float32
268
+
269
+ def forward(self, x):
270
+ return self.mlp(x.type(self.expected_input_dtype))
271
+
272
+ def _reset_noise(self):
273
+ # Reset the noise for all modules (layers).
274
+ for module in self.modules():
275
+ if hasattr(module, "reset_noise"):
276
+ module.reset_noise()
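
The layer-stacking loop in TorchNoisyMLP above follows a simple pattern: each hidden layer may be followed by LayerNorm and the hidden activation, while the optional output layer gets neither. A minimal stand-alone sketch of that pattern, using plain nn.Linear as a stand-in for NoisyLinear (sizes are illustrative only):

    import torch
    from torch import nn

    input_dim, hidden_dims, output_dim = 4, [32, 32], 2
    dims = [input_dim] + hidden_dims + [output_dim]

    layers = []
    for i in range(len(dims) - 1):
        is_output_layer = i == len(dims) - 2
        layers.append(nn.Linear(dims[i], dims[i + 1]))  # stand-in for NoisyLinear
        if not is_output_layer:
            layers.append(nn.LayerNorm(dims[i + 1]))    # optional layernorm
            layers.append(nn.ReLU())                    # hidden activation
    mlp = nn.Sequential(*layers)

    print(mlp(torch.randn(8, input_dim)).shape)  # torch.Size([8, 2])
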
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/dqn_rainbow_torch_rl_module.py ADDED
@@ -0,0 +1,337 @@
1
+ from typing import Dict, Union
2
+
3
+ from ray.rllib.algorithms.dqn.dqn_rainbow_rl_module import (
4
+ DQNRainbowRLModule,
5
+ ATOMS,
6
+ QF_LOGITS,
7
+ QF_NEXT_PREDS,
8
+ QF_PREDS,
9
+ QF_PROBS,
10
+ QF_TARGET_NEXT_PREDS,
11
+ QF_TARGET_NEXT_PROBS,
12
+ )
13
+ from ray.rllib.algorithms.dqn.torch.dqn_rainbow_torch_noisy_net import (
14
+ TorchNoisyMLPEncoder,
15
+ )
16
+ from ray.rllib.core.columns import Columns
17
+ from ray.rllib.core.models.base import Encoder, ENCODER_OUT, Model
18
+ from ray.rllib.core.rl_module.torch.torch_rl_module import TorchRLModule
19
+ from ray.rllib.core.rl_module.rl_module import RLModule
20
+ from ray.rllib.utils.annotations import override
21
+ from ray.rllib.utils.framework import try_import_torch
22
+ from ray.rllib.utils.typing import TensorType, TensorStructType
23
+
24
+ torch, nn = try_import_torch()
25
+
26
+
27
+ class DQNRainbowTorchRLModule(TorchRLModule, DQNRainbowRLModule):
28
+ framework: str = "torch"
29
+
30
+ @override(DQNRainbowRLModule)
31
+ def setup(self):
32
+ super().setup()
33
+
34
+ # Whether we use a noisy encoder. Note, a noisy encoder can only be
36
+ # used if the observation space is a flat space.
36
+ self.uses_noisy_encoder = isinstance(self.encoder, TorchNoisyMLPEncoder)
37
+
38
+ @override(RLModule)
39
+ def _forward_inference(self, batch: Dict[str, TensorType]) -> Dict[str, TensorType]:
40
+ output = {}
41
+
42
+ # Set the module into evaluation mode, if needed.
43
+ if self.uses_noisy and self.training:
44
+ # This sets the weights and bias to their constant version.
45
+ self.eval()
46
+
47
+ # Q-network forward pass.
48
+ qf_outs = self._qf(batch)
49
+
50
+ # Get action distribution.
51
+ action_dist_cls = self.get_exploration_action_dist_cls()
52
+ action_dist = action_dist_cls.from_logits(qf_outs[QF_PREDS])
53
+ # Note, the deterministic version of the categorical distribution
54
+ # outputs directly the `argmax` of the logits.
55
+ exploit_actions = action_dist.to_deterministic().sample()
56
+
57
+ # In inference, we only need the exploitation actions.
58
+ output[Columns.ACTIONS] = exploit_actions
59
+
60
+ return output
61
+
62
+ @override(RLModule)
63
+ def _forward_exploration(
64
+ self, batch: Dict[str, TensorType], t: int
65
+ ) -> Dict[str, TensorType]:
66
+ output = {}
67
+
68
+ # Resample the noise for the noisy layers, if needed.
69
+ if self.uses_noisy:
70
+ # We want to resample the noise every time we step.
71
+ self._reset_noise(target=False)
72
+ if not self.training:
73
+ # Set the module into training mode. This sets
74
+ # the weights and bias to their noisy version.
75
+ self.train(True)
76
+
77
+ # Q-network forward pass.
78
+ qf_outs = self._qf(batch)
79
+
80
+ # Get action distribution.
81
+ action_dist_cls = self.get_exploration_action_dist_cls()
82
+ action_dist = action_dist_cls.from_logits(qf_outs[QF_PREDS])
83
+ # Note, the deterministic version of the categorical distribution
84
+ # outputs directly the `argmax` of the logits.
85
+ exploit_actions = action_dist.to_deterministic().sample()
86
+
87
+ # In case of noisy networks the parameter noise is sufficient for
88
+ # variation in exploration.
89
+ if self.uses_noisy:
90
+ # Use the exploitation action (coming from the noisy network).
91
+ output[Columns.ACTIONS] = exploit_actions
92
+ # Otherwise we need epsilon greedy to support exploration.
93
+ else:
94
+ # TODO (simon): Implement sampling for nested spaces.
95
+ # Update scheduler.
96
+ self.epsilon_schedule.update(t)
97
+ # Get the actual epsilon,
98
+ epsilon = self.epsilon_schedule.get_current_value()
99
+ # Apply epsilon-greedy exploration.
100
+ B = qf_outs[QF_PREDS].shape[0]
101
+ random_actions = torch.squeeze(
102
+ torch.multinomial(
103
+ (torch.nan_to_num(qf_outs[QF_PREDS], neginf=0.0) != 0.0).float(),
104
+ num_samples=1,
105
+ ),
106
+ dim=1,
107
+ )
108
+ output[Columns.ACTIONS] = torch.where(
109
+ torch.rand((B,)) < epsilon,
110
+ random_actions,
111
+ exploit_actions,
112
+ )
113
+
114
+ return output
115
+
116
+ @override(RLModule)
117
+ def _forward_train(
118
+ self, batch: Dict[str, TensorType]
119
+ ) -> Dict[str, TensorStructType]:
120
+ if self.inference_only:
121
+ raise RuntimeError(
122
+ "Trying to train a module that is not a learner module. Set the "
123
+ "flag `inference_only=False` when building the module."
124
+ )
125
+ output = {}
126
+
127
+ # Set module into training mode.
128
+ if self.uses_noisy and not self.training:
129
+ # This sets the weights and bias to their noisy version.
130
+ self.train(True)
131
+
132
+ # If we use a double-Q setup.
133
+ if self.uses_double_q:
134
+ # Then we need to make a single forward pass with both,
135
+ # current and next observations.
136
+ batch_base = {
137
+ Columns.OBS: torch.concat(
138
+ [batch[Columns.OBS], batch[Columns.NEXT_OBS]], dim=0
139
+ ),
140
+ }
141
+ # Otherwise we can just use the current observations.
142
+ else:
143
+ batch_base = {Columns.OBS: batch[Columns.OBS]}
144
+ batch_target = {Columns.OBS: batch[Columns.NEXT_OBS]}
145
+
146
+ # Q-network forward passes.
147
+ qf_outs = self._qf(batch_base)
148
+ if self.uses_double_q:
149
+ output[QF_PREDS], output[QF_NEXT_PREDS] = torch.chunk(
150
+ qf_outs[QF_PREDS], chunks=2, dim=0
151
+ )
152
+ else:
153
+ output[QF_PREDS] = qf_outs[QF_PREDS]
154
+ # The target Q-values for the next observations.
155
+ qf_target_next_outs = self.forward_target(batch_target)
156
+ output[QF_TARGET_NEXT_PREDS] = qf_target_next_outs[QF_PREDS]
157
+ # We are learning a Q-value distribution.
158
+ if self.num_atoms > 1:
159
+ # Add distribution artefacts to the output.
160
+ # Distribution support.
161
+ output[ATOMS] = qf_target_next_outs[ATOMS]
162
+ # Original logits from the Q-head.
163
+ output[QF_LOGITS] = qf_outs[QF_LOGITS]
164
+ # Probabilities of the Q-value distribution of the current state.
165
+ output[QF_PROBS] = qf_outs[QF_PROBS]
166
+ # Probabilities of the target Q-value distribution of the next state.
167
+ output[QF_TARGET_NEXT_PROBS] = qf_target_next_outs[QF_PROBS]
168
+
169
+ return output
170
+
171
+ @override(DQNRainbowRLModule)
172
+ def _af_dist(self, batch: Dict[str, TensorType]) -> Dict[str, TensorType]:
173
+ """Compute the advantage distribution.
174
+
175
+ Note this distribution is identical to the Q-distribution in
176
+ case no dueling architecture is used.
177
+
178
+ Args:
179
+ batch: A dictionary containing a tensor with the outputs of the
180
+ forward pass of the Q-head or advantage stream head.
181
+
182
+ Returns:
183
+ A `dict` containing the support of the discrete distribution for
184
+ either Q-values or advantages (in case of a dueling architecture),
185
+ ("atoms"), the logits per action and atom and the probabilities
186
+ of the discrete distribution (per action and atom of the support).
187
+ """
188
+ output = {}
189
+ # Distributional Q-learning uses a discrete support `z`
190
+ # to represent the action value distribution.
191
+ # TODO (simon): Check, if we still need here the device for torch.
192
+ z = torch.arange(0.0, self.num_atoms, dtype=torch.float32).to(
193
+ batch.device,
194
+ )
195
+ # Rescale the support.
196
+ z = self.v_min + z * (self.v_max - self.v_min) / float(self.num_atoms - 1)
197
+ # Reshape the action values.
198
+ # NOTE: Handcrafted action shape.
199
+ logits_per_action_per_atom = torch.reshape(
200
+ batch, shape=(-1, self.action_space.n, self.num_atoms)
201
+ )
202
+ # Calculate the probability for each action value atom. Note,
203
+ # the sum along action value atoms of a single action value
204
+ # must sum to one.
205
+ prob_per_action_per_atom = nn.functional.softmax(
206
+ logits_per_action_per_atom,
207
+ dim=-1,
208
+ )
209
+ # Compute expected action value by weighted sum.
210
+ output[ATOMS] = z
211
+ output["logits"] = logits_per_action_per_atom
212
+ output["probs"] = prob_per_action_per_atom
213
+
214
+ return output
215
+
216
+ # TODO (simon): Test, if providing the function with a `return_probs`
217
+ # improves performance significantly.
218
+ @override(DQNRainbowRLModule)
219
+ def _qf_forward_helper(
220
+ self,
221
+ batch: Dict[str, TensorType],
222
+ encoder: Encoder,
223
+ head: Union[Model, Dict[str, Model]],
224
+ ) -> Dict[str, TensorType]:
225
+ """Computes Q-values.
226
+
227
+ This is a helper function that takes care of all different cases,
228
+ i.e. if we use a dueling architecture or not and if we use distributional
229
+ Q-learning or not.
230
+
231
+ Args:
232
+ batch: The batch received in the forward pass.
233
+ encoder: The encoder network to use. Here we have a single encoder
234
+ for all heads (Q or advantages and value in case of a dueling
235
+ architecture).
236
+ head: Either a head model or a dictionary of head model (dueling
237
+ architecture) containing advantage and value stream heads.
238
+
239
+ Returns:
240
+ In case of expectation learning the Q-value predictions ("qf_preds")
241
+ and in case of distributional Q-learning in addition to the predictions
242
+ the atoms ("atoms"), the Q-value predictions ("qf_preds"), the Q-logits
243
+ ("qf_logits") and the probabilities for the support atoms ("qf_probs").
244
+ """
245
+ output = {}
246
+
247
+ # Encoder forward pass.
248
+ encoder_outs = encoder(batch)
249
+
250
+ # Do we have a dueling architecture.
251
+ if self.uses_dueling:
252
+ # Head forward passes for advantage and value stream.
253
+ qf_outs = head["af"](encoder_outs[ENCODER_OUT])
254
+ vf_outs = head["vf"](encoder_outs[ENCODER_OUT])
255
+ # We learn a Q-value distribution.
256
+ if self.num_atoms > 1:
257
+ # Compute the advantage stream distribution.
258
+ af_dist_output = self._af_dist(qf_outs)
259
+ # Center the advantage stream distribution.
260
+ centered_af_logits = af_dist_output["logits"] - af_dist_output[
261
+ "logits"
262
+ ].mean(dim=1, keepdim=True)
263
+ # Calculate the Q-value distribution by adding advantage and
264
+ # value stream.
265
+ qf_logits = centered_af_logits + vf_outs.unsqueeze(dim=-1)
266
+ # Calculate probabilities for the Q-value distribution along
267
+ # the support given by the atoms.
268
+ qf_probs = nn.functional.softmax(qf_logits, dim=-1)
269
+ # Return also the support as we need it in the learner.
270
+ output[ATOMS] = af_dist_output[ATOMS]
271
+ # Calculate the Q-values by the weighted sum over the atoms.
272
+ output[QF_PREDS] = torch.sum(af_dist_output[ATOMS] * qf_probs, dim=-1)
273
+ output[QF_LOGITS] = qf_logits
274
+ output[QF_PROBS] = qf_probs
275
+ # Otherwise we learn an expectation.
276
+ else:
277
+ # Center advantages. Note, we cannot do an in-place operation here
278
+ # b/c we backpropagate through these values. See for a discussion
279
+ # https://discuss.pytorch.org/t/gradient-computation-issue-due-to-
280
+ # inplace-operation-unsure-how-to-debug-for-custom-model/170133
281
+ # Has to be a mean for each batch element.
282
+ af_outs_mean = torch.unsqueeze(
283
+ torch.nan_to_num(qf_outs, neginf=torch.nan).nanmean(dim=1), dim=1
284
+ )
285
+ qf_outs = qf_outs - af_outs_mean
286
+ # Add advantage and value stream. Note, we broadcast here.
287
+ output[QF_PREDS] = qf_outs + vf_outs
288
+ # No dueling architecture.
289
+ else:
290
+ # Note, in this case the advantage network is the Q-network.
291
+ # Forward pass through Q-head.
292
+ qf_outs = head(encoder_outs[ENCODER_OUT])
293
+ # We learn a Q-value distribution.
294
+ if self.num_atoms > 1:
295
+ # Note in a non-dueling architecture the advantage distribution is
296
+ # the Q-value distribution.
297
+ # Get the Q-value distribution.
298
+ qf_dist_outs = self._af_dist(qf_outs)
299
+ # Get the support of the Q-value distribution.
300
+ output[ATOMS] = qf_dist_outs[ATOMS]
301
+ # Calculate the Q-values by the weighted sum over the atoms.
302
+ output[QF_PREDS] = torch.sum(
303
+ qf_dist_outs[ATOMS] * qf_dist_outs["probs"], dim=-1
304
+ )
305
+ output[QF_LOGITS] = qf_dist_outs["logits"]
306
+ output[QF_PROBS] = qf_dist_outs["probs"]
307
+ # Otherwise we learn an expectation.
308
+ else:
309
+ # In this case we have a Q-head of dimension (1, action_space.n).
310
+ output[QF_PREDS] = qf_outs
311
+
312
+ return output
313
+
314
+ @override(DQNRainbowRLModule)
315
+ def _reset_noise(self, target: bool = False) -> None:
316
+ """Reset the noise of all noisy layers.
317
+
318
+ Args:
319
+ target: Whether to reset the noise of the target networks.
320
+ """
321
+ if self.uses_noisy:
322
+ if self.uses_noisy_encoder:
323
+ self.encoder._reset_noise()
324
+ self.af._reset_noise()
325
+ # If we have a dueling architecture we need to reset the noise
326
+ # of the value stream, too.
327
+ if self.uses_dueling:
328
+ self.vf._reset_noise()
329
+ # Reset the noise of the target networks, if requested.
330
+ if target:
331
+ if self.uses_noisy_encoder:
332
+ self._target_encoder._reset_noise()
333
+ self._target_af._reset_noise()
334
+ # If we have a dueling architecture we need to reset the noise
335
+ # of the value stream, too.
336
+ if self.uses_dueling:
337
+ self._target_vf._reset_noise()
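
The epsilon-greedy branch of _forward_exploration above picks, per batch element, either the greedy action or a uniformly sampled valid action, depending on a Bernoulli(epsilon) draw. A minimal stand-alone sketch of that selection pattern with toy Q-values (no RLlib dependencies; names are illustrative):

    import torch

    q_preds = torch.tensor([[0.1, 0.9, 0.3],
                            [0.4, 0.2, 0.7]])   # (B, num_actions), toy values
    epsilon = 0.1
    B = q_preds.shape[0]

    exploit_actions = torch.argmax(q_preds, dim=1)
    # Sample uniformly among actions whose (masked) Q-value is non-zero.
    valid_mask = (torch.nan_to_num(q_preds, neginf=0.0) != 0.0).float()
    random_actions = torch.multinomial(valid_mask, num_samples=1).squeeze(dim=1)

    actions = torch.where(torch.rand((B,)) < epsilon, random_actions, exploit_actions)
    print(actions)
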
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dqn/torch/torch_noisy_linear.py ADDED
@@ -0,0 +1,147 @@
1
+ import math
2
+ from typing import Optional, Sequence, Union
3
+ from ray.rllib.utils.framework import try_import_torch
4
+
5
+ torch, nn = try_import_torch()
6
+
7
+ DEVICE_TYPING = Union[torch.device, str, int]
8
+
9
+
10
+ class NoisyLinear(nn.Linear):
11
+ """Noisy Linear Layer.
12
+
13
+ Presented in "Noisy Networks for Exploration",
14
+ https://arxiv.org/abs/1706.10295v3, implemented along the lines of
15
+ `torchrl`'s `NoisyLinear` layer.
16
+
17
+ A Noisy Linear Layer is a linear layer with parametric noise added to
18
+ the weights. This induced stochasticity can be used in RL networks for
19
+ the agent's policy to aid efficient exploration. The parameters of the
20
+ noise are learned with gradient descent along with any other remaining
21
+ network weights. Factorized Gaussian noise is the type of noise usually
22
+ employed.
23
+
24
+
25
+ Args:
26
+ in_features: Input features dimension.
27
+ out_features: Out features dimension.
28
+ bias: If `True`, a bias term will be added to the matrix
29
+ multiplication: `Ax + b`. Defaults to `True`.
30
+ device: Device of the layer. Defaults to `"cpu"`.
31
+ dtype: `dtype` of the parameters. Defaults to `None` (default `torch`
32
+ `dtype`).
33
+ std_init: Initial value of the Gaussian standard deviation before
34
+ optimization. Defaults to `0.1`.
35
+
36
+ """
37
+
38
+ def __init__(
39
+ self,
40
+ in_features: int,
41
+ out_features: int,
42
+ bias: bool = True,
43
+ device: Optional[DEVICE_TYPING] = None,
44
+ dtype: Optional[torch.dtype] = None,
45
+ std_init: float = 0.1,
46
+ ):
47
+ nn.Module.__init__(self)
48
+ self.in_features = int(in_features)
49
+ self.out_features = int(out_features)
50
+ self.std_init = std_init
51
+
52
+ self.weight_mu = nn.Parameter(
53
+ torch.empty(
54
+ out_features,
55
+ in_features,
56
+ device=device,
57
+ dtype=dtype,
58
+ requires_grad=True,
59
+ )
60
+ )
61
+ self.weight_sigma = nn.Parameter(
62
+ torch.empty(
63
+ out_features,
64
+ in_features,
65
+ device=device,
66
+ dtype=dtype,
67
+ requires_grad=True,
68
+ )
69
+ )
70
+ self.register_buffer(
71
+ "weight_epsilon",
72
+ torch.empty(out_features, in_features, device=device, dtype=dtype),
73
+ )
74
+ if bias:
75
+ self.bias_mu = nn.Parameter(
76
+ torch.empty(
77
+ out_features,
78
+ device=device,
79
+ dtype=dtype,
80
+ requires_grad=True,
81
+ )
82
+ )
83
+ self.bias_sigma = nn.Parameter(
84
+ torch.empty(
85
+ out_features,
86
+ device=device,
87
+ dtype=dtype,
88
+ requires_grad=True,
89
+ )
90
+ )
91
+ self.register_buffer(
92
+ "bias_epsilon",
93
+ torch.empty(out_features, device=device, dtype=dtype),
94
+ )
95
+ else:
96
+ self.bias_mu = None
97
+ self.reset_parameters()
98
+ self.reset_noise()
99
+ self.training = True
100
+
101
+ @torch.no_grad()
102
+ def reset_parameters(self) -> None:
103
+ # Use initialization for factorized noisy linear layers.
104
+ mu_range = 1 / math.sqrt(self.in_features)
105
+ # Initialize weight distribution parameters.
106
+ self.weight_mu.data.uniform_(-mu_range, mu_range)
107
+ self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))
108
+ # If bias is used, initialize these parameters, too.
109
+ if self.bias_mu is not None:
110
+ self.bias_mu.data.zero_() # (-mu_range, mu_range)
111
+ self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))
112
+
113
+ @torch.no_grad()
114
+ def reset_noise(self) -> None:
115
+ with torch.no_grad():
116
+ # Use factorized noise for better performance.
117
+ epsilon_in = self._scale_noise(self.in_features)
118
+ epsilon_out = self._scale_noise(self.out_features)
119
+ self.weight_epsilon.copy_(epsilon_out.outer(epsilon_in))
120
+ if self.bias_mu is not None:
121
+ self.bias_epsilon.copy_(epsilon_out)
122
+
123
+ @torch.no_grad()
124
+ def _scale_noise(self, size: Union[int, torch.Size, Sequence]) -> torch.Tensor:
125
+ if isinstance(size, int):
126
+ size = (size,)
127
+ x = torch.randn(*size, device=self.weight_mu.device)
128
+ return x.sign().mul_(x.abs().sqrt_())
129
+
130
+ @property
131
+ def weight(self) -> torch.Tensor:
132
+ if self.training:
133
+ # If in training mode, sample the noise.
134
+ return self.weight_mu + self.weight_sigma * self.weight_epsilon
135
+ else:
136
+ return self.weight_mu
137
+
138
+ @property
139
+ def bias(self) -> Optional[torch.Tensor]:
140
+ if self.bias_mu is not None:
141
+ if self.training:
142
+ # If in training mode, sample the noise.
143
+ return self.bias_mu + self.bias_sigma * self.bias_epsilon
144
+ else:
145
+ return self.bias_mu
146
+ else:
147
+ return None
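
The factorized Gaussian noise used by NoisyLinear above draws one noise vector per input and one per output, rescales each with f(x) = sign(x) * sqrt(|x|), and forms the weight noise as their outer product; in training mode the effective weight is mu + sigma * epsilon. A minimal sketch with plain torch (the shapes and the zero/constant parameter values are illustrative only, the real layer learns mu and sigma):

    import torch

    in_features, out_features, std_init = 3, 2, 0.1

    def scale_noise(size: int) -> torch.Tensor:
        x = torch.randn(size)
        return x.sign() * x.abs().sqrt()   # f(x) = sign(x) * sqrt(|x|)

    eps_in, eps_out = scale_noise(in_features), scale_noise(out_features)
    weight_epsilon = torch.outer(eps_out, eps_in)   # factorized noise, (out, in)
    bias_epsilon = eps_out

    # Illustrative parameter values (the real layer learns these).
    weight_mu = torch.zeros(out_features, in_features)
    weight_sigma = torch.full((out_features, in_features), std_init / in_features ** 0.5)

    noisy_weight = weight_mu + weight_sigma * weight_epsilon  # used in training mode only
    print(noisy_weight.shape)  # torch.Size([2, 3])
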
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ """
2
+ [1] Mastering Diverse Domains through World Models - 2023
3
+ D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap
4
+ https://arxiv.org/pdf/2301.04104v1.pdf
5
+
6
+ [2] Mastering Atari with Discrete World Models - 2021
7
+ D. Hafner, T. Lillicrap, M. Norouzi, J. Ba
8
+ https://arxiv.org/pdf/2010.02193.pdf
9
+ """
10
+ from ray.rllib.algorithms.dreamerv3.dreamerv3 import DreamerV3, DreamerV3Config
11
+
12
+ __all__ = [
13
+ "DreamerV3",
14
+ "DreamerV3Config",
15
+ ]
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (615 Bytes). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3.cpython-310.pyc ADDED
Binary file (21.3 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3_catalog.cpython-310.pyc ADDED
Binary file (2.53 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3_learner.cpython-310.pyc ADDED
Binary file (1.41 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/__pycache__/dreamerv3_rl_module.cpython-310.pyc ADDED
Binary file (4.21 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/dreamerv3_catalog.py ADDED
@@ -0,0 +1,80 @@
1
+ import gymnasium as gym
2
+
3
+ from ray.rllib.core.models.catalog import Catalog
4
+ from ray.rllib.core.models.base import Encoder, Model
5
+ from ray.rllib.utils import override
6
+
7
+
8
+ class DreamerV3Catalog(Catalog):
9
+ """The Catalog class used to build all the models needed for DreamerV3 training."""
10
+
11
+ def __init__(
12
+ self,
13
+ observation_space: gym.Space,
14
+ action_space: gym.Space,
15
+ model_config_dict: dict,
16
+ ):
17
+ """Initializes a DreamerV3Catalog instance.
18
+
19
+ Args:
20
+ observation_space: The observation space of the environment.
21
+ action_space: The action space of the environment.
22
+ model_config_dict: The model config to use.
23
+ """
24
+ super().__init__(
25
+ observation_space=observation_space,
26
+ action_space=action_space,
27
+ model_config_dict=model_config_dict,
28
+ )
29
+
30
+ self.model_size = self._model_config_dict["model_size"]
31
+ self.is_img_space = len(self.observation_space.shape) in [2, 3]
32
+ self.is_gray_scale = (
33
+ self.is_img_space and len(self.observation_space.shape) == 2
34
+ )
35
+
36
+ # TODO (sven): We should work with sub-component configurations here,
37
+ # and even try replacing all current Dreamer model components with
38
+ # our default primitives. But for now, we'll construct the DreamerV3Model
39
+ # directly in our `build_...()` methods.
40
+
41
+ @override(Catalog)
42
+ def build_encoder(self, framework: str) -> Encoder:
43
+ """Builds the World-Model's encoder network depending on the obs space."""
44
+ if framework != "tf2":
45
+ raise NotImplementedError
46
+
47
+ if self.is_img_space:
48
+ from ray.rllib.algorithms.dreamerv3.tf.models.components.cnn_atari import (
49
+ CNNAtari,
50
+ )
51
+
52
+ return CNNAtari(model_size=self.model_size)
53
+ else:
54
+ from ray.rllib.algorithms.dreamerv3.tf.models.components.mlp import MLP
55
+
56
+ return MLP(model_size=self.model_size, name="vector_encoder")
57
+
58
+ def build_decoder(self, framework: str) -> Model:
59
+ """Builds the World-Model's decoder network depending on the obs space."""
60
+ if framework != "tf2":
61
+ raise NotImplementedError
62
+
63
+ if self.is_img_space:
64
+ from ray.rllib.algorithms.dreamerv3.tf.models.components import (
65
+ conv_transpose_atari,
66
+ )
67
+
68
+ return conv_transpose_atari.ConvTransposeAtari(
69
+ model_size=self.model_size,
70
+ gray_scaled=self.is_gray_scale,
71
+ )
72
+ else:
73
+ from ray.rllib.algorithms.dreamerv3.tf.models.components import (
74
+ vector_decoder,
75
+ )
76
+
77
+ return vector_decoder.VectorDecoder(
78
+ model_size=self.model_size,
79
+ observation_space=self.observation_space,
80
+ )
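
The image/vector dispatch in DreamerV3Catalog above reduces to inspecting the observation-space shape: rank-2 and rank-3 Boxes are treated as images, rank-2 additionally as gray-scale. A small illustrative check with gymnasium spaces (toy shapes only):

    import gymnasium as gym
    import numpy as np

    for obs_space in (
        gym.spaces.Box(-1.0, 1.0, shape=(17,), dtype=np.float32),   # vector obs
        gym.spaces.Box(0, 255, shape=(64, 64), dtype=np.uint8),     # gray-scale image
        gym.spaces.Box(0, 255, shape=(64, 64, 3), dtype=np.uint8),  # RGB image
    ):
        is_img_space = len(obs_space.shape) in [2, 3]
        is_gray_scale = is_img_space and len(obs_space.shape) == 2
        print(obs_space.shape, is_img_space, is_gray_scale)
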
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/dreamerv3_learner.py ADDED
@@ -0,0 +1,31 @@
1
+ """
2
+ [1] Mastering Diverse Domains through World Models - 2023
3
+ D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap
4
+ https://arxiv.org/pdf/2301.04104v1.pdf
5
+
6
+ [2] Mastering Atari with Discrete World Models - 2021
7
+ D. Hafner, T. Lillicrap, M. Norouzi, J. Ba
8
+ https://arxiv.org/pdf/2010.02193.pdf
9
+ """
10
+ from ray.rllib.core.learner.learner import Learner
11
+ from ray.rllib.utils.annotations import (
12
+ override,
13
+ OverrideToImplementCustomLogic_CallToSuperRecommended,
14
+ )
15
+
16
+
17
+ class DreamerV3Learner(Learner):
18
+ """DreamerV3 specific Learner class.
19
+
20
+ Only implements the `after_gradient_based_update()` method to define the logic
21
+ for updating the critic EMA-copy after each training step.
22
+ """
23
+
24
+ @OverrideToImplementCustomLogic_CallToSuperRecommended
25
+ @override(Learner)
26
+ def after_gradient_based_update(self, *, timesteps):
27
+ super().after_gradient_based_update(timesteps=timesteps)
28
+
29
+ # Update EMA weights of the critic.
30
+ for module_id, module in self.module._rl_modules.items():
31
+ module.critic.update_ema()
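
The update_ema() call above maintains an exponential moving average copy of the critic weights after each gradient-based update. The general rule is ema <- decay * ema + (1 - decay) * current; the sketch below uses plain torch tensors and a hypothetical decay of 0.98 (the actual coefficient lives inside the critic network, which is not part of this diff):

    import torch

    ema_decay = 0.98  # hypothetical value, for illustration only
    fast_weights = [torch.randn(4, 4), torch.randn(4)]   # current critic params
    ema_weights = [w.clone() for w in fast_weights]       # EMA copy

    for ema_w, w in zip(ema_weights, fast_weights):
        # EMA update rule: ema <- decay * ema + (1 - decay) * current.
        ema_w.mul_(ema_decay).add_((1.0 - ema_decay) * w)
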
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/__init__.py ADDED
File without changes
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/__pycache__/dreamerv3_tf_learner.cpython-310.pyc ADDED
Binary file (19.8 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/__pycache__/critic_network.cpython-310.pyc ADDED
Binary file (6.07 kB). View file
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/actor_network.py ADDED
@@ -0,0 +1,203 @@
1
+ """
2
+ [1] Mastering Diverse Domains through World Models - 2023
3
+ D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap
4
+ https://arxiv.org/pdf/2301.04104v1.pdf
5
+ """
6
+ import gymnasium as gym
7
+ from gymnasium.spaces import Box, Discrete
8
+ import numpy as np
9
+
10
+ from ray.rllib.algorithms.dreamerv3.tf.models.components.mlp import MLP
11
+ from ray.rllib.algorithms.dreamerv3.utils import (
12
+ get_gru_units,
13
+ get_num_z_categoricals,
14
+ get_num_z_classes,
15
+ )
16
+ from ray.rllib.utils.framework import try_import_tf, try_import_tfp
17
+
18
+ _, tf, _ = try_import_tf()
19
+ tfp = try_import_tfp()
20
+
21
+
22
+ class ActorNetwork(tf.keras.Model):
23
+ """The `actor` (policy net) of DreamerV3.
24
+
25
+ Consists of a simple MLP for Discrete actions and two MLPs for cont. actions (mean
26
+ and stddev).
27
+ Also contains two scalar variables to keep track of the percentile-5 and
28
+ percentile-95 values of the computed value targets within a batch. This is used to
29
+ compute the "scaled value targets" for actor learning. These two variables decay
30
+ over time exponentially (see [1] for more details).
31
+ """
32
+
33
+ def __init__(
34
+ self,
35
+ *,
36
+ model_size: str = "XS",
37
+ action_space: gym.Space,
38
+ ):
39
+ """Initializes an ActorNetwork instance.
40
+
41
+ Args:
42
+ model_size: The "Model Size" used according to [1] Appendix B.
43
+ Use None for manually setting the different network sizes.
44
+ action_space: The action space of the environment used.
45
+ """
46
+ super().__init__(name="actor")
47
+
48
+ self.model_size = model_size
49
+ self.action_space = action_space
50
+
51
+ # The EMA decay variables used for the [Percentile(R, 95%) - Percentile(R, 5%)]
52
+ # diff to scale value targets for the actor loss.
53
+ self.ema_value_target_pct5 = tf.Variable(
54
+ np.nan, trainable=False, name="value_target_pct5"
55
+ )
56
+ self.ema_value_target_pct95 = tf.Variable(
57
+ np.nan, trainable=False, name="value_target_pct95"
58
+ )
59
+
60
+ # For discrete actions, use a single MLP that computes logits.
61
+ if isinstance(self.action_space, Discrete):
62
+ self.mlp = MLP(
63
+ model_size=self.model_size,
64
+ output_layer_size=self.action_space.n,
65
+ name="actor_mlp",
66
+ )
67
+ # For cont. actions, use separate MLPs for Gaussian mean and stddev.
68
+ # TODO (sven): In the author's original code repo, this is NOT the case,
69
+ # inputs are pushed through a shared MLP, then only the two output linear
70
+ # layers are separate for std- and mean logits.
71
+ elif isinstance(action_space, Box):
72
+ output_layer_size = np.prod(action_space.shape)
73
+ self.mlp = MLP(
74
+ model_size=self.model_size,
75
+ output_layer_size=output_layer_size,
76
+ name="actor_mlp_mean",
77
+ )
78
+ self.std_mlp = MLP(
79
+ model_size=self.model_size,
80
+ output_layer_size=output_layer_size,
81
+ name="actor_mlp_std",
82
+ )
83
+ else:
84
+ raise ValueError(f"Invalid action space: {action_space}")
85
+
86
+ # Trace self.call.
87
+ dl_type = tf.keras.mixed_precision.global_policy().compute_dtype or tf.float32
88
+ self.call = tf.function(
89
+ input_signature=[
90
+ tf.TensorSpec(shape=[None, get_gru_units(model_size)], dtype=dl_type),
91
+ tf.TensorSpec(
92
+ shape=[
93
+ None,
94
+ get_num_z_categoricals(model_size),
95
+ get_num_z_classes(model_size),
96
+ ],
97
+ dtype=dl_type,
98
+ ),
99
+ ]
100
+ )(self.call)
101
+
102
+ def call(self, h, z):
103
+ """Performs a forward pass through this policy network.
104
+
105
+ Args:
106
+ h: The deterministic hidden state of the sequence model. [B, dim(h)].
107
+ z: The stochastic discrete representations of the original
108
+ observation input. [B, num_categoricals, num_classes].
109
+ """
110
+ # Flatten last two dims of z.
111
+ assert len(z.shape) == 3
112
+ z_shape = tf.shape(z)
113
+ z = tf.reshape(z, shape=(z_shape[0], -1))
114
+ assert len(z.shape) == 2
115
+ out = tf.concat([h, z], axis=-1)
116
+ out.set_shape(
117
+ [
118
+ None,
119
+ (
120
+ get_num_z_categoricals(self.model_size)
121
+ * get_num_z_classes(self.model_size)
122
+ + get_gru_units(self.model_size)
123
+ ),
124
+ ]
125
+ )
126
+ # Send h-cat-z through MLP.
127
+ action_logits = tf.cast(self.mlp(out), tf.float32)
128
+
129
+ if isinstance(self.action_space, Discrete):
130
+ action_probs = tf.nn.softmax(action_logits)
131
+
132
+ # Add the unimix weighting (1% uniform) to the probs.
133
+ # See [1]: "Unimix categoricals: We parameterize the categorical
134
+ # distributions for the world model representations and dynamics, as well as
135
+ # for the actor network, as mixtures of 1% uniform and 99% neural network
136
+ # output to ensure a minimal amount of probability mass on every class and
137
+ # thus keep log probabilities and KL divergences well behaved."
138
+ action_probs = 0.99 * action_probs + 0.01 * (1.0 / self.action_space.n)
139
+
140
+ # Danijar's code does: distr = [Distr class](logits=tf.log(probs)).
141
+ # Not sure why we don't directly use the already available probs instead.
142
+ action_logits = tf.math.log(action_probs)
143
+
144
+ # Distribution parameters are the log(probs) directly.
145
+ distr_params = action_logits
146
+ distr = self.get_action_dist_object(distr_params)
147
+
148
+ action = tf.stop_gradient(distr.sample()) + (
149
+ action_probs - tf.stop_gradient(action_probs)
150
+ )
151
+
152
+ elif isinstance(self.action_space, Box):
153
+ # Send h-cat-z through MLP to compute stddev logits for Normal dist
154
+ std_logits = tf.cast(self.std_mlp(out), tf.float32)
155
+ # minstd, maxstd taken from configs.yaml in [1].
156
+ minstd = 0.1
157
+ maxstd = 1.0
158
+
159
+ # Distribution parameters are the squashed std_logits and the tanh'd
160
+ # mean logits.
161
+ # squash std_logits from (-inf, inf) to (minstd, maxstd)
162
+ std_logits = (maxstd - minstd) * tf.sigmoid(std_logits + 2.0) + minstd
163
+ mean_logits = tf.tanh(action_logits)
164
+
165
+ distr_params = tf.concat([mean_logits, std_logits], axis=-1)
166
+ distr = self.get_action_dist_object(distr_params)
167
+
168
+ action = distr.sample()
169
+
170
+ return action, distr_params
171
+
172
+ def get_action_dist_object(self, action_dist_params_T_B):
173
+ """Helper method to create an action distribution object from (T, B, ..) params.
174
+
175
+ Args:
176
+ action_dist_params_T_B: The time-major action distribution parameters.
177
+ This could be simply the logits (discrete) or a to-be-split-in-2
178
+ tensor for mean and stddev (continuous).
179
+
180
+ Returns:
181
+ The tfp action distribution object, from which one can sample, compute
182
+ log probs, entropy, etc..
183
+ """
184
+ if isinstance(self.action_space, gym.spaces.Discrete):
185
+ # Create the distribution object using the unimix'd logits.
186
+ distr = tfp.distributions.OneHotCategorical(
187
+ logits=action_dist_params_T_B,
188
+ dtype=tf.float32,
189
+ )
190
+
191
+ elif isinstance(self.action_space, gym.spaces.Box):
192
+ # Compute Normal distribution from action_logits and std_logits
193
+ loc, scale = tf.split(action_dist_params_T_B, 2, axis=-1)
194
+ distr = tfp.distributions.Normal(loc=loc, scale=scale)
195
+
196
+ # If action_space is a box with multiple dims, make individual dims
197
+ # independent.
198
+ distr = tfp.distributions.Independent(distr, len(self.action_space.shape))
199
+
200
+ else:
201
+ raise ValueError(f"Action space {self.action_space} not supported!")
202
+
203
+ return distr
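The actor above combines two tricks for discrete actions: unimix (1% uniform mixed into the softmax output) and straight-through gradients through the sampled one-hot action. Below is a minimal standalone sketch of just that pattern, assuming TensorFlow and TensorFlow Probability are installed; the helper name `demo_unimix_sample` and its defaults are illustrative, not RLlib API.

import tensorflow as tf
import tensorflow_probability as tfp

def demo_unimix_sample(logits, unimix=0.01):
    # Mix 99% network output with 1% uniform so every class keeps some probability mass.
    probs = tf.nn.softmax(logits)
    num_classes = tf.cast(tf.shape(logits)[-1], tf.float32)
    probs = (1.0 - unimix) * probs + unimix / num_classes
    # Re-derive logits from the mixed probs (as the code above does) and sample a one-hot action.
    dist = tfp.distributions.OneHotCategorical(logits=tf.math.log(probs), dtype=tf.float32)
    sample = tf.stop_gradient(dist.sample())
    # Straight-through: the forward pass uses the sample, gradients flow through `probs`.
    return sample + probs - tf.stop_gradient(probs)

action = demo_unimix_sample(tf.constant([[2.0, 0.5, -1.0]]))  # one-hot over 3 classes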
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__init__.py ADDED
File without changes
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__pycache__/dynamics_predictor.cpython-310.pyc ADDED
Binary file (3.22 kB).

 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__pycache__/representation_layer.cpython-310.pyc ADDED
Binary file (4.17 kB).
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/__pycache__/sequence_model.cpython-310.pyc ADDED
Binary file (4.4 kB).
 
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/cnn_atari.py ADDED
@@ -0,0 +1,112 @@
1
+ """
2
+ [1] Mastering Diverse Domains through World Models - 2023
3
+ D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap
4
+ https://arxiv.org/pdf/2301.04104v1.pdf
5
+ """
6
+ from typing import Optional
7
+
8
+ from ray.rllib.algorithms.dreamerv3.utils import get_cnn_multiplier
9
+ from ray.rllib.utils.framework import try_import_tf
10
+
11
+ _, tf, _ = try_import_tf()
12
+
13
+
14
+ class CNNAtari(tf.keras.Model):
15
+ """An image encoder mapping 64x64 RGB images via 4 CNN layers into a 1D space."""
16
+
17
+ def __init__(
18
+ self,
19
+ *,
20
+ model_size: Optional[str] = "XS",
21
+ cnn_multiplier: Optional[int] = None,
22
+ ):
23
+ """Initializes a CNNAtari instance.
24
+
25
+ Args:
26
+ model_size: The "Model Size" used according to [1] Appendix B.
27
+ Use None for manually setting the `cnn_multiplier`.
28
+ cnn_multiplier: Optional override for the additional factor used to multiply
29
+ the number of filters with each CNN layer. Starting with
30
+ 1 * `cnn_multiplier` filters in the first CNN layer, the number of
31
+ filters then increases via `2*cnn_multiplier`, `4*cnn_multiplier`, till
32
+ `8*cnn_multiplier`.
33
+ """
34
+ super().__init__(name="image_encoder")
35
+
36
+ cnn_multiplier = get_cnn_multiplier(model_size, override=cnn_multiplier)
37
+
38
+ # See appendix C in [1]:
39
+ # "We use a similar network architecture but employ layer normalization and
40
+ # SiLU as the activation function. For better framework support, we use
41
+ # same-padded convolutions with stride 2 and kernel size 3 instead of
42
+ # valid-padded convolutions with larger kernels ..."
43
+ # HOWEVER: In Danijar's DreamerV3 repo, kernel size=4 is used, so we use it
44
+ # here, too.
45
+ self.conv_layers = [
46
+ tf.keras.layers.Conv2D(
47
+ filters=1 * cnn_multiplier,
48
+ kernel_size=4,
49
+ strides=(2, 2),
50
+ padding="same",
51
+ # No bias or activation due to layernorm.
52
+ activation=None,
53
+ use_bias=False,
54
+ ),
55
+ tf.keras.layers.Conv2D(
56
+ filters=2 * cnn_multiplier,
57
+ kernel_size=4,
58
+ strides=(2, 2),
59
+ padding="same",
60
+ # No bias or activation due to layernorm.
61
+ activation=None,
62
+ use_bias=False,
63
+ ),
64
+ tf.keras.layers.Conv2D(
65
+ filters=4 * cnn_multiplier,
66
+ kernel_size=4,
67
+ strides=(2, 2),
68
+ padding="same",
69
+ # No bias or activation due to layernorm.
70
+ activation=None,
71
+ use_bias=False,
72
+ ),
73
+ # .. until output is 4 x 4 x [num_filters].
74
+ tf.keras.layers.Conv2D(
75
+ filters=8 * cnn_multiplier,
76
+ kernel_size=4,
77
+ strides=(2, 2),
78
+ padding="same",
79
+ # No bias or activation due to layernorm.
80
+ activation=None,
81
+ use_bias=False,
82
+ ),
83
+ ]
84
+ self.layer_normalizations = []
85
+ for _ in range(len(self.conv_layers)):
86
+ self.layer_normalizations.append(tf.keras.layers.LayerNormalization())
87
+ # -> 4 x 4 x num_filters -> now flatten.
88
+ self.flatten_layer = tf.keras.layers.Flatten(data_format="channels_last")
89
+
90
+ @tf.function(
91
+ input_signature=[
92
+ tf.TensorSpec(
93
+ shape=[None, 64, 64, 3],
94
+ dtype=tf.keras.mixed_precision.global_policy().compute_dtype
95
+ or tf.float32,
96
+ )
97
+ ]
98
+ )
99
+ def call(self, inputs):
100
+ """Performs a forward pass through the CNN Atari encoder.
101
+
102
+ Args:
103
+ inputs: The image inputs of shape (B, 64, 64, 3).
104
+ """
105
+ # Grayscale inputs of shape [B, h, w]: add a trailing channel dim.
106
+ if len(inputs.shape) == 3:
107
+ inputs = tf.expand_dims(inputs, -1)
108
+ out = inputs
109
+ for conv_2d, layer_norm in zip(self.conv_layers, self.layer_normalizations):
110
+ out = tf.nn.silu(layer_norm(inputs=conv_2d(out)))
111
+ assert out.shape[1] == 4 and out.shape[2] == 4
112
+ return self.flatten_layer(out)
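As a quick sanity check of the shapes the encoder above produces: four stride-2, same-padded convolutions halve the 64x64 resolution four times while the filter count grows 1x, 2x, 4x, 8x the multiplier. A small illustrative helper; `demo_encoder_output_size` and the example multiplier of 24 are assumptions for illustration, not guaranteed library values.

def demo_encoder_output_size(cnn_multiplier=24, spatial=64, num_layers=4):
    filters = cnn_multiplier
    for _ in range(num_layers):
        spatial = (spatial + 1) // 2   # stride-2 with "same" padding: ceil(n / 2)
        last_filters = filters         # this layer's filters: 1x, 2x, 4x, 8x the multiplier
        filters *= 2
    # Returns final spatial size, final filter count, and flattened embedding size.
    return spatial, last_filters, spatial * spatial * last_filters

print(demo_encoder_output_size())  # (4, 192, 3072) for cnn_multiplier=24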
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/reward_predictor.py ADDED
@@ -0,0 +1,112 @@
1
+ """
2
+ [1] Mastering Diverse Domains through World Models - 2023
3
+ D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap
4
+ https://arxiv.org/pdf/2301.04104v1.pdf
5
+ """
6
+ from ray.rllib.algorithms.dreamerv3.tf.models.components.mlp import MLP
7
+ from ray.rllib.algorithms.dreamerv3.tf.models.components.reward_predictor_layer import (
8
+ RewardPredictorLayer,
9
+ )
10
+ from ray.rllib.algorithms.dreamerv3.utils import (
11
+ get_gru_units,
12
+ get_num_z_categoricals,
13
+ get_num_z_classes,
14
+ )
15
+ from ray.rllib.utils.framework import try_import_tf
16
+
17
+ _, tf, _ = try_import_tf()
18
+
19
+
20
+ class RewardPredictor(tf.keras.Model):
21
+ """Wrapper of MLP and RewardPredictorLayer to predict rewards for the world model.
22
+
23
+ Predicted rewards are used to produce "dream data" to learn the policy in.
24
+ """
25
+
26
+ def __init__(
27
+ self,
28
+ *,
29
+ model_size: str = "XS",
30
+ num_buckets: int = 255,
31
+ lower_bound: float = -20.0,
32
+ upper_bound: float = 20.0,
33
+ ):
34
+ """Initializes a RewardPredictor instance.
35
+
36
+ Args:
37
+ model_size: The "Model Size" used according to [1] Appendix B.
38
+ Determines the exact size of the underlying MLP.
39
+ num_buckets: The number of buckets to create. Note that the number of
40
+ possible symlog'd outcomes from the used distribution is
41
+ `num_buckets` + 1:
42
+ lower_bound --bucket-- o[1] --bucket-- o[2] ... --bucket-- upper_bound
43
+ o=outcomes
44
+ lower_bound=o[0]
45
+ upper_bound=o[num_buckets]
46
+ lower_bound: The symlog'd lower bound for a possible reward value.
47
+ Note that a value of -20.0 here already allows individual (actual env)
48
+ rewards to be as low as -400M. Buckets will be created between
49
+ `lower_bound` and `upper_bound`.
50
+ upper_bound: The symlog'd upper bound for a possible reward value.
51
+ Note that a value of +20.0 here already allows individual (actual env)
52
+ rewards to be as high as 400M. Buckets will be created between
53
+ `lower_bound` and `upper_bound`.
54
+ """
55
+ super().__init__(name="reward_predictor")
56
+ self.model_size = model_size
57
+
58
+ self.mlp = MLP(
59
+ model_size=model_size,
60
+ output_layer_size=None,
61
+ )
62
+ self.reward_layer = RewardPredictorLayer(
63
+ num_buckets=num_buckets,
64
+ lower_bound=lower_bound,
65
+ upper_bound=upper_bound,
66
+ )
67
+
68
+ # Trace self.call.
69
+ dl_type = tf.keras.mixed_precision.global_policy().compute_dtype or tf.float32
70
+ self.call = tf.function(
71
+ input_signature=[
72
+ tf.TensorSpec(shape=[None, get_gru_units(model_size)], dtype=dl_type),
73
+ tf.TensorSpec(
74
+ shape=[
75
+ None,
76
+ get_num_z_categoricals(model_size),
77
+ get_num_z_classes(model_size),
78
+ ],
79
+ dtype=dl_type,
80
+ ),
81
+ ]
82
+ )(self.call)
83
+
84
+ def call(self, h, z):
85
+ """Computes the expected reward using N equal sized buckets of possible values.
86
+
87
+ Args:
88
+ h: The deterministic hidden state of the sequence model. [B, dim(h)].
89
+ z: The stochastic discrete representations of the original
90
+ observation input. [B, num_categoricals, num_classes].
91
+ """
92
+ # Flatten last two dims of z.
93
+ assert len(z.shape) == 3
94
+ z_shape = tf.shape(z)
95
+ z = tf.reshape(z, shape=(z_shape[0], -1))
96
+ assert len(z.shape) == 2
97
+ out = tf.concat([h, z], axis=-1)
98
+ out.set_shape(
99
+ [
100
+ None,
101
+ (
102
+ get_num_z_categoricals(self.model_size)
103
+ * get_num_z_classes(self.model_size)
104
+ + get_gru_units(self.model_size)
105
+ ),
106
+ ]
107
+ )
108
+ # Send h-cat-z through MLP.
109
+ out = self.mlp(out)
110
+ # Return a tuple: (mean reward, logits over the reward
111
+ # buckets).
112
+ return self.reward_layer(out)
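The bounds above live in symlog space. For intuition, here is an illustrative sketch of symlog/symexp; the library ships its own `symlog`/`inverse_symlog` utilities, and these two helpers are only stand-ins to show why a symlog'd range of +/-20 covers raw rewards of roughly +/-400M.

import math

def symlog(x):
    # sign(x) * ln(|x| + 1): compresses large magnitudes, keeps sign and zero.
    return math.copysign(math.log(abs(x) + 1.0), x)

def symexp(x):
    # Inverse of symlog: sign(x) * (exp(|x|) - 1).
    return math.copysign(math.exp(abs(x)) - 1.0, x)

print(symexp(20.0))    # ~4.85e8, so [-20, 20] in symlog space covers +/-400M raw rewards
print(symlog(4.85e8))  # ~20.0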
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/reward_predictor_layer.py ADDED
@@ -0,0 +1,110 @@
1
+ """
2
+ [1] Mastering Diverse Domains through World Models - 2023
3
+ D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap
4
+ https://arxiv.org/pdf/2301.04104v1.pdf
5
+
6
+ [2] Mastering Atari with Discrete World Models - 2021
7
+ D. Hafner, T. Lillicrap, M. Norouzi, J. Ba
8
+ https://arxiv.org/pdf/2010.02193.pdf
9
+ """
10
+ from ray.rllib.utils.framework import try_import_tf
11
+
12
+ _, tf, _ = try_import_tf()
13
+
14
+
15
+ class RewardPredictorLayer(tf.keras.layers.Layer):
16
+ """A layer outputting reward predictions using K bins and two-hot encoding.
17
+
18
+ This layer is used in two models in DreamerV3: The reward predictor of the world
19
+ model and the value function. K is 255 by default (see [1]) and doesn't change
20
+ with the model size.
21
+
22
+ Possible predicted reward/values range from symexp(-20.0) to symexp(20.0), which
23
+ should cover any possible environment. Outputs of this layer are produced by
24
+ computing logits/probs via a single linear layer, then interpreting the probs
25
+ as weights for a weighted average of the different possible reward (binned) values.
26
+ """
27
+
28
+ def __init__(
29
+ self,
30
+ *,
31
+ num_buckets: int = 255,
32
+ lower_bound: float = -20.0,
33
+ upper_bound: float = 20.0,
34
+ trainable: bool = True,
35
+ ):
36
+ """Initializes a RewardPredictorLayer instance.
37
+
38
+ Args:
39
+ num_buckets: The number of buckets to create. Note that the number of
40
+ possible symlog'd outcomes from the used distribution is
41
+ `num_buckets` + 1:
42
+ lower_bound --bucket-- o[1] --bucket-- o[2] ... --bucket-- upper_bound
43
+ o=outcomes
44
+ lower_bound=o[0]
45
+ upper_bound=o[num_buckets]
46
+ lower_bound: The symlog'd lower bound for a possible reward value.
47
+ Note that a value of -20.0 here already allows individual (actual env)
48
+ rewards to be as low as -400M. Buckets will be created between
49
+ `lower_bound` and `upper_bound`.
50
+ upper_bound: The symlog'd upper bound for a possible reward value.
51
+ Note that a value of +20.0 here already allows individual (actual env)
52
+ rewards to be as high as 400M. Buckets will be created between
53
+ `lower_bound` and `upper_bound`.
54
+ """
55
+ self.num_buckets = num_buckets
56
+ super().__init__(name=f"reward_layer_{self.num_buckets}buckets")
57
+
58
+ self.lower_bound = lower_bound
59
+ self.upper_bound = upper_bound
60
+ self.reward_buckets_layer = tf.keras.layers.Dense(
61
+ units=self.num_buckets,
62
+ activation=None,
63
+ # From [1]:
64
+ # "We further noticed that the randomly initialized reward predictor and
65
+ # critic networks at the start of training can result in large predicted
66
+ # rewards that can delay the onset of learning. We initialize the output
67
+ # weights of the reward predictor and critic to zeros, which effectively
68
+ # alleviates the problem and accelerates early learning."
69
+ kernel_initializer="zeros",
70
+ bias_initializer="zeros", # zero-bias is default anyways
71
+ trainable=trainable,
72
+ )
73
+
74
+ def call(self, inputs):
75
+ """Computes the expected reward using N equal sized buckets of possible values.
76
+
77
+ Args:
78
+ inputs: The input tensor for the layer, which computes the reward bucket
79
+ weights (logits). [B, dim].
80
+
81
+ Returns:
82
+ A tuple consisting of the expected rewards and the logits that parameterize
83
+ the tfp `FiniteDiscrete` distribution object. To get the individual bucket
84
+ probs, do `[FiniteDiscrete object].probs`.
85
+ """
86
+ # Compute the `num_buckets` weights.
87
+ assert len(inputs.shape) == 2
88
+ logits = tf.cast(self.reward_buckets_layer(inputs), tf.float32)
89
+ # out=[B, `num_buckets`]
90
+
91
+ # Compute the expected(!) reward using the formula:
92
+ # `softmax(Linear(x))` [vectordot] `possible_outcomes`, where
93
+ # `possible_outcomes` is the even-spaced (binned) encoding of all possible
94
+ # symexp'd reward/values.
95
+ # [2]: "The mean of the reward predictor pφ(ˆrt | zˆt) is used as reward
96
+ # sequence rˆ1:H."
97
+ probs = tf.nn.softmax(logits)
98
+ possible_outcomes = tf.linspace(
99
+ self.lower_bound,
100
+ self.upper_bound,
101
+ self.num_buckets,
102
+ )
103
+ # probs=possible_outcomes=[B, `num_buckets`]
104
+
105
+ # Simple vector dot product (over last dim) to get the mean reward
106
+ # weighted sum, where all weights sum to 1.0.
107
+ expected_rewards = tf.reduce_sum(probs * possible_outcomes, axis=-1)
108
+ # expected_rewards=[B]
109
+
110
+ return expected_rewards, logits
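The core of this layer is a probability-weighted average over evenly spaced bucket values. A small numpy sketch of that reduction; the helper `demo_expected_value` is illustrative only, not the layer's API.

import numpy as np

def demo_expected_value(logits, lower=-20.0, upper=20.0):
    # logits: (B, num_buckets) -> probs that sum to 1 over the buckets (softmax).
    probs = np.exp(logits - logits.max(axis=-1, keepdims=True))
    probs /= probs.sum(axis=-1, keepdims=True)
    # Evenly spaced possible (symlog'd) outcomes, one per bucket.
    outcomes = np.linspace(lower, upper, logits.shape[-1])
    # Expected value = probability-weighted sum of the bucket values.
    return (probs * outcomes).sum(axis=-1)

logits = np.zeros((1, 255))          # uniform probs over a symmetric range
print(demo_expected_value(logits))   # ~[0.]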
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/components/sequence_model.py ADDED
@@ -0,0 +1,144 @@
1
+ """
2
+ [1] Mastering Diverse Domains through World Models - 2023
3
+ D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap
4
+ https://arxiv.org/pdf/2301.04104v1.pdf
5
+ """
6
+ from typing import Optional
7
+
8
+ import gymnasium as gym
9
+ import numpy as np
10
+
11
+ from ray.rllib.algorithms.dreamerv3.tf.models.components.mlp import MLP
12
+ from ray.rllib.algorithms.dreamerv3.utils import (
13
+ get_gru_units,
14
+ get_num_z_classes,
15
+ get_num_z_categoricals,
16
+ )
17
+ from ray.rllib.utils.framework import try_import_tf
18
+
19
+ _, tf, _ = try_import_tf()
20
+
21
+
22
+ class SequenceModel(tf.keras.Model):
23
+ """The "sequence model" of the RSSM, computing ht+1 given (ht, zt, at).
24
+
25
+ Note: The "internal state" always consists of:
26
+ The actions `a` (initially, this is a zeroed-out action), `h`-states (deterministic,
27
+ continuous), and `z`-states (stochastic, discrete).
28
+ There are two versions of z-states: "posterior" for world model training and "prior"
29
+ for creating the dream data.
30
+
31
+ Initial internal state values (`a`, `h`, and `z`) are used wherever a new episode
32
+ starts within a batch row OR at the beginning of each train batch's B rows,
33
+ regardless of whether there was an actual episode boundary or not. Thus, internal
34
+ states are not required to be stored in or retrieved from the replay buffer AND
35
+ retrieved batches from the buffer must not be zero padded.
36
+
37
+ Initial `a` is the zero "one hot" action, e.g. [0.0, 0.0] for Discrete(2), initial
38
+ `h` is a separate learned variable, and initial `z` are computed by the "dynamics"
39
+ (or "prior") net, using only the initial-h state as input.
40
+
41
+ The GRU in this SequenceModel then always produces the next h-state.
42
+ """
43
+
44
+ def __init__(
45
+ self,
46
+ *,
47
+ model_size: Optional[str] = "XS",
48
+ action_space: gym.Space,
49
+ num_gru_units: Optional[int] = None,
50
+ ):
51
+ """Initializes a SequenceModel instance.
52
+
53
+ Args:
54
+ model_size: The "Model Size" used according to [1] Appendix B.
55
+ Use None for manually setting the number of GRU units used.
56
+ action_space: The action space of the environment used.
57
+ num_gru_units: Overrides the number of GRU units (dimension of the h-state).
58
+ If None, use the value given through `model_size`
59
+ (see [1] Appendix B).
60
+ """
61
+ super().__init__(name="sequence_model")
62
+
63
+ self.model_size = model_size
64
+ self.action_space = action_space
65
+ num_gru_units = get_gru_units(self.model_size, override=num_gru_units)
66
+
67
+ # In Danijar's code, there is an additional layer (units=[model_size])
68
+ # prior to the GRU (but always only with 1 layer), which is not mentioned in
69
+ # the paper.
70
+ self.pre_gru_layer = MLP(
71
+ num_dense_layers=1,
72
+ model_size=self.model_size,
73
+ output_layer_size=None,
74
+ )
75
+ self.gru_unit = tf.keras.layers.GRU(
76
+ num_gru_units,
77
+ return_sequences=False,
78
+ return_state=False,
79
+ # Note: Changing these activations is most likely a bad idea!
80
+ # In experiments, setting one or both of them to silu deteriorated
81
+ # performance significantly.
82
+ # activation=tf.nn.silu,
83
+ # recurrent_activation=tf.nn.silu,
84
+ )
85
+
86
+ # Trace self.call.
87
+ dl_type = tf.keras.mixed_precision.global_policy().compute_dtype or tf.float32
88
+ self.call = tf.function(
89
+ input_signature=[
90
+ tf.TensorSpec(
91
+ shape=[None]
92
+ + (
93
+ [action_space.n]
94
+ if isinstance(action_space, gym.spaces.Discrete)
95
+ else list(action_space.shape)
96
+ ),
97
+ dtype=dl_type,
98
+ ),
99
+ tf.TensorSpec(shape=[None, num_gru_units], dtype=dl_type),
100
+ tf.TensorSpec(
101
+ shape=[
102
+ None,
103
+ get_num_z_categoricals(self.model_size),
104
+ get_num_z_classes(self.model_size),
105
+ ],
106
+ dtype=dl_type,
107
+ ),
108
+ ]
109
+ )(self.call)
110
+
111
+ def call(self, a, h, z):
112
+ """
113
+
114
+ Args:
115
+ a: The previous action (already one-hot'd if applicable). (B, ...).
116
+ h: The previous deterministic hidden state of the sequence model.
117
+ (B, num_gru_units)
118
+ z: The previous stochastic discrete representations of the original
119
+ observation input. (B, num_categoricals, num_classes_per_categorical).
120
+ """
121
+ # Flatten last two dims of z.
122
+ z_shape = tf.shape(z)
123
+ z = tf.reshape(z, shape=(z_shape[0], -1))
124
+ out = tf.concat([z, a], axis=-1)
125
+ out.set_shape(
126
+ [
127
+ None,
128
+ (
129
+ get_num_z_categoricals(self.model_size)
130
+ * get_num_z_classes(self.model_size)
131
+ + (
132
+ self.action_space.n
133
+ if isinstance(self.action_space, gym.spaces.Discrete)
134
+ else int(np.prod(self.action_space.shape))
135
+ )
136
+ ),
137
+ ]
138
+ )
139
+ # Pass through pre-GRU layer.
140
+ out = self.pre_gru_layer(out)
141
+ # Pass through (batch-major) GRU (expand axis=1 as the time axis).
142
+ h_next = self.gru_unit(tf.expand_dims(out, axis=1), initial_state=h)
143
+ # Return the GRU's output (the next h-state).
144
+ return h_next
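The pattern above steps a Keras GRU for exactly one timestep by adding a dummy time axis of length 1 and feeding the previous h-state via `initial_state`. A minimal illustrative sketch with arbitrarily chosen small dimensions:

import tensorflow as tf

num_gru_units, batch_size, in_dim = 8, 2, 5
gru = tf.keras.layers.GRU(num_gru_units, return_sequences=False, return_state=False)

x = tf.random.normal((batch_size, in_dim))    # stand-in for the pre-GRU MLP output of [z_flat, a]
h = tf.zeros((batch_size, num_gru_units))     # previous h-state
# Expand a time axis of length 1, step the GRU once, receive the next h-state.
h_next = gru(tf.expand_dims(x, axis=1), initial_state=h)
print(h_next.shape)  # (2, 8)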
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/disagree_networks.py ADDED
@@ -0,0 +1,94 @@
1
+ """
2
+ [1] Mastering Diverse Domains through World Models - 2023
3
+ D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap
4
+ https://arxiv.org/pdf/2301.04104v1.pdf
5
+ """
6
+
7
+ from ray.rllib.algorithms.dreamerv3.tf.models.components.mlp import MLP
8
+ from ray.rllib.algorithms.dreamerv3.tf.models.components.representation_layer import (
9
+ RepresentationLayer,
10
+ )
11
+ from ray.rllib.utils.framework import try_import_tf, try_import_tfp
12
+
13
+ _, tf, _ = try_import_tf()
14
+ tfp = try_import_tfp()
15
+
16
+
17
+ class DisagreeNetworks(tf.keras.Model):
18
+ """Predict the RSSM's z^(t+1), given h(t), z^(t), and a(t).
19
+
20
+ Disagreement (stddev) between the N networks in this model on what the next z^ would
21
+ be is used to produce intrinsic rewards for enhanced, curiosity-based exploration.
22
+
23
+ TODO
24
+ """
25
+
26
+ def __init__(self, *, num_networks, model_size, intrinsic_rewards_scale):
27
+ super().__init__(name="disagree_networks")
28
+
29
+ self.model_size = model_size
30
+ self.num_networks = num_networks
31
+ self.intrinsic_rewards_scale = intrinsic_rewards_scale
32
+
33
+ self.mlps = []
34
+ self.representation_layers = []
35
+
36
+ for _ in range(self.num_networks):
37
+ self.mlps.append(
38
+ MLP(
39
+ model_size=self.model_size,
40
+ output_layer_size=None,
41
+ trainable=True,
42
+ )
43
+ )
44
+ self.representation_layers.append(
45
+ RepresentationLayer(model_size=self.model_size, name="disagree")
46
+ )
47
+
48
+ def call(self, inputs, z, a, training=None):
49
+ return self.forward_train(a=a, h=inputs, z=z)
50
+
51
+ def compute_intrinsic_rewards(self, h, z, a):
52
+ forward_train_outs = self.forward_train(a=a, h=h, z=z)
53
+ B = tf.shape(h)[0]
54
+
55
+ # Intrinsic rewards are computed as:
56
+ # Stddev (between the different nets) of the 32x32 discrete, stochastic
57
+ # probabilities. Meaning that the larger the disagreement
58
+ # (stddev) between the nets on what the probabilities for the different
59
+ # classes should be, the higher the intrinsic reward.
60
+ z_predicted_probs_N_B = forward_train_outs["z_predicted_probs_N_HxB"]
61
+ N = len(z_predicted_probs_N_B)
62
+ z_predicted_probs_N_B = tf.stack(z_predicted_probs_N_B, axis=0)
63
+ # Flatten z-dims (num_categoricals x num_classes).
64
+ z_predicted_probs_N_B = tf.reshape(z_predicted_probs_N_B, shape=(N, B, -1))
65
+
66
+ # Compute stddevs over all disagree nets (axis=0).
67
+ # Mean over last axis ([num categoricals] x [num classes] folded axis).
68
+ stddevs_B_mean = tf.reduce_mean(
69
+ tf.math.reduce_std(z_predicted_probs_N_B, axis=0),
70
+ axis=-1,
71
+ )
72
+ # TEST:
73
+ stddevs_B_mean -= tf.reduce_mean(stddevs_B_mean)
74
+ # END TEST
75
+ return {
76
+ "rewards_intrinsic": stddevs_B_mean * self.intrinsic_rewards_scale,
77
+ "forward_train_outs": forward_train_outs,
78
+ }
79
+
80
+ def forward_train(self, a, h, z):
81
+ HxB = tf.shape(h)[0]
82
+ # Fold z-dims.
83
+ z = tf.reshape(z, shape=(HxB, -1))
84
+ # Concat all input components (h, z, and a).
85
+ inputs_ = tf.stop_gradient(tf.concat([h, z, a], axis=-1))
86
+
87
+ z_predicted_probs_N_HxB = [
88
+ repr(mlp(inputs_))[1] # [0]=sample; [1]=returned probs
89
+ for mlp, repr in zip(self.mlps, self.representation_layers)
90
+ ]
91
+ # shape=(N, HxB, [num categoricals], [num classes]); N=number of disagree nets.
92
+ # HxB -> folded horizon_H x batch_size_B (from dreamed data).
93
+
94
+ return {"z_predicted_probs_N_HxB": z_predicted_probs_N_HxB}
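The intrinsic reward derived from these nets is simply the per-sample disagreement: stack the N predicted z-probabilities, take the stddev across the nets, and average over the flattened z-dims. A compact numpy sketch of that reduction; random Dirichlet samples stand in for the nets' outputs and the shapes are illustrative.

import numpy as np

N, B, num_categoricals, num_classes = 8, 4, 32, 32
# Stand-in for the N nets' predicted z-probabilities (would come from the MLPs above).
z_probs = np.random.dirichlet(np.ones(num_classes), size=(N, B, num_categoricals))

# Flatten the z-dims, then: stddev across the N nets, mean over the flattened z-dims.
z_probs_flat = z_probs.reshape(N, B, -1)
intrinsic = z_probs_flat.std(axis=0).mean(axis=-1)   # shape (B,)
print(intrinsic.shape)  # (4,)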
deepseek/lib/python3.10/site-packages/ray/rllib/algorithms/dreamerv3/tf/models/dreamer_model.py ADDED
@@ -0,0 +1,606 @@
1
+ """
2
+ [1] Mastering Diverse Domains through World Models - 2023
3
+ D. Hafner, J. Pasukonis, J. Ba, T. Lillicrap
4
+ https://arxiv.org/pdf/2301.04104v1.pdf
5
+ """
6
+ import re
7
+
8
+ import gymnasium as gym
9
+ import numpy as np
10
+
11
+ from ray.rllib.algorithms.dreamerv3.tf.models.disagree_networks import DisagreeNetworks
12
+ from ray.rllib.algorithms.dreamerv3.tf.models.actor_network import ActorNetwork
13
+ from ray.rllib.algorithms.dreamerv3.tf.models.critic_network import CriticNetwork
14
+ from ray.rllib.algorithms.dreamerv3.tf.models.world_model import WorldModel
15
+ from ray.rllib.algorithms.dreamerv3.utils import (
16
+ get_gru_units,
17
+ get_num_z_categoricals,
18
+ get_num_z_classes,
19
+ )
20
+ from ray.rllib.utils.framework import try_import_tf
21
+ from ray.rllib.utils.tf_utils import inverse_symlog
22
+
23
+ _, tf, _ = try_import_tf()
24
+
25
+
26
+ class DreamerModel(tf.keras.Model):
27
+ """The main tf-keras model containing all necessary components for DreamerV3.
28
+
29
+ Includes:
30
+ - The world model with encoder, decoder, sequence-model (RSSM), dynamics
31
+ (generates prior z-state), and "posterior" model (generates posterior z-state).
32
+ Predicts env dynamics and produces dreamed trajectories for actor- and critic
33
+ learning.
34
+ - The actor network (policy).
35
+ - The critic network for value function prediction.
36
+ """
37
+
38
+ def __init__(
39
+ self,
40
+ *,
41
+ model_size: str = "XS",
42
+ action_space: gym.Space,
43
+ world_model: WorldModel,
44
+ actor: ActorNetwork,
45
+ critic: CriticNetwork,
46
+ horizon: int,
47
+ gamma: float,
48
+ use_curiosity: bool = False,
49
+ intrinsic_rewards_scale: float = 0.1,
50
+ ):
51
+ """Initializes a DreamerModel instance.
52
+
53
+ Args:
54
+ model_size: The "Model Size" used according to [1] Appendix B.
55
+ Use None for manually setting the different network sizes.
56
+ action_space: The action space of the environment used.
57
+ world_model: The WorldModel component.
58
+ actor: The ActorNetwork component.
59
+ critic: The CriticNetwork component.
60
+ horizon: The dream horizon to use when creating dreamed trajectories.
61
+ """
62
+ super().__init__(name="dreamer_model")
63
+
64
+ self.model_size = model_size
65
+ self.action_space = action_space
66
+ self.use_curiosity = use_curiosity
67
+
68
+ self.world_model = world_model
69
+ self.actor = actor
70
+ self.critic = critic
71
+
72
+ self.horizon = horizon
73
+ self.gamma = gamma
74
+ self._comp_dtype = (
75
+ tf.keras.mixed_precision.global_policy().compute_dtype or tf.float32
76
+ )
77
+
78
+ self.disagree_nets = None
79
+ if self.use_curiosity:
80
+ self.disagree_nets = DisagreeNetworks(
81
+ num_networks=8,
82
+ model_size=self.model_size,
83
+ intrinsic_rewards_scale=intrinsic_rewards_scale,
84
+ )
85
+
86
+ self.dream_trajectory = tf.function(
87
+ input_signature=[
88
+ {
89
+ "h": tf.TensorSpec(
90
+ shape=[
91
+ None,
92
+ get_gru_units(self.model_size),
93
+ ],
94
+ dtype=self._comp_dtype,
95
+ ),
96
+ "z": tf.TensorSpec(
97
+ shape=[
98
+ None,
99
+ get_num_z_categoricals(self.model_size),
100
+ get_num_z_classes(self.model_size),
101
+ ],
102
+ dtype=self._comp_dtype,
103
+ ),
104
+ },
105
+ tf.TensorSpec(shape=[None], dtype=tf.bool),
106
+ ]
107
+ )(self.dream_trajectory)
108
+
109
+ def call(
110
+ self,
111
+ inputs,
112
+ observations,
113
+ actions,
114
+ is_first,
115
+ start_is_terminated_BxT,
116
+ gamma,
117
+ ):
118
+ """Main call method for building this model in order to generate its variables.
119
+
120
+ Note: This method should NOT be used by users directly. Its purpose is only to
121
+ perform all forward passes necessary to define all variables of the DreamerV3.
122
+ """
123
+
124
+ # Forward passes through all models are enough to build all trainable and
125
+ # non-trainable variables:
126
+
127
+ # World model.
128
+ results = self.world_model.forward_train(
129
+ observations,
130
+ actions,
131
+ is_first,
132
+ )
133
+ # Actor.
134
+ _, distr_params = self.actor(
135
+ h=results["h_states_BxT"],
136
+ z=results["z_posterior_states_BxT"],
137
+ )
138
+ # Critic.
139
+ values, _ = self.critic(
140
+ h=results["h_states_BxT"],
141
+ z=results["z_posterior_states_BxT"],
142
+ use_ema=tf.convert_to_tensor(False),
143
+ )
144
+
145
+ # Dream pipeline.
146
+ dream_data = self.dream_trajectory(
147
+ start_states={
148
+ "h": results["h_states_BxT"],
149
+ "z": results["z_posterior_states_BxT"],
150
+ },
151
+ start_is_terminated=start_is_terminated_BxT,
152
+ )
153
+
154
+ return {
155
+ "world_model_fwd": results,
156
+ "dream_data": dream_data,
157
+ "actions": actions,
158
+ "values": values,
159
+ }
160
+
161
+ @tf.function
162
+ def forward_inference(self, observations, previous_states, is_first, training=None):
163
+ """Performs a (non-exploring) action computation step given obs and states.
164
+
165
+ Note that all input data should not have a time rank (only a batch dimension).
166
+
167
+ Args:
168
+ observations: The current environment observation with shape (B, ...).
169
+ previous_states: Dict with keys `a`, `h`, and `z` used as input to the RSSM
170
+ to produce the next h-state, from which then to compute the action
171
+ using the actor network. All values in the dict should have shape
172
+ (B, ...) (no time rank).
173
+ is_first: Batch of is_first flags. These should be True if a new episode
174
+ has been started at the current timestep (meaning `observations` is the
175
+ reset observation from the environment).
176
+ """
177
+ # Perform one step in the world model (starting from `previous_state` and
178
+ # using the observations to yield a current (posterior) state).
179
+ states = self.world_model.forward_inference(
180
+ observations=observations,
181
+ previous_states=previous_states,
182
+ is_first=is_first,
183
+ )
184
+ # Compute action using our actor network and the current states.
185
+ _, distr_params = self.actor(h=states["h"], z=states["z"])
186
+ # Use the mode of the distribution (Discrete=argmax, Normal=mean).
187
+ distr = self.actor.get_action_dist_object(distr_params)
188
+ actions = distr.mode()
189
+ return actions, {"h": states["h"], "z": states["z"], "a": actions}
190
+
191
+ @tf.function
192
+ def forward_exploration(
193
+ self, observations, previous_states, is_first, training=None
194
+ ):
195
+ """Performs an exploratory action computation step given obs and states.
196
+
197
+ Note that all input data should not have a time rank (only a batch dimension).
198
+
199
+ Args:
200
+ observations: The current environment observation with shape (B, ...).
201
+ previous_states: Dict with keys `a`, `h`, and `z` used as input to the RSSM
202
+ to produce the next h-state, from which then to compute the action
203
+ using the actor network. All values in the dict should have shape
204
+ (B, ...) (no time rank).
205
+ is_first: Batch of is_first flags. These should be True if a new episode
206
+ has been started at the current timestep (meaning `observations` is the
207
+ reset observation from the environment).
208
+ """
209
+ # Perform one step in the world model (starting from `previous_state` and
210
+ # using the observations to yield a current (posterior) state).
211
+ states = self.world_model.forward_inference(
212
+ observations=observations,
213
+ previous_states=previous_states,
214
+ is_first=is_first,
215
+ )
216
+ # Compute action using our actor network and the current states.
217
+ actions, _ = self.actor(h=states["h"], z=states["z"])
218
+ return actions, {"h": states["h"], "z": states["z"], "a": actions}
219
+
220
+ def forward_train(self, observations, actions, is_first):
221
+ """Performs a training forward pass given observations and actions.
222
+
223
+ Note that all input data must have a time rank (batch-major: [B, T, ...]).
224
+
225
+ Args:
226
+ observations: The environment observations with shape (B, T, ...). Thus,
227
+ the batch has B rows of T timesteps each. Note that it's ok to have
228
+ episode boundaries (is_first=True) within a batch row. DreamerV3 will
229
+ simply insert an initial state before these locations and continue the
230
+ sequence modelling (with the RSSM). Hence, there will be no zero
231
+ padding.
232
+ actions: The actions actually taken in the environment with shape
233
+ (B, T, ...). See `observations` docstring for details on how B and T are
234
+ handled.
235
+ is_first: Batch of is_first flags. These should be True:
236
+ - if a new episode has been started at the current timestep (meaning
237
+ `observations` is the reset observation from the environment).
238
+ - in each batch row at T=0 (first timestep of each of the B batch
239
+ rows), regardless of whether the actual env had an episode boundary
240
+ there or not.
241
+ """
242
+ return self.world_model.forward_train(
243
+ observations=observations,
244
+ actions=actions,
245
+ is_first=is_first,
246
+ )
247
+
248
+ @tf.function
249
+ def get_initial_state(self):
250
+ """Returns the (current) initial state of the dreamer model (a, h-, z-states).
251
+
252
+ An initial state is generated using the previous action, the tanh of the
253
+ (learned) h-state variable and the dynamics predictor (or "prior net") to
254
+ compute z^0 from h0. In this last step, it is important that we do NOT sample
255
+ the z^-state (as we would usually do during dreaming), but rather take the mode
256
+ (argmax, then one-hot again).
257
+ """
258
+ states = self.world_model.get_initial_state()
259
+
260
+ action_dim = (
261
+ self.action_space.n
262
+ if isinstance(self.action_space, gym.spaces.Discrete)
263
+ else np.prod(self.action_space.shape)
264
+ )
265
+ states["a"] = tf.zeros(
266
+ (
267
+ 1,
268
+ action_dim,
269
+ ),
270
+ dtype=tf.keras.mixed_precision.global_policy().compute_dtype or tf.float32,
271
+ )
272
+ return states
273
+
274
+ def dream_trajectory(self, start_states, start_is_terminated):
275
+ """Dreams trajectories of length H from batch of h- and z-states.
276
+
277
+ Note that incoming data will have the shapes (BxT, ...), where the original
278
+ batch- and time-dimensions are already folded together. Beginning from this
279
+ new batch dim (BxT), we will unroll `timesteps_H` timesteps in a time-major
280
+ fashion, such that the dreamed data will have shape (H, BxT, ...).
281
+
282
+ Args:
283
+ start_states: Dict of `h` and `z` states in the shape of (B, ...) and
284
+ (B, num_categoricals, num_classes), respectively, as
285
+ computed by a train forward pass. From each individual h-/z-state pair
286
+ in the given batch, we will branch off a dreamed trajectory of len
287
+ `timesteps_H`.
288
+ start_is_terminated: Float flags of shape (B,) indicating whether the
289
+ first timestep of each batch row is already a terminated timestep
290
+ (given by the actual environment).
291
+ """
292
+ # Dreamed actions (one-hot encoded for discrete actions).
293
+ a_dreamed_t0_to_H = []
294
+ a_dreamed_dist_params_t0_to_H = []
295
+
296
+ h = start_states["h"]
297
+ z = start_states["z"]
298
+
299
+ # GRU outputs.
300
+ h_states_t0_to_H = [h]
301
+ # Dynamics model outputs.
302
+ z_states_prior_t0_to_H = [z]
303
+
304
+ # Compute `a` using actor network (already the first step uses a dreamed action,
305
+ # not a sampled one).
306
+ a, a_dist_params = self.actor(
307
+ # We have to stop the gradients through the states. B/c we are using a
308
+ # differentiable Discrete action distribution (straight through gradients
309
+ # with `a = stop_gradient(sample(probs)) + probs - stop_gradient(probs)`,
310
+ # we otherwise would add dependencies of the `-log(pi(a|s))` REINFORCE loss
311
+ # term on actions further back in the trajectory.
312
+ h=tf.stop_gradient(h),
313
+ z=tf.stop_gradient(z),
314
+ )
315
+ a_dreamed_t0_to_H.append(a)
316
+ a_dreamed_dist_params_t0_to_H.append(a_dist_params)
317
+
318
+ for i in range(self.horizon):
319
+ # Move one step in the dream using the RSSM.
320
+ h = self.world_model.sequence_model(a=a, h=h, z=z)
321
+ h_states_t0_to_H.append(h)
322
+
323
+ # Compute prior z using dynamics model.
324
+ z, _ = self.world_model.dynamics_predictor(h=h)
325
+ z_states_prior_t0_to_H.append(z)
326
+
327
+ # Compute `a` using actor network.
328
+ a, a_dist_params = self.actor(
329
+ h=tf.stop_gradient(h),
330
+ z=tf.stop_gradient(z),
331
+ )
332
+ a_dreamed_t0_to_H.append(a)
333
+ a_dreamed_dist_params_t0_to_H.append(a_dist_params)
334
+
335
+ h_states_H_B = tf.stack(h_states_t0_to_H, axis=0) # (T, B, ...)
336
+ h_states_HxB = tf.reshape(h_states_H_B, [-1] + h_states_H_B.shape.as_list()[2:])
337
+
338
+ z_states_prior_H_B = tf.stack(z_states_prior_t0_to_H, axis=0) # (T, B, ...)
339
+ z_states_prior_HxB = tf.reshape(
340
+ z_states_prior_H_B, [-1] + z_states_prior_H_B.shape.as_list()[2:]
341
+ )
342
+
343
+ a_dreamed_H_B = tf.stack(a_dreamed_t0_to_H, axis=0) # (T, B, ...)
344
+ a_dreamed_dist_params_H_B = tf.stack(a_dreamed_dist_params_t0_to_H, axis=0)
345
+
346
+ # Compute r using reward predictor.
347
+ r_dreamed_HxB, _ = self.world_model.reward_predictor(
348
+ h=h_states_HxB, z=z_states_prior_HxB
349
+ )
350
+ r_dreamed_H_B = tf.reshape(
351
+ inverse_symlog(r_dreamed_HxB), shape=[self.horizon + 1, -1]
352
+ )
353
+
354
+ # Compute intrinsic rewards.
355
+ if self.use_curiosity:
356
+ results_HxB = self.disagree_nets.compute_intrinsic_rewards(
357
+ h=h_states_HxB,
358
+ z=z_states_prior_HxB,
359
+ a=tf.reshape(a_dreamed_H_B, [-1] + a_dreamed_H_B.shape.as_list()[2:]),
360
+ )
361
+ # TODO (sven): Wrong? -> Cut out last timestep as we always predict z-states
362
+ # for the NEXT timestep and derive ri (for the NEXT timestep) from the
363
+ # disagreement between our N disagree nets.
364
+ r_intrinsic_H_B = tf.reshape(
365
+ results_HxB["rewards_intrinsic"], shape=[self.horizon + 1, -1]
366
+ )[
367
+ 1:
368
+ ] # cut out first ts instead
369
+ curiosity_forward_train_outs = results_HxB["forward_train_outs"]
370
+ del results_HxB
371
+
372
+ # Compute continues using continue predictor.
373
+ c_dreamed_HxB, _ = self.world_model.continue_predictor(
374
+ h=h_states_HxB,
375
+ z=z_states_prior_HxB,
376
+ )
377
+ c_dreamed_H_B = tf.reshape(c_dreamed_HxB, [self.horizon + 1, -1])
378
+ # Force-set first `continue` flags to False iff `start_is_terminated`.
379
+ # Note: This will cause the loss-weights for this row in the batch to be
380
+ # completely zero'd out. In general, we don't use dreamed data past any
381
+ # predicted (or actual first) continue=False flags.
382
+ c_dreamed_H_B = tf.concat(
383
+ [
384
+ 1.0
385
+ - tf.expand_dims(
386
+ tf.cast(start_is_terminated, tf.float32),
387
+ 0,
388
+ ),
389
+ c_dreamed_H_B[1:],
390
+ ],
391
+ axis=0,
392
+ )
393
+
394
+ # Loss weights for each individual dreamed timestep. Zero-out all timesteps
395
+ # that lie past continue=False flags. B/c our world model does NOT learn how
396
+ # to skip terminal/reset episode boundaries, dreamed data crossing such a
397
+ # boundary should not be used for critic/actor learning either.
398
+ dream_loss_weights_H_B = (
399
+ tf.math.cumprod(self.gamma * c_dreamed_H_B, axis=0) / self.gamma
400
+ )
401
+
402
+ # Compute the value estimates.
403
+ v, v_symlog_dreamed_logits_HxB = self.critic(
404
+ h=h_states_HxB,
405
+ z=z_states_prior_HxB,
406
+ use_ema=False,
407
+ )
408
+ v_dreamed_HxB = inverse_symlog(v)
409
+ v_dreamed_H_B = tf.reshape(v_dreamed_HxB, shape=[self.horizon + 1, -1])
410
+
411
+ v_symlog_dreamed_ema_HxB, _ = self.critic(
412
+ h=h_states_HxB,
413
+ z=z_states_prior_HxB,
414
+ use_ema=True,
415
+ )
416
+ v_symlog_dreamed_ema_H_B = tf.reshape(
417
+ v_symlog_dreamed_ema_HxB, shape=[self.horizon + 1, -1]
418
+ )
419
+
420
+ ret = {
421
+ "h_states_t0_to_H_BxT": h_states_H_B,
422
+ "z_states_prior_t0_to_H_BxT": z_states_prior_H_B,
423
+ "rewards_dreamed_t0_to_H_BxT": r_dreamed_H_B,
424
+ "continues_dreamed_t0_to_H_BxT": c_dreamed_H_B,
425
+ "actions_dreamed_t0_to_H_BxT": a_dreamed_H_B,
426
+ "actions_dreamed_dist_params_t0_to_H_BxT": a_dreamed_dist_params_H_B,
427
+ "values_dreamed_t0_to_H_BxT": v_dreamed_H_B,
428
+ "values_symlog_dreamed_logits_t0_to_HxBxT": v_symlog_dreamed_logits_HxB,
429
+ "v_symlog_dreamed_ema_t0_to_H_BxT": v_symlog_dreamed_ema_H_B,
430
+ # Loss weights for critic- and actor losses.
431
+ "dream_loss_weights_t0_to_H_BxT": dream_loss_weights_H_B,
432
+ }
433
+
434
+ if self.use_curiosity:
435
+ ret["rewards_intrinsic_t1_to_H_B"] = r_intrinsic_H_B
436
+ ret.update(curiosity_forward_train_outs)
437
+
438
+ if isinstance(self.action_space, gym.spaces.Discrete):
439
+ ret["actions_ints_dreamed_t0_to_H_B"] = tf.argmax(a_dreamed_H_B, axis=-1)
440
+
441
+ return ret
442
+
443
+ def dream_trajectory_with_burn_in(
444
+ self,
445
+ *,
446
+ start_states,
447
+ timesteps_burn_in: int,
448
+ timesteps_H: int,
449
+ observations, # [B, >=timesteps_burn_in]
450
+ actions, # [B, timesteps_burn_in (+timesteps_H)?]
451
+ use_sampled_actions_in_dream: bool = False,
452
+ use_random_actions_in_dream: bool = False,
453
+ ):
454
+ """Dreams trajectory from N initial observations and initial states.
455
+
456
+ Note: This is only used for reporting and debugging, not for actual world-model
457
+ or policy training.
458
+
459
+ Args:
460
+ start_states: The batch of start states (dicts with `a`, `h`, and `z` keys)
461
+ to begin dreaming with. These are used to compute the first h-state
462
+ using the sequence model.
463
+ timesteps_burn_in: For how many timesteps should we use the posterior
464
+ z-states (computed by the posterior net and actual observations from
465
+ the env)?
466
+ timesteps_H: For how many timesteps should we dream using the prior
467
+ z-states (computed by the dynamics (prior) net and h-states only)?
468
+ Note that the total length of the returned trajectories will
469
+ be `timesteps_burn_in` + `timesteps_H`.
470
+ observations: The batch (B, T, ...) of observations (to be used only during
471
+ burn-in over `timesteps_burn_in` timesteps).
472
+ actions: The batch (B, T, ...) of actions to use during a) burn-in over the
473
+ first `timesteps_burn_in` timesteps and - possibly - b) during
474
+ actual dreaming, iff use_sampled_actions_in_dream=True.
475
+ If applicable, actions must already be one-hot'd.
476
+ use_sampled_actions_in_dream: If True, instead of using our actor network
477
+ to compute fresh actions, we will use the ones provided via the `actions`
478
+ argument. Note that in the latter case, the `actions` time dimension
479
+ must be at least `timesteps_burn_in` + `timesteps_H` long.
480
+ use_random_actions_in_dream: Whether to use randomly sampled actions in the
481
+ dream. Note that this does not apply to the burn-in phase, during which
482
+ we will always use the actions given in the `actions` argument.
483
+ """
484
+ assert not (use_sampled_actions_in_dream and use_random_actions_in_dream)
485
+
486
+ B = observations.shape[0]
487
+
488
+ # Produce initial N internal posterior states (burn-in) using the given
489
+ # observations:
490
+ states = start_states
491
+ for i in range(timesteps_burn_in):
492
+ states = self.world_model.forward_inference(
493
+ observations=observations[:, i],
494
+ previous_states=states,
495
+ is_first=tf.fill((B,), 1.0 if i == 0 else 0.0),
496
+ )
497
+ states["a"] = actions[:, i]
498
+
499
+ # Start producing the actual dream, using prior states and either the given
500
+ # actions, dreamed, or random ones.
501
+ h_states_t0_to_H = [states["h"]]
502
+ z_states_prior_t0_to_H = [states["z"]]
503
+ a_t0_to_H = [states["a"]]
504
+
505
+ for j in range(timesteps_H):
506
+ # Compute next h using sequence model.
507
+ h = self.world_model.sequence_model(
508
+ a=states["a"],
509
+ h=states["h"],
510
+ z=states["z"],
511
+ )
512
+ h_states_t0_to_H.append(h)
513
+ # Compute z from h, using the dynamics model (we don't have an actual
514
+ # observation at this timestep).
515
+ z, _ = self.world_model.dynamics_predictor(h=h)
516
+ z_states_prior_t0_to_H.append(z)
517
+
518
+ # Compute next dreamed action or use sampled one or random one.
519
+ if use_sampled_actions_in_dream:
520
+ a = actions[:, timesteps_burn_in + j]
521
+ elif use_random_actions_in_dream:
522
+ if isinstance(self.action_space, gym.spaces.Discrete):
523
+ a = tf.random.uniform((B,), 0, self.action_space.n, tf.int64)
524
+ a = tf.one_hot(
525
+ a,
526
+ depth=self.action_space.n,
527
+ dtype=tf.keras.mixed_precision.global_policy().compute_dtype
528
+ or tf.float32,
529
+ )
530
+ # TODO: Support cont. action spaces with bound other than 0.0 and 1.0.
531
+ else:
532
+ a = tf.random.uniform(
533
+ shape=(B,) + self.action_space.shape,
534
+ dtype=self.action_space.dtype,
535
+ )
536
+ else:
537
+ a, _ = self.actor(h=h, z=z)
538
+ a_t0_to_H.append(a)
539
+
540
+ states = {"h": h, "z": z, "a": a}
541
+
542
+ # Fold time-rank for upcoming batch-predictions (no sequences needed anymore).
543
+ h_states_t0_to_H_B = tf.stack(h_states_t0_to_H, axis=0)
544
+ h_states_t0_to_HxB = tf.reshape(
545
+ h_states_t0_to_H_B, shape=[-1] + h_states_t0_to_H_B.shape.as_list()[2:]
546
+ )
547
+
548
+ z_states_prior_t0_to_H_B = tf.stack(z_states_prior_t0_to_H, axis=0)
549
+ z_states_prior_t0_to_HxB = tf.reshape(
550
+ z_states_prior_t0_to_H_B,
551
+ shape=[-1] + z_states_prior_t0_to_H_B.shape.as_list()[2:],
552
+ )
553
+
554
+ a_t0_to_H_B = tf.stack(a_t0_to_H, axis=0)
555
+
556
+ # Compute o using decoder.
557
+ o_dreamed_t0_to_HxB = self.world_model.decoder(
558
+ h=h_states_t0_to_HxB,
559
+ z=z_states_prior_t0_to_HxB,
560
+ )
561
+ if self.world_model.symlog_obs:
562
+ o_dreamed_t0_to_HxB = inverse_symlog(o_dreamed_t0_to_HxB)
563
+
564
+ # Compute r using reward predictor.
565
+ r_dreamed_t0_to_HxB, _ = self.world_model.reward_predictor(
566
+ h=h_states_t0_to_HxB,
567
+ z=z_states_prior_t0_to_HxB,
568
+ )
569
+ r_dreamed_t0_to_HxB = inverse_symlog(r_dreamed_t0_to_HxB)
570
+ # Compute continues using continue predictor.
571
+ c_dreamed_t0_to_HxB, _ = self.world_model.continue_predictor(
572
+ h=h_states_t0_to_HxB,
573
+ z=z_states_prior_t0_to_HxB,
574
+ )
575
+
576
+ # Return everything as time-major (H, B, ...), where H is the timesteps dreamed
577
+ # (NOT burn-in'd) and B is a batch dimension (this might or might not include
578
+ # an original time dimension from the real env, from all of which we then branch
579
+ # out our dream trajectories).
580
+ ret = {
581
+ "h_states_t0_to_H_BxT": h_states_t0_to_H_B,
582
+ "z_states_prior_t0_to_H_BxT": z_states_prior_t0_to_H_B,
583
+ # Unfold time-ranks in predictions.
584
+ "observations_dreamed_t0_to_H_BxT": tf.reshape(
585
+ o_dreamed_t0_to_HxB, [-1, B] + list(observations.shape)[2:]
586
+ ),
587
+ "rewards_dreamed_t0_to_H_BxT": tf.reshape(r_dreamed_t0_to_HxB, (-1, B)),
588
+ "continues_dreamed_t0_to_H_BxT": tf.reshape(c_dreamed_t0_to_HxB, (-1, B)),
589
+ }
590
+
591
+ # Figure out action key (random, sampled from env, dreamed?).
592
+ if use_sampled_actions_in_dream:
593
+ key = "actions_sampled_t0_to_H_BxT"
594
+ elif use_random_actions_in_dream:
595
+ key = "actions_random_t0_to_H_BxT"
596
+ else:
597
+ key = "actions_dreamed_t0_to_H_BxT"
598
+ ret[key] = a_t0_to_H_B
599
+
600
+ # Also provide int-actions, if discrete action space.
601
+ if isinstance(self.action_space, gym.spaces.Discrete):
602
+ ret[re.sub("^actions_", "actions_ints_", key)] = tf.argmax(
603
+ a_t0_to_H_B, axis=-1
604
+ )
605
+
606
+ return ret
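To make the loss weighting in `dream_trajectory` concrete: `cumprod(gamma * continues, axis=0) / gamma` discounts dreamed timesteps over the horizon and zeroes out everything past the first predicted continue=0. A tiny numpy sketch; gamma and the continue flags below are example values, not the library's configuration.

import numpy as np

gamma = 0.997
# Continue flags for one batch row over a dreamed horizon of 6 steps (time-major),
# with a predicted episode end at step 3.
continues = np.array([1.0, 1.0, 1.0, 0.0, 1.0, 1.0])

weights = np.cumprod(gamma * continues, axis=0) / gamma
print(weights)  # roughly [1.0, 0.997, 0.994, 0.0, 0.0, 0.0]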