ZTWHHH committed
Commit cc0c797 · verified · 1 Parent(s): 5966766

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full changeset.
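
The commit message references the Hub's large-folder upload flow. A minimal sketch of how such a commit is typically produced (repo id and local path are hypothetical; assumes a recent `huggingface_hub` that ships `HfApi.upload_large_folder`):

from huggingface_hub import HfApi

api = HfApi()
# Resumable, chunked upload of a large local folder; produces commits like this one.
api.upload_large_folder(
    repo_id="ZTWHHH/example-repo",  # hypothetical target repo
    repo_type="model",
    folder_path="./local_folder",   # hypothetical local path
)
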
Files changed (50)
  1. .gitattributes +14 -0
  2. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/__init__.cpython-310.pyc +0 -0
  3. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/action_dist.cpython-310.pyc +0 -0
  4. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/catalog.cpython-310.pyc +0 -0
  5. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/distributions.cpython-310.pyc +0 -0
  6. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/modelv2.cpython-310.pyc +0 -0
  7. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/preprocessors.cpython-310.pyc +0 -0
  8. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/repeated_values.cpython-310.pyc +0 -0
  9. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/utils.cpython-310.pyc +0 -0
  10. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__init__.py +11 -0
  11. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/__init__.cpython-310.pyc +0 -0
  12. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/attention_net.cpython-310.pyc +0 -0
  13. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/complex_input_net.cpython-310.pyc +0 -0
  14. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/fcnet.cpython-310.pyc +0 -0
  15. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/misc.cpython-310.pyc +0 -0
  16. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/noop.cpython-310.pyc +0 -0
  17. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/recurrent_net.cpython-310.pyc +0 -0
  18. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_action_dist.cpython-310.pyc +0 -0
  19. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_distributions.cpython-310.pyc +0 -0
  20. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_modelv2.cpython-310.pyc +0 -0
  21. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/visionnet.cpython-310.pyc +0 -0
  22. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/attention_net.py +573 -0
  23. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/complex_input_net.py +214 -0
  24. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/fcnet.py +148 -0
  25. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/__init__.cpython-310.pyc +0 -0
  26. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/gru_gate.cpython-310.pyc +0 -0
  27. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/multi_head_attention.cpython-310.pyc +0 -0
  28. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/relative_multi_head_attention.cpython-310.pyc +0 -0
  29. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/skip_connection.cpython-310.pyc +0 -0
  30. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/relative_multi_head_attention.py +147 -0
  31. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/recurrent_net.py +292 -0
  32. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/tf_distributions.py +552 -0
  33. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/tf_modelv2.py +142 -0
  34. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/__init__.cpython-310.pyc +0 -0
  35. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/attention_net.cpython-310.pyc +0 -0
  36. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/complex_input_net.cpython-310.pyc +0 -0
  37. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/fcnet.cpython-310.pyc +0 -0
  38. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/mingpt.cpython-310.pyc +0 -0
  39. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/misc.cpython-310.pyc +0 -0
  40. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/noop.cpython-310.pyc +0 -0
  41. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/recurrent_net.cpython-310.pyc +0 -0
  42. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/torch_action_dist.cpython-310.pyc +0 -0
  43. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/torch_distributions.cpython-310.pyc +0 -0
  44. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/torch_modelv2.cpython-310.pyc +0 -0
  45. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/visionnet.cpython-310.pyc +0 -0
  46. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  47. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/gru_gate.cpython-310.pyc +0 -0
  48. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/multi_head_attention.cpython-310.pyc +0 -0
  49. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/__init__.py +141 -0
  50. infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/from_config.py +325 -0
.gitattributes CHANGED
@@ -1513,3 +1513,17 @@ infer_4_33_0/lib/ossl-modules/legacy.so filter=lfs diff=lfs merge=lfs -text
 emu3/lib/python3.10/site-packages/numpy.libs/libscipy_openblas64_-ff651d7f.so filter=lfs diff=lfs merge=lfs -text
 infer_4_37_2/lib/python3.10/site-packages/xxhash/_xxhash.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 infer_4_37_2/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+janus/lib/liblzma.so.5 filter=lfs diff=lfs merge=lfs -text
+janus/lib/libquadmath.so filter=lfs diff=lfs merge=lfs -text
+janus/lib/libbz2.so filter=lfs diff=lfs merge=lfs -text
+janus/lib/libncurses++.a filter=lfs diff=lfs merge=lfs -text
+janus/lib/libncurses.so filter=lfs diff=lfs merge=lfs -text
+janus/lib/libatomic.so.1.2.0 filter=lfs diff=lfs merge=lfs -text
+janus/lib/libtsan.so.0 filter=lfs diff=lfs merge=lfs -text
+janus/lib/libasan.so.6 filter=lfs diff=lfs merge=lfs -text
+janus/lib/liblsan.so.0 filter=lfs diff=lfs merge=lfs -text
+janus/lib/libsqlite3.so.0.8.6 filter=lfs diff=lfs merge=lfs -text
+janus/lib/liblsan.so filter=lfs diff=lfs merge=lfs -text
+janus/lib/libncursesw.so.6 filter=lfs diff=lfs merge=lfs -text
+janus/lib/libtsan.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
+janus/lib/libtsan.so filter=lfs diff=lfs merge=lfs -text
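
Every added rule follows the Git LFS attribute pattern: a path, the `filter=lfs diff=lfs merge=lfs` triple that routes matching files through the LFS clean/smudge filters, and `-text` to disable newline normalization. A minimal sketch of appending such a rule programmatically (the glob pattern is illustrative, not from this commit):

# Append an LFS rule to .gitattributes, mirroring the lines added above.
rule = "janus/lib/*.so filter=lfs diff=lfs merge=lfs -text\n"
with open(".gitattributes", "a", encoding="utf-8") as f:
    f.write(rule)
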
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (500 Bytes).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/action_dist.cpython-310.pyc ADDED
Binary file (4.33 kB).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/catalog.cpython-310.pyc ADDED
Binary file (21.1 kB).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/distributions.cpython-310.pyc ADDED
Binary file (8.97 kB).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/modelv2.cpython-310.pyc ADDED
Binary file (14.9 kB).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/preprocessors.cpython-310.pyc ADDED
Binary file (14.4 kB).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/repeated_values.cpython-310.pyc ADDED
Binary file (7.16 kB).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/__pycache__/utils.cpython-310.pyc ADDED
Binary file (6.52 kB).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__init__.py ADDED
@@ -0,0 +1,11 @@
+from ray.rllib.models.tf.tf_modelv2 import TFModelV2
+from ray.rllib.models.tf.fcnet import FullyConnectedNetwork
+from ray.rllib.models.tf.recurrent_net import RecurrentNetwork
+from ray.rllib.models.tf.visionnet import VisionNetwork
+
+__all__ = [
+    "FullyConnectedNetwork",
+    "RecurrentNetwork",
+    "TFModelV2",
+    "VisionNetwork",
+]
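
The `__init__.py` above simply re-exports the TF models. A minimal usage sketch (assumes a standard RLlib install; the registration key is arbitrary):

from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf import FullyConnectedNetwork

# Register under a string key so an algorithm config can select it by name
# via model={"custom_model": "my_tf_fcnet"}.
ModelCatalog.register_custom_model("my_tf_fcnet", FullyConnectedNetwork)
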
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (494 Bytes).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/attention_net.cpython-310.pyc ADDED
Binary file (15.8 kB).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/complex_input_net.cpython-310.pyc ADDED
Binary file (4.71 kB).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/fcnet.cpython-310.pyc ADDED
Binary file (3.5 kB).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/misc.cpython-310.pyc ADDED
Binary file (2.55 kB).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/noop.cpython-310.pyc ADDED
Binary file (964 Bytes).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/recurrent_net.cpython-310.pyc ADDED
Binary file (8.73 kB).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_action_dist.cpython-310.pyc ADDED
Binary file (28.2 kB).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_distributions.cpython-310.pyc ADDED
Binary file (20.9 kB).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/tf_modelv2.cpython-310.pyc ADDED
Binary file (5.06 kB).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/__pycache__/visionnet.cpython-310.pyc ADDED
Binary file (5.03 kB).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/attention_net.py ADDED
@@ -0,0 +1,573 @@
+"""
+[1] - Attention Is All You Need - Vaswani, Jones, Shazeer, Parmar,
+    Uszkoreit, Gomez, Kaiser - Google Brain/Research, U Toronto - 2017.
+    https://arxiv.org/pdf/1706.03762.pdf
+[2] - Stabilizing Transformers for Reinforcement Learning - E. Parisotto
+    et al. - DeepMind - 2019. https://arxiv.org/pdf/1910.06764.pdf
+[3] - Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context.
+    Z. Dai, Z. Yang, et al. - Carnegie Mellon U - 2019.
+    https://www.aclweb.org/anthology/P19-1285.pdf
+"""
+import gymnasium as gym
+from gymnasium.spaces import Box, Discrete, MultiDiscrete
+import numpy as np
+import tree  # pip install dm_tree
+from typing import Any, Dict, Optional, Union
+
+from ray.rllib.models.modelv2 import ModelV2
+from ray.rllib.models.tf.layers import (
+    GRUGate,
+    RelativeMultiHeadAttention,
+    SkipConnection,
+)
+from ray.rllib.models.tf.tf_modelv2 import TFModelV2
+from ray.rllib.models.tf.recurrent_net import RecurrentNetwork
+from ray.rllib.policy.sample_batch import SampleBatch
+from ray.rllib.policy.view_requirement import ViewRequirement
+from ray.rllib.utils.annotations import OldAPIStack, override
+from ray.rllib.utils.framework import try_import_tf
+from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space
+from ray.rllib.utils.tf_utils import flatten_inputs_to_1d_tensor, one_hot
+from ray.rllib.utils.typing import ModelConfigDict, TensorType, List
+from ray.rllib.utils.deprecation import deprecation_warning
+from ray.util import log_once
+
+tf1, tf, tfv = try_import_tf()
+
+
+@OldAPIStack
+class PositionwiseFeedforward(tf.keras.layers.Layer if tf else object):
+    """A 2x linear layer with ReLU activation in between described in [1].
+
+    Each timestep coming from the attention head will be passed through this
+    layer separately.
+    """
+
+    def __init__(
+        self,
+        out_dim: int,
+        hidden_dim: int,
+        output_activation: Optional[Any] = None,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        self._hidden_layer = tf.keras.layers.Dense(
+            hidden_dim,
+            activation=tf.nn.relu,
+        )
+
+        self._output_layer = tf.keras.layers.Dense(
+            out_dim, activation=output_activation
+        )
+        if log_once("positionwise_feedforward_tf"):
+            deprecation_warning(
+                old="rllib.models.tf.attention_net.PositionwiseFeedforward",
+            )
+
+    def call(self, inputs: TensorType, **kwargs) -> TensorType:
+        del kwargs
+        output = self._hidden_layer(inputs)
+        return self._output_layer(output)
+
+
+@OldAPIStack
+class TrXLNet(RecurrentNetwork):
+    """A TrXL net Model described in [1]."""
+
+    def __init__(
+        self,
+        observation_space: gym.spaces.Space,
+        action_space: gym.spaces.Space,
+        num_outputs: int,
+        model_config: ModelConfigDict,
+        name: str,
+        num_transformer_units: int,
+        attention_dim: int,
+        num_heads: int,
+        head_dim: int,
+        position_wise_mlp_dim: int,
+    ):
+        """Initializes a TrXLNet object.
+
+        Args:
+            num_transformer_units: The number of Transformer repeats to
+                use (denoted L in [2]).
+            attention_dim: The input and output dimensions of one
+                Transformer unit.
+            num_heads: The number of attention heads to use in parallel.
+                Denoted as `H` in [3].
+            head_dim: The dimension of a single(!) attention head within
+                a multi-head attention unit. Denoted as `d` in [3].
+            position_wise_mlp_dim: The dimension of the hidden layer
+                within the position-wise MLP (after the multi-head attention
+                block within one Transformer unit). This is the size of the
+                first of the two layers within the PositionwiseFeedforward. The
+                second layer always has size=`attention_dim`.
+        """
+        if log_once("trxl_net_tf"):
+            deprecation_warning(
+                old="rllib.models.tf.attention_net.TrXLNet",
+            )
+        super().__init__(
+            observation_space, action_space, num_outputs, model_config, name
+        )
+
+        self.num_transformer_units = num_transformer_units
+        self.attention_dim = attention_dim
+        self.num_heads = num_heads
+        self.head_dim = head_dim
+        self.max_seq_len = model_config["max_seq_len"]
+        self.obs_dim = observation_space.shape[0]
+
+        inputs = tf.keras.layers.Input(
+            shape=(self.max_seq_len, self.obs_dim), name="inputs"
+        )
+        E_out = tf.keras.layers.Dense(attention_dim)(inputs)
+
+        for _ in range(self.num_transformer_units):
+            MHA_out = SkipConnection(
+                RelativeMultiHeadAttention(
+                    out_dim=attention_dim,
+                    num_heads=num_heads,
+                    head_dim=head_dim,
+                    input_layernorm=False,
+                    output_activation=None,
+                ),
+                fan_in_layer=None,
+            )(E_out)
+            E_out = SkipConnection(
+                PositionwiseFeedforward(attention_dim, position_wise_mlp_dim)
+            )(MHA_out)
+            E_out = tf.keras.layers.LayerNormalization(axis=-1)(E_out)
+
+        # Postprocess TrXL output with another hidden layer and compute values.
+        logits = tf.keras.layers.Dense(
+            self.num_outputs, activation=tf.keras.activations.linear, name="logits"
+        )(E_out)
+
+        self.base_model = tf.keras.models.Model([inputs], [logits])
+
+    @override(RecurrentNetwork)
+    def forward_rnn(
+        self, inputs: TensorType, state: List[TensorType], seq_lens: TensorType
+    ) -> (TensorType, List[TensorType]):
+        # To make Attention work with current RLlib's ModelV2 API:
+        # We assume `state` is the history of L recent observations (all
+        # concatenated into one tensor) and append the current inputs to the
+        # end and only keep the most recent (up to `max_seq_len`). This allows
+        # us to deal with timestep-wise inference and full sequence training
+        # within the same logic.
+        observations = state[0]
+        observations = tf.concat((observations, inputs), axis=1)[:, -self.max_seq_len :]
+        logits = self.base_model([observations])
+        T = tf.shape(inputs)[1]  # Length of input segment (time).
+        logits = logits[:, -T:]
+
+        return logits, [observations]
+
+    @override(RecurrentNetwork)
+    def get_initial_state(self) -> List[np.ndarray]:
+        # State is the T last observations concat'd together into one Tensor.
+        # Plus all Transformer blocks' E(l) outputs concat'd together (up to
+        # tau timesteps).
+        return [np.zeros((self.max_seq_len, self.obs_dim), np.float32)]
+
+
+class GTrXLNet(RecurrentNetwork):
+    """A GTrXL net Model described in [2].
+
+    This is still in an experimental phase.
+    Can be used as a drop-in replacement for LSTMs in PPO and IMPALA.
+
+    To use this network as a replacement for an RNN, configure your Algorithm
+    as follows:
+
+    Examples:
+        >> config["model"]["custom_model"] = GTrXLNet
+        >> config["model"]["max_seq_len"] = 10
+        >> config["model"]["custom_model_config"] = {
+        >>     num_transformer_units=1,
+        >>     attention_dim=32,
+        >>     num_heads=2,
+        >>     memory_inference=100,
+        >>     memory_training=50,
+        >>     etc..
+        >> }
+    """
+
+    def __init__(
+        self,
+        observation_space: gym.spaces.Space,
+        action_space: gym.spaces.Space,
+        num_outputs: Optional[int],
+        model_config: ModelConfigDict,
+        name: str,
+        *,
+        num_transformer_units: int = 1,
+        attention_dim: int = 64,
+        num_heads: int = 2,
+        memory_inference: int = 50,
+        memory_training: int = 50,
+        head_dim: int = 32,
+        position_wise_mlp_dim: int = 32,
+        init_gru_gate_bias: float = 2.0,
+    ):
+        """Initializes a GTrXLNet instance.
+
+        Args:
+            num_transformer_units: The number of Transformer repeats to
+                use (denoted L in [2]).
+            attention_dim: The input and output dimensions of one
+                Transformer unit.
+            num_heads: The number of attention heads to use in parallel.
+                Denoted as `H` in [3].
+            memory_inference: The number of timesteps to concat (time
+                axis) and feed into the next transformer unit as inference
+                input. The first transformer unit will receive this number of
+                past observations (plus the current one), instead.
+            memory_training: The number of timesteps to concat (time
+                axis) and feed into the next transformer unit as training
+                input (plus the actual input sequence of len=max_seq_len).
+                The first transformer unit will receive this number of
+                past observations (plus the input sequence), instead.
+            head_dim: The dimension of a single(!) attention head within
+                a multi-head attention unit. Denoted as `d` in [3].
+            position_wise_mlp_dim: The dimension of the hidden layer
+                within the position-wise MLP (after the multi-head attention
+                block within one Transformer unit). This is the size of the
+                first of the two layers within the PositionwiseFeedforward. The
+                second layer always has size=`attention_dim`.
+            init_gru_gate_bias: Initial bias values for the GRU gates
+                (two GRUs per Transformer unit, one after the MHA, one after
+                the position-wise MLP).
+        """
+        super().__init__(
+            observation_space, action_space, num_outputs, model_config, name
+        )
+
+        self.num_transformer_units = num_transformer_units
+        self.attention_dim = attention_dim
+        self.num_heads = num_heads
+        self.memory_inference = memory_inference
+        self.memory_training = memory_training
+        self.head_dim = head_dim
+        self.max_seq_len = model_config["max_seq_len"]
+        self.obs_dim = observation_space.shape[0]
+
+        # Raw observation input (plus (None) time axis).
+        input_layer = tf.keras.layers.Input(shape=(None, self.obs_dim), name="inputs")
+        memory_ins = [
+            tf.keras.layers.Input(
+                shape=(None, self.attention_dim),
+                dtype=tf.float32,
+                name="memory_in_{}".format(i),
+            )
+            for i in range(self.num_transformer_units)
+        ]
+
+        # Map observation dim to input/output transformer (attention) dim.
+        E_out = tf.keras.layers.Dense(self.attention_dim)(input_layer)
+        # Output, collected and concat'd to build the internal, tau-len
+        # Memory units used for additional contextual information.
+        memory_outs = [E_out]
+
+        # 2) Create L Transformer blocks according to [2].
+        for i in range(self.num_transformer_units):
+            # RelativeMultiHeadAttention part.
+            MHA_out = SkipConnection(
+                RelativeMultiHeadAttention(
+                    out_dim=self.attention_dim,
+                    num_heads=num_heads,
+                    head_dim=head_dim,
+                    input_layernorm=True,
+                    output_activation=tf.nn.relu,
+                ),
+                fan_in_layer=GRUGate(init_gru_gate_bias),
+                name="mha_{}".format(i + 1),
+            )(E_out, memory=memory_ins[i])
+            # Position-wise MLP part.
+            E_out = SkipConnection(
+                tf.keras.Sequential(
+                    (
+                        tf.keras.layers.LayerNormalization(axis=-1),
+                        PositionwiseFeedforward(
+                            out_dim=self.attention_dim,
+                            hidden_dim=position_wise_mlp_dim,
+                            output_activation=tf.nn.relu,
+                        ),
+                    )
+                ),
+                fan_in_layer=GRUGate(init_gru_gate_bias),
+                name="pos_wise_mlp_{}".format(i + 1),
+            )(MHA_out)
+            # Output of position-wise MLP == E(l-1), which is concat'd
+            # to the current Mem block (M(l-1)) to yield E~(l-1), which is then
+            # used by the next transformer block.
+            memory_outs.append(E_out)
+
+        self._logits = None
+        self._value_out = None
+
+        # Postprocess TrXL output with another hidden layer and compute values.
+        if num_outputs is not None:
+            self._logits = tf.keras.layers.Dense(
+                self.num_outputs, activation=None, name="logits"
+            )(E_out)
+            values_out = tf.keras.layers.Dense(1, activation=None, name="values")(E_out)
+            outs = [self._logits, values_out]
+        else:
+            outs = [E_out]
+            self.num_outputs = self.attention_dim
+
+        self.trxl_model = tf.keras.Model(
+            inputs=[input_layer] + memory_ins, outputs=outs + memory_outs[:-1]
+        )
+
+        self.trxl_model.summary()
+
+        # __sphinx_doc_begin__
+        # Setup trajectory views (`memory-inference` x past memory outs).
+        for i in range(self.num_transformer_units):
+            space = Box(-1.0, 1.0, shape=(self.attention_dim,))
+            self.view_requirements["state_in_{}".format(i)] = ViewRequirement(
+                "state_out_{}".format(i),
+                shift="-{}:-1".format(self.memory_inference),
+                # Repeat the incoming state every max-seq-len times.
+                batch_repeat_value=self.max_seq_len,
+                space=space,
+            )
+            self.view_requirements["state_out_{}".format(i)] = ViewRequirement(
+                space=space, used_for_training=False
+            )
+        # __sphinx_doc_end__
+
+    @override(ModelV2)
+    def forward(
+        self, input_dict, state: List[TensorType], seq_lens: TensorType
+    ) -> (TensorType, List[TensorType]):
+        assert seq_lens is not None
+
+        # Add the time dim to observations.
+        B = tf.shape(seq_lens)[0]
+        observations = input_dict[SampleBatch.OBS]
+
+        shape = tf.shape(observations)
+        T = shape[0] // B
+        observations = tf.reshape(observations, tf.concat([[-1, T], shape[1:]], axis=0))
+
+        all_out = self.trxl_model([observations] + state)
+
+        if self._logits is not None:
+            out = tf.reshape(all_out[0], [-1, self.num_outputs])
+            self._value_out = all_out[1]
+            memory_outs = all_out[2:]
+        else:
+            out = tf.reshape(all_out[0], [-1, self.attention_dim])
+            memory_outs = all_out[1:]
+
+        return out, [tf.reshape(m, [-1, self.attention_dim]) for m in memory_outs]
+
+    @override(RecurrentNetwork)
+    def get_initial_state(self) -> List[np.ndarray]:
+        return [
+            tf.zeros(self.view_requirements["state_in_{}".format(i)].space.shape)
+            for i in range(self.num_transformer_units)
+        ]
+
+    @override(ModelV2)
+    def value_function(self) -> TensorType:
+        return tf.reshape(self._value_out, [-1])
+
+
+class AttentionWrapper(TFModelV2):
+    """GTrXL wrapper serving as interface for ModelV2s that set use_attention."""
+
+    def __init__(
+        self,
+        obs_space: gym.spaces.Space,
+        action_space: gym.spaces.Space,
+        num_outputs: int,
+        model_config: ModelConfigDict,
+        name: str,
+    ):
+        if log_once("attention_wrapper_tf_deprecation"):
+            deprecation_warning(
+                old="ray.rllib.models.tf.attention_net.AttentionWrapper"
+            )
+        super().__init__(obs_space, action_space, None, model_config, name)
+
+        self.use_n_prev_actions = model_config["attention_use_n_prev_actions"]
+        self.use_n_prev_rewards = model_config["attention_use_n_prev_rewards"]
+
+        self.action_space_struct = get_base_struct_from_space(self.action_space)
+        self.action_dim = 0
+
+        for space in tree.flatten(self.action_space_struct):
+            if isinstance(space, Discrete):
+                self.action_dim += space.n
+            elif isinstance(space, MultiDiscrete):
+                self.action_dim += np.sum(space.nvec)
+            elif space.shape is not None:
+                self.action_dim += int(np.prod(space.shape))
+            else:
+                self.action_dim += int(len(space))
+
+        # Add prev-action/reward nodes to input to LSTM.
+        if self.use_n_prev_actions:
+            self.num_outputs += self.use_n_prev_actions * self.action_dim
+        if self.use_n_prev_rewards:
+            self.num_outputs += self.use_n_prev_rewards
+
+        cfg = model_config
+
+        self.attention_dim = cfg["attention_dim"]
+
+        if self.num_outputs is not None:
+            in_space = gym.spaces.Box(
+                float("-inf"), float("inf"), shape=(self.num_outputs,), dtype=np.float32
+            )
+        else:
+            in_space = obs_space
+
+        # Construct GTrXL sub-module w/ num_outputs=None (so it does not
+        # create a logits/value output; we'll do this ourselves in this wrapper
+        # here).
+        self.gtrxl = GTrXLNet(
+            in_space,
+            action_space,
+            None,
+            model_config,
+            "gtrxl",
+            num_transformer_units=cfg["attention_num_transformer_units"],
+            attention_dim=self.attention_dim,
+            num_heads=cfg["attention_num_heads"],
+            head_dim=cfg["attention_head_dim"],
+            memory_inference=cfg["attention_memory_inference"],
+            memory_training=cfg["attention_memory_training"],
+            position_wise_mlp_dim=cfg["attention_position_wise_mlp_dim"],
+            init_gru_gate_bias=cfg["attention_init_gru_gate_bias"],
+        )
+
+        # `self.num_outputs` right now is the number of nodes coming from the
+        # attention net.
+        input_ = tf.keras.layers.Input(shape=(self.gtrxl.num_outputs,))
+
+        # Set final num_outputs to correct value (depending on action space).
+        self.num_outputs = num_outputs
+
+        # Postprocess GTrXL output with another hidden layer and compute
+        # values.
+        out = tf.keras.layers.Dense(self.num_outputs, activation=None)(input_)
+        self._logits_branch = tf.keras.models.Model([input_], [out])
+
+        out = tf.keras.layers.Dense(1, activation=None)(input_)
+        self._value_branch = tf.keras.models.Model([input_], [out])
+
+        self.view_requirements = self.gtrxl.view_requirements
+        self.view_requirements["obs"].space = self.obs_space
+
+        # Add prev-a/r to this model's view, if required.
+        if self.use_n_prev_actions:
+            self.view_requirements[SampleBatch.PREV_ACTIONS] = ViewRequirement(
+                SampleBatch.ACTIONS,
+                space=self.action_space,
+                shift="-{}:-1".format(self.use_n_prev_actions),
+            )
+        if self.use_n_prev_rewards:
+            self.view_requirements[SampleBatch.PREV_REWARDS] = ViewRequirement(
+                SampleBatch.REWARDS, shift="-{}:-1".format(self.use_n_prev_rewards)
+            )
+
+    @override(RecurrentNetwork)
+    def forward(
+        self,
+        input_dict: Dict[str, TensorType],
+        state: List[TensorType],
+        seq_lens: TensorType,
+    ) -> (TensorType, List[TensorType]):
+        assert seq_lens is not None
+        # Push obs through "unwrapped" net's `forward()` first.
+        wrapped_out, _ = self._wrapped_forward(input_dict, [], None)
+
+        # Concat. prev-action/reward if required.
+        prev_a_r = []
+
+        # Prev actions.
+        if self.use_n_prev_actions:
+            prev_n_actions = input_dict[SampleBatch.PREV_ACTIONS]
+            # If actions are not processed yet (in their original form as
+            # have been sent to environment):
+            # Flatten/one-hot into 1D array.
+            if self.model_config["_disable_action_flattening"]:
+                # Merge prev n actions into flat tensor.
+                flat = flatten_inputs_to_1d_tensor(
+                    prev_n_actions,
+                    spaces_struct=self.action_space_struct,
+                    time_axis=True,
+                )
+                # Fold time-axis into flattened data.
+                flat = tf.reshape(flat, [tf.shape(flat)[0], -1])
+                prev_a_r.append(flat)
+            # If actions are already flattened (but not one-hot'd yet!),
+            # one-hot discrete/multi-discrete actions here and concatenate the
+            # n most recent actions together.
+            else:
+                if isinstance(self.action_space, Discrete):
+                    for i in range(self.use_n_prev_actions):
+                        prev_a_r.append(
+                            one_hot(prev_n_actions[:, i], self.action_space)
+                        )
+                elif isinstance(self.action_space, MultiDiscrete):
+                    for i in range(
+                        0, self.use_n_prev_actions, self.action_space.shape[0]
+                    ):
+                        prev_a_r.append(
+                            one_hot(
+                                tf.cast(
+                                    prev_n_actions[
+                                        :, i : i + self.action_space.shape[0]
+                                    ],
+                                    tf.float32,
+                                ),
+                                space=self.action_space,
+                            )
+                        )
+                else:
+                    prev_a_r.append(
+                        tf.reshape(
+                            tf.cast(prev_n_actions, tf.float32),
+                            [-1, self.use_n_prev_actions * self.action_dim],
+                        )
+                    )
+        # Prev rewards.
+        if self.use_n_prev_rewards:
+            prev_a_r.append(
+                tf.reshape(
+                    tf.cast(input_dict[SampleBatch.PREV_REWARDS], tf.float32),
+                    [-1, self.use_n_prev_rewards],
+                )
+            )
+
+        # Concat prev. actions + rewards to the "main" input.
+        if prev_a_r:
+            wrapped_out = tf.concat([wrapped_out] + prev_a_r, axis=1)
+
+        # Then through our GTrXL.
+        input_dict["obs_flat"] = input_dict["obs"] = wrapped_out
+
+        self._features, memory_outs = self.gtrxl(input_dict, state, seq_lens)
+        model_out = self._logits_branch(self._features)
+        return model_out, memory_outs
+
+    @override(ModelV2)
+    def value_function(self) -> TensorType:
+        assert self._features is not None, "Must call forward() first!"
+        return tf.reshape(self._value_branch(self._features), [-1])
+
+    @override(ModelV2)
+    def get_initial_state(self) -> Union[List[np.ndarray], List[TensorType]]:
+        return [
+            np.zeros(self.gtrxl.view_requirements["state_in_{}".format(i)].space.shape)
+            for i in range(self.gtrxl.num_transformer_units)
+        ]
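
`AttentionWrapper` is normally engaged via model-config flags rather than instantiated directly. A sketch of an old-API-stack config exercising the `attention_*` keys its constructor reads (all values illustrative, not a canonical recipe):

from ray.rllib.algorithms.ppo import PPOConfig

config = (
    PPOConfig()
    .environment("CartPole-v1")
    .training(
        model={
            "use_attention": True,  # wraps the default net in AttentionWrapper
            "max_seq_len": 10,
            "attention_num_transformer_units": 1,
            "attention_dim": 32,
            "attention_num_heads": 2,
            "attention_head_dim": 32,
            "attention_memory_inference": 50,
            "attention_memory_training": 50,
            "attention_position_wise_mlp_dim": 32,
            "attention_init_gru_gate_bias": 2.0,
        }
    )
)
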
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/complex_input_net.py ADDED
@@ -0,0 +1,214 @@
+from gymnasium.spaces import Box, Discrete, MultiDiscrete
+import numpy as np
+import tree  # pip install dm_tree
+
+from ray.rllib.models.catalog import ModelCatalog
+from ray.rllib.models.modelv2 import ModelV2, restore_original_dimensions
+from ray.rllib.models.tf.misc import normc_initializer
+from ray.rllib.models.tf.tf_modelv2 import TFModelV2
+from ray.rllib.models.utils import get_filter_config
+from ray.rllib.policy.sample_batch import SampleBatch
+from ray.rllib.utils.annotations import OldAPIStack, override
+from ray.rllib.utils.framework import try_import_tf
+from ray.rllib.utils.spaces.space_utils import flatten_space
+from ray.rllib.utils.tf_utils import one_hot
+
+tf1, tf, tfv = try_import_tf()
+
+
+# __sphinx_doc_begin__
+@OldAPIStack
+class ComplexInputNetwork(TFModelV2):
+    """TFModelV2 concat'ing CNN outputs to flat input(s), followed by FC(s).
+
+    Note: This model should be used for complex (Dict or Tuple) observation
+    spaces that have one or more image components.
+
+    The data flow is as follows:
+
+    `obs` (e.g. Tuple[img0, img1, discrete0]) -> `CNN0 + CNN1 + ONE-HOT`
+    `CNN0 + CNN1 + ONE-HOT` -> concat all flat outputs -> `out`
+    `out` -> (optional) FC-stack -> `out2`
+    `out2` -> action (logits) and value heads.
+    """
+
+    def __init__(self, obs_space, action_space, num_outputs, model_config, name):
+
+        self.original_space = (
+            obs_space.original_space
+            if hasattr(obs_space, "original_space")
+            else obs_space
+        )
+
+        self.processed_obs_space = (
+            self.original_space
+            if model_config.get("_disable_preprocessor_api")
+            else obs_space
+        )
+        super().__init__(
+            self.original_space, action_space, num_outputs, model_config, name
+        )
+
+        self.flattened_input_space = flatten_space(self.original_space)
+
+        # Build the CNN(s) given obs_space's image components.
+        self.cnns = {}
+        self.one_hot = {}
+        self.flatten_dims = {}
+        self.flatten = {}
+        concat_size = 0
+        for i, component in enumerate(self.flattened_input_space):
+            # Image space.
+            if len(component.shape) == 3 and isinstance(component, Box):
+                config = {
+                    "conv_filters": model_config["conv_filters"]
+                    if "conv_filters" in model_config
+                    else get_filter_config(component.shape),
+                    "conv_activation": model_config.get("conv_activation"),
+                    "post_fcnet_hiddens": [],
+                }
+                self.cnns[i] = ModelCatalog.get_model_v2(
+                    component,
+                    action_space,
+                    num_outputs=None,
+                    model_config=config,
+                    framework="tf",
+                    name="cnn_{}".format(i),
+                )
+                concat_size += int(self.cnns[i].num_outputs)
+            # Discrete|MultiDiscrete inputs -> One-hot encode.
+            elif isinstance(component, (Discrete, MultiDiscrete)):
+                if isinstance(component, Discrete):
+                    size = component.n
+                else:
+                    size = np.sum(component.nvec)
+                config = {
+                    "fcnet_hiddens": model_config["fcnet_hiddens"],
+                    "fcnet_activation": model_config.get("fcnet_activation"),
+                    "post_fcnet_hiddens": [],
+                }
+                self.one_hot[i] = ModelCatalog.get_model_v2(
+                    Box(-1.0, 1.0, (size,), np.float32),
+                    action_space,
+                    num_outputs=None,
+                    model_config=config,
+                    framework="tf",
+                    name="one_hot_{}".format(i),
+                )
+                concat_size += int(self.one_hot[i].num_outputs)
+            # Everything else (1D Box).
+            else:
+                size = int(np.prod(component.shape))
+                config = {
+                    "fcnet_hiddens": model_config["fcnet_hiddens"],
+                    "fcnet_activation": model_config.get("fcnet_activation"),
+                    "post_fcnet_hiddens": [],
+                }
+                self.flatten[i] = ModelCatalog.get_model_v2(
+                    Box(-1.0, 1.0, (size,), np.float32),
+                    action_space,
+                    num_outputs=None,
+                    model_config=config,
+                    framework="tf",
+                    name="flatten_{}".format(i),
+                )
+                self.flatten_dims[i] = size
+                concat_size += int(self.flatten[i].num_outputs)
+
+        # Optional post-concat FC-stack.
+        post_fc_stack_config = {
+            "fcnet_hiddens": model_config.get("post_fcnet_hiddens", []),
+            "fcnet_activation": model_config.get("post_fcnet_activation", "relu"),
+        }
+        self.post_fc_stack = ModelCatalog.get_model_v2(
+            Box(float("-inf"), float("inf"), shape=(concat_size,), dtype=np.float32),
+            self.action_space,
+            None,
+            post_fc_stack_config,
+            framework="tf",
+            name="post_fc_stack",
+        )
+
+        # Actions and value heads.
+        self.logits_and_value_model = None
+        self._value_out = None
+        if num_outputs:
+            # Action-distribution head.
+            concat_layer = tf.keras.layers.Input((self.post_fc_stack.num_outputs,))
+            logits_layer = tf.keras.layers.Dense(
+                num_outputs,
+                activation=None,
+                kernel_initializer=normc_initializer(0.01),
+                name="logits",
+            )(concat_layer)
+
+            # Create the value branch model.
+            value_layer = tf.keras.layers.Dense(
+                1,
+                activation=None,
+                kernel_initializer=normc_initializer(0.01),
+                name="value_out",
+            )(concat_layer)
+            self.logits_and_value_model = tf.keras.models.Model(
+                concat_layer, [logits_layer, value_layer]
+            )
+        else:
+            self.num_outputs = self.post_fc_stack.num_outputs
+
+    @override(ModelV2)
+    def forward(self, input_dict, state, seq_lens):
+        if SampleBatch.OBS in input_dict and "obs_flat" in input_dict:
+            orig_obs = input_dict[SampleBatch.OBS]
+        else:
+            orig_obs = restore_original_dimensions(
+                input_dict[SampleBatch.OBS], self.processed_obs_space, tensorlib="tf"
+            )
+        # Push image observations through our CNNs.
+        outs = []
+        for i, component in enumerate(tree.flatten(orig_obs)):
+            if i in self.cnns:
+                cnn_out, _ = self.cnns[i](SampleBatch({SampleBatch.OBS: component}))
+                outs.append(cnn_out)
+            elif i in self.one_hot:
+                if "int" in component.dtype.name:
+                    one_hot_in = {
+                        SampleBatch.OBS: one_hot(
+                            component, self.flattened_input_space[i]
+                        )
+                    }
+                else:
+                    one_hot_in = {SampleBatch.OBS: component}
+                one_hot_out, _ = self.one_hot[i](SampleBatch(one_hot_in))
+                outs.append(one_hot_out)
+            else:
+                nn_out, _ = self.flatten[i](
+                    SampleBatch(
+                        {
+                            SampleBatch.OBS: tf.cast(
+                                tf.reshape(component, [-1, self.flatten_dims[i]]),
+                                tf.float32,
+                            )
+                        }
+                    )
+                )
+                outs.append(nn_out)
+        # Concat all outputs and the non-image inputs.
+        out = tf.concat(outs, axis=1)
+        # Push through (optional) FC-stack (this may be an empty stack).
+        out, _ = self.post_fc_stack(SampleBatch({SampleBatch.OBS: out}))
+
+        # No logits/value branches.
+        if not self.logits_and_value_model:
+            return out, []
+
+        # Logits- and value branches.
+        logits, values = self.logits_and_value_model(out)
+        self._value_out = tf.reshape(values, [-1])
+        return logits, []
+
+    @override(ModelV2)
+    def value_function(self):
+        return self._value_out
+
+
+# __sphinx_doc_end__
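
To make the docstring's data flow concrete, a small sketch of an observation space this model targets (shapes arbitrary): the 3D Box goes through a CNN branch, the Discrete through the one-hot branch, and the flat outputs are concatenated before the optional post-FC stack.

import gymnasium as gym
import numpy as np

obs_space = gym.spaces.Tuple(
    (
        gym.spaces.Box(0.0, 1.0, shape=(84, 84, 3), dtype=np.float32),  # img0 -> CNN0
        gym.spaces.Discrete(4),  # discrete0 -> one-hot branch
    )
)
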
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/fcnet.py ADDED
@@ -0,0 +1,148 @@
+import numpy as np
+import gymnasium as gym
+from typing import Dict
+
+from ray.rllib.models.tf.misc import normc_initializer
+from ray.rllib.models.tf.tf_modelv2 import TFModelV2
+from ray.rllib.models.utils import get_activation_fn
+from ray.rllib.utils.annotations import OldAPIStack
+from ray.rllib.utils.framework import try_import_tf
+from ray.rllib.utils.typing import TensorType, List, ModelConfigDict
+
+tf1, tf, tfv = try_import_tf()
+
+
+@OldAPIStack
+class FullyConnectedNetwork(TFModelV2):
+    """Generic fully connected network implemented in ModelV2 API."""
+
+    def __init__(
+        self,
+        obs_space: gym.spaces.Space,
+        action_space: gym.spaces.Space,
+        num_outputs: int,
+        model_config: ModelConfigDict,
+        name: str,
+    ):
+        super(FullyConnectedNetwork, self).__init__(
+            obs_space, action_space, num_outputs, model_config, name
+        )
+
+        hiddens = list(model_config.get("fcnet_hiddens", [])) + list(
+            model_config.get("post_fcnet_hiddens", [])
+        )
+        activation = model_config.get("fcnet_activation")
+        if not model_config.get("fcnet_hiddens", []):
+            activation = model_config.get("post_fcnet_activation")
+        activation = get_activation_fn(activation)
+        no_final_linear = model_config.get("no_final_linear")
+        vf_share_layers = model_config.get("vf_share_layers")
+        free_log_std = model_config.get("free_log_std")
+
+        # Generate free-floating bias variables for the second half of
+        # the outputs.
+        if free_log_std:
+            assert num_outputs % 2 == 0, (
+                "num_outputs must be divisible by two",
+                num_outputs,
+            )
+            num_outputs = num_outputs // 2
+            self.log_std_var = tf.Variable(
+                [0.0] * num_outputs, dtype=tf.float32, name="log_std"
+            )
+
+        # We are using obs_flat, so take the flattened shape as input.
+        inputs = tf.keras.layers.Input(
+            shape=(int(np.prod(obs_space.shape)),), name="observations"
+        )
+        # Last hidden layer output (before logits outputs).
+        last_layer = inputs
+        # The action distribution outputs.
+        logits_out = None
+        i = 1
+
+        # Create layers 0 to second-last.
+        for size in hiddens[:-1]:
+            last_layer = tf.keras.layers.Dense(
+                size,
+                name="fc_{}".format(i),
+                activation=activation,
+                kernel_initializer=normc_initializer(1.0),
+            )(last_layer)
+            i += 1
+
+        # The last layer is adjusted to be of size num_outputs, but it's a
+        # layer with activation.
+        if no_final_linear and num_outputs:
+            logits_out = tf.keras.layers.Dense(
+                num_outputs,
+                name="fc_out",
+                activation=activation,
+                kernel_initializer=normc_initializer(1.0),
+            )(last_layer)
+        # Finish the layers with the provided sizes (`hiddens`), plus -
+        # iff num_outputs > 0 - a last linear layer of size num_outputs.
+        else:
+            if len(hiddens) > 0:
+                last_layer = tf.keras.layers.Dense(
+                    hiddens[-1],
+                    name="fc_{}".format(i),
+                    activation=activation,
+                    kernel_initializer=normc_initializer(1.0),
+                )(last_layer)
+            if num_outputs:
+                logits_out = tf.keras.layers.Dense(
+                    num_outputs,
+                    name="fc_out",
+                    activation=None,
+                    kernel_initializer=normc_initializer(0.01),
+                )(last_layer)
+            # Adjust num_outputs to be the number of nodes in the last layer.
+            else:
+                self.num_outputs = ([int(np.prod(obs_space.shape))] + hiddens[-1:])[-1]
+
+        # Concat the log std vars to the end of the state-dependent means.
+        if free_log_std and logits_out is not None:
+
+            def tiled_log_std(x):
+                return tf.tile(tf.expand_dims(self.log_std_var, 0), [tf.shape(x)[0], 1])
+
+            log_std_out = tf.keras.layers.Lambda(tiled_log_std)(inputs)
+            logits_out = tf.keras.layers.Concatenate(axis=1)([logits_out, log_std_out])
+
+        last_vf_layer = None
+        if not vf_share_layers:
+            # Build a parallel set of hidden layers for the value net.
+            last_vf_layer = inputs
+            i = 1
+            for size in hiddens:
+                last_vf_layer = tf.keras.layers.Dense(
+                    size,
+                    name="fc_value_{}".format(i),
+                    activation=activation,
+                    kernel_initializer=normc_initializer(1.0),
+                )(last_vf_layer)
+                i += 1
+
+        value_out = tf.keras.layers.Dense(
+            1,
+            name="value_out",
+            activation=None,
+            kernel_initializer=normc_initializer(0.01),
+        )(last_vf_layer if last_vf_layer is not None else last_layer)
+
+        self.base_model = tf.keras.Model(
+            inputs, [(logits_out if logits_out is not None else last_layer), value_out]
+        )
+
+    def forward(
+        self,
+        input_dict: Dict[str, TensorType],
+        state: List[TensorType],
+        seq_lens: TensorType,
+    ) -> (TensorType, List[TensorType]):
+        model_out, self._value_out = self.base_model(input_dict["obs_flat"])
+        return model_out, state
+
+    def value_function(self) -> TensorType:
+        return tf.reshape(self._value_out, [-1])
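
A construction sketch for the network above (spaces and sizes arbitrary; `MODEL_DEFAULTS` supplies the remaining config keys the constructor reads with `.get()`):

import gymnasium as gym
import numpy as np
from ray.rllib.models.catalog import MODEL_DEFAULTS
from ray.rllib.models.tf.fcnet import FullyConnectedNetwork

# Build a standalone 64x64-tanh policy/value net over a 4-dim Box observation.
model = FullyConnectedNetwork(
    obs_space=gym.spaces.Box(-1.0, 1.0, shape=(4,), dtype=np.float32),
    action_space=gym.spaces.Discrete(2),
    num_outputs=2,
    model_config=dict(MODEL_DEFAULTS, fcnet_hiddens=[64, 64], fcnet_activation="tanh"),
    name="fcnet_example",
)
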
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (683 Bytes).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/gru_gate.cpython-310.pyc ADDED
Binary file (2.21 kB).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/multi_head_attention.cpython-310.pyc ADDED
Binary file (2.3 kB).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/relative_multi_head_attention.cpython-310.pyc ADDED
Binary file (4.71 kB).

infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/__pycache__/skip_connection.cpython-310.pyc ADDED
Binary file (1.85 kB).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/layers/relative_multi_head_attention.py ADDED
@@ -0,0 +1,147 @@
+from typing import Optional
+
+from ray.rllib.utils.framework import try_import_tf
+from ray.rllib.utils.typing import TensorType
+from ray.rllib.utils.deprecation import deprecation_warning
+from ray.util import log_once
+
+tf1, tf, tfv = try_import_tf()
+
+
+class RelativeMultiHeadAttention(tf.keras.layers.Layer if tf else object):
+    """A RelativeMultiHeadAttention layer as described in [3].
+
+    Uses segment level recurrence with state reuse.
+    """
+
+    def __init__(
+        self,
+        out_dim: int,
+        num_heads: int,
+        head_dim: int,
+        input_layernorm: bool = False,
+        output_activation: Optional["tf.nn.activation"] = None,
+        **kwargs
+    ):
+        """Initializes a RelativeMultiHeadAttention keras Layer object.
+
+        Args:
+            out_dim: The output dimensions of the multi-head attention
+                unit.
+            num_heads: The number of attention heads to use.
+                Denoted `H` in [2].
+            head_dim: The dimension of a single(!) attention head within
+                a multi-head attention unit. Denoted as `d` in [3].
+            input_layernorm: Whether to prepend a LayerNorm before
+                everything else. Should be True for building a GTrXL.
+            output_activation (Optional[tf.nn.activation]): Optional tf.nn
+                activation function. Should be relu for GTrXL.
+            **kwargs:
+        """
+        if log_once("relative_multi_head_attention"):
+            deprecation_warning(
+                old="rllib.models.tf.layers.RelativeMultiHeadAttention",
+            )
+        super().__init__(**kwargs)
+
+        # No bias or non-linearity.
+        self._num_heads = num_heads
+        self._head_dim = head_dim
+        # 3=Query, key, and value inputs.
+        self._qkv_layer = tf.keras.layers.Dense(
+            3 * num_heads * head_dim, use_bias=False
+        )
+        self._linear_layer = tf.keras.layers.TimeDistributed(
+            tf.keras.layers.Dense(out_dim, use_bias=False, activation=output_activation)
+        )
+
+        self._uvar = self.add_weight(shape=(num_heads, head_dim))
+        self._vvar = self.add_weight(shape=(num_heads, head_dim))
+
+        # Constant (non-trainable) sinusoid rel pos encoding matrix, which
+        # depends on this incoming time dimension.
+        # For inference, we prepend the memory to the current timestep's
+        # input: Tau + 1. For training, we prepend the memory to the input
+        # sequence: Tau + T.
+        self._pos_embedding = PositionalEmbedding(out_dim)
+        self._pos_proj = tf.keras.layers.Dense(num_heads * head_dim, use_bias=False)
+
+        self._input_layernorm = None
+        if input_layernorm:
+            self._input_layernorm = tf.keras.layers.LayerNormalization(axis=-1)
+
+    def call(
+        self, inputs: TensorType, memory: Optional[TensorType] = None
+    ) -> TensorType:
+        T = tf.shape(inputs)[1]  # length of segment (time)
+        H = self._num_heads  # number of attention heads
+        d = self._head_dim  # attention head dimension
+
+        # Add previous memory chunk (as const, w/o gradient) to input.
+        # Tau (number of (prev) time slices in each memory chunk).
+        Tau = tf.shape(memory)[1]
+        inputs = tf.concat([tf.stop_gradient(memory), inputs], axis=1)
+
+        # Apply the Layer-Norm.
+        if self._input_layernorm is not None:
+            inputs = self._input_layernorm(inputs)
+
+        qkv = self._qkv_layer(inputs)
+
+        queries, keys, values = tf.split(qkv, 3, -1)
+        # Cut out memory timesteps from query.
+        queries = queries[:, -T:]
+
+        # Splitting up queries into per-head dims (d).
+        queries = tf.reshape(queries, [-1, T, H, d])
+        keys = tf.reshape(keys, [-1, Tau + T, H, d])
+        values = tf.reshape(values, [-1, Tau + T, H, d])
+
+        R = self._pos_embedding(Tau + T)
+        R = self._pos_proj(R)
+        R = tf.reshape(R, [Tau + T, H, d])
+
+        # b=batch
+        # i and j=time indices (i=max-timesteps (inputs); j=Tau memory space)
+        # h=head
+        # d=head-dim (over which we will reduce-sum)
+        score = tf.einsum("bihd,bjhd->bijh", queries + self._uvar, keys)
+        pos_score = tf.einsum("bihd,jhd->bijh", queries + self._vvar, R)
+        score = score + self.rel_shift(pos_score)
+        score = score / d**0.5
+
+        # Causal mask of the same length as the sequence.
+        mask = tf.sequence_mask(tf.range(Tau + 1, Tau + T + 1), dtype=score.dtype)
+        mask = mask[None, :, :, None]
+
+        masked_score = score * mask + 1e30 * (mask - 1.0)
+        wmat = tf.nn.softmax(masked_score, axis=2)
+
+        out = tf.einsum("bijh,bjhd->bihd", wmat, values)
+        out = tf.reshape(out, tf.concat((tf.shape(out)[:2], [H * d]), axis=0))
+        return self._linear_layer(out)
+
+    @staticmethod
+    def rel_shift(x: TensorType) -> TensorType:
+        # Transposed version of the shift approach described in [3].
+        # https://github.com/kimiyoung/transformer-xl/blob/
+        # 44781ed21dbaec88b280f74d9ae2877f52b492a5/tf/model.py#L31
+        x_size = tf.shape(x)
+
+        x = tf.pad(x, [[0, 0], [0, 0], [1, 0], [0, 0]])
+        x = tf.reshape(x, [x_size[0], x_size[2] + 1, x_size[1], x_size[3]])
+        x = x[:, 1:, :, :]
+        x = tf.reshape(x, x_size)
+
+        return x
+
+
+class PositionalEmbedding(tf.keras.layers.Layer if tf else object):
+    def __init__(self, out_dim, **kwargs):
+        super().__init__(**kwargs)
+        self.inverse_freq = 1 / (10000 ** (tf.range(0, out_dim, 2.0) / out_dim))
+
+    def call(self, seq_length):
+        pos_offsets = tf.cast(tf.range(seq_length - 1, -1, -1), tf.float32)
+        inputs = pos_offsets[:, None] * self.inverse_freq[None, :]
+        return tf.concat((tf.sin(inputs), tf.cos(inputs)), axis=-1)
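
The `rel_shift` pad-reshape-slice trick above realigns absolute-position scores into relative-position ones without a gather. A NumPy sketch of the same index shuffle on a tiny tensor (shapes arbitrary), useful for checking it in isolation:

import numpy as np

def rel_shift(x):
    # x: [batch, i, j, heads]; same pad -> reshape -> slice trick as above.
    b, i, j, h = x.shape
    x = np.pad(x, [(0, 0), (0, 0), (1, 0), (0, 0)])  # prepend a zero column along j
    x = x.reshape(b, j + 1, i, h)[:, 1:]             # re-block, drop the first row
    return x.reshape(b, i, j, h)

x = np.arange(24, dtype=np.float32).reshape(1, 3, 4, 2)
print(rel_shift(x)[0, :, :, 0])  # each query row's scores shifted by one step
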
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/recurrent_net.py ADDED
@@ -0,0 +1,292 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ import numpy as np
+ import gymnasium as gym
+ from gymnasium.spaces import Discrete, MultiDiscrete
+ import logging
+ import tree  # pip install dm_tree
+ from typing import Dict, List, Tuple
+
+ from ray.rllib.models.modelv2 import ModelV2
+ from ray.rllib.models.tf.tf_modelv2 import TFModelV2
+ from ray.rllib.policy.rnn_sequencing import add_time_dimension
+ from ray.rllib.policy.sample_batch import SampleBatch
+ from ray.rllib.policy.view_requirement import ViewRequirement
+ from ray.rllib.utils.annotations import OldAPIStack, override
+ from ray.rllib.utils.framework import try_import_tf
+ from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space
+ from ray.rllib.utils.tf_utils import flatten_inputs_to_1d_tensor, one_hot
+ from ray.rllib.utils.typing import ModelConfigDict, TensorType
+ from ray.rllib.utils.deprecation import deprecation_warning
+ from ray.util.debug import log_once
+
+ tf1, tf, tfv = try_import_tf()
+ logger = logging.getLogger(__name__)
+
+
+ @OldAPIStack
+ class RecurrentNetwork(TFModelV2):
+     """Helper class to simplify implementing RNN models with TFModelV2.
+
+     Instead of implementing forward(), you can implement forward_rnn(), which
+     takes batches with the time dimension already added.
+
+     Here is an example implementation for a subclass
+     ``MyRNNClass(RecurrentNetwork)``::
+
+         def __init__(self, *args, **kwargs):
+             super(MyRNNClass, self).__init__(*args, **kwargs)
+             cell_size = 256
+
+             # Define input layers.
+             input_layer = tf.keras.layers.Input(
+                 shape=(None, obs_space.shape[0]))
+             state_in_h = tf.keras.layers.Input(shape=(256, ))
+             state_in_c = tf.keras.layers.Input(shape=(256, ))
+             seq_in = tf.keras.layers.Input(shape=(), dtype=tf.int32)
+
+             # Send to LSTM cell.
+             lstm_out, state_h, state_c = tf.keras.layers.LSTM(
+                 cell_size, return_sequences=True, return_state=True,
+                 name="lstm")(
+                     inputs=input_layer,
+                     mask=tf.sequence_mask(seq_in),
+                     initial_state=[state_in_h, state_in_c])
+             output_layer = tf.keras.layers.Dense(...)(lstm_out)
+
+             # Create the RNN model.
+             self.rnn_model = tf.keras.Model(
+                 inputs=[input_layer, seq_in, state_in_h, state_in_c],
+                 outputs=[output_layer, state_h, state_c])
+             self.rnn_model.summary()
+     """
+
+     @override(ModelV2)
+     def forward(
+         self,
+         input_dict: Dict[str, TensorType],
+         state: List[TensorType],
+         seq_lens: TensorType,
+     ) -> Tuple[TensorType, List[TensorType]]:
+         """Adds the time dimension to the batch before calling forward_rnn().
+
+         You should implement forward_rnn() in your subclass."""
+         # Creating an __init__ that acts as a passthrough and adding the
+         # warning there led to errors, probably due to the multiple
+         # inheritance. We encountered the same error when adding the
+         # Deprecated decorator, so the deprecation warning is emitted here.
+         if log_once("recurrent_network_tf"):
+             deprecation_warning(
+                 old="ray.rllib.models.tf.recurrent_net.RecurrentNetwork"
+             )
+         assert seq_lens is not None
+         flat_inputs = input_dict["obs_flat"]
+         inputs = add_time_dimension(
+             padded_inputs=flat_inputs, seq_lens=seq_lens, framework="tf"
+         )
+         output, new_state = self.forward_rnn(
+             inputs,
+             state,
+             seq_lens,
+         )
+         return tf.reshape(output, [-1, self.num_outputs]), new_state
+
+     def forward_rnn(
+         self, inputs: TensorType, state: List[TensorType], seq_lens: TensorType
+     ) -> Tuple[TensorType, List[TensorType]]:
+         """Call the model with the given input tensors and state.
+
+         Args:
+             inputs: Observation tensor with shape [B, T, obs_size].
+             state: List of state tensors, each with shape [B, size].
+             seq_lens: 1D tensor holding input sequence lengths.
+
+         Returns:
+             (outputs, new_state): The model output tensor of shape
+             [B, T, num_outputs] and the list of new state tensors, each with
+             shape [B, size].
+
+         Sample implementation for the ``MyRNNClass`` example::
+
+             def forward_rnn(self, inputs, state, seq_lens):
+                 model_out, h, c = self.rnn_model([inputs, seq_lens] + state)
+                 return model_out, [h, c]
+         """
+         raise NotImplementedError("You must implement this for an RNN model")
+
+     def get_initial_state(self) -> List[TensorType]:
+         """Get the initial recurrent state values for the model.
+
+         Returns:
+             List of np.array objects, if any.
+
+         Sample implementation for the ``MyRNNClass`` example::
+
+             def get_initial_state(self):
+                 return [
+                     np.zeros(self.cell_size, np.float32),
+                     np.zeros(self.cell_size, np.float32),
+                 ]
+         """
+         raise NotImplementedError("You must implement this for an RNN model")
+
+
+ @OldAPIStack
+ class LSTMWrapper(RecurrentNetwork):
+     """An LSTM wrapper serving as an interface for ModelV2s that set use_lstm."""
+
+     def __init__(
+         self,
+         obs_space: gym.spaces.Space,
+         action_space: gym.spaces.Space,
+         num_outputs: int,
+         model_config: ModelConfigDict,
+         name: str,
+     ):
+         super(LSTMWrapper, self).__init__(
+             obs_space, action_space, None, model_config, name
+         )
+         # At this point, self.num_outputs is the number of nodes coming
+         # from the wrapped (underlying) model. In other words, self.num_outputs
+         # is the input size for the LSTM layer.
+         # If None, set it to the (flattened) observation space size.
+         if self.num_outputs is None:
+             self.num_outputs = int(np.prod(self.obs_space.shape))
+
+         self.cell_size = model_config["lstm_cell_size"]
+         self.use_prev_action = model_config["lstm_use_prev_action"]
+         self.use_prev_reward = model_config["lstm_use_prev_reward"]
+
+         self.action_space_struct = get_base_struct_from_space(self.action_space)
+         self.action_dim = 0
+
+         for space in tree.flatten(self.action_space_struct):
+             if isinstance(space, Discrete):
+                 self.action_dim += space.n
+             elif isinstance(space, MultiDiscrete):
+                 self.action_dim += np.sum(space.nvec)
+             elif space.shape is not None:
+                 self.action_dim += int(np.prod(space.shape))
+             else:
+                 self.action_dim += int(len(space))
+
+         # Add prev-action/reward nodes to input to LSTM.
+         if self.use_prev_action:
+             self.num_outputs += self.action_dim
+         if self.use_prev_reward:
+             self.num_outputs += 1
+
+         # Define input layers.
+         input_layer = tf.keras.layers.Input(
+             shape=(None, self.num_outputs), name="inputs"
+         )
+
+         # Set self.num_outputs to the number of output nodes desired by the
+         # caller of this constructor.
+         self.num_outputs = num_outputs
+
+         state_in_h = tf.keras.layers.Input(shape=(self.cell_size,), name="h")
+         state_in_c = tf.keras.layers.Input(shape=(self.cell_size,), name="c")
+         seq_in = tf.keras.layers.Input(shape=(), name="seq_in", dtype=tf.int32)
+
+         # Send the (already preprocessed) observation to the LSTM cell.
+         lstm_out, state_h, state_c = tf.keras.layers.LSTM(
+             self.cell_size, return_sequences=True, return_state=True, name="lstm"
+         )(
+             inputs=input_layer,
+             mask=tf.sequence_mask(seq_in),
+             initial_state=[state_in_h, state_in_c],
+         )
+
+         # Postprocess LSTM output with another hidden layer and compute values.
+         logits = tf.keras.layers.Dense(
+             self.num_outputs, activation=tf.keras.activations.linear, name="logits"
+         )(lstm_out)
+         values = tf.keras.layers.Dense(1, activation=None, name="values")(lstm_out)
+
+         # Create the RNN model.
+         self._rnn_model = tf.keras.Model(
+             inputs=[input_layer, seq_in, state_in_h, state_in_c],
+             outputs=[logits, values, state_h, state_c],
+         )
+         # Print out model summary in INFO logging mode.
+         if logger.isEnabledFor(logging.INFO):
+             self._rnn_model.summary()
+
+         # Add prev-a/r to this model's view, if required.
+         if model_config["lstm_use_prev_action"]:
+             self.view_requirements[SampleBatch.PREV_ACTIONS] = ViewRequirement(
+                 SampleBatch.ACTIONS, space=self.action_space, shift=-1
+             )
+         if model_config["lstm_use_prev_reward"]:
+             self.view_requirements[SampleBatch.PREV_REWARDS] = ViewRequirement(
+                 SampleBatch.REWARDS, shift=-1
+             )
+
+     @override(RecurrentNetwork)
+     def forward(
+         self,
+         input_dict: Dict[str, TensorType],
+         state: List[TensorType],
+         seq_lens: TensorType,
+     ) -> Tuple[TensorType, List[TensorType]]:
+         assert seq_lens is not None
+         # Push obs through "unwrapped" net's `forward()` first.
+         wrapped_out, _ = self._wrapped_forward(input_dict, [], None)
+
+         # Concat. prev-action/reward if required.
+         prev_a_r = []
+
+         # Prev actions.
+         if self.model_config["lstm_use_prev_action"]:
+             prev_a = input_dict[SampleBatch.PREV_ACTIONS]
+             # If actions are not processed yet (still in their original form,
+             # as sent to the environment): Flatten/one-hot into a 1D array.
+             if self.model_config["_disable_action_flattening"]:
+                 prev_a_r.append(
+                     flatten_inputs_to_1d_tensor(
+                         prev_a,
+                         spaces_struct=self.action_space_struct,
+                         time_axis=False,
+                     )
+                 )
+             # If actions are already flattened (but not one-hot'd yet!),
+             # one-hot discrete/multi-discrete actions here.
+             else:
+                 if isinstance(self.action_space, (Discrete, MultiDiscrete)):
+                     prev_a = one_hot(prev_a, self.action_space)
+                 prev_a_r.append(
+                     tf.reshape(tf.cast(prev_a, tf.float32), [-1, self.action_dim])
+                 )
+         # Prev rewards.
+         if self.model_config["lstm_use_prev_reward"]:
+             prev_a_r.append(
+                 tf.reshape(
+                     tf.cast(input_dict[SampleBatch.PREV_REWARDS], tf.float32), [-1, 1]
+                 )
+             )
+
+         # Concat prev. actions + rewards to the "main" input.
+         if prev_a_r:
+             wrapped_out = tf.concat([wrapped_out] + prev_a_r, axis=1)
+
+         # Push everything through our LSTM.
+         input_dict["obs_flat"] = wrapped_out
+         return super().forward(input_dict, state, seq_lens)
+
+     @override(RecurrentNetwork)
+     def forward_rnn(
+         self, inputs: TensorType, state: List[TensorType], seq_lens: TensorType
+     ) -> Tuple[TensorType, List[TensorType]]:
+         model_out, self._value_out, h, c = self._rnn_model([inputs, seq_lens] + state)
+         return model_out, [h, c]
+
+     @override(ModelV2)
+     def get_initial_state(self) -> List[np.ndarray]:
+         return [
+             np.zeros(self.cell_size, np.float32),
+             np.zeros(self.cell_size, np.float32),
+         ]
+
+     @override(ModelV2)
+     def value_function(self) -> TensorType:
+         return tf.reshape(self._value_out, [-1])
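
A minimal usage sketch for the wrapper above (assumptions: `ray[rllib]` with TensorFlow installed, old API stack; PPO and CartPole-v1 are arbitrary choices for illustration). Setting `use_lstm` in the model config is what causes RLlib to wrap the default model in `LSTMWrapper`:

    # Rough sketch, not a definitive recipe (assumes ray[rllib] + TensorFlow
    # and the old API stack).
    from ray.rllib.algorithms.ppo import PPOConfig

    config = (
        PPOConfig()
        .environment("CartPole-v1")
        .framework("tf")
        .training(
            model={
                "use_lstm": True,              # wrap the default model in LSTMWrapper
                "lstm_cell_size": 256,         # size of the h and c state vectors
                "lstm_use_prev_action": True,  # concat one-hot prev. action to input
                "lstm_use_prev_reward": True,  # concat prev. reward (one extra float)
                "max_seq_len": 20,             # RNN training chunk length
            }
        )
    )
    algo = config.build()
    # Two zero-filled vectors of size 256 (initial h and c), as returned by
    # LSTMWrapper.get_initial_state().
    print(algo.get_policy().model.get_initial_state())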
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/tf_distributions.py ADDED
@@ -0,0 +1,552 @@
+ """The main difference between this and the old ActionDistribution is that this one
+ has more explicit input args, so the input format does not have to be guessed from
+ the code. This matches the design pattern of torch distributions, which developers
+ may already be familiar with.
+ """
+ import gymnasium as gym
+ import tree
+ import numpy as np
+ from typing import Dict, Iterable, List, Optional
+ import abc
+
+
+ from ray.rllib.models.distributions import Distribution
+ from ray.rllib.utils.annotations import override, DeveloperAPI
+ from ray.rllib.utils.framework import try_import_tf, try_import_tfp
+ from ray.rllib.utils.typing import TensorType, Union, Tuple
+
+
+ _, tf, _ = try_import_tf()
+ tfp = try_import_tfp()
+
+ # TODO (Kourosh) Write unittest for this class similar to torch distributions.
+
+
+ @DeveloperAPI
+ class TfDistribution(Distribution, abc.ABC):
+     """Wrapper class for tfp.distributions."""
+
+     def __init__(self, *args, **kwargs):
+         super().__init__()
+         self._dist = self._get_tf_distribution(*args, **kwargs)
+
+     @abc.abstractmethod
+     def _get_tf_distribution(self, *args, **kwargs) -> "tfp.distributions.Distribution":
+         """Returns the tfp.distributions.Distribution object to use."""
+
+     @override(Distribution)
+     def logp(self, value: TensorType, **kwargs) -> TensorType:
+         return self._dist.log_prob(value, **kwargs)
+
+     @override(Distribution)
+     def entropy(self) -> TensorType:
+         return self._dist.entropy()
+
+     @override(Distribution)
+     def kl(self, other: "Distribution") -> TensorType:
+         return self._dist.kl_divergence(other._dist)
+
+     @override(Distribution)
+     def sample(
+         self, *, sample_shape=()
+     ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
+         sample = self._dist.sample(sample_shape)
+         return sample
+
+     @override(Distribution)
+     def rsample(
+         self, *, sample_shape=()
+     ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
+         raise NotImplementedError
+
+
+ @DeveloperAPI
+ class TfCategorical(TfDistribution):
+     """Wrapper class for Categorical distribution.
+
+     Creates a categorical distribution parameterized by either :attr:`probs` or
+     :attr:`logits` (but not both).
+
+     Samples are integers from :math:`\{0, \ldots, K-1\}` where `K` is
+     ``probs.shape[-1]``.
+
+     If `probs` is 1-dimensional with length-`K`, each element is the relative
+     probability of sampling the class at that index.
+
+     If `probs` is N-dimensional, the first N-1 dimensions are treated as a batch of
+     relative probability vectors.
+
+     .. testcode::
+         :skipif: True
+
+         m = TfCategorical([ 0.25, 0.25, 0.25, 0.25 ])
+         m.sample(sample_shape=(2,))  # equal probability of 0, 1, 2, 3
+
+     .. testoutput::
+
+         tf.Tensor([2 3], shape=(2,), dtype=int32)
+
+     Args:
+         probs: The probabilities of each event.
+         logits: Event log probabilities (unnormalized).
+         temperature: In case of using logits, this parameter can be used to determine
+             the sharpness of the distribution. i.e.
+             ``probs = softmax(logits / temperature)``. The temperature must be strictly
+             positive. A low value (e.g. 1e-10) will result in argmax sampling while a
+             larger value will result in uniform sampling.
+     """
+
+     @override(TfDistribution)
+     def __init__(
+         self,
+         probs: "tf.Tensor" = None,
+         logits: "tf.Tensor" = None,
+     ) -> None:
+         # We assert this here because to_deterministic makes this assumption.
+         assert (probs is None) != (
+             logits is None
+         ), "Exactly one out of `probs` and `logits` must be set!"
+
+         self.probs = probs
+         self.logits = logits
+         self.one_hot = tfp.distributions.OneHotCategorical(logits=logits, probs=probs)
+         super().__init__(logits=logits, probs=probs)
+
+     @override(Distribution)
+     def logp(self, value: TensorType, **kwargs) -> TensorType:
+         # This prevents an error in which float values at the boundaries of the range
+         # of the distribution are passed to this function.
+         return -tf.nn.sparse_softmax_cross_entropy_with_logits(
+             logits=self.logits if self.logits is not None else tf.math.log(self.probs),
+             labels=tf.cast(value, tf.int32),
+         )
+
+     @override(TfDistribution)
+     def _get_tf_distribution(
+         self,
+         probs: "tf.Tensor" = None,
+         logits: "tf.Tensor" = None,
+     ) -> "tfp.distributions.Distribution":
+         return tfp.distributions.Categorical(probs=probs, logits=logits)
+
+     @staticmethod
+     @override(Distribution)
+     def required_input_dim(space: gym.Space, **kwargs) -> int:
+         assert isinstance(space, gym.spaces.Discrete)
+         return int(space.n)
+
+     @override(Distribution)
+     def rsample(self, sample_shape=()):
+         # Differentiable sampling via the straight-through gradient estimator:
+         # the forward pass uses the discrete one-hot sample, while gradients
+         # flow through the (soft) probs.
+         probs = self.probs if self.probs is not None else tf.nn.softmax(self.logits)
+         one_hot_sample = tf.cast(self.one_hot.sample(sample_shape), probs.dtype)
+         return tf.stop_gradient(one_hot_sample - probs) + probs
+
+     @classmethod
+     @override(Distribution)
+     def from_logits(cls, logits: TensorType, **kwargs) -> "TfCategorical":
+         return TfCategorical(logits=logits, **kwargs)
+
+     def to_deterministic(self) -> "TfDeterministic":
+         if self.probs is not None:
+             probs_or_logits = self.probs
+         else:
+             probs_or_logits = self.logits
+
+         return TfDeterministic(loc=tf.math.argmax(probs_or_logits, axis=-1))
+
+
+ @DeveloperAPI
+ class TfDiagGaussian(TfDistribution):
+     """Wrapper class for Normal distribution.
+
+     Creates a normal distribution parameterized by :attr:`loc` and :attr:`scale`. In
+     case of a multi-dimensional distribution, the variance is assumed to be diagonal.
+
+     .. testcode::
+         :skipif: True
+
+         m = TfDiagGaussian(loc=[0.0, 0.0], scale=[1.0, 1.0])
+         m.sample(sample_shape=(2,))  # 2d normal dist with loc=0 and scale=1
+
+     .. testoutput::
+
+         tensor([[ 0.1046, -0.6120], [ 0.234, 0.556]])
+
+     .. testcode::
+         :skipif: True
+
+         # scale is None
+         m = TfDiagGaussian(loc=[0.0, 1.0])
+         m.sample(sample_shape=(2,))  # normally distributed with loc=0 and scale=1
+
+     .. testoutput::
+
+         tensor([0.1046, 0.6120])
+
+
+     Args:
+         loc: mean of the distribution (often referred to as mu). If scale is None, the
+             second half of `loc` will be used as the log of scale.
+         scale: standard deviation of the distribution (often referred to as sigma).
+             Has to be positive.
+     """
+
+     @override(TfDistribution)
+     def __init__(
+         self,
+         loc: Union[float, TensorType],
+         scale: Optional[Union[float, TensorType]] = None,
+     ):
+         self.loc = loc
+         super().__init__(loc=loc, scale=scale)
+
+     @override(TfDistribution)
+     def _get_tf_distribution(self, loc, scale) -> "tfp.distributions.Distribution":
+         return tfp.distributions.Normal(loc=loc, scale=scale)
+
+     @override(TfDistribution)
+     def logp(self, value: TensorType) -> TensorType:
+         return tf.math.reduce_sum(super().logp(value), axis=-1)
+
+     @override(TfDistribution)
+     def entropy(self) -> TensorType:
+         return tf.math.reduce_sum(super().entropy(), axis=-1)
+
+     @override(TfDistribution)
+     def kl(self, other: "TfDistribution") -> TensorType:
+         return tf.math.reduce_sum(super().kl(other), axis=-1)
+
+     @staticmethod
+     @override(Distribution)
+     def required_input_dim(space: gym.Space, **kwargs) -> int:
+         assert isinstance(space, gym.spaces.Box)
+         return int(np.prod(space.shape, dtype=np.int32) * 2)
+
+     @override(Distribution)
+     def rsample(self, sample_shape=()):
+         # Reparameterization trick: sample eps ~ N(0, 1), then shift and scale.
+         eps = tf.random.normal(sample_shape)
+         return self._dist.loc + eps * self._dist.scale
+
+     @classmethod
+     @override(Distribution)
+     def from_logits(cls, logits: TensorType, **kwargs) -> "TfDiagGaussian":
+         loc, log_std = tf.split(logits, num_or_size_splits=2, axis=-1)
+         scale = tf.math.exp(log_std)
+         return TfDiagGaussian(loc=loc, scale=scale)
+
+     def to_deterministic(self) -> "TfDeterministic":
+         return TfDeterministic(loc=self.loc)
+
+
+ @DeveloperAPI
+ class TfDeterministic(Distribution):
+     """The distribution that returns the input values directly.
+
+     This is similar to DiagGaussian with standard deviation zero (thus only
+     requiring the "mean" values as NN output).
+
+     Note: entropy is always zero, and logp and kl are not implemented.
+
+     .. testcode::
+         :skipif: True
+
+         m = TfDeterministic(loc=tf.constant([0.0, 0.0]))
+         m.sample(sample_shape=(2,))
+
+     .. testoutput::
+
+         Tensor([[ 0.0, 0.0], [ 0.0, 0.0]])
+
+     Args:
+         loc: the deterministic value to return
+     """
+
+     @override(Distribution)
+     def __init__(self, loc: "tf.Tensor") -> None:
+         super().__init__()
+         self.loc = loc
+
+     @override(Distribution)
+     def sample(
+         self,
+         *,
+         sample_shape: Tuple[int, ...] = (),
+         **kwargs,
+     ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
+         shape = sample_shape + self.loc.shape
+         return tf.ones(shape, dtype=self.loc.dtype) * self.loc
+
+     @override(Distribution)
+     def rsample(
+         self,
+         *,
+         sample_shape: Tuple[int, ...] = None,
+         **kwargs,
+     ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
+         raise NotImplementedError
+
+     @override(Distribution)
+     def logp(self, value: TensorType, **kwargs) -> TensorType:
+         return tf.zeros_like(self.loc)
+
+     @override(Distribution)
+     def entropy(self, **kwargs) -> TensorType:
+         raise RuntimeError(f"`entropy()` not supported for {self.__class__.__name__}.")
+
+     @override(Distribution)
+     def kl(self, other: "Distribution", **kwargs) -> TensorType:
+         raise RuntimeError(f"`kl()` not supported for {self.__class__.__name__}.")
+
+     @staticmethod
+     @override(Distribution)
+     def required_input_dim(space: gym.Space, **kwargs) -> int:
+         assert isinstance(space, gym.spaces.Box)
+         return int(np.prod(space.shape, dtype=np.int32))
+
+     @classmethod
+     @override(Distribution)
+     def from_logits(cls, logits: TensorType, **kwargs) -> "TfDeterministic":
+         return TfDeterministic(loc=logits)
+
+     def to_deterministic(self) -> "TfDeterministic":
+         return self
+
+
+ @DeveloperAPI
+ class TfMultiCategorical(Distribution):
+     """MultiCategorical distribution for MultiDiscrete action spaces."""
+
+     @override(Distribution)
+     def __init__(
+         self,
+         categoricals: List[TfCategorical],
+     ):
+         super().__init__()
+         self._cats = categoricals
+
+     @override(Distribution)
+     def sample(self) -> TensorType:
+         arr = [cat.sample() for cat in self._cats]
+         sample_ = tf.stack(arr, axis=-1)
+         return sample_
+
+     @override(Distribution)
+     def rsample(self, sample_shape=()):
+         arr = [cat.rsample() for cat in self._cats]
+         sample_ = tf.stack(arr, axis=-1)
+         return sample_
+
+     @override(Distribution)
+     def logp(self, value: tf.Tensor) -> TensorType:
+         actions = tf.unstack(tf.cast(value, tf.int32), axis=-1)
+         logps = tf.stack([cat.logp(act) for cat, act in zip(self._cats, actions)])
+         return tf.reduce_sum(logps, axis=0)
+
+     @override(Distribution)
+     def entropy(self) -> TensorType:
+         return tf.reduce_sum(
+             tf.stack([cat.entropy() for cat in self._cats], axis=-1), axis=-1
+         )
+
+     @override(Distribution)
+     def kl(self, other: Distribution) -> TensorType:
+         kls = tf.stack(
+             [cat.kl(oth_cat) for cat, oth_cat in zip(self._cats, other._cats)], axis=-1
+         )
+         return tf.reduce_sum(kls, axis=-1)
+
+     @staticmethod
+     @override(Distribution)
+     def required_input_dim(space: gym.Space, **kwargs) -> int:
+         assert isinstance(space, gym.spaces.MultiDiscrete)
+         return int(np.sum(space.nvec))
+
+     @classmethod
+     @override(Distribution)
+     def from_logits(
+         cls,
+         logits: tf.Tensor,
+         input_lens: List[int],
+         **kwargs,
+     ) -> "TfMultiCategorical":
+         """Creates this Distribution from logits (and additional arguments).
+
+         If you wish to create this distribution from logits only, please refer to
+         `Distribution.get_partial_dist_cls()`.
+
+         Args:
+             logits: The tensor containing logits to be separated by `input_lens`.
+             input_lens: A list of integers that indicate the length of the logits
+                 vectors to be passed into each child distribution.
+             **kwargs: Forward compatibility kwargs.
+         """
+         categoricals = [
+             TfCategorical(logits=logits)
+             for logits in tf.split(logits, input_lens, axis=-1)
+         ]
+
+         return TfMultiCategorical(categoricals=categoricals)
+
+     def to_deterministic(self) -> "TfMultiDistribution":
+         return TfMultiDistribution([cat.to_deterministic() for cat in self._cats])
+
+
+ @DeveloperAPI
+ class TfMultiDistribution(Distribution):
+     """Action distribution that operates on multiple, possibly nested actions."""
+
+     def __init__(
+         self,
+         child_distribution_struct: Union[Tuple, List, Dict],
+     ):
+         """Initializes a TfMultiDistribution object.
+
+         Args:
+             child_distribution_struct: Any struct (e.g. a dict, list, or tuple)
+                 of already instantiated child Distribution objects that make up
+                 this multi-distribution.
+         """
+         super().__init__()
+         self._original_struct = child_distribution_struct
+         self._flat_child_distributions = tree.flatten(child_distribution_struct)
+
+     @override(Distribution)
+     def rsample(
+         self,
+         *,
+         sample_shape: Tuple[int, ...] = None,
+         **kwargs,
+     ) -> Union[TensorType, Tuple[TensorType, TensorType]]:
+         rsamples = []
+         for dist in self._flat_child_distributions:
+             rsample = dist.rsample(sample_shape=sample_shape, **kwargs)
+             rsamples.append(rsample)
+
+         rsamples = tree.unflatten_as(self._original_struct, rsamples)
+         return rsamples
+
+     @override(Distribution)
+     def logp(self, value):
+         # Single tensor input (all merged).
+         if isinstance(value, (tf.Tensor, np.ndarray)):
+             split_indices = []
+             for dist in self._flat_child_distributions:
+                 if isinstance(dist, TfCategorical):
+                     split_indices.append(1)
+                 elif isinstance(dist, TfMultiCategorical):
+                     split_indices.append(len(dist._cats))
+                 else:
+                     sample = dist.sample()
+                     # Cover the Box(shape=()) case.
+                     if len(sample.shape) == 1:
+                         split_indices.append(1)
+                     else:
+                         split_indices.append(tf.shape(sample)[1])
+             split_value = tf.split(value, split_indices, axis=1)
+         # Structured or flattened (by single action component) input.
+         else:
+             split_value = tree.flatten(value)
+
+         def map_(val, dist):
+             # Remove extra dimension if present.
+             if (
+                 isinstance(dist, TfCategorical)
+                 and len(val.shape) > 1
+                 and val.shape[-1] == 1
+             ):
+                 val = tf.squeeze(val, axis=-1)
+
+             return dist.logp(val)
+
+         # Remove the extra categorical dimension and take the logp of each
+         # component.
+         flat_logps = tree.map_structure(
+             map_, split_value, self._flat_child_distributions
+         )
+
+         return sum(flat_logps)
+
+     @override(Distribution)
+     def kl(self, other):
+         kl_list = [
+             d.kl(o)
+             for d, o in zip(
+                 self._flat_child_distributions, other._flat_child_distributions
+             )
+         ]
+         return sum(kl_list)
+
+     @override(Distribution)
+     def entropy(self):
+         entropy_list = [d.entropy() for d in self._flat_child_distributions]
+         return sum(entropy_list)
+
+     @override(Distribution)
+     def sample(self):
+         child_distributions_struct = tree.unflatten_as(
+             self._original_struct, self._flat_child_distributions
+         )
+         return tree.map_structure(lambda s: s.sample(), child_distributions_struct)
+
+     @staticmethod
+     @override(Distribution)
+     def required_input_dim(space: gym.Space, input_lens: List[int], **kwargs) -> int:
+         return sum(input_lens)
+
+     @classmethod
+     @override(Distribution)
+     def from_logits(
+         cls,
+         logits: tf.Tensor,
+         child_distribution_cls_struct: Union[Dict, Iterable],
+         input_lens: Union[Dict, List[int]],
+         space: gym.Space,
+         **kwargs,
+     ) -> "TfMultiDistribution":
+         """Creates this Distribution from logits (and additional arguments).
+
+         If you wish to create this distribution from logits only, please refer to
+         `Distribution.get_partial_dist_cls()`.
+
+         Args:
+             logits: The tensor containing logits to be separated by `input_lens`.
+             child_distribution_cls_struct: A struct of Distribution classes that can
+                 be instantiated from the given logits.
+             input_lens: A list or dict of integers that indicate the length of each
+                 logit. If this is given as a dict, the structure should match the
+                 structure of child_distribution_cls_struct.
+             space: The possibly nested output space.
+             **kwargs: Forward compatibility kwargs.
+
+         Returns:
+             A TfMultiDistribution object.
+         """
+         logit_lens = tree.flatten(input_lens)
+         child_distribution_cls_list = tree.flatten(child_distribution_cls_struct)
+         split_logits = tf.split(logits, logit_lens, axis=1)
+
+         child_distribution_list = tree.map_structure(
+             lambda dist, input_: dist.from_logits(input_),
+             child_distribution_cls_list,
+             list(split_logits),
+         )
+
+         child_distribution_struct = tree.unflatten_as(
+             child_distribution_cls_struct, child_distribution_list
+         )
+
+         return TfMultiDistribution(
+             child_distribution_struct=child_distribution_struct,
+         )
+
+     def to_deterministic(self) -> "TfMultiDistribution":
+         flat_deterministic_dists = [
+             dist.to_deterministic() for dist in self._flat_child_distributions
+         ]
+         deterministic_dists = tree.unflatten_as(
+             self._original_struct, flat_deterministic_dists
+         )
+         return TfMultiDistribution(deterministic_dists)
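
A short usage sketch of how these wrappers are typically driven from a flat logits tensor, the way a policy head would produce it (assumes `tensorflow` and `tensorflow_probability` are installed; the numbers are arbitrary):

    import tensorflow as tf

    from ray.rllib.models.tf.tf_distributions import TfCategorical, TfDiagGaussian

    # Discrete head: 4 logits -> Categorical over {0, 1, 2, 3}.
    cat = TfCategorical.from_logits(tf.constant([[0.5, 0.1, 0.2, 0.2]]))
    a = cat.sample()                  # shape (1,), integer action
    print(cat.logp(a), cat.entropy())

    # Continuous head: first half of the inputs = mean, second half = log(std).
    inputs = tf.constant([[0.0, 0.0, -1.0, -1.0]])  # 2-dim action space
    gauss = TfDiagGaussian.from_logits(inputs)
    x = gauss.sample()                # shape (1, 2)
    print(gauss.logp(x))              # log-prob summed over the last axis
    greedy = gauss.to_deterministic().sample()  # just the mean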
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/tf/tf_modelv2.py ADDED
@@ -0,0 +1,142 @@
+ import contextlib
+ import gymnasium as gym
+ import re
+ from typing import Dict, List, Union
+
+ from ray.util import log_once
+ from ray.rllib.models.modelv2 import ModelV2
+ from ray.rllib.utils.annotations import OldAPIStack, override
+ from ray.rllib.utils.deprecation import deprecation_warning
+ from ray.rllib.utils.framework import try_import_tf
+ from ray.rllib.utils.typing import ModelConfigDict, TensorType
+
+ tf1, tf, tfv = try_import_tf()
+
+
+ @OldAPIStack
+ class TFModelV2(ModelV2):
+     """TF version of ModelV2, which should contain a tf keras Model.
+
+     Note that this class by itself is not a valid model unless you
+     implement forward() in a subclass."""
+
+     def __init__(
+         self,
+         obs_space: gym.spaces.Space,
+         action_space: gym.spaces.Space,
+         num_outputs: int,
+         model_config: ModelConfigDict,
+         name: str,
+     ):
+         """Initializes a TFModelV2 instance.
+
+         Here is an example implementation for a subclass
+         ``MyModelClass(TFModelV2)``::
+
+             def __init__(self, *args, **kwargs):
+                 super(MyModelClass, self).__init__(*args, **kwargs)
+                 input_layer = tf.keras.layers.Input(...)
+                 hidden_layer = tf.keras.layers.Dense(...)(input_layer)
+                 output_layer = tf.keras.layers.Dense(...)(hidden_layer)
+                 value_layer = tf.keras.layers.Dense(...)(hidden_layer)
+                 self.base_model = tf.keras.Model(
+                     input_layer, [output_layer, value_layer])
+         """
+         super().__init__(
+             obs_space, action_space, num_outputs, model_config, name, framework="tf"
+         )
+
+         # Deprecated: TFModelV2 now automatically tracks its variables.
+         self.var_list = []
+
+         if tf1.executing_eagerly():
+             self.graph = None
+         else:
+             self.graph = tf1.get_default_graph()
+
+     def context(self) -> contextlib.AbstractContextManager:
+         """Returns a contextmanager for the current TF graph."""
+         if self.graph:
+             return self.graph.as_default()
+         else:
+             return ModelV2.context(self)
+
+     def update_ops(self) -> List[TensorType]:
+         """Return the list of update ops for this model.
+
+         For example, this should include any BatchNorm update ops."""
+         return []
+
+     def register_variables(self, variables: List[TensorType]) -> None:
+         """Register the given list of variables with this model."""
+         if log_once("deprecated_tfmodelv2_register_variables"):
+             deprecation_warning(old="TFModelV2.register_variables", error=False)
+         self.var_list.extend(variables)
+
+     @override(ModelV2)
+     def variables(
+         self, as_dict: bool = False
+     ) -> Union[List[TensorType], Dict[str, TensorType]]:
+         if as_dict:
+             # Old way, using `register_variables`.
+             if self.var_list:
+                 return {v.name: v for v in self.var_list}
+             # New way: Automatically determine the var tree.
+             else:
+                 return self._find_sub_modules("", self.__dict__)
+
+         # Old way, using `register_variables`.
+         if self.var_list:
+             return list(self.var_list)
+         # New way: Automatically determine the var tree.
+         else:
+             return list(self.variables(as_dict=True).values())
+
+     @override(ModelV2)
+     def trainable_variables(
+         self, as_dict: bool = False
+     ) -> Union[List[TensorType], Dict[str, TensorType]]:
+         if as_dict:
+             return {
+                 k: v for k, v in self.variables(as_dict=True).items() if v.trainable
+             }
+         return [v for v in self.variables() if v.trainable]
+
+     @staticmethod
+     def _find_sub_modules(current_key, struct):
+         # Keras Model: key=k + "." + var-name (replace '/' by '.').
+         if isinstance(struct, tf.keras.models.Model) or isinstance(struct, tf.Module):
+             ret = {}
+             for var in struct.variables:
+                 name = re.sub("/", ".", var.name)
+                 key = current_key + "." + name
+                 ret[key] = var
+             return ret
+         # Other TFModelV2: Include its vars into ours.
+         elif isinstance(struct, TFModelV2):
+             return {
+                 current_key + "." + key: var
+                 for key, var in struct.variables(as_dict=True).items()
+             }
+         # tf.Variable.
+         elif isinstance(struct, tf.Variable):
+             return {current_key: struct}
+         # List/Tuple.
+         elif isinstance(struct, (tuple, list)):
+             ret = {}
+             for i, value in enumerate(struct):
+                 sub_vars = TFModelV2._find_sub_modules(
+                     current_key + "_{}".format(i), value
+                 )
+                 ret.update(sub_vars)
+             return ret
+         # Dict.
+         elif isinstance(struct, dict):
+             if current_key:
+                 current_key += "_"
+             ret = {}
+             for key, value in struct.items():
+                 sub_vars = TFModelV2._find_sub_modules(current_key + str(key), value)
+                 ret.update(sub_vars)
+             return ret
+         return {}
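
A rough sketch of the automatic variable tracking above (the `TinyModel` class and the spaces here are made up for illustration; assumes TensorFlow in eager mode):

    # Hypothetical minimal subclass to show how _find_sub_modules names variables.
    import gymnasium as gym
    import numpy as np
    import tensorflow as tf

    from ray.rllib.models.tf.tf_modelv2 import TFModelV2


    class TinyModel(TFModelV2):
        def __init__(self, obs_space, action_space, num_outputs, model_config, name):
            super().__init__(obs_space, action_space, num_outputs, model_config, name)
            inp = tf.keras.layers.Input(shape=obs_space.shape)
            out = tf.keras.layers.Dense(num_outputs, name="out")(inp)
            # Stored as an attribute -> picked up by variables(as_dict=True).
            self.base_model = tf.keras.Model(inp, out)

        def forward(self, input_dict, state, seq_lens):
            return self.base_model(input_dict["obs"]), state


    model = TinyModel(
        gym.spaces.Box(-1.0, 1.0, (4,), np.float32), gym.spaces.Discrete(2), 2, {}, "tiny"
    )
    # Keys combine the attribute path with the keras variable name, '/' -> '.':
    # e.g. "base_model.out.kernel:0" and "base_model.out.bias:0".
    print(list(model.variables(as_dict=True)))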
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (180 Bytes).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/attention_net.cpython-310.pyc ADDED
Binary file (11.6 kB).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/complex_input_net.cpython-310.pyc ADDED
Binary file (4.81 kB).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/fcnet.cpython-310.pyc ADDED
Binary file (3.35 kB).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/mingpt.cpython-310.pyc ADDED
Binary file (8.34 kB).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/misc.cpython-310.pyc ADDED
Binary file (10.7 kB).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/noop.cpython-310.pyc ADDED
Binary file (883 Bytes).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/recurrent_net.cpython-310.pyc ADDED
Binary file (8.45 kB).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/torch_action_dist.cpython-310.pyc ADDED
Binary file (25 kB).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/torch_distributions.cpython-310.pyc ADDED
Binary file (24.4 kB).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/torch_modelv2.cpython-310.pyc ADDED
Binary file (3.02 kB).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/__pycache__/visionnet.cpython-310.pyc ADDED
Binary file (4.81 kB).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (590 Bytes).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/gru_gate.cpython-310.pyc ADDED
Binary file (2.02 kB).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/models/torch/modules/__pycache__/multi_head_attention.cpython-310.pyc ADDED
Binary file (2.43 kB).
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/__init__.py ADDED
@@ -0,0 +1,141 @@
+ import contextlib
+ from functools import partial
+
+ from ray.rllib.utils.annotations import override, PublicAPI, DeveloperAPI
+ from ray.rllib.utils.deprecation import deprecation_warning
+ from ray.rllib.utils.filter import Filter
+ from ray.rllib.utils.filter_manager import FilterManager
+ from ray.rllib.utils.framework import (
+     try_import_jax,
+     try_import_tf,
+     try_import_tfp,
+     try_import_torch,
+ )
+ from ray.rllib.utils.numpy import (
+     sigmoid,
+     softmax,
+     relu,
+     one_hot,
+     fc,
+     lstm,
+     SMALL_NUMBER,
+     LARGE_INTEGER,
+     MIN_LOG_NN_OUTPUT,
+     MAX_LOG_NN_OUTPUT,
+ )
+ from ray.rllib.utils.schedules import (
+     LinearSchedule,
+     PiecewiseSchedule,
+     PolynomialSchedule,
+     ExponentialSchedule,
+     ConstantSchedule,
+ )
+ from ray.rllib.utils.test_utils import (
+     check,
+     check_compute_single_action,
+     check_train_results,
+ )
+ from ray.tune.utils import merge_dicts, deep_update
+
+
+ @DeveloperAPI
+ def add_mixins(base, mixins, reversed=False):
+     """Returns a new class with mixins applied in priority order."""
+
+     mixins = list(mixins or [])
+
+     while mixins:
+         if reversed:
+
+             class new_base(base, mixins.pop()):
+                 pass
+
+         else:
+
+             class new_base(mixins.pop(), base):
+                 pass
+
+         base = new_base
+
+     return base
+
+
+ @DeveloperAPI
+ def force_list(elements=None, to_tuple=False):
+     """
+     Makes sure `elements` is returned as a list, whether `elements` is a single
+     item, already a list, or a tuple.
+
+     Args:
+         elements (Optional[any]): The inputs as a single item, list, or tuple to
+             be converted into a list/tuple. If None, returns an empty list/tuple.
+         to_tuple: Whether to use a tuple (instead of a list).
+
+     Returns:
+         Union[list,tuple]: All given elements in a list/tuple, depending on
+             `to_tuple`'s value. If `elements` is None, returns an empty
+             list/tuple.
+     """
+     ctor = list
+     if to_tuple is True:
+         ctor = tuple
+     return (
+         ctor()
+         if elements is None
+         else ctor(elements)
+         if type(elements) in [list, set, tuple]
+         else ctor([elements])
+     )
+
+
+ @DeveloperAPI
+ class NullContextManager(contextlib.AbstractContextManager):
+     """No-op context manager."""
+
+     def __init__(self):
+         pass
+
+     def __enter__(self):
+         pass
+
+     def __exit__(self, *args):
+         pass
+
+
+ force_tuple = partial(force_list, to_tuple=True)
+
+ __all__ = [
+     "add_mixins",
+     "check",
+     "check_compute_single_action",
+     "check_train_results",
+     "deep_update",
+     "deprecation_warning",
+     "fc",
+     "force_list",
+     "force_tuple",
+     "lstm",
+     "merge_dicts",
+     "one_hot",
+     "override",
+     "relu",
+     "sigmoid",
+     "softmax",
+     "try_import_jax",
+     "try_import_tf",
+     "try_import_tfp",
+     "try_import_torch",
+     "ConstantSchedule",
+     "DeveloperAPI",
+     "ExponentialSchedule",
+     "Filter",
+     "FilterManager",
+     "LARGE_INTEGER",
+     "LinearSchedule",
+     "MAX_LOG_NN_OUTPUT",
+     "MIN_LOG_NN_OUTPUT",
+     "PiecewiseSchedule",
+     "PolynomialSchedule",
+     "PublicAPI",
+     "SMALL_NUMBER",
+ ]
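
Two quick, self-contained illustrations of the helpers defined above (plain Python; only `ray` needs to be installed):

    from ray.rllib.utils import add_mixins, force_list, force_tuple

    # force_list/force_tuple: normalize "one item or many" arguments.
    print(force_list(None))        # -> []
    print(force_list("abc"))       # -> ["abc"] (a single item, not list("abc"))
    print(force_list(("a", "b")))  # -> ["a", "b"]
    print(force_tuple([1, 2]))     # -> (1, 2)


    class Base:
        pass


    class MixinA:
        pass


    class MixinB:
        pass


    # add_mixins wraps right-to-left, so with reversed=False the MRO becomes:
    # new_base, MixinA, new_base, MixinB, Base, object.
    cls = add_mixins(Base, [MixinA, MixinB])
    print([c.__name__ for c in cls.__mro__])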
infer_4_37_2/lib/python3.10/site-packages/ray/rllib/utils/from_config.py ADDED
@@ -0,0 +1,325 @@
+ from copy import deepcopy
+ from functools import partial
+ import importlib
+ import json
+ import os
+ import re
+ import yaml
+
+ from ray.rllib.utils.annotations import DeveloperAPI
+ from ray.rllib.utils import force_list, merge_dicts
+
+
+ @DeveloperAPI
+ def from_config(cls, config=None, **kwargs):
+     """Uses the given config to create an object.
+
+     If `config` is a dict, an optional "type" key can be used as a
+     "constructor hint" to specify a certain class of the object.
+     If `config` is not a dict, `config`'s value is used directly as this
+     "constructor hint".
+
+     The rest of `config` (if it's a dict) will be used as kwargs for the
+     constructor. Additional keys in **kwargs always take precedence
+     (they overwrite keys in `config`, if `config` is a dict).
+     Also, if the config-dict or **kwargs contains the special key "_args",
+     it will be popped from the dict and used as the *args list to be passed
+     separately to the constructor.
+
+     The following constructor hints are valid:
+     - None: Use `cls` as constructor.
+     - An already instantiated object: Will be returned as-is; no
+       constructor call.
+     - A string or an object that is a key in `cls`'s `__type_registry__`
+       dict: The value in `__type_registry__` for that key will be used
+       as the constructor.
+     - A python callable: Use that very callable as constructor.
+     - A string: Either a json/yaml filename or the name of a python
+       module+class (e.g. "ray.rllib. [...] .[some class name]")
+
+     Args:
+         cls: The class to build an instance for (from `config`).
+         config (Optional[dict, str]): The config dict or type-string or
+             filename.
+
+     Keyword Args:
+         kwargs: Optional possibility to pass the constructor arguments in
+             here and use `config` as the type-only info. Then we can call
+             this like: from_config([type]?, [**kwargs for constructor])
+             If `config` is already a dict, then `kwargs` will be merged
+             with `config` (overwriting keys in `config`) after "type" has
+             been popped out of `config`.
+             If a constructor of a Configurable needs *args, the special
+             key `_args` can be passed inside `kwargs` with a list value
+             (e.g. kwargs={"_args": [arg1, arg2, arg3]}).
+
+     Returns:
+         any: The object generated from the config.
+     """
+     # `cls` is the config (config is None).
+     if config is None and isinstance(cls, (dict, str)):
+         config = cls
+         cls = None
+     # `config` is already a created object of this class ->
+     # Take it as-is.
+     elif isinstance(cls, type) and isinstance(config, cls):
+         return config
+
+     # `type_`: Indicator for the Configurable's constructor.
+     # `ctor_args`: *args arguments for the constructor.
+     # `ctor_kwargs`: **kwargs arguments for the constructor.
+     # Try to copy, so the caller can reuse `config` safely.
+     try:
+         config = deepcopy(config)
+     except Exception:
+         pass
+     if isinstance(config, dict):
+         type_ = config.pop("type", None)
+         if type_ is None and isinstance(cls, str):
+             type_ = cls
+         ctor_kwargs = config
+         # Give kwargs priority over things defined in the config dict.
+         # This way, one can pass a generic `spec` and then override single
+         # constructor parameters via the kwargs in the call to `from_config`.
+         ctor_kwargs.update(kwargs)
+     else:
+         type_ = config
+         if type_ is None and "type" in kwargs:
+             type_ = kwargs.pop("type")
+         ctor_kwargs = kwargs
+     # Special `_args` field in kwargs for *args-utilizing constructors.
+     ctor_args = force_list(ctor_kwargs.pop("_args", []))
+
+     # Figure out the actual constructor (class) from `type_`.
+     # None: Try the __default__object (if no args/kwargs), only then the
+     # constructor of cls (using args/kwargs).
+     if type_ is None:
+         # We have a default constructor that was defined directly by cls
+         # (not by its children).
+         if (
+             cls is not None
+             and hasattr(cls, "__default_constructor__")
+             and cls.__default_constructor__ is not None
+             and ctor_args == []
+             and (
+                 not hasattr(cls.__bases__[0], "__default_constructor__")
+                 or cls.__bases__[0].__default_constructor__ is None
+                 or cls.__bases__[0].__default_constructor__
+                 is not cls.__default_constructor__
+             )
+         ):
+             constructor = cls.__default_constructor__
+             # Merge the default constructor's keywords into ctor_kwargs.
+             if isinstance(constructor, partial):
+                 kwargs = merge_dicts(ctor_kwargs, constructor.keywords)
+                 constructor = partial(constructor.func, **kwargs)
+                 ctor_kwargs = {}  # erase to avoid duplicate kwarg error
+         # No default constructor -> Try cls itself as constructor.
+         else:
+             constructor = cls
+     # Try the __type_registry__ of this class.
+     else:
+         constructor = _lookup_type(cls, type_)
+
+         # Found in cls.__type_registry__.
+         if constructor is not None:
+             pass
+         # type_ is False or None (and this value is not registered) ->
+         # return the value of type_.
+         elif type_ is False or type_ is None:
+             return type_
+         # Python callable.
+         elif callable(type_):
+             constructor = type_
+         # A string: Filename or a python module+class or a json/yaml str.
+         elif isinstance(type_, str):
+             if re.search("\\.(yaml|yml|json)$", type_):
+                 return from_file(cls, type_, *ctor_args, **ctor_kwargs)
+             # Try un-json/un-yaml'ing the string into a dict.
+             obj = yaml.safe_load(type_)
+             if isinstance(obj, dict):
+                 return from_config(cls, obj)
+             try:
+                 obj = from_config(cls, json.loads(type_))
+             except json.JSONDecodeError:
+                 pass
+             else:
+                 return obj
+
+             # Test for an absolute module.class path specifier.
+             if type_.find(".") != -1:
+                 module_name, function_name = type_.rsplit(".", 1)
+                 try:
+                     module = importlib.import_module(module_name)
+                     constructor = getattr(module, function_name)
+                 # Module not found.
+                 except (ModuleNotFoundError, ImportError, AttributeError):
+                     pass
+
+             # If the constructor is still not found, try attaching cls'
+             # module, then look for type_ in there.
+             if constructor is None:
+                 if isinstance(cls, str):
+                     # Module found, but doesn't have the specified
+                     # c'tor/function.
+                     raise ValueError(
+                         f"Full classpath specifier ({type_}) must be a valid "
+                         "full [module].[class] string! E.g.: "
+                         "`my.cool.module.MyCoolClass`."
+                     )
+
+                 try:
+                     module = importlib.import_module(cls.__module__)
+                     constructor = getattr(module, type_)
+                 except (ModuleNotFoundError, ImportError, AttributeError):
+                     # Try the package as well.
+                     try:
+                         package_name = importlib.import_module(
+                             cls.__module__
+                         ).__package__
+                         module = __import__(package_name, fromlist=[type_])
+                         constructor = getattr(module, type_)
+                     except (ModuleNotFoundError, ImportError, AttributeError):
+                         pass
+
+             if constructor is None:
+                 raise ValueError(
+                     f"String specifier ({type_}) must be a valid filename, "
+                     f"a [module].[class], a class within '{cls.__module__}', "
+                     f"or a key into {cls.__name__}.__type_registry__!"
+                 )
+
+     if not constructor:
+         raise TypeError("Invalid type '{}'. Cannot create `from_config`.".format(type_))
+
+     # Create the object with the inferred constructor.
+     try:
+         object_ = constructor(*ctor_args, **ctor_kwargs)
+     # Catch attempts to construct from an abstract class and return None.
+     except TypeError as e:
+         if re.match("Can't instantiate abstract class", e.args[0]):
+             return None
+         raise e  # Re-raise.
+     # No sanity check for fake (lambda)-"constructors".
+     if type(constructor).__name__ != "function":
+         assert isinstance(
+             object_,
+             constructor.func if isinstance(constructor, partial) else constructor,
+         )
+
+     return object_
+
+
+ @DeveloperAPI
+ def from_file(cls, filename, *args, **kwargs):
+     """
+     Create an object from a config saved in `filename`. Expects a json or
+     yaml file.
+
+     Args:
+         filename: File containing the config (json or yaml).
+
+     Returns:
+         any: The object generated from the file.
+     """
+     path = os.path.join(os.getcwd(), filename)
+     if not os.path.isfile(path):
+         raise FileNotFoundError("File '{}' not found!".format(filename))
+
+     with open(path, "rt") as fp:
+         if path.endswith(".yaml") or path.endswith(".yml"):
+             config = yaml.safe_load(fp)
+         else:
+             config = json.load(fp)
+
+     # Add possible *args.
+     config["_args"] = args
+     return from_config(cls, config=config, **kwargs)
+
+
+ def _lookup_type(cls, type_):
+     if (
+         cls is not None
+         and hasattr(cls, "__type_registry__")
+         and isinstance(cls.__type_registry__, dict)
+         and (
+             type_ in cls.__type_registry__
+             or (
+                 isinstance(type_, str)
+                 and re.sub("[\\W_]", "", type_.lower()) in cls.__type_registry__
+             )
+         )
+     ):
+         available_class_for_type = cls.__type_registry__.get(type_)
+         if available_class_for_type is None:
+             available_class_for_type = cls.__type_registry__[
+                 re.sub("[\\W_]", "", type_.lower())
+             ]
+         return available_class_for_type
+     return None
+
+
+ class _NotProvided:
+     """Singleton class to provide a "not provided" value for AlgorithmConfig signatures.
+
+     Using the only instance of this class indicates that the user does NOT wish to
+     change the value of some property.
+
+     .. testcode::
+         :skipif: True
+
+         from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
+         config = AlgorithmConfig()
+         # Print out the default learning rate.
+         print(config.lr)
+
+     .. testoutput::
+
+         0.001
+
+     .. testcode::
+         :skipif: True
+
+         # Print out the default `preprocessor_pref`.
+         print(config.preprocessor_pref)
+
+     .. testoutput::
+
+         "deepmind"
+
+     .. testcode::
+         :skipif: True
+
+         # Will only set the `preprocessor_pref` property (to None) and leave
+         # all other properties at their default values.
+         config.training(preprocessor_pref=None)
+         config.preprocessor_pref is None
+
+     .. testoutput::
+
+         True
+
+     .. testcode::
+         :skipif: True
+
+         # Still the same value (we didn't touch it in the call to `.training()`).
+         print(config.lr)
+
+     .. testoutput::
+
+         0.001
+     """
+
+     class __NotProvided:
+         pass
+
+     instance = None
+
+     def __init__(self):
+         if _NotProvided.instance is None:
+             _NotProvided.instance = _NotProvided.__NotProvided()
+
+
+ # Use this object as the default value in all method signatures of
+ # AlgorithmConfig, indicating that the respective property should NOT be
+ # touched in the call.
+ NotProvided = _NotProvided()
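
A hedged sketch of `from_config` in action (the `Bandit` class here is hypothetical; any class with a matching constructor works the same way):

    from ray.rllib.utils.from_config import from_config


    class Bandit:
        def __init__(self, arms, epsilon=0.1):
            self.arms, self.epsilon = arms, epsilon


    # Plain dict config; extra kwargs take precedence over keys in the dict.
    b = from_config(Bandit, {"arms": 10, "epsilon": 0.2}, epsilon=0.05)
    assert b.arms == 10 and b.epsilon == 0.05

    # "type" as a full [module].[class] path, plus `_args` for positional args.
    b2 = from_config(None, {"type": f"{__name__}.Bandit", "_args": [5]})
    assert b2.arms == 5 and b2.epsilon == 0.1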