ZTWHHH committed
Commit eb9643f · verified · 1 parent: 19824f5

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/__init__.cpython-310.pyc +0 -0
  2. evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/_backends.cpython-310.pyc +0 -0
  3. evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/_torch_specific.cpython-310.pyc +0 -0
  4. evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/einops.cpython-310.pyc +0 -0
  5. evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/packing.cpython-310.pyc +0 -0
  6. evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/parsing.cpython-310.pyc +0 -0
  7. evalkit_tf446/lib/python3.10/site-packages/einops/_torch_specific.py +102 -0
  8. evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__init__.py +0 -0
  9. evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__pycache__/__init__.cpython-310.pyc +0 -0
  10. evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__pycache__/data_api_packing.cpython-310.pyc +0 -0
  11. evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__pycache__/indexing.cpython-310.pyc +0 -0
  12. evalkit_tf446/lib/python3.10/site-packages/einops/experimental/data_api_packing.py +137 -0
  13. evalkit_tf446/lib/python3.10/site-packages/einops/experimental/indexing.py +393 -0
  14. evalkit_tf446/lib/python3.10/site-packages/einops/layers/__init__.py +80 -0
  15. evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/__init__.cpython-310.pyc +0 -0
  16. evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/_einmix.cpython-310.pyc +0 -0
  17. evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/chainer.cpython-310.pyc +0 -0
  18. evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/flax.cpython-310.pyc +0 -0
  19. evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/gluon.cpython-310.pyc +0 -0
  20. evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/keras.cpython-310.pyc +0 -0
  21. evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/oneflow.cpython-310.pyc +0 -0
  22. evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/paddle.cpython-310.pyc +0 -0
  23. evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/tensorflow.cpython-310.pyc +0 -0
  24. evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/torch.cpython-310.pyc +0 -0
  25. evalkit_tf446/lib/python3.10/site-packages/einops/layers/_einmix.py +176 -0
  26. evalkit_tf446/lib/python3.10/site-packages/einops/layers/chainer.py +53 -0
  27. evalkit_tf446/lib/python3.10/site-packages/einops/layers/flax.py +80 -0
  28. evalkit_tf446/lib/python3.10/site-packages/einops/layers/gluon.py +50 -0
  29. evalkit_tf446/lib/python3.10/site-packages/einops/layers/keras.py +9 -0
  30. evalkit_tf446/lib/python3.10/site-packages/einops/layers/oneflow.py +53 -0
  31. evalkit_tf446/lib/python3.10/site-packages/einops/layers/paddle.py +59 -0
  32. evalkit_tf446/lib/python3.10/site-packages/einops/layers/tensorflow.py +85 -0
  33. evalkit_tf446/lib/python3.10/site-packages/einops/layers/torch.py +62 -0
  34. evalkit_tf446/lib/python3.10/site-packages/einops/parsing.py +149 -0
  35. evalkit_tf446/lib/python3.10/site-packages/einops/py.typed +0 -0
  36. evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/__pycache__/request_validator.cpython-310.pyc +0 -0
  37. evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/__init__.cpython-310.pyc +0 -0
  38. evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/access_token.cpython-310.pyc +0 -0
  39. evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/authorization.cpython-310.pyc +0 -0
  40. evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/base.cpython-310.pyc +0 -0
  41. evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/pre_configured.cpython-310.pyc +0 -0
  42. evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/request_token.cpython-310.pyc +0 -0
  43. evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/resource.cpython-310.pyc +0 -0
  44. evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/signature_only.cpython-310.pyc +0 -0
  45. evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/request_token.py +209 -0
  46. evalkit_tf446/lib/python3.10/site-packages/timm/__init__.py +4 -0
  47. evalkit_tf446/lib/python3.10/site-packages/timm/optim/__init__.py +15 -0
  48. evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adabelief.cpython-310.pyc +0 -0
  49. evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adahessian.cpython-310.pyc +0 -0
  50. evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adamp.cpython-310.pyc +0 -0
evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (702 Bytes)
evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/_backends.cpython-310.pyc ADDED
Binary file (27.5 kB)
evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/_torch_specific.cpython-310.pyc ADDED
Binary file (3.86 kB)
evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/einops.cpython-310.pyc ADDED
Binary file (25.3 kB)
evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/packing.cpython-310.pyc ADDED
Binary file (6.57 kB)
evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/parsing.cpython-310.pyc ADDED
Binary file (4.99 kB)
evalkit_tf446/lib/python3.10/site-packages/einops/_torch_specific.py ADDED
@@ -0,0 +1,102 @@
+ """
+ Specialization of einops for torch.
+
+ Unfortunately, torch's jit scripting mechanism isn't strong enough,
+ and to have scripting supported at least for layers,
+ a number of changes are required, and this module helps.
+
+ Importantly, the whole lib is designed so that you can't use it
+ """
+ import warnings
+ from typing import Dict, List
+
+ import torch
+ from einops.einops import TransformRecipe, _reconstruct_from_shape_uncached
+
+
+ class TorchJitBackend:
+     """
+     Completely static backend that mimics part of the normal backend functionality,
+     but restricted to torch only.
+     """
+
+     @staticmethod
+     def reduce(x: torch.Tensor, operation: str, reduced_axes: List[int]):
+         if operation == 'min':
+             return x.amin(dim=reduced_axes)
+         elif operation == 'max':
+             return x.amax(dim=reduced_axes)
+         elif operation == 'sum':
+             return x.sum(dim=reduced_axes)
+         elif operation == 'mean':
+             return x.mean(dim=reduced_axes)
+         elif operation == 'prod':
+             # torch.prod reduces one axis at a time; go from the last axis
+             # so that the remaining axis indices stay valid
+             for i in list(sorted(reduced_axes))[::-1]:
+                 x = x.prod(dim=i)
+             return x
+         else:
+             raise NotImplementedError('Unknown reduction ' + operation)
+
+     @staticmethod
+     def transpose(x, axes: List[int]):
+         return x.permute(axes)
+
+     @staticmethod
+     def stack_on_zeroth_dimension(tensors: List[torch.Tensor]):
+         return torch.stack(tensors)
+
+     @staticmethod
+     def tile(x, repeats: List[int]):
+         return x.repeat(repeats)
+
+     @staticmethod
+     def add_axes(x, n_axes: int, pos2len: Dict[int, int]):
+         repeats = [-1] * n_axes
+         for axis_position, axis_length in pos2len.items():
+             x = torch.unsqueeze(x, axis_position)
+             repeats[axis_position] = axis_length
+         return x.expand(repeats)
+
+     @staticmethod
+     def is_float_type(x):
+         return x.dtype in [torch.float16, torch.float32, torch.float64, torch.bfloat16]
+
+     @staticmethod
+     def shape(x):
+         return x.shape
+
+     @staticmethod
+     def reshape(x, shape: List[int]):
+         return x.reshape(shape)
+
+
+ # mirrors einops.einops._apply_recipe
+ def apply_for_scriptable_torch(recipe: TransformRecipe, tensor: torch.Tensor, reduction_type: str) -> torch.Tensor:
+     backend = TorchJitBackend
+     init_shapes, reduced_axes, axes_reordering, added_axes, final_shapes = \
+         _reconstruct_from_shape_uncached(recipe, backend.shape(tensor))
+     tensor = backend.reshape(tensor, init_shapes)
+     if len(reduced_axes) > 0:
+         tensor = backend.reduce(tensor, operation=reduction_type, reduced_axes=reduced_axes)
+     tensor = backend.transpose(tensor, axes_reordering)
+     if len(added_axes) > 0:
+         tensor = backend.add_axes(tensor, n_axes=len(axes_reordering) + len(added_axes), pos2len=added_axes)
+     return backend.reshape(tensor, final_shapes)
+
+
+ def allow_ops_in_compiled_graph():
+     try:
+         from torch._dynamo import allow_in_graph
+     except ImportError:
+         warnings.warn("allow_ops_in_compiled_graph failed to import torch._dynamo: ensure pytorch >= 2.0", ImportWarning)
+         return
+
+     from .einops import rearrange, reduce, repeat, einsum
+     from .packing import pack, unpack
+
+     allow_in_graph(rearrange)
+     allow_in_graph(reduce)
+     allow_in_graph(repeat)
+     allow_in_graph(einsum)
+     allow_in_graph(pack)
+     allow_in_graph(unpack)
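
For context, a minimal usage sketch of the hook above (assuming pytorch >= 2.0 is installed): calling allow_ops_in_compiled_graph() before torch.compile keeps einops calls as single nodes in the compiled graph instead of tracing through their internals.

import torch
from einops import rearrange
from einops._torch_specific import allow_ops_in_compiled_graph

allow_ops_in_compiled_graph()

def to_patches(images: torch.Tensor) -> torch.Tensor:
    # split each image into 16x16 patches and flatten every patch
    return rearrange(images, 'b c (h ph) (w pw) -> b (h w) (ph pw c)', ph=16, pw=16)

compiled = torch.compile(to_patches)
print(compiled(torch.randn(2, 3, 64, 64)).shape)  # torch.Size([2, 16, 768])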
evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__init__.py ADDED
File without changes
evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (178 Bytes)
evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__pycache__/data_api_packing.cpython-310.pyc ADDED
Binary file (3.83 kB)
evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__pycache__/indexing.cpython-310.pyc ADDED
Binary file (11.2 kB)
evalkit_tf446/lib/python3.10/site-packages/einops/experimental/data_api_packing.py ADDED
@@ -0,0 +1,137 @@
+ from typing import List, TypeVar, Tuple, Sequence
+
+ from einops import EinopsError
+
+ T = TypeVar('T')
+
+ Shape = Tuple[int, ...]
+
+
+ def pack(pattern: str, tensors: Sequence[T]) -> Tuple[T, List[Shape]]:
+     axes = pattern.split()
+     if len(axes) != len(set(axes)):
+         raise EinopsError(f'Duplicates in axes names in pack("{pattern}", ...)')
+     if '*' not in axes:
+         raise EinopsError(f'No *-axis in pack("{pattern}", ...)')
+
+     # TODO: need some validation of identifiers
+
+     n_axes_before = axes.index('*')
+     n_axes_after = len(axes) - n_axes_before - 1
+     min_axes = n_axes_before + n_axes_after
+
+     xp = tensors[0].__array_namespace__()
+
+     reshaped_tensors: List[T] = []
+     packed_shapes: List[Shape] = []
+     for i, tensor in enumerate(tensors):
+         shape = tensor.shape
+         if len(shape) < min_axes:
+             raise EinopsError(f'packed tensor #{i} (enumeration starts with 0) has shape {shape}, '
+                               f'while pattern {pattern} assumes at least {min_axes} axes')
+         axis_after_packed_axes = len(shape) - n_axes_after
+         # record only the shape of the packed axes, not the surrounding ones
+         packed_shapes.append(shape[n_axes_before:axis_after_packed_axes])
+         reshaped_tensors.append(
+             xp.reshape(tensor, (*shape[:n_axes_before], -1, *shape[axis_after_packed_axes:]))
+         )
+
+     return xp.concat(reshaped_tensors, axis=n_axes_before), packed_shapes
+
+
+ def prod(x: Shape) -> int:
+     result = 1
+     for i in x:
+         result *= i
+     return result
+
+
+ def unpack(pattern: str, tensor: T, packed_shapes: List[Shape]) -> List[T]:
+     axes = pattern.split()
+     if len(axes) != len(set(axes)):
+         raise EinopsError(f'Duplicates in axes names in unpack("{pattern}", ...)')
+     if '*' not in axes:
+         raise EinopsError(f'No *-axis in unpack("{pattern}", ...)')
+
+     # TODO: need some validation of identifiers
+
+     input_shape = tensor.shape
+     if len(input_shape) != len(axes):
+         raise EinopsError(f'unpack({pattern}, ...) received input of wrong dim with shape {input_shape}')
+
+     unpacked_axis = axes.index('*')
+
+     lengths_of_composed_axes: List[int] = [
+         -1 if -1 in p_shape else prod(p_shape)
+         for p_shape in packed_shapes
+     ]
+
+     n_unknown_composed_axes = sum(x == -1 for x in lengths_of_composed_axes)
+     if n_unknown_composed_axes > 1:
+         raise EinopsError(
+             f"unpack({pattern}, ...) received more than one -1 in {packed_shapes} and can't infer dimensions"
+         )
+
+     # the following manipulations allow skipping some shape verifications
+     # and leave them to the backends
+
+     # example of packed_shapes: [[], [2, 3], [4], [-1, 5], [6]]
+     # the computed split positions should then be
+     # [0, 1, 7, 11, N - 6, N], where N = length of the unpacked axis
+     split_positions = [0] * len(packed_shapes) + [input_shape[unpacked_axis]]
+     if n_unknown_composed_axes == 0:
+         for i, x in enumerate(lengths_of_composed_axes[:-1]):
+             split_positions[i + 1] = split_positions[i] + x
+     else:
+         unknown_composed_axis: int = lengths_of_composed_axes.index(-1)
+         for i in range(unknown_composed_axis):
+             split_positions[i + 1] = split_positions[i] + lengths_of_composed_axes[i]
+         # walk backwards from the right end: each position is the next one
+         # minus the (known) length of the group to its right
+         for j in range(unknown_composed_axis + 1, len(lengths_of_composed_axes))[::-1]:
+             split_positions[j] = split_positions[j + 1] - lengths_of_composed_axes[j]
+
+     xp = tensor.__array_namespace__()
+     shape_start = input_shape[:unpacked_axis]
+     shape_end = input_shape[unpacked_axis + 1:]
+     slice_filler = (slice(None, None),) * unpacked_axis
+     return [
+         xp.reshape(
+             # shortest way to slice an arbitrary axis
+             tensor[(*slice_filler, slice(split_positions[i], split_positions[i + 1]))],
+             (*shape_start, *element_shape, *shape_end)
+         )
+         for i, element_shape in enumerate(packed_shapes)
+     ]
+
+
+ if __name__ == '__main__':
+     import numpy.array_api as np
+
+     H = 100
+     W = 101
+     C = 3
+
+     r = np.zeros((H, W))
+     g = np.zeros((H, W))
+     b = np.zeros((H, W))
+     embeddings = np.zeros((H, W, 32))
+
+     im = np.stack([r, g, b], axis=-1)
+     print(im.shape)
+
+     image, shapes = pack('h w *', [r, g, b])
+     print(image.shape, shapes)
+
+     print(type(image))
+     print(type(im))
+     assert np.all(np.equal(image, im))
+
+     images_and_embedding, shapes = pack('h w *', [r, g, b, embeddings])
+     print(images_and_embedding.shape, shapes)
+     r2, g2, b2, embeddings2 = unpack('h w *', images_and_embedding, shapes)
+     assert np.all(np.equal(r, r2))
+     assert np.all(np.equal(g, g2))
+     assert np.all(np.equal(b, b2))
+     assert np.all(np.equal(embeddings, embeddings2))
+
+     print([x.shape for x in unpack('h w *', images_and_embedding, shapes[1:])])
+
+     print('all is fine')
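
For intuition, a small worked sketch (hypothetical shapes, plain Python) of the split-position arithmetic used in unpack above:

def split_positions_demo():
    # packed_shapes = [(2,), (-1, 5), (3,)] along a '*' axis of length 25
    n = 25
    lengths = [2, -1, 3]           # prod of each packed shape; -1 marks the unknown group
    positions = [0, 0, 0, n]
    positions[1] = positions[0] + lengths[0]   # forward pass up to the unknown group
    positions[2] = positions[3] - lengths[2]   # backward pass from the right end
    return positions               # the unknown group spans positions[2] - positions[1] = 20 elements

assert split_positions_demo() == [0, 2, 22, 25]
# reshaping that 20-element slice to (-1, 5) then resolves the -1 to 4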
evalkit_tf446/lib/python3.10/site-packages/einops/experimental/indexing.py ADDED
@@ -0,0 +1,393 @@
+ """
+ Indexing one array with the other(s).
+
+ Concept for discussion.
+
+ Notation targets hard cases, not simple ones, like indexing of a 1d array with another 1d array
+ (the notation supports that, but you can't simplify arr[ind], and there is no reason to).
+
+ Examples
+
+ 1. for every token in a sequence, query a token in the image. Images and sequences are paired
+ einindex('b t c <- b h w c, [h, w] b t', arr_bhwc, [h_indices_bt, w_indices_bt])
+
+ this is equivalent, so you can pass indexers independently or together
+ einindex('b t c <- b h w c, [h, w] b t', arr_bhwc, np.asarray([h_indices_bt, w_indices_bt]))
+
+ after some thinking I decided that having the first axis for the indexing variable is not too restrictive,
+ but should simplify mapping of such cases.
+ For this reason the [...] part should always go first in the indexer.
+
+ This makes the largest difference with einindex https://github.com/malmaud/einindex,
+ which has almost identical grammar, but puts the special dimension last, while we put it first.
+ This trick allows naturally decomposing a multiindex into individual dimensions or vice versa.
+
+
+ 2. for every token in the video, query the most suitable word in a (matching) sentence
+ einindex('b t h w <- seq b, [seq] t b h w', arr_tbc, [t_indices_bhw])
+
+ note that only one indexer is used, but it still has to be enclosed in a list.
+ That's the price for being generic. Alternatively, a leading singleton dimension can be added.
+
+
+ 3. (not supported now, future planning)
+ for every timeframe in a video, find the token with the highest norm (across h and w), and compose a new stack of them
+ indices_2bt = argmax(x_bthwc.norm(dim=-1), 'b t h w -> [h, w] b t')
+ selected_embeddings_btc = einindex('b t c <- b t h w c, [h, w] b t', x_bthwc, indices_2bt)
+
+ while currently the question is 'how do we index',
+ it is important to pre-align that with the question 'what are natural ways to get indices'.
+ Most common are min/max. Less common options: topk (works here), random sampling.
+
+
+ Some important properties of this notation:
+ - support for multiple indexers, including using a single tensor to keep multiple indexers
+ - 'batch' indexing, when some axes of indexer and array should be matched
+ - universal (one-indexing-to-rule-them-all)
+ - extensible for (named) ellipses, including a variadic number of indexers
+ - extensible for einops-style compositions and decompositions
+ - extensible for outer indexing when indexers are not aligned
+
+ The current implementation is based on the python array api and uses loops,
+ because no appropriate indexing is available in the standard.
+ """
+
+ from typing import List, Union, TypeVar, Tuple
+
+ from einops import EinopsError
+
+ T = TypeVar('T')
+
+
+ class CompositionDecomposition:
+     def __init__(
+         self,
+         decomposed_shape: List[str],
+         composed_shape: List[List[str]],
+     ):
+         flat_shape = []
+         for x in composed_shape:
+             flat_shape.extend(x)
+
+         self.compose_transposition: Tuple[int, ...] = tuple([decomposed_shape.index(x) for x in flat_shape])
+         self.decompose_transposition: Tuple[int, ...] = tuple([flat_shape.index(x) for x in decomposed_shape])
+         self.composed_shape = composed_shape
+         self.decomposed_shape = decomposed_shape
+
+     def decompose(self, x, known_axes_lengths: dict[str, int]):
+         xp = x.__array_namespace__()
+         shape = x.shape
+
+         flat_shape = []
+
+         for i, axis_group in enumerate(self.composed_shape):
+             unknown_axis_name = None
+             known_sizes_prod = 1
+             for axis_name in axis_group:
+                 if axis_name in known_axes_lengths:
+                     known_sizes_prod *= known_axes_lengths[axis_name]
+                 else:
+                     if unknown_axis_name is None:
+                         unknown_axis_name = axis_name
+                     else:
+                         raise EinopsError("Can't infer the size")
+
+             if unknown_axis_name is None:
+                 assert shape[i] == known_sizes_prod
+             else:
+                 known_axes_lengths[unknown_axis_name] = shape[i] // known_sizes_prod
+
+             for axis in axis_group:
+                 flat_shape.append(known_axes_lengths[axis])
+
+         x = xp.reshape(x, flat_shape)
+         return xp.permute_dims(x, self.decompose_transposition)
+
+     def compose(self, x, known_axes_lengths: dict[str, int]):
+         xp = x.__array_namespace__()
+
+         for axis_len, axis_name in zip(x.shape, self.decomposed_shape):
+             if axis_name in known_axes_lengths:
+                 assert known_axes_lengths[axis_name] == axis_len
+             else:
+                 known_axes_lengths[axis_name] = axis_len
+
+         x = xp.permute_dims(x, self.compose_transposition)
+         new_shape = []
+         for axis_group in self.composed_shape:
+             composed_axis_size = 1
+             for axis_name in axis_group:
+                 composed_axis_size *= known_axes_lengths[axis_name]
+             new_shape.append(composed_axis_size)
+
+         return xp.reshape(x, tuple(new_shape))
+
+
+ def arange_at_position(xp, n_axes, axis, axis_len, device=None):
+     x = xp.arange(axis_len, dtype=xp.int64, device=device)
+     shape = [1] * n_axes
+     shape[axis] = axis_len
+     x = xp.reshape(x, shape)
+     return x
+
+
+ class IndexingFormula:
+
+     def __init__(self, pattern: str):
+         """
+         :param pattern: example 'b t c <- b hsel wsel c, [hsel, wsel] b t'
+         """
+         self.pattern = pattern
+         left, right = pattern.split('<-')
+         arg_split = right.index(',')
+         arr_pattern, ind_pattern = right[:arg_split], right[arg_split + 1:]
+         ind_pattern = ind_pattern.strip()
+         assert ind_pattern.startswith('['), 'composition axes should go first in the indexer (second argument): [h w] i j k'
+         composition_start = ind_pattern.index('[')
+         composition_end = ind_pattern.index(']')
+         composition = ind_pattern[composition_start + 1: composition_end]
+         ind_other_axes = ind_pattern[composition_end + 1:]
+
+         self.result_axes_names = left.split()
+         self.array_axes_names = arr_pattern.split()
+         self.indexing_axes_names = [x.strip() for x in composition.split(',')]
+         self.indexer_other_axes_names = ind_other_axes.split()
+
+         for group_name, group in [
+             ('result', self.result_axes_names),
+             ('array', self.array_axes_names),
+             ('indexer', self.indexing_axes_names + self.indexer_other_axes_names),
+         ]:
+             if len(set(group)) != len(group):
+                 # TODO: more verbosity — report which axis is duplicated
+                 raise EinopsError(f'{group_name} pattern ({group}) contains a duplicated axis')
+
+         axis_groups = [
+             self.result_axes_names,
+             self.array_axes_names,
+             self.indexing_axes_names,
+             self.indexer_other_axes_names,
+         ]
+
+         all_axes = set()
+         for group in axis_groups:
+             all_axes.update(group)
+
+         self.indexer_axes = []
+         self.batch_axes = []
+         self.result_and_index_axes = []
+         self.result_and_array_axes = []
+
+         for axis in all_axes:
+             presence = tuple(axis in g for g in axis_groups)
+             # want match-case here. sweet dreams
+             if presence == (False, True, True, False):
+                 self.indexer_axes.append(axis)
+             elif presence[2]:
+                 raise EinopsError(f'Wrong usage of indexer variable {axis}')
+             elif presence == (True, True, False, True):
+                 self.batch_axes.append(axis)
+             elif presence == (True, False, False, True):
+                 self.result_and_index_axes.append(axis)
+             elif presence == (True, True, False, False):
+                 self.result_and_array_axes.append(axis)
+             else:
+                 # TODO: better categorization of wrong usage patterns
+                 raise EinopsError(f'{axis} is used incorrectly in {pattern}')
+
+         assert set(self.indexer_axes) == set(self.indexing_axes_names)
+         # order of these variables matters, since we can't lose the mapping here
+         self.indexer_axes = self.indexing_axes_names
+
+         self.array_composition = CompositionDecomposition(
+             decomposed_shape=self.array_axes_names,
+             composed_shape=[self.batch_axes + self.indexer_axes, self.result_and_array_axes],
+         )
+
+         self.index_composition = CompositionDecomposition(
+             decomposed_shape=self.indexer_other_axes_names,
+             # single axis after composition
+             composed_shape=[self.batch_axes + self.result_and_index_axes],
+         )
+
+         self.result_composition = CompositionDecomposition(
+             decomposed_shape=self.result_axes_names,
+             composed_shape=[self.batch_axes + self.result_and_index_axes, self.result_and_array_axes],
+         )
+
+     def apply_to_array_api(self, arr: T, ind: Union[T, List[T]]):
+         known_axes_sizes: dict[str, int] = {}
+         xp = arr.__array_namespace__()
+
+         if not isinstance(ind, list):
+             ind = [ind[i, ...] for i in range(ind.shape[0])]
+
+         for indexer in ind:
+             assert len(indexer.shape) == len(self.indexer_other_axes_names)
+
+         # step 1. transpose and reshape arr; learn its dimensions
+         arr_2d = self.array_composition.compose(arr, known_axes_sizes)
+
+         # step 2. compute shifts and create an actual indexing array
+         shift = 1
+         full_index = xp.zeros([1] * len(ind[0].shape), dtype=xp.int64, device=arr.device)
+
+         # original order: [*batch-like axes, *indexing_axes];
+         # now we need to traverse them in the opposite direction
+
+         for axis_name, indexer in list(zip(self.indexing_axes_names, ind))[::-1]:
+             full_index = full_index + shift * (indexer % known_axes_sizes[axis_name])
+             shift *= known_axes_sizes[axis_name]
+
+         for axis_name in self.batch_axes[::-1]:
+             axis_id = self.indexer_other_axes_names.index(axis_name)
+             full_index = full_index + arange_at_position(
+                 xp, len(self.indexer_other_axes_names), axis=axis_id, axis_len=known_axes_sizes[axis_name],
+                 device=arr.device,
+             ) * shift
+             shift *= known_axes_sizes[axis_name]
+
+         assert shift == arr_2d.shape[0]
+
+         # step 3. flatten the index
+         full_index = self.index_composition.compose(full_index, known_axes_sizes)
+
+         # step 4. indexing
+         # the python array api lacks any integer indexing, so... loops it is.
+         # did you know that there is conceptual programming ... just like art?
+         # result_2d = arr_2d[full_index]
+         result_2d = xp.stack([arr_2d[full_index[i], :] for i in range(full_index.shape[0])])
+
+         # step 5. decompose the result back
+         result = self.result_composition.decompose(result_2d, known_axes_sizes)
+         return result
+
+
+ def einindex(pattern: str, arr: T, /, ind: Union[T, List[T]]):
+     """
+     Demonstrates how einindex should work.
+     Supports data-api compliant arrays.
+     """
+     formula = IndexingFormula(pattern)
+     return formula.apply_to_array_api(arr, ind)
+
+
+ def test_composition_and_decomposition():
+     import numpy.array_api as np
+     x = np.arange(2 * 3 * 5 * 7)
+     x = np.reshape(x, (2, 3, 5, 7))
+     comp = CompositionDecomposition(
+         decomposed_shape=['a', 'b', 'c', 'd'],
+         composed_shape=[['a', 'b'], ['c', 'd']],
+     )
+     assert comp.compose(x, known_axes_lengths={}).shape == (2 * 3, 5 * 7)
+
+     y = CompositionDecomposition(
+         decomposed_shape=['a', 'b', 'c', 'd'],
+         composed_shape=[['a', 'b'], [], ['c', 'd']],
+     ).compose(x, {})
+     assert y.shape == (2 * 3, 1, 5 * 7)
+     assert np.all(np.reshape(x, (-1,)) == np.reshape(y, (-1,)))
+
+     comp = CompositionDecomposition(
+         decomposed_shape=['a', 'b', 'e', 'c', 'd'],
+         composed_shape=[['e', 'c'], ['b'], ['a', 'd']],
+     )
+     x = np.arange(2 * 3 * 5 * 7 * 3)
+     x = np.reshape(x, (2, 3, 5, 7, 3))
+
+     axes = {}
+     y = comp.compose(x, axes)
+     x2 = comp.decompose(y, axes)
+     assert np.all(x == x2)
+
+
+ def test_simple_indexing():
+     import numpy.array_api as np
+
+     # simple 2d test
+     arr = np.reshape(np.arange(5 * 7), (5, 7))
+     ind = np.arange(7) % 5
+     x = einindex('j <- i j, [i] j', arr, [ind])
+     for j, i in enumerate(ind):
+         assert arr[i, j] == x[j]
+
+     y = einindex('j <- j i, [i] j', np.permute_dims(arr, (1, 0)), [ind])
+     for j, i in enumerate(ind):
+         assert arr[i, j] == y[j]
+
+
+ def test_multidimensional_indexing():
+     import numpy.array_api as np
+
+     embedding_bhwc = (
+         + arange_at_position(np, 4, 0, 2) * 1000
+         + arange_at_position(np, 4, 1, 3) * 100
+         + arange_at_position(np, 4, 2, 5) * 10
+         + arange_at_position(np, 4, 3, 7) * 1
+     )
+
+     hindices_bt = np.reshape(np.arange(6), (2, 3)) % 3
+     windices_bt = np.reshape(np.arange(6), (2, 3)) % 5
+
+     # imagine that you have pairs of image <> sentence;
+     # the goal is to get the most suitable token from the image for every token in the sentence,
+     # i.e. for every token in the sentence you compute the best k and v
+
+     result = einindex('c t b <- b h w c, [h, w] b t', embedding_bhwc, [hindices_bt, windices_bt])
+     # example of using a single array for indexing multiple axes
+     hw_indices_bt = np.stack([hindices_bt, windices_bt])
+     result2 = einindex('c t b <- b h w c, [h, w] b t', embedding_bhwc, hw_indices_bt)
+     assert np.all(result == result2)
+
+     # check vs manual element computation
+     result_manual = result * 0
+     for b in range(2):
+         for t in range(3):
+             for c in range(7):
+                 h = hindices_bt[b, t]
+                 w = windices_bt[b, t]
+                 result_manual[c, t, b] = embedding_bhwc[b, h, w, c]
+
+     assert np.all(result == result_manual)
+
+
+ def test_reverse_indexing():
+     import numpy.array_api as np
+
+     C, T, B = 2, 3, 5
+     # G = GPU, a batch-like variable
+     G = 4
+     H = 7
+     W = 9
+
+     arr_gtbc = (
+         + arange_at_position(np, 4, 0, G) * 1000
+         + arange_at_position(np, 4, 1, T) * 100
+         + arange_at_position(np, 4, 2, B) * 10
+         + arange_at_position(np, 4, 3, C) * 1
+     )
+
+     t_indices_gbhw = np.reshape(np.arange(G * B * H * W), (G, B, H, W)) % T
+
+     result = einindex('g b c h w <- g t b c, [t] g b h w', arr_gtbc, [t_indices_gbhw])
+
+     result_manual = result * 0
+     for g in range(G):
+         for b in range(B):
+             for c in range(C):
+                 for h in range(H):
+                     for w in range(W):
+                         t = t_indices_gbhw[g, b, h, w]
+                         result_manual[g, b, c, h, w] = arr_gtbc[g, t, b, c]
+
+     assert np.all(result == result_manual)
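
For intuition, the core trick of apply_to_array_api (compose batch and indexing axes into one flat axis, build a flat integer index from per-axis indices and strides, gather once, decompose back) can be sketched in plain numpy; names and shapes below are illustrative only:

import numpy as np

# what einindex('b t c <- b h w c, [h, w] b t', arr, [h_idx, w_idx]) computes
B, H, W, C, T = 2, 3, 5, 7, 4
arr = np.random.rand(B, H, W, C)
h_idx = np.random.randint(0, H, size=(B, T))
w_idx = np.random.randint(0, W, size=(B, T))

arr_2d = arr.reshape(B * H * W, C)                       # compose (b h w) c
flat = (np.arange(B)[:, None] * H + h_idx) * W + w_idx   # strides: b -> H*W, h -> W, w -> 1
result = arr_2d[flat.reshape(-1)].reshape(B, T, C)       # gather once, then decompose

expected = np.stack([arr[b, h_idx[b], w_idx[b]] for b in range(B)])
assert np.allclose(result, expected)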
evalkit_tf446/lib/python3.10/site-packages/einops/layers/__init__.py ADDED
@@ -0,0 +1,80 @@
+ __author__ = 'Alex Rogozhnikov'
+
+ import functools
+ from typing import Any
+
+ from einops.einops import _apply_recipe
+
+ from ..einops import TransformRecipe, _prepare_transformation_recipe
+ from .. import EinopsError
+
+
+ class RearrangeMixin:
+     """
+     Rearrange layer behaves identically to the einops.rearrange operation.
+
+     :param pattern: str, rearrangement pattern
+     :param axes_lengths: any additional specification of dimensions
+
+     See einops.rearrange for examples.
+     """
+
+     def __init__(self, pattern: str, **axes_lengths: Any) -> None:
+         super().__init__()
+         self.pattern = pattern
+         self.axes_lengths = axes_lengths
+         self._recipe = self.recipe()  # checking parameters
+
+     def __repr__(self) -> str:
+         params = repr(self.pattern)
+         for axis, length in self.axes_lengths.items():
+             params += ', {}={}'.format(axis, length)
+         return '{}({})'.format(self.__class__.__name__, params)
+
+     @functools.lru_cache(maxsize=1024)
+     def recipe(self) -> TransformRecipe:
+         try:
+             hashable_lengths = tuple(sorted(self.axes_lengths.items()))
+             return _prepare_transformation_recipe(self.pattern, operation='rearrange', axes_lengths=hashable_lengths)
+         except EinopsError as e:
+             raise EinopsError(' Error while preparing {!r}\n {}'.format(self, e))
+
+     def _apply_recipe(self, x):
+         return _apply_recipe(self._recipe, x, reduction_type='rearrange')
+
+
+ class ReduceMixin:
+     """
+     Reduce layer behaves identically to the einops.reduce operation.
+
+     :param pattern: str, rearrangement pattern
+     :param reduction: one of available reductions ('min', 'max', 'sum', 'mean', 'prod'), case-sensitive
+     :param axes_lengths: any additional specification of dimensions
+
+     See einops.reduce for examples.
+     """
+
+     def __init__(self, pattern: str, reduction: str, **axes_lengths: Any):
+         super().__init__()
+         self.pattern = pattern
+         self.reduction = reduction
+         self.axes_lengths = axes_lengths
+         self._recipe = self.recipe()  # checking parameters
+
+     def __repr__(self):
+         params = '{!r}, {!r}'.format(self.pattern, self.reduction)
+         for axis, length in self.axes_lengths.items():
+             params += ', {}={}'.format(axis, length)
+         return '{}({})'.format(self.__class__.__name__, params)
+
+     @functools.lru_cache(maxsize=1024)
+     def recipe(self) -> TransformRecipe:
+         try:
+             hashable_lengths = tuple(sorted(self.axes_lengths.items()))
+             return _prepare_transformation_recipe(
+                 self.pattern, operation=self.reduction, axes_lengths=hashable_lengths)
+         except EinopsError as e:
+             raise EinopsError(' Error while preparing {!r}\n {}'.format(self, e))
+
+     def _apply_recipe(self, x):
+         return _apply_recipe(self._recipe, x, reduction_type=self.reduction)
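
To see how framework adapters consume these mixins (the pattern used by the framework files below), here is a sketch; MyRearrange is a hypothetical name, and the real torch adapter appears later in this diff:

import torch
from einops.layers import RearrangeMixin

class MyRearrange(RearrangeMixin, torch.nn.Module):
    # the mixin parses the pattern and caches the recipe;
    # the framework base class makes it a regular trainable module
    def forward(self, x):
        return self._apply_recipe(x)

layer = MyRearrange('b c h w -> b (h w) c')
print(layer(torch.zeros(2, 3, 4, 5)).shape)  # torch.Size([2, 20, 3])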
evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.37 kB)
evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/_einmix.cpython-310.pyc ADDED
Binary file (7.91 kB)
evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/chainer.cpython-310.pyc ADDED
Binary file (2.21 kB)
evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/flax.cpython-310.pyc ADDED
Binary file (3.36 kB)
evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/gluon.cpython-310.pyc ADDED
Binary file (2.26 kB)
evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/keras.cpython-310.pyc ADDED
Binary file (352 Bytes)
evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/oneflow.cpython-310.pyc ADDED
Binary file (2.16 kB)
evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/paddle.cpython-310.pyc ADDED
Binary file (2.15 kB)
evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/tensorflow.cpython-310.pyc ADDED
Binary file (3.81 kB)
evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/torch.cpython-310.pyc ADDED
Binary file (2.52 kB)
evalkit_tf446/lib/python3.10/site-packages/einops/layers/_einmix.py ADDED
@@ -0,0 +1,176 @@
+ from typing import Any, List, Optional, Dict
+
+ from einops import EinopsError
+ from einops.parsing import ParsedExpression
+ import warnings
+ import string
+ from ..einops import _product
+
+
+ def _report_axes(axes: set, report_message: str):
+     if len(axes) > 0:
+         raise EinopsError(report_message.format(axes))
+
+
+ class _EinmixMixin:
+     def __init__(self, pattern: str, weight_shape: str, bias_shape: Optional[str] = None, **axes_lengths: Any):
+         """
+         EinMix - Einstein summation with automated tensor management and axis packing/unpacking.
+
+         EinMix is an advanced tool, helpful tutorial:
+         https://github.com/arogozhnikov/einops/blob/master/docs/3-einmix-layer.ipynb
+
+         Imagine taking einsum with two arguments: one is the input, the other is a tensor with weights
+         >>> einsum('time batch channel_in, channel_in channel_out -> time batch channel_out', input, weight)
+
+         This layer manages the weights for you; the syntax highlights the separate role of the weight matrix
+         >>> EinMix('time batch channel_in -> time batch channel_out', weight_shape='channel_in channel_out')
+         But otherwise it is the same einsum under the hood.
+
+         Simple linear layer with bias term (you have one like that in your framework)
+         >>> EinMix('t b cin -> t b cout', weight_shape='cin cout', bias_shape='cout', cin=10, cout=20)
+         There is no restriction to mixing only the last axis. Let's mix along height
+         >>> EinMix('h w c -> hout w c', weight_shape='h hout', bias_shape='hout', h=32, hout=32)
+         Channel-wise multiplication (like the one used in normalizations)
+         >>> EinMix('t b c -> t b c', weight_shape='c', c=128)
+         Separate dense layer within each head, no connection between different heads
+         >>> EinMix('t b (head cin) -> t b (head cout)', weight_shape='head cin cout', ...)
+
+         ... ah yes, you need to specify all dimensions of weight shape/bias shape in parameters.
+
+         Use cases:
+         - when the channel dimension is not last, use EinMix, not transposition
+         - patch/segment embeddings
+         - when only within-group connections are needed, to reduce the number of weights and computations
+         - perfect as a part of sequential models
+         - next-gen MLPs (follow the tutorial to learn more)
+
+         Uniform He initialization is applied to the weight tensor and accounts for the number of elements mixed.
+
+         Parameters
+         :param pattern: transformation pattern, left side - dimensions of input, right side - dimensions of output
+         :param weight_shape: axes of weight. A tensor of this shape is created, stored, and optimized in a layer
+         :param bias_shape: axes of bias added to output. Weights of this shape are created and stored. If `None` (the default), no bias is added.
+         :param axes_lengths: dimensions of weight tensor
+         """
+         super().__init__()
+         self.pattern = pattern
+         self.weight_shape = weight_shape
+         self.bias_shape = bias_shape
+         self.axes_lengths = axes_lengths
+         self.initialize_einmix(pattern=pattern, weight_shape=weight_shape, bias_shape=bias_shape, axes_lengths=axes_lengths)
+
+     def initialize_einmix(self, pattern: str, weight_shape: str, bias_shape: Optional[str], axes_lengths: dict):
+         left_pattern, right_pattern = pattern.split('->')
+         left = ParsedExpression(left_pattern)
+         right = ParsedExpression(right_pattern)
+         weight = ParsedExpression(weight_shape)
+         _report_axes(
+             set.difference(right.identifiers, {*left.identifiers, *weight.identifiers}),
+             'Unrecognized identifiers on the right side of EinMix {}'
+         )
+
+         if left.has_ellipsis or right.has_ellipsis or weight.has_ellipsis:
+             raise EinopsError('Ellipsis is not supported in EinMix (right now)')
+         if any(x.has_non_unitary_anonymous_axes for x in [left, right, weight]):
+             raise EinopsError('Anonymous axes (numbers) are not allowed in EinMix')
+         if '(' in weight_shape or ')' in weight_shape:
+             raise EinopsError(f'Parentheses are not allowed in weight shape: {weight_shape}')
+
+         pre_reshape_pattern = None
+         pre_reshape_lengths = None
+         post_reshape_pattern = None
+         if any(len(group) != 1 for group in left.composition):
+             names: List[str] = []
+             for group in left.composition:
+                 names += group
+             composition = ' '.join(names)
+             pre_reshape_pattern = f'{left_pattern}->{composition}'
+             pre_reshape_lengths = {name: length for name, length in axes_lengths.items() if name in names}
+
+         if any(len(group) != 1 for group in right.composition):
+             names = []
+             for group in right.composition:
+                 names += group
+             composition = ' '.join(names)
+             post_reshape_pattern = f'{composition}->{right_pattern}'
+
+         self._create_rearrange_layers(pre_reshape_pattern, pre_reshape_lengths, post_reshape_pattern, {})
+
+         for axis in weight.identifiers:
+             if axis not in axes_lengths:
+                 raise EinopsError('Dimension {} of weight should be specified'.format(axis))
+         _report_axes(
+             set.difference(set(axes_lengths), {*left.identifiers, *weight.identifiers}),
+             'Axes {} are not used in pattern',
+         )
+         _report_axes(
+             set.difference(weight.identifiers, {*left.identifiers, *right.identifiers}),
+             'Weight axes {} are redundant'
+         )
+         if len(weight.identifiers) == 0:
+             warnings.warn('EinMix: weight has no dimensions (means multiplication by a number)')
+
+         _weight_shape = [axes_lengths[axis] for axis, in weight.composition]
+         # a single output element is a combination of fan_in input elements
+         _fan_in = _product([axes_lengths[axis] for axis, in weight.composition if axis not in right.identifiers])
+         if bias_shape is not None:
+             if not isinstance(bias_shape, str):
+                 raise EinopsError('bias shape should be a string specifying which axes bias depends on')
+             bias = ParsedExpression(bias_shape)
+             _report_axes(
+                 set.difference(bias.identifiers, right.identifiers),
+                 'Bias axes {} not present in output'
+             )
+             _report_axes(
+                 set.difference(bias.identifiers, set(axes_lengths)),
+                 'Sizes not provided for bias axes {}',
+             )
+
+             _bias_shape = []
+             for axes in right.composition:
+                 for axis in axes:
+                     if axis in bias.identifiers:
+                         _bias_shape.append(axes_lengths[axis])
+                     else:
+                         _bias_shape.append(1)
+         else:
+             _bias_shape = None
+
+         weight_bound = (3 / _fan_in) ** 0.5
+         bias_bound = (1 / _fan_in) ** 0.5
+         self._create_parameters(_weight_shape, weight_bound, _bias_shape, bias_bound)
+
+         # rewrite the einsum expression with single-letter latin identifiers so that
+         # the expression will be understood by any framework
+         mapped_identifiers = {*left.identifiers, *right.identifiers, *weight.identifiers}
+         mapping2letters = {k: letter for letter, k in zip(string.ascii_lowercase, mapped_identifiers)}
+
+         def write_flat(axes: list):
+             return ''.join(mapping2letters[axis] for axis in axes)
+
+         self.einsum_pattern: str = '{},{}->{}'.format(
+             write_flat(left.flat_axes_order()),
+             write_flat(weight.flat_axes_order()),
+             write_flat(right.flat_axes_order()),
+         )
+
+     def _create_rearrange_layers(self,
+                                  pre_reshape_pattern: Optional[str],
+                                  pre_reshape_lengths: Optional[Dict],
+                                  post_reshape_pattern: Optional[str],
+                                  post_reshape_lengths: Optional[Dict]):
+         raise NotImplementedError('Should be defined in framework implementations')
+
+     def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
+         """ Shape and implementations """
+         raise NotImplementedError('Should be defined in framework implementations')
+
+     def __repr__(self):
+         params = repr(self.pattern)
+         params += f", '{self.weight_shape}'"
+         if self.bias_shape is not None:
+             params += f", '{self.bias_shape}'"
+         for axis, length in self.axes_lengths.items():
+             params += ', {}={}'.format(axis, length)
+         return '{}({})'.format(self.__class__.__name__, params)
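
A usage sketch for the per-head case from the docstring, using the torch EinMix defined later in this diff (shapes are illustrative):

import torch
from einops.layers.torch import EinMix

# a separate dense layer inside each of 8 heads, no cross-head connections
mix = EinMix('t b (head cin) -> t b (head cout)',
             weight_shape='head cin cout', bias_shape='head cout',
             head=8, cin=16, cout=32)
x = torch.randn(10, 4, 8 * 16)
print(mix(x).shape)  # torch.Size([10, 4, 256])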
evalkit_tf446/lib/python3.10/site-packages/einops/layers/chainer.py ADDED
@@ -0,0 +1,53 @@
+ from typing import Optional, Dict, cast
+
+ import chainer
+
+ from . import RearrangeMixin, ReduceMixin
+ from ._einmix import _EinmixMixin
+
+ __author__ = 'Alex Rogozhnikov'
+
+
+ class Rearrange(RearrangeMixin, chainer.Link):
+     def __call__(self, x):
+         return self._apply_recipe(x)
+
+
+ class Reduce(ReduceMixin, chainer.Link):
+     def __call__(self, x):
+         return self._apply_recipe(x)
+
+
+ class EinMix(_EinmixMixin, chainer.Link):
+     def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
+         uniform = chainer.variable.initializers.Uniform
+         with self.init_scope():
+             self.weight = chainer.variable.Parameter(uniform(weight_bound), weight_shape)
+             if bias_shape is not None:
+                 self.bias = chainer.variable.Parameter(uniform(bias_bound), bias_shape)
+             else:
+                 self.bias = None
+
+     def _create_rearrange_layers(self,
+                                  pre_reshape_pattern: Optional[str],
+                                  pre_reshape_lengths: Optional[Dict],
+                                  post_reshape_pattern: Optional[str],
+                                  post_reshape_lengths: Optional[Dict],
+                                  ):
+         self.pre_rearrange = None
+         if pre_reshape_pattern is not None:
+             self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths))
+
+         self.post_rearrange = None
+         if post_reshape_pattern is not None:
+             self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths))
+
+     def __call__(self, input):
+         if self.pre_rearrange is not None:
+             input = self.pre_rearrange(input)
+         result = chainer.functions.einsum(self.einsum_pattern, input, self.weight)
+         if self.bias is not None:
+             result = result + self.bias
+         if self.post_rearrange is not None:
+             result = self.post_rearrange(result)
+         return result
evalkit_tf446/lib/python3.10/site-packages/einops/layers/flax.py ADDED
@@ -0,0 +1,80 @@
+ from dataclasses import field
+ from typing import Optional, Dict, cast
+
+ import flax.linen as nn
+ import jax
+ import jax.numpy as jnp
+
+ from . import RearrangeMixin, ReduceMixin
+ from ._einmix import _EinmixMixin
+
+ __author__ = 'Alex Rogozhnikov'
+
+
+ class Reduce(nn.Module):
+     pattern: str
+     reduction: str
+     sizes: dict = field(default_factory=lambda: {})
+
+     def setup(self):
+         self.reducer = ReduceMixin(self.pattern, self.reduction, **self.sizes)
+
+     def __call__(self, input):
+         return self.reducer._apply_recipe(input)
+
+
+ class Rearrange(nn.Module):
+     pattern: str
+     sizes: dict = field(default_factory=lambda: {})
+
+     def setup(self):
+         self.rearranger = RearrangeMixin(self.pattern, **self.sizes)
+
+     def __call__(self, input):
+         return self.rearranger._apply_recipe(input)
+
+
+ class EinMix(nn.Module, _EinmixMixin):
+     pattern: str
+     weight_shape: str
+     bias_shape: Optional[str] = None
+     sizes: dict = field(default_factory=lambda: {})
+
+     def setup(self):
+         self.initialize_einmix(
+             pattern=self.pattern,
+             weight_shape=self.weight_shape,
+             bias_shape=self.bias_shape,
+             axes_lengths=self.sizes,
+         )
+
+     def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
+         self.weight = self.param("weight", jax.nn.initializers.uniform(weight_bound), weight_shape)
+
+         if bias_shape is not None:
+             self.bias = self.param("bias", jax.nn.initializers.uniform(bias_bound), bias_shape)
+         else:
+             self.bias = None
+
+     def _create_rearrange_layers(self,
+                                  pre_reshape_pattern: Optional[str],
+                                  pre_reshape_lengths: Optional[Dict],
+                                  post_reshape_pattern: Optional[str],
+                                  post_reshape_lengths: Optional[Dict]):
+         self.pre_rearrange = None
+         if pre_reshape_pattern is not None:
+             self.pre_rearrange = Rearrange(pre_reshape_pattern, sizes=cast(dict, pre_reshape_lengths))
+
+         self.post_rearrange = None
+         if post_reshape_pattern is not None:
+             self.post_rearrange = Rearrange(post_reshape_pattern, sizes=cast(dict, post_reshape_lengths))
+
+     def __call__(self, input):
+         if self.pre_rearrange is not None:
+             input = self.pre_rearrange(input)
+         result = jnp.einsum(self.einsum_pattern, input, self.weight)
+         if self.bias is not None:
+             result += self.bias
+         if self.post_rearrange is not None:
+             result = self.post_rearrange(result)
+         return result
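
A minimal usage sketch for the flax layers (flax and jax assumed installed; shapes illustrative): parameters are initialized lazily via init, then the layer is applied functionally.

import jax
import jax.numpy as jnp
from einops.layers.flax import Rearrange

layer = Rearrange(pattern='b h w c -> b (h w) c')
x = jnp.zeros((2, 4, 5, 3))
variables = layer.init(jax.random.PRNGKey(0), x)  # no trainable params here, but init is still required
print(layer.apply(variables, x).shape)            # (2, 20, 3)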
evalkit_tf446/lib/python3.10/site-packages/einops/layers/gluon.py ADDED
@@ -0,0 +1,50 @@
+ from typing import Optional, Dict
+
+ import mxnet
+
+ from . import RearrangeMixin, ReduceMixin
+ from ._einmix import _EinmixMixin
+
+ __author__ = 'Alex Rogozhnikov'
+
+
+ class Rearrange(RearrangeMixin, mxnet.gluon.HybridBlock):
+     def hybrid_forward(self, F, x):
+         return self._apply_recipe(x)
+
+
+ class Reduce(ReduceMixin, mxnet.gluon.HybridBlock):
+     def hybrid_forward(self, F, x):
+         return self._apply_recipe(x)
+
+
+ class EinMix(_EinmixMixin, mxnet.gluon.HybridBlock):
+     def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
+         with self.name_scope():
+             self.weight = self.params.get(name='weight', shape=weight_shape,
+                                           init=mxnet.initializer.Uniform(weight_bound),
+                                           )
+             if bias_shape is not None:
+                 self.bias = self.params.get(name='bias', shape=bias_shape,
+                                             init=mxnet.initializer.Uniform(bias_bound),
+                                             )
+             else:
+                 self.bias = None
+
+     def _create_rearrange_layers(self,
+                                  pre_reshape_pattern: Optional[str],
+                                  pre_reshape_lengths: Optional[Dict],
+                                  post_reshape_pattern: Optional[str],
+                                  post_reshape_lengths: Optional[Dict]):
+         if (pre_reshape_pattern is not None) or (post_reshape_pattern is not None):
+             raise NotImplementedError("EinMix in mxnet/gluon doesn't support axis group/ungroup "
+                                       "because einsum in gluon is defined only for mx.np.ndarrays")
+
+     def hybrid_forward(self, F, x, *args, **kwargs):
+         # mxnet.np can't work with 'usual' ndarrays; .data() is the standard way to get weights in gluon,
+         # and .as_np_ndarray makes the necessary conversion
+         result = mxnet.np.einsum(self.einsum_pattern, x.as_np_ndarray(), self.weight.data())
+         if self.bias is not None:
+             result += self.bias.data()
+         return result
evalkit_tf446/lib/python3.10/site-packages/einops/layers/keras.py ADDED
@@ -0,0 +1,9 @@
+ __author__ = 'Alex Rogozhnikov'
+
+ from ..layers.tensorflow import Rearrange, Reduce, EinMix
+
+ keras_custom_objects = {
+     Rearrange.__name__: Rearrange,
+     Reduce.__name__: Reduce,
+     EinMix.__name__: EinMix,
+ }
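
keras_custom_objects exists so that saved models containing einops layers can be deserialized; a sketch (the model path is hypothetical):

from tensorflow import keras
from einops.layers.keras import keras_custom_objects

# without custom_objects, keras cannot resolve 'Rearrange'/'Reduce'/'EinMix' when loading
model = keras.models.load_model('model_with_einops_layers.h5',
                                custom_objects=keras_custom_objects)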
evalkit_tf446/lib/python3.10/site-packages/einops/layers/oneflow.py ADDED
@@ -0,0 +1,53 @@
+ from typing import Optional, Dict, cast
+
+ import oneflow as flow
+
+ from . import RearrangeMixin, ReduceMixin
+ from ._einmix import _EinmixMixin
+
+ __author__ = 'Tianhe Ren & Depeng Liang'
+
+
+ class Rearrange(RearrangeMixin, flow.nn.Module):
+     def forward(self, input):
+         return self._apply_recipe(input)
+
+
+ class Reduce(ReduceMixin, flow.nn.Module):
+     def forward(self, input):
+         return self._apply_recipe(input)
+
+
+ class EinMix(_EinmixMixin, flow.nn.Module):
+     def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
+         self.weight = flow.nn.Parameter(flow.zeros(weight_shape).uniform_(-weight_bound, weight_bound),
+                                         requires_grad=True)
+         if bias_shape is not None:
+             self.bias = flow.nn.Parameter(flow.zeros(bias_shape).uniform_(-bias_bound, bias_bound),
+                                           requires_grad=True)
+         else:
+             self.bias = None
+
+     def _create_rearrange_layers(self,
+                                  pre_reshape_pattern: Optional[str],
+                                  pre_reshape_lengths: Optional[Dict],
+                                  post_reshape_pattern: Optional[str],
+                                  post_reshape_lengths: Optional[Dict],
+                                  ):
+         self.pre_rearrange = None
+         if pre_reshape_pattern is not None:
+             self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths))
+
+         self.post_rearrange = None
+         if post_reshape_pattern is not None:
+             self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths))
+
+     def forward(self, input):
+         if self.pre_rearrange is not None:
+             input = self.pre_rearrange(input)
+         result = flow.einsum(self.einsum_pattern, input, self.weight)
+         if self.bias is not None:
+             result += self.bias
+         if self.post_rearrange is not None:
+             result = self.post_rearrange(result)
+         return result
evalkit_tf446/lib/python3.10/site-packages/einops/layers/paddle.py ADDED
@@ -0,0 +1,59 @@
+ from typing import Optional, Dict, cast
+
+ import paddle
+
+ from . import RearrangeMixin, ReduceMixin
+ from ._einmix import _EinmixMixin
+
+ __author__ = 'PaddlePaddle'
+
+
+ class Rearrange(RearrangeMixin, paddle.nn.Layer):
+     def forward(self, input):
+         return self._apply_recipe(input)
+
+
+ class Reduce(ReduceMixin, paddle.nn.Layer):
+     def forward(self, input):
+         return self._apply_recipe(input)
+
+
+ class EinMix(_EinmixMixin, paddle.nn.Layer):
+     def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
+         self.weight = self.create_parameter(
+             weight_shape,
+             default_initializer=paddle.nn.initializer.Uniform(-weight_bound, weight_bound)
+         )
+
+         if bias_shape is not None:
+             self.bias = self.create_parameter(
+                 bias_shape,
+                 default_initializer=paddle.nn.initializer.Uniform(-bias_bound, bias_bound)
+             )
+         else:
+             self.bias = None
+
+     def _create_rearrange_layers(self,
+                                  pre_reshape_pattern: Optional[str],
+                                  pre_reshape_lengths: Optional[Dict],
+                                  post_reshape_pattern: Optional[str],
+                                  post_reshape_lengths: Optional[Dict],
+                                  ):
+         self.pre_rearrange = None
+         if pre_reshape_pattern is not None:
+             self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths))
+
+         self.post_rearrange = None
+         if post_reshape_pattern is not None:
+             self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths))
+
+     def forward(self, input):
+         if self.pre_rearrange is not None:
+             input = self.pre_rearrange(input)
+
+         result = paddle.einsum(self.einsum_pattern, input, self.weight)
+         if self.bias is not None:
+             result += self.bias
+         if self.post_rearrange is not None:
+             result = self.post_rearrange(result)
+         return result
evalkit_tf446/lib/python3.10/site-packages/einops/layers/tensorflow.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ from typing import List, Optional, Dict, cast
+
+ import tensorflow as tf
+ from tensorflow.keras.layers import Layer
+
+ from .._backends import UnknownSize
+ from . import RearrangeMixin, ReduceMixin
+ from ._einmix import _EinmixMixin
+ from ..einops import TransformRecipe, _reconstruct_from_shape_uncached
+
+ __author__ = 'Alex Rogozhnikov'
+
+
+ def _compute_output_shape(recipe: TransformRecipe, input_shape) -> List[Optional[int]]:
+     input_shape = [UnknownSize() if d is None else int(d) for d in input_shape]
+     init_shapes, reduced_axes, axes_reordering, added_axes, final_shape = \
+         _reconstruct_from_shape_uncached(recipe, input_shape)
+     output_shape: List[Optional[int]] = [None if isinstance(d, UnknownSize) else int(d) for d in final_shape]
+     return output_shape
+
+
+ class Rearrange(RearrangeMixin, Layer):
+     def compute_output_shape(self, input_shape):
+         return _compute_output_shape(self.recipe(), input_shape)
+
+     def call(self, inputs):
+         return self._apply_recipe(inputs)
+
+     def get_config(self):
+         return {'pattern': self.pattern, **self.axes_lengths}
+
+
+ class Reduce(ReduceMixin, Layer):
+     def compute_output_shape(self, input_shape):
+         return _compute_output_shape(self.recipe(), input_shape)
+
+     def call(self, inputs):
+         return self._apply_recipe(inputs)
+
+     def get_config(self):
+         return {'pattern': self.pattern, 'reduction': self.reduction, **self.axes_lengths}
+
+
+ class EinMix(_EinmixMixin, Layer):
+     def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
+         self.weight = tf.Variable(tf.random_uniform_initializer(-weight_bound, weight_bound)(shape=weight_shape),
+                                   trainable=True)
+         if bias_shape is not None:
+             self.bias = tf.Variable(tf.random_uniform_initializer(-bias_bound, bias_bound)(shape=bias_shape),
+                                     trainable=True)
+         else:
+             self.bias = None
+
+     def _create_rearrange_layers(self,
+                                  pre_reshape_pattern: Optional[str],
+                                  pre_reshape_lengths: Optional[Dict],
+                                  post_reshape_pattern: Optional[str],
+                                  post_reshape_lengths: Optional[Dict],
+                                  ):
+         self.pre_rearrange = None
+         if pre_reshape_pattern is not None:
+             self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths))
+
+         self.post_rearrange = None
+         if post_reshape_pattern is not None:
+             self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths))
+
+     def build(self, input_shape):
+         pass
+
+     def call(self, inputs):
+         if self.pre_rearrange is not None:
+             inputs = self.pre_rearrange(inputs)
+         result = tf.einsum(self.einsum_pattern, inputs, self.weight)
+         if self.bias is not None:
+             result = result + self.bias
+         if self.post_rearrange is not None:
+             result = self.post_rearrange(result)
+         return result
+
+     def get_config(self):
+         return {'pattern': self.pattern,
+                 'weight_shape': self.weight_shape,
+                 'bias_shape': self.bias_shape,
+                 **self.axes_lengths}
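A usage sketch for the Keras layers above (the layer stack and shapes are illustrative):

    import tensorflow as tf
    from einops.layers.tensorflow import Rearrange, Reduce

    model = tf.keras.Sequential([
        Rearrange('b h w c -> b (h w) c'),         # flatten spatial dims into a token axis
        Reduce('b t c -> b c', reduction='mean'),  # global average pooling
    ])
    out = model(tf.zeros([8, 32, 32, 3]))          # shape (8, 3)

Because the layers implement get_config, they can participate in the usual Keras config/serialization machinery like built-in layers (with the standard custom-objects registration where required).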
evalkit_tf446/lib/python3.10/site-packages/einops/layers/torch.py ADDED
@@ -0,0 +1,62 @@
+ from typing import Optional, Dict, cast
+
+ import torch
+
+ from . import RearrangeMixin, ReduceMixin
+ from ._einmix import _EinmixMixin
+ from .._torch_specific import apply_for_scriptable_torch
+
+ __author__ = 'Alex Rogozhnikov'
+
+
+ class Rearrange(RearrangeMixin, torch.nn.Module):
+     def forward(self, input):
+         return apply_for_scriptable_torch(self._recipe, input, reduction_type='rearrange')
+
+     def _apply_recipe(self, x):
+         # overriding the parent method to prevent its scripting
+         pass
+
+
+ class Reduce(ReduceMixin, torch.nn.Module):
+     def forward(self, input):
+         return apply_for_scriptable_torch(self._recipe, input, reduction_type=self.reduction)
+
+     def _apply_recipe(self, x):
+         # overriding the parent method to prevent its scripting
+         pass
+
+
+ class EinMix(_EinmixMixin, torch.nn.Module):
+     def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
+         self.weight = torch.nn.Parameter(torch.zeros(weight_shape).uniform_(-weight_bound, weight_bound),
+                                          requires_grad=True)
+         if bias_shape is not None:
+             self.bias = torch.nn.Parameter(torch.zeros(bias_shape).uniform_(-bias_bound, bias_bound),
+                                            requires_grad=True)
+         else:
+             self.bias = None
+
+     def _create_rearrange_layers(self,
+                                  pre_reshape_pattern: Optional[str],
+                                  pre_reshape_lengths: Optional[Dict],
+                                  post_reshape_pattern: Optional[str],
+                                  post_reshape_lengths: Optional[Dict],
+                                  ):
+         self.pre_rearrange = None
+         if pre_reshape_pattern is not None:
+             self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths))
+
+         self.post_rearrange = None
+         if post_reshape_pattern is not None:
+             self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths))
+
+     def forward(self, input):
+         if self.pre_rearrange is not None:
+             input = self.pre_rearrange(input)
+         result = torch.einsum(self.einsum_pattern, input, self.weight)
+         if self.bias is not None:
+             result += self.bias
+         if self.post_rearrange is not None:
+             result = self.post_rearrange(result)
+         return result
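A usage sketch for the torch layers above, a small patch-embedding stem (names and sizes are illustrative):

    import torch
    from einops.layers.torch import Rearrange, EinMix

    patchify = torch.nn.Sequential(
        # split each 32x32 image into 8x8 patches, flattening every patch
        Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=8, p2=8),
        # project each 192-dim patch (8*8*3) to a 64-dim embedding
        EinMix('b n d_in -> b n d_out',
               weight_shape='d_in d_out', bias_shape='d_out',
               d_in=8 * 8 * 3, d_out=64),
    )
    tokens = patchify(torch.randn(2, 3, 32, 32))  # shape (2, 16, 64)

Note that forward routes through apply_for_scriptable_torch, which is what keeps these layers compatible with torch.jit.script.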
evalkit_tf446/lib/python3.10/site-packages/einops/parsing.py ADDED
@@ -0,0 +1,149 @@
+ from einops import EinopsError
+ import keyword
+ import warnings
+ from typing import List, Optional, Set, Tuple, Union
+
+ _ellipsis: str = '…'  # NB, this is a single unicode symbol. String is used as it is not a list, but can be iterated
+
+
+ class AnonymousAxis(object):
+     """Important: all instances of this class are not equal to each other."""
+
+     def __init__(self, value: str):
+         self.value = int(value)
+         if self.value <= 1:
+             if self.value == 1:
+                 raise EinopsError('No need to create anonymous axis of length 1. Report this as an issue')
+             else:
+                 raise EinopsError('Anonymous axis should have positive length, not {}'.format(self.value))
+
+     def __repr__(self):
+         return "{}-axis".format(str(self.value))
+
+
+ class ParsedExpression:
+     """
+     Non-mutable structure that contains information about one side of an expression (e.g. 'b c (h w)')
+     and keeps some information important for downstream processing.
+     """
+     def __init__(self, expression: str, *, allow_underscore: bool = False,
+                  allow_duplicates: bool = False):
+         self.has_ellipsis: bool = False
+         self.has_ellipsis_parenthesized: Optional[bool] = None
+         self.identifiers: Set[str] = set()
+         # that's axes like 2, 3, 4 or 5. Axes with size 1 are exceptional and replaced with empty composition
+         self.has_non_unitary_anonymous_axes: bool = False
+         # composition keeps structure of composite axes, see how different corner cases are handled in tests
+         self.composition: List[Union[List[str], str]] = []
+         if '.' in expression:
+             if '...' not in expression:
+                 raise EinopsError('Expression may contain dots only inside ellipsis (...)')
+             if str.count(expression, '...') != 1 or str.count(expression, '.') != 3:
+                 raise EinopsError(
+                     'Expression may contain dots only inside ellipsis (...); only one ellipsis per tensor')
+             expression = expression.replace('...', _ellipsis)
+             self.has_ellipsis = True
+
+         bracket_group: Optional[List[str]] = None
+
+         def add_axis_name(x):
+             if x in self.identifiers:
+                 if not (allow_underscore and x == "_") and not allow_duplicates:
+                     raise EinopsError('Indexing expression contains duplicate dimension "{}"'.format(x))
+             if x == _ellipsis:
+                 self.identifiers.add(_ellipsis)
+                 if bracket_group is None:
+                     self.composition.append(_ellipsis)
+                     self.has_ellipsis_parenthesized = False
+                 else:
+                     bracket_group.append(_ellipsis)
+                     self.has_ellipsis_parenthesized = True
+             else:
+                 is_number = str.isdecimal(x)
+                 if is_number and int(x) == 1:
+                     # handling the case of anonymous axis of length 1
+                     if bracket_group is None:
+                         self.composition.append([])
+                     else:
+                         pass  # no need to think about 1s inside parenthesis
+                     return
+                 is_axis_name, reason = self.check_axis_name_return_reason(x, allow_underscore=allow_underscore)
+                 if not (is_number or is_axis_name):
+                     raise EinopsError('Invalid axis identifier: {}\n{}'.format(x, reason))
+                 if is_number:
+                     x = AnonymousAxis(x)
+                 self.identifiers.add(x)
+                 if is_number:
+                     self.has_non_unitary_anonymous_axes = True
+                 if bracket_group is None:
+                     self.composition.append([x])
+                 else:
+                     bracket_group.append(x)
+
+         current_identifier = None
+         for char in expression:
+             if char in '() ':
+                 if current_identifier is not None:
+                     add_axis_name(current_identifier)
+                 current_identifier = None
+                 if char == '(':
+                     if bracket_group is not None:
+                         raise EinopsError("Axis composition is one-level (brackets inside brackets not allowed)")
+                     bracket_group = []
+                 elif char == ')':
+                     if bracket_group is None:
+                         raise EinopsError('Brackets are not balanced')
+                     self.composition.append(bracket_group)
+                     bracket_group = None
+             elif str.isalnum(char) or char in ['_', _ellipsis]:
+                 if current_identifier is None:
+                     current_identifier = char
+                 else:
+                     current_identifier += char
+             else:
+                 raise EinopsError("Unknown character '{}'".format(char))
+
+         if bracket_group is not None:
+             raise EinopsError('Imbalanced parentheses in expression: "{}"'.format(expression))
+         if current_identifier is not None:
+             add_axis_name(current_identifier)
+
+     def flat_axes_order(self) -> List:
+         result = []
+         for composed_axis in self.composition:
+             assert isinstance(composed_axis, list), 'does not work with ellipsis'
+             for axis in composed_axis:
+                 result.append(axis)
+         return result
+
+     def has_composed_axes(self) -> bool:
+         # this will ignore 1 inside brackets
+         for axes in self.composition:
+             if isinstance(axes, list) and len(axes) > 1:
+                 return True
+         return False
+
+     @staticmethod
+     def check_axis_name_return_reason(name: str, allow_underscore: bool = False) -> Tuple[bool, str]:
+         if not str.isidentifier(name):
+             return False, 'not a valid python identifier'
+         elif name[0] == '_' or name[-1] == '_':
+             if name == '_' and allow_underscore:
+                 return True, ''
+             return False, 'axis name should not start or end with underscore'
+         else:
+             if keyword.iskeyword(name):
+                 warnings.warn("It is discouraged to use axes names that are keywords: {}".format(name), RuntimeWarning)
+             if name in ['axis']:
+                 warnings.warn("It is discouraged to use 'axis' as an axis name "
+                               "and will raise an error in the future", FutureWarning)
+             return True, ''
+
+     @staticmethod
+     def check_axis_name(name: str) -> bool:
+         """
+         Valid axes names are python identifiers except keywords,
+         and additionally should not start or end with underscore.
+         """
+         is_valid, _reason = ParsedExpression.check_axis_name_return_reason(name)
+         return is_valid
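A quick illustration of what ParsedExpression extracts from one side of a pattern (expected values shown as comments; identifiers is a set, so its print order may vary):

    from einops.parsing import ParsedExpression

    expr = ParsedExpression('b c (h w)')
    expr.identifiers            # {'b', 'c', 'h', 'w'}
    expr.composition            # [['b'], ['c'], ['h', 'w']]
    expr.has_composed_axes()    # True
    expr.has_ellipsis           # False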
evalkit_tf446/lib/python3.10/site-packages/einops/py.typed ADDED
File without changes
evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/__pycache__/request_validator.cpython-310.pyc ADDED
Binary file (32.8 kB)
evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (574 Bytes)
evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/access_token.cpython-310.pyc ADDED
Binary file (6.33 kB)
evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/authorization.cpython-310.pyc ADDED
Binary file (6.17 kB)
evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/base.cpython-310.pyc ADDED
Binary file (6.13 kB)
evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/pre_configured.cpython-310.pyc ADDED
Binary file (738 Bytes)
evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/request_token.cpython-310.pyc ADDED
Binary file (6.16 kB)
evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/resource.cpython-310.pyc ADDED
Binary file (4.06 kB)
evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/signature_only.cpython-310.pyc ADDED
Binary file (2.31 kB)
evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/request_token.py ADDED
@@ -0,0 +1,209 @@
+ # -*- coding: utf-8 -*-
+ """
+ oauthlib.oauth1.rfc5849.endpoints.request_token
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ This module is an implementation of the request token provider logic of
+ OAuth 1.0 RFC 5849. It validates the correctness of request token requests,
+ creates and persists tokens, and builds the proper response to be
+ returned to the client.
+ """
+ import logging
+
+ from oauthlib.common import urlencode
+
+ from .. import errors
+ from .base import BaseEndpoint
+
+ log = logging.getLogger(__name__)
+
+
+ class RequestTokenEndpoint(BaseEndpoint):
+
+     """An endpoint responsible for providing OAuth 1 request tokens.
+
+     Typical use is to instantiate with a request validator and invoke
+     ``create_request_token_response`` from a view function. The tuple returned
+     has all information necessary (body, status, headers) to quickly form
+     and return a proper response. See :doc:`/oauth1/validator` for details on which
+     validator methods to implement for this endpoint.
+     """
+
+     def create_request_token(self, request, credentials):
+         """Create and save a new request token.
+
+         :param request: OAuthlib request.
+         :type request: oauthlib.common.Request
+         :param credentials: A dict of extra token credentials.
+         :returns: The token as an urlencoded string.
+         """
+         token = {
+             'oauth_token': self.token_generator(),
+             'oauth_token_secret': self.token_generator(),
+             'oauth_callback_confirmed': 'true'
+         }
+         token.update(credentials)
+         self.request_validator.save_request_token(token, request)
+         return urlencode(token.items())
+
+     def create_request_token_response(self, uri, http_method='GET', body=None,
+                                       headers=None, credentials=None):
+         """Create a request token response, with a new request token if valid.
+
+         :param uri: The full URI of the token request.
+         :param http_method: A valid HTTP verb, e.g. GET, POST, PUT, HEAD.
+         :param body: The request body as a string.
+         :param headers: The request headers as a dict.
+         :param credentials: A list of extra credentials to include in the token.
+         :returns: A tuple of 3 elements.
+                   1. A dict of headers to set on the response.
+                   2. The response body as a string.
+                   3. The response status code as an integer.
+
+         An example of a valid request::
+
+             >>> from your_validator import your_validator
+             >>> from oauthlib.oauth1 import RequestTokenEndpoint
+             >>> endpoint = RequestTokenEndpoint(your_validator)
+             >>> h, b, s = endpoint.create_request_token_response(
+             ...     'https://your.provider/request_token?foo=bar',
+             ...     headers={
+             ...         'Authorization': 'OAuth realm=movies user, oauth_....'
+             ...     },
+             ...     credentials={
+             ...         'my_specific': 'argument',
+             ...     })
+             >>> h
+             {'Content-Type': 'application/x-www-form-urlencoded'}
+             >>> b
+             'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_callback_confirmed=true&my_specific=argument'
+             >>> s
+             200
+
+         A response to an invalid request would have a different body and status::
+
+             >>> b
+             'error=invalid_request&description=missing+callback+uri'
+             >>> s
+             400
+
+         The same goes for an unauthorized request::
+
+             >>> b
+             ''
+             >>> s
+             401
+         """
+         resp_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+         try:
+             request = self._create_request(uri, http_method, body, headers)
+             valid, processed_request = self.validate_request_token_request(
+                 request)
+             if valid:
+                 token = self.create_request_token(request, credentials or {})
+                 return resp_headers, token, 200
+             else:
+                 return {}, None, 401
+         except errors.OAuth1Error as e:
+             return resp_headers, e.urlencoded, e.status_code
+
+     def validate_request_token_request(self, request):
+         """Validate a request token request.
+
+         :param request: OAuthlib request.
+         :type request: oauthlib.common.Request
+         :raises: OAuth1Error if the request is invalid.
+         :returns: A tuple of 2 elements.
+                   1. The validation result (True or False).
+                   2. The request object.
+         """
+         self._check_transport_security(request)
+         self._check_mandatory_parameters(request)
+
+         if request.realm:
+             request.realms = request.realm.split(' ')
+         else:
+             request.realms = self.request_validator.get_default_realms(
+                 request.client_key, request)
+         if not self.request_validator.check_realms(request.realms):
+             raise errors.InvalidRequestError(
+                 description='Invalid realm {}. Allowed are {!r}.'.format(
+                     request.realms, self.request_validator.realms))
+
+         if not request.redirect_uri:
+             raise errors.InvalidRequestError(
+                 description='Missing callback URI.')
+
+         if not self.request_validator.validate_timestamp_and_nonce(
+                 request.client_key, request.timestamp, request.nonce, request,
+                 request_token=request.resource_owner_key):
+             return False, request
+
+         # The server SHOULD return a 401 (Unauthorized) status code when
+         # receiving a request with invalid client credentials.
+         # Note: This is postponed in order to avoid timing attacks, instead
+         # a dummy client is assigned and used to maintain near constant
+         # time request verification.
+         #
+         # Note that early exit would enable client enumeration
+         valid_client = self.request_validator.validate_client_key(
+             request.client_key, request)
+         if not valid_client:
+             request.client_key = self.request_validator.dummy_client
+
+         # Note that `realm`_ is only used in authorization headers and how
+         # it should be interpreted is not included in the OAuth spec.
+         # However they could be seen as a scope or realm to which the
+         # client has access and as such every client should be checked
+         # to ensure it is authorized access to that scope or realm.
+         # .. _`realm`: https://tools.ietf.org/html/rfc2617#section-1.2
+         #
+         # Note that early exit would enable client realm access enumeration.
+         #
+         # The require_realm indicates this is the first step in the OAuth
+         # workflow where a client requests access to a specific realm.
+         # This first step (obtaining request token) need not require a realm
+         # and can then be identified by checking the require_resource_owner
+         # flag and absence of realm.
+         #
+         # Clients obtaining an access token will not supply a realm and it will
+         # not be checked. Instead the previously requested realm should be
+         # transferred from the request token to the access token.
+         #
+         # Access to protected resources will always validate the realm but note
+         # that the realm is now tied to the access token and not provided by
+         # the client.
+         valid_realm = self.request_validator.validate_requested_realms(
+             request.client_key, request.realms, request)
+
+         # Callback is normally never required, except for requests for
+         # a Temporary Credential as described in `Section 2.1`_
+         # .. _`Section 2.1`: https://tools.ietf.org/html/rfc5849#section-2.1
+         valid_redirect = self.request_validator.validate_redirect_uri(
+             request.client_key, request.redirect_uri, request)
+         if not request.redirect_uri:
+             raise NotImplementedError('Redirect URI must either be provided '
+                                       'or set to a default during validation.')
+
+         valid_signature = self._check_signature(request)
+
+         # log the results to the validator_log
+         # this lets us handle internal reporting and analysis
+         request.validator_log['client'] = valid_client
+         request.validator_log['realm'] = valid_realm
+         request.validator_log['callback'] = valid_redirect
+         request.validator_log['signature'] = valid_signature
+
+         # We delay checking validity until the very end, using dummy values for
+         # calculations and fetching secrets/keys to ensure the flow of every
+         # request remains almost identical regardless of whether valid values
+         # have been supplied. This ensures near constant time execution and
+         # prevents malicious users from guessing sensitive information.
+         v = all((valid_client, valid_realm, valid_redirect, valid_signature))
+         if not v:
+             log.info("[Failure] request verification failed.")
+             log.info("Valid client: %s.", valid_client)
+             log.info("Valid realm: %s.", valid_realm)
+             log.info("Valid callback: %s.", valid_redirect)
+             log.info("Valid signature: %s.", valid_signature)
+         return v, request
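A minimal sketch of serving this endpoint; YourRequestValidator is a hypothetical subclass of oauthlib's RequestValidator implementing the methods referenced above (save_request_token, validate_client_key, etc.):

    from oauthlib.oauth1 import RequestTokenEndpoint
    from your_project.validator import YourRequestValidator  # hypothetical

    endpoint = RequestTokenEndpoint(YourRequestValidator())
    headers, body, status = endpoint.create_request_token_response(
        'https://provider.example/request_token',
        http_method='POST',
        headers={'Authorization': 'OAuth oauth_consumer_key="...", oauth_callback="..."'},
    )
    # headers/body/status map directly onto your web framework's response object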
evalkit_tf446/lib/python3.10/site-packages/timm/__init__.py ADDED
@@ -0,0 +1,4 @@
+ from .version import __version__
+ from .models import create_model, list_models, is_model, list_modules, model_entrypoint, \
+     is_scriptable, is_exportable, set_scriptable, set_exportable, has_pretrained_cfg_key, is_pretrained_cfg_key, \
+     get_pretrained_cfg_value, is_model_pretrained
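These re-exports are timm's top-level model-registry API; a brief usage sketch:

    import timm

    timm.list_models('resnet*')[:3]   # wildcard search over registered architectures
    model = timm.create_model('resnet18', pretrained=False, num_classes=10)
    assert timm.is_model('resnet18')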
evalkit_tf446/lib/python3.10/site-packages/timm/optim/__init__.py ADDED
@@ -0,0 +1,15 @@
+ from .adabelief import AdaBelief
+ from .adafactor import Adafactor
+ from .adahessian import Adahessian
+ from .adamp import AdamP
+ from .adamw import AdamW
+ from .lamb import Lamb
+ from .lars import Lars
+ from .lookahead import Lookahead
+ from .madgrad import MADGRAD
+ from .nadam import Nadam
+ from .nvnovograd import NvNovoGrad
+ from .radam import RAdam
+ from .rmsprop_tf import RMSpropTF
+ from .sgdp import SGDP
+ from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs
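The factory functions on the last line are the usual entry point; a sketch assuming a timm version whose create_optimizer_v2 accepts these arguments:

    import torch
    from timm.optim import create_optimizer_v2

    model = torch.nn.Linear(16, 4)
    # 'opt' selects the optimizer by name; remaining kwargs pass through
    optimizer = create_optimizer_v2(model, opt='adamw', lr=1e-3, weight_decay=0.05)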
evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adabelief.cpython-310.pyc ADDED
Binary file (6.53 kB)
evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adahessian.cpython-310.pyc ADDED
Binary file (5.89 kB)
evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adamp.cpython-310.pyc ADDED
Binary file (3.15 kB)