ZTWHHH committed
Commit 0ba83ef · verified · 1 Parent(s): e9e27a4

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50):
  1. .gitattributes +1 -0
  2. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/__init__.cpython-310.pyc +0 -0
  3. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/backend.cpython-310.pyc +0 -0
  4. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/ccl.cpython-310.pyc +0 -0
  5. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/comm.cpython-310.pyc +0 -0
  6. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/config.cpython-310.pyc +0 -0
  7. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/constants.cpython-310.pyc +0 -0
  8. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/reduce_op.cpython-310.pyc +0 -0
  9. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/torch.cpython-310.pyc +0 -0
  10. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/utils.cpython-310.pyc +0 -0
  11. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/comm.py +771 -0
  12. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/constants.py +51 -0
  13. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/torch.py +374 -0
  14. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/utils.py +133 -0
  15. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__init__.py +8 -0
  16. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__pycache__/__init__.cpython-310.pyc +0 -0
  17. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__pycache__/basic_layer.cpython-310.pyc +0 -0
  18. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__pycache__/compress.cpython-310.pyc +0 -0
  19. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__pycache__/config.cpython-310.pyc +0 -0
  20. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__pycache__/constants.cpython-310.pyc +0 -0
  21. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__pycache__/helper.cpython-310.pyc +0 -0
  22. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__pycache__/scheduler.cpython-310.pyc +0 -0
  23. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__pycache__/utils.cpython-310.pyc +0 -0
  24. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/basic_layer.py +840 -0
  25. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/compress.py +239 -0
  26. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/config.py +452 -0
  27. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/constants.py +188 -0
  28. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/helper.py +322 -0
  29. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/scheduler.py +173 -0
  30. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/utils.py +222 -0
  31. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/compression/__pycache__/__init__.cpython-310.pyc +0 -0
  32. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__init__.py +5 -0
  33. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/fused_optimizer.py +466 -0
  34. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/loss_scaler.py +270 -0
  35. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__init__.py +8 -0
  36. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/lamb.cpython-310.pyc +0 -0
  37. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/lamb.py +443 -0
  38. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/zoadam.py +359 -0
  39. evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/unfused_optimizer.py +427 -0
  40. evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/__init__.py +4 -0
  41. evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/__pycache__/__init__.cpython-310.pyc +0 -0
  42. evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/__pycache__/constants.cpython-310.pyc +0 -0
  43. evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/__pycache__/errors.cpython-310.pyc +0 -0
  44. evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/__pycache__/store.cpython-310.pyc +0 -0
  45. evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/__pycache__/utils.cpython-310.pyc +0 -0
  46. evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/__pycache__/version.cpython-310.pyc +0 -0
  47. evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/constants.py +4 -0
  48. evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/errors.py +25 -0
  49. evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/store.py +107 -0
  50. evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/utils.py +38 -0
.gitattributes CHANGED
@@ -1537,3 +1537,4 @@ infer_4_47_1/lib/python3.10/site-packages/msgpack/_cmsgpack.cpython-310-x86_64-l
 infer_4_47_1/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
 evalkit_tf449/lib/python3.10/site-packages/nvidia/cufft/lib/libcufft.so.11 filter=lfs diff=lfs merge=lfs -text
 infer_4_47_1/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+infer_4_47_1/lib/python3.10/site-packages/mpl_toolkits/mplot3d/__pycache__/axes3d.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (214 Bytes).

evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/backend.cpython-310.pyc ADDED
Binary file (1.6 kB).

evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/ccl.cpython-310.pyc ADDED
Binary file (6.51 kB).

evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/comm.cpython-310.pyc ADDED
Binary file (20 kB).

evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/config.cpython-310.pyc ADDED
Binary file (1.5 kB).

evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/constants.cpython-310.pyc ADDED
Binary file (1.03 kB).

evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/reduce_op.cpython-310.pyc ADDED
Binary file (520 Bytes).

evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/torch.cpython-310.pyc ADDED
Binary file (12.7 kB).

evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/__pycache__/utils.cpython-310.pyc ADDED
Binary file (2.72 kB).

evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/comm.py ADDED
@@ -0,0 +1,771 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team
"""
DeepSpeed Communication Package: deepspeed.comm
deepspeed.comm
    -- import and use deepspeed.ops.comm
    -- use torch.distributed directly if both this package and torch.distributed use the same NCCL version
    -- use custom collectives
        -- can either use torch.dist or ds.ops.comm?
        Note: the old 1-bit compressed allreduce variants that resided in deepspeed.runtime.comm will be moved here as well.
    deepspeed.comm API
        -- must be kept fully compatible (same signatures) as torch.dist API to ensure backward/cross-framework compatibility.
        -- e.g. if a client code used
            from deepspeed import comm as dist
        instead of
            import torch.distributed as dist
        The code should work without breaking any of the public torch.distributed functionality
    Future:
        -- deepspeed groups API should be brought into ds.comm
"""

import torch
import os

from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from .reduce_op import *
from deepspeed.accelerator import get_accelerator
from deepspeed.comm.ccl import CCLBackend

from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend

from deepspeed import utils
from datetime import timedelta

# Current deepspeed.comm backend (cdb) global object for simple access by client code
cdb = None

# Create global timer for ops
timers = timer.SynchronizedWallClockTimer()
timer_summary = {}

comms_logger = CommsLogger()

# Maintain objects of all initialized ds backends and assign them using the API functions in this file
nccl_backend = None
mpi_backend = None
ccl_backend = None
hccl_backend = None

# This should be set here so all rank/size information from the launcher can be propagated
from deepspeed.comm.utils import *


class ProcessGroup():

    def __init__(self, comm_id, ranks=[]):
        self.ranks = ranks
        self.comm_id = comm_id
        self.size = len(ranks)


def _configure_using_config_file(config):
    if config.comms_logger_enabled:
        comms_logger.configure(config)


def configure(
    deepspeed_config=None,
    enabled=None,
    prof_all=None,
    prof_ops=None,
    verbose=None,
    debug=None,
):

    if deepspeed_config is not None:
        _configure_using_config_file(deepspeed_config.comms_config)

    if enabled is not None:
        comms_logger.enabled = enabled

    if prof_all is not None:
        comms_logger.prof_all = prof_all

    if prof_ops is not None:
        comms_logger.prof_ops = prof_ops

    if verbose is not None:
        comms_logger.verbose = verbose

    if debug is not None:
        comms_logger.debug = debug


# Logging wrapper for timing ops
def timed_op(func):

    def log_wrapper(*args, **kwargs):
        # Add enabled flag so that overhead to each comm op is two if conditions at most
        if comms_logger.enabled:
            if ('prof' in kwargs
                    and kwargs['prof']) or comms_logger.prof_all or ('log_name' in kwargs
                                                                     and kwargs['log_name'] in comms_logger.prof_ops):
                # Need func args for their defaults
                func_args = get_default_args(func)
                func_args.update(kwargs)
                msg_size = get_msg_size_from_args(func, *args, **kwargs)
                log_name = get_debug_log_name(func_args, comms_logger.debug)
                timers(log_name).start()
        # Return the op, then stop the op's timer
        try:
            return func(*args, **kwargs)
        finally:
            if comms_logger.enabled:
                # Need to make op blocking for accurate logging
                get_accelerator().synchronize()
                # If we're using MPI, we can't simply sync the stream
                if cdb.using_mpi:
                    cdb.barrier()
                if ('prof' in kwargs and kwargs['prof']) or comms_logger.prof_all or (
                        'log_name' in kwargs and kwargs['log_name'] in comms_logger.prof_ops):
                    log_name = get_debug_log_name(func_args, comms_logger.debug)
                    raw_name = func.__name__
                    timers(log_name).stop()
                    # need temp var since 'elapsed' resets events
                    time_elapsed = timers(log_name).elapsed(reset=False)
                    comms_logger.append(raw_name, log_name, time_elapsed, msg_size)

    return log_wrapper


# For compatibility with torch distributed's init_process_group, we shall retain the signature from PyTorch code.
# DeepSpeed NCCL/MPI backend may not need all these params as we will have our own implementation.
# Please read full torch.distributed API docs from https://pytorch.org/docs/stable/distributed.html


# UNUSED: Future helper function to initialize DS backends
def init_deepspeed_backend(ds_backend, timeout, init_method):
    global cdb
    global nccl_backend
    global mpi_backend
    global ccl_backend
    global hccl_backend

    rank = int(os.getenv('RANK', '-1'))
    size = int(os.getenv('WORLD_SIZE', '-1'))

    if ds_backend == NCCL_BACKEND:
        utils.logger.debug("NCCL backend in DeepSpeed not yet implemented")
    elif ds_backend == MPI_BACKEND:
        utils.logger.debug("MPI backend in DeepSpeed not yet implemented")
    elif ds_backend == GLOO_BACKEND:
        utils.logger.debug("Gloo backend in DeepSpeed not yet implemented")
    elif ds_backend == CCL_BACKEND:
        ccl_backend = CCLBackend(rank=rank, world_size=size, timeout=timeout, init_method=init_method)
        utils.logger.info(f"Initialize {ds_backend} backend")
    elif ds_backend == HCCL_BACKEND:
        utils.logger.warn("HCCL backend in DeepSpeed not yet implemented")
    else:
        utils.logger.debug(f"DeepSpeed does not support {ds_backend} backend")


def is_initialized():
    #assert cdb is not None, 'DeepSpeed backend not set, please initialize it using init_process_group()'
    if cdb is None:
        return False
    else:
        return cdb.is_initialized()


def destroy_process_group(group=None):
    global cdb
    return cdb.destroy_process_group(group=group)


def new_group(ranks):
    global cdb
    assert cdb is not None and cdb.is_initialized(
    ), 'DeepSpeed backend not set, please initialize it using init_process_group()'
    return cdb.new_group(ranks)


def is_available() -> bool:

    # Returns ``True`` if the deepspeed comm package is available.

    # TODO: load other ops. Clients including deepspeed itself should use deepspeed.comm to import
    # any communication related primitives from this package.
    # use hasattr(deepspeed.csrc.ops, "_comm") or something
    return True


def set_backend():
    global cdb
    global nccl_backend
    global mpi_backend
    global ccl_backend
    global hccl_backend

    backend_name = get_accelerator().communication_backend_name()

    if backend_name == NCCL_BACKEND:
        if nccl_backend is not None and nccl_backend.is_initialized():
            cdb = nccl_backend
    elif backend_name == MPI_BACKEND:
        if mpi_backend is not None and mpi_backend.is_initialized():
            cdb = mpi_backend
    elif backend_name == CCL_BACKEND:
        if ccl_backend is not None and ccl_backend.is_initialized():
            cdb = ccl_backend
    elif backend_name == HCCL_BACKEND:
        if hccl_backend is not None and hccl_backend.is_initialized():
            cdb = hccl_backend


@timed_op
def broadcast(tensor, src, group=None, async_op=False, prof=False, log_name='broadcast', debug=get_caller_func()):
    global cdb
    return cdb.broadcast(tensor=tensor, src=src, group=group, async_op=async_op)


@timed_op
def all_gather(tensor_list,
               tensor,
               group=None,
               async_op=False,
               prof=False,
               log_name='all_gather',
               debug=get_caller_func()):
    global cdb
    return cdb.all_gather(tensor_list=tensor_list, tensor=tensor, group=group, async_op=async_op)


def has_reduce_scatter_tensor():
    global cdb
    assert cdb is not None and cdb.is_initialized(
    ), 'DeepSpeed backend not set, please initialize it using init_process_group()'
    return cdb.has_reduce_scatter_tensor()


def reduce_scatter_fn(output_tensor,
                      tensor,
                      op=ReduceOp.SUM,
                      group=None,
                      async_op=False,
                      prof=False,
                      debug=get_caller_func()):
    global cdb
    assert cdb is not None and cdb.is_initialized(
    ), 'DeepSpeed backend not set, please initialize it using init_process_group()'
    if cdb.has_reduce_scatter_tensor():
        return reduce_scatter_tensor(output_tensor,
                                     tensor,
                                     op=op,
                                     group=group,
                                     async_op=async_op,
                                     prof=prof,
                                     debug=debug)
    else:
        if get_rank() == 0:
            utils.logger.warning_once("unable to find torch.distributed.reduce_scatter_tensor. will fall back to "
                                      "torch.distributed.reduce_scatter which will result in suboptimal performance. "
                                      "please consider upgrading your pytorch installation.")
        input_tensor_lst = list(torch.chunk(tensor, cdb.get_world_size(group)))
        return reduce_scatter(output_tensor,
                              input_tensor_lst,
                              op=op,
                              group=group,
                              async_op=async_op,
                              prof=prof,
                              debug=debug)


@timed_op
def reduce_scatter_tensor(output_tensor,
                          tensor,
                          op=ReduceOp.SUM,
                          group=None,
                          async_op=False,
                          prof=False,
                          log_name='reduce_scatter_tensor',
                          debug=get_caller_func()):
    global cdb
    return cdb.reduce_scatter_tensor(output_tensor=output_tensor,
                                     input_tensor=tensor,
                                     op=op,
                                     group=group,
                                     async_op=async_op)


@timed_op
def all_gather_into_tensor(output_tensor,
                           tensor,
                           group=None,
                           async_op=False,
                           prof=False,
                           log_name='all_gather_into_tensor',
                           debug=get_caller_func()):
    global cdb
    return cdb.all_gather_into_tensor(output_tensor=output_tensor, input_tensor=tensor, group=group, async_op=async_op)


def has_all_gather_into_tensor():
    global cdb
    assert cdb is not None and cdb.is_initialized(
    ), 'DeepSpeed backend not set, please initialize it using init_process_group()'
    return cdb.has_all_gather_into_tensor()


def allgather_fn(output_tensor, input_tensor, group=None, async_op=False, debug=get_caller_func()):
    global cdb
    assert cdb is not None and cdb.is_initialized(
    ), 'DeepSpeed backend not set, please initialize it using init_process_group()'
    if cdb.has_all_gather_into_tensor():
        return all_gather_into_tensor(output_tensor, input_tensor, group=group, async_op=async_op, debug=debug)
    else:
        if get_rank() == 0:
            utils.logger.warning_once("unable to find torch.distributed.all_gather_into_tensor. will fall back to "
                                      "torch.distributed.all_gather which will result in suboptimal performance. "
                                      "please consider upgrading your pytorch installation.")
        output_tensors = list(torch.chunk(output_tensor, cdb.get_world_size(group)))
        return all_gather(output_tensors, input_tensor, group=group, async_op=async_op, debug=debug)


@timed_op
def all_to_all_single(output,
                      tensor,
                      output_split_sizes=None,
                      input_split_sizes=None,
                      group=None,
                      async_op=False,
                      prof=False,
                      log_name='all_to_all_single',
                      debug=get_caller_func()):
    global cdb
    return cdb.all_to_all_single(output=output,
                                 input=tensor,
                                 output_split_sizes=output_split_sizes,
                                 input_split_sizes=input_split_sizes,
                                 group=group,
                                 async_op=async_op)


@timed_op
def all_to_all(output_tensor_list, input_tensor_list, group=None, async_op=False):
    global cdb
    return cdb.all_to_all(output_tensor_list, input_tensor_list, group=group, async_op=async_op)


@timed_op
def send(tensor, dst, group=None, tag=0, prof=False, log_name='send', debug=get_caller_func()):
    global cdb
    return cdb.send(tensor=tensor, dst=dst, group=group, tag=tag)


@timed_op
def recv(tensor, src=None, group=None, tag=0, prof=False, log_name='recv', debug=get_caller_func()):
    global cdb
    return cdb.recv(tensor=tensor, src=src, group=group, tag=tag)


@timed_op
def isend(tensor, dst, group=None, tag=0, prof=False, log_name='isend', debug=get_caller_func()):
    global cdb
    return cdb.send(tensor=tensor, dst=dst, group=group, tag=tag)


@timed_op
def irecv(tensor, src=None, group=None, tag=0, prof=False, log_name='irecv', debug=get_caller_func()):
    global cdb
    return cdb.recv(tensor=tensor, src=src, group=group, tag=tag)


@timed_op
def gather(tensor,
           gather_list=None,
           dst=0,
           group=None,
           async_op=False,
           prof=False,
           log_name='gather',
           debug=get_caller_func()):
    global cdb
    return cdb.gather(tensor=tensor, gather_list=gather_list, dst=dst, group=group, async_op=async_op)


@timed_op
def scatter(tensor,
            scatter_list=None,
            src=0,
            group=None,
            async_op=False,
            prof=False,
            log_name='scatter',
            debug=get_caller_func()):
    global cdb
    return cdb.scatter(tensor=tensor, scatter_list=scatter_list, src=src, group=group, async_op=async_op)


@timed_op
def barrier(group=None, async_op=False, device_ids=None, prof=False, log_name='barrier', debug=get_caller_func()):
    global cdb
    return cdb.barrier(group=group, async_op=async_op)


@timed_op
def monitored_barrier(group=None,
                      timeout=None,
                      wait_all_ranks=False,
                      prof=False,
                      log_name='monitored_barrier',
                      debug=get_caller_func()):
    global cdb
    return cdb.monitored_barrier(group=group, timeout=timeout, wait_all_ranks=wait_all_ranks)


def log_summary(show_straggler=False):
    global cdb
    barrier(log_name='log_summary_barrier')
    if cdb.get_rank() == 0:
        comms_logger.log_all(print_log=True, show_straggler=show_straggler)
    else:
        comms_logger.log_all(print_log=False, show_straggler=show_straggler)
    barrier(log_name='log_summary_barrier')


@timed_op
def reduce(tensor,
           dst,
           op=ReduceOp.SUM,
           group=None,
           async_op=False,
           prof=False,
           log_name='reduce',
           debug=get_caller_func()):
    global cdb
    return cdb.reduce(tensor=tensor, dst=dst, op=op, group=group, async_op=async_op)


@timed_op
def reduce_scatter(output,
                   input_list,
                   op=ReduceOp.SUM,
                   group=None,
                   async_op=False,
                   prof=False,
                   log_name='reduce_scatter',
                   debug=get_caller_func()):
    global cdb
    return cdb.reduce_scatter(output=output, input_list=input_list, op=op, group=group, async_op=async_op)


def has_all_reduce_coalesced():
    """"""
    global cdb
    assert cdb is not None and cdb.is_initialized(
    ), 'DeepSpeed backend not set, please initialize it using init_process_group()'
    assert cdb.has_all_reduce_coalesced is not None, 'has_all_reduce_coalesced is not yet defined'
    return cdb.has_all_reduce_coalesced


def has_coalescing_manager():
    global cdb
    assert cdb is not None and cdb.is_initialized(
    ), 'DeepSpeed backend not set, please initialize it using init_process_group()'
    assert cdb.has_coalescing_manager is not None, 'has_coalescing_manager is not yet defined'
    return cdb.has_coalescing_manager


def all_gather_coalesced(output_tensors, input_tensors, group=None, async_op=False):
    global cdb
    assert cdb is not None and cdb.is_initialized(
    ), 'DeepSpeed backend not set, please initialize it using init_process_group()'
    return cdb.all_gather_coalesced(output_tensors, input_tensors, group=group, async_op=async_op)


@timed_op
def all_reduce(tensor,
               op=ReduceOp.SUM,
               group=None,
               async_op=False,
               prof=False,
               log_name='all_reduce',
               debug=get_caller_func()):
    #if profile_comm:
    # context of the timers?
    # timers.start()
    # TensorBoard logging for comm calls.?
    global cdb
    #print(f'op = {op}, cdb= {cdb.name}')
    return cdb.all_reduce(tensor, op, group, async_op)


@timed_op
def inference_all_reduce(tensor,
                         op=ReduceOp.SUM,
                         group=None,
                         async_op=False,
                         prof=False,
                         log_name='all_reduce',
                         debug=get_caller_func()):
    global cdb
    return cdb.inference_all_reduce(tensor, op, group, async_op)


@timed_op
def all_reduce_coalesced(tensors,
                         op=ReduceOp.SUM,
                         group=None,
                         async_op=False,
                         prof=False,
                         log_name='all_reduce',
                         debug=get_caller_func()):
    global cdb
    return cdb.all_reduce_coalesced(tensors, op, group, async_op)


def get_world_group():
    global cdb
    assert cdb is not None and cdb.is_initialized(
    ), 'DeepSpeed backend not set, please initialize it using init_process_group()'
    return cdb.get_world_group()


def get_world_size(group=None) -> int:
    """
    Returns the number of processes in the current process group
    Args:
        group (ProcessGroup, optional): The process group to work on. If None,
            the default process group will be used.
    Returns:
        The world size of the process group
        -1, if not part of the group
    """
    global cdb

    assert cdb is not None and cdb.is_initialized(
    ), 'DeepSpeed backend not set, please initialize it using init_process_group()'
    return cdb.get_world_size(group)


def get_rank(group=None):
    """
    Returns the rank of the current process in the provided ``group`` or the
    default group if none was provided.
    Rank is a unique identifier assigned to each process within a distributed
    process group. They are always consecutive integers ranging from 0 to
    ``world_size``.
    Args:
        group (ProcessGroup, optional): The process group to work on. If None,
            the default process group will be used.
    Returns:
        The rank of the process group
        -1, if not part of the group
    """
    global cdb
    assert cdb is not None and cdb.is_initialized(
    ), 'DeepSpeed backend not set, please initialize it using init_process_group()'
    return cdb.get_rank(group)


def get_local_rank():
    """
    Helper function to get local rank after a backend has been set and initialized
    Args:
        None
    Returns:
        local rank (= GPU device ID)
    """
    global cdb
    assert cdb is not None and cdb.is_initialized(
    ), 'DeepSpeed backend not set, please initialize it using init_process_group()'
    return get_local_rank_from_launcher()


def get_global_rank(group=None, group_rank=0):
    global cdb
    assert cdb is not None and cdb.is_initialized(
    ), 'DeepSpeed backend not set, please initialize it using init_process_group()'
    return cdb.get_global_rank(group, group_rank)


def get_all_ranks_from_group(group=None):
    global cdb
    assert cdb is not None and cdb.is_initialized(
    ), 'DeepSpeed backend not set, please initialize it using init_process_group()'
    rank = 0
    group_ranks = []
    try:
        while True:
            group_ranks.append(cdb.get_global_rank(group, rank))
            rank += 1
    except RuntimeError:
        pass
    return group_ranks


# Main DeepSpeed Comms. public API.
def init_distributed(dist_backend=None,
                     auto_mpi_discovery=True,
                     distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT,
                     verbose=True,
                     timeout=default_pg_timeout,
                     init_method=None,
                     dist_init_required=None,
                     config=None,
                     rank=-1,
                     world_size=-1):
    ''' Initialize dist backend, potentially performing MPI discovery if needed

    Arguments:
        dist_backend: Optional (str). torch distributed backend, e.g., nccl, mpi, gloo
        auto_mpi_discovery Optional (bool). if distributed environment variables are not set, attempt to discover them from MPI
        distributed_port: Optional (int). torch distributed backend port
        verbose: Optional (bool). verbose logging
        timeout: Optional (timedelta). Timeout for operations executed against the process group. Default value equals 30 minutes.
        init_method: Optional (string). Torch distributed, URL specifying how to initialize the process group. Default is “env://” if no init_method or store is specified.
        config: Optional (dict). DeepSpeed configuration for setting up comms options (e.g. Comms profiling)
        rank: Optional (int). The current manually specified rank. Some init_method like “tcp://” need the rank and world_size as well (see: https://pytorch.org/docs/stable/distributed.html#tcp-initialization)
        world_size: Optional (int). Desired world_size for the TCP or Shared file-system initialization.
    '''
    global cdb

    configure(deepspeed_config=config)

    if dist_init_required is None:
        dist_init_required = cdb is None or not cdb.is_initialized()

    if cdb is None:
        init_deepspeed_backend(get_accelerator().communication_backend_name(), timeout, init_method)
        set_backend()
        utils.logger.info(f'cdb={cdb}')
    if cdb is None and torch.distributed.is_initialized():
        # The user initialized torch.dist themselves, create cdb and short-circuit
        cdb = TorchBackend(dist_backend, timeout, init_method)
        return

    if dist_init_required is False:
        assert (
            cdb is not None and cdb.is_initialized() is True
        ), "Distributed backend is not initialized. Please set dist_init_required to True or initialize before calling deepspeed.initialize()"
    else:
        # Initialize torch distributed if needed
        required_env = ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
        if auto_mpi_discovery and not all(map(lambda v: v in os.environ, required_env)):
            if verbose:
                utils.logger.info("Not using the DeepSpeed or dist launchers, attempting to detect MPI environment...")
            if in_aml() and not in_dlts():
                patch_aml_env_for_torch_nccl_backend(verbose=verbose)
            elif in_aws_sm():
                patch_aws_sm_env_for_torch_nccl_backend(verbose=verbose)
            else:
                mpi_discovery(distributed_port=distributed_port, verbose=verbose)

        if cdb is not None and cdb.is_initialized():
            if int(os.getenv('RANK', '0')) == 0:
                utils.logger.info('Distributed backend already initialized')
        else:
            assert isinstance(timeout, timedelta)
            if dist_backend is None:
                dist_backend = get_accelerator().communication_backend_name()
            if int(os.getenv('RANK', '0')) == 0:
                utils.logger.info('Initializing TorchBackend in DeepSpeed with backend {}'.format(dist_backend))
            # Create a torch backend object, initialize torch distributed, and assign to cdb
            cdb = TorchBackend(dist_backend, timeout, init_method, rank, world_size)


def mpi_discovery(distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT, verbose=True):
    '''
    Discovery MPI environment via mpi4py and map to relevant dist state
    '''
    from mpi4py import MPI
    import subprocess
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    world_size = comm.Get_size()

    master_addr = None
    if rank == 0:
        hostname_cmd = ["hostname -I"]
        result = subprocess.check_output(hostname_cmd, shell=True)
        master_addr = result.decode('utf-8').split()[0]
    master_addr = comm.bcast(master_addr, root=0)

    # Determine local rank by assuming hostnames are unique
    proc_name = MPI.Get_processor_name()
    all_procs = comm.allgather(proc_name)
    local_rank = sum([i == proc_name for i in all_procs[:rank]])

    os.environ['RANK'] = str(rank)
    os.environ['WORLD_SIZE'] = str(world_size)
    os.environ['LOCAL_RANK'] = str(local_rank)
    os.environ['MASTER_ADDR'] = master_addr
    os.environ['MASTER_PORT'] = str(distributed_port)

    if verbose:
        utils.logger.info(
            "Discovered MPI settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}".
            format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'],
                   os.environ['MASTER_PORT']))

    if cdb is not None and cdb.is_initialized():
        assert cdb.get_rank() == rank, "MPI rank {} does not match torch rank {}".format(rank, cdb.get_rank())
        assert cdb.get_world_size() == world_size, "MPI world size {} does not match torch world size {}".format(
            world_size, cdb.get_world_size())


def in_aml():
    # Are we running inside an Azure Machine Learning (AML) environment?
    return 'AZUREML_EXPERIMENT_ID' in os.environ


def in_aws_sm():
    # Are we running inside an AWS SageMaker environment?
    return 'SM_TRAINING_ENV' in os.environ


def in_dlts():
    # Are we running on a DLTS cluster?
    return 'DLTS_JOB_ID' in os.environ


def patch_aml_env_for_torch_nccl_backend(master_port=6105, verbose=True):
    """Helper routine to get and set environment variables.
    This is adapted from Azure ML's documentation available from:
    https://azure.github.io/azureml-web/docs/cheatsheet/distributed-training/#environment-variables-from-openmpi
    """
    os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"]
    os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"]
    single_node = int(os.environ["OMPI_COMM_WORLD_LOCAL_SIZE"]) == int(os.environ["WORLD_SIZE"])

    if not single_node:
        master_node_params = os.environ["AZ_BATCH_MASTER_NODE"].split(":")
        os.environ["MASTER_ADDR"] = master_node_params[0]
        # Do not overwrite master port with that defined in AZ_BATCH_MASTER_NODE
        if "MASTER_PORT" not in os.environ:
            os.environ["MASTER_PORT"] = str(master_port)
    else:
        os.environ["MASTER_ADDR"] = os.environ["AZ_BATCHAI_MPI_MASTER_NODE"]
        os.environ["MASTER_PORT"] = DEFAULT_AML_MASTER_PORT

    if verbose:
        utils.logger.info("NCCL_SOCKET_IFNAME original value = {}".format(os.environ["NCCL_SOCKET_IFNAME"]))

    os.environ["NCCL_SOCKET_IFNAME"] = DEFAULT_AML_NCCL_SOCKET_IFNAME
    os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]

    if verbose:
        utils.logger.info(
            "Discovered AzureML settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}"
            .format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'],
                    os.environ['MASTER_PORT']))


def patch_aws_sm_env_for_torch_nccl_backend(verbose=True):
    """Helper routine to get and set environment variables when running inside an AWS SageMaker environment.
    """
    os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"]
    os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]
    os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"]

    if verbose:
        utils.logger.info(
            "Discovered AWS SageMaker settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}"
            .format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'],
                    os.environ['MASTER_PORT']))
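
Note: the module docstring above says deepspeed.comm is meant to stay signature-compatible with torch.distributed, so client code can swap the import. A minimal usage sketch under that assumption (illustrative only, not part of this commit; it presumes a DeepSpeed install and a launcher that sets RANK/WORLD_SIZE/MASTER_ADDR):

    import torch
    from deepspeed import comm as dist

    # Wraps torch.distributed.init_process_group and picks the accelerator's backend (e.g. nccl)
    dist.init_distributed(dist_backend='nccl')

    t = torch.ones(4, device=f'cuda:{dist.get_local_rank()}')
    dist.all_reduce(t)          # same call shape as torch.distributed.all_reduce
    if dist.get_rank() == 0:
        print(t)                # each element equals the world size after the sum
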
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/constants.py ADDED
@@ -0,0 +1,51 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

NCCL_BACKEND = 'nccl'
CCL_BACKEND = 'ccl'
MPI_BACKEND = 'mpi'
GLOO_BACKEND = 'gloo'
SCCL_BACKEND = 'sccl'
HCCL_BACKEND = 'hccl'

DEFAULT_AML_MASTER_PORT = "54965"
DEFAULT_AML_NCCL_SOCKET_IFNAME = "^docker0,lo"

#########################################
# Comms Logger
#########################################
# Comms Logger. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
COMMS_LOGGER_FORMAT = '''
The Comms Logger can be specified as:
"comms_logger": {
  "enabled": true,
  "verbose": false,
  "prof_all": true,
  "debug": false,
  "prof_ops": ["all_reduce", "custom_all_reduce_name"]
}
'''
COMMS_LOGGER = "comms_logger"

# Comms logger enable signal
COMMS_LOGGER_ENABLED = "enabled"
COMMS_LOGGER_ENABLED_DEFAULT = False

# Comms logger verbose signal
COMMS_LOGGER_VERBOSE = "verbose"
COMMS_LOGGER_VERBOSE_DEFAULT = False

# comms logger profile all ops signal
COMMS_LOGGER_PROF_ALL = "prof_all"
COMMS_LOGGER_PROF_ALL_DEFAULT = True

# comms logger show all ops signal
COMMS_LOGGER_DEBUG = "debug"
COMMS_LOGGER_DEBUG_DEFAULT = False

# comms logger profile specific ops in list
COMMS_LOGGER_PROF_OPS = "prof_ops"
COMMS_LOGGER_PROF_OPS_DEFAULT = []
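
These keys back the "comms_logger" section of a DeepSpeed config, as the COMMS_LOGGER_FORMAT string above documents. A hedged sketch of enabling it and dumping the collected timings (the keys come from this file; log_summary is defined in comm.py earlier in this commit; how the config dict is passed to DeepSpeed is assumed, not shown here):

    comms_config = {
        "comms_logger": {
            "enabled": True,      # turn the logger on
            "verbose": False,
            "prof_all": True,     # profile every collective
            "debug": False,
            "prof_ops": ["all_reduce", "all_gather"]
        }
    }
    # ...after the engine has been initialized with this config and some steps have run:
    # import deepspeed.comm as dist
    # dist.log_summary()   # rank 0 prints the aggregated per-op timings
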
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/torch.py ADDED
@@ -0,0 +1,374 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from deepspeed import utils

from .utils import *
from .backend import *
from .comm import *
import os

DS_COMM_ALL_GATHER_OFF = False
DS_COMM_REDUCE_SCATTER_OFF = False
DS_COMM_BROADCAST_OFF = False
DS_COMM_ALL_REDUCE_OFF = False
DS_COMM_REDUCE_OFF = False


def is_torch_ver_eq_2_0():
    TORCH_MAJOR, TORCH_MINOR = map(int, torch.__version__.split('.')[:2])
    if TORCH_MAJOR == 2 and TORCH_MINOR == 0:
        return True
    return False


def is_torch_ver_ge_2_1():
    TORCH_MAJOR, TORCH_MINOR = map(int, torch.__version__.split('.')[:2])
    if TORCH_MAJOR >= 2 and TORCH_MINOR >= 1:
        return True
    return False


def torch_ver_ge_1_13():
    TORCH_MAJOR, TORCH_MINOR = map(int, torch.__version__.split('.')[:2])
    if TORCH_MAJOR >= 1 and TORCH_MINOR >= 13:
        return True
    return False


def has_coalescing_manager():
    has_c10d = hasattr(torch.distributed, 'distributed_c10d')
    return has_c10d and hasattr(torch.distributed.distributed_c10d, '_coalescing_manager')


def has_all_reduce_coalesced():
    return hasattr(torch.distributed, "all_reduce_coalesced") and torch_ver_ge_1_13()


def get_coalescing_manager(group, device, reqs, async_op):
    if is_torch_ver_eq_2_0():
        return torch.distributed.distributed_c10d._coalescing_manager(group, device=device, reqs=reqs)
    elif is_torch_ver_ge_2_1():
        return torch.distributed.distributed_c10d._coalescing_manager(group, device=device, async_ops=async_op)
    else:
        return torch.distributed.distributed_c10d._coalescing_manager(group, reqs)


##Utilities to turn comm off
##TODO: move to base comm (wrapper)
def all_gather_comm_off(flag=False):
    global DS_COMM_ALL_GATHER_OFF
    DS_COMM_ALL_GATHER_OFF = flag


def reduce_scatter_comm_off(flag=False):
    global DS_COMM_REDUCE_SCATTER_OFF
    DS_COMM_REDUCE_SCATTER_OFF = flag


def broadcast_comm_off(flag=False):
    global DS_COMM_BROADCAST_OFF
    DS_COMM_BROADCAST_OFF = flag


def all_reduce_comm_off(flag=False):
    global DS_COMM_ALL_REDUCE_OFF
    DS_COMM_ALL_REDUCE_OFF = flag


def reduce_comm_off(flag=False):
    global DS_COMM_REDUCE_OFF
    DS_COMM_REDUCE_OFF = flag


#assumption: all_gather and reduce scatter
## are what we care about
def backward_comm_off(flag=False):
    all_gather_comm_off(flag)
    reduce_scatter_comm_off(flag)


class Noop:

    def wait(self):
        return None


class TorchBackend(Backend):
    """
    A light-weight wrapper class for torch.distributed API.
    Only a subset of functions are wrapped. Once the init_process_group
    is initialized, standard torch.distributed.* can be used directly
    so no need to wrap all the functions. We can keep adding wrappers as
    needed.
    """

    def __init__(self, backend, timeout, init_method, rank=-1, world_size=-1, name='torch'):
        super(TorchBackend, self).__init__()
        self.has_all_reduce_coalesced = has_all_reduce_coalesced()
        self.has_coalescing_manager = has_coalescing_manager()
        self.all_gather_function = self.get_all_gather_function()
        self.reduce_scatter_function = self.get_reduce_scatter_function()
        self.initialized = True
        self.name = name
        # Future functionality to support ds.initialize() on a single GPU
        # The idea is to fake that dist backend is initialized even when
        # it is not so we can run on a single GPU without doing any init_process_group
        self.single_gpu_mode = True
        self.init_process_group(backend, timeout, init_method, rank, world_size)

    @classmethod
    def get_all_gather_function(self):
        if hasattr(torch.distributed, "all_gather_into_tensor"):
            return torch.distributed.all_gather_into_tensor
        elif hasattr(torch.distributed, "_all_gather_base"):
            return torch.distributed._all_gather_base
        return None

    @classmethod
    def get_reduce_scatter_function(self):
        if hasattr(torch.distributed, "reduce_scatter_tensor"):
            return torch.distributed.reduce_scatter_tensor
        elif hasattr(torch.distributed, "_reduce_scatter_base"):
            return torch.distributed._reduce_scatter_base
        return None

    def has_all_gather_into_tensor(self):
        return self.all_gather_function is not None

    def has_reduce_scatter_tensor(self):
        return self.reduce_scatter_function is not None

    def init_process_group(self, backend, timeout, init_method, rank, world_size):
        if not torch.distributed.is_initialized():
            torch.distributed.init_process_group(backend,
                                                 timeout=timeout,
                                                 init_method=init_method,
                                                 rank=rank,
                                                 world_size=world_size)
        self.using_mpi = torch.distributed.get_backend() == 'mpi'

    def all_reduce(self, tensor, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False):
        op = self._reduce_op(op)
        return torch.distributed.all_reduce(tensor=tensor, op=op, group=group, async_op=async_op)

    def inference_all_reduce(self, tensor, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False):
        op = self._reduce_op(op)
        return torch.distributed.all_reduce(tensor=tensor, op=op, group=group, async_op=async_op)

    def all_reduce_coalesced(self, tensors, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False):
        """ proxy func to torch.distributed.all_reduce_coalesced,
        which is included in PyTorch 1.13 and above
        """
        if not self.has_all_reduce_coalesced:
            raise RuntimeError(f"Current torch version does not have all_reduce_coalesced "
                               f"api (torch.__version__: {torch.__version__})")
        op = self._reduce_op(op)
        return torch.distributed.all_reduce_coalesced(tensors=tensors, op=op, group=group, async_op=async_op)

    def reduce(self, tensor, dst, op=ReduceOp.SUM, group=None, async_op=False):
        if DS_COMM_REDUCE_OFF:
            if int(os.getenv('RANK', '0')) == 0:
                utils.logger.warning("REDUCE is OFF")
            return Noop()
        return torch.distributed.reduce(tensor=tensor, dst=dst, op=self._reduce_op(op), group=group, async_op=async_op)

    def reduce_scatter(self, output, input_list, op=ReduceOp.SUM, group=None, async_op=False):
        if DS_COMM_REDUCE_SCATTER_OFF:
            if int(os.getenv('RANK', '0')) == 0:
                utils.logger.warning("REDUCE SCATTER is OFF")
            return Noop()
        else:
            return torch.distributed.reduce_scatter(output=output,
                                                    input_list=input_list,
                                                    op=self._reduce_op(op),
                                                    group=group,
                                                    async_op=async_op)

    def broadcast(self, tensor, src, group=None, async_op=False):
        if DS_COMM_BROADCAST_OFF:
            if int(os.getenv('RANK', '0')) == 0:
                utils.logger.warning("BROADCAST is OFF")
            return Noop()
        else:
            return torch.distributed.broadcast(tensor=tensor, src=src, group=group, async_op=async_op)

    def all_gather(self, tensor_list, tensor, group=None, async_op=False):
        if DS_COMM_ALL_GATHER_OFF:
            if int(os.getenv('RANK', '0')) == 0:
                utils.logger.warning("All Gather is OFF")
            return Noop()
        else:
            return torch.distributed.all_gather(tensor_list=tensor_list, tensor=tensor, group=group, async_op=async_op)

    def all_gather_into_tensor(self, output_tensor, input_tensor, group=None, async_op=False):
        if self.has_all_gather_into_tensor():
            return self.all_gather_function(output_tensor=output_tensor,
                                            input_tensor=input_tensor,
                                            group=group,
                                            async_op=async_op)

    def all_gather_base(self, output_tensor, input_tensor, group=None, async_op=False):
        if DS_COMM_ALL_GATHER_OFF:
            if int(os.getenv('RANK', '0')) == 0:
                utils.logger.warning("All Gather is OFF")
            return Noop()
        else:
            if self.has_allgather_base:
                return torch.distributed.distributed_c10d._all_gather_base(output_tensor=output_tensor,
                                                                           input_tensor=input_tensor,
                                                                           group=group,
                                                                           async_op=async_op)
            else:
                utils.logger.warning("unable to find torch.distributed._all_gather_base. will fall back to "
                                     "torch.distributed.reduce_scatter which will result in suboptimal performance. "
                                     "please consider upgrading your pytorch installation.")
                pass

    def all_gather_coalesced(self, output_tensors, input_tensors, group=None, async_op=False):
        """"""
        assert len(output_tensors) == len(input_tensors), ""
        if hasattr(torch.distributed.distributed_c10d, '_all_gather_base_coalesced'):
            # customized PyTorch
            return torch.distributed.distributed_c10d._all_gather_base_coalesced(output_tensors,
                                                                                 input_tensors,
                                                                                 group=group,
                                                                                 async_op=async_op)
        elif has_coalescing_manager():
            reqs = []
            with get_coalescing_manager(group, input_tensors[0].device, reqs, async_op):
                for output, input in zip(output_tensors, input_tensors):
                    handle = torch.distributed.distributed_c10d.all_gather_into_tensor(output,
                                                                                       input,
                                                                                       group=group,
                                                                                       async_op=True)
                    reqs.append(handle)
            if async_op:
                return reqs[-1]
            else:
                reqs[-1].wait()

    def reduce_scatter_tensor(self, output_tensor, input_tensor, op=ReduceOp.SUM, group=None, async_op=False):
        if self.has_reduce_scatter_tensor():
            return self.reduce_scatter_function(output_tensor,
                                                input_tensor,
                                                op=self._reduce_op(op),
                                                group=group,
                                                async_op=async_op)
        else:
            utils.logger.warning("unable to find torch.distributed.reduce_scatter_tensor. will fall back to "
                                 "torch.distributed.reduce_scatter which will result in suboptimal performance. "
                                 "please consider upgrading your pytorch installation.")
            pass

    def all_to_all_single(self,
                          output,
                          input,
                          output_split_sizes=None,
                          input_split_sizes=None,
                          group=None,
                          async_op=False):
        return torch.distributed.all_to_all_single(output=output,
                                                   input=input,
                                                   output_split_sizes=output_split_sizes,
                                                   input_split_sizes=input_split_sizes,
                                                   group=group,
                                                   async_op=async_op)

    def all_to_all(self, output_tensor_list, input_tensor_list, group=None, async_op=False):
        return torch.distributed.all_to_all(output_tensor_list, input_tensor_list, group=group, async_op=async_op)

    def send(self, tensor, dst, group=None, tag=0):
        return torch.distributed.send(tensor=tensor, dst=dst, group=group, tag=tag)

    def recv(self, tensor, src=None, group=None, tag=0):
        return torch.distributed.recv(tensor=tensor, src=src, group=group, tag=tag)

    def isend(self, tensor, dst, group=None, tag=0):
        return torch.distributed.isend(tensor=tensor, dst=dst, group=group, tag=tag)

    def irecv(self, tensor, src=None, group=None, tag=0):
        return torch.distributed.irecv(tensor=tensor, src=src, group=group, tag=tag)

    def gather(self, tensor, gather_list=None, dst=0, group=None, async_op=False):
        return torch.distributed.gather(tensor=tensor,
                                        gather_list=gather_list,
                                        dst=dst,
                                        group=group,
                                        async_op=async_op)

    def scatter(self, tensor, scatter_list=None, src=0, group=None, async_op=False):
        return torch.distributed.scatter(tensor=tensor,
                                         scatter_list=scatter_list,
                                         src=src,
                                         group=group,
                                         async_op=async_op)

    def barrier(self, group=torch.distributed.GroupMember.WORLD, async_op=False, device_ids=None):
        if group is None:
            group = torch.distributed.GroupMember.WORLD
        return torch.distributed.barrier(group=group, async_op=async_op, device_ids=device_ids)

    def monitored_barrier(self, group=torch.distributed.GroupMember.WORLD, timeout=None, wait_all_ranks=False):
        if group is None:
            group = torch.distributed.GroupMember.WORLD
        return torch.distributed.monitored_barrier(group=group, timeout=timeout, wait_all_ranks=wait_all_ranks)

    def get_rank(self, group=None):
        return torch.distributed.get_rank(group=group)

    def get_world_size(self, group=None):
        return torch.distributed.get_world_size(group=group)

    def is_initialized(self):
        return torch.distributed.is_initialized()

    def get_backend(self, group=None):
        return torch.distributed.get_backend(group=group)

    def new_group(self, ranks):
        return torch.distributed.new_group(ranks)

    def get_global_rank(self, group, group_rank):
        if hasattr(torch.distributed.distributed_c10d, "get_global_rank"):
            from torch.distributed.distributed_c10d import get_global_rank as _get_global_rank
        else:
            from torch.distributed.distributed_c10d import _get_global_rank
        return _get_global_rank(group, group_rank)

    def get_world_group(self):
        return torch.distributed.group.WORLD

    def destroy_process_group(self, group=None):
        return torch.distributed.destroy_process_group(group=group)

    def _reduce_op(self, op):
        '''
            Helper function. If the op provided is not a torch.dist.ReduceOp, convert it and return
        '''
        if not isinstance(op, torch.distributed.ReduceOp):
            if op == ReduceOp.SUM:
                op = torch.distributed.ReduceOp.SUM
            elif op == ReduceOp.PRODUCT:
                op = torch.distributed.ReduceOp.PRODUCT
            elif op == ReduceOp.AVG:
                op = torch.distributed.ReduceOp.AVG
            elif op == ReduceOp.MIN:
                op = torch.distributed.ReduceOp.MIN
            elif op == ReduceOp.MAX:
                op = torch.distributed.ReduceOp.MAX
            elif op == ReduceOp.BAND:
                op = torch.distributed.ReduceOp.BAND
            elif op == ReduceOp.BOR:
                op = torch.distributed.ReduceOp.BOR
            elif op == ReduceOp.BXOR:
                op = torch.distributed.ReduceOp.BXOR
        return op


# This will become a light-weight wrapper around torch.distributed functions
# TODO: create some example to show how this wrapper can help profile communication
# TODO: make sure there is no performance regression with this approach
# TODO: explore monkey-patching if this does not work
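
TorchBackend resolves which torch.distributed primitives exist at import time (get_all_gather_function / get_reduce_scatter_function above) and maps DeepSpeed ReduceOp values onto torch.distributed.ReduceOp in _reduce_op. A standalone sketch of the same feature-detection pattern, assuming only that torch is installed (names below are illustrative, not part of the file):

    import torch.distributed as dist

    def pick_all_gather():
        # Prefer the fused API when present, fall back to the older private op, else report None
        if hasattr(dist, "all_gather_into_tensor"):
            return dist.all_gather_into_tensor
        if hasattr(dist, "_all_gather_base"):
            return dist._all_gather_base
        return None

    all_gather_fn = pick_all_gather()
    print("fused all_gather available:", all_gather_fn is not None)
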
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/comm/utils.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ import inspect
8
+ from deepspeed.utils import get_caller_func
9
+
10
+
11
+ def get_local_rank_from_launcher():
12
+
13
+ # DeepSpeed launcher will set it so get from there
14
+ rank = os.environ.get('LOCAL_RANK')
15
+
16
+ if rank is None:
17
+ rank = os.environ.get('OMPI_COMM_WORLD_LOCAL_RANK')
18
+
19
+ # Make it a single process job and set rank to 0
20
+ if rank is None:
21
+ rank = 0
22
+
23
+ return int(rank)
24
+
25
+
26
+ def get_world_rank_from_launcher():
27
+
28
+ # DeepSpeed launcher will set it so get from there
29
+ rank = os.environ.get('RANK')
30
+
31
+ if rank is None:
32
+ rank = os.environ.get('OMPI_COMM_WORLD_RANK')
33
+
34
+ # Make it a single process job and set rank to 0
35
+ if rank is None:
36
+ rank = 0
37
+
38
+ return int(rank)
39
+
40
+
41
+ def get_world_size_from_launcher():
42
+ # DeepSpeed launcher will set it so get from there
43
+ size = os.environ.get('WORLD_SIZE')
44
+ rank = os.environ.get('RANK')
45
+
46
+ if size is None:
47
+ size = os.environ.get('OMPI_COMM_WORLD_SIZE')
48
+
49
+ # Make it a single process job and set size to 1
50
+ if size is None:
51
+ size = 1
52
+
53
+ if rank == 0:
54
+ print(f"set world size to {size}")
55
+
56
+ return int(size)
57
+
58
+
59
+ def get_default_args(func):
60
+ signature = inspect.signature(func)
61
+ return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
62
+
63
+
64
+ # We need this hacky function since torch doesn't consistently name or place the input tensor args
65
+ def get_tensor_position(func):
66
+ sig_params = inspect.signature(func).parameters
67
+ arg = None
68
+ # most colls
69
+ if 'tensor' in sig_params:
70
+ arg = 'tensor'
71
+ # all_reduce_coalesced coll
72
+ elif 'tensors' in sig_params:
73
+ arg = 'tensors'
74
+ # reduce scatter coll
75
+ elif 'input_list' in sig_params:
76
+ arg = 'input_list'
77
+ # all_to_all and torch multiGPU colls
78
+ elif 'input_tensor_list' in sig_params:
79
+ arg = 'input_tensor_list'
80
+ if arg is None:
81
+ return -1
82
+ else:
83
+ return list(sig_params).index(arg)
84
+
85
+
86
+ def get_tensor_kwarg(func, kwargs):
87
+ func_args = get_default_args(func)
88
+ func_args.update(kwargs)
89
+ arg = None
90
+
91
+ if 'tensor' in func_args:
92
+ arg = func_args['tensor']
93
+ elif 'tensors' in func_args:
94
+ arg = func_args['tensors']
95
+ elif 'input_list' in func_args:
96
+ arg = func_args['input_list']
97
+ elif 'input_tensor_list' in func_args:
98
+ arg = func_args['input_tensor_list']
99
+ return arg
100
+
101
+
102
+ def get_msg_size_from_args(func, *args, **kwargs):
103
+ # 3 cases:
104
+ # - tensor arg is in args
105
+ # - tensor arg is in kwargs
106
+ # - tensor arg is not present (e.g. barrier)
107
+ tensor_arg_position = -1
108
+ tensor_arg = None
109
+ # check if tensor arg is in args
110
+ if len(args) > 0:
111
+ tensor_arg_position = get_tensor_position(func)
112
+ if tensor_arg_position > -1:
113
+ tensor_arg = args[get_tensor_position(func)]
114
+ # check if tensor arg is in kwargs
115
+ if tensor_arg is None and len(kwargs) > 0:
116
+ tensor_arg = get_tensor_kwarg(func, kwargs)
117
+ # if tensor arg is not present, no data is being transmitted
118
+ if tensor_arg is None:
119
+ return 0
120
+ else:
121
+ # Sum of tensor sizes for list colls such as torch's all_to_all
122
+ # NOTE: msg_size for list colls will not be the actual size transmitted by a given MPI/NCCL call within the coll op. Instead, it's the total amount of data transmitted.
123
+ if type(tensor_arg) is list:
124
+ return sum(x.element_size() * x.nelement() for x in tensor_arg)
125
+ else:
126
+ return tensor_arg.element_size() * tensor_arg.nelement()
127
+
128
+
129
+ def get_debug_log_name(func_args, debug):
130
+ if debug:
131
+ return func_args['log_name'] + ' | [Caller Func: ' + get_caller_func() + ']'
132
+ else:
133
+ return func_args['log_name']
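As the comments in get_msg_size_from_args note, the tensor argument can arrive positionally, as a keyword, or not at all. A small usage sketch, assuming torch.distributed's all_reduce takes `tensor` and all_to_all takes `input_tensor_list` (which is how the helper locates the data):

    import torch
    import torch.distributed as dist
    from deepspeed.comm.utils import get_msg_size_from_args

    t = torch.zeros(1024, dtype=torch.float16)                    # 1024 elements * 2 bytes
    print(get_msg_size_from_args(dist.all_reduce, t))             # -> 2048

    chunks = [torch.zeros(256, dtype=torch.float32) for _ in range(4)]
    print(get_msg_size_from_args(dist.all_to_all,
                                 output_tensor_list=chunks,
                                 input_tensor_list=chunks))       # -> 4096, summed over the list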
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__init__.py ADDED
@@ -0,0 +1,8 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .compress import init_compression, redundancy_clean
7
+ from .scheduler import compression_scheduler
8
+ from .helper import convert_conv1d_to_linear
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (369 Bytes)
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__pycache__/basic_layer.cpython-310.pyc ADDED
Binary file (22.8 kB)
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__pycache__/compress.cpython-310.pyc ADDED
Binary file (6.6 kB)
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__pycache__/config.cpython-310.pyc ADDED
Binary file (11.4 kB)
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__pycache__/constants.cpython-310.pyc ADDED
Binary file (5.02 kB)
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__pycache__/helper.cpython-310.pyc ADDED
Binary file (9.92 kB)
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__pycache__/scheduler.cpython-310.pyc ADDED
Binary file (4.63 kB)
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/__pycache__/utils.cpython-310.pyc ADDED
Binary file (6.08 kB)
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/basic_layer.py ADDED
@@ -0,0 +1,840 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ import math
8
+ from torch import nn
9
+ from torch.nn import init
10
+ import deepspeed.comm as dist
11
+ from .utils import TopKBinarizer, SymQuantizer, AsymQuantizer, TernaryQuantizer, BinaryQuantizer
12
+ from deepspeed.utils import logger
13
+
14
+ g_mpu = None
15
+
16
+
17
+ class QuantAct(nn.Module):
18
+ """
19
+ Class to quantize given activations. Note that when using this function, the input activation quantization range will be fixed for all
20
+ tokens/images at inference time. This generally costs some accuracy but achieves better latency.
21
+ Parameters:
22
+ ----------
23
+ act_range_momentum : float, default 0.95
24
+ Momentum for updating the activation quantization range.
25
+ quant_mode : str, default 'symmetric'
26
+ """
27
+
28
+ def __init__(self, act_range_momentum=0.95, quant_mode='symmetric'):
29
+ super(QuantAct, self).__init__()
30
+
31
+ self.act_range_momentum = act_range_momentum
32
+ self.quant_mode = quant_mode
33
+ if quant_mode == 'symmetric':
34
+ self.act_function = SymQuantizer.apply
35
+ else:
36
+ self.act_function = AsymQuantizer.apply
37
+
38
+ self.register_buffer('x_min_max', torch.zeros(2))
39
+
40
+ def forward(self, x, num_bits, *args):
41
+ """
42
+ x: the activation that we need to quantize
43
+ num_bits: the number of bits we need to quantize the activation to
44
+ *args: extra arguments that are unused here but required to match the interface of the other quantization functions
45
+ """
46
+
47
+ if self.training:
48
+ x_min = x.data.min()
49
+ x_max = x.data.max()
50
+
51
+ # Initialization
52
+ if self.x_min_max[0] == self.x_min_max[1]:
53
+ self.x_min_max[0] = x_min
54
+ self.x_min_max[1] = x_max
55
+
56
+ # if do not need momentum, please set self.act_range_momentum = 0
57
+ self.x_min_max[0] = self.x_min_max[0] * self.act_range_momentum + x_min * (1 - self.act_range_momentum)
58
+ self.x_min_max[1] = self.x_min_max[1] * self.act_range_momentum + x_max * (1 - self.act_range_momentum)
59
+
60
+ x_q = self.act_function(x, num_bits, self.x_min_max[0], self.x_min_max[1])
61
+
62
+ return x_q
63
+
64
+
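The forward pass above maintains the activation range x_min_max as an exponential moving average before handing it to the quantizer. A standalone sketch of that update with illustrative numbers (not taken from the source):

    momentum = 0.95                            # act_range_momentum
    running_min, running_max = -1.0, 1.0       # previously observed range
    batch_min, batch_max = -1.5, 0.8           # range of the current batch

    running_min = running_min * momentum + batch_min * (1 - momentum)   # -> -1.025
    running_max = running_max * momentum + batch_max * (1 - momentum)   # ->  0.99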
65
+ class Embedding_Compress(nn.Embedding):
66
+
67
+ def __init__(self, *kargs):
68
+ super(Embedding_Compress, self).__init__(*kargs)
69
+ self.weight.start_bits = None
70
+ self.weight.target_bits = None
71
+ self.weight.q_period = None
72
+ self.weight_quantization_enabled_in_forward = False
73
+ self.weight_quantization_enabled = False
74
+
75
+ def extra_repr(self):
76
+ return 'num_embeddings={}, embedding_dim={}, weight_quantization={}'.format(
77
+ self.num_embeddings, self.embedding_dim, self.weight.target_bits)
78
+
79
+ def enable_weight_quantization(self, start_bits, target_bits, quantization_period,
80
+ weight_quantization_enabled_in_forward, quantization_type, num_groups):
81
+ self.weight.start_bits = start_bits
82
+ self.weight.target_bits = target_bits
83
+ self.weight.q_period = quantization_period
84
+ self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward
85
+ if self.weight_quantization_enabled_in_forward:
86
+ logger.warning(
87
+ "************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************"
88
+ )
89
+ if self.weight.target_bits >= 3:
90
+ if quantization_type == 'symmetric':
91
+ self.weight_quantizer = SymQuantizer.apply
92
+ else:
93
+ self.weight_quantizer = AsymQuantizer.apply
94
+ elif self.weight.target_bits == 2:
95
+ assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for ternary weight quantization'
96
+ self.weight_quantizer = TernaryQuantizer.apply
97
+ elif self.weight.target_bits == 1:
98
+ assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for binary weight quantization'
99
+ self.weight_quantizer = BinaryQuantizer.apply
100
+ # for embedding, we always use token-wise quantization
101
+ self.weight_quantize_num_groups = self.weight.size(0)
102
+
103
+ def fix_weight_quantization(self):
104
+ self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
105
+ self.weight_quantize_num_groups).data
106
+ self.weight_quantization_enabled_in_forward = False
107
+ return None
108
+
109
+ def forward(self, input):
110
+ if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled:
111
+ weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
112
+ self.weight_quantize_num_groups)
113
+ else:
114
+ weight = self.weight
115
+
116
+ out = nn.functional.embedding(input, weight, self.padding_idx, self.max_norm, self.norm_type,
117
+ self.scale_grad_by_freq, self.sparse)
118
+ return out
119
+
120
+
121
+ class LinearLayer_Compress(nn.Linear):
122
+ """
123
+ Linear layer with compression.
124
+ """
125
+
126
+ def __init__(self, *kargs, bias=True):
127
+ super(LinearLayer_Compress, self).__init__(*kargs, bias=bias)
128
+ self.sparse_pruning_method = None
129
+ self.row_pruning_method = None
130
+ self.head_pruning_method = None
131
+ self.activation_quantization_method = None
132
+ self.weight.start_bits = None
133
+ self.weight.target_bits = None
134
+ self.weight.q_period = None
135
+ self.weight_quantization_enabled_in_forward = False
136
+ self.weight_quantization_enabled = False
137
+ self.sparse_pruning_enabled = False
138
+ self.row_pruning_enabled = False
139
+ self.head_pruning_enabled = False
140
+ self.activation_quantization_enabled = False
141
+
142
+ def extra_repr(self):
143
+ return 'in_features={}, out_features={}, bias={}, sparse pruning={}, row pruning={}, head pruning={}, activation quantization={}, weight_quantization={}'.format(
144
+ self.in_features, self.out_features, self.bias is not None, self.sparse_pruning_method is not None, \
145
+ self.row_pruning_method is not None, self.head_pruning_method is not None, self.activation_quantization_method is not None, self.weight.target_bits)
146
+
147
+ def enable_sparse_pruning(self, ratio, method):
148
+ # Here, we support two cases: L1 norm based pruning and topk based pruning
149
+ self.sparse_pruning_ratio = ratio
150
+ self.sparse_pruning_method = method
151
+ if method == 'l1':
152
+ weight_norm = torch.abs(self.weight.data)
153
+ mask = TopKBinarizer.apply(weight_norm, self.sparse_pruning_ratio, False)
154
+ mask = mask.view(self.weight.size())
155
+ mask = mask.to(self.weight.device)
156
+ elif method == 'topk':
157
+ self.sparse_mask_scores = nn.Parameter(torch.Tensor(self.weight.size()))
158
+ self.sparse_mask_scores.data = self.sparse_mask_scores.data.to(self.weight.device)
159
+ init.kaiming_uniform_(self.sparse_mask_scores, a=math.sqrt(5))
160
+ mask = None
161
+ else:
162
+ raise NotImplementedError
163
+
164
+ self.register_buffer('sparse_pruning_mask', mask)
165
+
166
+ def enable_row_pruning(self, ratio, method):
167
+ # Here, we support two cases: L1 norm based pruning and topk based pruning
168
+ self.row_pruning_ratio = ratio
169
+ self.row_pruning_method = method
170
+
171
+ if method == 'l1':
172
+ # compute the l1 norm of each column
173
+ weight_norm = torch.linalg.norm(self.weight.data, ord=1, dim=1)
174
+ mask = TopKBinarizer.apply(weight_norm, self.row_pruning_ratio, False)
175
+ mask = mask.view(-1, 1)
176
+ mask = mask.to(self.weight.device)
177
+ elif method == 'topk':
178
+ self.row_mask_scores = nn.Parameter(torch.Tensor(self.weight.size(0), 1))
179
+ self.row_mask_scores.data = self.row_mask_scores.data.to(self.weight.device)
180
+ init.kaiming_uniform_(self.row_mask_scores, a=math.sqrt(5))
181
+ mask = None
182
+ else:
183
+ raise NotImplementedError
184
+
185
+ self.register_buffer('row_pruning_mask', mask)
186
+
187
+ def enable_head_pruning(self, ratio, method, num_heads):
188
+ # Here, we support only topk based pruning
189
+ self.num_heads = num_heads
190
+ self.head_pruning_ratio = ratio
191
+ self.head_pruning_method = method
192
+
193
+ if method not in ['topk']:
194
+ raise NotImplementedError
195
+ else:
196
+ self.head_pruning_ratio = ratio
197
+ self.head_pruning_scores = nn.Parameter(torch.Tensor(1,
198
+ self.num_heads)) # we apply the pruning to O matrix
199
+ self.head_pruning_scores.data = self.head_pruning_scores.data.to(self.weight.device)
200
+ init.kaiming_uniform_(self.head_pruning_scores, a=math.sqrt(5))
201
+
202
+ def fix_sparse_pruning_helper(self):
203
+ mask = self.get_mask(pruning_type='sparse')
204
+ self.weight.data = self.weight.data * mask
205
+ del self.sparse_pruning_mask
206
+ if self.sparse_pruning_method == 'topk':
207
+ del self.sparse_mask_scores
208
+ self.sparse_pruning_method = None
209
+ self.sparse_pruning_enabled = False
210
+ return None
211
+
212
+ def fix_row_col_pruning_helper(self, mask=None, dim_reduction=False):
213
+ # This function is used for row/col pruning
214
+ # particularly, if we have two back-to-back layers, F1 and F2; when
215
+ # we remove rows from F1, we also need to remove columns from F2
216
+ # However, if we only have one layer, F1, then we only need to mask pruned
217
+ # rows as 0 in F1
218
+ if mask is None:
219
+ mask = self.get_mask(pruning_type='row').bool()
220
+ if dim_reduction:
221
+ start_bits = self.weight.start_bits
222
+ target_bits = self.weight.target_bits
223
+ q_period = self.weight.q_period
224
+ self.weight = nn.Parameter(self.weight.data[mask.view(-1), :])
225
+ self.weight.start_bits = start_bits
226
+ self.weight.target_bits = target_bits
227
+ self.weight.q_period = q_period
228
+ if self.bias is not None:
229
+ self.bias = nn.Parameter(self.bias.data[mask.view(-1)])
230
+ self.out_features = self.weight.size(0)
231
+ else:
232
+ self.weight.data = self.weight.data * mask.view(-1, 1)
233
+ if self.bias is not None:
234
+ self.bias.data = self.bias.data * mask.view(-1)
235
+
236
+ del self.row_pruning_mask
237
+ if self.row_pruning_method == 'topk':
238
+ del self.row_mask_scores
239
+ self.row_pruning_method = None
240
+ else:
241
+ # this is generally for column pruning
242
+ start_bits = self.weight.start_bits
243
+ target_bits = self.weight.target_bits
244
+ q_period = self.weight.q_period
245
+ self.weight = nn.Parameter(self.weight.data[:, mask.view(-1)])
246
+ self.weight.start_bits = start_bits
247
+ self.weight.target_bits = target_bits
248
+ self.weight.q_period = q_period
249
+ self.in_features = self.weight.size(1)
250
+ mask = None
251
+ self.row_pruning_enabled = False
252
+ return mask
253
+
254
+ def fix_head_pruning_helper(self, mask=None, num_heads=None, dim_reduction=False):
255
+ # Similar to row/col pruning, head pruning also needs to prune the QKV matrices associated with the O matrix
256
+ num_heads = num_heads if num_heads else self.num_heads
257
+ if mask is None:
258
+ if self.head_pruning_method == 'topk':
259
+ mask = self.get_mask(pruning_type='head').bool()
260
+ if dim_reduction:
261
+ shape = self.weight.size(0)
262
+ start_bits = self.weight.start_bits
263
+ target_bits = self.weight.target_bits
264
+ q_period = self.weight.q_period
265
+ self.weight = nn.Parameter(self.weight.data.t().reshape(num_heads,
266
+ -1)[mask.view(-1), :].reshape(-1,
267
+ shape).t())
268
+ self.weight.start_bits = start_bits
269
+ self.weight.target_bits = target_bits
270
+ self.weight.q_period = q_period
271
+ else:
272
+
273
+ shape = self.weight.size()
274
+ self.weight.data = (self.weight.data.t().reshape(self.num_heads, -1) * mask.view(-1, 1)).reshape(
275
+ shape[1], shape[0]).t()
276
+
277
+ if self.head_pruning_method == 'topk':
278
+ del self.head_pruning_scores
279
+ self.head_pruning_method = None
280
+ else:
281
+ raise NotImplementedError
282
+ else:
283
+ start_bits = self.weight.start_bits
284
+ target_bits = self.weight.target_bits
285
+ q_period = self.weight.q_period
286
+ shape = self.weight.size(1)
287
+ self.weight = nn.Parameter(self.weight.data.reshape(num_heads, -1)[mask.view(-1), :].reshape(-1, shape))
288
+ self.weight.start_bits = start_bits
289
+ self.weight.target_bits = target_bits
290
+ self.weight.q_period = q_period
291
+ if self.bias is not None:
292
+ self.bias = nn.Parameter(self.bias.data.reshape(num_heads, -1)[mask.view(-1), :].reshape(-1))
293
+ self.head_pruning_enabled = False
294
+ return mask
295
+
296
+ def get_mask(self, pruning_type='row'):
297
+ if pruning_type == 'sparse':
298
+ if self.sparse_pruning_method == 'l1':
299
+ return self.sparse_pruning_mask.to(self.weight.device)
300
+ elif self.sparse_pruning_method == 'topk':
301
+ return TopKBinarizer.apply(self.sparse_mask_scores, self.sparse_pruning_ratio, False)
302
+ else:
303
+ raise NotImplementedError
304
+ if pruning_type == 'row':
305
+ if self.row_pruning_method == 'l1':
306
+ return self.row_pruning_mask.to(self.weight.device)
307
+ elif self.row_pruning_method == 'topk':
308
+ return TopKBinarizer.apply(self.row_mask_scores, self.row_pruning_ratio, False)
309
+ else:
310
+ raise NotImplementedError
311
+ elif pruning_type == 'head':
312
+ if self.head_pruning_method == 'topk':
313
+ return TopKBinarizer.apply(self.head_pruning_scores, self.head_pruning_ratio, False)
314
+ else:
315
+ raise NotImplementedError
316
+ else:
317
+ raise NotImplementedError
318
+
319
+ def enable_weight_quantization(self, start_bits, target_bits, quantization_period,
320
+ weight_quantization_enabled_in_forward, quantization_type, num_groups):
321
+ self.weight.start_bits = start_bits
322
+ self.weight.target_bits = target_bits
323
+ self.weight.q_period = quantization_period
324
+ self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward
325
+ if self.weight_quantization_enabled_in_forward:
326
+ logger.warning(
327
+ "************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************"
328
+ )
329
+ if self.weight.target_bits >= 3:
330
+ if quantization_type == 'symmetric':
331
+ self.weight_quantizer = SymQuantizer.apply
332
+ else:
333
+ self.weight_quantizer = AsymQuantizer.apply
334
+ elif self.weight.target_bits == 2:
335
+ assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for ternary weight quantization'
336
+ self.weight_quantizer = TernaryQuantizer.apply
337
+ elif self.weight.target_bits == 1:
338
+ assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for binary weight quantization'
339
+ self.weight_quantizer = BinaryQuantizer.apply
340
+ self.weight_quantize_num_groups = num_groups
341
+
342
+ def fix_weight_quantization(self):
343
+ self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
344
+ self.weight_quantize_num_groups).data
345
+ self.weight_quantization_enabled_in_forward = False
346
+ return None
347
+
348
+ def enable_activation_quantization(self, bits, quantization_type, range_calibration):
349
+ assert bits in [4, 8], 'Only 4/8 bits activation quantization are supported for now'
350
+ self.activation_quantization_bits = bits
351
+ self.activation_quantization_method = f"{quantization_type}_{range_calibration}"
352
+ if range_calibration == 'static':
353
+ self.activation_quantizer = QuantAct(quant_mode=quantization_type)
354
+ else:
355
+ if quantization_type == 'symmetric':
356
+ self.activation_quantizer = SymQuantizer.apply
357
+ else:
358
+ self.activation_quantizer = AsymQuantizer.apply
359
+
360
+ def head_pruning_reshape(self, w, mask):
361
+ shape = w.shape
362
+ return (w.t().reshape(self.num_heads, -1) * mask.view(-1, 1)).reshape(shape[1], shape[0]).t()
363
+
364
+ def forward(self, input, skip_bias_add=False):
365
+
366
+ if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled:
367
+ weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
368
+ self.weight_quantize_num_groups)
369
+ bias = self.bias
370
+ else:
371
+ weight = self.weight
372
+ bias = self.bias
373
+
374
+ if self.sparse_pruning_enabled and self.sparse_pruning_method:
375
+ mask = self.get_mask(pruning_type='sparse')
376
+ weight = weight * mask.view(self.weight.size())
377
+
378
+ if self.row_pruning_enabled and self.row_pruning_method:
379
+ mask = self.get_mask(pruning_type='row')
380
+ weight = weight * mask.view(-1, 1)
381
+ if bias is not None:
382
+ bias = bias * mask.view(-1)
383
+
384
+ if self.head_pruning_enabled and self.head_pruning_method:
385
+ mask = self.get_mask(pruning_type='head')
386
+ weight = self.head_pruning_reshape(weight, mask)
387
+
388
+ if self.activation_quantization_enabled:
389
+ if 'dynamic' in self.activation_quantization_method:
390
+ num_groups = input.numel() // input.size(-1)
391
+ else:
392
+ num_groups = 1
393
+ input = self.activation_quantizer(input, self.activation_quantization_bits, None, None, num_groups)
394
+
395
+ if skip_bias_add:
396
+ # used for mpu linear layers
397
+ output = nn.functional.linear(input, weight, None)
398
+ return output, bias
399
+ else:
400
+ output = nn.functional.linear(input, weight, bias)
401
+ return output
402
+
403
+
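head_pruning_reshape above zeroes out whole attention heads in the output-projection weight by viewing its input dimension as (num_heads, head_dim) blocks. A small sketch of that reshape-and-mask, with toy shapes chosen purely for illustration:

    import torch

    num_heads, head_dim, out_features = 2, 3, 4
    w = torch.arange(out_features * num_heads * head_dim, dtype=torch.float32)
    w = w.reshape(out_features, num_heads * head_dim)   # (out, in), like an O-projection weight
    mask = torch.tensor([1.0, 0.0])                     # keep head 0, prune head 1

    pruned = (w.t().reshape(num_heads, -1) * mask.view(-1, 1)) \
        .reshape(num_heads * head_dim, out_features).t()
    print(pruned[:, head_dim:])                         # columns of head 1 are now all zero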
404
+ class Conv2dLayer_Compress(nn.Conv2d):
405
+ """
406
+ Conv2D layer with compression.
407
+ """
408
+
409
+ def __init__(self, *kargs):
410
+ super(Conv2dLayer_Compress, self).__init__(*kargs)
411
+ self.sparse_pruning_method = None
412
+ self.channel_pruning_method = None
413
+ self.activation_quantization_method = None
414
+ self.weight.start_bits = None
415
+ self.weight.target_bits = None
416
+ self.weight.q_period = None
417
+ self.weight_quantization_enabled_in_forward = False
418
+ self.sparse_pruning_enabled = False
419
+ self.channel_pruning_enabled = False
420
+ self.activation_quantization_enabled = False
421
+
422
+ def __repr__(self):
423
+ s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
424
+ ', stride={stride}')
425
+ if self.padding != (0, ) * len(self.padding):
426
+ s += ', padding={padding}'
427
+ if self.dilation != (1, ) * len(self.dilation):
428
+ s += ', dilation={dilation}'
429
+ if self.output_padding != (0, ) * len(self.output_padding):
430
+ s += ', output_padding={output_padding}'
431
+ if self.groups != 1:
432
+ s += ', groups={groups}'
433
+ if self.bias is None:
434
+ s += ', bias=False'
435
+ if self.padding_mode != 'zeros':
436
+ s += ', padding_mode={padding_mode}'
437
+ output = s.format(**self.__dict__)
438
+
439
+ return output + ' sparse pruning={}, channel pruning={}, activation quantization={}, weight_quantization={}'.format(
440
+ self.sparse_pruning_method is not None, self.channel_pruning_method is not None,
441
+ self.activation_quantization_method is not None, self.weight.target_bits)
442
+
443
+ def enable_sparse_pruning(self, ratio, method):
444
+ self.sparse_pruning_ratio = ratio
445
+ self.sparse_pruning_method = method
446
+ if method == 'l1':
447
+ weight_norm = torch.abs(self.weight.data)
448
+ mask = TopKBinarizer.apply(weight_norm, self.sparse_pruning_ratio, False)
449
+ mask = mask.view(self.weight.size())
450
+ mask = mask.to(self.weight.device)
451
+ elif method == 'topk':
452
+ self.sparse_mask_scores = nn.Parameter(torch.Tensor(self.weight.size()))
453
+ self.sparse_mask_scores.data = self.sparse_mask_scores.data.to(self.weight.device)
454
+ init.kaiming_uniform_(self.sparse_mask_scores, a=math.sqrt(5))
455
+ mask = None
456
+ else:
457
+ raise NotImplementedError
458
+
459
+ self.register_buffer('sparse_pruning_mask', mask)
460
+
461
+ def enable_channel_pruning(self, ratio, method):
462
+ # Here, we support two cases: L1 norm based pruning and topk based pruning
463
+ self.channel_pruning_ratio = ratio
464
+ self.channel_pruning_method = method
465
+
466
+ if method == 'l1':
467
+ # compute the l1 norm of each conv2d kernel (the last three dimension)
468
+ weight_norm = torch.linalg.norm(self.weight.data, ord=1, dim=[1, 2, 3])
469
+ mask = TopKBinarizer.apply(weight_norm, self.channel_pruning_ratio, False)
470
+ mask = mask.view(-1, 1, 1, 1)
471
+ mask = mask.to(self.weight.device)
472
+ elif method == 'topk':
473
+ self.channel_mask_scores = nn.Parameter(torch.Tensor(self.weight.size(0), 1, 1, 1))
474
+ self.channel_mask_scores.data = self.channel_mask_scores.data.to(self.weight.device)
475
+ init.kaiming_uniform_(self.channel_mask_scores, a=math.sqrt(5))
476
+ mask = None
477
+ else:
478
+ raise NotImplementedError
479
+
480
+ self.register_buffer('channel_pruning_mask', mask)
481
+
482
+ def fix_sparse_pruning_helper(self):
483
+ mask = self.get_mask(pruning_type='sparse')
484
+ self.weight.data = self.weight.data * mask
485
+ del self.sparse_pruning_mask
486
+ if self.sparse_pruning_method == 'topk':
487
+ del self.sparse_mask_scores
488
+ self.sparse_pruning_method = None
489
+ self.sparse_pruning_enabled = False
490
+ return None
491
+
492
+ def fix_channel_pruning_helper(self, mask=None, dim_reduction=False):
493
+ if mask is None:
494
+ if self.channel_pruning_method in ['l1', 'topk']:
495
+ mask = self.get_mask(pruning_type='channel').bool()
496
+ if dim_reduction:
497
+ start_bits = self.weight.start_bits
498
+ target_bits = self.weight.target_bits
499
+ q_period = self.weight.q_period
500
+ self.weight = nn.Parameter(self.weight.data[mask.view(-1), ...])
501
+ self.weight.start_bits = start_bits
502
+ self.weight.target_bits = target_bits
503
+ self.weight.q_period = q_period
504
+ if self.bias is not None:
505
+ self.bias = nn.Parameter(self.bias.data[mask.view(-1)])
506
+ else:
507
+ self.weight.data = self.weight.data * mask.view(-1, 1, 1, 1)
508
+ if self.bias is not None:
509
+ self.bias.data = self.bias.data * mask.view(-1)
510
+ del self.channel_pruning_mask
511
+ if self.channel_pruning_method == 'topk':
512
+ del self.channel_mask_scores
513
+ self.channel_pruning_method = None
514
+ else:
515
+ raise NotImplementedError
516
+ else:
517
+ start_bits = self.weight.start_bits
518
+ target_bits = self.weight.target_bits
519
+ q_period = self.weight.q_period
520
+ self.weight = nn.Parameter(self.weight.data[:, mask.view(-1), ...])
521
+ self.weight.start_bits = start_bits
522
+ self.weight.target_bits = target_bits
523
+ self.weight.q_period = q_period
524
+ mask = None
525
+ self.channel_pruning_enabled = False
526
+ return mask
527
+
528
+ def get_mask(self, pruning_type='sparse'):
529
+ if pruning_type == 'sparse':
530
+ if self.sparse_pruning_method == 'l1':
531
+ return self.sparse_pruning_mask.to(self.weight.device)
532
+ elif self.sparse_pruning_method == 'topk':
533
+ return TopKBinarizer.apply(self.sparse_mask_scores, self.sparse_pruning_ratio, False)
534
+ else:
535
+ raise NotImplementedError
536
+ elif pruning_type == 'channel':
537
+ if self.channel_pruning_method == 'l1':
538
+ return self.channel_pruning_mask.to(self.weight.device)
539
+ elif self.channel_pruning_method == 'topk':
540
+ return TopKBinarizer.apply(self.channel_mask_scores, self.channel_pruning_ratio, False)
541
+ else:
542
+ raise NotImplementedError
543
+ else:
544
+ raise NotImplementedError
545
+
546
+ def fix_weight_quantization(self):
547
+ self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
548
+ self.weight_quantize_num_groups).data
549
+ self.weight_quantization_enabled_in_forward = False
550
+ return None
551
+
552
+ def enable_weight_quantization(self, start_bits, target_bits, quantization_period,
553
+ weight_quantization_enabled_in_forward, quantization_type, num_groups):
554
+ self.weight.start_bits = start_bits
555
+ self.weight.target_bits = target_bits
556
+ self.weight.q_period = quantization_period
557
+ self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward
558
+ if self.weight_quantization_enabled_in_forward:
559
+ assert self.weight.target_bits >= 4, 'Only >=4 bits weight quantization are supported during forward pass for now'
560
+ logger.warning(
561
+ "************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************"
562
+ )
563
+ if quantization_type == 'symmetric':
564
+ self.weight_quantizer = SymQuantizer.apply
565
+ else:
566
+ self.weight_quantizer = AsymQuantizer.apply
567
+ self.weight_quantize_num_groups = num_groups
568
+
569
+ def enable_activation_quantization(self, bits, quantization_type, range_calibration):
570
+ assert bits in [4, 8], 'Only 4/8 bits activation quantization are supported for now'
571
+ self.activation_quantization_bits = bits
572
+ self.activation_quantization_method = f"{quantization_type}_{range_calibration}"
573
+ if range_calibration == 'static':
574
+ self.activation_quantizer = QuantAct(quant_mode=quantization_type)
575
+ else:
576
+ if quantization_type == 'symmetric':
577
+ self.activation_quantizer = SymQuantizer.apply
578
+ else:
579
+ self.activation_quantizer = AsymQuantizer.apply
580
+
581
+ def forward(self, input):
582
+
583
+ if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled:
584
+ weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
585
+ self.weight_quantize_num_groups)
586
+ bias = self.bias
587
+ else:
588
+ weight = self.weight
589
+ bias = self.bias
590
+
591
+ if self.sparse_pruning_enabled and self.sparse_pruning_method:
592
+ mask = self.get_mask(pruning_type='sparse')
593
+ weight = weight * mask.view(self.weight.size())
594
+
595
+ if self.channel_pruning_enabled:
596
+ mask = self.get_mask(pruning_type='channel')
597
+ weight = weight * mask.view(-1, 1, 1, 1)
598
+ if bias is not None:
599
+ bias = bias * mask.view(-1)
600
+
601
+ if self.activation_quantization_enabled:
602
+ if 'dynamic' in self.activation_quantization_method:
603
+ num_groups = input.numel() // input[0].numel()
604
+ else:
605
+ num_groups = 1
606
+ input = self.activation_quantizer(input, self.activation_quantization_bits, None, None, num_groups)
607
+
608
+ return nn.functional.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups)
609
+
610
+
611
+ class BNLayer_Compress(nn.BatchNorm2d):
612
+
613
+ def fix_channel_pruning_helper(self, mask, dim_reduction=True):
614
+ self.weight = nn.Parameter(self.weight.data[mask.view(-1)])
615
+ self.bias = nn.Parameter(self.bias.data[mask.view(-1)])
616
+ self.running_mean = self.running_mean[mask.view(-1)]
617
+ self.running_var = self.running_var[mask.view(-1)]
618
+
619
+
620
+ def _reduce(input_):
621
+ """All-reduce the input tensor across model parallel group."""
622
+ group = g_mpu.get_model_parallel_group()
623
+
624
+ # Bypass the function if we are using only 1 GPU.
625
+ if dist.get_world_size(group=group) == 1:
626
+ return input_
627
+
628
+ # All-reduce.
629
+ dist.all_reduce(input_, group=group)
630
+
631
+ return input_
632
+
633
+
634
+ def split_tensor_along_last_dim(tensor, num_partitions, contiguous_split_chunks=False):
635
+ """Split a tensor along its last dimension.
636
+ Arguments:
637
+ tensor: input tensor.
638
+ num_partitions: number of partitions to split the tensor
639
+ contiguous_split_chunks: If True, make each chunk contiguous
640
+ in memory.
641
+ """
642
+ # Get the size and dimension.
643
+ last_dim = tensor.dim() - 1
644
+ assert tensor.size()[last_dim] % num_partitions == 0
645
+ last_dim_size = tensor.size()[last_dim] // num_partitions
646
+ # Split.
647
+ tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
648
+ # Note: torch.split does not create contiguous tensors by default.
649
+ if contiguous_split_chunks:
650
+ return tuple(chunk.contiguous() for chunk in tensor_list)
651
+
652
+ return tensor_list
653
+
654
+
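A quick usage sketch of split_tensor_along_last_dim (toy tensor, three partitions); the _split and _gather helpers below apply it per model-parallel rank:

    import torch

    x = torch.arange(12).reshape(2, 6)
    chunks = split_tensor_along_last_dim(x, num_partitions=3)
    print([tuple(c.shape) for c in chunks])   # [(2, 2), (2, 2), (2, 2)]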
655
+ def _split(input_):
656
+ """Split the tensor along its last dimension and keep the
657
+ corresponding slice."""
658
+ group = g_mpu.get_model_parallel_group()
659
+
660
+ # Bypass the function if we are using only 1 GPU.
661
+ if dist.get_world_size(group=group) == 1:
662
+ return input_
663
+
664
+ # Split along last dimension.
665
+ world_size = dist.get_world_size(group=group)
666
+ input_list = split_tensor_along_last_dim(input_, world_size)
667
+
668
+ # Note: torch.split does not create contiguous tensors by default.
669
+ rank = dist.get_rank(group=group)
670
+ output = input_list[rank].contiguous()
671
+
672
+ return output
673
+
674
+
675
+ def _gather(input_):
676
+ """Gather tensors and concatenate along the last dimension."""
677
+ group = g_mpu.get_model_parallel_group()
678
+
679
+ # Bypass the function if we are using only 1 GPU.
680
+ if dist.get_world_size(group=group) == 1:
681
+ return input_
682
+
683
+ # Size and dimension.
684
+ last_dim = input_.dim() - 1
685
+ rank = dist.get_rank(group=group)
686
+ world_size = dist.get_world_size(group=group)
687
+
688
+ tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
689
+ tensor_list[rank] = input_
690
+ dist.all_gather(tensor_list, input_, group=group)
691
+
692
+ # Note: torch.cat already creates a contiguous tensor.
693
+ output = torch.cat(tensor_list, dim=last_dim).contiguous()
694
+
695
+ return output
696
+
697
+
698
+ class _CopyToModelParallelRegion(torch.autograd.Function):
699
+ """Pass the input to the model parallel region."""
700
+
701
+ @staticmethod
702
+ def forward(ctx, input_):
703
+ return input_
704
+
705
+ @staticmethod
706
+ def backward(ctx, grad_output):
707
+ return _reduce(grad_output)
708
+
709
+
710
+ class _ReduceFromModelParallelRegion(torch.autograd.Function):
711
+ """All-reduce the input from the model parallel region."""
712
+
713
+ @staticmethod
714
+ def forward(ctx, input_):
715
+ return _reduce(input_)
716
+
717
+ @staticmethod
718
+ def backward(ctx, grad_output):
719
+ return grad_output
720
+
721
+
722
+ class _ScatterToModelParallelRegion(torch.autograd.Function):
723
+ """Split the input and keep only the corresponding chuck to the rank."""
724
+
725
+ @staticmethod
726
+ def forward(ctx, input_):
727
+ return _split(input_)
728
+
729
+ @staticmethod
730
+ def backward(ctx, grad_output):
731
+ return _gather(grad_output)
732
+
733
+
734
+ class _GatherFromModelParallelRegion(torch.autograd.Function):
735
+ """Gather the input from model parallel region and concatenate."""
736
+
737
+ @staticmethod
738
+ def forward(ctx, input_):
739
+ return _gather(input_)
740
+
741
+ @staticmethod
742
+ def backward(ctx, grad_output):
743
+ return _split(grad_output)
744
+
745
+
746
+ # -----------------
747
+ # Helper functions.
748
+ # -----------------
749
+
750
+
751
+ def copy_to_model_parallel_region(input_):
752
+ return _CopyToModelParallelRegion.apply(input_)
753
+
754
+
755
+ def reduce_from_model_parallel_region(input_):
756
+ return _ReduceFromModelParallelRegion.apply(input_)
757
+
758
+
759
+ def scatter_to_model_parallel_region(input_):
760
+ return _ScatterToModelParallelRegion.apply(input_)
761
+
762
+
763
+ def gather_from_model_parallel_region(input_):
764
+ return _GatherFromModelParallelRegion.apply(input_)
765
+
766
+
767
+ class ColumnParallelLinear_Compress(LinearLayer_Compress):
768
+
769
+ def __init__(self, mpu, input_size, output_size, bias=True, gather_output=True, skip_bias_add=False):
770
+ # Keep input parameters
771
+ global g_mpu
772
+ g_mpu = mpu
773
+ self.input_size = input_size
774
+ self.output_size = output_size
775
+ self.gather_output = gather_output
776
+ self.skip_bias_add = skip_bias_add
777
+
778
+ # Divide the weight matrix along the last dimension.
779
+ world_size = mpu.get_model_parallel_world_size()
780
+ assert output_size % world_size == 0
781
+ self.output_size_per_partition = output_size // world_size
782
+
783
+ super(ColumnParallelLinear_Compress, self).__init__(self.input_size, self.output_size_per_partition, bias=bias)
784
+
785
+ def forward(self, input_):
786
+ # Set up backprop all-reduce.
787
+ input_parallel = copy_to_model_parallel_region(input_)
788
+ # Matrix multiply.
789
+ if self.skip_bias_add:
790
+ output_parallel, bias = super().forward(input_parallel, True)
791
+ else:
792
+ output_parallel = super().forward(input_parallel)
793
+ bias = None
794
+ if self.gather_output:
795
+ # All-gather across the partitions.
796
+ output = gather_from_model_parallel_region(output_parallel)
797
+ else:
798
+ output = output_parallel
799
+ return output, bias
800
+
801
+
802
+ class RowParallelLinear_Compress(LinearLayer_Compress):
803
+
804
+ def __init__(self, mpu, input_size, output_size, bias=True, input_is_parallel=False, skip_bias_add=False):
805
+ # Keep input parameters
806
+ global g_mpu
807
+ g_mpu = mpu
808
+ self.input_size = input_size
809
+ self.output_size = output_size
810
+ self.input_is_parallel = input_is_parallel
811
+ self.skip_bias_add = skip_bias_add
812
+
813
+ # Divide the weight matrix along the last dimension.
814
+ world_size = mpu.get_model_parallel_world_size()
815
+ assert input_size % world_size == 0
816
+ self.input_size_per_partition = input_size // world_size
817
+
818
+ super(RowParallelLinear_Compress, self).__init__(self.input_size_per_partition, self.output_size, bias=bias)
819
+
820
+ def forward(self, input_):
821
+ # Set up backprop all-reduce.
822
+ if self.input_is_parallel:
823
+ input_parallel = input_
824
+ else:
825
+ input_parallel = scatter_to_model_parallel_region(input_)
826
+ # Matrix multiply.
827
+ output_parallel, bias = super().forward(input_parallel, True)
828
+
829
+ # All-reduce across all the partitions.
830
+ output_ = reduce_from_model_parallel_region(output_parallel)
831
+ if not self.skip_bias_add:
832
+ if bias is not None:
833
+ output = output_ + bias
834
+ else:
835
+ output = output_
836
+ output_bias = None
837
+ else:
838
+ output = output_
839
+ output_bias = bias
840
+ return output, output_bias
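ColumnParallelLinear_Compress shards the output dimension across model-parallel ranks (optionally gathering the partials), while RowParallelLinear_Compress shards the input dimension and all-reduces the partial products. A single-process sketch of the arithmetic each scheme performs, assuming a world size of 2; no real process group is involved:

    import torch

    batch, in_features, out_features = 4, 8, 6
    x = torch.randn(batch, in_features)
    w = torch.randn(out_features, in_features)

    # Column parallelism: each rank owns a slice of w's output rows; gather concatenates.
    w0, w1 = w[:out_features // 2], w[out_features // 2:]
    col_out = torch.cat([x @ w0.t(), x @ w1.t()], dim=-1)

    # Row parallelism: each rank owns a slice of w's input columns; all-reduce sums partials.
    xa, xb = x[:, :in_features // 2], x[:, in_features // 2:]
    wa, wb = w[:, :in_features // 2], w[:, in_features // 2:]
    row_out = xa @ wa.t() + xb @ wb.t()

    reference = x @ w.t()
    print(torch.allclose(col_out, reference, atol=1e-5),
          torch.allclose(row_out, reference, atol=1e-5))   # True True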
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/compress.py ADDED
@@ -0,0 +1,239 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import re
7
+ from .helper import compression_preparation, fix_compression, recursive_getattr, is_module_compressible
8
+ from .config import get_compression_config
9
+ from ..runtime.config_utils import dict_raise_error_on_duplicate_keys
10
+ from .constants import *
11
+ import os
12
+ import json
13
+
14
+ try:
15
+ import neural_compressor as nc
16
+ except ImportError as e:
17
+ nc = None
18
+
19
+
20
+ def check_deepspeed_config(config):
21
+ if isinstance(config, dict):
22
+ return config
23
+ elif os.path.exists(config):
24
+ return json.load(open(config, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys)
25
+ else:
26
+ raise ValueError(
27
+ f"Expected a string path to an existing deepspeed config, or a dictionary. Received: {config}")
28
+
29
+
30
+ def get_module_name(group_name, model, key_word, exist_module_name, mpu=None, verbose=True):
31
+ '''
32
+ get the associated module name from the model based on the key_word provided by users
33
+ '''
34
+ return_module_name = []
35
+ for name, module in model.named_modules():
36
+
37
+ module_check = is_module_compressible(module, mpu)
38
+
39
+ if re.search(key_word, name) is not None and module_check:
40
+ if name in exist_module_name and verbose:
41
+ # logger.warning
42
+ raise ValueError(
43
+ f"{name} is already added to compression, please check your config file for {group_name}.")
44
+ if name not in exist_module_name:
45
+ exist_module_name.add(name)
46
+ return_module_name.append(name)
47
+ return return_module_name, exist_module_name
48
+
49
+
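get_module_name above selects modules whose qualified name matches the user-supplied key_word regex (and that pass the is_module_compressible check). A standalone sketch of that matching on a toy model; the pattern and the isinstance stand-in are illustrative only:

    import re
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
    key_word = r'0|2'   # hypothetical scope matching the two Linear layers by index

    matched = [name for name, module in model.named_modules()
               if re.search(key_word, name) is not None and isinstance(module, nn.Linear)]
    print(matched)   # ['0', '2']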
50
+ def get_compress_methods(model, compress_methods, mpu=None):
51
+ # extract the compression module for each method in compress_methods
52
+ layer_added_compress_methods = []
53
+ for method, method_content in compress_methods.items():
54
+ if LAYER_REDUCTION in method:
55
+ continue
56
+ # for loop different methods, i.e., weight quantization, activation quantization etc
57
+ exist_module_name = set()
58
+ shared_parameters = method_content[SHARED_PARAMETERS] # get all the shared parameters
59
+ for group_name, method_parameters in method_content[DIFFERENT_GROUPS].items():
60
+ # for loop different groups, i.e., weight quantization group 1, weight quantization group 2 etc
61
+ module_name_list = []
62
+ related_module_name_list = []
63
+ if method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]:
64
+ # this is used for head/row/channel pruning, if users provide the related module scope, we can shrink the layer dim for them
65
+ # otherwise we just mask those as zeros
66
+ for key_word, related_key_words in zip(method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE],
67
+ method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]):
68
+ module_name, exist_module_name = get_module_name(group_name,
69
+ model,
70
+ key_word,
71
+ exist_module_name,
72
+ mpu=mpu)
73
+ module_name_list.append(module_name)
74
+ tmp_related_module_name_list = []
75
+ for rkw in related_key_words:
76
+ # related key word can be a list, for instance the QKV for O matrix in Attention
77
+ module_name, _ = get_module_name(group_name, model, rkw, set(), mpu=mpu)
78
+ tmp_related_module_name_list.append(module_name)
79
+ related_module_name_list.append(tmp_related_module_name_list)
80
+ else:
81
+ for key_word in method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE]:
82
+ module_name, exist_module_name = get_module_name(group_name,
83
+ model,
84
+ key_word,
85
+ exist_module_name,
86
+ mpu=mpu)
87
+ module_name_list.append(module_name)
88
+
89
+ if module_name_list:
90
+ # combine shared parameters with each group
91
+ combined_method_parameters = {
92
+ **(method_parameters.copy().pop(DIFFERENT_GROUPS_PARAMETERS)),
93
+ **shared_parameters
94
+ }
95
+ compression_item = [module_name_list, related_module_name_list, {method: combined_method_parameters}]
96
+ layer_added_compress_methods.append(compression_item)
97
+ return layer_added_compress_methods
98
+
99
+
100
+ def init_compression(model, deepspeed_config, teacher_model=None, mpu=None):
101
+ """
102
+ Compress a model: replace linear/conv2d layer with deepspeed compression-aware modules
103
+ Args:
104
+ model (`torch.nn.Module`)
105
+ The model to compress.
106
+ deepspeed_config (`DeepSpeedConfig`)
107
+ The path of ds_config
108
+ mpu
109
+ The mpu module for Row/Column parallelism
110
+ """
111
+ compress_methods = get_compression_config(check_deepspeed_config(deepspeed_config))
112
+ if hasattr(model, 'module'):
113
+ c_model = model.module
114
+ else:
115
+ c_model = model
116
+
117
+ # For layer reduction
118
+ if compress_methods[LAYER_REDUCTION][LAYER_REDUCTION_ENABLED]:
119
+ assert teacher_model is not None, "Teacher model is required for layer reduction"
120
+ student_initialization(c_model, teacher_model, deepspeed_config)
121
+
122
+ layer_added_compress_methods = get_compress_methods(c_model, compress_methods, mpu=mpu)
123
+ compression_preparation(c_model, layer_added_compress_methods, mpu)
124
+
125
+ # For sparse pruning snip_momentum method
126
+ shared_parameters = compress_methods[SPARSE_PRUNING][SHARED_PARAMETERS]
127
+ if shared_parameters[SPARSE_PRUNING_ENABLED] and \
128
+ shared_parameters[SPARSE_PRUNING_METHOD] == SPARSE_PRUNING_METHOD_SNIP_MOMENTUM:
129
+
130
+ assert nc is not None, "please ensure the neural_compressor python package is installed by pip or conda if user wants to use snip_momentum sparse pruning"
131
+
132
+ from .helper import generate_pruners, register_on_step_begin
133
+ from neural_compressor import WeightPruningConfig  # assumed fix: the 'nc' alias above does not register an importable module named 'nc'
134
+
135
+ config = WeightPruningConfig(target_sparsity=1 - shared_parameters[SPARSE_PRUNING_DENSE_RATIO],
136
+ pattern=shared_parameters[SPARSE_PRUNING_BLOCK_PATTERN],
137
+ pruning_frequency=shared_parameters[SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE],
138
+ start_step=shared_parameters[SPARSE_PRUNING_SCHEDULE_OFFSET],
139
+ end_step=shared_parameters[SPARSE_PRUNING_SCHEDULE_OFFSET_END],
140
+ excluded_op_names=shared_parameters[SPARSE_PRUNING_EXCLUDED_MODULES])
141
+ pruners = generate_pruners(config, c_model)
142
+ c_model.pruners = pruners
143
+ register_on_step_begin(c_model)
144
+
145
+ return model
146
+
147
+
148
+ def redundancy_clean(model, deepspeed_config, mpu=None):
149
+ """
150
+ Remove the redundancy of a model
151
+ Args:
152
+ model (`torch.nn.Module`)
153
+ The model to compress.
154
+ deepspeed_config (`DeepSpeedConfig`)
155
+ The path of ds_config
156
+ mpu
157
+ The mpu module for Row/Column parallelism
158
+ """
159
+ compress_methods = get_compression_config(check_deepspeed_config(deepspeed_config))
160
+ if hasattr(model, 'module'):
161
+ c_model = model.module
162
+ else:
163
+ c_model = model
164
+
165
+ layer_added_compress_methods_tmp = get_compress_methods(c_model, compress_methods, mpu=mpu)
166
+ # sort methods
167
+ order_list = [
168
+ WEIGHT_QUANTIZATION, SPARSE_PRUNING, ROW_PRUNING, HEAD_PRUNING, CHANNEL_PRUNING, ACTIVATION_QUANTIZATION
169
+ ]
170
+ layer_added_compress_methods = sorted(layer_added_compress_methods_tmp,
171
+ key=lambda x: order_list.index(list(x[2].keys())[0]))
172
+
173
+ for module_name_lists, related_module_name_lists, compression_technique in layer_added_compress_methods:
174
+ stored_mask = []
175
+ need_mask = True if related_module_name_lists else False
176
+ for i, mnl in enumerate(module_name_lists):
177
+ for module_name in mnl:
178
+ mask = fix_compression(c_model, module_name, compression_technique, dim_reduction=need_mask)
179
+ if need_mask:
180
+ stored_mask.append(mask)
181
+ if need_mask:
182
+ for rmnl in related_module_name_lists[i]:
183
+ for j, module_name in enumerate(rmnl):
184
+ mask = fix_compression(c_model,
185
+ module_name,
186
+ compression_technique,
187
+ mask=stored_mask[j],
188
+ dim_reduction=True)
189
+ return model
190
+
191
+
192
+ def student_initialization(student_model, teacher_model, deepspeed_config):
193
+ '''
194
+ Given a student model and a teacher model, select the
195
+ Args:
196
+ student_model (`torch.nn.Module`)
197
+ The model we will update weight
198
+ teacher_model (`torch.nn.Module`)
199
+ The model guide the student to learn
200
+ deepspeed_config (`DeepSpeedConfig`)
201
+ The path of ds_config
202
+ '''
203
+ config = get_compression_config(check_deepspeed_config(deepspeed_config))
204
+ compress_methods = config[LAYER_REDUCTION]
205
+
206
+ module_name_prefix = compress_methods[MODULE_NAME_PREFIX]
207
+ teacher_layer = compress_methods[TEACHER_LAYER]
208
+ student_layer = [i for i in range(len(teacher_layer))]
209
+ other_module_name = compress_methods[OTHER_MODULE_NAME]
210
+ '''
211
+ name_prefix (`str`)
212
+ The prefix name before the layer #.
213
+ Example 1: bert.encoder.layer, for BERT_base model's prefix name
214
+ Example 2: transformer.h, for GPT-2 hugging face prefix name
215
+ teacher_layer (`list of integers`)
216
+ The layer of teacher will be used for student's reinitialization
217
+ Example 1: [1,3,5,7,9] means we want to match the 2nd/4th/6th/8th/10th layers of the teacher to the first 5 layers of the student
218
+ student_layer (`list` or None)
219
+ The layer of student need to be re-initialized
220
+ Example 1: None, means we want to reinitialize all the layers
221
+ Example 2: [0,1,2,3,4] means we want to reinitialize the first 5 layers
222
+ other_module_name (`list of string`)
223
+ The modules will be used for student's reinitialization
224
+ Example 1: ['bert.pooler', 'bert.embeddings', 'classifier'] means we want to apply the teacher's embedding/pooler/classifier weights to the student
225
+ Example 2: ['transformer.w', 'transformer.ln_f', 'lm_head'] means we want to apply the teacher's embedding, final LayerNorm, and LM-head weights to the student
226
+ Note that teacher_layer must have the same length as student_layer
227
+ '''
228
+ assert len(student_layer) == len(teacher_layer)
229
+ for s_name, t_name in zip(student_layer, teacher_layer):
230
+ s_module = recursive_getattr(student_model, module_name_prefix + '.' + str(s_name))
231
+ t_module = recursive_getattr(teacher_model, module_name_prefix + '.' + str(t_name))
232
+ for s_param, t_param in zip(s_module.parameters(), t_module.parameters()):
233
+ s_param.data.copy_(t_param.data)
234
+ for name in other_module_name:
235
+ s_module = recursive_getattr(student_model, name)
236
+ t_module = recursive_getattr(teacher_model, name)
237
+ print(name)
238
+ for s_param, t_param in zip(s_module.parameters(), t_module.parameters()):
239
+ s_param.data.copy_(t_param.data)
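Taken together, init_compression swaps compression-aware layers into the model before training and redundancy_clean folds the learned masks/quantization back into plain weights afterwards. A hedged end-to-end sketch; the config keys below are illustrative and must match the names defined in constants.py (see the DeepSpeed compression tutorial for the exact schema):

    from deepspeed.compression import init_compression, redundancy_clean

    # Illustrative config fragment (key names assumed, not verified against constants.py).
    ds_config = {
        "compression_training": {
            "weight_quantization": {
                "shared_parameters": {
                    "enabled": True,
                    "quantization_type": "symmetric",
                    "quantize_weight_in_forward": True,
                    "schedule_offset": 0,
                },
                "different_groups": {
                    "wq_group_1": {
                        "params": {"start_bits": 8, "target_bits": 8},
                        "modules": ["attention.self"],
                    }
                },
            }
        }
    }

    model = init_compression(model, ds_config)    # replace eligible layers in place
    # ... normal training loop ...
    model = redundancy_clean(model, ds_config)    # fix masks/quantization into the weights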
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/config.py ADDED
@@ -0,0 +1,452 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .constants import *
7
+ import copy
8
+ from ..runtime.config_utils import get_scalar_param, get_list_param
9
+
10
+
11
+ def get_compression_config(param_dict):
12
+ #
13
+ output = {}
14
+
15
+ if COMPRESSION_TRAINING not in param_dict.keys():
16
+ param_dict[COMPRESSION_TRAINING] = {}
17
+ sub_param_dict = param_dict[COMPRESSION_TRAINING]
18
+ output[WEIGHT_QUANTIZATION] = get_weight_quantization(sub_param_dict)
19
+ output[ACTIVATION_QUANTIZATION] = get_activation_quantization(sub_param_dict)
20
+ output[SPARSE_PRUNING] = get_sparse_pruning(sub_param_dict)
21
+ output[ROW_PRUNING] = get_row_pruning(sub_param_dict)
22
+ output[HEAD_PRUNING] = get_head_pruning(sub_param_dict)
23
+ output[CHANNEL_PRUNING] = get_channel_pruning(sub_param_dict)
24
+
25
+ output[LAYER_REDUCTION] = get_layer_reduction(sub_param_dict)
26
+
27
+ return output
28
+
29
+
30
+ def get_layer_reduction(param_dict):
31
+ output = {}
32
+ output[LAYER_REDUCTION_ENABLED] = LAYER_REDUCTION_ENABLED_DEFAULT
33
+ if get_layer_reduction_enabled(param_dict):
34
+ output[LAYER_REDUCTION_ENABLED] = get_layer_reduction_enabled(param_dict)
35
+ for key, val in get_layer_reduction_params(param_dict).items():
36
+ output[key] = val
37
+ return output
38
+
39
+
40
+ def get_layer_reduction_enabled(param_dict):
41
+ if LAYER_REDUCTION in param_dict.keys():
42
+ return get_scalar_param(param_dict[LAYER_REDUCTION], LAYER_REDUCTION_ENABLED, LAYER_REDUCTION_ENABLED_DEFAULT)
43
+ else:
44
+ return False
45
+
46
+
47
+ def get_layer_reduction_params(param_dict):
48
+ if LAYER_REDUCTION in param_dict.keys():
49
+ layer_reduction_params = copy.copy(param_dict[LAYER_REDUCTION])
50
+ layer_reduction_params.pop(LAYER_REDUCTION_ENABLED)
51
+ return layer_reduction_params
52
+ else:
53
+ return False
54
+
55
+
56
+ def get_quantize_enabled(param_dict):
57
+ if COMPRESSION_TRAINING not in param_dict.keys():
58
+ return False
59
+
60
+ sub_param_dict = param_dict[COMPRESSION_TRAINING]
61
+ output = get_weight_quantization_shared_parameters(sub_param_dict)
62
+ return output[WEIGHT_QUANTIZE_ENABLED]
63
+
64
+
65
+ def get_weight_quantization(param_dict):
66
+ output = {}
67
+ if WEIGHT_QUANTIZATION not in param_dict.keys():
68
+ param_dict[WEIGHT_QUANTIZATION] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
69
+ sub_param_dict = param_dict[WEIGHT_QUANTIZATION]
70
+ # shared parameters
71
+ output[SHARED_PARAMETERS] = get_weight_quantization_shared_parameters(sub_param_dict)
72
+ # each sub-groups
73
+ if output[SHARED_PARAMETERS][WEIGHT_QUANTIZE_ENABLED]:
74
+ assert DIFFERENT_GROUPS in sub_param_dict.keys(
75
+ ), f"Weight Quantization is enabled, {DIFFERENT_GROUPS} must be specified"
76
+ output[DIFFERENT_GROUPS] = get_weight_quantization_different_groups(sub_param_dict)
77
+ return output
78
+
79
+
80
+ def get_weight_quantization_shared_parameters(param_dict):
81
+ output = {}
82
+ if SHARED_PARAMETERS in param_dict.keys():
83
+ sub_param_dict = param_dict[SHARED_PARAMETERS]
84
+ output[WEIGHT_QUANTIZE_ENABLED] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_ENABLED,
85
+ WEIGHT_QUANTIZE_ENABLED_DEFAULT)
86
+ output[WEIGHT_QUANTIZE_KERNEL] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_KERNEL,
87
+ WEIGHT_QUANTIZE_KERNEL_DEFAULT)
88
+ output[WEIGHT_QUANTIZE_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_SCHEDULE_OFFSET,
89
+ WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT)
90
+ output[WEIGHT_QUANTIZE_GROUPS] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_GROUPS,
91
+ WEIGHT_QUANTIZE_GROUPS_DEFAULT)
92
+ output[WEIGHT_QUANTIZE_VERBOSE] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_VERBOSE,
93
+ WEIGHT_QUANTIZE_VERBOSE_DEFAULT)
94
+ output[WEIGHT_QUANTIZE_TYPE] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_TYPE,
95
+ WEIGHT_QUANTIZE_TYPE_DEFAULT)
96
+ output[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED] = get_scalar_param(sub_param_dict,
97
+ WEIGHT_QUANTIZE_IN_FORWARD_ENABLED,
98
+ WEIGHT_QUANTIZE_IN_FORWARD_ENABLED_DEFAULT)
99
+ assert output[WEIGHT_QUANTIZE_TYPE] in [
100
+ WEIGHT_QUANTIZE_SYMMETRIC, WEIGHT_QUANTIZE_ASYMMETRIC
101
+ ], f"Invalid weight quantize type. Supported types: [{WEIGHT_QUANTIZE_SYMMETRIC}, {WEIGHT_QUANTIZE_ASYMMETRIC}]"
102
+ output[WEIGHT_QUANTIZE_ROUNDING] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_ROUNDING,
103
+ WEIGHT_QUANTIZE_ROUNDING_DEFAULT)
104
+ assert output[WEIGHT_QUANTIZE_ROUNDING] in [
105
+ WEIGHT_QUANTIZE_NEAREST_ROUNDING, WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING
106
+ ], f"Invalid weight quantize rounding. Supported types: [{WEIGHT_QUANTIZE_NEAREST_ROUNDING}, {WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING}]"
107
+ if WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE in sub_param_dict.keys():
108
+ output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = get_scalar_param(
109
+ sub_param_dict[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE], WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED,
110
+ WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT)
111
+ output[WEIGHT_QUANTIZE_CHANGE_RATIO] = get_scalar_param(
112
+ sub_param_dict[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE], WEIGHT_QUANTIZE_CHANGE_RATIO,
113
+ WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT)
114
+ else:
115
+ output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT
116
+ output[WEIGHT_QUANTIZE_CHANGE_RATIO] = WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT
117
+ else:
118
+ output[WEIGHT_QUANTIZE_ENABLED] = WEIGHT_QUANTIZE_ENABLED_DEFAULT
119
+ output[WEIGHT_QUANTIZE_KERNEL] = WEIGHT_QUANTIZE_KERNEL_DEFAULT
120
+ output[WEIGHT_QUANTIZE_SCHEDULE_OFFSET] = WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT
121
+ output[WEIGHT_QUANTIZE_GROUPS] = WEIGHT_QUANTIZE_GROUPS_DEFAULT
122
+ output[WEIGHT_QUANTIZE_VERBOSE] = WEIGHT_QUANTIZE_VERBOSE_DEFAULT
123
+ output[WEIGHT_QUANTIZE_TYPE] = WEIGHT_QUANTIZE_TYPE_DEFAULT
124
+ output[WEIGHT_QUANTIZE_ROUNDING] = WEIGHT_QUANTIZE_ROUNDING_DEFAULT
125
+ output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT
126
+ output[WEIGHT_QUANTIZE_CHANGE_RATIO] = WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT
127
+ return output
128
+
129
+
130
+ def get_weight_quantization_different_groups(param_dict):
131
+ output = {}
132
+ sub_param_dict = param_dict[DIFFERENT_GROUPS]
133
+
134
+ def get_params(name, group_dict):
135
+ assert WEIGHT_QUANTIZE_START_BITS in group_dict.keys(
136
+ ), f"{WEIGHT_QUANTIZE_START_BITS} must be specified for weight quantization group {name}"
137
+ assert WEIGHT_QUANTIZE_TARGET_BITS in group_dict.keys(
138
+ ), f"{WEIGHT_QUANTIZE_TARGET_BITS} must be specified for weight quantization group {name}"
139
+ group_dict[WEIGHT_QUANTIZATION_PERIOD] = get_scalar_param(group_dict, WEIGHT_QUANTIZATION_PERIOD,
140
+ WEIGHT_QUANTIZATION_PERIOD_DEFAULT)
141
+ return group_dict
142
+
143
+ for k, v in sub_param_dict.items():
144
+ output[k] = {}
145
+ output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
146
+ output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
147
+ DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
148
+ output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
149
+ sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
150
+
151
+ return output
152
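For orientation, a minimal sketch of the kind of `weight_quantization` block the parsers above consume; the key strings mirror the constants defined in compression/constants.py further down in this commit, while the group name "wq1" and the "attention.self" module pattern are invented placeholders rather than values taken from the source.

    # Illustrative config fragment only, hand-written for this sketch.
    weight_quantization_block = {
        "shared_parameters": {
            "enabled": True,
            "quantizer_kernel": False,
            "schedule_offset": 0,
            "quantize_groups": 1,
            "quantization_type": "symmetric",
            "rounding": "nearest",
        },
        "different_groups": {
            "wq1": {  # hypothetical group name
                "params": {"start_bits": 8, "target_bits": 4, "quantization_period": 50},
                "modules": ["attention.self"],  # hypothetical module pattern
            },
        },
    }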
+
153
+
154
+ def get_activation_quantization(param_dict):
155
+ output = {}
156
+ if ACTIVATION_QUANTIZATION not in param_dict.keys():
157
+ param_dict[ACTIVATION_QUANTIZATION] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
158
+ sub_param_dict = param_dict[ACTIVATION_QUANTIZATION]
159
+ # shared parameters
160
+ output[SHARED_PARAMETERS] = get_activation_quantization_shared_parameters(sub_param_dict)
161
+ # each sub-groups
162
+ if output[SHARED_PARAMETERS][ACTIVATION_QUANTIZATION_ENABLED]:
163
+ assert DIFFERENT_GROUPS in sub_param_dict.keys(
164
+ ), f"Activation Quantization is enabled, {DIFFERENT_GROUPS} must be specified"
165
+ output[DIFFERENT_GROUPS] = get_activation_quantization_different_groups(sub_param_dict)
166
+ return output
167
+
168
+
169
+ def get_activation_quantization_shared_parameters(param_dict):
170
+ output = {}
171
+ if SHARED_PARAMETERS in param_dict.keys():
172
+ sub_param_dict = param_dict[SHARED_PARAMETERS]
173
+ output[ACTIVATION_QUANTIZATION_ENABLED] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZATION_ENABLED,
174
+ ACTIVATION_QUANTIZATION_ENABLED_DEFAULT)
175
+ output[ACTIVATION_QUANTIZE_TYPE] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZE_TYPE,
176
+ ACTIVATION_QUANTIZE_TYPE_DEFAULT)
177
+ assert output[ACTIVATION_QUANTIZE_TYPE] in [
178
+ ACTIVATION_QUANTIZE_SYMMETRIC, ACTIVATION_QUANTIZE_ASYMMETRIC
179
+ ], f"Invalid activation quantize type. Supported types: [{ACTIVATION_QUANTIZE_SYMMETRIC}, {ACTIVATION_QUANTIZE_ASYMMETRIC}]"
180
+ output[ACTIVATION_QUANTIZE_RANGE] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZE_RANGE,
181
+ ACTIVATION_QUANTIZE_RANGE_DEFAULT)
182
+ assert output[ACTIVATION_QUANTIZE_RANGE] in [
183
+ ACTIVATION_QUANTIZE_RANGE_DYNAMIC, ACTIVATION_QUANTIZE_RANGE_STATIC
184
+ ], f"Invalid activation quantize range calibration. Supported types: [{ACTIVATION_QUANTIZE_RANGE_DYNAMIC}, {ACTIVATION_QUANTIZE_RANGE_STATIC}]"
185
+ output[ACTIVATION_QUANTIZE_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict,
186
+ ACTIVATION_QUANTIZE_SCHEDULE_OFFSET,
187
+ ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT)
188
+ else:
189
+ output[ACTIVATION_QUANTIZATION_ENABLED] = ACTIVATION_QUANTIZATION_ENABLED_DEFAULT
190
+ output[ACTIVATION_QUANTIZE_TYPE] = ACTIVATION_QUANTIZE_TYPE_DEFAULT
191
+ output[ACTIVATION_QUANTIZE_RANGE] = ACTIVATION_QUANTIZE_RANGE_DEFAULT
192
+ output[ACTIVATION_QUANTIZE_SCHEDULE_OFFSET] = ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT
193
+ return output
194
+
195
+
196
+ def get_activation_quantization_different_groups(param_dict):
197
+ output = {}
198
+ sub_param_dict = param_dict[DIFFERENT_GROUPS]
199
+
200
+ def get_params(name, group_dict):
201
+ assert ACTIVATION_QUANTIZE_BITS in group_dict.keys(
202
+ ), f"{ACTIVATION_QUANTIZE_BITS} must be specified for activation quantization group {name}"
203
+ return group_dict
204
+
205
+ for k, v in sub_param_dict.items():
206
+ output[k] = {}
207
+ output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
208
+ output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
209
+ DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
210
+ output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
211
+ sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
212
+
213
+ return output
214
+
215
+
216
+ def get_sparse_pruning(param_dict):
217
+ output = {}
218
+ if SPARSE_PRUNING not in param_dict.keys():
219
+ param_dict[SPARSE_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
220
+ sub_param_dict = param_dict[SPARSE_PRUNING]
221
+ # shared parameters
222
+ output[SHARED_PARAMETERS] = get_sparse_pruning_shared_parameters(sub_param_dict)
223
+ # each sub-groups
224
+ if output[SHARED_PARAMETERS][SPARSE_PRUNING_ENABLED] and output[SHARED_PARAMETERS][
225
+ SPARSE_PRUNING_METHOD] != SPARSE_PRUNING_METHOD_SNIP_MOMENTUM:
226
+ assert DIFFERENT_GROUPS in sub_param_dict.keys(
227
+ ), f"Sparse Pruning is enabled with a method other than snip_momentum, {DIFFERENT_GROUPS} must be specified"
228
+ output[DIFFERENT_GROUPS] = get_sparse_pruning_different_groups(sub_param_dict)
229
+ return output
230
+
231
+
232
+ def get_sparse_pruning_shared_parameters(param_dict):
233
+ output = {}
234
+
235
+ if SHARED_PARAMETERS in param_dict.keys():
236
+ sub_param_dict = param_dict[SHARED_PARAMETERS]
237
+ output[SPARSE_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_ENABLED,
238
+ SPARSE_PRUNING_ENABLED_DEFAULT)
239
+ output[SPARSE_PRUNING_METHOD] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_METHOD,
240
+ SPARSE_PRUNING_METHOD_DEFAULT)
241
+ assert output[SPARSE_PRUNING_METHOD] in [
242
+ SPARSE_PRUNING_METHOD_L1, SPARSE_PRUNING_METHOD_TOPK, SPARSE_PRUNING_METHOD_SNIP_MOMENTUM
243
+ ], f"Invalid sparse pruning method. Supported types: [{SPARSE_PRUNING_METHOD_L1}, {SPARSE_PRUNING_METHOD_TOPK}, {SPARSE_PRUNING_METHOD_SNIP_MOMENTUM}]"
244
+ output[SPARSE_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_SCHEDULE_OFFSET,
245
+ SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT)
246
+ if output[SPARSE_PRUNING_METHOD] == SPARSE_PRUNING_METHOD_SNIP_MOMENTUM:
247
+ output[SPARSE_PRUNING_BLOCK_PATTERN] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_BLOCK_PATTERN,
248
+ SPARSE_PRUNING_BLOCK_PATTERN_DEFAULT)
249
+ output[SPARSE_PRUNING_DENSE_RATIO] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_DENSE_RATIO,
250
+ SPARSE_PRUNING_DENSE_RATIO_DEFAULT)
251
+ assert output[SPARSE_PRUNING_DENSE_RATIO] > 0 and output[
252
+ SPARSE_PRUNING_DENSE_RATIO] < 1, f"Invalid dense_ratio value. Must be between 0 and 1 (exclusive)"
253
+ output[SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE] = get_scalar_param(
254
+ sub_param_dict, SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE, SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE_DEFAULT)
255
+ output[SPARSE_PRUNING_EXCLUDED_MODULES] = get_list_param(sub_param_dict, SPARSE_PRUNING_EXCLUDED_MODULES,
256
+ SPARSE_PRUNING_EXCLUDED_MODULES_DEFAULT)
257
+ output[SPARSE_PRUNING_SCHEDULE_OFFSET_END] = get_scalar_param(sub_param_dict,
258
+ SPARSE_PRUNING_SCHEDULE_OFFSET_END,
259
+ output[SPARSE_PRUNING_SCHEDULE_OFFSET])
260
+ assert output[SPARSE_PRUNING_SCHEDULE_OFFSET] <= output[
261
+ SPARSE_PRUNING_SCHEDULE_OFFSET_END], f"Invalid schedule_offset and schedule_offset_end values"
262
+ else:
263
+ output[SPARSE_PRUNING_ENABLED] = SPARSE_PRUNING_ENABLED_DEFAULT
264
+ output[SPARSE_PRUNING_METHOD] = SPARSE_PRUNING_METHOD_DEFAULT
265
+ output[SPARSE_PRUNING_SCHEDULE_OFFSET] = SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT
266
+ return output
267
+
268
+
269
+ def get_sparse_pruning_different_groups(param_dict):
270
+ output = {}
271
+ sub_param_dict = param_dict[DIFFERENT_GROUPS]
272
+
273
+ def get_params(name, group_dict):
274
+ assert SPARSE_PRUNING_DENSE_RATIO in group_dict.keys(
275
+ ), f"{SPARSE_PRUNING_DENSE_RATIO} must be specified for sparse pruning group {name}"
276
+ return group_dict
277
+
278
+ for k, v in sub_param_dict.items():
279
+ output[k] = {}
280
+ output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
281
+ output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
282
+ DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
283
+ output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
284
+ sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
285
+
286
+ return output
287
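As the snip_momentum branch above implies, `different_groups` is only mandatory when some other pruning method is selected; a hedged sketch of a snip_momentum-style `sparse_pruning` block (all values illustrative) could look like this:

    # Illustrative only: with "snip_momentum" the shared parameters alone drive the
    # neural_compressor-based pruner, so no "different_groups" entry is required.
    sparse_pruning_block = {
        "shared_parameters": {
            "enabled": True,
            "method": "snip_momentum",
            "block_pattern": "4x1",
            "dense_ratio": 0.3,
            "schedule_offset": 1000,
            "schedule_offset_end": 10000,
            "schedule_offset_stride": 100,
            "excluded_modules": ["classifier"],  # hypothetical module name
        },
    }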
+
288
+
289
+ def get_row_pruning(param_dict):
290
+ output = {}
291
+ if ROW_PRUNING not in param_dict.keys():
292
+ param_dict[ROW_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
293
+ sub_param_dict = param_dict[ROW_PRUNING]
294
+ # shared parameters
295
+ output[SHARED_PARAMETERS] = get_row_pruning_shared_parameters(sub_param_dict)
296
+ # each sub-groups
297
+ if output[SHARED_PARAMETERS][ROW_PRUNING_ENABLED]:
298
+ assert DIFFERENT_GROUPS in sub_param_dict.keys(
299
+ ), f"Row Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
300
+ output[DIFFERENT_GROUPS] = get_row_pruning_different_groups(sub_param_dict)
301
+ return output
302
+
303
+
304
+ def get_row_pruning_shared_parameters(param_dict):
305
+ output = {}
306
+ if SHARED_PARAMETERS in param_dict.keys():
307
+ sub_param_dict = param_dict[SHARED_PARAMETERS]
308
+ output[ROW_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, ROW_PRUNING_ENABLED,
309
+ ROW_PRUNING_ENABLED_DEFAULT)
310
+ output[ROW_PRUNING_METHOD] = get_scalar_param(sub_param_dict, ROW_PRUNING_METHOD, ROW_PRUNING_METHOD_DEFAULT)
311
+ assert output[ROW_PRUNING_METHOD] in [
312
+ ROW_PRUNING_METHOD_L1, ROW_PRUNING_METHOD_TOPK
313
+ ], f"Invalid row pruning method. Supported types: [{ROW_PRUNING_METHOD_L1}, {ROW_PRUNING_METHOD_TOPK}]"
314
+ output[ROW_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, ROW_PRUNING_SCHEDULE_OFFSET,
315
+ ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT)
316
+ else:
317
+ output[ROW_PRUNING_ENABLED] = ROW_PRUNING_ENABLED_DEFAULT
318
+ output[ROW_PRUNING_METHOD] = ROW_PRUNING_METHOD_DEFAULT
319
+ output[ROW_PRUNING_SCHEDULE_OFFSET] = ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT
320
+ return output
321
+
322
+
323
+ def get_row_pruning_different_groups(param_dict):
324
+ output = {}
325
+ sub_param_dict = param_dict[DIFFERENT_GROUPS]
326
+
327
+ def get_params(name, group_dict):
328
+ assert ROW_PRUNING_DENSE_RATIO in group_dict.keys(
329
+ ), f"{ROW_PRUNING_DENSE_RATIO} must be specified for row pruning group {name}"
330
+ return group_dict
331
+
332
+ for k, v in sub_param_dict.items():
333
+ output[k] = {}
334
+ output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
335
+ output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
336
+ DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
337
+ output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
338
+ sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
339
+ return output
340
+
341
+
342
+ def get_head_pruning(param_dict):
343
+ output = {}
344
+ if HEAD_PRUNING not in param_dict.keys():
345
+ param_dict[HEAD_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
346
+ sub_param_dict = param_dict[HEAD_PRUNING]
347
+ # shared parameters
348
+ output[SHARED_PARAMETERS] = get_head_pruning_shared_parameters(sub_param_dict)
349
+ # each sub-groups
350
+ if output[SHARED_PARAMETERS][HEAD_PRUNING_ENABLED]:
351
+ assert DIFFERENT_GROUPS in sub_param_dict.keys(
352
+ ), f"Head Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
353
+ output[DIFFERENT_GROUPS] = get_head_pruning_different_groups(sub_param_dict)
354
+ return output
355
+
356
+
357
+ def get_head_pruning_shared_parameters(param_dict):
358
+ output = {}
359
+ if SHARED_PARAMETERS in param_dict.keys():
360
+ sub_param_dict = param_dict[SHARED_PARAMETERS]
361
+ output[HEAD_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, HEAD_PRUNING_ENABLED,
362
+ HEAD_PRUNING_ENABLED_DEFAULT)
363
+ output[HEAD_PRUNING_METHOD] = get_scalar_param(sub_param_dict, HEAD_PRUNING_METHOD,
364
+ HEAD_PRUNING_METHOD_DEFAULT)
365
+ assert output[HEAD_PRUNING_METHOD] in [
366
+ HEAD_PRUNING_METHOD_L1, HEAD_PRUNING_METHOD_TOPK
367
+ ], f"Invalid head pruning method. Supported types: [{HEAD_PRUNING_METHOD_L1}, {HEAD_PRUNING_METHOD_TOPK}]"
368
+ output[HEAD_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, HEAD_PRUNING_SCHEDULE_OFFSET,
369
+ HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT)
370
+ if output[HEAD_PRUNING_ENABLED]:
371
+ assert HEAD_PRUNING_NUM_HEADS in sub_param_dict.keys(
372
+ ), f"{HEAD_PRUNING_NUM_HEADS} must be specified for head pruning"
373
+ output[HEAD_PRUNING_NUM_HEADS] = sub_param_dict[HEAD_PRUNING_NUM_HEADS]
374
+ else:
375
+ output[HEAD_PRUNING_ENABLED] = HEAD_PRUNING_ENABLED_DEFAULT
376
+ output[HEAD_PRUNING_METHOD] = HEAD_PRUNING_METHOD_DEFAULT
377
+ output[HEAD_PRUNING_SCHEDULE_OFFSET] = HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT
378
+ return output
379
+
380
+
381
+ def get_head_pruning_different_groups(param_dict):
382
+ output = {}
383
+ sub_param_dict = param_dict[DIFFERENT_GROUPS]
384
+
385
+ def get_params(name, group_dict):
386
+ assert HEAD_PRUNING_DENSE_RATIO in group_dict.keys(
387
+ ), f"dense_ratio must be specified for head pruning group {name}"
388
+ return group_dict
389
+
390
+ for k, v in sub_param_dict.items():
391
+ output[k] = {}
392
+ output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
393
+ output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
394
+ DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
395
+ output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
396
+ sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
397
+ return output
398
+
399
+
400
+ def get_channel_pruning(param_dict):
401
+ output = {}
402
+ if CHANNEL_PRUNING not in param_dict.keys():
403
+ param_dict[CHANNEL_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
404
+ sub_param_dict = param_dict[CHANNEL_PRUNING]
405
+ # shared parameters
406
+ output[SHARED_PARAMETERS] = get_channel_pruning_shared_parameters(sub_param_dict)
407
+ # each sub-groups
408
+ if output[SHARED_PARAMETERS][CHANNEL_PRUNING_ENABLED]:
409
+ assert DIFFERENT_GROUPS in sub_param_dict.keys(
410
+ ), f"Channel Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
411
+ output[DIFFERENT_GROUPS] = get_channel_pruning_different_groups(sub_param_dict)
412
+ return output
413
+
414
+
415
+ def get_channel_pruning_shared_parameters(param_dict):
416
+ output = {}
417
+ if SHARED_PARAMETERS in param_dict.keys():
418
+ sub_param_dict = param_dict[SHARED_PARAMETERS]
419
+ output[CHANNEL_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_ENABLED,
420
+ CHANNEL_PRUNING_ENABLED_DEFAULT)
421
+ output[CHANNEL_PRUNING_METHOD] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_METHOD,
422
+ CHANNEL_PRUNING_METHOD_DEFAULT)
423
+ assert output[CHANNEL_PRUNING_METHOD] in [
424
+ CHANNEL_PRUNING_METHOD_L1, CHANNEL_PRUNING_METHOD_TOPK
425
+ ], f"Invalid channel pruning method. Supported types: [{CHANNEL_PRUNING_METHOD_L1}, {CHANNEL_PRUNING_METHOD_TOPK}]"
426
+ output[CHANNEL_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_SCHEDULE_OFFSET,
427
+ CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT)
428
+ else:
429
+ output[CHANNEL_PRUNING_ENABLED] = CHANNEL_PRUNING_ENABLED_DEFAULT
430
+ output[CHANNEL_PRUNING_METHOD] = CHANNEL_PRUNING_METHOD_DEFAULT
431
+ output[CHANNEL_PRUNING_SCHEDULE_OFFSET] = CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT
432
+ return output
433
+
434
+
435
+ def get_channel_pruning_different_groups(param_dict):
436
+ output = {}
437
+ sub_param_dict = param_dict[DIFFERENT_GROUPS]
438
+
439
+ def get_params(name, group_dict):
440
+ assert CHANNEL_PRUNING_DENSE_RATIO in group_dict.keys(
441
+ ), f"{CHANNEL_PRUNING_DENSE_RATIO} must be specified for channel pruning group {name}"
442
+ return group_dict
443
+
444
+ for k, v in sub_param_dict.items():
445
+ output[k] = {}
446
+ output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
447
+ output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
448
+ DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
449
+ output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
450
+ sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
451
+
452
+ return output
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/constants.py ADDED
@@ -0,0 +1,188 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ #########################################
7
+ # Compression Methods
8
+ # It has several sub-components
9
+ #########################################
10
+ COMPRESSION_TRAINING = "compression_training"
11
+ SHARED_PARAMETERS = "shared_parameters"
12
+ DIFFERENT_GROUPS = "different_groups"
13
+ TECHNIQUE_ENABLED = "enabled"
14
+ TECHNIQUE_SCHEDULE_OFFSET = "schedule_offset"
15
+ TECHNIQUE_SCHEDULE_OFFSET_END = "schedule_offset_end"
16
+ DIFFERENT_GROUPS_PARAMETERS = "params"
17
+ DIFFERENT_GROUPS_MODULE_SCOPE = "modules"
18
+ DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT = "*"
19
+ DIFFERENT_GROUPS_RELATED_MODULE_SCOPE = "related_modules"
20
+ DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT = None
21
+ # COMPRESSION_TRAINING_ENABLED = "enabled"
22
+ # COMPRESSION_TRAINING_ENABLED_DEFAULT = False
23
+
24
+ ####
25
+ # Layer Reduction
26
+ ####
27
+ LAYER_REDUCTION = "layer_reduction"
28
+ LAYER_REDUCTION_ENABLED = "enabled"
29
+ LAYER_REDUCTION_ENABLED_DEFAULT = False
30
+ KEEP_NUMBER_LAYER = "keep_number_layer"
31
+ MODULE_NAME_PREFIX = "module_name_prefix"
32
+ TEACHER_LAYER = "teacher_layer"
33
+ OTHER_MODULE_NAME = "other_module_name"
34
+
35
+ ####
36
+ # Weight Quantization
37
+ ####
38
+ WEIGHT_QUANTIZATION = "weight_quantization"
39
+
40
+ WEIGHT_QUANTIZATION_PERIOD = "quantization_period"
41
+ WEIGHT_QUANTIZATION_PERIOD_DEFAULT = 1
42
+
43
+ WEIGHT_QUANTIZE_IN_FORWARD_ENABLED = "quantize_weight_in_forward"
44
+ WEIGHT_QUANTIZE_IN_FORWARD_ENABLED_DEFAULT = False
45
+
46
+ WEIGHT_QUANTIZE_ENABLED = TECHNIQUE_ENABLED
47
+ WEIGHT_QUANTIZE_ENABLED_DEFAULT = False
48
+
49
+ WEIGHT_QUANTIZE_KERNEL = "quantizer_kernel"
50
+ WEIGHT_QUANTIZE_KERNEL_DEFAULT = False
51
+
52
+ WEIGHT_QUANTIZE_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
53
+ WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT = 0
54
+
55
+ WEIGHT_QUANTIZE_GROUPS = "quantize_groups"
56
+ WEIGHT_QUANTIZE_GROUPS_DEFAULT = 1
57
+
58
+ WEIGHT_QUANTIZE_VERBOSE = "quantize_verbose"
59
+ WEIGHT_QUANTIZE_VERBOSE_DEFAULT = False
60
+
61
+ WEIGHT_QUANTIZE_TYPE = "quantization_type"
62
+ WEIGHT_QUANTIZE_TYPE_DEFAULT = "symmetric"
63
+ WEIGHT_QUANTIZE_SYMMETRIC = "symmetric"
64
+ WEIGHT_QUANTIZE_ASYMMETRIC = "asymmetric"
65
+
66
+ WEIGHT_QUANTIZE_ROUNDING = "rounding"
67
+ WEIGHT_QUANTIZE_ROUNDING_DEFAULT = "nearest"
68
+ WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING = "stochastic"
69
+ WEIGHT_QUANTIZE_NEAREST_ROUNDING = "nearest"
70
+ # may be deleted for a cleaner version
71
+ WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE = "fp16_mixed_quantize"
72
+
73
+ WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED = "enabled"
74
+ WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT = False
75
+
76
+ WEIGHT_QUANTIZE_CHANGE_RATIO = "quantize_change_ratio"
77
+ WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT = 0.001
78
+
79
+ WEIGHT_QUANTIZE_START_BITS = "start_bits"
80
+ WEIGHT_QUANTIZE_TARGET_BITS = "target_bits"
81
+ ###
82
+ # Activation Quantization
83
+ ###
84
+ ACTIVATION_QUANTIZATION = "activation_quantization"
85
+
86
+ ACTIVATION_QUANTIZATION_ENABLED = TECHNIQUE_ENABLED
87
+ ACTIVATION_QUANTIZATION_ENABLED_DEFAULT = False
88
+
89
+ ACTIVATION_QUANTIZE_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
90
+ ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT = 1000
91
+
92
+ ACTIVATION_QUANTIZE_TYPE = "quantization_type"
93
+ ACTIVATION_QUANTIZE_TYPE_DEFAULT = "symmetric"
94
+ ACTIVATION_QUANTIZE_SYMMETRIC = "symmetric"
95
+ ACTIVATION_QUANTIZE_ASYMMETRIC = "asymmetric"
96
+
97
+ ACTIVATION_QUANTIZE_RANGE = 'range_calibration'
98
+ ACTIVATION_QUANTIZE_RANGE_DEFAULT = 'dynamic'
99
+ ACTIVATION_QUANTIZE_RANGE_STATIC = 'static'
100
+ ACTIVATION_QUANTIZE_RANGE_DYNAMIC = 'dynamic'
101
+
102
+ ACTIVATION_QUANTIZE_BITS = "bits"
103
+ ###
104
+ # Sparse Pruning
105
+ ###
106
+ SPARSE_PRUNING = "sparse_pruning"
107
+
108
+ SPARSE_PRUNING_ENABLED = TECHNIQUE_ENABLED
109
+ SPARSE_PRUNING_ENABLED_DEFAULT = False
110
+
111
+ SPARSE_PRUNING_METHOD = "method"
112
+ SPARSE_PRUNING_METHOD_DEFAULT = "l1"
113
+ SPARSE_PRUNING_METHOD_L1 = "l1"
114
+ SPARSE_PRUNING_METHOD_TOPK = "topk"
115
+ SPARSE_PRUNING_METHOD_SNIP_MOMENTUM = "snip_momentum"
116
+
117
+ SPARSE_PRUNING_BLOCK_PATTERN = "block_pattern"
118
+ SPARSE_PRUNING_BLOCK_PATTERN_DEFAULT = "4x1"
119
+
120
+ SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE = "schedule_offset_stride"
121
+ SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE_DEFAULT = 1
122
+
123
+ SPARSE_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
124
+ SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000
125
+
126
+ SPARSE_PRUNING_SCHEDULE_OFFSET_END = TECHNIQUE_SCHEDULE_OFFSET_END
127
+ SPARSE_PRUNING_SCHEDULE_OFFSET_END_DEFAULT = SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT
128
+
129
+ SPARSE_PRUNING_DENSE_RATIO = "dense_ratio"
130
+ SPARSE_PRUNING_DENSE_RATIO_DEFAULT = 0.1
131
+
132
+ SPARSE_PRUNING_EXCLUDED_MODULES = "excluded_modules"
133
+ SPARSE_PRUNING_EXCLUDED_MODULES_DEFAULT = []
134
+ ###
135
+ # Row Pruning
136
+ ###
137
+ ROW_PRUNING = "row_pruning"
138
+
139
+ ROW_PRUNING_ENABLED = TECHNIQUE_ENABLED
140
+ ROW_PRUNING_ENABLED_DEFAULT = False
141
+
142
+ ROW_PRUNING_METHOD = "method"
143
+ ROW_PRUNING_METHOD_DEFAULT = "l1"
144
+ ROW_PRUNING_METHOD_L1 = "l1"
145
+ ROW_PRUNING_METHOD_TOPK = "topk"
146
+
147
+ ROW_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
148
+ ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000
149
+
150
+ ROW_PRUNING_DENSE_RATIO = "dense_ratio"
151
+
152
+ ###
153
+ # Head Pruning
154
+ ###
155
+ HEAD_PRUNING = "head_pruning"
156
+
157
+ HEAD_PRUNING_ENABLED = TECHNIQUE_ENABLED
158
+ HEAD_PRUNING_ENABLED_DEFAULT = False
159
+
160
+ HEAD_PRUNING_METHOD = "method"
161
+ HEAD_PRUNING_METHOD_DEFAULT = "topk"
162
+ HEAD_PRUNING_METHOD_L1 = "l1"
163
+ HEAD_PRUNING_METHOD_TOPK = "topk"
164
+
165
+ HEAD_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
166
+ HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000
167
+
168
+ HEAD_PRUNING_NUM_HEADS = "num_heads"
169
+
170
+ HEAD_PRUNING_DENSE_RATIO = "dense_ratio"
171
+
172
+ ###
173
+ # Channel Pruning
174
+ ###
175
+ CHANNEL_PRUNING = "channel_pruning"
176
+
177
+ CHANNEL_PRUNING_ENABLED = TECHNIQUE_ENABLED
178
+ CHANNEL_PRUNING_ENABLED_DEFAULT = False
179
+
180
+ CHANNEL_PRUNING_METHOD = "method"
181
+ CHANNEL_PRUNING_METHOD_DEFAULT = "l1"
182
+ CHANNEL_PRUNING_METHOD_L1 = "l1"
183
+ CHANNEL_PRUNING_METHOD_TOPK = "topk"
184
+
185
+ CHANNEL_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
186
+ CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000
187
+
188
+ CHANNEL_PRUNING_DENSE_RATIO = "dense_ratio"
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/helper.py ADDED
@@ -0,0 +1,322 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ from .basic_layer import Embedding_Compress, LinearLayer_Compress, Conv2dLayer_Compress, BNLayer_Compress, ColumnParallelLinear_Compress, RowParallelLinear_Compress
8
+ from .constants import *
9
+ from deepspeed.utils import logger
10
+
11
+ try:
12
+ from neural_compressor.compression import pruner as nc_pruner
13
+ except ImportError as e:
14
+ nc_pruner = None
15
+
16
+
17
+ def recursive_getattr(model, module_name):
18
+ """
19
+ Recursively get the attribute of a module.
20
+ Args:
21
+ model (`torch.nn.Module`)
22
+ The model to get the attribute from.
23
+ module_name (`str`)
24
+ The name of the module to get the attribute from.
25
+ """
26
+ split_list = module_name.split('.')
27
+ output = model
28
+ for name in split_list:
29
+ output = getattr(output, name)
30
+ return output
31
+
32
+
33
+ def recursive_setattr(model, module_name, module):
34
+ """
35
+ Recursively set the attribute of a module.
36
+ Args:
37
+ model (`torch.nn.Module`)
38
+ The model to set the attribute in.
39
+ module_name (`str`)
40
+ The name of the module to set the attribute in.
41
+ module (`torch.nn.Module`)
42
+ The module to set the attribute to.
43
+ """
44
+ split_list = module_name.split('.')
45
+ output = model
46
+ for name in split_list[:-1]:
47
+ output = getattr(output, name)
48
+ output.__setattr__(split_list[-1], module)
49
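A quick, self-contained sketch of how the two helpers above walk dotted module names; the model layout is invented for illustration.

    # Hypothetical usage sketch, not part of the file above.
    import torch

    model = torch.nn.Sequential()
    model.add_module("encoder", torch.nn.Sequential())
    model.encoder.add_module("fc", torch.nn.Linear(4, 4))

    fc = recursive_getattr(model, "encoder.fc")                     # returns model.encoder.fc
    recursive_setattr(model, "encoder.fc", torch.nn.Linear(4, 8))   # swaps the submodule in place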
+
50
+
51
+ def module_replacement(model, module_name, compression_technique=None, mpu=None):
52
+ """
53
+ Replace a module with a new module.
54
+ Args:
55
+ model (`torch.nn.Module`)
56
+ The model to replace the module in.
57
+ module_name (`str`)
58
+ The name of the module to replace.
59
+ compression_technique (`str`)
60
+ The compression technique to use for the new module.
61
+ """
62
+
63
+ # Get the old module
64
+ old_module = recursive_getattr(model, module_name)
65
+
66
+ need_bias = False
67
+ if hasattr(old_module, 'bias') and old_module.bias is not None:
68
+ need_bias = True
69
+
70
+ # Initialize the new module
71
+ if isinstance(old_module, LinearLayer_Compress) or isinstance(old_module, torch.nn.Linear):
72
+ if isinstance(old_module, LinearLayer_Compress):
73
+ new_module = old_module
74
+ else:
75
+ new_module = LinearLayer_Compress(old_module.in_features, old_module.out_features,
76
+ bias=need_bias).to(device=old_module.weight.device,
77
+ dtype=old_module.weight.dtype)
78
+ new_module.weight.data = old_module.weight.data
79
+ if need_bias:
80
+ new_module.bias.data = old_module.bias.data
81
+ elif isinstance(old_module, Conv2dLayer_Compress) or isinstance(old_module, torch.nn.Conv2d):
82
+ if isinstance(old_module, Conv2dLayer_Compress):
83
+ new_module = old_module
84
+ else:
85
+ new_module = Conv2dLayer_Compress(old_module.in_channels, old_module.out_channels, old_module.kernel_size, old_module.stride, old_module.padding, \
86
+ old_module.dilation, old_module.groups, need_bias, \
87
+ old_module.padding_mode).to(device=old_module.weight.device, dtype=old_module.weight.dtype)
88
+ new_module.weight.data = old_module.weight.data
89
+ if need_bias:
90
+ new_module.bias.data = old_module.bias.data
91
+ elif isinstance(old_module, torch.nn.BatchNorm2d):
92
+ new_module = BNLayer_Compress(old_module.num_features, old_module.eps, old_module.momentum, old_module.affine,
93
+ old_module.track_running_stats).to(old_module.weight.device,
94
+ old_module.weight.dtype)
95
+ new_module.weight.data = old_module.weight.data
96
+ if need_bias:
97
+ new_module.bias.data = old_module.bias.data
98
+ new_module.running_mean.data = old_module.running_mean.data
99
+ new_module.running_var.data = old_module.running_var.data
100
+ elif isinstance(old_module, Embedding_Compress) or isinstance(old_module, torch.nn.Embedding):
101
+ if isinstance(old_module, Embedding_Compress):
102
+ new_module = old_module
103
+ else:
104
+ new_module = Embedding_Compress(old_module.num_embeddings, old_module.embedding_dim, old_module.padding_idx, old_module.max_norm, old_module.norm_type, \
105
+ old_module.scale_grad_by_freq, old_module.sparse).to(device=old_module.weight.device, dtype=old_module.weight.dtype)
106
+ new_module.weight.data = old_module.weight.data
107
+ elif mpu is not None and (isinstance(old_module, ColumnParallelLinear_Compress)
108
+ or isinstance(old_module, mpu.ColumnParallelLinear)):
109
+ if isinstance(old_module, ColumnParallelLinear_Compress):
110
+ new_module = old_module
111
+ else:
112
+ new_module = ColumnParallelLinear_Compress(mpu,
113
+ old_module.input_size,
114
+ old_module.output_size,
115
+ gather_output=old_module.gather_output,
116
+ skip_bias_add=old_module.skip_bias_add,
117
+ bias=need_bias).to(device=old_module.weight.device,
118
+ dtype=old_module.weight.dtype)
119
+ new_module.weight.data = old_module.weight.data
120
+ if need_bias:
121
+ new_module.bias.data = old_module.bias.data
122
+ elif mpu is not None and (isinstance(old_module, RowParallelLinear_Compress)
123
+ or isinstance(old_module, mpu.RowParallelLinear)):
124
+ if isinstance(old_module, RowParallelLinear_Compress):
125
+ new_module = old_module
126
+ else:
127
+ new_module = RowParallelLinear_Compress(mpu,
128
+ old_module.input_size,
129
+ old_module.output_size,
130
+ input_is_parallel=old_module.input_is_parallel,
131
+ skip_bias_add=old_module.skip_bias_add,
132
+ bias=need_bias).to(device=old_module.weight.device,
133
+ dtype=old_module.weight.dtype)
134
+ new_module.weight.data = old_module.weight.data
135
+ if need_bias:
136
+ new_module.bias.data = old_module.bias.data
137
+ else:
138
+ new_module = None
139
+
140
+ if compression_technique is not None:
141
+ for k, v in compression_technique.items():
142
+ if k == SPARSE_PRUNING:
143
+ if v[SPARSE_PRUNING_ENABLED]:
144
+ new_module.enable_sparse_pruning(v[SPARSE_PRUNING_DENSE_RATIO], v[SPARSE_PRUNING_METHOD])
145
+ elif k == ROW_PRUNING:
146
+ if v[ROW_PRUNING_ENABLED]:
147
+ new_module.enable_row_pruning(v[ROW_PRUNING_DENSE_RATIO], v[ROW_PRUNING_METHOD])
148
+ elif k == HEAD_PRUNING:
149
+ if v[HEAD_PRUNING_ENABLED]:
150
+ new_module.enable_head_pruning(v[HEAD_PRUNING_DENSE_RATIO], v[HEAD_PRUNING_METHOD],
151
+ v[HEAD_PRUNING_NUM_HEADS])
152
+ elif k == ACTIVATION_QUANTIZATION:
153
+ if v[ACTIVATION_QUANTIZATION_ENABLED]:
154
+ new_module.enable_activation_quantization(v[ACTIVATION_QUANTIZE_BITS], v[ACTIVATION_QUANTIZE_TYPE],
155
+ v[ACTIVATION_QUANTIZE_RANGE])
156
+ elif k == WEIGHT_QUANTIZATION:
157
+ if v[WEIGHT_QUANTIZE_ENABLED]:
158
+ new_module.enable_weight_quantization(v[WEIGHT_QUANTIZE_START_BITS],
159
+ v[WEIGHT_QUANTIZE_TARGET_BITS],
160
+ v[WEIGHT_QUANTIZATION_PERIOD],
161
+ v[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED],
162
+ v[WEIGHT_QUANTIZE_TYPE], v[WEIGHT_QUANTIZE_GROUPS])
163
+ elif k == CHANNEL_PRUNING:
164
+ if v[CHANNEL_PRUNING_ENABLED]:
165
+ new_module.enable_channel_pruning(v[CHANNEL_PRUNING_DENSE_RATIO], v[CHANNEL_PRUNING_METHOD])
166
+ else:
167
+ raise NotImplementedError('Compression technique {} is not implemented'.format(k))
168
+
169
+ # Replace the old module with the new one
170
+ recursive_setattr(model, module_name, new_module)
171
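The `compression_technique` argument is a dict keyed by the technique-name strings from constants.py; a hedged sketch of a row-pruning entry, assuming `model` is the module being compressed and with an invented module name and ratio, is shown below.

    # Illustrative only: the keys are the plain strings behind the constants in constants.py.
    technique = {
        "row_pruning": {
            "enabled": True,
            "dense_ratio": 0.5,
            "method": "topk",
        },
    }
    module_replacement(model, "encoder.layer.0.output.dense", compression_technique=technique)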
+
172
+
173
+ def is_module_compressible(module, mpu=None):
174
+ ret = isinstance(module, torch.nn.Linear) or \
175
+ isinstance(module, torch.nn.Conv2d) or \
176
+ isinstance(module, torch.nn.Embedding) or \
177
+ isinstance(module, torch.nn.BatchNorm2d)
178
+
179
+ if mpu is not None:
180
+ ret = ret or isinstance(module, mpu.RowParallelLinear) or isinstance(module, mpu.ColumnParallelLinear)
181
+
182
+ return ret
183
+
184
+
185
+ def compression_preparation(model, compression_technique_list, mpu):
186
+ """
187
+ Prepare the compression techniques of a model.
188
+ Args:
189
+ model (`torch.nn.Module`)
190
+ The model to prepare the compression techniques of.
191
+ compression_technique_list (`list`)
192
+ The list of compression techniques to apply to the model.
194
+ """
195
+ # Here we first replace all module with our linear wrapper
196
+ for module_name, module in model.named_modules():
197
+ if is_module_compressible(module, mpu):
198
+ module_replacement(model, module_name, mpu=mpu)
199
+ for module_name_lists, _, compression_technique in compression_technique_list:
200
+ for mnl in module_name_lists:
201
+ for module_name in mnl:
202
+ module_replacement(model, module_name, compression_technique)
203
+
204
+ return model
205
+
206
+
207
+ def fix_compression(model, module_name, compression_technique, mask=None, dim_reduction=False):
208
+ """
209
+ Fix the compression technique of a module.
210
+ Args:
211
+ model (`torch.nn.Module`)
212
+ The model to fix the compression technique of.
213
+ module_name (`str`)
214
+ The name of the module to fix the compression technique of.
215
+ compression_technique (`str`)
216
+ The compression technique to fix the module to.
217
+ """
218
+ # Here we can make things much simpler by just replacing the module
219
+ module = recursive_getattr(model, module_name)
220
+ for k, v in compression_technique.items():
221
+ if k == WEIGHT_QUANTIZATION and v[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED] and v[WEIGHT_QUANTIZE_ENABLED]:
222
+ return module.fix_weight_quantization()
223
+ elif k == SPARSE_PRUNING and v[SPARSE_PRUNING_ENABLED]:
224
+ return module.fix_sparse_pruning_helper()
225
+ elif k == ROW_PRUNING and (v[ROW_PRUNING_ENABLED] or mask is not None):
226
+ return module.fix_row_col_pruning_helper(mask, dim_reduction=dim_reduction)
227
+ elif k == HEAD_PRUNING and (v[HEAD_PRUNING_ENABLED] or mask is not None):
228
+ return module.fix_head_pruning_helper(mask, v[HEAD_PRUNING_NUM_HEADS], dim_reduction=dim_reduction)
229
+ elif k == CHANNEL_PRUNING and (v[CHANNEL_PRUNING_ENABLED] or mask is not None):
230
+ return module.fix_channel_pruning_helper(mask, dim_reduction=dim_reduction)
231
+
232
+
233
+ def convert_conv1d_to_linear(model, convert_type):
234
+ '''
235
+ This is a help function to convert conv1d to linear (e.g., convert GPT2 from HF)
236
+ '''
237
+ if hasattr(model, 'module'):
238
+ c_model = model.module
239
+ else:
240
+ c_model = model
241
+
242
+ for name, module in c_model.named_modules():
243
+ if isinstance(module, convert_type):
244
+ old_module = recursive_getattr(c_model, name)
245
+ new_module = torch.nn.Linear(old_module.weight.data.size(0),
246
+ old_module.weight.data.size(1),
247
+ bias=True if old_module.bias is not None else False)
248
+ new_module.weight.data = old_module.weight.data.t().contiguous()
249
+ if new_module.bias is not None:
250
+ new_module.bias.data = old_module.bias.data.view(-1)
251
+
252
+ recursive_setattr(c_model, name, new_module)
253
+
254
+ return model
255
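A typical use of this helper is converting a Hugging Face GPT-2 model before compression; the Conv1D import path below is an assumption about the installed transformers version, not something this file prescribes.

    # Hypothetical usage sketch; Conv1D's import path may differ across transformers versions.
    from transformers.pytorch_utils import Conv1D

    model = convert_conv1d_to_linear(model, Conv1D)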
+
256
+
257
+ def generate_pruners(config, model):
258
+ """Generate pruners.
259
+ Args:
260
+ config (`neural_compressor.WeightPruningConfig`)
261
+ An instance of the WeightPruningConfig class.
262
+ model (`torch.nn.Module`)
263
+ The torch module object to be pruned.
264
+ """
265
+ assert nc_pruner is not None, "please ensure the neural_compressor python package is installed by pip or conda if user wants to use snip_momentum sparse pruning"
266
+ # `nc_pruner` is the alias imported at the top of this file; resolve its helpers through
+ # the alias instead of treating it as a top-level module name.
+ process_config, parse_to_prune = nc_pruner.utils.process_config, nc_pruner.utils.parse_to_prune
+ get_pruner = nc_pruner.pruners.get_pruner
268
+ assert isinstance(model, torch.nn.Module)
269
+ pruners_info = process_config(config)
270
+ pruners = []
271
+ for info in pruners_info:
272
+ modules = parse_to_prune(info, model)
273
+ if modules == {}:
274
+ logger.warning("one pruner hooks no layers, please have a check")
275
+
276
+ pruners.append(get_pruner(info, modules))
277
+ info['modules'] = [key for key in modules.keys()]
278
+ info['len_of_modules'] = len(info['modules'])
279
+ logger.info(info)
280
+ return pruners
281
+
282
+
283
+ def register_on_step_begin(model):
284
+ """Mount on_step_begin to the model.
285
+ Args:
286
+ model (`torch.nn.Module`)
287
+ The torch module object to be pruned.
288
+ """
289
+
290
+ def hook(module, input):
291
+ for pruner in module.pruners:
292
+ pruner.on_step_begin(0)
293
+
294
+ hook_handle = model.register_forward_pre_hook(hook)
295
+ return hook_handle
296
+
297
+
298
+ def rewrite_optimizer_step(opt: torch.optim.Optimizer):
299
+ """Mount on_before/after_optimizer_step to the optimizer.
300
+ Args:
301
+ opt (`torch.optim.Optimizer`)
302
+ The torch optimizer object to be hooked.
303
+ """
304
+
305
+ def new_step(self, closure=None):
306
+ if hasattr(self, "pruners"):
307
+ for pruner in self.pruners:
308
+ pruner.on_before_optimizer_step()
309
+
310
+ if closure is not None:
311
+ res = self.orig_step(closure)
312
+ else:
313
+ res = self.orig_step()
314
+ if hasattr(self, "pruners"):
315
+ for pruner in self.pruners:
316
+ pruner.on_after_optimizer_step()
317
+ return res
318
+
319
+ opt.orig_step = opt.step
320
+ import types
321
+ opt.step = types.MethodType(new_step, opt)
322
+ return opt
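Taken together, the three snip_momentum helpers above are wired up roughly as follows; the `pruners` attribute name is what both `hook` and `new_step` probe with `hasattr`, and `wp_config` is assumed to be a neural_compressor WeightPruningConfig built elsewhere.

    # Sketch only, assuming `model`, `optimizer`, and a WeightPruningConfig `wp_config` exist.
    pruners = generate_pruners(wp_config, model)

    model.pruners = pruners        # read by the forward pre-hook from register_on_step_begin
    optimizer.pruners = pruners    # read by the patched step()

    hook_handle = register_on_step_begin(model)
    optimizer = rewrite_optimizer_step(optimizer)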
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/scheduler.py ADDED
@@ -0,0 +1,173 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .compress import get_module_name
7
+ from .constants import *
8
+ from .helper import recursive_getattr
9
+ from deepspeed.utils import logger
10
+
11
+
12
+ class compression_scheduler():
13
+ '''
14
+ Used to schedule different compression methods
15
+ '''
16
+
17
+ def __init__(self, model, compression_config):
18
+ self.model = model
19
+ self.compression_config = compression_config
20
+ self.make_init()
21
+ self.training_steps = 0
22
+ self.weight_quantization_enabled = False
23
+
24
+ self.verbose = {
25
+ WEIGHT_QUANTIZATION: False,
26
+ ACTIVATION_QUANTIZATION: False,
27
+ SPARSE_PRUNING: False,
28
+ HEAD_PRUNING: False,
29
+ ROW_PRUNING: False,
30
+ CHANNEL_PRUNING: False
31
+ }
32
+
33
+ def make_init(self):
34
+ self.different_compression_methods = {}
35
+ for method, method_content in self.compression_config.items():
36
+ if LAYER_REDUCTION in method:
37
+ continue
38
+ self.different_compression_methods[method] = {
39
+ TECHNIQUE_ENABLED: False,
40
+ SHARED_PARAMETERS: None,
41
+ DIFFERENT_GROUPS: []
42
+ }
43
+ exist_module_name = set()
44
+ shared_parameters = method_content[SHARED_PARAMETERS]
45
+ self.different_compression_methods[method][TECHNIQUE_ENABLED] = shared_parameters[TECHNIQUE_ENABLED]
46
+ self.different_compression_methods[method][SHARED_PARAMETERS] = shared_parameters
47
+
48
+ for group_name, method_parameters in method_content[DIFFERENT_GROUPS].items():
49
+ module_name_list = []
50
+ for key_word in method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE]:
51
+ module_name, exist_module_name = get_module_name(group_name,
52
+ self.model,
53
+ key_word,
54
+ exist_module_name,
55
+ verbose=False)
56
+ module_name_list.extend(module_name)
57
+ if module_name_list:
58
+ self.different_compression_methods[method][DIFFERENT_GROUPS].append(
59
+ [group_name, module_name_list,
60
+ method_parameters.copy().pop('params')])
61
+
62
+ def check_weight_quantization(self):
63
+ # check weight quantization
64
+ wq = self.different_compression_methods[WEIGHT_QUANTIZATION]
65
+ if not wq[TECHNIQUE_ENABLED]:
66
+ return
67
+ else:
68
+ shared_parameters = wq[SHARED_PARAMETERS]
69
+ if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
70
+ for group_name, module_name_list, method_parameters in wq[DIFFERENT_GROUPS]:
71
+ for module_name in module_name_list:
72
+ module = recursive_getattr(self.model, module_name)
73
+ module.weight_quantization_enabled = True
74
+
75
+ if not self.verbose[WEIGHT_QUANTIZATION]:
76
+ logger.info(f'Weight quantization is enabled at step {self.training_steps}')
77
+ self.weight_quantization_enabled = True
78
+ self.verbose[WEIGHT_QUANTIZATION] = True
79
+
80
+ def check_activation_quantization(self):
81
+ # check activation quantization
82
+ aq = self.different_compression_methods[ACTIVATION_QUANTIZATION]
83
+ if not aq[TECHNIQUE_ENABLED]:
84
+ return
85
+ else:
86
+ shared_parameters = aq[SHARED_PARAMETERS]
87
+ if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
88
+ for group_name, module_name_list, method_parameters in aq[DIFFERENT_GROUPS]:
89
+ for module_name in module_name_list:
90
+ module = recursive_getattr(self.model, module_name)
91
+ module.activation_quantization_enabled = True
92
+ if not self.verbose[ACTIVATION_QUANTIZATION]:
93
+ logger.info(f'Activation quantization is enabled at step {self.training_steps}')
94
+ self.verbose[ACTIVATION_QUANTIZATION] = True
95
+
96
+ def check_sparse_pruning(self):
97
+ # check sparse pruning
98
+ sp = self.different_compression_methods[SPARSE_PRUNING]
99
+ if not sp[TECHNIQUE_ENABLED]:
100
+ return
101
+ else:
102
+ shared_parameters = sp[SHARED_PARAMETERS]
103
+ if shared_parameters[TECHNIQUE_SCHEDULE_OFFSET] <= self.training_steps <= shared_parameters[
104
+ TECHNIQUE_SCHEDULE_OFFSET_END]:
105
+ for group_name, module_name_list, method_parameters in sp[DIFFERENT_GROUPS]:
106
+ for module_name in module_name_list:
107
+ module = recursive_getattr(self.model, module_name)
108
+ module.sparse_pruning_enabled = True
109
+ if not self.verbose[SPARSE_PRUNING]:
110
+ logger.info(f'Sparse pruning is enabled at step {self.training_steps}')
111
+ self.verbose[SPARSE_PRUNING] = True
112
+
113
+ def check_head_pruning(self):
114
+ # check head pruning
115
+ hp = self.different_compression_methods[HEAD_PRUNING]
116
+ if not hp[TECHNIQUE_ENABLED]:
117
+ return
118
+ else:
119
+ shared_parameters = hp[SHARED_PARAMETERS]
120
+ if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
121
+ for group_name, module_name_list, method_parameters in hp[DIFFERENT_GROUPS]:
122
+ for module_name in module_name_list:
123
+ module = recursive_getattr(self.model, module_name)
124
+ module.head_pruning_enabled = True
125
+ if not self.verbose[HEAD_PRUNING]:
126
+ logger.info(f'Head pruning is enabled at step {self.training_steps}')
127
+ self.verbose[HEAD_PRUNING] = True
128
+
129
+ def check_row_pruning(self):
130
+ # check row pruning
131
+ rp = self.different_compression_methods[ROW_PRUNING]
132
+ if not rp[TECHNIQUE_ENABLED]:
133
+ return
134
+ else:
135
+ shared_parameters = rp[SHARED_PARAMETERS]
136
+ if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
137
+ for group_name, module_name_list, method_parameters in rp[DIFFERENT_GROUPS]:
138
+ for module_name in module_name_list:
139
+ module = recursive_getattr(self.model, module_name)
140
+ module.row_pruning_enabled = True
141
+ if not self.verbose[ROW_PRUNING]:
142
+ logger.info(f'Row pruning is enabled at step {self.training_steps}')
143
+ self.verbose[ROW_PRUNING] = True
144
+
145
+ def check_channel_pruning(self):
146
+ # check channel pruning
147
+ cp = self.different_compression_methods[CHANNEL_PRUNING]
148
+ if not cp[TECHNIQUE_ENABLED]:
149
+ return
150
+ else:
151
+ shared_parameters = cp[SHARED_PARAMETERS]
152
+ if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
153
+ for group_name, module_name_list, method_parameters in cp[DIFFERENT_GROUPS]:
154
+ for module_name in module_name_list:
155
+ module = recursive_getattr(self.model, module_name)
156
+ module.channel_pruning_enabled = True
157
+ if not self.verbose[CHANNEL_PRUNING]:
158
+ logger.info(f'Channel pruning is enabled at step {self.training_steps}')
159
+ self.verbose[CHANNEL_PRUNING] = True
160
+
161
+ def check_all_modules(self):
162
+ # check all different compression methods we have
163
+ self.check_weight_quantization()
164
+ self.check_activation_quantization()
165
+ self.check_sparse_pruning()
166
+ self.check_head_pruning()
167
+ self.check_row_pruning()
168
+ self.check_channel_pruning()
169
+
170
+ def step(self, step_zero_check=False):
171
+ if not step_zero_check:
172
+ self.training_steps += 1
173
+ self.check_all_modules()
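A minimal sketch of how the scheduler is driven from a training loop; `compression_config` is assumed to be the fully parsed dict produced by the config functions earlier in this commit, and the model/optimizer construction is omitted.

    # Sketch only: call step() once per optimizer step so the schedule offsets line up
    # with the number of training steps taken.
    scheduler = compression_scheduler(model, compression_config)
    scheduler.step(step_zero_check=True)   # enable anything scheduled for step 0 without advancing
    for batch in dataloader:
        loss = model(batch)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        scheduler.step()                   # increments training_steps, then re-checks every technique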
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/compression/utils.py ADDED
@@ -0,0 +1,222 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ from torch import autograd
8
+ import math
9
+
10
+
11
+ class TopKBinarizer(autograd.Function):
12
+ """
13
+ Top-k Binarizer.
14
+ Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j}`
15
+ is among the k% highest values of S.
16
+ Implementation is inspired from:
17
+ https://github.com/yaozhewei/MLPruning
18
+ """
19
+
20
+ @staticmethod
21
+ def forward(ctx, inputs: torch.tensor, threshold: float, sigmoid: bool):
22
+ """
23
+ Args:
24
+ inputs (`torch.FloatTensor`)
25
+ The input matrix from which the binarizer computes the binary mask.
26
+ threshold (`float`)
27
+ The percentage of weights to keep (the rest is pruned).
28
+ `threshold` is a float between 0 and 1.
29
+ sigmoid (`bool`)
30
+ Whether to apply a sigmoid on the threshold
31
+ Returns:
32
+ mask (`torch.FloatTensor`)
33
+ Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is
34
+ retained, 0 - the associated weight is pruned).
35
+ """
36
+ # Get the subnetwork by sorting the inputs and using the top threshold
37
+ if sigmoid:
38
+ threshold = torch.sigmoid(threshold).item()
39
+ ctx.sigmoid = sigmoid
40
+ mask = inputs.clone()
41
+
42
+ _, idx = inputs.flatten().sort(descending=True)
43
+ j = math.ceil(threshold * inputs.numel())
44
+
45
+ # flat_out and mask access the same memory.
46
+ flat_out = mask.flatten()
47
+ flat_out[idx[j:]] = 0.
48
+ flat_out[idx[:j]] = 1.
49
+ ctx.save_for_backward(mask)
50
+
51
+ return mask
52
+
53
+ @staticmethod
54
+ def backward(ctx, gradOutput):
55
+ mask, = ctx.saved_tensors
56
+ if ctx.sigmoid:
57
+ return gradOutput.clone(), ((gradOutput * mask).sum()).view(-1), None
58
+ else:
59
+ return gradOutput.clone(), None, None
60
+
61
+
62
+ class SymQuantizer(torch.autograd.Function):
63
+ """
64
+ Symmetric quantization
65
+ """
66
+
67
+ @staticmethod
68
+ def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
69
+ """
70
+ Args:
71
+ inputs (`torch.FloatTensor`)
72
+ The input which needs to be quantized
73
+ num_bits (int, >=4)
74
+ Number of bits to use for quantization
75
+ min_value/max_value (torch.FloatTensor)
76
+ Used for static activation quantization
77
+ num_groups (int)
78
+ How many groups to partition the quantization into
79
+ Returns:
80
+ quantized_input (`torch.FloatTensor`)
81
+ Quantized input
82
+ """
83
+ assert (min_value is None and max_value is None) or (min_value is not None and max_value is not None
84
+ and num_groups == 1)
85
+ q_range = 2**num_bits
86
+ input_shape = input.shape
87
+ if min_value is None:
88
+ input = input.reshape(num_groups, -1)
89
+ max_input = torch.amax(torch.abs(input), dim=-1).view(num_groups, -1)
90
+ else:
91
+ max_input = torch.max(min_value.abs(), max_value).view(-1)
92
+
93
+ scale = 2 * max_input / q_range
94
+ output = (input / scale).round().clamp(-q_range // 2, q_range // 2 - 1) * scale
95
+ output = output.reshape(input_shape).contiguous()
96
+ return output
97
+
98
+ @staticmethod
99
+ def backward(ctx, grad_output):
100
+ grad_input = grad_output.clone()
101
+ return grad_input, None, None, None, None
102
+
103
+
104
+ class AsymQuantizer(torch.autograd.Function):
105
+ """
106
+ Asymmetric quantization
107
+ """
108
+
109
+ @staticmethod
110
+ def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
111
+ """
112
+ Args:
113
+ inputs (`torch.FloatTensor`)
114
+ The input which needs to be quantized
115
+ num_bits (int, >=4)
116
+ Number of bits to use for quantization
117
+ min_value/max_value (torch.FloatTensor)
118
+ Used for static activation quantization
119
+ num_groups (int)
120
+ How many groups to partition the quantization into
121
+ Returns:
122
+ quantized_input (`torch.FloatTensor`)
123
+ Quantized input
124
+ """
125
+
126
+ assert (min_value is None and max_value is None) or (min_value is not None and max_value is not None
127
+ and num_groups == 1)
128
+ q_range = 2**num_bits
129
+ input_shape = input.shape
130
+ if min_value is None:
131
+ input = input.reshape(num_groups, -1)
132
+ min_value = input.amin(dim=-1, keepdim=True)
133
+ max_value = input.amax(dim=-1, keepdim=True)
134
+
135
+ scale = (max_value - min_value) / q_range
136
+ zero_point = (min_value / scale).round() * scale
137
+
138
+ output = ((input - zero_point) / scale).round().clamp(0, q_range - 1) * scale + zero_point
139
+ output = output.reshape(input_shape).contiguous()
140
+ return output
141
+
142
+ @staticmethod
143
+ def backward(ctx, grad_output):
144
+ grad_input = grad_output.clone()
145
+ return grad_input, None, None, None, None
146
+
147
+
148
+ class TernaryQuantizer(torch.autograd.Function):
149
+ """
150
+ Ternary quantization
151
+ """
152
+
153
+ @staticmethod
154
+ def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
155
+ """
156
+ Args:
157
+ inputs (`torch.FloatTensor`)
158
+ The input which needs to be quantized
159
+ num_bits (int)
160
+ Dummy variable
161
+ min_value/max_value (torch.FloatTensor)
162
+ Used for static activation quantization; for now they are dummy variable
163
+ num_groups (int)
164
+ How many groups to partition the quantization into
165
+ Returns:
166
+ quantized_input (`torch.FloatTensor`)
167
+ Quantized input
168
+ """
169
+
170
+ assert (min_value is None and max_value is None)
171
+ input_flat = input.reshape(num_groups, -1)
172
+ n = input_flat.shape[1]
173
+ m = input_flat.norm(p=1, dim=1).div(n)
174
+ thres = (0.7 * m).view(-1, 1)
175
+ pos = (input_flat > thres).type(input.type())
176
+ neg = (input_flat < -thres).type(input.type())
177
+ mask = (input_flat.abs() > thres).type(input.type())
178
+ alpha = ((mask * input_flat).abs().sum(dim=1) / mask.sum(dim=1)).view(-1, 1)
179
+ output = alpha * pos - alpha * neg
180
+ output = output.reshape(input.shape).contiguous()
181
+ return output
182
+
183
+ @staticmethod
184
+ def backward(ctx, grad_output):
185
+ grad_input = grad_output.clone()
186
+ return grad_input, None, None, None, None
187
+
188
+
189
+ class BinaryQuantizer(torch.autograd.Function):
190
+ """
191
+ Binary quantization
192
+ """
193
+
194
+ @staticmethod
195
+ def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
196
+ """
197
+ Args:
198
+ inputs (`torch.FloatTensor`)
199
+ The input which needs to be quantized
200
+ num_bits (int)
201
+ Dummy variable
202
+ min_value/max_value (torch.FloatTensor)
203
+ Used for static activation quantization; for now they are dummy variable
204
+ num_groups (int)
205
+ How many groups to partition the quantization into
206
+ Returns:
207
+ quantized_input (`torch.FloatTensor`)
208
+ Quantized input
209
+ """
210
+
211
+ assert (min_value is None and max_value is None)
212
+ input_flat = input.reshape(num_groups, -1)
213
+ n = input_flat.shape[1]
214
+ m = input_flat.norm(p=1, dim=1, keepdim=True).div(n)
215
+ output = input_flat.sign().mul(m)
216
+ output = output.reshape(input.shape).contiguous()
217
+ return output
218
+
219
+ @staticmethod
220
+ def backward(ctx, grad_output):
221
+ grad_input = grad_output.clone()
222
+ return grad_input, None, None, None, None
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/compression/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (241 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ '''Copyright The Microsoft DeepSpeed Team'''
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/fused_optimizer.py ADDED
@@ -0,0 +1,466 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ """
6
+ Copyright NVIDIA/apex
7
+ This file is adapted from FP16_Optimizer in NVIDIA/apex
8
+ """
9
+
10
+ import torch
11
+ from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
12
+
13
+ from deepspeed.runtime import DeepSpeedOptimizer
14
+ from deepspeed.runtime.utils import get_global_norm, get_grad_norm, CheckOverflow, get_weight_norm, required_torch_version
15
+ from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, MIN_LOSS_SCALE
16
+ from deepspeed.utils import groups, logger, log_dist
17
+ from deepspeed import comm as dist
18
+ from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, CLIP_GRAD
19
+ from deepspeed.accelerator import get_accelerator
20
+
21
+ OVERFLOW_CHECK_TIMER = 'overflow_check'
22
+ COMPUTE_NORM_TIMER = 'compute_norm'
23
+ UNSCALE_AND_CLIP_TIMER = 'unscale_and_clip'
24
+ BASIC_STEP_TIMER = 'basic_step'
25
+ UPDATE_FP16_TIMER = 'update_fp16'
26
+
27
+ OVERFLOW_TIMERS = [COMPUTE_NORM_TIMER, OVERFLOW_CHECK_TIMER]
28
+ STEP_TIMERS = OVERFLOW_TIMERS + [UNSCALE_AND_CLIP_TIMER, BASIC_STEP_TIMER, UPDATE_FP16_TIMER]
29
+
30
+
31
+ class FP16_Optimizer(DeepSpeedOptimizer):
32
+ """
33
+ FP16 Optimizer for training fp16 models. Handles loss scaling.
34
+
35
+ For usage example please see, TODO: DeepSpeed V2 Tutorial
36
+ """
37
+
38
+ def __init__(self,
39
+ init_optimizer,
40
+ deepspeed=None,
41
+ static_loss_scale=1.0,
42
+ dynamic_loss_scale=False,
43
+ initial_dynamic_scale=2**32,
44
+ dynamic_loss_args=None,
45
+ verbose=True,
46
+ mpu=None,
47
+ clip_grad=0.0,
48
+ fused_adam_legacy=False,
49
+ has_moe_layers=False,
50
+ timers=None):
51
+
52
+ self.fused_adam_legacy = fused_adam_legacy
53
+ self.timers = timers
54
+ self.deepspeed = deepspeed
55
+ self.has_moe_layers = has_moe_layers
56
+ self.using_pipeline = self.deepspeed.pipeline_parallelism
57
+ if not get_accelerator().is_available():
58
+ raise SystemError("Cannot use fp16 without accelerator.")
59
+ self.optimizer = init_optimizer
60
+
61
+ # param flattened by groups
62
+ self.fp16_groups = []
63
+ self.fp16_groups_flat = []
64
+ self.fp32_groups_flat = []
65
+
66
+ self._global_grad_norm = 0.
67
+
68
+ # loop to deal with groups
69
+ for i, param_group in enumerate(self.optimizer.param_groups):
70
+ # push this group to list before modify
71
+ self.fp16_groups.append(param_group['params'])
72
+ # init fp16 weight buffer, flattened
73
+ self.fp16_groups_flat.append(_flatten_dense_tensors([p.clone().detach() for p in self.fp16_groups[i]]))
74
+ # set model fp16 weight to slices of flattened buffer
75
+ updated_params = _unflatten_dense_tensors(self.fp16_groups_flat[i], self.fp16_groups[i])
76
+ for p, q in zip(self.fp16_groups[i], updated_params):
77
+ p.data = q.data
78
+ # init master weight, flattened
79
+ self.fp32_groups_flat.append(self.fp16_groups_flat[i].clone().float().detach())
80
+ # modify optimizer to have flat master weight
81
+ self.fp32_groups_flat[i].requires_grad = True # keep this in case internal optimizer uses it
82
+ param_group['params'] = [self.fp32_groups_flat[i]]
83
+
84
+ # we may have a way of fusing dynamic scale. Not supported for now
85
+ if dynamic_loss_scale:
86
+ self.dynamic_loss_scale = True
87
+ self.cur_iter = 0
88
+ self.last_overflow_iter = -1
89
+ self.scale_factor = 2
90
+
91
+ if dynamic_loss_args is None:
92
+ self.cur_scale = initial_dynamic_scale
93
+ self.scale_window = 1000
94
+ self.min_loss_scale = 1
95
+ else:
96
+ self.cur_scale = dynamic_loss_args[INITIAL_LOSS_SCALE]
97
+ self.scale_window = dynamic_loss_args[SCALE_WINDOW]
98
+ self.min_loss_scale = dynamic_loss_args[MIN_LOSS_SCALE]
99
+ else:
100
+ self.dynamic_loss_scale = False
101
+ self.cur_iter = 0
102
+ self.cur_scale = static_loss_scale
103
+ self.verbose = verbose
104
+
105
+ self.custom_loss_scaler = False
106
+ self.external_loss_scale = None
107
+
108
+ self.clip_grad = clip_grad
109
+ self.norm_type = 2
110
+
111
+ if required_torch_version(max_version=0.4):
112
+ self.clip_grad_norm = torch.nn.utils.clip_grad_norm
113
+ else:
114
+ self.clip_grad_norm = torch.nn.utils.clip_grad_norm_
115
+
116
+ #model parallel object
117
+ self.mpu = mpu
118
+
119
+ self.overflow = False
120
+ self.overflow_checker = CheckOverflow(self.fp16_groups, mpu=self.mpu, deepspeed=deepspeed)
121
+ self.initialize_optimizer_states()
122
+
123
+ def initialize_optimizer_states(self):
124
+ for i, group in enumerate(self.fp16_groups):
125
+ self.fp32_groups_flat[i].grad = torch.zeros(self.fp32_groups_flat[i].size(),
126
+ device=self.fp32_groups_flat[i].device)
127
+
128
+ self.optimizer.step()
129
+
130
+ for i, group in enumerate(self.fp16_groups):
131
+ self.fp32_groups_flat[i].grad = None
132
+
133
+ return
134
+
135
+ def zero_grad(self, set_to_none=True):
136
+ """
137
+ Zero FP16 parameter grads.
138
+ """
139
+ # For speed, set model fp16 grad to None by default
140
+ for group in self.fp16_groups:
141
+ for p in group:
142
+ if set_to_none:
143
+ p.grad = None
144
+ else:
145
+ if p.grad is not None:
146
+ p.grad.detach_()
147
+ p.grad.zero_()
148
+
149
+ def step_fused_adam(self, closure=None):
150
+ """
151
+ Not supporting closure.
152
+ """
153
+
154
+ # First compute norm for all groups so we know if there is overflow
155
+ grads_groups_flat = []
156
+ norm_groups = []
157
+ for i, group in enumerate(self.fp16_groups):
158
+ grads_groups_flat.append(
159
+ _flatten_dense_tensors([
160
+ torch.zeros(p.size(), dtype=p.dtype, device=p.device) if p.grad is None else p.grad for p in group
161
+ ]))
162
+ norm_groups.append(get_weight_norm(grads_groups_flat[i], mpu=self.mpu))
163
+
164
+ self.overflow = self.overflow_checker.check_using_norm(norm_groups)
165
+ prev_scale = self.cur_scale
166
+ self._update_scale(self.overflow)
167
+
168
+ if self.overflow:
169
+ if self.verbose:
170
+ logger.info("[deepspeed] fp16 dynamic loss scale overflow! Skipping step. Attempted loss "
171
+ "scale: {}, reducing to {}".format(prev_scale, self.cur_scale))
172
+ return self.overflow
173
+
174
+ scaled_grad_norm = get_global_norm(norm_list=norm_groups)
175
+
176
+ combined_scale = self.unscale_and_clip_grads(grads_groups_flat, scaled_grad_norm, apply_scale=False)
177
+
178
+ # Stash unscaled gradient norm
179
+ self._global_grad_norm = scaled_grad_norm / self.cur_scale
180
+
181
+ # norm is in fact norm*cur_scale
182
+ self.optimizer.step(grads=[[g] for g in grads_groups_flat],
183
+ output_params=[[p] for p in self.fp16_groups_flat],
184
+ scale=combined_scale,
185
+ grad_norms=norm_groups)
186
+ # TODO: we probably don't need this? just to be safe
187
+ for i in range(len(norm_groups)):
188
+ updated_params = _unflatten_dense_tensors(self.fp16_groups_flat[i], self.fp16_groups[i])
189
+ for p, q in zip(self.fp16_groups[i], updated_params):
190
+ p.data = q.data
191
+ return self.overflow
192
+
193
+ def set_lr(self, lr):
194
+ """Set the learning rate."""
195
+ for param_group in self.optimizer.param_groups:
196
+ param_group["lr"] = lr
197
+
198
+ def get_lr(self):
199
+ """Return the current learning rate."""
200
+ return self.optimizer.param_groups[0]["lr"]
201
+
202
+ def override_loss_scale(self, loss_scale):
203
+ if loss_scale != self.external_loss_scale:
204
+ logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}')
205
+ self.custom_loss_scaler = True
206
+ self.external_loss_scale = loss_scale
207
+
208
+ def step(self, closure=None):
209
+ """
210
+ Not supporting closure.
211
+ """
212
+
213
+ if self.fused_adam_legacy:
214
+ return self.step_fused_adam()
215
+
216
+ # First determine if there is overflow.
217
+ self.timers(OVERFLOW_CHECK_TIMER).start()
218
+ fp16_params = []
219
+ for i, group in enumerate(self.fp16_groups):
220
+ fp16_params.extend([p for p in group if p.grad is not None])
221
+ self.overflow = self.overflow_checker.has_overflow(fp16_params)
222
+ self.timers(OVERFLOW_CHECK_TIMER).stop()
223
+ prev_scale = self.cur_scale
224
+ self._update_scale(self.overflow)
225
+ if self.overflow:
226
+ if self.verbose:
227
+ log_dist(
228
+ "Overflow detected. Skipping step. Attempted loss "
229
+ f"scale: {prev_scale}, reducing to {self.cur_scale}",
230
+ ranks=[0])
231
+ # Clear gradients
232
+ for i, group in enumerate(self.fp16_groups):
233
+ for p in group:
234
+ p.grad = None
235
+
236
+ self.timers.log(OVERFLOW_TIMERS)
237
+ return self.overflow
238
+
239
+ grads_groups_flat = []
240
+ for i, group in enumerate(self.fp16_groups):
241
+ data_type = self.fp32_groups_flat[i].dtype
242
+
243
+ grads_groups_flat.append(
244
+ _flatten_dense_tensors([
245
+ torch.zeros(p.size(), dtype=data_type, device=p.device) if p.grad is None else p.grad.to(data_type)
246
+ for p in group
247
+ ]))
248
+
249
+ for p in group:
250
+ p.grad = None
251
+
252
+ self.fp32_groups_flat[i].grad = grads_groups_flat[i]
253
+
254
+ self.timers(COMPUTE_NORM_TIMER).start()
255
+
256
+ all_groups_norm = get_grad_norm(self.fp32_groups_flat, mpu=self.mpu)
257
+
258
+ self.timers(COMPUTE_NORM_TIMER).stop()
259
+
260
+ if self.has_moe_layers:
261
+ all_groups_norm = self._get_norm_with_moe_layers(all_groups_norm)
262
+
263
+ scaled_global_grad_norm = get_global_norm(norm_list=[all_groups_norm])
264
+
265
+ # Stash unscaled gradient norm
266
+ self._global_grad_norm = scaled_global_grad_norm / self.cur_scale
267
+
268
+ self.timers(UNSCALE_AND_CLIP_TIMER).start()
269
+ self.unscale_and_clip_grads(grads_groups_flat, scaled_global_grad_norm)
270
+ self.timers(UNSCALE_AND_CLIP_TIMER).stop()
271
+
272
+ self.timers(BASIC_STEP_TIMER).start()
273
+ self.optimizer.step()
274
+ self.timers(BASIC_STEP_TIMER).stop()
275
+
276
+ #get rid of the fp32 gradients. Not needed anymore
277
+ for group in self.fp32_groups_flat:
278
+ group.grad = None
279
+
280
+ self.timers(UPDATE_FP16_TIMER).start()
281
+
282
+ for i in range(len(self.fp16_groups)):
283
+ updated_params = _unflatten_dense_tensors(self.fp32_groups_flat[i], self.fp16_groups[i])
284
+ for p, q in zip(self.fp16_groups[i], updated_params):
285
+ p.data.copy_(q.data)
286
+
287
+ self.timers(UPDATE_FP16_TIMER).stop()
288
+
289
+ self.timers.log(STEP_TIMERS)
290
+
291
+ return self.overflow
292
+
293
+ def _get_norm_with_moe_layers(self, all_groups_norm):
294
+ #all_groups_norm_old = all_groups_norm
295
+ # Need to allreduce (avg) the norms across different ranks because moe params will not be synced during allreduce
296
+ if self.using_pipeline:
297
+ pg = self.deepspeed.mpu.get_data_parallel_group()
298
+ else:
299
+ pg = groups._get_data_parallel_group()
300
+ scaled_norm = all_groups_norm * 1.0 / float(dist.get_world_size(group=pg))
301
+ scaled_norm_tensor = torch.tensor(scaled_norm, device=self.fp32_groups_flat[0].device, dtype=torch.float)
302
+ dist.all_reduce(scaled_norm_tensor, group=pg)
303
+ all_groups_norm = scaled_norm_tensor.item()
304
+ #print(f"old = {all_groups_norm_old} and new = {all_groups_norm} at rank: {deepspeed.comm.get_rank()}")
305
+ return all_groups_norm
306
+
307
+ def unscale_and_clip_grads(self, grad_groups_flat, total_norm, apply_scale=True):
308
+ # compute combined scale factor for this group
309
+ combined_scale = self.cur_scale
310
+ if self.clip_grad > 0.:
311
+ # norm is in fact norm*scale
312
+ clip = ((total_norm / self.cur_scale) + 1e-6) / self.clip_grad
313
+ if clip > 1:
314
+ combined_scale = clip * self.cur_scale
315
+
316
+ if apply_scale:
317
+ for grad in grad_groups_flat:
318
+ grad.data.mul_(1. / combined_scale)
319
+
320
+ return combined_scale
321
+
322
+ def backward(self, loss, create_graph=False, retain_graph=False):
323
+ """
324
+ :attr:`backward` performs the following steps:
325
+
326
+ 1. fp32_loss = loss.float()
327
+ 2. scaled_loss = fp32_loss*loss_scale
328
+ 3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves
329
+ """
330
+ if self.custom_loss_scaler:
331
+ scaled_loss = self.external_loss_scale * loss
332
+ scaled_loss.backward()
333
+ else:
334
+ scaled_loss = (loss.float()) * self.cur_scale
335
+ scaled_loss.backward(create_graph=create_graph, retain_graph=retain_graph)
336
+
337
+ def _update_scale(self, skip):
338
+ if self.dynamic_loss_scale:
339
+ prev_scale = self.cur_scale
340
+ if skip:
341
+ self.cur_scale = max(self.cur_scale / self.scale_factor, self.min_loss_scale)
342
+ self.last_overflow_iter = self.cur_iter
343
+ if self.verbose:
344
+ logger.info(f"\nGrad overflow on iteration {self.cur_iter}")
345
+ logger.info(f"Reducing dynamic loss scale from {prev_scale} to {self.cur_scale}")
346
+ else:
347
+ # Ensure self.scale_window updates since last overflow
348
+ stable_interval = (self.cur_iter - self.last_overflow_iter) - 1
349
+ if (stable_interval > 0) and (stable_interval % self.scale_window == 0):
350
+ self.cur_scale *= self.scale_factor
351
+ if self.verbose:
352
+ logger.info(f"No Grad overflow for {self.scale_window} iterations")
353
+ logger.info(f"Increasing dynamic loss scale from {prev_scale} to {self.cur_scale}")
354
+ else:
355
+ if skip:
356
+ logger.info("Grad overflow on iteration: %s", self.cur_iter)
357
+ logger.info("Using static loss scale of: %s", self.cur_scale)
358
+ self.cur_iter += 1
359
+ return
360
+
361
+ # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
362
+ def _get_state(self):
363
+ return self.optimizer.state
364
+
365
+ def _set_state(self, value):
366
+ self.optimizer.state = value
367
+
368
+ state = property(_get_state, _set_state)
369
+
370
+ # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
371
+ # (for example, to adjust the learning rate)
372
+ def _get_param_groups(self):
373
+ return self.optimizer.param_groups
374
+
375
+ def _set_param_groups(self, value):
376
+ self.optimizer.param_groups = value
377
+
378
+ param_groups = property(_get_param_groups, _set_param_groups)
379
+
380
+ def state_dict(self):
381
+ """
382
+ Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
383
+ This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
384
+ of the contained Pytorch optimizer.
385
+ Example::
386
+ checkpoint = {}
387
+ checkpoint['model'] = model.state_dict()
388
+ checkpoint['optimizer'] = optimizer.state_dict()
389
+ torch.save(checkpoint, "saved.pth")
390
+ """
391
+ state_dict = {}
392
+ state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
393
+ state_dict['cur_scale'] = self.cur_scale
394
+ state_dict['cur_iter'] = self.cur_iter
395
+ if state_dict['dynamic_loss_scale']:
396
+ state_dict['last_overflow_iter'] = self.last_overflow_iter
397
+ state_dict['scale_factor'] = self.scale_factor
398
+ state_dict['scale_window'] = self.scale_window
399
+ state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict()
400
+ state_dict['fp32_groups_flat'] = self.fp32_groups_flat
401
+ state_dict[CLIP_GRAD] = self.clip_grad
402
+ return state_dict
403
+
404
+ # Refresh fp32 master params from fp16 copies
405
+ def refresh_fp32_params(self):
406
+ for current, saved in zip(self.fp32_groups_flat, self.fp16_groups_flat):
407
+ current.data.copy_(saved.data)
408
+
409
+ def load_state_dict(self, state_dict, load_optimizer_states=True):
410
+ """
411
+ Loads a state_dict created by an earlier call to state_dict().
412
+ If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
413
+ whose parameters in turn came from ``model``, it is expected that the user
414
+ will call ``model.load_state_dict()`` before
415
+ ``fp16_optimizer_instance.load_state_dict()`` is called.
416
+ Example::
417
+ model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half()
418
+ optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
419
+ optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
420
+ ...
421
+ checkpoint = torch.load("saved.pth")
422
+ model.load_state_dict(checkpoint['model'])
423
+ optimizer.load_state_dict(checkpoint['optimizer'])
424
+ """
425
+ # I think it should actually be ok to reload the optimizer before the model.
426
+ self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
427
+ self.cur_scale = state_dict['cur_scale']
428
+ self.cur_iter = state_dict['cur_iter']
429
+ if state_dict['dynamic_loss_scale']:
430
+ self.last_overflow_iter = state_dict['last_overflow_iter']
431
+ self.scale_factor = state_dict['scale_factor']
432
+ self.scale_window = state_dict['scale_window']
433
+ if load_optimizer_states:
434
+ self.optimizer.load_state_dict(state_dict[OPTIMIZER_STATE_DICT])
435
+ self.clip_grad = state_dict[CLIP_GRAD]
436
+ # At this point, the optimizer's references to the model's fp32 parameters are up to date.
437
+ # The optimizer's hyperparameters and internal buffers are also up to date.
438
+ # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
439
+ # out of date. There are two options.
440
+ # 1: Refresh the master params from the model's fp16 params.
441
+ # This requires less storage but incurs precision loss.
442
+ # 2: Save and restore the fp32 master copies separately.
443
+ # We choose option 2.
444
+ #
445
+ # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
446
+ # of their associated parameters, because it's possible those buffers might not exist yet in
447
+ # the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
448
+ # constructed in the same way as the one whose state_dict we are loading, the same master params
449
+ # are guaranteed to exist, so we can just copy_() from the saved master params.
450
+ for current, saved in zip(self.fp32_groups_flat, state_dict['fp32_groups_flat']):
451
+ current.data.copy_(saved.data)
452
+
453
+ def __repr__(self):
454
+ return repr(self.optimizer)
455
+
456
+ # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
457
+ def _get_loss_scale(self):
458
+ if self.custom_loss_scaler:
459
+ return self.external_loss_scale
460
+ else:
461
+ return self.cur_scale
462
+
463
+ def _set_loss_scale(self, value):
464
+ self.loss_scaler.cur_scale = value
465
+
466
+ loss_scale = property(_get_loss_scale, _set_loss_scale)
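The unscale_and_clip_grads step above folds loss-scale removal and gradient clipping into a single division: gradients arrive multiplied by cur_scale, and when clip_grad > 0 the divisor is enlarged so that the unscaled norm is capped at roughly clip_grad. Below is a minimal, self-contained sketch of that rule in plain PyTorch; the helper name toy_unscale_and_clip and the toy numbers are illustrative only, not part of DeepSpeed's API.

import torch

def toy_unscale_and_clip(flat_grads, total_scaled_norm, cur_scale, clip_grad):
    # total_scaled_norm is norm(grad) * cur_scale, because the gradients were
    # produced from a loss multiplied by cur_scale. Dividing by cur_scale alone
    # undoes loss scaling; if the unscaled norm would exceed clip_grad, the
    # divisor grows so the resulting norm is capped at (roughly) clip_grad.
    combined_scale = cur_scale
    if clip_grad > 0.0:
        clip = ((total_scaled_norm / cur_scale) + 1e-6) / clip_grad
        if clip > 1:
            combined_scale = clip * cur_scale
    for g in flat_grads:
        g.mul_(1.0 / combined_scale)
    return combined_scale

# Example: loss scale 1024, unscaled grad norm ~8, clip threshold 1.0.
g = torch.full((4,), 1024.0 * 4.0)    # scaled gradient
scaled_norm = g.norm().item()         # = 1024 * 8
toy_unscale_and_clip([g], scaled_norm, cur_scale=1024.0, clip_grad=1.0)
print(g.norm())                       # ~1.0 after unscaling and clipping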
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/loss_scaler.py ADDED
@@ -0,0 +1,270 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ """
6
+ Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
7
+
8
+ Licensed under the Apache License, Version 2.0 (the "License");
9
+ you may not use this file except in compliance with the License.
10
+ You may obtain a copy of the License at
11
+
12
+ http://www.apache.org/licenses/LICENSE-2.0
13
+
14
+ Unless required by applicable law or agreed to in writing, software
15
+ distributed under the License is distributed on an "AS IS" BASIS,
16
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
+ See the License for the specific language governing permissions and
18
+ limitations under the License.
19
+ Taken and modified for DeepSpeed from:
20
+ https://github.com/NVIDIA/Megatron-LM/blob/master/fp16/loss_scaler.py
21
+ Commit: 93ab4bea59dc5cbf97c079d313741866af4deac9
22
+ """
23
+
24
+ import torch
25
+ from deepspeed import comm as dist
26
+ from deepspeed.utils import logger
27
+
28
+ INITIAL_LOSS_SCALE = 'init_scale'
29
+ SCALE_WINDOW = 'scale_window'
30
+ DELAYED_SHIFT = 'delayed_shift'
31
+ CONSECUTIVE_HYSTERESIS = 'consecutive_hysteresis'
32
+ MIN_LOSS_SCALE = 'min_scale'
33
+
34
+
35
+ # item() is a recent addition, so this helps with backward compatibility.
36
+ def to_python_float(t):
37
+ if hasattr(t, 'item'):
38
+ return t.item()
39
+ return t[0]
40
+
41
+
42
+ class LossScalerBase:
43
+ """LossScalerBase
44
+ Base class for a loss scaler
45
+ """
46
+
47
+ def __init__(self, cur_scale):
48
+ self.cur_scale = cur_scale
49
+ self.dynamic = False
50
+
51
+ @property
52
+ def loss_scale(self):
53
+ return self.cur_scale
54
+
55
+ def scale_gradient(self, module, grad_in, grad_out):
56
+ return tuple(self.loss_scale * g for g in grad_in)
57
+
58
+ def update_scale(self, overflow):
59
+ pass
60
+
61
+ def backward(self, loss, retain_graph=False):
62
+ scaled_loss = loss * self.loss_scale
63
+ scaled_loss.backward(retain_graph=retain_graph)
64
+ # print(f'LossScalerBackward: {scaled_loss=}')
65
+
66
+
67
+ class LossScaler(LossScalerBase):
68
+ """
69
+ Class that manages a static loss scale. This class is intended to interact with
70
+ :class:`FP16_Optimizer`, and should not be directly manipulated by the user.
71
+
72
+ Use of :class:`LossScaler` is enabled via the ``static_loss_scale`` argument to
73
+ :class:`FP16_Optimizer`'s constructor.
74
+
75
+ Args:
76
+ scale (float, optional, default=1.0): The loss scale.
77
+ """
78
+
79
+ def __init__(self, scale=1):
80
+ super(LossScaler, self).__init__(scale)
81
+
82
+ # `params` is a list / generator of torch.Variable
83
+ def has_overflow(self, params):
84
+ return False
85
+
86
+ # `x` is a torch.Tensor
87
+ def _has_inf_or_nan(x):
88
+ return False
89
+
90
+
91
+ class DynamicLossScaler(LossScalerBase):
92
+ """
93
+ Class that manages dynamic loss scaling. It is recommended to use :class:`DynamicLossScaler`
94
+ indirectly, by supplying ``dynamic_loss_scale=True`` to the constructor of
95
+ :class:`FP16_Optimizer`. However, it's important to understand how :class:`DynamicLossScaler`
96
+ operates, because the default options can be changed using the
97
+ ``dynamic_loss_args`` argument to :class:`FP16_Optimizer`'s constructor.
98
+
99
+ Loss scaling is designed to combat the problem of underflowing gradients encountered at long
100
+ times when training fp16 networks. Dynamic loss scaling begins by attempting a very high loss
101
+ scale. Ironically, this may result in OVERflowing gradients. If overflowing gradients are
102
+ encountered, :class:`DynamicLossScaler` informs :class:`FP16_Optimizer` that an overflow has
103
+ occurred.
104
+ :class:`FP16_Optimizer` then skips the update step for this particular iteration/minibatch,
105
+ and :class:`DynamicLossScaler` adjusts the loss scale to a lower value.
106
+ If a certain number of iterations occur without overflowing gradients detected,
107
+ :class:`DynamicLossScaler` increases the loss scale once more.
108
+ In this way :class:`DynamicLossScaler` attempts to "ride the edge" of
109
+ always using the highest loss scale possible without incurring overflow.
110
+
111
+ Args:
112
+ init_scale (float, optional, default=2**32): Initial loss scale attempted by :class:`DynamicLossScaler.`
113
+ scale_factor (float, optional, default=2.0): Factor used when adjusting the loss scale. If an overflow is encountered, the loss scale is readjusted to loss scale/``scale_factor``. If ``scale_window`` consecutive iterations take place without an overflow, the loss scale is readjusted to loss_scale*``scale_factor``.
114
+ scale_window (int, optional, default=1000): Number of consecutive iterations without an overflow to wait before increasing the loss scale.
115
+ consecutive_hysteresis (bool, optional, default=False): Whether to refill hysteresis if we reach an iteration that doesn't overflow
116
+ """
117
+
118
+ def __init__(self,
119
+ init_scale=2**32,
120
+ scale_factor=2.,
121
+ scale_window=1000,
122
+ min_scale=1,
123
+ delayed_shift=1,
124
+ consecutive_hysteresis=False,
125
+ raise_error_at_min_scale=True,
126
+ dtype=torch.half):
127
+ super(DynamicLossScaler, self).__init__(init_scale)
128
+ self.cur_iter = 0
129
+ self.last_overflow_iter = -1
130
+ self.scale_factor = scale_factor
131
+ self.scale_window = scale_window
132
+ self.min_scale = min_scale
133
+ self.delayed_shift = delayed_shift
134
+ self.cur_hysteresis = delayed_shift
135
+ self.consecutive_hysteresis = consecutive_hysteresis
136
+ self.raise_error_at_min_scale = raise_error_at_min_scale
137
+ self.dynamic = True
138
+ self.dtype = dtype
139
+
140
+ # `params` is a list / generator of torch.Variable
141
+ def has_overflow_serial(self, params):
142
+ for p in params:
143
+ if p.grad is not None and self._has_inf_or_nan(p.grad.data):
144
+ return True
145
+
146
+ return False
147
+
148
+ # `x` is a torch.Tensor
149
+ def _has_inf_or_nan(x):
150
+ try:
151
+ # if x is half, the .float() incurs an additional deep copy, but it's necessary if
152
+ # Pytorch's .sum() creates a one-element tensor of the same type as x
153
+ # (which is true for some recent version of pytorch).
154
+ cpu_sum = float(x.float().sum())
155
+ # More efficient version that can be used if .sum() returns a Python scalar
156
+ # cpu_sum = float(x.sum())
157
+ except RuntimeError as instance:
158
+ # We want to check if inst is actually an overflow exception.
159
+ # RuntimeError could come from a different error.
160
+ # If so, we still want the exception to propagate.
161
+ if "value cannot be converted" not in instance.args[0]:
162
+ raise
163
+ return True
164
+ else:
165
+ if cpu_sum in [float('inf'), -float('inf')] or cpu_sum != cpu_sum:
166
+ return True
167
+ return False
168
+
169
+ # `overflow` is boolean indicating whether the gradient overflowed
170
+ def update_scale(self, overflow):
171
+ if overflow:
172
+ # self.cur_scale /= self.scale_factor
173
+ if self.delayed_shift == 1 or self.cur_hysteresis == 1:
174
+ if (self.cur_scale == self.min_scale) and self.raise_error_at_min_scale:
175
+ raise Exception(
176
+ "Current loss scale already at minimum - cannot decrease scale anymore. Exiting run.")
177
+ else:
178
+ next_scale = max(self.cur_scale / self.scale_factor, self.min_scale)
179
+ if dist.get_rank() == 0:
180
+ overflow_msg = f"[deepspeed] OVERFLOW! Rank {dist.get_rank()} Skipping step."
181
+ if self.dtype == torch.half:
182
+ overflow_msg += f" Attempted loss scale: {int(self.cur_scale)}, reducing to {int(next_scale)}"
183
+ logger.info(overflow_msg)
184
+ self.cur_scale = next_scale
185
+ else:
186
+ if dist.get_rank() == 0:
187
+ overflow_msg = f"[deepspeed] OVERFLOW! Rank {dist.get_rank()} Skipping step."
188
+ if self.dtype == torch.half:
189
+ overflow_msg += f" Attempted loss scale: {int(self.cur_scale)}, but hysteresis is {self.cur_hysteresis}. Reducing hysteresis to {self.cur_hysteresis-1}"
190
+ logger.info(overflow_msg)
191
+ self.cur_hysteresis -= 1
192
+ self.last_overflow_iter = self.cur_iter
193
+ else:
194
+ if self.consecutive_hysteresis:
195
+ if dist.get_rank() == 0:
196
+ hysteresis_msg = f"Consecutive hysteresis is enabled. Restoring hysteresis to {self.delayed_shift}"
197
+ logger.info(hysteresis_msg)
198
+ self.cur_hysteresis = self.delayed_shift
199
+ if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
200
+ if not self.consecutive_hysteresis:
201
+ self.cur_hysteresis = self.delayed_shift
202
+ self.cur_scale *= self.scale_factor
203
+ self.cur_iter += 1
204
+
205
+
206
+ # Although loss scaling is only defined for fp16, for backwards compatibility
207
+ # we still create a scaler for other dtypes (fp32, bf16) which does not perform any scaling.
208
+ def CreateLossScaler(dtype, static_loss_scale, dynamic_scaling, dynamic_loss_args):
209
+ if dtype == torch.half and dynamic_scaling:
210
+ if dynamic_loss_args is None:
211
+ return DynamicLossScaler(dtype=dtype)
212
+ return DynamicLossScaler(dtype=dtype, **dynamic_loss_args)
213
+
214
+ loss_scale_value = static_loss_scale if dtype == torch.half else 1.0
215
+ return LossScaler(scale=loss_scale_value)
216
+
217
+
218
+ ##############################################################
219
+ # Example usage below here -- assuming it's in a separate file
220
+ ##############################################################
221
+ """
222
+ TO-DO separate out into an example.
223
+ if __name__ == "__main__":
224
+ import torch
225
+ from torch.autograd import Variable
226
+ from dynamic_loss_scaler import DynamicLossScaler
227
+
228
+ # N is batch size; D_in is input dimension;
229
+ # H is hidden dimension; D_out is output dimension.
230
+ N, D_in, H, D_out = 64, 1000, 100, 10
231
+
232
+ # Create random Tensors to hold inputs and outputs, and wrap them in Variables.
233
+ x = Variable(torch.randn(N, D_in), requires_grad=False)
234
+ y = Variable(torch.randn(N, D_out), requires_grad=False)
235
+
236
+ w1 = Variable(torch.randn(D_in, H), requires_grad=True)
237
+ w2 = Variable(torch.randn(H, D_out), requires_grad=True)
238
+ parameters = [w1, w2]
239
+
240
+ learning_rate = 1e-6
241
+ optimizer = torch.optim.SGD(parameters, lr=learning_rate)
242
+ loss_scaler = DynamicLossScaler()
243
+
244
+ for t in range(500):
245
+ y_pred = x.mm(w1).clamp(min=0).mm(w2)
246
+ loss = (y_pred - y).pow(2).sum() * loss_scaler.loss_scale
247
+ print('Iter {} loss scale: {}'.format(t, loss_scaler.loss_scale))
248
+ print('Iter {} scaled loss: {}'.format(t, loss.data[0]))
249
+ print('Iter {} unscaled loss: {}'.format(t, loss.data[0] / loss_scaler.loss_scale))
250
+
251
+ # Run backprop
252
+ optimizer.zero_grad()
253
+ loss.backward()
254
+
255
+ # Check for overflow
256
+ has_overflow = DynamicLossScaler.has_overflow(parameters)
257
+
258
+ # If no overflow, unscale grad and update as usual
259
+ if not has_overflow:
260
+ for param in parameters:
261
+ param.grad.data.mul_(1. / loss_scaler.loss_scale)
262
+ optimizer.step()
263
+ # Otherwise, don't do anything -- ie, skip iteration
264
+ else:
265
+ print('fp16 dynamic loss scale overflow!')
266
+
267
+ # Update loss scale for next iteration
268
+ loss_scaler.update_scale(has_overflow)
269
+
270
+ """
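DynamicLossScaler.update_scale above implements the usual dynamic loss-scaling schedule: on overflow the scale is divided by scale_factor (bounded below by min_scale, and optionally delayed by the hysteresis counter), and after scale_window consecutive clean iterations it is multiplied by scale_factor again. CreateLossScaler returns this dynamic scaler only for torch.half with dynamic scaling enabled; every other dtype gets a static LossScaler whose update_scale is a no-op. A minimal sketch of the schedule, with hysteresis (delayed_shift) left out, is shown below; ToyDynamicScale is illustrative only and not part of DeepSpeed.

class ToyDynamicScale:
    def __init__(self, init_scale=2**16, scale_factor=2.0, scale_window=1000, min_scale=1.0):
        self.cur_scale = init_scale
        self.scale_factor = scale_factor
        self.scale_window = scale_window
        self.min_scale = min_scale
        self.cur_iter = 0
        self.last_overflow_iter = -1

    def update_scale(self, overflow):
        if overflow:
            # Overflow: the step is skipped and the scale shrinks (never below min_scale).
            self.cur_scale = max(self.cur_scale / self.scale_factor, self.min_scale)
            self.last_overflow_iter = self.cur_iter
        elif (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
            # scale_window consecutive clean iterations: try a larger scale again.
            self.cur_scale *= self.scale_factor
        self.cur_iter += 1

s = ToyDynamicScale(init_scale=2**16, scale_window=4)
for overflow in [True, False, False, False, False]:
    s.update_scale(overflow)
print(s.cur_scale)  # halved to 2**15 after the overflow, back to 2**16 after 4 clean steps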
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__init__.py ADDED
@@ -0,0 +1,8 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .adam import OnebitAdam
7
+ from .lamb import OnebitLamb
8
+ from .zoadam import ZeroOneAdam
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/lamb.cpython-310.pyc ADDED
Binary file (12.3 kB).
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/lamb.py ADDED
@@ -0,0 +1,443 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import types
7
+ import torch
8
+ import numpy as np
9
+ from deepspeed import comm as dist
10
+ from deepspeed.runtime.utils import required_torch_version
11
+ from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
12
+ from deepspeed.accelerator import get_accelerator
13
+
14
+
15
+ class OnebitLamb(torch.optim.Optimizer):
16
+ """Implements the 1-bit Lamb algorithm. Currently GPU-only.
17
+ For usage example please see https://www.deepspeed.ai/tutorials/onebit-lamb/
18
+ For technical details please see our paper https://arxiv.org/abs/2104.06069.
19
+
20
+ Arguments:
21
+ params (iterable): iterable of parameters to optimize or dicts defining
22
+ parameter groups.
23
+ lr (float, optional): learning rate. (default: 1e-3)
24
+ freeze_step (int, optional): Number of steps for warmup (uncompressed)
25
+ stage before we start using compressed communication. (default 100000)
26
+ betas (Tuple[float, float], optional): coefficients used for computing
27
+ running averages of gradient and its square. (default: (0.9, 0.999))
28
+ eps (float, optional): term added to the denominator to improve
29
+ numerical stability. (default: 1e-8)
30
+ weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
31
+ max_coeff(float, optional): maximum value of the lamb coefficient (default: 10.0)
32
+ min_coeff(float, optional): minimum value of the lamb coefficient (default: 0.01)
33
+ amsgrad (boolean, optional): whether to use the AMSGrad variant of this
34
+ algorithm from the paper `On the Convergence of Adam and Beyond`_
35
+ (default: False) NOT SUPPORTED in 1-bit Lamb!
36
+ eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
37
+ adds eps to the bias-corrected second moment estimate before
38
+ evaluating square root instead of adding it to the square root of
39
+ second moment estimate as in the original paper. (default: False)
40
+ cuda_aware (boolean, required): Set True if the underlying MPI implementation
41
+ supports CUDA-Aware communication. (default: False)
42
+ comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl')
43
+ coeff_beta (float, optional): coefficient used for computing
44
+ running averages of lamb coefficient (default: 0.9) note that you may want to
45
+ increase or decrease this beta depending on the freeze_step you choose, as
46
+ 1/(1 - coeff_beta) should be smaller than or equal to freeze_step
47
+ factor_max (float, optional): maximum value of scaling factor to the frozen lamb
48
+ coefficient during compression stage (default: 4.0)
49
+ factor_min (float, optional): minimum value of scaling factor to the frozen lamb
50
+ coefficient during compression stage (default: 0.5)
51
+ factor_threshold (float, optional): threshold of how much the scaling factor can
52
+ fluctuate between steps (default: 0.1)
53
+ .. _Large Batch Optimization for Deep Learning\\: Training BERT in 76 minutes:
54
+ https://arxiv.org/abs/1904.00962
55
+ .. _Adam\\: A Method for Stochastic Optimization:
56
+ https://arxiv.org/abs/1412.6980
57
+ .. _On the Convergence of Adam and Beyond:
58
+ https://openreview.net/forum?id=ryQu7f-RZ
59
+ """
60
+
61
+ def __init__(self,
62
+ params,
63
+ deepspeed=None,
64
+ lr=1e-3,
65
+ freeze_step=100000,
66
+ bias_correction=True,
67
+ betas=(0.9, 0.999),
68
+ eps=1e-8,
69
+ eps_inside_sqrt=False,
70
+ weight_decay=0.,
71
+ max_grad_norm=0.,
72
+ max_coeff=10.0,
73
+ min_coeff=0.01,
74
+ amsgrad=False,
75
+ cuda_aware=False,
76
+ comm_backend_name='nccl',
77
+ coeff_beta=0.9,
78
+ factor_max=4.0,
79
+ factor_min=0.5,
80
+ factor_threshold=0.1):
81
+
82
+ if amsgrad:
83
+ raise RuntimeError('1-bit Lamb does not support the AMSGrad variant.')
84
+
85
+ defaults = dict(lr=lr,
86
+ bias_correction=bias_correction,
87
+ betas=betas,
88
+ eps=eps,
89
+ weight_decay=weight_decay,
90
+ max_grad_norm=max_grad_norm,
91
+ max_coeff=max_coeff,
92
+ min_coeff=min_coeff)
93
+
94
+ super(OnebitLamb, self).__init__(params, defaults)
95
+ self.eps_mode = 0 if eps_inside_sqrt else 1
96
+ self.deepspeed = deepspeed
97
+ self.lamb_freeze_key = False
98
+ self.initialize = False
99
+ self.freeze_step = freeze_step
100
+ self.cuda_aware = cuda_aware
101
+ self.coeff_beta = coeff_beta
102
+ self.factor_max = factor_max
103
+ self.factor_min = factor_min
104
+ self.factor_threshold = factor_threshold
105
+ self.using_pipeline = False
106
+
107
+ self.comm_backend_name = comm_backend_name
108
+
109
+ assert dist.is_initialized(), "Please initialize the torch distributed backend."
110
+ # Empty initializer. Set handle based on the comm backend as follows.
111
+ self.comm_backend_handle = None
112
+ if self.comm_backend_name == 'nccl':
113
+ assert (
114
+ required_torch_version(min_version=1.8)
115
+ ), "Please use torch 1.8 or greater to enable NCCL backend in 1-bit Adam. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend"
116
+ from deepspeed.runtime.comm.nccl import NcclBackend
117
+ self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
118
+ self.comm_backend_handle = NcclBackend(self.deepspeed.mpu)
119
+ elif self.comm_backend_name == 'mpi':
120
+ from deepspeed.runtime.comm.mpi import MpiBackend
121
+ self.comm_backend_handle = MpiBackend(cuda_aware)
122
+ elif self.comm_backend_name == 'hccl':
123
+ from deepspeed.runtime.comm.hccl import HcclBackend
124
+ self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
125
+ self.comm_backend_handle = HcclBackend(self.deepspeed.mpu)
126
+
127
+ self.size = self.comm_backend_handle.size
128
+
129
+ self.divider = int(self.size * 8 / np.gcd(self.size, 8))
130
+
131
+ self.exp_avg_flat = []
132
+ self.dummy_exp_avg = {}
133
+ self.corrected_tensor_sizes = []
134
+ self.server_chunk_sizes = []
135
+ self.worker_errors = []
136
+ self.server_errors = []
137
+
138
+ self.lamb_coeffs = []
139
+
140
+ def step(self, closure=None, grads=None):
141
+ """Performs a single optimization step.
142
+ Arguments:
143
+ closure (callable, optional): A closure that reevaluates the model
144
+ and returns the loss.
145
+ grads (list of tensors, optional): weight gradient to use for the
146
+ optimizer update. If gradients have type torch.half, parameters
147
+ are expected to be in type torch.float. (default: None)
148
+ """
149
+ loss = None
150
+ if closure is not None:
151
+ loss = closure()
152
+
153
+ if grads is None:
154
+ grads_group = [None] * len(self.param_groups)
155
+ # backward compatibility
156
+ # assuming a list/generator of parameters means a single group
157
+ elif isinstance(grads, types.GeneratorType):
158
+ grads_group = [grads]
159
+ elif type(grads[0]) != list:
160
+ grads_group = [grads]
161
+ else:
162
+ grads_group = grads
163
+
164
+ # remove the previous stats
165
+ del self.lamb_coeffs[:]
166
+
167
+ if self.lamb_freeze_key:
168
+ exp_avg_last_step = []
169
+ for group in self.param_groups:
170
+ exp_avg_last_step.append([self.state[p]['exp_avg'].detach().clone() for p in group['params']])
171
+ if 'scaling_coeff' not in self.state[self.param_groups[0]['params'][0]]:
172
+ # Compute the scaling_coeff for each momentum at the end of warmup stage.
173
+ # This is used to reduce compression error during compression stage.
174
+ momentum_scales = []
175
+ for group in self.param_groups:
176
+ momentum_scales.append([(torch.linalg.norm(self.state[p]['exp_avg']) /
177
+ np.sqrt(torch.numel(self.state[p]['exp_avg']))).item()
178
+ for p in group['params']])
179
+ united_scale = sum([sum(x) for x in momentum_scales]) / sum([len(x) for x in momentum_scales])
180
+ for i, group in enumerate(self.param_groups):
181
+ for j, p in enumerate(group['params']):
182
+ self.state[p]['scaling_coeff'] = united_scale / momentum_scales[i][j]
183
+
184
+ for group, grads_this_group in zip(self.param_groups, grads_group):
185
+ if grads_this_group is None:
186
+ grads_this_group = [None] * len(group['params'])
187
+
188
+ bias_correction = 1 if group['bias_correction'] else 0
189
+
190
+ for p, grad in zip(group['params'], grads_this_group):
191
+ if p.grad is None and grad is None:
192
+ continue
193
+ if grad is None:
194
+ grad = p.grad.data
195
+ if grad.is_sparse:
196
+ raise RuntimeError('1-bit Lamb does not support sparse gradients')
197
+
198
+ state = self.state[p]
199
+
200
+ # State initialization
201
+ if len(state) == 0 or (len(state) == 1 and 'scaling_coeff' in state.keys()):
202
+ state['step'] = 0
203
+ state['lamb_coeff_freeze'] = 0.0
204
+ state['last_factor'] = 1.0
205
+ # Exponential moving average of gradient values
206
+ state['exp_avg'] = torch.zeros_like(p.data)
207
+ # Exponential moving average of squared gradient values
208
+ state['exp_avg_sq'] = torch.zeros_like(p.data)
209
+ state['exp_avg_sq_fresh'] = torch.zeros_like(p.data)
210
+
211
+ if not self.initialize:
212
+ self.lamb_freeze_key = True
213
+
214
+ exp_avg, exp_avg_sq, exp_avg_sq_fresh = state['exp_avg'], state['exp_avg_sq'], state[
215
+ 'exp_avg_sq_fresh']
216
+ beta1, beta2 = group['betas']
217
+ max_coeff = group['max_coeff']
218
+ min_coeff = group['min_coeff']
219
+
220
+ state['step'] += 1
221
+
222
+ if self.lamb_freeze_key is False:
223
+ # warmup stage, baseline Lamb optimization
224
+ exp_avg.mul_(beta1).add_(1 - beta1, grad)
225
+ exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
226
+ if state['step'] == self.freeze_step:
227
+ exp_avg_sq_fresh.data = exp_avg_sq.detach().clone()
228
+ grad = None
229
+ if self.initialize:
230
+ weight_norm = p.data.pow(2).sum().sqrt()
231
+ update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
232
+ if group['weight_decay'] > 0.0:
233
+ update += group['weight_decay'] * p.data
234
+ update_norm = update.pow(2).sum().sqrt()
235
+ lamb_coeff = 1.0
236
+ if weight_norm != 0 and update_norm != 0:
237
+ lamb_coeff = (weight_norm / update_norm).item()
238
+ if lamb_coeff > max_coeff:
239
+ lamb_coeff = max_coeff
240
+ if lamb_coeff < min_coeff:
241
+ lamb_coeff = min_coeff
242
+ if lamb_coeff != 1.0:
243
+ state['lamb_coeff_freeze'] = self.coeff_beta * state['lamb_coeff_freeze'] + (
244
+ 1 - self.coeff_beta) * lamb_coeff
245
+ self.lamb_coeffs.append(lamb_coeff)
246
+ with torch.no_grad():
247
+ p.add_(-group['lr'] * lamb_coeff * update)
248
+ else:
249
+ # compression stage, update each momentum locally, then
250
+ # communicate based on the compressed_allreduce below
251
+ if self.initialize:
252
+ exp_avg.mul_(beta1).add_(1 - beta1, grad)
253
+ exp_avg.mul_(self.state[p]['scaling_coeff'])
254
+ grad = None
255
+
256
+ # init fused momentum
257
+ if len(self.exp_avg_flat) == 0:
258
+ momentum_groups = []
259
+ tensor_size = 0
260
+ for group in self.param_groups:
261
+ for p in group['params']:
262
+ momentum_groups.append(self.state[p]['exp_avg'])
263
+ tensor_size += torch.numel(p.data)
264
+ corrected_tensor_size = tensor_size
265
+ if tensor_size % (self.size * self.divider) != 0:
266
+ difference = ((self.size * self.divider) - (tensor_size % (self.size * self.divider)))
267
+ corrected_tensor_size += difference
268
+ self.dummy_exp_avg[0] = torch.zeros(difference, device=momentum_groups[0].data.device)
269
+ momentum_groups.append(self.dummy_exp_avg[0])
270
+ self.corrected_tensor_sizes.append(corrected_tensor_size)
271
+ self.server_chunk_sizes.append(corrected_tensor_size // self.size)
272
+
273
+ self.exp_avg_flat.append(_flatten_dense_tensors([p.detach().clone() for p in momentum_groups]))
274
+ updated_params = _unflatten_dense_tensors(self.exp_avg_flat[0], momentum_groups)
275
+ for p, q in zip(momentum_groups, updated_params):
276
+ p.data = q.data
277
+
278
+ if self.initialize and len(self.worker_errors) == 0:
279
+ get_accelerator().empty_cache()
280
+ for i in range(len(self.exp_avg_flat)):
281
+ self.worker_errors.append(
282
+ torch.zeros(self.corrected_tensor_sizes[i], device=self.exp_avg_flat[i].device))
283
+ self.server_errors.append(torch.zeros(self.server_chunk_sizes[i], device=self.exp_avg_flat[i].device))
284
+ get_accelerator().empty_cache()
285
+
286
+ if self.lamb_freeze_key:
287
+ if self.size > 1:
288
+ for i in range(len(self.exp_avg_flat)):
289
+ if not self.initialize:
290
+ get_accelerator().empty_cache()
291
+ self.worker_errors.append(
292
+ torch.zeros(self.corrected_tensor_sizes[i], device=self.exp_avg_flat[i].device))
293
+ self.server_errors.append(
294
+ torch.zeros(self.server_chunk_sizes[i], device=self.exp_avg_flat[i].device))
295
+ get_accelerator().empty_cache()
296
+ if dist.get_rank() == 0:
297
+ print("Cupy Buffers Initialized Successfully.")
298
+
299
+ self.comm_backend_handle.compressed_allreduce(self.exp_avg_flat[i], self.worker_errors[0],
300
+ self.server_errors[0], self.deepspeed.local_rank)
301
+
302
+ if dist.get_rank() == 0:
303
+ print('Pop out errors', flush=True)
304
+ del self.worker_errors[:]
305
+ del self.server_errors[:]
306
+ else:
307
+ self.comm_backend_handle.compressed_allreduce(self.exp_avg_flat[i], self.worker_errors[i],
308
+ self.server_errors[i], self.deepspeed.local_rank)
309
+
310
+ if self.lamb_freeze_key and self.initialize:
311
+ for i, group in enumerate(self.param_groups):
312
+ bias_correction = 1 if group['bias_correction'] else 0
313
+
314
+ for j, p in enumerate(group['params']):
315
+ state = self.state[p]
316
+ exp_avg, exp_avg_sq, exp_avg_sq_fresh = state['exp_avg'], state['exp_avg_sq'], state[
317
+ 'exp_avg_sq_fresh']
318
+ beta1, beta2 = group['betas']
319
+ exp_avg.div_(self.state[p]['scaling_coeff'])
320
+ # Because 1-bit compression cannot represent exact zero, it is required to
321
+ # provide a momentum mask for those params that have constant exact zeros in their
322
+ # momentums, otherwise the compression error would keep accumulating.
323
+ # For example, for BERT pre-training seq 128, bert.embeddings.position_embeddings.weight
324
+ # always have exact zeros in its momentum for row 129 to 512, because it only
325
+ # learns up to seq length 128 while the model supports up to 512 seq length.
326
+ # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py about how
327
+ # to add this exp_avg_mask for BERT pre-training.)
328
+ if 'exp_avg_mask' in group:
329
+ if exp_avg.device != group['exp_avg_mask'].device:
330
+ group['exp_avg_mask'] = group['exp_avg_mask'].to(device=exp_avg.device)
331
+ exp_avg.mul_(group['exp_avg_mask'])
332
+
333
+ grad_reconstruct = ((exp_avg - exp_avg_last_step[i][j] * beta1) / (1 - beta1))
334
+ exp_avg_sq_fresh.mul_(beta2).addcmul_(1 - beta2, grad_reconstruct, grad_reconstruct)
335
+ denom = exp_avg_sq.sqrt() + group['eps']
336
+ update_prelim = exp_avg / denom
337
+
338
+ if group['weight_decay'] > 0.0:
339
+ update = update_prelim + group['weight_decay'] * p.data
340
+ else:
341
+ update = update_prelim
342
+
343
+ lamb_coeff = 1.0
344
+ update_norm = update.pow(2).sum().sqrt()
345
+ denom_real = exp_avg_sq_fresh.sqrt() + group['eps']
346
+ factor = (denom / denom_real).max().item()
347
+ if group['weight_decay'] > 0.0:
348
+ update_ratio = min(1.0, (update_prelim.pow(2).sum().sqrt() / update_norm).item())
349
+ factor = factor * update_ratio + (1.0 - update_ratio)
350
+ if factor > self.factor_max:
351
+ factor = self.factor_max
352
+ if factor < self.factor_min:
353
+ factor = self.factor_min
354
+ if factor > state['last_factor'] * (1.0 + self.factor_threshold):
355
+ factor = state['last_factor'] * (1.0 + self.factor_threshold)
356
+ if factor < state['last_factor'] * (1.0 - self.factor_threshold):
357
+ factor = state['last_factor'] * (1.0 - self.factor_threshold)
358
+ state['last_factor'] = factor
359
+ lamb_coeff = state['lamb_coeff_freeze'] * factor
360
+ self.lamb_coeffs.append(lamb_coeff)
361
+ with torch.no_grad():
362
+ p.add_(-group['lr'] * lamb_coeff * update)
363
+ del exp_avg_last_step[:]
364
+ exp_avg_last_step = None
365
+
366
+ if not self.initialize:
367
+ self.lamb_freeze_key = False
368
+ self.initialize = True
369
+ print(f"Finished the initialization step at rank {dist.get_rank()}")
370
+ return loss
371
+
372
+ if self.lamb_freeze_key is False:
373
+ if state['step'] >= self.freeze_step:
374
+ print('OnebitLamb - starting compressed communication')
375
+ self.lamb_freeze_key = True
376
+ if self.using_pipeline:
377
+ self.deepspeed.pipeline_enable_backward_allreduce = False
378
+ else:
379
+ self.deepspeed.enable_backward_allreduce = False
380
+
381
+ return loss
382
+
383
+ def load_state_dict(self, state_dict):
384
+ """
385
+ Overrides load_state_dict() to add special handling when loading checkpoints
386
+ """
387
+ # Because at different stage exp_avg_mask may change (e.g.,
388
+ # BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask
389
+ # in checkpoints but always use the one user provided in training script.
390
+ # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
391
+ # Thus here we keep the exp_avg_mask unchanged when loading checkpoint
392
+ for i, group in enumerate(self.param_groups):
393
+ if 'exp_avg_mask' in group:
394
+ state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask']
395
+ elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]:
396
+ state_dict['param_groups'][i].pop('exp_avg_mask')
397
+ super().load_state_dict(state_dict)
398
+ # need to reset the fused momentum since loading states will break the linking
399
+ del self.exp_avg_flat[:]
400
+ self.dummy_exp_avg.clear()
401
+ del self.corrected_tensor_sizes[:]
402
+ del self.server_chunk_sizes[:]
403
+ if self.state[self.param_groups[0]['params'][0]]['step'] < self.freeze_step:
404
+ if dist.get_rank() == 0:
405
+ print("Checkpoint loaded and OnebitLamb warmup stage starts/continues.")
406
+ if self.lamb_freeze_key is True:
407
+ self.lamb_freeze_key = False
408
+ if self.using_pipeline:
409
+ self.deepspeed.pipeline_enable_backward_allreduce = True
410
+ else:
411
+ self.deepspeed.enable_backward_allreduce = True
412
+ for group in self.param_groups:
413
+ for p in group['params']:
414
+ self.state[p]['lamb_coeff_freeze'] = 0.0
415
+ self.state[p]['last_factor'] = 1.0
416
+ if 'scaling_coeff' in self.state[p]:
417
+ self.state[p].pop('scaling_coeff')
418
+ else:
419
+ if dist.get_rank() == 0:
420
+ print("Checkpoint loaded and OnebitLamb compression stage starts/continues.")
421
+ if self.lamb_freeze_key is False:
422
+ self.lamb_freeze_key = True
423
+ if self.using_pipeline:
424
+ self.deepspeed.pipeline_enable_backward_allreduce = False
425
+ else:
426
+ self.deepspeed.enable_backward_allreduce = False
427
+ # We reset the compression errors when loading checkpoints for 3 reasons:
428
+ # 1) The worker and server error at each GPU are distinct, so in current implementation
429
+ # only rank 0's errors are saved in the checkpoint. Thus we have to reset the errors.
430
+ # If we want to save them correctly we need O(num_gpu*model_size) memory in order to
431
+ # gather all the error, which is a very large memory requirement. It's possible to save
432
+ # them in a distributed way, but it will make the checkpoint saving/loading much more complicated.
433
+ # 2) Even if we are able to save the compression errors correctly, you need to have the
434
+ # exact same number of GPUs in order to load them correctly.
435
+ # 3) We verified on BERT pre-training that occasionally resetting the compression error
436
+ # at checkpoint loading does not affect the convergence.
437
+ # However, please avoid frequent checkpoint loading which could break the error
438
+ # compensation mechanism thus affect the convergence.
439
+ del self.worker_errors[:]
440
+ del self.server_errors[:]
441
+
442
+ def get_lamb_coeffs(self):
443
+ return self.lamb_coeffs
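During the warmup (uncompressed) stage, OnebitLamb.step above applies the standard Lamb trust ratio, weight_norm / update_norm clamped to [min_coeff, max_coeff]; during the compression stage it instead reuses the frozen, exponentially averaged coefficient (lamb_coeff_freeze), rescaled by a bounded factor. A minimal sketch of the warmup-stage coefficient in plain PyTorch follows; toy_lamb_coeff and the toy tensors are illustrative and not part of DeepSpeed's API.

import torch

def toy_lamb_coeff(p, exp_avg, exp_avg_sq, eps=1e-8, weight_decay=0.0,
                   max_coeff=10.0, min_coeff=0.01):
    # Adam-style update direction from the running moments.
    update = exp_avg / (exp_avg_sq.sqrt() + eps)
    if weight_decay > 0.0:
        update = update + weight_decay * p
    weight_norm = p.pow(2).sum().sqrt()
    update_norm = update.pow(2).sum().sqrt()
    if weight_norm == 0 or update_norm == 0:
        return 1.0
    # Trust ratio, clamped to [min_coeff, max_coeff] as in the warmup branch above.
    ratio = (weight_norm / update_norm).item()
    return min(max(ratio, min_coeff), max_coeff)

p = torch.randn(1024) * 5.0      # toy weights
g = torch.randn(1024) * 0.01     # toy gradient stand-in for the first moment
coeff = toy_lamb_coeff(p, exp_avg=g, exp_avg_sq=g * g)
print(coeff)                     # trust ratio clamped to [0.01, 10.0]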
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/zoadam.py ADDED
@@ -0,0 +1,359 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import types
7
+ import torch
8
+ import numpy as np
9
+ from deepspeed.accelerator import get_accelerator
10
+ from deepspeed.runtime.utils import required_torch_version
11
+ from deepspeed import comm as dist
12
+
13
+
14
+ class ZeroOneAdam(torch.optim.Optimizer):
15
+ """Implements the 0/1 Adam algorithm. Currently GPU-only.
16
+ For usage example please see https://www.deepspeed.ai/tutorials/zero-one-adam/
17
+ For technical details please read https://arxiv.org/abs/2202.06009
18
+ Arguments:
19
+ params (iterable): iterable of parameters to optimize or dicts defining
20
+ parameter groups.
21
+ lr (float, optional): learning rate. (default: 1e-3)
22
+ betas (Tuple[float, float], optional): coefficients used for computing
23
+ running averages of gradient and its square. (default: (0.9, 0.999))
24
+ eps (float, optional): term added to the denominator to improve
25
+ numerical stability. (default: 1e-8)
26
+ weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
27
+ var_freeze_step (int, optional): The latest step to update the variance,
28
+ using the notation from https://arxiv.org/abs/2202.06009, it denotes the
29
+ max{i|i in T_v}. Note that this is different from the freeze step from the
30
+ 1-bit Adam. The var_freeze_step is usually the end of the learning rate warmup
31
+ and thus does not require tuning. (default: 100000)
32
+ var_update_scaler (int, optional): The interval to update the variance. Note that
33
+ the update policy for variance follows an exponential rule, where var_update_scaler
34
+ denotes the kappa in the 0/1 Adam paper. (default: 16)
35
+ local_step_scaler (int, optional): The interval to scale the local steps interval
36
+ according to the learning rate policy. (default: 32678)
37
+ local_step_clipper (int, optional): The largest interval for local steps with
38
+ learning rate policy. This corresponds to the variable H in the 0/1 Adam paper.
39
+ (default: 16)
40
+ amsgrad (boolean, optional): whether to use the AMSGrad variant of this
41
+ algorithm from the paper `On the Convergence of Adam and Beyond`_
42
+ (default: False) NOT SUPPORTED in 0/1 Adam!
43
+ eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
44
+ adds eps to the bias-corrected second moment estimate before
45
+ evaluating square root instead of adding it to the square root of
46
+ second moment estimate as in the original paper. (default: False)
47
+ cuda_aware (boolean, required): Set True if the underlying MPI implementation
48
+ supports CUDA-Aware communication. (default: False)
49
+ comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl')
50
+ .. _Adam\\: A Method for Stochastic Optimization:
51
+ https://arxiv.org/abs/1412.6980
52
+ .. _On the Convergence of Adam and Beyond:
53
+ https://openreview.net/forum?id=ryQu7f-RZ
54
+ """
55
+
56
+ def __init__(self,
57
+ params,
58
+ deepspeed=None,
59
+ lr=1e-3,
60
+ bias_correction=True,
61
+ betas=(0.9, 0.999),
62
+ eps=1e-8,
63
+ eps_inside_sqrt=False,
64
+ weight_decay=0.,
65
+ max_grad_norm=0.,
66
+ var_freeze_step=100000,
67
+ var_update_scaler=16,
68
+ local_step_scaler=32678,
69
+ local_step_clipper=16,
70
+ amsgrad=False,
71
+ cuda_aware=False,
72
+ comm_backend_name='nccl'):
73
+
74
+ if amsgrad:
75
+ raise RuntimeError('0/1 Adam does not support the AMSGrad variant.')
76
+
77
+ defaults = dict(lr=lr,
78
+ bias_correction=bias_correction,
79
+ betas=betas,
80
+ eps=eps,
81
+ weight_decay=weight_decay,
82
+ max_grad_norm=max_grad_norm)
83
+
84
+ super(ZeroOneAdam, self).__init__(params, defaults)
85
+ self.eps_mode = 0 if eps_inside_sqrt else 1
86
+ self.deepspeed = deepspeed
87
+ self.initialize = False
88
+ self.cuda_aware = cuda_aware
89
+ self.using_pipeline = False
90
+
91
+ self.var_freeze_step = var_freeze_step
92
+ self.var_update_scaler = var_update_scaler
93
+ self.local_step_scaler = local_step_scaler
94
+ self.local_step_clipper = local_step_clipper
95
+ self.freeze_key = False
96
+ self.reinitial_error_buffer = False
97
+
98
+ self.comm_backend_name = comm_backend_name
99
+
100
+ assert dist.is_initialized(), "Please initialize the torch distributed backend."
101
+ # Empty initializer. Set handle based on the comm backend as follows.
102
+ self.comm_backend_handle = None
103
+ if self.comm_backend_name == 'nccl':
104
+ assert (
105
+ required_torch_version(min_version=1.8)
106
+ ), "Please use torch 1.8 or greater to enable NCCL backend in 0/1 Adam. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend"
107
+ from deepspeed.runtime.comm.nccl import NcclBackend
108
+ self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
109
+ self.comm_backend_handle = NcclBackend(self.deepspeed.mpu)
110
+ elif self.comm_backend_name == 'mpi':
111
+ from deepspeed.runtime.comm.mpi import MpiBackend
112
+ self.comm_backend_handle = MpiBackend(cuda_aware)
113
+ elif self.comm_backend_name == 'hccl':
114
+ from deepspeed.runtime.comm.hccl import HcclBackend
115
+ self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
116
+ self.comm_backend_handle = HcclBackend(self.deepspeed.mpu)
117
+ self.size = self.comm_backend_handle.size
118
+
119
+ self.divider = int(self.size * 8 / np.gcd(self.size, 8))
120
+
121
+ def step(self, closure=None, grads=None):
122
+ """Performs a single optimization step.
123
+ Arguments:
124
+ closure (callable, optional): A closure that reevaluates the model
125
+ and returns the loss.
126
+ grads (list of tensors, optional): weight gradient to use for the
127
+ optimizer update. If gradients have type torch.half, parameters
128
+ are expected to be in type torch.float. (default: None)
129
+ output params (list of tensors, optional): A reduced precision copy
130
+ of the updated weights written out in addition to the regular
131
+ updated weights. Have to be of same type as gradients. (default: None)
132
+ scale (float, optional): factor to divide gradient tensor values
133
+ by before applying to weights. (default: 1)
134
+ """
135
+ loss = None
136
+ if closure is not None:
137
+ loss = closure()
138
+
139
+ if grads is None:
140
+ grads_group = [None] * len(self.param_groups)
141
+ # backward compatibility
142
+ # assuming a list/generator of parameters means a single group
143
+ elif isinstance(grads, types.GeneratorType):
144
+ grads_group = [grads]
145
+ elif type(grads[0]) != list:
146
+ grads_group = [grads]
147
+ else:
148
+ grads_group = grads
149
+
150
+ for group, grads_this_group in zip(self.param_groups, grads_group):
151
+ if grads_this_group is None:
152
+ grads_this_group = [None] * len(group['params'])
153
+
154
+ bias_correction = 1 if group['bias_correction'] else 0
155
+
156
+ for p, grad in zip(group['params'], grads_this_group):
157
+ if p.grad is None and grad is None:
158
+ continue
159
+ if grad is None:
160
+ grad = p.grad.data
161
+ if grad.is_sparse:
162
+ raise RuntimeError('0/1 Adam does not support sparse gradients')
163
+
164
+ state = self.state[p]
165
+
166
+ # State initialization
167
+ if len(state) == 0:
168
+ state['step'] = 0
169
+ # Exponential moving average of gradient values
170
+ state['exp_avg'] = torch.zeros_like(p.data)
171
+ # Exponential moving average of squared gradient values
172
+ state['exp_avg_sq'] = torch.zeros_like(p.data)
173
+
174
+ if not self.initialize or 'worker_error' not in state.keys():
175
+ # Some scalars to help scale the variance update/local step policies
176
+ state['var_interval'] = 1
177
+ state['var_counter'] = 0
178
+ state['local_step_interval'] = 1
179
+ state['local_step_counter'] = 0
180
+ state['lrs'] = 0
181
+ state['tensor_size'] = torch.numel(p.data)
182
+ state['corrected_tensor_size'] = state['tensor_size']
183
+
184
+ if state['tensor_size'] % (self.size * self.divider) != 0:
185
+ state['corrected_tensor_size'] += ((self.size * self.divider) - (state['tensor_size'] %
186
+ (self.size * self.divider)))
187
+ state['server_chunk_size'] = state['corrected_tensor_size'] // self.size
188
+ get_accelerator().empty_cache()
189
+ state['worker_error'] = torch.zeros(state['corrected_tensor_size'], device=p.device)
190
+ state['server_error'] = torch.zeros(state['server_chunk_size'], device=p.device)
191
+ # Accumulation of momentum, i.e., the u variable in the 0/1 Adam paper
192
+ state['momentum_accumulator'] = torch.zeros_like(p.data)
193
+ get_accelerator().empty_cache()
194
+ # self.freeze_key = True
195
+ if not self.initialize and dist.get_rank() == 0:
196
+ print("Cupy Buffers Initialized Successfully.")
197
+
198
+ exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
199
+ comm_buffer = state['momentum_accumulator']
200
+ beta1, beta2 = group['betas']
201
+
202
+ state['step'] += 1
203
+
204
+ if self.initialize:
205
+ if self.freeze_key is False:
206
+ if state['step'] % state['var_interval'] == 0:
207
+ exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
208
+ exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
209
+ else:
210
+ if self.size > 1:
211
+ with torch.no_grad():
212
+ grad_onebit = self.comm_backend_handle.compressed_allreduce(
213
+ grad, state['worker_error'], state['server_error'], self.deepspeed.local_rank)
214
+ if 'exp_avg_mask' in group:
215
+ if grad_onebit.device != group['exp_avg_mask'].device:
216
+ group['exp_avg_mask'] = group['exp_avg_mask'].to(device=grad_onebit.device)
217
+ grad_onebit.mul_(group['exp_avg_mask'])
218
+ exp_avg.mul_(beta1).add_(1 - beta1, grad_onebit)
219
+ else:
220
+ exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
221
+ state['lrs'] += group['lr']
222
+ grad = None
223
+
224
+ if not self.initialize:
225
+ if self.size > 1:
226
+ comm_buffer.set_(
227
+ self.comm_backend_handle.compressed_allreduce(comm_buffer, state['worker_error'],
228
+ state['server_error'],
229
+ self.deepspeed.local_rank))
230
+ if 'exp_avg_mask' in group:
231
+ if comm_buffer.device != group['exp_avg_mask'].device:
232
+ group['exp_avg_mask'] = group['exp_avg_mask'].to(device=comm_buffer.device)
233
+ comm_buffer.mul_(group['exp_avg_mask'])
234
+
235
+ if self.initialize:
236
+ update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
237
+ if group['weight_decay'] > 0.0:
238
+ update += group['weight_decay'] * p.data
239
+ with torch.no_grad():
240
+ p.data.add_(-group['lr'] * update)
241
+ if self.freeze_key is True:
242
+ comm_buffer.add_(-group['lr'] * update)
243
+ if state['step'] % state['local_step_interval'] == 0 and self.freeze_key:
244
+ with torch.no_grad():
245
+ p.data.add_(-1 * comm_buffer)
246
+ comm_buffer.mul_(exp_avg_sq.sqrt() + group['eps'])
247
+ if self.size > 1:
248
+ comm_buffer.copy_(
249
+ self.comm_backend_handle.compressed_allreduce(comm_buffer, state['worker_error'],
250
+ state['server_error'],
251
+ self.deepspeed.local_rank))
252
+ if 'exp_avg_mask' in group:
253
+ if comm_buffer.device != group['exp_avg_mask'].device:
254
+ group['exp_avg_mask'] = group['exp_avg_mask'].to(device=comm_buffer.device)
255
+ comm_buffer.mul_(group['exp_avg_mask'])
256
+ exp_avg.zero_().add_(comm_buffer / state['lrs'], alpha=-1)
257
+ p.data.add_(comm_buffer / (exp_avg_sq.sqrt() + group['eps']))
258
+ comm_buffer.zero_()
259
+
260
+ state['lrs'] = 0
261
+
262
+ # According to 0/1 Adam theory, a fixed variance would allow more accurate estimation of momentum
263
+ # However, in practice, we can also disable the manual freezing of variance, since the interval of
264
+ # updating variance will increase exponentially, so that it has negligible effect on the estimation.
265
+ if self.freeze_key is False:
266
+ if state['step'] % state['var_interval'] == 0:
267
+ state['var_counter'] += 1
268
+ if state['var_counter'] == self.var_update_scaler:
269
+ state['var_counter'] = 0
270
+ state['var_interval'] *= 2
271
+ if (state['step'] + 1) % state['var_interval'] == 0:
272
+ if self.using_pipeline:
273
+ self.deepspeed.pipeline_enable_backward_allreduce = True
274
+ else:
275
+ self.deepspeed.enable_backward_allreduce = True
276
+ else:
277
+ if self.using_pipeline:
278
+ self.deepspeed.pipeline_enable_backward_allreduce = False
279
+ else:
280
+ self.deepspeed.enable_backward_allreduce = False
281
+ else:
282
+ state['local_step_counter'] += 1
283
+ if state['local_step_counter'] == self.local_step_scaler:
284
+ state['local_step_counter'] = 0
285
+ state['local_step_interval'] = min(self.local_step_clipper,
286
+ state['local_step_interval'] * 2)
287
+
288
+ if not self.initialize:
289
+ print('Pop out errors', flush=True)
290
+ self.freeze_key = False
291
+ state.pop('worker_error')
292
+ state.pop('server_error')
293
+
294
+ if not self.initialize:
295
+ self.initialize = True
296
+ print(f"Finished the initialization step at rank {dist.get_rank()}")
297
+ return loss
298
+
299
+ if self.state[self.param_groups[0]['params'][0]]['step'] > self.var_freeze_step:
300
+ self.freeze_key = True
301
+ if self.using_pipeline:
302
+ self.deepspeed.pipeline_enable_backward_allreduce = False
303
+ else:
304
+ self.deepspeed.enable_backward_allreduce = False
305
+
306
+ if self.freeze_key is True and self.reinitial_error_buffer is False:
307
+ # We need to reinitialize the error buffers when local step > 1 since
308
+ # the errors will be logged for different metrics (gradient vs. accumulated momentum).
309
+ for group in self.param_groups:
310
+ for p in group['params']:
311
+ self.state[p]['worker_error'].zero_()
312
+ self.state[p]['server_error'].zero_()
313
+ self.reinitial_error_buffer = True
314
+
315
+ return loss
316
+
317
+ def load_state_dict(self, state_dict):
318
+ """
319
+ Overrides load_state_dict() to add special handling when loading checkpoints
320
+ """
321
+ # Because at different stage exp_avg_mask may change (e.g.,
322
+ # BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask
323
+ # in checkpoints but always use the one user provided in training script.
324
+ # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
325
+ # Thus here we keep the exp_avg_mask unchanged when loading checkpoint
326
+ for i, group in enumerate(self.param_groups):
327
+ if 'exp_avg_mask' in group:
328
+ state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask']
329
+ elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]:
330
+ state_dict['param_groups'][i].pop('exp_avg_mask')
331
+ super().load_state_dict(state_dict)
332
+ if self.state[self.param_groups[0]['params'][0]]['step'] < self.var_freeze_step:
333
+ self.var_freeze_key = False
334
+ if (self.state[self.param_groups[0]['params'][0]]['step'] +
335
+ 1) % self.state[self.param_groups[0]['params'][0]]['var_interval'] == 0:
336
+ if self.using_pipeline:
337
+ self.deepspeed.pipeline_enable_backward_allreduce = True
338
+ else:
339
+ self.deepspeed.enable_backward_allreduce = True
340
+ else:
341
+ if self.using_pipeline:
342
+ self.deepspeed.pipeline_enable_backward_allreduce = False
343
+ else:
344
+ self.deepspeed.enable_backward_allreduce = False
345
+ else:
346
+ self.var_freeze_key = True
347
+ if self.using_pipeline:
348
+ self.deepspeed.pipeline_enable_backward_allreduce = False
349
+ else:
350
+ self.deepspeed.enable_backward_allreduce = False
351
+ self.reinitial_error_buffer = False
352
+ for group in self.param_groups:
353
+ for p in group['params']:
354
+ if 'worker_error' in self.state[p]:
355
+ self.state[p].pop('worker_error')
356
+ if 'server_error' in self.state[p]:
357
+ self.state[p].pop('server_error')
358
+ if 'momentum_accumulator' in self.state[p]:
359
+ self.state[p].pop('momentum_accumulator')
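For orientation, a hedged usage sketch for the 0/1 Adam optimizer added above. In practice the class is built by the DeepSpeed engine from the training config rather than constructed by hand (the constructor asserts that torch.distributed is already initialized and expects the engine as its deepspeed argument). The parameter names below mirror the attributes read in __init__; the model, batch size, config type name, and all values are placeholders, and the exact config schema should be verified against the DeepSpeed documentation.

import torch
import deepspeed

net = torch.nn.Linear(16, 16)  # placeholder model
ds_config = {
    "train_batch_size": 8,          # placeholder value
    "fp16": {"enabled": True},
    "optimizer": {
        "type": "ZeroOneAdam",      # assumed config type name for this optimizer
        "params": {
            "lr": 1e-3,
            "weight_decay": 0.01,
            "var_freeze_step": 1000,
            "var_update_scaler": 16,
            "local_step_scaler": 1000,
            "local_step_clipper": 16,
            "cuda_aware": False,
            "comm_backend_name": "nccl",
        },
    },
}
# deepspeed.initialize builds the engine and instantiates the optimizer wrapper shown above.
engine, optimizer, _, _ = deepspeed.initialize(model=net,
                                               model_parameters=net.parameters(),
                                               config=ds_config)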
evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/fp16/unfused_optimizer.py ADDED
@@ -0,0 +1,427 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ """
6
+ Copyright NVIDIA/apex
7
+ This file is adapted from FP16_Optimizer in NVIDIA/apex
8
+ """
9
+
10
+ from deepspeed.moe.utils import split_params_grads_into_shared_and_expert_params
11
+ import torch
12
+ from torch._utils import _flatten_dense_tensors
13
+
14
+ from deepspeed.runtime import DeepSpeedOptimizer
15
+ from deepspeed.runtime.utils import get_global_norm, CheckOverflow, get_weight_norm, required_torch_version
16
+ from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, MIN_LOSS_SCALE
17
+ from deepspeed.utils import logger
18
+ from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT
19
+ from deepspeed.accelerator import get_accelerator
20
+ from deepspeed import comm as dist
21
+
22
+
23
+ class FP16_UnfusedOptimizer(DeepSpeedOptimizer):
24
+ """
25
+ FP16 Optimizer without weight fusion to support LAMB optimizer
26
+
27
+ For usage example please see, TODO: DeepSpeed V2 Tutorial
28
+ """
29
+
30
+ def __init__(self,
31
+ init_optimizer,
32
+ deepspeed=None,
33
+ static_loss_scale=1.0,
34
+ dynamic_loss_scale=False,
35
+ dynamic_loss_args=None,
36
+ verbose=True,
37
+ mpu=None,
38
+ clip_grad=0.0,
39
+ fused_lamb_legacy=False):
40
+
41
+ self.fused_lamb_legacy = fused_lamb_legacy
42
+ self._global_grad_norm = 0.
43
+
44
+ if dist.get_rank() == 0:
45
+ logger.info(f'Fused Lamb Legacy : {self.fused_lamb_legacy} ')
46
+
47
+ if not get_accelerator().is_available():
48
+ raise SystemError("Cannot use fp16 without accelerator.")
49
+ self.optimizer = init_optimizer
50
+
51
+ # param groups
52
+ self.fp16_groups = []
53
+ self.fp32_groups = []
54
+
55
+ # loop to deal with groups
56
+ for i, param_group in enumerate(self.optimizer.param_groups):
57
+ #fp16 weights that represents the actual model weights
58
+ self.fp16_groups.append(param_group['params'])
59
+
60
+ #creating a fp32 copy of the weights that will be updated first then
61
+ #copied to fp16 weights
62
+ fp32_group = [p.clone().float().detach() for p in param_group['params']]
63
+
64
+ #in case the internal optimizer needs it
65
+ for p in fp32_group:
66
+ p.requires_grad = True
67
+
68
+ #setting the param groups in the optimizer to point to fp32
69
+ #note these are not the weights used by the model
70
+ #the model uses the fp16 version that we added to fp16_group
71
+ self.fp32_groups.append(fp32_group)
72
+ param_group['params'] = self.fp32_groups[i]
73
+
74
+ # we may have a way of fusing dynamic scale. Do not support for now
75
+ if dynamic_loss_scale:
76
+ self.dynamic_loss_scale = True
77
+ self.cur_iter = 0
78
+ self.last_overflow_iter = -1
79
+ self.scale_factor = 2.0
80
+ if dynamic_loss_args is None:
81
+ self.cur_scale = 1.0 * 2**16
82
+ self.scale_window = 1000
83
+ self.min_loss_scale = 0.25
84
+ else:
85
+ self.cur_scale = dynamic_loss_args[INITIAL_LOSS_SCALE]
86
+ self.scale_window = dynamic_loss_args[SCALE_WINDOW]
87
+ self.min_loss_scale = dynamic_loss_args[MIN_LOSS_SCALE]
88
+ else:
89
+ self.dynamic_loss_scale = False
90
+ self.cur_iter = 0
91
+ self.cur_scale = static_loss_scale
92
+
93
+ self.custom_loss_scaler = False
94
+ self.external_loss_scale = None
95
+
96
+ self.verbose = verbose
97
+
98
+ self.clip_grad = clip_grad
99
+ self.norm_type = 2
100
+
101
+ if required_torch_version(max_version=0.4):
102
+ self.clip_grad_norm = torch.nn.utils.clip_grad_norm
103
+ else:
104
+ self.clip_grad_norm = torch.nn.utils.clip_grad_norm_
105
+
106
+ self.mpu = mpu
107
+
108
+ self.overflow = False
109
+ self.overflow_checker = CheckOverflow(self.fp16_groups, mpu=self.mpu, deepspeed=deepspeed)
110
+
111
+ self.initialize_optimizer_states()
112
+
113
+ def zero_grad(self, set_to_none=True):
114
+ """
115
+ Zero FP16 parameter grads.
116
+ """
117
+ # FP32 grad should never exist outside of the step function
118
+ # For speed, set model fp16 grad to None by default
119
+ for group in self.fp16_groups:
120
+ for p in group:
121
+ if set_to_none:
122
+ p.grad = None
123
+ else:
124
+ if p.grad is not None:
125
+ p.grad.detach_()
126
+ p.grad.zero_()
127
+
128
+ def step_fused_lamb(self, closure=None):
129
+ """
130
+ Not supporting closure.
131
+ """
132
+ # First compute norm for all group so we know if there is overflow
133
+ grads_groups_flat = []
134
+ grads_groups = []
135
+ norm_groups = []
136
+ expert_norm_groups = []
137
+ for i, group in enumerate(self.fp16_groups):
138
+ grads = [
139
+ torch.zeros(p.size(), dtype=p.dtype, device=p.device) if p.grad is None else p.grad for p in group
140
+ ]
141
+ grads_groups.append(grads)
142
+ grads_groups_flat.append(_flatten_dense_tensors(grads))
143
+ grads_for_norm, expert_grads_for_norm = split_params_grads_into_shared_and_expert_params(group)
144
+ norm_group_value = 0.0
145
+ if len(grads_for_norm) > 0:
146
+ norm_group_value = get_weight_norm(_flatten_dense_tensors(grads_for_norm), mpu=self.mpu)
147
+ norm_groups.append(norm_group_value)
148
+ expert_norm_group_value = 0.0
149
+ if len(expert_grads_for_norm) > 0:
150
+ expert_norm_group_value = get_weight_norm(_flatten_dense_tensors(expert_grads_for_norm), mpu=self.mpu)
151
+ expert_norm_groups.append(expert_norm_group_value)
152
+
153
+ self.overflow = self.overflow_checker.check_using_norm(norm_groups + expert_norm_groups)
154
+ prev_scale = self.cur_scale
155
+
156
+ self._update_scale(self.overflow)
157
+ if self.overflow:
158
+ if self.verbose:
159
+ logger.info("[deepspeed] fp16 dynamic loss scale overflow! Skipping step. Attempted loss "
160
+ "scale: {}, reducing to {}".format(prev_scale, self.cur_scale))
161
+ return self.overflow
162
+
163
+ self._global_grad_norm = get_global_norm(norm_list=norm_groups)
164
+ combined_scale = self.unscale_and_clip_grads(self._global_grad_norm, apply_scale=False)
165
+ self.optimizer.step(grads=grads_groups, output_params=self.fp16_groups, scale=combined_scale)
166
+
167
+ for fp32_group, fp16_group in zip(self.fp32_groups, self.fp16_groups):
168
+ for idx, (fp32_param, fp16_param) in enumerate(zip(fp32_group, fp16_group)):
169
+
170
+ #remove the fp32 grad
171
+ fp32_param.grad = None
172
+
173
+ #copy data from fp32 to fp16
174
+ fp16_param.data.copy_(fp32_param.data)
175
+
176
+ return self.overflow
177
+
178
+ def set_lr(self, lr):
179
+ """Set the learning rate."""
180
+ for param_group in self.optimizer.param_groups:
181
+ param_group["lr"] = lr
182
+
183
+ def get_lr(self):
184
+ """Return the current learning rate."""
185
+ return self.optimizer.param_groups[0]["lr"]
186
+
187
+ def override_loss_scale(self, loss_scale):
188
+ if loss_scale != self.external_loss_scale:
189
+ logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}')
190
+ self.custom_loss_scaler = True
191
+ self.external_loss_scale = loss_scale
192
+
193
+ def step(self, closure=None):
194
+ """
195
+ Not supporting closure.
196
+ """
197
+
198
+ if self.fused_lamb_legacy:
199
+ return self.step_fused_lamb()
200
+
201
+ self.overflow = self.overflow_checker.check()
202
+ prev_scale = self.cur_scale
203
+
204
+ self._update_scale(self.overflow)
205
+ if self.overflow:
206
+ if self.verbose:
207
+ logger.info("[deepspeed] fp16 dynamic loss scale overflow! Skipping step. Attempted loss "
208
+ "scale: {}, reducing to {}".format(prev_scale, self.cur_scale))
209
+ return self.overflow
210
+
211
+ norm_groups = []
212
+ for i, group in enumerate(self.fp16_groups):
213
+ grads_for_norm, _ = split_params_grads_into_shared_and_expert_params(group)
214
+ norm_group_value = 0.0
215
+ if len(grads_for_norm) > 0:
216
+ norm_group_value = get_weight_norm(grads_for_norm, mpu=self.mpu)
217
+ norm_groups.append(norm_group_value)
218
+
219
+ # copying gradients to fp32 to work with fp32 parameters
220
+ for fp32_param, fp16_param in zip(self.fp32_groups[i], self.fp16_groups[i]):
221
+ if fp16_param.grad is None:
222
+ fp32_param.grad = torch.zeros(fp16_param.size(), dtype=fp32_param.dtype, device=fp32_param.device)
223
+ else:
224
+ fp32_param.grad = fp16_param.grad.to(fp32_param.dtype)
225
+
226
+ self._global_grad_norm = get_global_norm(norm_list=norm_groups)
227
+ self.unscale_and_clip_grads(self._global_grad_norm)
228
+
229
+ self.optimizer.step()
230
+
231
+ for fp32_group, fp16_group in zip(self.fp32_groups, self.fp16_groups):
232
+ for idx, (fp32_param, fp16_param) in enumerate(zip(fp32_group, fp16_group)):
233
+
234
+ #remove the fp32 grad
235
+ fp32_param.grad = None
236
+
237
+ #copy data from fp32 to fp16
238
+ fp16_param.data.copy_(fp32_param.data)
239
+
240
+ return self.overflow
241
+
242
+ def unscale_and_clip_grads(self, total_norm, apply_scale=True):
243
+ # compute combined scale factor for this group
244
+ combined_scale = self.cur_scale
245
+ if self.clip_grad > 0.:
246
+ # norm is in fact norm*scale
247
+ clip = ((total_norm / self.cur_scale) + 1e-6) / self.clip_grad
248
+ if clip > 1:
249
+ combined_scale = clip * self.cur_scale
250
+
251
+ if apply_scale:
252
+ for group in self.fp32_groups:
253
+ for param in group:
254
+ if param.grad is not None:
255
+ param.grad.data.mul_(1. / combined_scale)
256
+
257
+ return combined_scale
258
+
259
+ def backward(self, loss, create_graph=False, retain_graph=False):
260
+ """
261
+ :attr:`backward` performs the following steps:
262
+
263
+ 1. fp32_loss = loss.float()
264
+ 2. scaled_loss = fp32_loss*loss_scale
265
+ 3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves
266
+ """
267
+ if self.custom_loss_scaler:
268
+ scaled_loss = self.external_loss_scale * loss
269
+ scaled_loss.backward()
270
+ else:
271
+ scaled_loss = (loss.float()) * self.cur_scale
272
+ scaled_loss.backward(create_graph=create_graph, retain_graph=retain_graph)
273
+
274
+ def _update_scale(self, skip):
275
+ if self.dynamic_loss_scale:
276
+ prev_scale = self.cur_scale
277
+ if skip:
278
+ self.cur_scale = max(self.cur_scale / self.scale_factor, self.min_loss_scale)
279
+ self.last_overflow_iter = self.cur_iter
280
+ if self.verbose:
281
+ logger.info("Grad overflow on iteration: %s", self.cur_iter)
282
+ logger.info(f"Reducing dynamic loss scale from {prev_scale} to {self.cur_scale}")
283
+ else:
284
+ # Ensure self.scale_window updates since last overflow
285
+ stable_interval = (self.cur_iter - self.last_overflow_iter) - 1
286
+ if (stable_interval > 0) and (stable_interval % self.scale_window == 0):
287
+ self.cur_scale *= self.scale_factor
288
+ if self.verbose:
289
+ logger.info(f"No Grad overflow for {self.scale_window} iterations")
290
+ logger.info(f"Increasing dynamic loss scale from {prev_scale} to {self.cur_scale}")
291
+ else:
292
+ if skip:
293
+ logger.info("Grad overflow on iteration %s", self.cur_iter)
294
+ logger.info("Using static loss scale of %s", self.cur_scale)
295
+ self.cur_iter += 1
296
+ return
297
+
298
+ # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
299
+ def _get_state(self):
300
+ return self.optimizer.state
301
+
302
+ def _set_state(self, value):
303
+ self.optimizer.state = value
304
+
305
+ state = property(_get_state, _set_state)
306
+
307
+ # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
308
+ # (for example, to adjust the learning rate)
309
+ def _get_param_groups(self):
310
+ return self.optimizer.param_groups
311
+
312
+ def _set_param_groups(self, value):
313
+ self.optimizer.param_groups = value
314
+
315
+ param_groups = property(_get_param_groups, _set_param_groups)
316
+
317
+ # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
318
+ def _get_loss_scale(self):
319
+ if self.custom_loss_scaler:
320
+ return self.external_loss_scale
321
+ else:
322
+ return self.cur_scale
323
+
324
+ def _set_loss_scale(self, value):
325
+ self.loss_scaler.cur_scale = value
326
+
327
+ loss_scale = property(_get_loss_scale, _set_loss_scale)
328
+
329
+ def state_dict(self):
330
+ """
331
+ Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
332
+ This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
333
+ of the contained Pytorch optimizer.
334
+ Example::
335
+ checkpoint = {}
336
+ checkpoint['model'] = model.state_dict()
337
+ checkpoint['optimizer'] = optimizer.state_dict()
338
+ torch.save(checkpoint, "saved.pth")
339
+ """
340
+ state_dict = {}
341
+ state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
342
+ state_dict['cur_scale'] = self.cur_scale
343
+ state_dict['cur_iter'] = self.cur_iter
344
+ if state_dict['dynamic_loss_scale']:
345
+ state_dict['last_overflow_iter'] = self.last_overflow_iter
346
+ state_dict['scale_factor'] = self.scale_factor
347
+ state_dict['scale_window'] = self.scale_window
348
+ state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict()
349
+ state_dict['fp32_groups'] = self.fp32_groups
350
+ return state_dict
351
+
352
+ # Refresh fp32 master params from fp16 copies
353
+ def refresh_fp32_params(self):
354
+ for current_group, saved_group in zip(self.fp32_groups, self.fp16_groups):
355
+ for current, saved in zip(current_group, saved_group):
356
+ current.data.copy_(saved.data)
357
+
358
+ def load_state_dict(self, state_dict, load_optimizer_states=True):
359
+ """
360
+ Loads a state_dict created by an earlier call to state_dict().
361
+ If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
362
+ whose parameters in turn came from ``model``, it is expected that the user
363
+ will call ``model.load_state_dict()`` before
364
+ ``fp16_optimizer_instance.load_state_dict()`` is called.
365
+ Example::
366
+ model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half()
367
+ optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
368
+ optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
369
+ ...
370
+ checkpoint = torch.load("saved.pth")
371
+ model.load_state_dict(checkpoint['model'])
372
+ optimizer.load_state_dict(checkpoint['optimizer'])
373
+ """
374
+ # I think it should actually be ok to reload the optimizer before the model.
375
+ self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
376
+ self.cur_scale = state_dict['cur_scale']
377
+ self.cur_iter = state_dict['cur_iter']
378
+ if state_dict['dynamic_loss_scale']:
379
+ self.last_overflow_iter = state_dict['last_overflow_iter']
380
+ self.scale_factor = state_dict['scale_factor']
381
+ self.scale_window = state_dict['scale_window']
382
+
383
+ if load_optimizer_states:
384
+ self.optimizer.load_state_dict(state_dict[OPTIMIZER_STATE_DICT])
385
+ # At this point, the optimizer's references to the model's fp32 parameters are up to date.
386
+ # The optimizer's hyperparameters and internal buffers are also up to date.
387
+ # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
388
+ # out of date. There are two options.
389
+ # 1: Refresh the master params from the model's fp16 params.
390
+ # This requires less storage but incurs precision loss.
391
+ # 2: Save and restore the fp32 master copies separately.
392
+ # We choose option 2.
393
+ #
394
+ # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
395
+ # of their associated parameters, because it's possible those buffers might not exist yet in
396
+ # the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
397
+ # constructed in the same way as the one whose state_dict we are loading, the same master params
398
+ # are guaranteed to exist, so we can just copy_() from the saved master params.
399
+ for current_group, saved_group in zip(self.fp32_groups, state_dict['fp32_groups']):
400
+ for current, saved in zip(current_group, saved_group):
401
+ current.data.copy_(saved.data)
402
+
403
+ def __repr__(self):
404
+ return repr(self.optimizer)
405
+
406
+ def initialize_optimizer_states(self):
407
+ for i, group in enumerate(self.fp16_groups):
408
+ for param in group:
409
+ param.grad = torch.zeros(param.size(),
410
+ dtype=param.dtype,
411
+ device=get_accelerator().current_device_name())
412
+
413
+ for i, group in enumerate(self.fp32_groups):
414
+ for param in group:
415
+ param.grad = torch.zeros(param.size(),
416
+ dtype=param.dtype,
417
+ device=get_accelerator().current_device_name())
418
+
419
+ self.optimizer.step()
420
+
421
+ for i, group in enumerate(self.fp16_groups):
422
+ for param in group:
423
+ param.grad = None
424
+
425
+ for i, group in enumerate(self.fp32_groups):
426
+ for param in group:
427
+ param.grad = None
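A hedged usage sketch for the FP16_UnfusedOptimizer wrapper above. DeepSpeed normally constructs it internally when fp16 is enabled for unfused optimizers such as LAMB; built by hand it needs a CUDA-capable accelerator and an initialized distributed backend, since the constructor queries both. The model, learning rate, and loss-scale values are placeholders.

import torch
import deepspeed
from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer

deepspeed.init_distributed()  # may need MASTER_ADDR/MASTER_PORT outside a launcher

model = torch.nn.Linear(8, 8).cuda().half()  # placeholder fp16 model
base_optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
fp16_optimizer = FP16_UnfusedOptimizer(base_optimizer,
                                       static_loss_scale=128.0,
                                       clip_grad=1.0)

loss = model(torch.randn(4, 8, device='cuda', dtype=torch.half)).sum()
fp16_optimizer.backward(loss)     # scales the loss, then calls backward()
overflow = fp16_optimizer.step()  # unscales/clips grads, steps fp32 masters, copies back to fp16
fp16_optimizer.zero_grad()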
evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ # flake8: noqa
2
+ from .store import Store
3
+ from .errors import StoreError, CredentialsNotFound
4
+ from .constants import *
evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (307 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/__pycache__/constants.cpython-310.pyc ADDED
Binary file (329 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/__pycache__/errors.cpython-310.pyc ADDED
Binary file (960 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/__pycache__/store.cpython-310.pyc ADDED
Binary file (3.21 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.12 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/__pycache__/version.cpython-310.pyc ADDED
Binary file (380 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/constants.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ PROGRAM_PREFIX = 'docker-credential-'
2
+ DEFAULT_LINUX_STORE = 'secretservice'
3
+ DEFAULT_OSX_STORE = 'osxkeychain'
4
+ DEFAULT_WIN32_STORE = 'wincred'
evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/errors.py ADDED
@@ -0,0 +1,25 @@
1
+ class StoreError(RuntimeError):
2
+ pass
3
+
4
+
5
+ class CredentialsNotFound(StoreError):
6
+ pass
7
+
8
+
9
+ class InitializationError(StoreError):
10
+ pass
11
+
12
+
13
+ def process_store_error(cpe, program):
14
+ message = cpe.output.decode('utf-8')
15
+ if 'credentials not found in native keychain' in message:
16
+ return CredentialsNotFound(
17
+ 'No matching credentials in {0}'.format(
18
+ program
19
+ )
20
+ )
21
+ return StoreError(
22
+ 'Credentials store {0} exited with "{1}".'.format(
23
+ program, cpe.output.decode('utf-8').strip()
24
+ )
25
+ )
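A small, self-contained illustration of the error-mapping helper above, using a fabricated CalledProcessError; the command string and output bytes are placeholders.

import subprocess
from dockerpycreds.errors import process_store_error, CredentialsNotFound, StoreError

cpe = subprocess.CalledProcessError(returncode=1,
                                    cmd='docker-credential-pass get',
                                    output=b'credentials not found in native keychain')
err = process_store_error(cpe, 'docker-credential-pass')
assert isinstance(err, CredentialsNotFound)  # matched the "not found" message

cpe = subprocess.CalledProcessError(returncode=1,
                                    cmd='docker-credential-pass get',
                                    output=b'some other failure')
assert isinstance(process_store_error(cpe, 'docker-credential-pass'), StoreError)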
evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/store.py ADDED
@@ -0,0 +1,107 @@
1
+ import json
2
+ import os
3
+ import subprocess
4
+
5
+ import six
6
+
7
+ from . import constants
8
+ from . import errors
9
+ from .utils import create_environment_dict
10
+ from .utils import find_executable
11
+
12
+
13
+ class Store(object):
14
+ def __init__(self, program, environment=None):
15
+ """ Create a store object that acts as an interface to
16
+ perform the basic operations for storing, retrieving
17
+ and erasing credentials using `program`.
18
+ """
19
+ self.program = constants.PROGRAM_PREFIX + program
20
+ self.exe = find_executable(self.program)
21
+ self.environment = environment
22
+ if self.exe is None:
23
+ raise errors.InitializationError(
24
+ '{0} not installed or not available in PATH'.format(
25
+ self.program
26
+ )
27
+ )
28
+
29
+ def get(self, server):
30
+ """ Retrieve credentials for `server`. If no credentials are found,
31
+ a `StoreError` will be raised.
32
+ """
33
+ if not isinstance(server, six.binary_type):
34
+ server = server.encode('utf-8')
35
+ data = self._execute('get', server)
36
+ result = json.loads(data.decode('utf-8'))
37
+
38
+ # docker-credential-pass will return an object for inexistent servers
39
+ # whereas other helpers will exit with returncode != 0. For
40
+ # consistency, if no significant data is returned,
41
+ # raise CredentialsNotFound
42
+ if result['Username'] == '' and result['Secret'] == '':
43
+ raise errors.CredentialsNotFound(
44
+ 'No matching credentials in {}'.format(self.program)
45
+ )
46
+
47
+ return result
48
+
49
+ def store(self, server, username, secret):
50
+ """ Store credentials for `server`. Raises a `StoreError` if an error
51
+ occurs.
52
+ """
53
+ data_input = json.dumps({
54
+ 'ServerURL': server,
55
+ 'Username': username,
56
+ 'Secret': secret
57
+ }).encode('utf-8')
58
+ return self._execute('store', data_input)
59
+
60
+ def erase(self, server):
61
+ """ Erase credentials for `server`. Raises a `StoreError` if an error
62
+ occurs.
63
+ """
64
+ if not isinstance(server, six.binary_type):
65
+ server = server.encode('utf-8')
66
+ self._execute('erase', server)
67
+
68
+ def list(self):
69
+ """ List stored credentials. Requires v0.4.0+ of the helper.
70
+ """
71
+ data = self._execute('list', None)
72
+ return json.loads(data.decode('utf-8'))
73
+
74
+ def _execute(self, subcmd, data_input):
75
+ output = None
76
+ env = create_environment_dict(self.environment)
77
+ try:
78
+ if six.PY3:
79
+ output = subprocess.check_output(
80
+ [self.exe, subcmd], input=data_input, env=env,
81
+ )
82
+ else:
83
+ process = subprocess.Popen(
84
+ [self.exe, subcmd], stdin=subprocess.PIPE,
85
+ stdout=subprocess.PIPE, env=env,
86
+ )
87
+ output, err = process.communicate(data_input)
88
+ if process.returncode != 0:
89
+ raise subprocess.CalledProcessError(
90
+ returncode=process.returncode, cmd='', output=output
91
+ )
92
+ except subprocess.CalledProcessError as e:
93
+ raise errors.process_store_error(e, self.program)
94
+ except OSError as e:
95
+ if e.errno == os.errno.ENOENT:
96
+ raise errors.StoreError(
97
+ '{0} not installed or not available in PATH'.format(
98
+ self.program
99
+ )
100
+ )
101
+ else:
102
+ raise errors.StoreError(
103
+ 'Unexpected OS error "{0}", errno={1}'.format(
104
+ e.strerror, e.errno
105
+ )
106
+ )
107
+ return output
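A short usage sketch for the Store interface defined above. The helper name 'secretservice' is only an example; it must correspond to a docker-credential-secretservice binary on PATH, otherwise the constructor raises InitializationError. The registry URL and credentials are placeholders.

from dockerpycreds import Store, CredentialsNotFound

store = Store('secretservice')  # runs the docker-credential-secretservice helper
store.store(server='https://index.docker.io/v1/',
            username='example-user',
            secret='example-token')
try:
    creds = store.get('https://index.docker.io/v1/')
    print(creds['Username'])
except CredentialsNotFound:
    print('no credentials stored for this registry')
store.erase('https://index.docker.io/v1/')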
evalkit_cambrian/lib/python3.10/site-packages/dockerpycreds/utils.py ADDED
@@ -0,0 +1,38 @@
1
+ import distutils.spawn
2
+ import os
3
+ import sys
4
+
5
+
6
+ def find_executable(executable, path=None):
7
+ """
8
+ As distutils.spawn.find_executable, but on Windows, look up
9
+ every extension declared in PATHEXT instead of just `.exe`
10
+ """
11
+ if sys.platform != 'win32':
12
+ return distutils.spawn.find_executable(executable, path)
13
+
14
+ if path is None:
15
+ path = os.environ['PATH']
16
+
17
+ paths = path.split(os.pathsep)
18
+ extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
19
+ base, ext = os.path.splitext(executable)
20
+
21
+ if not os.path.isfile(executable):
22
+ for p in paths:
23
+ for ext in extensions:
24
+ f = os.path.join(p, base + ext)
25
+ if os.path.isfile(f):
26
+ return f
27
+ return None
28
+ else:
29
+ return executable
30
+
31
+
32
+ def create_environment_dict(overrides):
33
+ """
34
+ Create and return a copy of os.environ with the specified overrides
35
+ """
36
+ result = os.environ.copy()
37
+ result.update(overrides or {})
38
+ return result
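Finally, a quick sketch exercising the two helpers in utils.py; the program name and environment variable are arbitrary examples.

import os
from dockerpycreds.utils import find_executable, create_environment_dict

# On Windows this also tries every extension listed in PATHEXT, not just .exe.
print(find_executable('python'))

# Returns a copy of os.environ with the overrides applied; os.environ itself is untouched.
env = create_environment_dict({'EXAMPLE_VAR': '1'})
assert env['EXAMPLE_VAR'] == '1'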