dacorvo (HF Staff) committed on
Commit cde9e01 · verified · 1 Parent(s): 8b52b83

Synchronizing local compiler cache.
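For context: commits like this one are produced by pushing a local Neuron compiler cache directory to a Hub cache repository. A minimal sketch of the equivalent upload using the public huggingface_hub API (the repository id and local path below are placeholders, not values taken from this commit):

```python
# Hypothetical sketch only: optimum-neuron performs this synchronization
# internally; this just illustrates the equivalent folder upload to the Hub.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    repo_id="my-org/optimum-neuron-cache",        # placeholder cache repository
    folder_path="/var/tmp/neuron-compile-cache",  # placeholder local cache directory
    commit_message="Synchronizing local compiler cache.",
)
```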

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +16 -0
  2. neuronxcc-2.19.8089.0+8ab9f450/0_REGISTRY/0.3.1.dev6/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/5c9a7ddb696911cf7f93.json +221 -0
  3. neuronxcc-2.19.8089.0+8ab9f450/0_REGISTRY/0.3.1.dev6/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/f9d1010b22a4e5da4bd5.json +221 -0
  4. neuronxcc-2.19.8089.0+8ab9f450/0_REGISTRY/0.3.1.dev6/qwen3_moe/Qwen/Qwen3-30B-A3B-Instruct-2507/0dd3b941d43fb01b72a8.json +66 -0
  5. neuronxcc-2.19.8089.0+8ab9f450/0_REGISTRY/0.3.1.dev6/qwen3_moe/Qwen/Qwen3-30B-A3B-Instruct-2507/5c776e5f1eb55e76831a.json +66 -0
  6. neuronxcc-2.19.8089.0+8ab9f450/0_REGISTRY/0.3.1.dev6/t5/hf-internal-testing/tiny-random-t5/15f7e443873c3474a6aa.json +91 -0
  7. neuronxcc-2.19.8089.0+8ab9f450/0_REGISTRY/0.3.1.dev6/t5/hf-internal-testing/tiny-random-t5/5d5dadc2c2138bf8ab44.json +91 -0
  8. neuronxcc-2.19.8089.0+8ab9f450/0_REGISTRY/0.3.1.dev6/t5/hf-internal-testing/tiny-random-t5/d83e395611ddc3a29d9f.json +91 -0
  9. neuronxcc-2.19.8089.0+8ab9f450/0_REGISTRY/0.3.1.dev6/t5/hf-internal-testing/tiny-random-t5/e57cbde649dde732937d.json +91 -0
  10. neuronxcc-2.19.8089.0+8ab9f450/MODULE_02da278f167522769f43+253d6470/compile_flags.json +1 -0
  11. neuronxcc-2.19.8089.0+8ab9f450/MODULE_02da278f167522769f43+253d6470/model.done +0 -0
  12. neuronxcc-2.19.8089.0+8ab9f450/MODULE_02da278f167522769f43+253d6470/model.hlo_module.pb +3 -0
  13. neuronxcc-2.19.8089.0+8ab9f450/MODULE_02da278f167522769f43+253d6470/model.neff +3 -0
  14. neuronxcc-2.19.8089.0+8ab9f450/MODULE_15f7e443873c3474a6aa/decoder/model.neuron +2 -2
  15. neuronxcc-2.19.8089.0+8ab9f450/MODULE_15f7e443873c3474a6aa/encoder/model.neuron +2 -2
  16. neuronxcc-2.19.8089.0+8ab9f450/MODULE_16364384937141820797+e30acd3a/model.neff +0 -0
  17. neuronxcc-2.19.8089.0+8ab9f450/MODULE_234b5c34641e778cc5d2+ed72d204/compile_flags.json +1 -0
  18. neuronxcc-2.19.8089.0+8ab9f450/MODULE_234b5c34641e778cc5d2+ed72d204/model.hlo_module.pb +3 -0
  19. neuronxcc-2.19.8089.0+8ab9f450/MODULE_234b5c34641e778cc5d2+ed72d204/model.log +3 -0
  20. neuronxcc-2.19.8089.0+8ab9f450/MODULE_31248f3f93aafaf6dbaa+cd3419b6/compile_flags.json +1 -0
  21. neuronxcc-2.19.8089.0+8ab9f450/MODULE_31248f3f93aafaf6dbaa+cd3419b6/model.done +0 -0
  22. neuronxcc-2.19.8089.0+8ab9f450/MODULE_31248f3f93aafaf6dbaa+cd3419b6/model.hlo_module.pb +3 -0
  23. neuronxcc-2.19.8089.0+8ab9f450/MODULE_31248f3f93aafaf6dbaa+cd3419b6/model.neff +3 -0
  24. neuronxcc-2.19.8089.0+8ab9f450/MODULE_31248f3f93aafaf6dbaa+cd3419b6/wrapped_neff.hlo +3 -0
  25. neuronxcc-2.19.8089.0+8ab9f450/MODULE_3cbaae1c02abefd7f494+a9d440f5/compile_flags.json +1 -0
  26. neuronxcc-2.19.8089.0+8ab9f450/MODULE_3cbaae1c02abefd7f494+a9d440f5/model.done +0 -0
  27. neuronxcc-2.19.8089.0+8ab9f450/MODULE_3cbaae1c02abefd7f494+a9d440f5/model.hlo_module.pb +3 -0
  28. neuronxcc-2.19.8089.0+8ab9f450/MODULE_3cbaae1c02abefd7f494+a9d440f5/model.neff +3 -0
  29. neuronxcc-2.19.8089.0+8ab9f450/MODULE_3cbaae1c02abefd7f494+a9d440f5/wrapped_neff.hlo +3 -0
  30. neuronxcc-2.19.8089.0+8ab9f450/MODULE_43116d82b5805ba3ae20+cd3419b6/compile_flags.json +1 -0
  31. neuronxcc-2.19.8089.0+8ab9f450/MODULE_43116d82b5805ba3ae20+cd3419b6/model.done +0 -0
  32. neuronxcc-2.19.8089.0+8ab9f450/MODULE_43116d82b5805ba3ae20+cd3419b6/model.hlo_module.pb +3 -0
  33. neuronxcc-2.19.8089.0+8ab9f450/MODULE_43116d82b5805ba3ae20+cd3419b6/model.neff +3 -0
  34. neuronxcc-2.19.8089.0+8ab9f450/MODULE_43116d82b5805ba3ae20+cd3419b6/wrapped_neff.hlo +3 -0
  35. neuronxcc-2.19.8089.0+8ab9f450/MODULE_4ab214a26135c9602b57+ed72d204/compile_flags.json +1 -0
  36. neuronxcc-2.19.8089.0+8ab9f450/MODULE_4ab214a26135c9602b57+ed72d204/model.done +0 -0
  37. neuronxcc-2.19.8089.0+8ab9f450/MODULE_4ab214a26135c9602b57+ed72d204/model.hlo_module.pb +3 -0
  38. neuronxcc-2.19.8089.0+8ab9f450/MODULE_4ab214a26135c9602b57+ed72d204/model.neff +3 -0
  39. neuronxcc-2.19.8089.0+8ab9f450/MODULE_5d5dadc2c2138bf8ab44/decoder/model.neuron +2 -2
  40. neuronxcc-2.19.8089.0+8ab9f450/MODULE_5d5dadc2c2138bf8ab44/encoder/model.neuron +2 -2
  41. neuronxcc-2.19.8089.0+8ab9f450/MODULE_6ddd5b729b9e4ecede70+a9d440f5/compile_flags.json +1 -0
  42. neuronxcc-2.19.8089.0+8ab9f450/MODULE_6ddd5b729b9e4ecede70+a9d440f5/model.done +0 -0
  43. neuronxcc-2.19.8089.0+8ab9f450/MODULE_6ddd5b729b9e4ecede70+a9d440f5/model.hlo_module.pb +3 -0
  44. neuronxcc-2.19.8089.0+8ab9f450/MODULE_6ddd5b729b9e4ecede70+a9d440f5/model.neff +3 -0
  45. neuronxcc-2.19.8089.0+8ab9f450/MODULE_6ddd5b729b9e4ecede70+a9d440f5/wrapped_neff.hlo +3 -0
  46. neuronxcc-2.19.8089.0+8ab9f450/MODULE_7557177e60b224d7d0cc+253d6470/compile_flags.json +1 -0
  47. neuronxcc-2.19.8089.0+8ab9f450/MODULE_7557177e60b224d7d0cc+253d6470/model.hlo_module.pb +3 -0
  48. neuronxcc-2.19.8089.0+8ab9f450/MODULE_7557177e60b224d7d0cc+253d6470/model.log +3 -0
  49. neuronxcc-2.19.8089.0+8ab9f450/MODULE_869715862416998377+e30acd3a/model.neff +0 -0
  50. neuronxcc-2.19.8089.0+8ab9f450/MODULE_bbb0c02d6e06f337e549+ed72d204/compile_flags.json +1 -0
.gitattributes CHANGED
@@ -10936,3 +10936,19 @@ neuronxcc-2.19.8089.0+8ab9f450/MODULE_aa6b654bd981334a723e+a9d440f5/wrapped_neff
  neuronxcc-2.19.8089.0+8ab9f450/MODULE_5cdc2024ee2e6c48bd40+a9d440f5/model.neff filter=lfs diff=lfs merge=lfs -text
  neuronxcc-2.19.8089.0+8ab9f450/MODULE_5cdc2024ee2e6c48bd40+a9d440f5/wrapped_neff.hlo filter=lfs diff=lfs merge=lfs -text
  neuronxcc-2.19.8089.0+8ab9f450/MODULE_6ca134f082760cd304e7+ed72d204/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.19.8089.0+8ab9f450/MODULE_02da278f167522769f43+253d6470/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.19.8089.0+8ab9f450/MODULE_31248f3f93aafaf6dbaa+cd3419b6/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.19.8089.0+8ab9f450/MODULE_31248f3f93aafaf6dbaa+cd3419b6/wrapped_neff.hlo filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.19.8089.0+8ab9f450/MODULE_3cbaae1c02abefd7f494+a9d440f5/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.19.8089.0+8ab9f450/MODULE_3cbaae1c02abefd7f494+a9d440f5/wrapped_neff.hlo filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.19.8089.0+8ab9f450/MODULE_43116d82b5805ba3ae20+cd3419b6/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.19.8089.0+8ab9f450/MODULE_43116d82b5805ba3ae20+cd3419b6/wrapped_neff.hlo filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.19.8089.0+8ab9f450/MODULE_4ab214a26135c9602b57+ed72d204/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.19.8089.0+8ab9f450/MODULE_6ddd5b729b9e4ecede70+a9d440f5/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.19.8089.0+8ab9f450/MODULE_6ddd5b729b9e4ecede70+a9d440f5/wrapped_neff.hlo filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.19.8089.0+8ab9f450/MODULE_bbb0c02d6e06f337e549+ed72d204/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.19.8089.0+8ab9f450/MODULE_d0af99bdae3af4d95a22+a9d440f5/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.19.8089.0+8ab9f450/MODULE_d0af99bdae3af4d95a22+a9d440f5/wrapped_neff.hlo filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.19.8089.0+8ab9f450/MODULE_e5e262df0f4573dfd367+253d6470/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.19.8089.0+8ab9f450/MODULE_efa7fe965caa79aca41c+cd3419b6/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.19.8089.0+8ab9f450/MODULE_efa7fe965caa79aca41c+cd3419b6/wrapped_neff.hlo filter=lfs diff=lfs merge=lfs -text
neuronxcc-2.19.8089.0+8ab9f450/0_REGISTRY/0.3.1.dev6/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/5c9a7ddb696911cf7f93.json ADDED
@@ -0,0 +1,221 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+ "_task": "text-generation",
+ "attention_bias": false,
+ "attention_chunk_size": 8192,
+ "attention_dropout": 0.0,
+ "attn_scale": 0.1,
+ "attn_temperature_tuning": true,
+ "floor_scale": 8192,
+ "for_llm_compressor": false,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "initializer_range": 0.02,
+ "interleave_moe_layer_step": 1,
+ "intermediate_size": 8192,
+ "intermediate_size_mlp": 16384,
+ "layer_types": [
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 10485760,
+ "model_type": "llama4_text",
+ "moe_layers": [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ 24,
+ 25,
+ 26,
+ 27,
+ 28,
+ 29,
+ 30,
+ 31,
+ 32,
+ 33,
+ 34,
+ 35,
+ 36,
+ 37,
+ 38,
+ 39,
+ 40,
+ 41,
+ 42,
+ 43,
+ 44,
+ 45,
+ 46,
+ 47
+ ],
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 4,
+ "capacity_factor": null,
+ "checkpoint_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+ "checkpoint_revision": "92f3b1597a195b523d8d9e5700e57e4fbb8f20d3",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": false,
+ "glu_mlp": true,
+ "local_ranks_size": 16,
+ "logical_nc_config": 1,
+ "max_batch_size": 4,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.19.8089.0+8ab9f450",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.3.1.dev6",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": null,
+ "torch_dtype": "bfloat16",
+ "tp_degree": 16
+ },
+ "no_rope_layers": [
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0
+ ],
+ "num_attention_heads": 40,
+ "num_experts_per_tok": 1,
+ "num_hidden_layers": 48,
+ "num_key_value_heads": 8,
+ "num_local_experts": 16,
+ "output_router_logits": false,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "factor": 16.0,
+ "high_freq_factor": 1.0,
+ "low_freq_factor": 1.0,
+ "original_max_position_embeddings": 8192,
+ "rope_type": "llama3"
+ },
+ "rope_theta": 500000.0,
+ "router_aux_loss_coef": 0.001,
+ "router_jitter_noise": 0.0,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_qk_norm": true,
+ "vocab_size": 202048
+ }
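This registry entry and the next one (f9d1010b22a4e5da4bd5.json) differ only in the neuron.on_device_sampling flag, which is why they land under different 20-hex-character filenames. A purely illustrative sketch of how such a filename could be derived from the serialized entry (this is not the actual optimum-neuron hashing scheme):

```python
# Hypothetical illustration: hash the canonical JSON form of a cache entry and
# keep a short hex prefix as the registry filename. Not the real algorithm.
import hashlib
import json

def registry_key(entry: dict, length: int = 20) -> str:
    canonical = json.dumps(entry, sort_keys=True, separators=(",", ":"))
    return hashlib.sha256(canonical.encode("utf-8")).hexdigest()[:length]

base = {"_model_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct", "on_device_sampling": False}
variant = {**base, "on_device_sampling": True}
print(registry_key(base) != registry_key(variant))  # True: two distinct cache entries
```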
neuronxcc-2.19.8089.0+8ab9f450/0_REGISTRY/0.3.1.dev6/llama4_text/meta-llama/Llama-4-Scout-17B-16E-Instruct/f9d1010b22a4e5da4bd5.json ADDED
@@ -0,0 +1,221 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+ "_task": "text-generation",
+ "attention_bias": false,
+ "attention_chunk_size": 8192,
+ "attention_dropout": 0.0,
+ "attn_scale": 0.1,
+ "attn_temperature_tuning": true,
+ "floor_scale": 8192,
+ "for_llm_compressor": false,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "initializer_range": 0.02,
+ "interleave_moe_layer_step": 1,
+ "intermediate_size": 8192,
+ "intermediate_size_mlp": 16384,
+ "layer_types": [
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "chunked_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 10485760,
+ "model_type": "llama4_text",
+ "moe_layers": [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ 24,
+ 25,
+ 26,
+ 27,
+ 28,
+ 29,
+ 30,
+ 31,
+ 32,
+ 33,
+ 34,
+ 35,
+ 36,
+ 37,
+ 38,
+ 39,
+ 40,
+ 41,
+ 42,
+ 43,
+ 44,
+ 45,
+ 46,
+ 47
+ ],
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 4,
+ "capacity_factor": null,
+ "checkpoint_id": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+ "checkpoint_revision": "92f3b1597a195b523d8d9e5700e57e4fbb8f20d3",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": false,
+ "glu_mlp": true,
+ "local_ranks_size": 16,
+ "logical_nc_config": 1,
+ "max_batch_size": 4,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.19.8089.0+8ab9f450",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.3.1.dev6",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": null,
+ "torch_dtype": "bfloat16",
+ "tp_degree": 16
+ },
+ "no_rope_layers": [
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0,
+ 1,
+ 1,
+ 1,
+ 0
+ ],
+ "num_attention_heads": 40,
+ "num_experts_per_tok": 1,
+ "num_hidden_layers": 48,
+ "num_key_value_heads": 8,
+ "num_local_experts": 16,
+ "output_router_logits": false,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "factor": 16.0,
+ "high_freq_factor": 1.0,
+ "low_freq_factor": 1.0,
+ "original_max_position_embeddings": 8192,
+ "rope_type": "llama3"
+ },
+ "rope_theta": 500000.0,
+ "router_aux_loss_coef": 0.001,
+ "router_jitter_noise": 0.0,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_qk_norm": true,
+ "vocab_size": 202048
+ }
neuronxcc-2.19.8089.0+8ab9f450/0_REGISTRY/0.3.1.dev6/qwen3_moe/Qwen/Qwen3-30B-A3B-Instruct-2507/0dd3b941d43fb01b72a8.json ADDED
@@ -0,0 +1,66 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-30B-A3B-Instruct-2507",
+ "_task": "text-generation",
+ "architectures": [
+ "Qwen3MoeForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "decoder_sparse_step": 1,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 6144,
+ "max_position_embeddings": 262144,
+ "max_window_layers": 48,
+ "mlp_only_layers": [],
+ "model_type": "qwen3_moe",
+ "moe_intermediate_size": 768,
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 4,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-30B-A3B-Instruct-2507",
+ "checkpoint_revision": "61082d4deaa4785f64943b443cbc2b5de7524fad",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": false,
+ "glu_mlp": true,
+ "local_ranks_size": 16,
+ "logical_nc_config": 1,
+ "max_batch_size": 4,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.19.8089.0+8ab9f450",
+ "on_device_sampling": false,
+ "optimum_neuron_version": "0.3.1.dev6",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": null,
+ "torch_dtype": "bfloat16",
+ "tp_degree": 16
+ },
+ "norm_topk_prob": true,
+ "num_attention_heads": 32,
+ "num_experts": 128,
+ "num_experts_per_tok": 8,
+ "num_hidden_layers": 48,
+ "num_key_value_heads": 4,
+ "output_router_logits": false,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000000,
+ "router_aux_loss_coef": 0.001,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ }
neuronxcc-2.19.8089.0+8ab9f450/0_REGISTRY/0.3.1.dev6/qwen3_moe/Qwen/Qwen3-30B-A3B-Instruct-2507/5c776e5f1eb55e76831a.json ADDED
@@ -0,0 +1,66 @@
+ {
+ "_entry_class": "SingleModelCacheEntry",
+ "_model_id": "Qwen/Qwen3-30B-A3B-Instruct-2507",
+ "_task": "text-generation",
+ "architectures": [
+ "Qwen3MoeForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "decoder_sparse_step": 1,
+ "head_dim": 128,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 6144,
+ "max_position_embeddings": 262144,
+ "max_window_layers": 48,
+ "mlp_only_layers": [],
+ "model_type": "qwen3_moe",
+ "moe_intermediate_size": 768,
+ "neuron": {
+ "_serialized_key": "NxDNeuronConfig",
+ "batch_size": 4,
+ "capacity_factor": null,
+ "checkpoint_id": "Qwen/Qwen3-30B-A3B-Instruct-2507",
+ "checkpoint_revision": "61082d4deaa4785f64943b443cbc2b5de7524fad",
+ "continuous_batching": true,
+ "enable_bucketing": false,
+ "ep_degree": 1,
+ "fused_qkv": false,
+ "glu_mlp": true,
+ "local_ranks_size": 16,
+ "logical_nc_config": 1,
+ "max_batch_size": 4,
+ "max_context_length": 4096,
+ "max_topk": 256,
+ "n_active_tokens": 4096,
+ "neuronxcc_version": "2.19.8089.0+8ab9f450",
+ "on_device_sampling": true,
+ "optimum_neuron_version": "0.3.1.dev6",
+ "output_logits": false,
+ "pp_degree": 1,
+ "sequence_length": 4096,
+ "speculation_length": 0,
+ "start_rank_id": 0,
+ "target": null,
+ "torch_dtype": "bfloat16",
+ "tp_degree": 16
+ },
+ "norm_topk_prob": true,
+ "num_attention_heads": 32,
+ "num_experts": 128,
+ "num_experts_per_tok": 8,
+ "num_hidden_layers": 48,
+ "num_key_value_heads": 4,
+ "output_router_logits": false,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000000,
+ "router_aux_loss_coef": 0.001,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ }
neuronxcc-2.19.8089.0+8ab9f450/0_REGISTRY/0.3.1.dev6/t5/hf-internal-testing/tiny-random-t5/15f7e443873c3474a6aa.json ADDED
@@ -0,0 +1,91 @@
+ {
+ "_entry_class": "MultiModelCacheEntry",
+ "_model_id": "hf-internal-testing/tiny-random-t5",
+ "_task": null,
+ "decoder": {
+ "classifier_dropout": 0.0,
+ "d_ff": 37,
+ "d_kv": 8,
+ "d_model": 32,
+ "decoder_start_token_id": 0,
+ "dense_act_fn": "relu",
+ "dropout_rate": 0.1,
+ "feed_forward_proj": "relu",
+ "gradient_checkpointing": false,
+ "initializer_factor": 0.002,
+ "is_encoder_decoder": true,
+ "is_gated_act": false,
+ "layer_norm_epsilon": 1e-06,
+ "model_type": "t5",
+ "neuron": {
+ "auto_cast": "matmul",
+ "auto_cast_type": "bf16",
+ "compiler_type": "neuronx-cc",
+ "compiler_version": "2.19.8089.0+8ab9f450",
+ "disable_fallback": false,
+ "disable_fast_relayout": false,
+ "dynamic_batch_size": false,
+ "float_dtype": "fp32",
+ "inline_weights_to_neff": true,
+ "int_dtype": "int64",
+ "optlevel": "2",
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "static_batch_size": 1,
+ "static_num_beams": 4,
+ "static_sequence_length": 64,
+ "task": "text2text-generation",
+ "tensor_parallel_size": 1
+ },
+ "num_decoder_layers": 5,
+ "num_heads": 4,
+ "num_layers": 5,
+ "relative_attention_max_distance": 128,
+ "relative_attention_num_buckets": 8,
+ "use_cache": true,
+ "vocab_size": 1103
+ },
+ "encoder": {
+ "classifier_dropout": 0.0,
+ "d_ff": 37,
+ "d_kv": 8,
+ "d_model": 32,
+ "decoder_start_token_id": 0,
+ "dense_act_fn": "relu",
+ "dropout_rate": 0.1,
+ "feed_forward_proj": "relu",
+ "gradient_checkpointing": false,
+ "initializer_factor": 0.002,
+ "is_encoder_decoder": true,
+ "is_gated_act": false,
+ "layer_norm_epsilon": 1e-06,
+ "model_type": "t5",
+ "neuron": {
+ "auto_cast": "matmul",
+ "auto_cast_type": "bf16",
+ "compiler_type": "neuronx-cc",
+ "compiler_version": "2.19.8089.0+8ab9f450",
+ "disable_fallback": false,
+ "disable_fast_relayout": false,
+ "dynamic_batch_size": false,
+ "float_dtype": "fp32",
+ "inline_weights_to_neff": true,
+ "int_dtype": "int64",
+ "optlevel": "2",
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "static_batch_size": 1,
+ "static_num_beams": 4,
+ "static_sequence_length": 64,
+ "task": "text2text-generation",
+ "tensor_parallel_size": 1
+ },
+ "num_decoder_layers": 5,
+ "num_heads": 4,
+ "num_layers": 5,
+ "relative_attention_max_distance": 128,
+ "relative_attention_num_buckets": 8,
+ "use_cache": true,
+ "vocab_size": 1103
+ }
+ }
neuronxcc-2.19.8089.0+8ab9f450/0_REGISTRY/0.3.1.dev6/t5/hf-internal-testing/tiny-random-t5/5d5dadc2c2138bf8ab44.json ADDED
@@ -0,0 +1,91 @@
+ {
+ "_entry_class": "MultiModelCacheEntry",
+ "_model_id": "hf-internal-testing/tiny-random-t5",
+ "_task": null,
+ "decoder": {
+ "classifier_dropout": 0.0,
+ "d_ff": 37,
+ "d_kv": 8,
+ "d_model": 32,
+ "decoder_start_token_id": 0,
+ "dense_act_fn": "relu",
+ "dropout_rate": 0.1,
+ "feed_forward_proj": "relu",
+ "gradient_checkpointing": false,
+ "initializer_factor": 0.002,
+ "is_encoder_decoder": true,
+ "is_gated_act": false,
+ "layer_norm_epsilon": 1e-06,
+ "model_type": "t5",
+ "neuron": {
+ "auto_cast": "matmul",
+ "auto_cast_type": "bf16",
+ "compiler_type": "neuronx-cc",
+ "compiler_version": "2.19.8089.0+8ab9f450",
+ "disable_fallback": false,
+ "disable_fast_relayout": false,
+ "dynamic_batch_size": false,
+ "float_dtype": "fp32",
+ "inline_weights_to_neff": true,
+ "int_dtype": "int64",
+ "optlevel": "2",
+ "output_attentions": true,
+ "output_hidden_states": true,
+ "static_batch_size": 1,
+ "static_num_beams": 1,
+ "static_sequence_length": 64,
+ "task": "text2text-generation",
+ "tensor_parallel_size": 1
+ },
+ "num_decoder_layers": 5,
+ "num_heads": 4,
+ "num_layers": 5,
+ "relative_attention_max_distance": 128,
+ "relative_attention_num_buckets": 8,
+ "use_cache": true,
+ "vocab_size": 1103
+ },
+ "encoder": {
+ "classifier_dropout": 0.0,
+ "d_ff": 37,
+ "d_kv": 8,
+ "d_model": 32,
+ "decoder_start_token_id": 0,
+ "dense_act_fn": "relu",
+ "dropout_rate": 0.1,
+ "feed_forward_proj": "relu",
+ "gradient_checkpointing": false,
+ "initializer_factor": 0.002,
+ "is_encoder_decoder": true,
+ "is_gated_act": false,
+ "layer_norm_epsilon": 1e-06,
+ "model_type": "t5",
+ "neuron": {
+ "auto_cast": "matmul",
+ "auto_cast_type": "bf16",
+ "compiler_type": "neuronx-cc",
+ "compiler_version": "2.19.8089.0+8ab9f450",
+ "disable_fallback": false,
+ "disable_fast_relayout": false,
+ "dynamic_batch_size": false,
+ "float_dtype": "fp32",
+ "inline_weights_to_neff": true,
+ "int_dtype": "int64",
+ "optlevel": "2",
+ "output_attentions": true,
+ "output_hidden_states": true,
+ "static_batch_size": 1,
+ "static_num_beams": 1,
+ "static_sequence_length": 64,
+ "task": "text2text-generation",
+ "tensor_parallel_size": 1
+ },
+ "num_decoder_layers": 5,
+ "num_heads": 4,
+ "num_layers": 5,
+ "relative_attention_max_distance": 128,
+ "relative_attention_num_buckets": 8,
+ "use_cache": true,
+ "vocab_size": 1103
+ }
+ }
neuronxcc-2.19.8089.0+8ab9f450/0_REGISTRY/0.3.1.dev6/t5/hf-internal-testing/tiny-random-t5/d83e395611ddc3a29d9f.json ADDED
@@ -0,0 +1,91 @@
+ {
+ "_entry_class": "MultiModelCacheEntry",
+ "_model_id": "hf-internal-testing/tiny-random-t5",
+ "_task": null,
+ "decoder": {
+ "classifier_dropout": 0.0,
+ "d_ff": 37,
+ "d_kv": 8,
+ "d_model": 32,
+ "decoder_start_token_id": 0,
+ "dense_act_fn": "relu",
+ "dropout_rate": 0.1,
+ "feed_forward_proj": "relu",
+ "gradient_checkpointing": false,
+ "initializer_factor": 0.002,
+ "is_encoder_decoder": true,
+ "is_gated_act": false,
+ "layer_norm_epsilon": 1e-06,
+ "model_type": "t5",
+ "neuron": {
+ "auto_cast": "matmul",
+ "auto_cast_type": "bf16",
+ "compiler_type": "neuronx-cc",
+ "compiler_version": "2.19.8089.0+8ab9f450",
+ "disable_fallback": false,
+ "disable_fast_relayout": false,
+ "dynamic_batch_size": false,
+ "float_dtype": "fp32",
+ "inline_weights_to_neff": true,
+ "int_dtype": "int64",
+ "optlevel": "2",
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "static_batch_size": 1,
+ "static_num_beams": 1,
+ "static_sequence_length": 64,
+ "task": "text2text-generation",
+ "tensor_parallel_size": 1
+ },
+ "num_decoder_layers": 5,
+ "num_heads": 4,
+ "num_layers": 5,
+ "relative_attention_max_distance": 128,
+ "relative_attention_num_buckets": 8,
+ "use_cache": true,
+ "vocab_size": 1103
+ },
+ "encoder": {
+ "classifier_dropout": 0.0,
+ "d_ff": 37,
+ "d_kv": 8,
+ "d_model": 32,
+ "decoder_start_token_id": 0,
+ "dense_act_fn": "relu",
+ "dropout_rate": 0.1,
+ "feed_forward_proj": "relu",
+ "gradient_checkpointing": false,
+ "initializer_factor": 0.002,
+ "is_encoder_decoder": true,
+ "is_gated_act": false,
+ "layer_norm_epsilon": 1e-06,
+ "model_type": "t5",
+ "neuron": {
+ "auto_cast": "matmul",
+ "auto_cast_type": "bf16",
+ "compiler_type": "neuronx-cc",
+ "compiler_version": "2.19.8089.0+8ab9f450",
+ "disable_fallback": false,
+ "disable_fast_relayout": false,
+ "dynamic_batch_size": false,
+ "float_dtype": "fp32",
+ "inline_weights_to_neff": true,
+ "int_dtype": "int64",
+ "optlevel": "2",
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "static_batch_size": 1,
+ "static_num_beams": 1,
+ "static_sequence_length": 64,
+ "task": "text2text-generation",
+ "tensor_parallel_size": 1
+ },
+ "num_decoder_layers": 5,
+ "num_heads": 4,
+ "num_layers": 5,
+ "relative_attention_max_distance": 128,
+ "relative_attention_num_buckets": 8,
+ "use_cache": true,
+ "vocab_size": 1103
+ }
+ }
neuronxcc-2.19.8089.0+8ab9f450/0_REGISTRY/0.3.1.dev6/t5/hf-internal-testing/tiny-random-t5/e57cbde649dde732937d.json ADDED
@@ -0,0 +1,91 @@
+ {
+ "_entry_class": "MultiModelCacheEntry",
+ "_model_id": "hf-internal-testing/tiny-random-t5",
+ "_task": null,
+ "decoder": {
+ "classifier_dropout": 0.0,
+ "d_ff": 37,
+ "d_kv": 8,
+ "d_model": 32,
+ "decoder_start_token_id": 0,
+ "dense_act_fn": "relu",
+ "dropout_rate": 0.1,
+ "feed_forward_proj": "relu",
+ "gradient_checkpointing": false,
+ "initializer_factor": 0.002,
+ "is_encoder_decoder": true,
+ "is_gated_act": false,
+ "layer_norm_epsilon": 1e-06,
+ "model_type": "t5",
+ "neuron": {
+ "auto_cast": "matmul",
+ "auto_cast_type": "bf16",
+ "compiler_type": "neuronx-cc",
+ "compiler_version": "2.19.8089.0+8ab9f450",
+ "disable_fallback": false,
+ "disable_fast_relayout": false,
+ "dynamic_batch_size": false,
+ "float_dtype": "fp32",
+ "inline_weights_to_neff": true,
+ "int_dtype": "int64",
+ "optlevel": "2",
+ "output_attentions": true,
+ "output_hidden_states": true,
+ "static_batch_size": 1,
+ "static_num_beams": 4,
+ "static_sequence_length": 64,
+ "task": "text2text-generation",
+ "tensor_parallel_size": 1
+ },
+ "num_decoder_layers": 5,
+ "num_heads": 4,
+ "num_layers": 5,
+ "relative_attention_max_distance": 128,
+ "relative_attention_num_buckets": 8,
+ "use_cache": true,
+ "vocab_size": 1103
+ },
+ "encoder": {
+ "classifier_dropout": 0.0,
+ "d_ff": 37,
+ "d_kv": 8,
+ "d_model": 32,
+ "decoder_start_token_id": 0,
+ "dense_act_fn": "relu",
+ "dropout_rate": 0.1,
+ "feed_forward_proj": "relu",
+ "gradient_checkpointing": false,
+ "initializer_factor": 0.002,
+ "is_encoder_decoder": true,
+ "is_gated_act": false,
+ "layer_norm_epsilon": 1e-06,
+ "model_type": "t5",
+ "neuron": {
+ "auto_cast": "matmul",
+ "auto_cast_type": "bf16",
+ "compiler_type": "neuronx-cc",
+ "compiler_version": "2.19.8089.0+8ab9f450",
+ "disable_fallback": false,
+ "disable_fast_relayout": false,
+ "dynamic_batch_size": false,
+ "float_dtype": "fp32",
+ "inline_weights_to_neff": true,
+ "int_dtype": "int64",
+ "optlevel": "2",
+ "output_attentions": true,
+ "output_hidden_states": true,
+ "static_batch_size": 1,
+ "static_num_beams": 4,
+ "static_sequence_length": 64,
+ "task": "text2text-generation",
+ "tensor_parallel_size": 1
+ },
+ "num_decoder_layers": 5,
+ "num_heads": 4,
+ "num_layers": 5,
+ "relative_attention_max_distance": 128,
+ "relative_attention_num_buckets": 8,
+ "use_cache": true,
+ "vocab_size": 1103
+ }
+ }
neuronxcc-2.19.8089.0+8ab9f450/MODULE_02da278f167522769f43+253d6470/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--enable-saturate-infinity", "--enable-mixed-precision-accumulation", "--model-type", "transformer", "-O1", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2", "--auto-cast=none", "--internal-enable-dge-levels", "vector_dynamic_offsets", "--internal-hlo2tensorizer-options=--verify-hlo=true", "--logfile=/tmp/nxd_model/context_encoding_model/_tp0_bk0/log-neuron-cc.txt"]
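Each MODULE_* directory stores the exact argument list passed to neuronx-cc in compile_flags.json; the failed-compilation logs later in this commit show the full command shape. A small sketch that rebuilds such an invocation from the cached flags (the HLO and NEFF paths are placeholders):

```python
# Sketch: reconstruct the neuronx-cc command line from a cached compile_flags.json.
# The overall shape mirrors the invocations recorded in the model.log files in this
# commit; the input/output paths here are placeholders.
import json

with open("compile_flags.json") as f:
    flags = json.load(f)  # e.g. ["--target=trn1", "-O1", ...]

cmd = [
    "neuronx-cc", "compile", "--framework=XLA",
    "model.hlo_module.pb",     # serialized HLO module (placeholder path)
    "--output", "model.neff",  # compiled NEFF artifact (placeholder path)
    *flags,
]
print(" ".join(cmd))
```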
neuronxcc-2.19.8089.0+8ab9f450/MODULE_02da278f167522769f43+253d6470/model.done ADDED
File without changes
neuronxcc-2.19.8089.0+8ab9f450/MODULE_02da278f167522769f43+253d6470/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67438285964b08bb19d5cc976e3c54297f08d1b0df76c2f3fe542e1624483de2
+ size 9670898
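The large artifacts in this commit are stored as Git LFS pointers like the one above, which record only an sha256 oid and a byte size. A small sketch for verifying a downloaded blob against such a pointer:

```python
# Sketch: check a local file against a Git LFS pointer (version/oid/size lines).
import hashlib
import os

def matches_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    fields = dict(
        line.split(" ", 1) for line in open(pointer_path).read().splitlines() if line
    )
    expected_oid = fields["oid"].strip().removeprefix("sha256:")
    expected_size = int(fields["size"])
    actual_oid = hashlib.sha256(open(blob_path, "rb").read()).hexdigest()
    return actual_oid == expected_oid and os.path.getsize(blob_path) == expected_size
```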
neuronxcc-2.19.8089.0+8ab9f450/MODULE_02da278f167522769f43+253d6470/model.neff ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:851ad19d69a28aebcb76eeb4e4529f9d13bd810ffc4849ea085036f1af5e2beb
+ size 17439744
neuronxcc-2.19.8089.0+8ab9f450/MODULE_15f7e443873c3474a6aa/decoder/model.neuron CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b788d306aa621145bd4c1b566e270308aa9d03f560b54a659020c5087dc28413
- size 1190094
+ oid sha256:c062fb6a1f3d277faac7225d1bec3b60dff7c9c7f780cc21ef74a345ad1e0409
+ size 1189966
neuronxcc-2.19.8089.0+8ab9f450/MODULE_15f7e443873c3474a6aa/encoder/model.neuron CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fd726585ac540b16f29cf45b77f7fa0beb2fa5363f3d09a5a0c25c4007501304
- size 349882
+ oid sha256:7c3b2291d6e1e91d862de919369aef0c73f10af7f0d3d06bdac64df2f247ceb8
+ size 349818
neuronxcc-2.19.8089.0+8ab9f450/MODULE_16364384937141820797+e30acd3a/model.neff CHANGED
Binary files a/neuronxcc-2.19.8089.0+8ab9f450/MODULE_16364384937141820797+e30acd3a/model.neff and b/neuronxcc-2.19.8089.0+8ab9f450/MODULE_16364384937141820797+e30acd3a/model.neff differ
 
neuronxcc-2.19.8089.0+8ab9f450/MODULE_234b5c34641e778cc5d2+ed72d204/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--auto-cast=none", "--model-type=transformer", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2 --vectorize-strided-dma ", "-O2", "--lnc=1", "--logfile=/tmp/nxd_model/context_encoding_model/_tp0_bk0/log-neuron-cc.txt"]
neuronxcc-2.19.8089.0+8ab9f450/MODULE_234b5c34641e778cc5d2+ed72d204/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ddfdd3b97bc014c3753edd934bfc9558878a8ef0a424799cec49b546e76cb5c8
+ size 110959906
neuronxcc-2.19.8089.0+8ab9f450/MODULE_234b5c34641e778cc5d2+ed72d204/model.log ADDED
@@ -0,0 +1,3 @@
+ Failed compilation with ['neuronx-cc', 'compile', '--framework=XLA', '/tmp/nxd_model/context_encoding_model/_tp0_bk0/model.MODULE_234b5c34641e778cc5d2+ed72d204.hlo_module.pb', '--output', '/tmp/nxd_model/context_encoding_model/_tp0_bk0/model.MODULE_234b5c34641e778cc5d2+ed72d204.neff', '--target=trn1', '--auto-cast=none', '--model-type=transformer', '--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2 --vectorize-strided-dma ', '-O2', '--lnc=1', '--logfile=/tmp/nxd_model/context_encoding_model/_tp0_bk0/log-neuron-cc.txt', '--verbose=35']: [XCG815] Estimated peak HBM usage (18.327946) exceeds 16GB. Neff won't be able to load on chip - Please open a support ticket at https://github.com/aws-neuron/aws-neuron-sdk/issues/new. You may also be able to obtain more information using the 'XLA_IR_DEBUG' and 'XLA_HLO_DEBUG' environment variables.
+ 2025-09-10T13:49:56Z Non-signal exit. Backend exited with code 1 and stderr: [XCG815] Estimated peak HBM usage (18.327946) exceeds 16GB. Neff won't be able to load on chip - Please open a support ticket at https://github.com/aws-neuron/aws-neuron-sdk/issues/new. You may also be able to obtain more information using the 'XLA_IR_DEBUG' and 'XLA_HLO_DEBUG' environment variables.
+
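This module only ships an HLO and a model.log: compilation aborted with XCG815 because the estimated peak HBM usage (about 18.3 GB) exceeds the 16 GB limit reported by the compiler, so no model.neff was produced. A small sketch for spotting such failed entries in a local cache (the cache path is a placeholder):

```python
# Sketch: list cached MODULE_* directories whose compilation failed, i.e. a
# model.log is present but no model.neff was produced (as in this entry).
from pathlib import Path

def failed_modules(cache_root: str):
    for module_dir in Path(cache_root).glob("neuronxcc-*/MODULE_*"):
        if (module_dir / "model.log").exists() and not (module_dir / "model.neff").exists():
            yield module_dir

for module_dir in failed_modules("/var/tmp/neuron-compile-cache"):  # placeholder path
    print(module_dir)
```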
neuronxcc-2.19.8089.0+8ab9f450/MODULE_31248f3f93aafaf6dbaa+cd3419b6/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--enable-saturate-infinity", "--enable-mixed-precision-accumulation", "--model-type", "transformer", "-O1", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2", "--auto-cast=none", "--internal-enable-dge-levels", "vector_dynamic_offsets", "--internal-hlo2tensorizer-options=--verify-hlo=true", "--logfile=/tmp/nxd_model/token_generation_model/_tp0_bk0/log-neuron-cc.txt", "--enable-internal-neff-wrapper"]
neuronxcc-2.19.8089.0+8ab9f450/MODULE_31248f3f93aafaf6dbaa+cd3419b6/model.done ADDED
File without changes
neuronxcc-2.19.8089.0+8ab9f450/MODULE_31248f3f93aafaf6dbaa+cd3419b6/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0058240e54b46c6b0db4c14f906f09f3ec484e7e04c3f2cb6e1f8eda3c5888d7
+ size 2507590
neuronxcc-2.19.8089.0+8ab9f450/MODULE_31248f3f93aafaf6dbaa+cd3419b6/model.neff ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c74f90bab7cf259aea0daac2a556646ed4ff30addb7831a0a31f1a3b3cb58934
+ size 3533824
neuronxcc-2.19.8089.0+8ab9f450/MODULE_31248f3f93aafaf6dbaa+cd3419b6/wrapped_neff.hlo ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd51c5d89811b2361e4f8267cb657832f235da127ac6940aed8e0a70761b6052
+ size 3783895
neuronxcc-2.19.8089.0+8ab9f450/MODULE_3cbaae1c02abefd7f494+a9d440f5/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--auto-cast=none", "--model-type=transformer", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2 --vectorize-strided-dma ", "-O2", "--lnc=1", "--logfile=/tmp/nxd_model/token_generation_model/_tp0_bk0/log-neuron-cc.txt", "--enable-internal-neff-wrapper"]
neuronxcc-2.19.8089.0+8ab9f450/MODULE_3cbaae1c02abefd7f494+a9d440f5/model.done ADDED
File without changes
neuronxcc-2.19.8089.0+8ab9f450/MODULE_3cbaae1c02abefd7f494+a9d440f5/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12093b664d384d4f227211cf30520c33cb2f168bf18efd5680e7a6f0759e3946
+ size 103130389
neuronxcc-2.19.8089.0+8ab9f450/MODULE_3cbaae1c02abefd7f494+a9d440f5/model.neff ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fafff6c8809235ac7997becb99200b923ac6f9c9084ee79e97d7948b30d87e8f
+ size 7803904
neuronxcc-2.19.8089.0+8ab9f450/MODULE_3cbaae1c02abefd7f494+a9d440f5/wrapped_neff.hlo ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4013a9888058e7c0e8ac2ce33000d806be95273272575932d5e51e724e37a029
+ size 8121064
neuronxcc-2.19.8089.0+8ab9f450/MODULE_43116d82b5805ba3ae20+cd3419b6/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--enable-saturate-infinity", "--enable-mixed-precision-accumulation", "--model-type", "transformer", "-O1", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2", "--auto-cast=none", "--internal-enable-dge-levels", "vector_dynamic_offsets", "--internal-hlo2tensorizer-options=--verify-hlo=true", "--logfile=/tmp/nxd_model/token_generation_model/_tp0_bk0/log-neuron-cc.txt", "--enable-internal-neff-wrapper"]
neuronxcc-2.19.8089.0+8ab9f450/MODULE_43116d82b5805ba3ae20+cd3419b6/model.done ADDED
File without changes
neuronxcc-2.19.8089.0+8ab9f450/MODULE_43116d82b5805ba3ae20+cd3419b6/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9b145a643ac4afcec643c5a4cf0003579212a43c2ea7b2cab1f7635446c3c40
+ size 2458664
neuronxcc-2.19.8089.0+8ab9f450/MODULE_43116d82b5805ba3ae20+cd3419b6/model.neff ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ecc4a985b50e6be447dcba49851f6869f4bc0661ddb70c5528f11b5a721ac0a
+ size 3492864
neuronxcc-2.19.8089.0+8ab9f450/MODULE_43116d82b5805ba3ae20+cd3419b6/wrapped_neff.hlo ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b58e441608e9340359965eeeb8e0d0229dfbd7a847939c284e69325334c98780
+ size 3742824
neuronxcc-2.19.8089.0+8ab9f450/MODULE_4ab214a26135c9602b57+ed72d204/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--auto-cast=none", "--model-type=transformer", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2 --vectorize-strided-dma ", "-O2", "--lnc=1", "--logfile=/tmp/nxd_model/context_encoding_model/_tp0_bk0/log-neuron-cc.txt"]
neuronxcc-2.19.8089.0+8ab9f450/MODULE_4ab214a26135c9602b57+ed72d204/model.done ADDED
File without changes
neuronxcc-2.19.8089.0+8ab9f450/MODULE_4ab214a26135c9602b57+ed72d204/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df61ac9ae7cc3b8d0b92f84adaaafb47ca0c3968defd912edc4a081cd7c233d6
+ size 104317794
neuronxcc-2.19.8089.0+8ab9f450/MODULE_4ab214a26135c9602b57+ed72d204/model.neff ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2632ba92083ecea71ec19ae561a0ca4e56249ffbd65c624502bc366b7b3cb3d4
+ size 38933504
neuronxcc-2.19.8089.0+8ab9f450/MODULE_5d5dadc2c2138bf8ab44/decoder/model.neuron CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8e7ed179c776a09dd00aaa5347656ca55f30a25cec07b7fbb278051a49788c69
- size 633172
+ oid sha256:a9d1936dd388b9f965a4e1b24fedb24fb7c6a748789332d0d885bfbff8eded56
+ size 633236
neuronxcc-2.19.8089.0+8ab9f450/MODULE_5d5dadc2c2138bf8ab44/encoder/model.neuron CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8bfc4ed25f9998aa656120620c0a3c0ce35bfc8aeefd672bc1695585a6bd1320
- size 351030
+ oid sha256:12ca8cdd5d2f8186de84a66171ac1d9d021405c8a3c6b37c33e4945f018f175d
+ size 351094
neuronxcc-2.19.8089.0+8ab9f450/MODULE_6ddd5b729b9e4ecede70+a9d440f5/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--auto-cast=none", "--model-type=transformer", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2 --vectorize-strided-dma ", "-O2", "--lnc=1", "--logfile=/tmp/nxd_model/token_generation_model/_tp0_bk0/log-neuron-cc.txt", "--enable-internal-neff-wrapper"]
neuronxcc-2.19.8089.0+8ab9f450/MODULE_6ddd5b729b9e4ecede70+a9d440f5/model.done ADDED
File without changes
neuronxcc-2.19.8089.0+8ab9f450/MODULE_6ddd5b729b9e4ecede70+a9d440f5/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a428bd8f83c4a722785ed440b47277011681572669d6eb5771db9d093585f13a
+ size 104067050
neuronxcc-2.19.8089.0+8ab9f450/MODULE_6ddd5b729b9e4ecede70+a9d440f5/model.neff ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d0fc0d34f44ca4b39e2257e2fc912b5d9662e0cf5c5772c7343dd108a85eef3
+ size 11377664
neuronxcc-2.19.8089.0+8ab9f450/MODULE_6ddd5b729b9e4ecede70+a9d440f5/wrapped_neff.hlo ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a48a1ffb1043c4f172ef65685fbbcae53db0a3670657a8f321d47245718a5a9
+ size 11652584
neuronxcc-2.19.8089.0+8ab9f450/MODULE_7557177e60b224d7d0cc+253d6470/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--enable-saturate-infinity", "--enable-mixed-precision-accumulation", "--model-type", "transformer", "-O1", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2", "--auto-cast=none", "--internal-enable-dge-levels", "vector_dynamic_offsets", "--internal-hlo2tensorizer-options=--verify-hlo=true", "--logfile=/tmp/nxd_model/context_encoding_model/_tp0_bk0/log-neuron-cc.txt"]
neuronxcc-2.19.8089.0+8ab9f450/MODULE_7557177e60b224d7d0cc+253d6470/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2553215122056d902b1c55e7e737da5a9dafdc192698ac91c466be727a94caf7
+ size 9674651
neuronxcc-2.19.8089.0+8ab9f450/MODULE_7557177e60b224d7d0cc+253d6470/model.log ADDED
@@ -0,0 +1,3 @@
+ Failed compilation with ['neuronx-cc', 'compile', '--framework=XLA', '/tmp/nxd_model/context_encoding_model/_tp0_bk0/model.MODULE_7557177e60b224d7d0cc+253d6470.hlo_module.pb', '--output', '/tmp/nxd_model/context_encoding_model/_tp0_bk0/model.MODULE_7557177e60b224d7d0cc+253d6470.neff', '--target=trn1', '--enable-saturate-infinity', '--enable-mixed-precision-accumulation', '--model-type', 'transformer', '-O1', '--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2', '--auto-cast=none', '--internal-enable-dge-levels', 'vector_dynamic_offsets', '--internal-hlo2tensorizer-options=--verify-hlo=true', '--logfile=/tmp/nxd_model/context_encoding_model/_tp0_bk0/log-neuron-cc.txt', '--verbose=35']: [XCG815] Estimated peak HBM usage (17.053570) exceeds 16GB. Neff won't be able to load on chip - Please open a support ticket at https://github.com/aws-neuron/aws-neuron-sdk/issues/new. You may also be able to obtain more information using the 'XLA_IR_DEBUG' and 'XLA_HLO_DEBUG' environment variables.
+ 2025-09-11T08:10:18Z Non-signal exit. Backend exited with code 1 and stderr: [XCG815] Estimated peak HBM usage (17.053570) exceeds 16GB. Neff won't be able to load on chip - Please open a support ticket at https://github.com/aws-neuron/aws-neuron-sdk/issues/new. You may also be able to obtain more information using the 'XLA_IR_DEBUG' and 'XLA_HLO_DEBUG' environment variables.
+
neuronxcc-2.19.8089.0+8ab9f450/MODULE_869715862416998377+e30acd3a/model.neff CHANGED
Binary files a/neuronxcc-2.19.8089.0+8ab9f450/MODULE_869715862416998377+e30acd3a/model.neff and b/neuronxcc-2.19.8089.0+8ab9f450/MODULE_869715862416998377+e30acd3a/model.neff differ
 
neuronxcc-2.19.8089.0+8ab9f450/MODULE_bbb0c02d6e06f337e549+ed72d204/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--auto-cast=none", "--model-type=transformer", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2 --vectorize-strided-dma ", "-O2", "--lnc=1", "--logfile=/tmp/nxd_model/context_encoding_model/_tp0_bk0/log-neuron-cc.txt"]