hf-transformers-bot committed
Commit a40d5b8 · verified · 1 Parent(s): 7c29066

Upload benchmark results for run 18246840149
2025-10-04/18246840149/benchmark_results/Llama-2-7b-hf/Llama-2-7b-hf_benchmark_20251004_164933.json ADDED
@@ -0,0 +1,1175 @@
+ {
+ "model_name": "Llama-2-7b-hf",
+ "benchmark_scenarios": [
+ {
+ "scenario_name": "eager_eager_attn",
+ "metadata": {
+ "timestamp": "2025-10-04T16:43:30.893107",
+ "commit_id": "e11a00a16f925b7d3b52f5007bdce3464edb361f",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "eager",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "eager",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": null,
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "eager",
+ "sdpa_backend": null,
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 3.668982666015625,
+ 3.6699453125,
+ 3.656045654296875,
+ 3.659677734375,
+ 3.663490478515625
+ ],
+ "mean": 3.663628369140625,
+ "median": 3.663490478515625,
+ "std": 0.005323464317690821,
+ "min": 3.656045654296875,
+ "max": 3.6699453125,
+ "p25": 3.659677734375,
+ "p75": 3.668982666015625,
+ "p90": 3.66956025390625,
+ "p95": 3.669752783203125,
+ "p99": 3.669906806640625,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.03946054458618164,
+ 0.03757347106933594,
+ 0.03708943939208984,
+ 0.03731062316894531,
+ 0.038892608642578125
+ ],
+ "mean": 0.03806533737182617,
+ "median": 0.03757347106933594,
+ "std": 0.0009375376025322986,
+ "min": 0.03708943939208984,
+ "max": 0.03946054458618164,
+ "p25": 0.03731062316894531,
+ "p75": 0.038892608642578125,
+ "p90": 0.03923337020874023,
+ "p95": 0.039346957397460934,
+ "p99": 0.0394378271484375,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 27.255511705264112,
+ 27.24836243728087,
+ 27.3519560354702,
+ 27.324810340732913,
+ 27.296372294795223
+ ],
+ "mean": 27.295402562708666,
+ "median": 27.296372294795223,
+ "std": 0.0396688970806018,
+ "min": 27.24836243728087,
+ "max": 27.3519560354702,
+ "p25": 27.255511705264112,
+ "p75": 27.324810340732913,
+ "p90": 27.341097757575284,
+ "p95": 27.346526896522743,
+ "p99": 27.350870207680707,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.03668982666015625,
+ 0.036699453124999996,
+ 0.03656045654296875,
+ 0.036596777343750005,
+ 0.03663490478515625
+ ],
+ "mean": 0.03663628369140624,
+ "median": 0.03663490478515625,
+ "std": 5.323464317690722e-05,
+ "min": 0.03656045654296875,
+ "max": 0.036699453124999996,
+ "p25": 0.036596777343750005,
+ "p75": 0.03668982666015625,
+ "p90": 0.0366956025390625,
+ "p95": 0.03669752783203125,
+ "p99": 0.03669906806640625,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 90.76086956521739,
+ "gpu_utilization_max": 94,
+ "gpu_utilization_min": 86,
+ "gpu_memory_used_mean": 13253,
+ "gpu_memory_used_max": 13253,
+ "gpu_memory_used_min": 13253,
+ "sample_count": 92,
+ "gpu_monitoring_status": "success"
+ }
+ },
+ {
+ "scenario_name": "compiled_compile_max-autotune_eager_attn",
+ "metadata": {
+ "timestamp": "2025-10-04T16:44:06.172354",
+ "commit_id": "e11a00a16f925b7d3b52f5007bdce3464edb361f",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "compiled",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "compiled",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": "max-autotune",
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "eager",
+ "sdpa_backend": null,
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 7.86089013671875,
+ 7.95289990234375,
+ 7.8584091796875,
+ 7.896072265625,
+ 7.95690478515625
+ ],
+ "mean": 7.9050352539062505,
+ "median": 7.896072265625,
+ "std": 0.04285928177097823,
+ "min": 7.8584091796875,
+ "max": 7.95690478515625,
+ "p25": 7.86089013671875,
+ "p75": 7.95289990234375,
+ "p90": 7.95530283203125,
+ "p95": 7.95610380859375,
+ "p99": 7.95674458984375,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.03886195373535156,
+ 0.038317825317382814,
+ 0.03914854431152344,
+ 0.038165824890136715,
+ 0.038163326263427735
+ ],
+ "mean": 0.03853149490356446,
+ "median": 0.038317825317382814,
+ "std": 0.00040121575485716686,
+ "min": 0.038163326263427735,
+ "max": 0.03914854431152344,
+ "p25": 0.038165824890136715,
+ "p75": 0.03886195373535156,
+ "p90": 0.03903390808105469,
+ "p95": 0.03909122619628906,
+ "p99": 0.039137080688476565,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 12.721205647296003,
+ 12.574029753666284,
+ 12.725221824600462,
+ 12.664524416188922,
+ 12.567700971683337
+ ],
+ "mean": 12.650536522687002,
+ "median": 12.664524416188922,
+ "std": 0.06853129923706171,
+ "min": 12.567700971683337,
+ "max": 12.725221824600462,
+ "p25": 12.574029753666284,
+ "p75": 12.721205647296003,
+ "p90": 12.723615353678678,
+ "p95": 12.724418589139571,
+ "p99": 12.725061177508284,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.0786089013671875,
+ 0.0795289990234375,
+ 0.078584091796875,
+ 0.07896072265625001,
+ 0.0795690478515625
+ ],
+ "mean": 0.0790503525390625,
+ "median": 0.07896072265625001,
+ "std": 0.00042859281770978307,
+ "min": 0.078584091796875,
+ "max": 0.0795690478515625,
+ "p25": 0.0786089013671875,
+ "p75": 0.0795289990234375,
+ "p90": 0.07955302832031251,
+ "p95": 0.07956103808593751,
+ "p99": 0.0795674458984375,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 44.205882352941174,
+ "gpu_utilization_max": 94,
+ "gpu_utilization_min": 0,
+ "gpu_memory_used_mean": 13629.364705882354,
+ "gpu_memory_used_max": 13777,
+ "gpu_memory_used_min": 13415,
+ "sample_count": 170,
+ "gpu_monitoring_status": "success"
+ }
+ },
+ {
+ "scenario_name": "eager_sdpa_default",
+ "metadata": {
+ "timestamp": "2025-10-04T16:45:34.592053",
+ "commit_id": "e11a00a16f925b7d3b52f5007bdce3464edb361f",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "eager",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "eager",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": null,
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "sdpa",
+ "sdpa_backend": null,
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 3.414248046875,
+ 3.41309228515625,
+ 3.413102783203125,
+ 3.41368896484375,
+ 3.41013134765625
+ ],
+ "mean": 3.412852685546875,
+ "median": 3.413102783203125,
+ "std": 0.0014263262616831434,
+ "min": 3.41013134765625,
+ "max": 3.414248046875,
+ "p25": 3.41309228515625,
+ "p75": 3.41368896484375,
+ "p90": 3.4140244140625,
+ "p95": 3.41413623046875,
+ "p99": 3.41422568359375,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.03506668853759766,
+ 0.034638111114501956,
+ 0.03458784103393555,
+ 0.03458556747436523,
+ 0.03457414245605469
+ ],
+ "mean": 0.03469047012329102,
+ "median": 0.03458784103393555,
+ "std": 0.00018939439430088853,
+ "min": 0.03457414245605469,
+ "max": 0.03506668853759766,
+ "p25": 0.03458556747436523,
+ "p75": 0.034638111114501956,
+ "p90": 0.03489525756835938,
+ "p95": 0.034980973052978516,
+ "p99": 0.03504954544067383,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 29.289026054076007,
+ 29.29894407921702,
+ 29.298853961307344,
+ 29.293822908255837,
+ 29.324383668895635
+ ],
+ "mean": 29.30100613435037,
+ "median": 29.298853961307344,
+ "std": 0.012251645620899332,
+ "min": 29.289026054076007,
+ "max": 29.324383668895635,
+ "p25": 29.293822908255837,
+ "p75": 29.29894407921702,
+ "p90": 29.314207833024188,
+ "p95": 29.319295750959913,
+ "p99": 29.32336608530849,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.03414248046875,
+ 0.0341309228515625,
+ 0.03413102783203125,
+ 0.0341368896484375,
+ 0.0341013134765625
+ ],
+ "mean": 0.03412852685546876,
+ "median": 0.03413102783203125,
+ "std": 1.4263262616831551e-05,
+ "min": 0.0341013134765625,
+ "max": 0.03414248046875,
+ "p25": 0.0341309228515625,
+ "p75": 0.0341368896484375,
+ "p90": 0.034140244140625,
+ "p95": 0.0341413623046875,
+ "p99": 0.0341422568359375,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 96.81395348837209,
+ "gpu_utilization_max": 97,
+ "gpu_utilization_min": 95,
+ "gpu_memory_used_mean": 13849,
+ "gpu_memory_used_max": 13849,
+ "gpu_memory_used_min": 13849,
+ "sample_count": 86,
+ "gpu_monitoring_status": "success"
+ }
+ },
+ {
+ "scenario_name": "eager_sdpa_math",
+ "metadata": {
+ "timestamp": "2025-10-04T16:46:07.040164",
+ "commit_id": "e11a00a16f925b7d3b52f5007bdce3464edb361f",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "eager",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "eager",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": null,
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "sdpa",
+ "sdpa_backend": "math",
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 3.599397705078125,
+ 3.5661982421875,
+ 3.588318115234375,
+ 3.604014404296875,
+ 3.5912861328125
+ ],
+ "mean": 3.5898429199218747,
+ "median": 3.5912861328125,
+ "std": 0.013081311606395642,
+ "min": 3.5661982421875,
+ "max": 3.604014404296875,
+ "p25": 3.588318115234375,
+ "p75": 3.599397705078125,
+ "p90": 3.602167724609375,
+ "p95": 3.603091064453125,
+ "p99": 3.603829736328125,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.03866854476928711,
+ 0.040586593627929685,
+ 0.04006124877929688,
+ 0.040501216888427734,
+ 0.04053580856323242
+ ],
+ "mean": 0.04007068252563476,
+ "median": 0.040501216888427734,
+ "std": 0.0007258023997223484,
+ "min": 0.03866854476928711,
+ "max": 0.040586593627929685,
+ "p25": 0.04006124877929688,
+ "p75": 0.04053580856323242,
+ "p90": 0.04056627960205078,
+ "p95": 0.04057643661499023,
+ "p99": 0.04058456222534179,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 27.782425892786833,
+ 28.041065921971903,
+ 27.868209224663012,
+ 27.74683693849151,
+ 27.84517754971683
+ ],
+ "mean": 27.85674310552602,
+ "median": 27.84517754971683,
+ "std": 0.10182587912050783,
+ "min": 27.74683693849151,
+ "max": 28.041065921971903,
+ "p25": 27.782425892786833,
+ "p75": 27.868209224663012,
+ "p90": 27.971923243048348,
+ "p95": 28.006494582510125,
+ "p99": 28.034151654079547,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.03599397705078125,
+ 0.035661982421874995,
+ 0.03588318115234375,
+ 0.03604014404296875,
+ 0.035912861328125
+ ],
+ "mean": 0.03589842919921875,
+ "median": 0.035912861328125,
+ "std": 0.0001308131160639578,
+ "min": 0.035661982421874995,
+ "max": 0.03604014404296875,
+ "p25": 0.03588318115234375,
+ "p75": 0.03599397705078125,
+ "p90": 0.03602167724609375,
+ "p95": 0.03603091064453125,
+ "p99": 0.03603829736328125,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 94.56043956043956,
+ "gpu_utilization_max": 97,
+ "gpu_utilization_min": 87,
+ "gpu_memory_used_mean": 13849,
+ "gpu_memory_used_max": 13849,
+ "gpu_memory_used_min": 13849,
+ "sample_count": 91,
+ "gpu_monitoring_status": "success"
+ }
+ },
+ {
+ "scenario_name": "eager_sdpa_flash_attention",
+ "metadata": {
+ "timestamp": "2025-10-04T16:46:40.980350",
+ "commit_id": "e11a00a16f925b7d3b52f5007bdce3464edb361f",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "eager",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "eager",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": null,
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "sdpa",
+ "sdpa_backend": "flash_attention",
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 3.412175537109375,
+ 3.41375732421875,
+ 3.41194482421875,
+ 3.410708740234375,
+ 3.41327685546875
+ ],
+ "mean": 3.4123726562499996,
+ "median": 3.412175537109375,
+ "std": 0.0010700835780897762,
+ "min": 3.410708740234375,
+ "max": 3.41375732421875,
+ "p25": 3.41194482421875,
+ "p75": 3.41327685546875,
+ "p90": 3.41356513671875,
+ "p95": 3.41366123046875,
+ "p99": 3.41373810546875,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.03540671920776367,
+ 0.03467203140258789,
+ 0.03462319946289062,
+ 0.03462572860717773,
+ 0.03463056182861328
+ ],
+ "mean": 0.03479164810180664,
+ "median": 0.03463056182861328,
+ "std": 0.0003080498758893079,
+ "min": 0.03462319946289062,
+ "max": 0.03540671920776367,
+ "p25": 0.03462572860717773,
+ "p75": 0.03467203140258789,
+ "p90": 0.03511284408569336,
+ "p95": 0.035259781646728515,
+ "p99": 0.03537733169555664,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 29.30681581660802,
+ 29.29323630902362,
+ 29.30879751928506,
+ 29.319419398188852,
+ 29.297359761421074
+ ],
+ "mean": 29.305125760905327,
+ "median": 29.30681581660802,
+ "std": 0.009190386049641215,
+ "min": 29.29323630902362,
+ "max": 29.319419398188852,
+ "p25": 29.297359761421074,
+ "p75": 29.30879751928506,
+ "p90": 29.315170646627337,
+ "p95": 29.317295022408093,
+ "p99": 29.3189945230327,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.03412175537109375,
+ 0.0341375732421875,
+ 0.034119448242187496,
+ 0.03410708740234375,
+ 0.0341327685546875
+ ],
+ "mean": 0.0341237265625,
+ "median": 0.03412175537109375,
+ "std": 1.070083578089782e-05,
+ "min": 0.03410708740234375,
+ "max": 0.0341375732421875,
+ "p25": 0.034119448242187496,
+ "p75": 0.0341327685546875,
+ "p90": 0.0341356513671875,
+ "p95": 0.034136612304687494,
+ "p99": 0.0341373810546875,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 96.82558139534883,
+ "gpu_utilization_max": 97,
+ "gpu_utilization_min": 95,
+ "gpu_memory_used_mean": 13849,
+ "gpu_memory_used_max": 13849,
+ "gpu_memory_used_min": 13849,
+ "sample_count": 86,
+ "gpu_monitoring_status": "success"
+ }
+ },
+ {
+ "scenario_name": "eager_sdpa_efficient_attention",
+ "metadata": {
+ "timestamp": "2025-10-04T16:47:13.413167",
+ "commit_id": "e11a00a16f925b7d3b52f5007bdce3464edb361f",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "eager",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "eager",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": null,
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "sdpa",
+ "sdpa_backend": "efficient_attention",
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 3.41012744140625,
+ 3.410645751953125,
+ 3.409737060546875,
+ 3.4096982421875,
+ 3.411416015625
+ ],
+ "mean": 3.41032490234375,
+ "median": 3.41012744140625,
+ "std": 0.0006435408947035409,
+ "min": 3.4096982421875,
+ "max": 3.411416015625,
+ "p25": 3.409737060546875,
+ "p75": 3.410645751953125,
+ "p90": 3.41110791015625,
+ "p95": 3.411261962890625,
+ "p99": 3.411385205078125,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.035205535888671875,
+ 0.03573561477661133,
+ 0.034724609375,
+ 0.03489875030517578,
+ 0.03474809646606445
+ ],
+ "mean": 0.035062521362304686,
+ "median": 0.03489875030517578,
+ "std": 0.00037772381042979315,
+ "min": 0.034724609375,
+ "max": 0.03573561477661133,
+ "p25": 0.03474809646606445,
+ "p75": 0.035205535888671875,
+ "p90": 0.03552358322143555,
+ "p95": 0.03562959899902344,
+ "p99": 0.035714411621093754,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 29.324417259538706,
+ 29.319960873313935,
+ 29.327774612615254,
+ 29.3281085002539,
+ 29.313340718921133
+ ],
+ "mean": 29.322720392928584,
+ "median": 29.324417259538706,
+ "std": 0.0055326201111678824,
+ "min": 29.313340718921133,
+ "max": 29.3281085002539,
+ "p25": 29.319960873313935,
+ "p75": 29.327774612615254,
+ "p90": 29.32797494519844,
+ "p95": 29.328041722726173,
+ "p99": 29.328095144748357,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.0341012744140625,
+ 0.03410645751953125,
+ 0.034097370605468746,
+ 0.034096982421875,
+ 0.03411416015625
+ ],
+ "mean": 0.0341032490234375,
+ "median": 0.0341012744140625,
+ "std": 6.435408947035269e-06,
+ "min": 0.034096982421875,
+ "max": 0.03411416015625,
+ "p25": 0.034097370605468746,
+ "p75": 0.03410645751953125,
+ "p90": 0.0341110791015625,
+ "p95": 0.03411261962890625,
+ "p99": 0.03411385205078125,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 96.9186046511628,
+ "gpu_utilization_max": 98,
+ "gpu_utilization_min": 94,
+ "gpu_memory_used_mean": 13849,
+ "gpu_memory_used_max": 13849,
+ "gpu_memory_used_min": 13849,
+ "sample_count": 86,
+ "gpu_monitoring_status": "success"
+ }
+ },
+ {
+ "scenario_name": "compiled_compile_max-autotune_sdpa_default",
+ "metadata": {
+ "timestamp": "2025-10-04T16:47:45.838628",
+ "commit_id": "e11a00a16f925b7d3b52f5007bdce3464edb361f",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "compiled",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "compiled",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": "max-autotune",
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "sdpa",
+ "sdpa_backend": null,
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 3.692891845703125,
+ 3.69575537109375,
+ 3.69946240234375,
+ 3.688886962890625,
+ 3.700034912109375
+ ],
+ "mean": 3.695406298828125,
+ "median": 3.69575537109375,
+ "std": 0.004167063802421549,
+ "min": 3.688886962890625,
+ "max": 3.700034912109375,
+ "p25": 3.692891845703125,
+ "p75": 3.69946240234375,
+ "p90": 3.699805908203125,
+ "p95": 3.69992041015625,
+ "p99": 3.70001201171875,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.03825590515136719,
+ 0.03750640106201172,
+ 0.03749631881713867,
+ 0.03770518493652344,
+ 0.03741900634765625
+ ],
+ "mean": 0.03767656326293945,
+ "median": 0.03750640106201172,
+ "std": 0.00030470806466602297,
+ "min": 0.03741900634765625,
+ "max": 0.03825590515136719,
+ "p25": 0.03749631881713867,
+ "p75": 0.03770518493652344,
+ "p90": 0.03803561706542969,
+ "p95": 0.038145761108398436,
+ "p99": 0.03823387634277344,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 27.079049205395847,
+ 27.05806796146392,
+ 27.030954534541614,
+ 27.108447888476267,
+ 27.02677201037284
+ ],
+ "mean": 27.060658320050095,
+ "median": 27.05806796146392,
+ "std": 0.030526625522130365,
+ "min": 27.02677201037284,
+ "max": 27.108447888476267,
+ "p25": 27.030954534541614,
+ "p75": 27.079049205395847,
+ "p90": 27.0966884152441,
+ "p95": 27.102568151860183,
+ "p99": 27.10727194115305,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.03692891845703125,
+ 0.0369575537109375,
+ 0.0369946240234375,
+ 0.03688886962890625,
+ 0.037000349121093755
+ ],
+ "mean": 0.03695406298828126,
+ "median": 0.0369575537109375,
+ "std": 4.16706380242157e-05,
+ "min": 0.03688886962890625,
+ "max": 0.037000349121093755,
+ "p25": 0.03692891845703125,
+ "p75": 0.0369946240234375,
+ "p90": 0.03699805908203125,
+ "p95": 0.0369992041015625,
+ "p99": 0.03700012011718751,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 90.26881720430107,
+ "gpu_utilization_max": 93,
+ "gpu_utilization_min": 87,
+ "gpu_memory_used_mean": 13845,
+ "gpu_memory_used_max": 13845,
+ "gpu_memory_used_min": 13845,
+ "sample_count": 93,
+ "gpu_monitoring_status": "success"
+ }
+ },
+ {
+ "scenario_name": "compiled_compile_max-autotune_sdpa_math",
+ "metadata": {
+ "timestamp": "2025-10-04T16:48:20.606292",
+ "commit_id": "e11a00a16f925b7d3b52f5007bdce3464edb361f",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "compiled",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "compiled",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": "max-autotune",
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "sdpa",
+ "sdpa_backend": "math",
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 3.887630126953125,
+ 3.882024658203125,
+ 3.8843828125,
+ 3.87432568359375,
+ 3.880162353515625
+ ],
+ "mean": 3.8817051269531246,
+ "median": 3.882024658203125,
+ "std": 0.004454527119265353,
+ "min": 3.87432568359375,
+ "max": 3.887630126953125,
+ "p25": 3.880162353515625,
+ "p75": 3.8843828125,
+ "p90": 3.886331201171875,
+ "p95": 3.8869806640625,
+ "p99": 3.887500234375,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.04092006301879883,
+ 0.042399681091308594,
+ 0.040891265869140624,
+ 0.0404101448059082,
+ 0.04103868865966797
+ ],
+ "mean": 0.04113196868896485,
+ "median": 0.04092006301879883,
+ "std": 0.0006692783393465821,
+ "min": 0.0404101448059082,
+ "max": 0.042399681091308594,
+ "p25": 0.040891265869140624,
+ "p75": 0.04103868865966797,
+ "p90": 0.041855284118652344,
+ "p95": 0.04212748260498047,
+ "p99": 0.04234524139404297,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 25.722611651426206,
+ 25.759753944037815,
+ 25.744115558898713,
+ 25.810943159337583,
+ 25.77211747580482
+ ],
+ "mean": 25.761908357901028,
+ "median": 25.759753944037815,
+ "std": 0.029577336511237805,
+ "min": 25.722611651426206,
+ "max": 25.810943159337583,
+ "p25": 25.744115558898713,
+ "p75": 25.77211747580482,
+ "p90": 25.795412885924478,
+ "p95": 25.80317802263103,
+ "p99": 25.809390131996274,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.038876301269531253,
+ 0.038820246582031245,
+ 0.038843828125,
+ 0.038743256835937497,
+ 0.03880162353515625
+ ],
+ "mean": 0.03881705126953125,
+ "median": 0.038820246582031245,
+ "std": 4.4545271192654645e-05,
+ "min": 0.038743256835937497,
+ "max": 0.038876301269531253,
+ "p25": 0.03880162353515625,
+ "p75": 0.038843828125,
+ "p90": 0.038863312011718755,
+ "p95": 0.038869806640625004,
+ "p99": 0.038875002343750006,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 89.04081632653062,
+ "gpu_utilization_max": 92,
+ "gpu_utilization_min": 80,
+ "gpu_memory_used_mean": 13845,
+ "gpu_memory_used_max": 13845,
+ "gpu_memory_used_min": 13845,
+ "sample_count": 98,
+ "gpu_monitoring_status": "success"
+ }
+ },
+ {
+ "scenario_name": "compiled_compile_max-autotune_sdpa_efficient_attention",
+ "metadata": {
+ "timestamp": "2025-10-04T16:49:01.557155",
+ "commit_id": "e11a00a16f925b7d3b52f5007bdce3464edb361f",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "compiled",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "compiled",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": "max-autotune",
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "sdpa",
+ "sdpa_backend": "efficient_attention",
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 3.698673095703125,
+ 3.699529541015625,
+ 3.692784423828125,
+ 3.701526611328125,
+ 3.700415771484375
+ ],
+ "mean": 3.698585888671875,
+ "median": 3.699529541015625,
+ "std": 0.003051275143080165,
+ "min": 3.692784423828125,
+ "max": 3.701526611328125,
+ "p25": 3.698673095703125,
+ "p75": 3.700415771484375,
+ "p90": 3.701082275390625,
+ "p95": 3.701304443359375,
+ "p99": 3.7014821777343747,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.0381610221862793,
+ 0.0375266227722168,
+ 0.0375437126159668,
+ 0.03771459197998047,
+ 0.03789110565185547
+ ],
+ "mean": 0.03776741104125977,
+ "median": 0.03771459197998047,
+ "std": 0.00023708223020534378,
+ "min": 0.0375266227722168,
+ "max": 0.0381610221862793,
+ "p25": 0.0375437126159668,
+ "p75": 0.03789110565185547,
+ "p90": 0.038053055572509764,
+ "p95": 0.03810703887939453,
+ "p99": 0.03815022552490235,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 27.036723011875104,
+ 27.030463979630014,
+ 27.079836925962496,
+ 27.01588033811799,
+ 27.02399032308909
+ ],
+ "mean": 27.03737891573494,
+ "median": 27.030463979630014,
+ "std": 0.022326542214504575,
+ "min": 27.01588033811799,
+ "max": 27.079836925962496,
+ "p25": 27.02399032308909,
+ "p75": 27.036723011875104,
+ "p90": 27.062591360327538,
+ "p95": 27.071214143145017,
+ "p99": 27.078112369399,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.03698673095703125,
+ 0.03699529541015625,
+ 0.03692784423828125,
+ 0.03701526611328125,
+ 0.03700415771484375
+ ],
+ "mean": 0.03698585888671875,
+ "median": 0.03699529541015625,
+ "std": 3.0512751430802064e-05,
+ "min": 0.03692784423828125,
+ "max": 0.03701526611328125,
+ "p25": 0.03698673095703125,
+ "p75": 0.03700415771484375,
+ "p90": 0.03701082275390625,
+ "p95": 0.03701304443359375,
+ "p99": 0.03701482177734375,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 90.25806451612904,
+ "gpu_utilization_max": 94,
+ "gpu_utilization_min": 88,
+ "gpu_memory_used_mean": 13845,
+ "gpu_memory_used_max": 13845,
+ "gpu_memory_used_min": 13845,
+ "sample_count": 93,
+ "gpu_monitoring_status": "success"
+ }
+ }
+ ]
+ }
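
The schema above is regular: the top-level object carries a "model_name" and a "benchmark_scenarios" list, and each scenario holds its run settings under "metadata" -> "config", a "measurements" block keyed by metric name (latency, time to first token, tokens/sec, time per output token), and a "gpu_metrics" block. As a minimal sketch, assuming the uploaded JSON has been downloaded to a local path (the script and its output layout are illustrative and not part of the benchmark tooling itself), one could summarize a file like this with a few lines of Python:

import json

# Hypothetical local path; point it at wherever the uploaded JSON was saved.
PATH = "Llama-2-7b-hf_benchmark_20251004_164933.json"

with open(PATH) as f:
    results = json.load(f)

print(f"model: {results['model_name']}")
for scenario in results["benchmark_scenarios"]:
    cfg = scenario["metadata"]["config"]
    tps = scenario["measurements"]["tokens_per_second"]
    ttft = scenario["measurements"]["time_to_first_token_seconds"]
    # One line per scenario: attention implementation, compile mode, throughput, TTFT.
    print(
        f"{scenario['scenario_name']:<58} "
        f"attn={cfg['attn_implementation']:<6} "
        f"compile={str(cfg['compile_mode']):<13} "
        f"mean_tokens_per_sec={tps['mean']:.2f} "
        f"mean_ttft_s={ttft['mean']:.4f}"
    )

Run against this file, such a script would print one summary row per scenario, which makes it easier to compare eager vs. compiled variants and the different SDPA backends at a glance.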