zhangyue66 committed
Commit d7d1f37 · 1 Parent(s): 4fe79f6
Files changed (1)
  1. README.md +4 -4
README.md CHANGED
@@ -200,12 +200,12 @@ print(outputs)
 <details>
 <summary>👉 Click to expand: Use flash-attn to boost performance and reduce memory usage</summary>
 
-<pre><code>
+```shell
 # ensure the flash-attn2 is installed
 pip install flash-attn --no-build-isolation
-</code></pre>
+```
 
-<pre><code>
+```python
 import torch
 from transformers import AutoModelForCausalLM, AutoProcessor
 from PIL import Image
@@ -260,7 +260,7 @@ with torch.inference_mode():
 
 outputs = processor.batch_decode(out, skip_special_tokens=True)[0]
 print(outputs)
-</code></pre>
+```
 
 </details>
 
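
The hunks above elide README lines 212-259, which sit between the imports and the final decoding step. For orientation only, here is a minimal sketch of the usual way FlashAttention-2 is enabled in transformers, via the `attn_implementation` argument to `from_pretrained`. The model id, image path, and prompt below are placeholders rather than values from this README, and the exact processor call signature is model-specific:

```python
import torch
from transformers import AutoModelForCausalLM, AutoProcessor
from PIL import Image

# Hypothetical repo id -- substitute the checkpoint this README documents.
model_id = "your-org/your-model"

# attn_implementation="flash_attention_2" is the standard transformers switch
# for the flash-attn package installed above; it requires a half-precision
# dtype, since the FlashAttention-2 kernels only run in fp16/bf16.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
    trust_remote_code=True,
).to("cuda")

processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

# Placeholder image and prompt; multimodal processors differ in how they
# combine text and images, so check the model card for the exact call.
image = Image.open("example.jpg")
inputs = processor(text="Describe this image.", images=image, return_tensors="pt").to("cuda")

with torch.inference_mode():
    out = model.generate(**inputs, max_new_tokens=128)

print(processor.batch_decode(out, skip_special_tokens=True)[0])
```

Note that the dtype choice is not incidental: loading in float32 and requesting FlashAttention-2 will fail, which is why sketches like this pass `torch_dtype=torch.bfloat16` alongside the attention switch.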