Commit: 7f72798
Parent(s): 0e1f401
Commit message: Update README.md
Browse files
File changed: README.md (CHANGED)
@@ -299,7 +299,8 @@ inputs = tokenizer(txt, max_length = 1024, truncation=True, padding="max_length"
 
 ### For Multiple Question Generation (👍)
 ```python
-
+num_generate_sequence = 4 #8, 16, 2, 1
+summaries = model.generate(input_ids =inputs["input_ids"], max_new_tokens=100, do_sample = True, top_p = 0.95, num_return_sequences = num_generate_sequence)
 ```
 ### For Single Question Generation
 ```python
@@ -342,8 +343,10 @@ model = BetterTransformer.transform(model)
 
 ### For Multiple Question Generation (👍)
 ```python
-# use to(device)
-
+# use to(device)
+
+num_generate_sequence = 16
+summaries = model.generate(input_ids =inputs["input_ids"].to(device), max_new_tokens=100, do_sample = True, top_p = 0.95, num_return_sequences = num_generate_sequence)
 ```
 
 