fix: dummy_agent_library.ipynb
These calls need a larger `max_new_tokens`; 200 is too low and truncates meaningful output.
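For context, a minimal sketch of the call after the fix, assuming the notebook's `huggingface_hub.InferenceClient` setup (the model id and prompt below are placeholders, not the notebook's exact values):

```python
# Sketch only: model id and prompt are placeholders for the notebook's values.
from huggingface_hub import InferenceClient

client = InferenceClient("meta-llama/Llama-3.2-3B-Instruct")  # assumed model id

prompt = "..."  # the agent prompt the notebook builds earlier

output = client.text_generation(
    prompt,
    max_new_tokens=2000,  # 200 cut the answer off mid-generation
    stop=["Observation:"],  # stop before the model hallucinates a tool result
)
print(output)
```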
dummy_agent_library.ipynb CHANGED

@@ -436,7 +436,7 @@
 "# Do you see the problem?\n",
 "output = client.text_generation(\n",
 "    prompt,\n",
-"    max_new_tokens=200,\n",
+"    max_new_tokens=2000,\n",
 ")\n",
 "\n",
 "print(output)"
@@ -488,7 +488,7 @@
 "# The answer was hallucinated by the model. We need to stop to actually execute the function!\n",
 "output = client.text_generation(\n",
 "    prompt,\n",
-"    max_new_tokens=200,\n",
+"    max_new_tokens=2000,\n",
 "    stop=[\"Observation:\"] # Let's stop before any actual function is called\n",
 ")\n",
 "\n",