Makhinur committed on
Commit
3ddd231
·
verified ·
1 Parent(s): 67a3da9

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +2 -2
main.py CHANGED
@@ -287,14 +287,14 @@ async def generate_story_endpoint(image_file: UploadFile = File(...), language:
287
 
288
  # Step 2: Construct the prompt text for the language model
289
  # This prompt instructs the model on what to write and incorporates the caption.
290
- prompt_text = f"Write an attractive story of around 300 words about {story_theme}. Incorporate the following details from an image description into the story: {caption}\n\nStory:"
291
 
292
  # Step 3: Generate the story using the local language model (Qwen 0.5B via llama.cpp)
293
  try:
294
  # Call the Qwen 0.5B story generation function
295
  story = generate_story_qwen_0_5b( # <--- Use the updated function name
296
  prompt_text,
297
- max_new_tokens=300, # Request ~300 new tokens
298
  temperature=0.7, # Sampling parameters
299
  top_p=0.9,
300
  top_k=50 # Note: top_k may not be directly used by llama_cpp.create_chat_completion
 
287
 
288
  # Step 2: Construct the prompt text for the language model
289
  # This prompt instructs the model on what to write and incorporates the caption.
290
+ prompt_text = f"Write a detailed story that is approximately 300 words long. Ensure the story has a clear beginning, middle, and end about {story_theme}. Incorporate the following details from an image description into the story: {caption}\n\nStory:"
291
 
292
  # Step 3: Generate the story using the local language model (Qwen 0.5B via llama.cpp)
293
  try:
294
  # Call the Qwen 0.5B story generation function
295
  story = generate_story_qwen_0_5b( # <--- Use the updated function name
296
  prompt_text,
297
+ max_new_tokens=350, # Request ~350 new tokens (buffer so a ~300-word story isn't truncated)
298
  temperature=0.7, # Sampling parameters
299
  top_p=0.9,
300
  top_k=50 # Note: top_k may not be directly used by llama_cpp.create_chat_completion