# nutritionv2 / chat_gradio.py
# Uploaded by Fyahdii via huggingface_hub (rev 0382563, 1.12 kB).
# NOTE(review): the lines above were raw web-page metadata from the
# HuggingFace file viewer and were not valid Python; commented out so
# the module can be imported.
import keras_hub
import gradio as gr
# Load the fine-tuned Gemma causal LM from Kaggle Hub; downloads the
# preset weights on first run (requires network + Kaggle credentials).
base = keras_hub.models.GemmaCausalLM.from_preset("kaggle://favouryahdii/gemma-nutritionx/keras/gemma-nutritionx-2b")
# Prompt template — presumably the instruction/response format used at
# fine-tune time (TODO confirm against the training pipeline).
template = "Instruction:\n{instruction}\n\nResponse:\n{response}"
def process_user_input(user_input, api_data="", max_length=512):
    """Format *user_input* with the prompt template, generate, and return
    only the model's answer text.

    Args:
        user_input: The user's instruction/question.
        api_data: Unused; kept for backward compatibility with callers.
        max_length: Maximum sequence length forwarded to ``generate``.

    Returns:
        The text after the first ``Response:`` marker, stripped; if the
        marker is absent from the generation, the whole stripped output.
    """
    # Fill the instruction slot and leave the response slot empty so the
    # model completes it.
    prompt = template.format(instruction=user_input, response="")
    prediction = base.generate(prompt, max_length)
    # The generation echoes the prompt; keep only what follows the first
    # "Response:" marker. Bug fix: the original split(...)[1] raised
    # IndexError whenever the marker was missing — fall back to the full
    # stripped text instead.
    _, sep, answer = prediction.partition("Response:\n")
    return answer.strip() if sep else prediction.strip()
# Step 5: Handle user input, model response, and function calling
def handle_function_call(user_input, history):
    """Gradio ``ChatInterface`` callback: produce a reply for *user_input*.

    Args:
        user_input: The latest user message (a string).
        history: Prior chat messages supplied by Gradio (currently unused;
            the model is queried with the latest message only).

    Returns:
        The model's response text.
    """
    # Dead-code removal: the original copied ``history`` into a local list
    # and appended ``user_input``, but never used the result. Behavior is
    # unchanged — only the latest message is sent to the model.
    return process_user_input(user_input)
# Create a Gradio interface
# Build the chat UI; type="messages" delivers history as a list of
# role/content message dicts to the callback.
chat = gr.ChatInterface(handle_function_call, type="messages")
# Launch the local Gradio server; debug=True surfaces tracebacks in the UI.
chat.launch(debug=True)