File size: 1,117 Bytes
0382563
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
import keras_hub
import gradio as gr
# Load the fine-tuned Gemma 2B "nutritionx" model from Kaggle Models.
# NOTE(review): this downloads/loads weights at import time — requires
# Kaggle credentials and network access; confirm this is intended.
base = keras_hub.models.GemmaCausalLM.from_preset("kaggle://favouryahdii/gemma-nutritionx/keras/gemma-nutritionx-2b")
# Prompt template; presumably matches the instruction format the model was
# fine-tuned on — the empty-response slot is filled in by generation.
template = "Instruction:\n{instruction}\n\nResponse:\n{response}"


def process_user_input(user_input, api_data="", max_length=512):
    """Format *user_input* with the instruction template, run generation,
    and return only the text after the ``Response:`` marker.

    Args:
        user_input: Raw user instruction/question text.
        api_data: Currently unused; kept for backward compatibility with
            existing callers (reserved for function-calling data).
        max_length: Maximum generation length forwarded to ``generate``.

    Returns:
        The generated response text with surrounding whitespace stripped.
    """
    # Build the same prompt shape used at fine-tuning time, leaving the
    # response slot empty for the model to complete.
    processed_input = template.format(instruction=user_input, response="")

    # Pass max_length by keyword so the call stays correct even if
    # generate() gains parameters between `inputs` and `max_length`.
    prediction = base.generate(processed_input, max_length=max_length)

    # The prompt (which contains "Response:\n") is echoed back in the
    # generation, so keep everything AFTER the first marker.  The original
    # `split("Response:\n")[1]` raised IndexError when the marker was
    # absent and silently truncated the answer whenever the model emitted
    # the marker a second time.
    _, sep, response = prediction.partition("Response:\n")
    # Fall back to the full generation if the marker is somehow missing.
    return (response if sep else prediction).strip()


# Step 5: Handle user input, model response, and function calling
def handle_function_call(user_input, history):
    """Gradio ChatInterface callback: produce a model reply for *user_input*.

    Args:
        user_input: The latest user message.
        history: Prior chat messages supplied by Gradio (ignored; the model
            is prompted with only the current message).

    Returns:
        The model's response text, shown as the assistant reply.
    """
    # NOTE(review): the original code copied `history` into a local list
    # that was never used — dead code removed.  Folding the history into
    # the prompt would require re-formatting it through `template`.
    return process_user_input(user_input)


# Wire the callback into a Gradio chat UI; type="messages" delivers the
# history as a list of {"role": ..., "content": ...} dicts.
chat = gr.ChatInterface(handle_function_call, type="messages")
# Start the local web server; debug=True surfaces server-side tracebacks
# in the console during development.
chat.launch(debug=True)