File size: 1,241 Bytes
e72331f 175787a e72331f 175787a e72331f 175787a e72331f 175787a 1db160d e72331f 557cf37 175787a e72331f 175787a e72331f 557cf37 175787a 557cf37 e72331f 175787a e72331f 557cf37 175787a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 |
import streamlit as st
from transformers import pipeline
from huggingface_hub import login
# Page heading for the app.
st.title("WhiteRabbitNEO Q&A App")

# Optional Hugging Face access token — only needed when the model repo is gated/private.
# NOTE: `token` is read by load_model() below, so the name must stay module-level.
token = st.text_input("Enter your Hugging Face token (if required):", type="password")
# Load the model
@st.cache_resource
def load_model(hf_token: str | None = None):
    """Build (and cache) the question-answering pipeline.

    Args:
        hf_token: Optional Hugging Face access token. Passing it as a
            parameter makes it part of the ``st.cache_resource`` cache key,
            so entering a token after a failed anonymous load triggers a
            fresh attempt. When ``None``, falls back to the module-level
            ``token`` widget value (legacy behavior).

    Returns:
        The ``transformers`` pipeline on success, or ``None`` on failure
        (the error is surfaced in the UI via ``st.error``).
    """
    try:
        # Authenticate first so a gated/private model can be downloaded.
        effective_token = hf_token if hf_token is not None else token
        if effective_token:
            login(token=effective_token)
        # NOTE(review): "WhiteRabbitNEO" looks like an incomplete repo id
        # (Hub ids are usually "org/model-name") — confirm against the Hub.
        return pipeline("question-answering", model="WhiteRabbitNEO")
    except Exception as e:
        # Best-effort: report in the UI and signal failure to the caller
        # rather than crashing the Streamlit script run.
        st.error(f"Failed to load model: {e}")
        return None
# Fetch the (cached) QA pipeline for this session.
model = load_model()

if model:
    # Collect the user's question and the passage to answer it from.
    question = st.text_input("Ask a question:")
    context = st.text_area("Provide context for your question:")

    if st.button("Get Answer"):
        # Both fields are required before we can run extractive QA.
        if not question or not context:
            st.warning("Please provide both a question and context.")
        else:
            try:
                result = model(question=question, context=context)
                st.write("Answer:", result['answer'])
            except Exception as e:
                # Surface inference failures in the UI instead of crashing.
                st.error(f"Error generating answer: {e}")
else:
    st.error("Model could not be loaded. Please check your configuration.")
|