|
import streamlit as st |
|
from transformers import pipeline |
|
from huggingface_hub import login |
|
|
|
|
|
# --- Page header and credential entry ---------------------------------------
# The token box is optional: gated Hub models need it, public ones do not.
_TITLE_TEXT = "WhiteRabbitNEO Q&A App"
_TOKEN_PROMPT = "Enter your Hugging Face token (if required):"

st.title(_TITLE_TEXT)

token = st.text_input(_TOKEN_PROMPT, type="password")
|
|
|
|
|
@st.cache_resource
def load_model(hf_token: str | None = None,
               model_name: str = "WhiteRabbitNEO"):
    """Build (and cache) the question-answering pipeline.

    Args:
        hf_token: Hugging Face access token used to authenticate before
            downloading the model. Defaults to the module-level ``token``
            text input, preserving the original ``load_model()`` call.
            Passing it as an argument matters: ``st.cache_resource`` keys
            its cache on the function's arguments, so a closed-over global
            token would never invalidate a previously cached (failed or
            anonymous) load.
        model_name: Hub model id to load.
            NOTE(review): "WhiteRabbitNEO" has no ``org/name`` namespace,
            which Hub repo ids normally require — confirm the intended
            repository id.

    Returns:
        The ``transformers`` QA pipeline, or ``None`` if loading failed
        (the error is surfaced in the Streamlit UI rather than raised).
    """
    effective_token = hf_token if hf_token is not None else token
    try:
        if effective_token:
            login(token=effective_token)
        return pipeline("question-answering", model=model_name)
    except Exception as e:
        # Boundary handler: report in the UI and degrade to None so the
        # caller can show a friendly message instead of a stack trace.
        st.error(f"Failed to load model: {e}")
        return None
|
|
|
|
|
# --- Main Q&A flow ----------------------------------------------------------
model = load_model()

# Guard clause first: without a model there is nothing else to render.
if not model:
    st.error("Model could not be loaded. Please check your configuration.")
else:
    # Collect the two inputs the extractive-QA pipeline needs.
    question = st.text_input("Ask a question:")
    context = st.text_area("Provide context for your question:")

    if st.button("Get Answer"):
        missing_input = not (question and context)
        if missing_input:
            st.warning("Please provide both a question and context.")
        else:
            try:
                result = model(question=question, context=context)
                st.write("Answer:", result['answer'])
            except Exception as e:
                # Inference can fail on odd inputs; show the error inline.
                st.error(f"Error generating answer: {e}")
|
|
|
|