|
|
import streamlit as st |
|
|
from transformers import BertTokenizer, BertForSequenceClassification |
|
|
import torch |
|
|
|
|
|
|
|
|
# BUG FIX: `allow_output_mutation` belongs to the deprecated `st.cache` API.
# `st.cache_resource` does not accept it and raises TypeError at startup.
@st.cache_resource
def load_model():
    """Load and cache the fine-tuned BERT classifier and its tokenizer.

    Decorated with ``st.cache_resource`` so the (expensive) model download /
    load happens once per server process and is shared across sessions and
    script reruns.

    Returns:
        tuple: ``(model, tokenizer)`` where ``model`` is a
        ``BertForSequenceClassification`` in eval mode and ``tokenizer`` is
        the matching ``BertTokenizer``.
    """
    # NOTE(review): placeholder repo id — must be replaced with the real
    # Hugging Face model repo before deployment.
    model = BertForSequenceClassification.from_pretrained(
        "your-huggingface-username/your-model-repo"
    )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    # Inference-only app: make eval mode explicit (disables dropout).
    model.eval()
    return model, tokenizer
|
|
|
|
|
# Load the cached model/tokenizer once at script start. Streamlit re-executes
# this file top-to-bottom on every interaction, so the cache in load_model()
# keeps this call cheap after the first run.
model, tokenizer = load_model()

# Page title rendered at the top of the app.
st.title("AI vs Human Text Classifier")

# Free-form input box; returns "" until the user types something.
user_input = st.text_area("Enter the text to classify:")
|
|
|
|
|
if st.button("Classify"):
    # ROBUSTNESS FIX: st.text_area returns "" until the user types, so the
    # original code would happily classify an empty string (i.e. score pure
    # padding tokens). Guard and warn instead.
    if not user_input.strip():
        st.warning("Please enter some text to classify.")
    else:
        # Tokenize to a fixed length of 256 (pad + truncate) so the input
        # tensor shape is stable regardless of text length.
        inputs = tokenizer(
            user_input,
            return_tensors="pt",
            max_length=256,
            padding="max_length",
            truncation=True,
        )
        # Inference only: skip autograd bookkeeping.
        with torch.no_grad():
            outputs = model(**inputs)
        # Index of the highest logit along the class dimension (0 or 1).
        prediction = torch.argmax(outputs.logits, dim=1).item()
        # Map the model's label ids to human-readable names.
        label_mapping = {0: "Human-written", 1: "AI-generated"}
        st.write(f"The text is classified as: {label_mapping[prediction]}")
|
|
|