| | import gradio as gr |
| | from fastai.vision.all import * |
| | from huggingface_hub import from_pretrained_fastai |
| | import torch, os |
| |
|
# Keep CPU inference single-threaded: on a small shared host (e.g. a free
# Hugging Face Space) oversubscribing threads slows things down rather than
# speeding them up. Only set OMP_NUM_THREADS if the host hasn't already.
if "OMP_NUM_THREADS" not in os.environ:
    os.environ["OMP_NUM_THREADS"] = "1"
torch.set_num_threads(1)
| |
|
# Download and load the fine-tuned fastai classifier from the Hugging Face Hub.
learn = from_pretrained_fastai("Pablogps/castle-classifier-25")
try:
    # Force full-precision weights for CPU inference. This is best-effort:
    # some fastai/model combinations may not support to_fp32().
    learn.to_fp32()
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are not
    # swallowed; a failing to_fp32() still must not block app startup.
    pass
# Ordered class labels; index i matches probability probs[i] from predict().
labels = learn.dls.vocab
| |
|
def predict(img):
    """Classify a castle image and return {label: probability} for every class.

    `img` is whatever Gradio passes for an image input (a PIL image here);
    PILImage.create normalizes it for the fastai learner.
    """
    image = PILImage.create(img)
    # Only the per-class probabilities are needed; the decoded prediction and
    # its index are discarded.
    _, _, probs = learn.predict(image)
    # zip pairs each vocab label with its probability — clearer than
    # indexing with range(len(labels)).
    return dict(zip(labels, map(float, probs)))
| |
|
# Text shown in the Gradio UI header.
title = "Bad castle predictor"
description = "A bad model that tries to identify the type of castle."
# Clickable example inputs for the Interface (paths relative to the app root).
examples = ["spanish", "french", "japanese"]
| |
|
# Assemble the Interface configuration in one place, then build the app.
_interface_kwargs = dict(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=gr.Label(num_top_classes=3),  # show only the top 3 classes
    title=title,
    description=description,
    examples=examples,
    cache_examples=False,  # don't precompute example predictions at startup
)
demo = gr.Interface(**_interface_kwargs)
| |
|
| | demo.queue(max_size=8).launch(show_error=True, debug=True) |