JungminChung committed (verified)
Commit a776606
1 Parent(s): 8a4ebb9

Upload folder using huggingface_hub

Files changed (6)
  1. Blocks.py +39 -0
  2. README.md +2 -8
  3. components.py +36 -0
  4. hello_gradio.py +11 -0
  5. introduce.py +22 -0
  6. requirements.txt +3 -0
Blocks.py ADDED
@@ -0,0 +1,39 @@
+ import gradio as gr
+ from transformers import pipeline
+
+ classifier = pipeline(
+     "image-classification",
+     model="google/vit-base-patch16-224"
+ )
+
+ def classify_image(image, top_k=3):
+     results = classifier(image)
+     sorted_results = sorted(results,
+                             key=lambda x: x["score"],
+                             reverse=True)
+     output_dict = {result["label"]: result["score"]
+                    for result in sorted_results[:top_k]}
+     return output_dict
+
+ with gr.Blocks() as demo:
+     with gr.Row():
+         gr.HTML("<h1 style='text-align: center;'>ViT 기반 이미지 λΆ„λ₯˜ 데λͺ¨</h1>")
+     with gr.Row():
+         with gr.Column():
+             image = gr.Image(type="pil", label="이미지 μ—…λ‘œλ“œ")
+         with gr.Column():
+             with gr.Row():
+                 top_k = gr.Slider(minimum=1, maximum=10,
+                                   value=3, step=1, label="μƒμœ„ K개 κ²°κ³Ό")
+             with gr.Row():
+                 classify = gr.Button('λΆ„λ₯˜!')
+             with gr.Row():
+                 output = gr.Label(label="λΆ„λ₯˜ κ²°κ³Ό")
+
+     classify.click(
+         fn=classify_image,
+         inputs=[image, top_k],
+         outputs=output
+     )
+
+ demo.launch()
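Once Blocks.py is running locally, the same click handler can also be exercised programmatically. A minimal sketch using gradio_client (installed alongside gradio), assuming the default local address printed by demo.launch() and that the endpoint name falls back to the wrapped function name; "cat.jpg" is a placeholder test image:

from gradio_client import Client, handle_file

client = Client("http://127.0.0.1:7860")   # default local URL printed by demo.launch()
result = client.predict(
    handle_file("cat.jpg"),                # placeholder path to any local test image
    3,                                     # value for the top_k slider
    api_name="/classify_image",            # assumed: derived from the function name, since none is set explicitly
)
print(result)                              # label/confidence payload from the gr.Label output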
README.md CHANGED
@@ -1,12 +1,6 @@
  ---
  title: GradioRecipe
- emoji: πŸ”₯
- colorFrom: blue
- colorTo: blue
+ app_file: Blocks.py
  sdk: gradio
- sdk_version: 5.12.0
- app_file: app.py
- pinned: false
+ sdk_version: 5.11.0
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
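For reference, the Space configuration that results from this change, read directly off the diff above, is the front matter below; app_file now points at Blocks.py, and sdk_version 5.11.0 matches the gradio pin in requirements.txt:

---
title: GradioRecipe
app_file: Blocks.py
sdk: gradio
sdk_version: 5.11.0
---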
components.py ADDED
@@ -0,0 +1,36 @@
+ import gradio as gr
+ from transformers import pipeline
+
+ classifier = pipeline(
+     "image-classification",
+     model="google/vit-base-patch16-224"
+ )
+
+ def classify_image(image, top_k=3):
+     results = classifier(image)
+     sorted_results = sorted(results,
+                             key=lambda x: x["score"],
+                             reverse=True)
+     output_dict = {result["label"]: result["score"]
+                    for result in sorted_results[:top_k]}
+     return output_dict
+
+ demo = gr.Interface(
+     fn=classify_image,
+     inputs=[
+         gr.Image(
+             type="pil",
+             label="이미지 μ—…λ‘œλ“œ"
+         ),
+         gr.Slider(
+             minimum=1,
+             maximum=10,
+             value=3,
+             step=1,
+             label="μƒμœ„ K개 κ²°κ³Ό"
+         ),
+     ],
+     outputs=gr.Label(label="λΆ„λ₯˜ κ²°κ³Ό"),
+ )
+
+ demo.launch()
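The underlying model call can also be smoke-tested without launching the Gradio UI. A minimal standalone sketch (not part of the commit), assuming the packages pinned in requirements.txt are installed and that a local test image named "sample.jpg" exists (placeholder filename):

from PIL import Image
from transformers import pipeline

# Same checkpoint used in Blocks.py and components.py
classifier = pipeline("image-classification", model="google/vit-base-patch16-224")

image = Image.open("sample.jpg")               # placeholder test image
predictions = classifier(image)                # list of {"label": ..., "score": ...} dicts
print({p["label"]: round(p["score"], 4) for p in predictions[:3]})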
hello_gradio.py ADDED
@@ -0,0 +1,11 @@
+ import gradio as gr
+
+ def greet(name):
+     return name + "λ‹˜!, λ°˜κ°‘μŠ΅λ‹ˆλ‹€. \nGradio의 μ„Έκ³„λ‘œ μ˜€μ‹ κ²ƒμ„ ν™˜μ˜ν•΄μš”! :)"
+
+ demo = gr.Interface(
+     fn=greet,
+     inputs="text",
+     outputs="text"
+ )
+ demo.launch(share=True)
introduce.py ADDED
@@ -0,0 +1,22 @@
+ import gradio as gr
+
+ def do_introduce(name, age):
+     return f'μ•ˆλ…•ν•˜μ„Έμš”. μ €λŠ” {name}μž…λ‹ˆλ‹€. μ €μ˜ λ‚˜μ΄λŠ” {age}μ‚΄ μž…λ‹ˆλ‹€.'
+
+ with gr.Blocks() as demo:
+     with gr.Row():
+         with gr.Column():
+             name = gr.Text(label='이름')
+             age = gr.Text(label='λ‚˜μ΄')
+         with gr.Column():
+             intro = gr.Button('μ‹œμž‘!')
+     with gr.Row():
+         output = gr.Text(label='μ†Œκ°œ 문ꡬ')
+
+     intro.click(
+         fn=do_introduce,
+         inputs=[name, age],
+         outputs=output
+     )
+
+ demo.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ gradio==5.11.0
+ transformers==4.48.0
+ torch
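To reproduce the environment locally, this amounts to installing the pinned packages with pip install -r requirements.txt and then starting the demo with python Blocks.py (the app_file configured in README.md); exact commands will vary with your Python setup.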