aliabd (HF staff) committed
Commit 886a968 · verified
1 Parent(s): ed76088

Upload folder using huggingface_hub

Files changed (6):
  1. README.md +6 -6
  2. cheetah.jpg +0 -0
  3. frog.jpg +0 -0
  4. requirements.txt +2 -0
  5. run.ipynb +1 -0
  6. run.py +82 -0
README.md CHANGED
@@ -1,12 +1,12 @@
+
 ---
-title: Sub Block Render Main
-emoji: 👁
+title: sub_block_render_main
+emoji: 🔥
 colorFrom: indigo
-colorTo: purple
+colorTo: indigo
 sdk: gradio
 sdk_version: 4.36.1
-app_file: app.py
+app_file: run.py
 pinned: false
+hf_oauth: true
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
cheetah.jpg ADDED
frog.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,2 @@
+gradio-client @ git+https://github.com/gradio-app/gradio@546d14e4f2773f91be7afc37355327044e7cb6e6#subdirectory=client/python
+https://gradio-builds.s3.amazonaws.com/546d14e4f2773f91be7afc37355327044e7cb6e6/gradio-4.36.1-py3-none-any.whl
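
The two requirements above pin the Space to a specific gradio build: the Python client installed from the repository at commit 546d14e4, plus the matching 4.36.1 wheel. As a quick sanity check (a minimal sketch, not part of the commit, assuming the requirements were installed into the current environment):

# Sanity check, assuming `pip install -r requirements.txt` has already run.
# The pinned wheel is gradio 4.36.1; confirm the runtime actually picked it up.
import gradio as gr
import gradio_client

assert gr.__version__ == "4.36.1", f"unexpected gradio version: {gr.__version__}"
print("gradio", gr.__version__, "| gradio_client", gradio_client.__version__)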
run.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: sub_block_render"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/sub_block_render/cheetah.jpg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/sub_block_render/frog.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "from pathlib import Path\n", "\n", "from PIL import Image\n", "\n", "\n", "root = Path(os.path.abspath(''))\n", "\n", "\n", "def infer(\n", " text,\n", " guidance_scale,\n", "):\n", "\n", " img = (\n", " Image.open(root / \"cheetah.jpg\")\n", " if text == \"Cheetah\"\n", " else Image.open(root / \"frog.jpg\")\n", " )\n", " img = img.resize((224, 224))\n", "\n", " return ([img, img, img, img], \"image\")\n", "\n", "\n", "block = gr.Blocks()\n", "\n", "examples = [\n", " [\"A serious capybara at work, wearing a suit\", 7],\n", " [\"A Squirtle fine dining with a view to the London Eye\", 7],\n", " [\"A tamale food cart in front of a Japanese Castle\", 7],\n", " [\"a graffiti of a robot serving meals to people\", 7],\n", " [\"a beautiful cabin in Attersee, Austria, 3d animation style\", 7],\n", "]\n", "\n", "\n", "with block as demo:\n", " with gr.Row(elem_id=\"prompt-container\", equal_height=True):\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " show_label=False,\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " elem_id=\"prompt-text-input\",\n", " )\n", "\n", " gallery = gr.Gallery(\n", " label=\"Generated images\", show_label=False, elem_id=\"gallery\", rows=2, columns=2\n", " )\n", " out_txt = gr.Textbox(\n", " label=\"Prompt\",\n", " placeholder=\"Enter a prompt to generate an image\",\n", " lines=3,\n", " elem_id=\"prompt-text-input\",\n", " )\n", "\n", " guidance_scale = gr.Slider(\n", " label=\"Guidance Scale\", minimum=0, maximum=50, value=7.5, step=0.1\n", " )\n", "\n", " ex = gr.Examples(\n", " examples=examples,\n", " fn=infer,\n", " inputs=[text, guidance_scale],\n", " outputs=[gallery, out_txt],\n", " cache_examples=True,\n", " )\n", "\n", " text.submit(\n", " infer,\n", " inputs=[text, guidance_scale],\n", " outputs=[gallery, out_txt],\n", " concurrency_id=\"infer\",\n", " concurrency_limit=8,\n", " )\n", "\n", "with gr.Blocks() as demo:\n", " block.render()\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.queue(max_size=10, api_open=False).launch(show_api=False)\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
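
The notebook installs gradio and fetches cheetah.jpg and frog.jpg with !wget cells before running the same code as run.py. For environments without a shell, a standard-library equivalent is sketched below (not part of the commit; it assumes the same raw GitHub URLs the notebook uses remain reachable):

# Alternative to the notebook's !wget cells, using only the standard library.
from urllib.request import urlretrieve

base = "https://github.com/gradio-app/gradio/raw/main/demo/sub_block_render"
for name in ("cheetah.jpg", "frog.jpg"):
    urlretrieve(f"{base}/{name}", name)
    print("downloaded", name)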
run.py ADDED
@@ -0,0 +1,82 @@
+import gradio as gr
+import os
+from pathlib import Path
+
+from PIL import Image
+
+
+root = Path(os.path.dirname(__file__))
+
+
+def infer(
+    text,
+    guidance_scale,
+):
+
+    img = (
+        Image.open(root / "cheetah.jpg")
+        if text == "Cheetah"
+        else Image.open(root / "frog.jpg")
+    )
+    img = img.resize((224, 224))
+
+    return ([img, img, img, img], "image")
+
+
+block = gr.Blocks()
+
+examples = [
+    ["A serious capybara at work, wearing a suit", 7],
+    ["A Squirtle fine dining with a view to the London Eye", 7],
+    ["A tamale food cart in front of a Japanese Castle", 7],
+    ["a graffiti of a robot serving meals to people", 7],
+    ["a beautiful cabin in Attersee, Austria, 3d animation style", 7],
+]
+
+
+with block as demo:
+    with gr.Row(elem_id="prompt-container", equal_height=True):
+        text = gr.Textbox(
+            label="Enter your prompt",
+            show_label=False,
+            max_lines=1,
+            placeholder="Enter your prompt",
+            elem_id="prompt-text-input",
+        )
+
+    gallery = gr.Gallery(
+        label="Generated images", show_label=False, elem_id="gallery", rows=2, columns=2
+    )
+    out_txt = gr.Textbox(
+        label="Prompt",
+        placeholder="Enter a prompt to generate an image",
+        lines=3,
+        elem_id="prompt-text-input",
+    )
+
+    guidance_scale = gr.Slider(
+        label="Guidance Scale", minimum=0, maximum=50, value=7.5, step=0.1
+    )
+
+    ex = gr.Examples(
+        examples=examples,
+        fn=infer,
+        inputs=[text, guidance_scale],
+        outputs=[gallery, out_txt],
+        cache_examples=True,
+    )
+
+    text.submit(
+        infer,
+        inputs=[text, guidance_scale],
+        outputs=[gallery, out_txt],
+        concurrency_id="infer",
+        concurrency_limit=8,
+    )
+
+with gr.Blocks() as demo:
+    block.render()
+
+
+if __name__ == "__main__":
+    demo.queue(max_size=10, api_open=False).launch(show_api=False)
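
run.py builds one gr.Blocks() app (block), attaches its layout and event listeners inside `with block as demo:`, and then mounts the whole thing into a second Blocks via block.render(); that is the pattern the sub_block_render demo exists to exercise. A stripped-down sketch of the same pattern (hypothetical component names, not part of the commit) might look like:

import gradio as gr

# Build an inner Blocks app on its own...
inner = gr.Blocks()
with inner:
    name = gr.Textbox(label="Name")
    greeting = gr.Textbox(label="Greeting")
    name.submit(lambda n: f"Hello, {n}!", inputs=name, outputs=greeting)

# ...then render it inside a wrapper Blocks, as run.py does with `block`.
with gr.Blocks() as wrapper:
    gr.Markdown("Wrapper app")
    inner.render()

if __name__ == "__main__":
    wrapper.launch()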