AnkitS1997 committed
Commit 01a7a71 · 1 Parent(s): 20ed9e7

updated streamlit app

.ipynb_checkpoints/Dockerfile-checkpoint CHANGED
@@ -30,7 +30,7 @@ RUN mkdir -p /app/cache && chmod -R 777 /app/cache
 ENV HF_HOME=/app/cache
 
 # Expose the necessary ports
-EXPOSE 8501 8502
+EXPOSE 7860 8502
 
 # Create a supervisor configuration file
 RUN mkdir -p /etc/supervisor/conf.d/
.ipynb_checkpoints/start-checkpoint.sh CHANGED
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 # Start FastAPI
-uvicorn app:app --host 0.0.0.0 --port 8502 &
+# uvicorn app:app --host 0.0.0.0 --port 8502 &
 
 # Start Streamlit
 streamlit run streamlit_app.py --server.port=7860 --server.address=0.0.0.0
.ipynb_checkpoints/streamlit_app-checkpoint.py CHANGED
@@ -1,6 +1,21 @@
 import streamlit as st
 import requests
 from PIL import Image
+from transformers import AutoProcessor, Blip2ForConditionalGeneration
+import torch
+import io
+
+
+@st.cache_resource
+def load_model():
+    model = Blip2ForConditionalGeneration.from_pretrained("ybelkada/blip2-opt-2.7b-fp16-sharded")
+    model.load_adapter('blip-cpu-model')
+    processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    model.to(device)
+    return model, processor, device
+
+model, processor, device = load_model()
 
 st.title("Image Captioning with Fine-Tuned BLiPv2 Model")
 
@@ -11,8 +26,15 @@ if uploaded_file is not None:
     st.image(image, caption="Uploaded Image", use_column_width=True)
 
     files = {"file": uploaded_file.getvalue()}
-    response = requests.post("http://0.0.0.0:8502/generate-caption/", files=files)
-    caption = response.json().get("caption")
+    print("Sending API request")
+    # response = requests.post("http://0.0.0.0:8502/generate-caption/", files=files)
+    # caption = response.json().get("caption")
+
+    inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
+
+    with torch.no_grad():
+        caption_ids = model.generate(**inputs, max_length=128)
+    caption = processor.decode(caption_ids[0], skip_special_tokens=True)
 
     st.write("Generated Caption:")
     st.write(f"**{caption}**")
Dockerfile CHANGED
@@ -30,7 +30,7 @@ RUN mkdir -p /app/cache && chmod -R 777 /app/cache
 ENV HF_HOME=/app/cache
 
 # Expose the necessary ports
-EXPOSE 8501 8502
+EXPOSE 7860 8502
 
 # Create a supervisor configuration file
 RUN mkdir -p /etc/supervisor/conf.d/
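The EXPOSE change lines the image up with Hugging Face Spaces, which routes external traffic to a single application port (7860 by default for Docker Spaces); 8501 is Streamlit's own default and is no longer used, since start.sh pins --server.port=7860. As a quick sanity check, the sketch below (a hypothetical helper, not part of this repo) polls Streamlit's health endpoint on the new port; the /_stcore/health path assumes a recent Streamlit release.

# Hypothetical smoke test (not part of this commit): confirm the Streamlit
# server answers on the port the container now exposes. Recent Streamlit
# versions serve a health probe at /_stcore/health.
import requests

def streamlit_is_up(host: str = "localhost", port: int = 7860) -> bool:
    """Return True if the Streamlit server responds on host:port."""
    try:
        resp = requests.get(f"http://{host}:{port}/_stcore/health", timeout=5)
        return resp.status_code == 200
    except requests.RequestException:
        return False

if __name__ == "__main__":
    print("Streamlit reachable on 7860:", streamlit_is_up())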
start.sh CHANGED
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 # Start FastAPI
-uvicorn app:app --host 0.0.0.0 --port 8502 &
+# uvicorn app:app --host 0.0.0.0 --port 8502 &
 
 # Start Streamlit
 streamlit run streamlit_app.py --server.port=7860 --server.address=0.0.0.0
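With the uvicorn line commented out, nothing listens on port 8502 any more, so the Streamlit app can no longer reach the REST endpoint; that is why the same commit also comments out the requests.post call and generates captions in-process instead. For context, here is a hedged sketch of what the disabled FastAPI service in app.py presumably looked like. app.py is not part of this diff, so everything below is an assumption apart from the /generate-caption/ route and the "caption" JSON field, which the old client code implies.

# Hypothetical reconstruction of the disabled FastAPI service; app.py is not
# shown in this commit. Only the route and the "caption" response field are
# implied by the old Streamlit client code.
import io

from fastapi import FastAPI, File, UploadFile
from PIL import Image

app = FastAPI()

def run_model(image: Image.Image) -> str:
    # Stub: the real service presumably ran the fine-tuned BLIP-2 model here.
    return "a placeholder caption"

@app.post("/generate-caption/")
async def generate_caption(file: UploadFile = File(...)):
    image = Image.open(io.BytesIO(await file.read())).convert("RGB")
    return {"caption": run_model(image)}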
streamlit_app.py CHANGED
@@ -1,6 +1,21 @@
 import streamlit as st
 import requests
 from PIL import Image
+from transformers import AutoProcessor, Blip2ForConditionalGeneration
+import torch
+import io
+
+
+@st.cache_resource
+def load_model():
+    model = Blip2ForConditionalGeneration.from_pretrained("ybelkada/blip2-opt-2.7b-fp16-sharded")
+    model.load_adapter('blip-cpu-model')
+    processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    model.to(device)
+    return model, processor, device
+
+model, processor, device = load_model()
 
 st.title("Image Captioning with Fine-Tuned BLiPv2 Model")
 
@@ -12,8 +27,14 @@ if uploaded_file is not None:
 
     files = {"file": uploaded_file.getvalue()}
     print("Sending API request")
-    response = requests.post("http://0.0.0.0:8502/generate-caption/", files=files)
-    caption = response.json().get("caption")
+    # response = requests.post("http://0.0.0.0:8502/generate-caption/", files=files)
+    # caption = response.json().get("caption")
+
+    inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
+
+    with torch.no_grad():
+        caption_ids = model.generate(**inputs, max_length=128)
+    caption = processor.decode(caption_ids[0], skip_special_tokens=True)
 
     st.write("Generated Caption:")
     st.write(f"**{caption}**")
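The rewritten streamlit_app.py now does everything in one process: @st.cache_resource keeps the BLIP-2 base checkpoint plus the blip-cpu-model adapter loaded across Streamlit reruns, and generation runs directly on the uploaded image. The sketch below exercises the same pipeline outside Streamlit for quick debugging; the adapter path and test image path are assumptions, and the float16 cast mirrors the app code (on a CPU-only box you may need float32 instead, since some ops lack half-precision CPU kernels).

# Standalone smoke test for the in-process captioning pipeline: a minimal
# sketch assuming ./blip-cpu-model holds the fine-tuned adapter and ./test.jpg
# exists; neither path is confirmed by this diff.
import torch
from PIL import Image
from transformers import AutoProcessor, Blip2ForConditionalGeneration

model = Blip2ForConditionalGeneration.from_pretrained(
    "ybelkada/blip2-opt-2.7b-fp16-sharded"
)
model.load_adapter("blip-cpu-model")  # PEFT adapter; requires `pip install peft`
processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

image = Image.open("test.jpg").convert("RGB")
# float16 matches the fp16-sharded checkpoint; swap to torch.float32 on CPU.
inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)

with torch.no_grad():
    caption_ids = model.generate(**inputs, max_length=128)
print(processor.decode(caption_ids[0], skip_special_tokens=True))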