Varu96 committed on
Commit
47a684d
·
1 Parent(s): 6a6ea70

Added the FRONT_END Jupyter Notebook

Browse files
Files changed (1) hide show
  1. FRONT_END.ipynb +1411 -0
FRONT_END.ipynb ADDED
@@ -0,0 +1,1411 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "nbformat": 4,
3
+ "nbformat_minor": 0,
4
+ "metadata": {
5
+ "colab": {
6
+ "provenance": [],
7
+ "machine_shape": "hm"
8
+ },
9
+ "kernelspec": {
10
+ "name": "python3",
11
+ "display_name": "Python 3"
12
+ },
13
+ "language_info": {
14
+ "name": "python"
15
+ }
16
+ },
17
+ "cells": [
18
+ {
19
+ "cell_type": "code",
20
+ "execution_count": null,
21
+ "metadata": {
22
+ "id": "XICISQU4VQ7j",
23
+ "colab": {
24
+ "base_uri": "https://localhost:8080/"
25
+ },
26
+ "outputId": "d9e49c0c-16ff-4c31-f898-66af11852672"
27
+ },
28
+ "outputs": [
29
+ {
30
+ "output_type": "stream",
31
+ "name": "stdout",
32
+ "text": [
33
+ "/content/drive/MyDrive/Colab Notebooks/Medical_Project/My_new_LLaVA\n"
34
+ ]
35
+ }
36
+ ],
37
+ "source": [
38
+ "%cd /content/drive/MyDrive/Colab Notebooks/Medical_Project/My_new_LLaVA"
39
+ ]
40
+ },
41
+ {
42
+ "cell_type": "code",
43
+ "source": [
44
+ "!git init"
45
+ ],
46
+ "metadata": {
47
+ "id": "VsFOxVleVRpA",
48
+ "colab": {
49
+ "base_uri": "https://localhost:8080/"
50
+ },
51
+ "outputId": "678568f0-b355-423e-83e7-6340b236b80a"
52
+ },
53
+ "execution_count": null,
54
+ "outputs": [
55
+ {
56
+ "output_type": "stream",
57
+ "name": "stdout",
58
+ "text": [
59
+ "\u001b[33mhint: Using 'master' as the name for the initial branch. This default branch name\u001b[m\n",
60
+ "\u001b[33mhint: is subject to change. To configure the initial branch name to use in all\u001b[m\n",
61
+ "\u001b[33mhint: of your new repositories, which will suppress this warning, call:\u001b[m\n",
62
+ "\u001b[33mhint: \u001b[m\n",
63
+ "\u001b[33mhint: \tgit config --global init.defaultBranch <name>\u001b[m\n",
64
+ "\u001b[33mhint: \u001b[m\n",
65
+ "\u001b[33mhint: Names commonly chosen instead of 'master' are 'main', 'trunk' and\u001b[m\n",
66
+ "\u001b[33mhint: 'development'. The just-created branch can be renamed via this command:\u001b[m\n",
67
+ "\u001b[33mhint: \u001b[m\n",
68
+ "\u001b[33mhint: \tgit branch -m <name>\u001b[m\n",
69
+ "Initialized empty Git repository in /content/drive/MyDrive/Colab Notebooks/Medical_Project/My_new_LLaVA/.git/\n"
70
+ ]
71
+ }
72
+ ]
73
+ },
74
+ {
75
+ "cell_type": "code",
76
+ "source": [
77
+ "!git remote add origin https://huggingface.co/Varu96/health\n"
78
+ ],
79
+ "metadata": {
80
+ "colab": {
81
+ "base_uri": "https://localhost:8080/"
82
+ },
83
+ "id": "gVhjQ_NAcd3V",
84
+ "outputId": "b10be489-58f8-4672-c7e0-bd50b89c3bcb"
85
+ },
86
+ "execution_count": null,
87
+ "outputs": [
88
+ {
89
+ "output_type": "stream",
90
+ "name": "stdout",
91
+ "text": [
92
+ "error: remote origin already exists.\n"
93
+ ]
94
+ }
95
+ ]
96
+ },
97
+ {
98
+ "cell_type": "code",
99
+ "source": [
100
+ "!git add ."
101
+ ],
102
+ "metadata": {
103
+ "colab": {
104
+ "base_uri": "https://localhost:8080/"
105
+ },
106
+ "id": "ZSEkO1yheEVn",
107
+ "outputId": "320a2c7c-26b2-4224-fc7e-9a4058b8436d"
108
+ },
109
+ "execution_count": null,
110
+ "outputs": [
111
+ {
112
+ "output_type": "stream",
113
+ "name": "stdout",
114
+ "text": [
115
+ "warning: adding embedded git repository: LLaVA\n",
116
+ "\u001b[33mhint: You've added another git repository inside your current repository.\u001b[m\n",
117
+ "\u001b[33mhint: Clones of the outer repository will not contain the contents of\u001b[m\n",
118
+ "\u001b[33mhint: the embedded repository and will not know how to obtain it.\u001b[m\n",
119
+ "\u001b[33mhint: If you meant to add a submodule, use:\u001b[m\n",
120
+ "\u001b[33mhint: \u001b[m\n",
121
+ "\u001b[33mhint: \tgit submodule add <url> LLaVA\u001b[m\n",
122
+ "\u001b[33mhint: \u001b[m\n",
123
+ "\u001b[33mhint: If you added this path by mistake, you can remove it from the\u001b[m\n",
124
+ "\u001b[33mhint: index with:\u001b[m\n",
125
+ "\u001b[33mhint: \u001b[m\n",
126
+ "\u001b[33mhint: \tgit rm --cached LLaVA\u001b[m\n",
127
+ "\u001b[33mhint: \u001b[m\n",
128
+ "\u001b[33mhint: See \"git help submodule\" for more information.\u001b[m\n",
129
+ "^C\n"
130
+ ]
131
+ }
132
+ ]
133
+ },
134
+ {
135
+ "cell_type": "code",
136
+ "source": [
137
+ "!git status"
138
+ ],
139
+ "metadata": {
140
+ "colab": {
141
+ "base_uri": "https://localhost:8080/"
142
+ },
143
+ "id": "aLP6pAyxeN90",
144
+ "outputId": "82ab6973-8b67-4fa5-a7ef-bd7ae6009faf"
145
+ },
146
+ "execution_count": null,
147
+ "outputs": [
148
+ {
149
+ "output_type": "stream",
150
+ "name": "stdout",
151
+ "text": [
152
+ "On branch master\n",
153
+ "\n",
154
+ "No commits yet\n",
155
+ "\n",
156
+ "Untracked files:\n",
157
+ " (use \"git add <file>...\" to include in what will be committed)\n",
158
+ "\t\u001b[31mCopy of llava-1.5-fine-tune-gpu-a100.ipynb\u001b[m\n",
159
+ "\t\u001b[31mLLaVA/\u001b[m\n",
160
+ "\t\u001b[31mcheckpoints/\u001b[m\n",
161
+ "\t\u001b[31mllava-ftmodel/\u001b[m\n",
162
+ "\t\u001b[31mwandb/\u001b[m\n",
163
+ "\n",
164
+ "nothing added to commit but untracked files present (use \"git add\" to track)\n"
165
+ ]
166
+ }
167
+ ]
168
+ },
169
+ {
170
+ "cell_type": "code",
171
+ "source": [],
172
+ "metadata": {
173
+ "id": "4xnwMAXUeOED"
174
+ },
175
+ "execution_count": null,
176
+ "outputs": []
177
+ },
178
+ {
179
+ "cell_type": "code",
180
+ "source": [],
181
+ "metadata": {
182
+ "id": "t9IE6XO8eOGX"
183
+ },
184
+ "execution_count": null,
185
+ "outputs": []
186
+ },
187
+ {
188
+ "cell_type": "code",
189
+ "source": [],
190
+ "metadata": {
191
+ "id": "QgbX91EaeOJA"
192
+ },
193
+ "execution_count": null,
194
+ "outputs": []
195
+ },
196
+ {
197
+ "cell_type": "code",
198
+ "source": [
199
+ "!git clone https://huggingface.co/spaces/Varu96/health"
200
+ ],
201
+ "metadata": {
202
+ "colab": {
203
+ "base_uri": "https://localhost:8080/"
204
+ },
205
+ "id": "uu5ZqoDIcdx2",
206
+ "outputId": "981f1d11-abab-4c5c-b08d-a1492e5d6e41"
207
+ },
208
+ "execution_count": null,
209
+ "outputs": [
210
+ {
211
+ "output_type": "stream",
212
+ "name": "stdout",
213
+ "text": [
214
+ "Cloning into 'health'...\n",
215
+ "fatal: could not read Username for 'https://huggingface.co': No such device or address\n"
216
+ ]
217
+ }
218
+ ]
219
+ },
220
+ {
221
+ "cell_type": "code",
222
+ "source": [],
223
+ "metadata": {
224
+ "id": "yn3EBEG3cdvf"
225
+ },
226
+ "execution_count": null,
227
+ "outputs": []
228
+ },
229
+ {
230
+ "cell_type": "code",
231
+ "source": [
232
+ "%cd /content/drive/MyDrive/Colab Notebooks/Medical_Project/My_new_LLaVA/LLaVA"
233
+ ],
234
+ "metadata": {
235
+ "id": "gCBnzorOcdtQ",
236
+ "colab": {
237
+ "base_uri": "https://localhost:8080/"
238
+ },
239
+ "outputId": "2f7ddcdc-d13e-468d-8357-e4c7d878c891"
240
+ },
241
+ "execution_count": 4,
242
+ "outputs": [
243
+ {
244
+ "output_type": "stream",
245
+ "name": "stdout",
246
+ "text": [
247
+ "/content/drive/MyDrive/Colab Notebooks/Medical_Project/My_new_LLaVA/LLaVA\n"
248
+ ]
249
+ }
250
+ ]
251
+ },
252
+ {
253
+ "cell_type": "code",
254
+ "source": [
255
+ "!pip install gradio"
256
+ ],
257
+ "metadata": {
258
+ "colab": {
259
+ "base_uri": "https://localhost:8080/"
260
+ },
261
+ "id": "iJz4Bub-wYvG",
262
+ "outputId": "0823d583-c3ae-445d-e177-f68cac65192f"
263
+ },
264
+ "execution_count": 5,
265
+ "outputs": [
266
+ {
267
+ "output_type": "stream",
268
+ "name": "stdout",
269
+ "text": [
270
+ "Collecting gradio\n",
271
+ " Downloading gradio-4.44.1-py3-none-any.whl.metadata (15 kB)\n",
272
+ "Collecting aiofiles<24.0,>=22.0 (from gradio)\n",
273
+ " Downloading aiofiles-23.2.1-py3-none-any.whl.metadata (9.7 kB)\n",
274
+ "Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.10/dist-packages (from gradio) (3.7.1)\n",
275
+ "Collecting fastapi<1.0 (from gradio)\n",
276
+ " Downloading fastapi-0.115.0-py3-none-any.whl.metadata (27 kB)\n",
277
+ "Collecting ffmpy (from gradio)\n",
278
+ " Downloading ffmpy-0.4.0-py3-none-any.whl.metadata (2.9 kB)\n",
279
+ "Collecting gradio-client==1.3.0 (from gradio)\n",
280
+ " Downloading gradio_client-1.3.0-py3-none-any.whl.metadata (7.1 kB)\n",
281
+ "Collecting httpx>=0.24.1 (from gradio)\n",
282
+ " Downloading httpx-0.27.2-py3-none-any.whl.metadata (7.1 kB)\n",
283
+ "Requirement already satisfied: huggingface-hub>=0.19.3 in /usr/local/lib/python3.10/dist-packages (from gradio) (0.24.7)\n",
284
+ "Requirement already satisfied: importlib-resources<7.0,>=1.3 in /usr/local/lib/python3.10/dist-packages (from gradio) (6.4.5)\n",
285
+ "Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.10/dist-packages (from gradio) (3.1.4)\n",
286
+ "Requirement already satisfied: markupsafe~=2.0 in /usr/local/lib/python3.10/dist-packages (from gradio) (2.1.5)\n",
287
+ "Requirement already satisfied: matplotlib~=3.0 in /usr/local/lib/python3.10/dist-packages (from gradio) (3.7.1)\n",
288
+ "Requirement already satisfied: numpy<3.0,>=1.0 in /usr/local/lib/python3.10/dist-packages (from gradio) (1.26.4)\n",
289
+ "Collecting orjson~=3.0 (from gradio)\n",
290
+ " Downloading orjson-3.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (50 kB)\n",
291
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m50.4/50.4 kB\u001b[0m \u001b[31m3.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
292
+ "\u001b[?25hRequirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from gradio) (24.1)\n",
293
+ "Requirement already satisfied: pandas<3.0,>=1.0 in /usr/local/lib/python3.10/dist-packages (from gradio) (2.1.4)\n",
294
+ "Requirement already satisfied: pillow<11.0,>=8.0 in /usr/local/lib/python3.10/dist-packages (from gradio) (10.4.0)\n",
295
+ "Requirement already satisfied: pydantic>=2.0 in /usr/local/lib/python3.10/dist-packages (from gradio) (2.9.2)\n",
296
+ "Collecting pydub (from gradio)\n",
297
+ " Downloading pydub-0.25.1-py2.py3-none-any.whl.metadata (1.4 kB)\n",
298
+ "Collecting python-multipart>=0.0.9 (from gradio)\n",
299
+ " Downloading python_multipart-0.0.12-py3-none-any.whl.metadata (1.9 kB)\n",
300
+ "Requirement already satisfied: pyyaml<7.0,>=5.0 in /usr/local/lib/python3.10/dist-packages (from gradio) (6.0.2)\n",
301
+ "Collecting ruff>=0.2.2 (from gradio)\n",
302
+ " Downloading ruff-0.6.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (25 kB)\n",
303
+ "Collecting semantic-version~=2.0 (from gradio)\n",
304
+ " Downloading semantic_version-2.10.0-py2.py3-none-any.whl.metadata (9.7 kB)\n",
305
+ "Collecting tomlkit==0.12.0 (from gradio)\n",
306
+ " Downloading tomlkit-0.12.0-py3-none-any.whl.metadata (2.7 kB)\n",
307
+ "Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.10/dist-packages (from gradio) (0.12.5)\n",
308
+ "Requirement already satisfied: typing-extensions~=4.0 in /usr/local/lib/python3.10/dist-packages (from gradio) (4.12.2)\n",
309
+ "Requirement already satisfied: urllib3~=2.0 in /usr/local/lib/python3.10/dist-packages (from gradio) (2.2.3)\n",
310
+ "Collecting uvicorn>=0.14.0 (from gradio)\n",
311
+ " Downloading uvicorn-0.31.0-py3-none-any.whl.metadata (6.6 kB)\n",
312
+ "Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from gradio-client==1.3.0->gradio) (2024.6.1)\n",
313
+ "Collecting websockets<13.0,>=10.0 (from gradio-client==1.3.0->gradio)\n",
314
+ " Downloading websockets-12.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (6.6 kB)\n",
315
+ "Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.10/dist-packages (from anyio<5.0,>=3.0->gradio) (3.10)\n",
316
+ "Requirement already satisfied: sniffio>=1.1 in /usr/local/lib/python3.10/dist-packages (from anyio<5.0,>=3.0->gradio) (1.3.1)\n",
317
+ "Requirement already satisfied: exceptiongroup in /usr/local/lib/python3.10/dist-packages (from anyio<5.0,>=3.0->gradio) (1.2.2)\n",
318
+ "Collecting starlette<0.39.0,>=0.37.2 (from fastapi<1.0->gradio)\n",
319
+ " Downloading starlette-0.38.6-py3-none-any.whl.metadata (6.0 kB)\n",
320
+ "Requirement already satisfied: certifi in /usr/local/lib/python3.10/dist-packages (from httpx>=0.24.1->gradio) (2024.8.30)\n",
321
+ "Collecting httpcore==1.* (from httpx>=0.24.1->gradio)\n",
322
+ " Downloading httpcore-1.0.5-py3-none-any.whl.metadata (20 kB)\n",
323
+ "Collecting h11<0.15,>=0.13 (from httpcore==1.*->httpx>=0.24.1->gradio)\n",
324
+ " Downloading h11-0.14.0-py3-none-any.whl.metadata (8.2 kB)\n",
325
+ "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from huggingface-hub>=0.19.3->gradio) (3.16.1)\n",
326
+ "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from huggingface-hub>=0.19.3->gradio) (2.32.3)\n",
327
+ "Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub>=0.19.3->gradio) (4.66.5)\n",
328
+ "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib~=3.0->gradio) (1.3.0)\n",
329
+ "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib~=3.0->gradio) (0.12.1)\n",
330
+ "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib~=3.0->gradio) (4.53.1)\n",
331
+ "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib~=3.0->gradio) (1.4.7)\n",
332
+ "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib~=3.0->gradio) (3.1.4)\n",
333
+ "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib~=3.0->gradio) (2.8.2)\n",
334
+ "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas<3.0,>=1.0->gradio) (2024.2)\n",
335
+ "Requirement already satisfied: tzdata>=2022.1 in /usr/local/lib/python3.10/dist-packages (from pandas<3.0,>=1.0->gradio) (2024.1)\n",
336
+ "Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.10/dist-packages (from pydantic>=2.0->gradio) (0.7.0)\n",
337
+ "Requirement already satisfied: pydantic-core==2.23.4 in /usr/local/lib/python3.10/dist-packages (from pydantic>=2.0->gradio) (2.23.4)\n",
338
+ "Requirement already satisfied: click>=8.0.0 in /usr/local/lib/python3.10/dist-packages (from typer<1.0,>=0.12->gradio) (8.1.7)\n",
339
+ "Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.10/dist-packages (from typer<1.0,>=0.12->gradio) (1.5.4)\n",
340
+ "Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.10/dist-packages (from typer<1.0,>=0.12->gradio) (13.8.1)\n",
341
+ "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib~=3.0->gradio) (1.16.0)\n",
342
+ "Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.10/dist-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio) (3.0.0)\n",
343
+ "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio) (2.18.0)\n",
344
+ "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->huggingface-hub>=0.19.3->gradio) (3.3.2)\n",
345
+ "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio) (0.1.2)\n",
346
+ "Downloading gradio-4.44.1-py3-none-any.whl (18.1 MB)\n",
347
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m18.1/18.1 MB\u001b[0m \u001b[31m103.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
348
+ "\u001b[?25hDownloading gradio_client-1.3.0-py3-none-any.whl (318 kB)\n",
349
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m318.7/318.7 kB\u001b[0m \u001b[31m18.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
350
+ "\u001b[?25hDownloading tomlkit-0.12.0-py3-none-any.whl (37 kB)\n",
351
+ "Downloading aiofiles-23.2.1-py3-none-any.whl (15 kB)\n",
352
+ "Downloading fastapi-0.115.0-py3-none-any.whl (94 kB)\n",
353
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m94.6/94.6 kB\u001b[0m \u001b[31m6.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
354
+ "\u001b[?25hDownloading httpx-0.27.2-py3-none-any.whl (76 kB)\n",
355
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m76.4/76.4 kB\u001b[0m \u001b[31m4.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
356
+ "\u001b[?25hDownloading httpcore-1.0.5-py3-none-any.whl (77 kB)\n",
357
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m77.9/77.9 kB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
358
+ "\u001b[?25hDownloading orjson-3.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (141 kB)\n",
359
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m141.9/141.9 kB\u001b[0m \u001b[31m8.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
360
+ "\u001b[?25hDownloading python_multipart-0.0.12-py3-none-any.whl (23 kB)\n",
361
+ "Downloading ruff-0.6.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (10.9 MB)\n",
362
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m10.9/10.9 MB\u001b[0m \u001b[31m127.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
363
+ "\u001b[?25hDownloading semantic_version-2.10.0-py2.py3-none-any.whl (15 kB)\n",
364
+ "Downloading uvicorn-0.31.0-py3-none-any.whl (63 kB)\n",
365
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m63.7/63.7 kB\u001b[0m \u001b[31m4.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
366
+ "\u001b[?25hDownloading ffmpy-0.4.0-py3-none-any.whl (5.8 kB)\n",
367
+ "Downloading pydub-0.25.1-py2.py3-none-any.whl (32 kB)\n",
368
+ "Downloading h11-0.14.0-py3-none-any.whl (58 kB)\n",
369
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m3.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
370
+ "\u001b[?25hDownloading starlette-0.38.6-py3-none-any.whl (71 kB)\n",
371
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m71.5/71.5 kB\u001b[0m \u001b[31m4.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
372
+ "\u001b[?25hDownloading websockets-12.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (130 kB)\n",
373
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m130.2/130.2 kB\u001b[0m \u001b[31m9.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
374
+ "\u001b[?25hInstalling collected packages: pydub, websockets, tomlkit, semantic-version, ruff, python-multipart, orjson, h11, ffmpy, aiofiles, uvicorn, starlette, httpcore, httpx, fastapi, gradio-client, gradio\n",
375
+ "Successfully installed aiofiles-23.2.1 fastapi-0.115.0 ffmpy-0.4.0 gradio-4.44.1 gradio-client-1.3.0 h11-0.14.0 httpcore-1.0.5 httpx-0.27.2 orjson-3.10.7 pydub-0.25.1 python-multipart-0.0.12 ruff-0.6.8 semantic-version-2.10.0 starlette-0.38.6 tomlkit-0.12.0 uvicorn-0.31.0 websockets-12.0\n"
376
+ ]
377
+ }
378
+ ]
379
+ },
380
+ {
381
+ "cell_type": "code",
382
+ "source": [
383
+ "import json\n",
384
+ "import os\n",
385
+ "from transformers import AutoProcessor, AutoModelForVision2Seq\n",
386
+ "import torch\n",
387
+ "from PIL import Image\n",
388
+ "import gradio as gr\n",
389
+ "import subprocess\n",
390
+ "from llava.model.builder import load_pretrained_model\n",
391
+ "from llava.mm_utils import get_model_name_from_path\n",
392
+ "from llava.eval.run_llava import eval_model\n",
393
+ "\n",
394
+ "# Load the LLaVA model and processor\n",
395
+ "llava_model_path = \"/content/drive/MyDrive/Colab Notebooks/Medical_Project/My_new_LLaVA/llava-ftmodel\"\n",
396
+ "\n",
397
+ "# Load the LLaVA-Med model and processor\n",
398
+ "llava_med_model_path = \"/content/drive/MyDrive/Colab Notebooks/Medical_Project/My_new_LLaVA/llava-ftmodel\"\n",
399
+ "\n",
400
+ "# Args class to store arguments for LLaVA models\n",
401
+ "class Args:\n",
402
+ " def __init__(self, model_path, model_base, model_name, query, image_path, conv_mode, image_file, sep, temperature, top_p, num_beams, max_new_tokens):\n",
403
+ " self.model_path = model_path\n",
404
+ " self.model_base = model_base\n",
405
+ " self.model_name = model_name\n",
406
+ " self.query = query\n",
407
+ " self.image_path = image_path\n",
408
+ " self.conv_mode = conv_mode\n",
409
+ " self.image_file = image_file\n",
410
+ " self.sep = sep\n",
411
+ " self.temperature = temperature\n",
412
+ " self.top_p = top_p\n",
413
+ " self.num_beams = num_beams\n",
414
+ " self.max_new_tokens = max_new_tokens\n",
415
+ "\n",
416
+ "# Function to predict using Idefics2\n",
417
+ "# def predict_idefics2(image, question, temperature, max_tokens):\n",
418
+ "# image = image.convert(\"RGB\")\n",
419
+ "# images = [image]\n",
420
+ "\n",
421
+ "# messages = [\n",
422
+ "# {\n",
423
+ "# \"role\": \"user\",\n",
424
+ "# \"content\": [\n",
425
+ "# {\"type\": \"image\"},\n",
426
+ "# {\"type\": \"text\", \"text\": question}\n",
427
+ "# ]\n",
428
+ "# }\n",
429
+ "# ]\n",
430
+ "# input_text = idefics2_processor.apply_chat_template(messages, add_generation_prompt=False).strip()\n",
431
+ "\n",
432
+ "# inputs = idefics2_processor(text=[input_text], images=images, return_tensors=\"pt\", padding=True).to(\"cuda:0\")\n",
433
+ "\n",
434
+ "# with torch.no_grad():\n",
435
+ "# outputs = idefics2_model.generate(**inputs, max_length=max_tokens, max_new_tokens=max_tokens, temperature=temperature)\n",
436
+ "\n",
437
+ "# predictions = idefics2_processor.decode(outputs[0], skip_special_tokens=True)\n",
438
+ "\n",
439
+ "# return predictions\n",
440
+ "\n",
441
+ "# Function to predict using LLaVA\n",
442
+ "def predict_llava(image, question, temperature, max_tokens):\n",
443
+ " # Save the image temporarily\n",
444
+ " image.save(\"temp_image.jpg\")\n",
445
+ "\n",
446
+ " # Setup evaluation arguments\n",
447
+ " args = Args(\n",
448
+ " model_path=llava_model_path,\n",
449
+ " model_base=None,\n",
450
+ " model_name=get_model_name_from_path(llava_model_path),\n",
451
+ " query=question,\n",
452
+ " image_path=\"temp_image.jpg\",\n",
453
+ " conv_mode=None,\n",
454
+ " image_file=\"temp_image.jpg\",\n",
455
+ " sep=\",\",\n",
456
+ " temperature=temperature,\n",
457
+ " top_p=None,\n",
458
+ " num_beams=1,\n",
459
+ " max_new_tokens=max_tokens\n",
460
+ " )\n",
461
+ "\n",
462
+ " # Generate the answer using the selected model\n",
463
+ " output = eval_model(args)\n",
464
+ "\n",
465
+ " return output\n",
466
+ "\n",
467
+ "# Function to predict using LLaVA-Med\n",
468
+ "def predict_llava_med(image, question, temperature, max_tokens):\n",
469
+ " # Save the image temporarily\n",
470
+ " image_path = \"temp_image_med.jpg\"\n",
471
+ " image.save(image_path)\n",
472
+ "\n",
473
+ " # Command to run the LLaVA-Med model\n",
474
+ " command = [\n",
475
+ " \"python\", \"-m\", \"llava.eval.run_llava\",\n",
476
+ " \"--model-name\", llava_med_model_path,\n",
477
+ " \"--image-file\", image_path,\n",
478
+ " \"--query\", question,\n",
479
+ " \"--temperature\", str(temperature),\n",
480
+ " \"--max-new-tokens\", str(max_tokens)\n",
481
+ " ]\n",
482
+ "\n",
483
+ " # Execute the command and capture the output\n",
484
+ " result = subprocess.run(command, capture_output=True, text=True)\n",
485
+ "\n",
486
+ " return result.stdout.strip() # Return the output as text\n",
487
+ "\n",
488
+ "# Main prediction function\n",
489
+ "def predict(model_name, image, text, temperature, max_tokens):\n",
490
+ " if model_name == \"LLaVA\":\n",
491
+ " return predict_llava(image, text, temperature, max_tokens)\n",
492
+ "elif model_name == \"LLaVA_Med\":\n",
493
+ " return predict_llava_med(image, text, temperature, max_tokens)\n",
494
+ "\n",
495
+ "# Define the Gradio interface\n",
496
+ "interface = gr.Interface(\n",
497
+ " fn=predict,\n",
498
+ " inputs=[\n",
499
+ " gr.Radio(choices=[\"LLaVA\", \"LLaVA_Med\"], label=\"Select Model\"),\n",
500
+ " gr.Image(type=\"pil\", label=\"Input Image\"),\n",
501
+ " gr.Textbox(label=\"Input Text\"),\n",
502
+ " gr.Slider(minimum=0.1, maximum=1.0, label=\"Temperature\"),\n",
503
+ " gr.Slider(minimum=1, maximum=512, label=\"Max Tokens\"),\n",
504
+ " ],\n",
505
+ " outputs=gr.Textbox(label=\"Output Text\"),\n",
506
+ " title=\"Multimodal LLM Interface\",\n",
507
+ " description=\"Switch between models and adjust parameters.\",\n",
508
+ ")\n",
509
+ "\n",
510
+ "# Launch the Gradio interface\n",
511
+ "interface.launch()\n"
512
+ ],
513
+ "metadata": {
514
+ "id": "pCJxQjryVRrh",
515
+ "colab": {
516
+ "base_uri": "https://localhost:8080/",
517
+ "height": 646
518
+ },
519
+ "outputId": "3f381d46-23d6-455f-8147-1491b52d443b"
520
+ },
521
+ "execution_count": 6,
522
+ "outputs": [
523
+ {
524
+ "output_type": "stream",
525
+ "name": "stdout",
526
+ "text": [
527
+ "Setting queue=True in a Colab notebook requires sharing enabled. Setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n",
528
+ "\n",
529
+ "Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n",
530
+ "Running on public URL: https://4eea8c29f43ab04905.gradio.live\n",
531
+ "\n",
532
+ "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n"
533
+ ]
534
+ },
535
+ {
536
+ "output_type": "display_data",
537
+ "data": {
538
+ "text/plain": [
539
+ "<IPython.core.display.HTML object>"
540
+ ],
541
+ "text/html": [
542
+ "<div><iframe src=\"https://4eea8c29f43ab04905.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
543
+ ]
544
+ },
545
+ "metadata": {}
546
+ },
547
+ {
548
+ "output_type": "execute_result",
549
+ "data": {
550
+ "text/plain": []
551
+ },
552
+ "metadata": {},
553
+ "execution_count": 6
554
+ }
555
+ ]
556
+ },
557
+ {
558
+ "cell_type": "code",
559
+ "source": [
560
+ "from google.colab import drive\n",
561
+ "drive.mount('/content/drive')"
562
+ ],
563
+ "metadata": {
564
+ "colab": {
565
+ "base_uri": "https://localhost:8080/"
566
+ },
567
+ "id": "sPHT9J5rX_oX",
568
+ "outputId": "8d60533a-b70d-45c0-a28e-9f655f7ecfc0"
569
+ },
570
+ "execution_count": null,
571
+ "outputs": [
572
+ {
573
+ "output_type": "stream",
574
+ "name": "stdout",
575
+ "text": [
576
+ "Mounted at /content/drive\n"
577
+ ]
578
+ }
579
+ ]
580
+ },
581
+ {
582
+ "cell_type": "code",
583
+ "source": [],
584
+ "metadata": {
585
+ "id": "YBSsgQNwVRto",
586
+ "colab": {
587
+ "base_uri": "https://localhost:8080/"
588
+ },
589
+ "outputId": "6ca48e1b-3d74-44d2-d5ac-10a9fee3004b"
590
+ },
591
+ "execution_count": 1,
592
+ "outputs": [
593
+ {
594
+ "output_type": "stream",
595
+ "name": "stdout",
596
+ "text": [
597
+ "/bin/bash: line 1: brew: command not found\n"
598
+ ]
599
+ }
600
+ ]
601
+ },
602
+ {
603
+ "cell_type": "code",
604
+ "source": [
605
+ "!pip install pyngrok"
606
+ ],
607
+ "metadata": {
608
+ "id": "UjB_xxubVRu7",
609
+ "colab": {
610
+ "base_uri": "https://localhost:8080/"
611
+ },
612
+ "outputId": "d9208333-b8b5-4e58-bc15-b7bca1f0a9e9"
613
+ },
614
+ "execution_count": 2,
615
+ "outputs": [
616
+ {
617
+ "output_type": "stream",
618
+ "name": "stdout",
619
+ "text": [
620
+ "Collecting pyngrok\n",
621
+ " Downloading pyngrok-7.2.0-py3-none-any.whl.metadata (7.4 kB)\n",
622
+ "Requirement already satisfied: PyYAML>=5.1 in /usr/local/lib/python3.10/dist-packages (from pyngrok) (6.0.2)\n",
623
+ "Downloading pyngrok-7.2.0-py3-none-any.whl (22 kB)\n",
624
+ "Installing collected packages: pyngrok\n",
625
+ "Successfully installed pyngrok-7.2.0\n"
626
+ ]
627
+ }
628
+ ]
629
+ },
630
+ {
631
+ "cell_type": "code",
632
+ "source": [
633
+ "!ngrok authtoken 2mpmC3TjnTE7KBulOBWhzy6K1vU_5PJRy3nmWHYCMyyRwUe87"
634
+ ],
635
+ "metadata": {
636
+ "colab": {
637
+ "base_uri": "https://localhost:8080/"
638
+ },
639
+ "id": "cR3mulN41B5a",
640
+ "outputId": "d2e3537b-24a0-48f2-8d20-560ca92852f4"
641
+ },
642
+ "execution_count": 3,
643
+ "outputs": [
644
+ {
645
+ "output_type": "stream",
646
+ "name": "stdout",
647
+ "text": [
648
+ "Authtoken saved to configuration file: /root/.config/ngrok/ngrok.yml\n"
649
+ ]
650
+ }
651
+ ]
652
+ },
653
+ {
654
+ "cell_type": "code",
655
+ "source": [],
656
+ "metadata": {
657
+ "id": "zJ8TVJh_1dv5"
658
+ },
659
+ "execution_count": null,
660
+ "outputs": []
661
+ },
662
+ {
663
+ "cell_type": "code",
664
+ "source": [
665
+ "import gradio as gr\n",
666
+ "from pyngrok import ngrok\n",
667
+ "\n",
668
+ "# Define your Gradio app (for example)\n",
669
+ "def my_model(input_text):\n",
670
+ " return \"Hello \" + input_text\n",
671
+ "\n",
672
+ "# Create Gradio Interface\n",
673
+ "iface = gr.Interface(fn=my_model, inputs=\"text\", outputs=\"text\")\n",
674
+ "\n",
675
+ "# Launch Gradio app without sharing\n",
676
+ "iface.launch(share=False)\n",
677
+ "\n",
678
+ "# Create a public URL using ngrok\n",
679
+ "public_url = ngrok.connect(7860)\n",
680
+ "print(f\"Public URL: {public_url}\")\n"
681
+ ],
682
+ "metadata": {
683
+ "colab": {
684
+ "base_uri": "https://localhost:8080/",
685
+ "height": 634
686
+ },
687
+ "id": "TfWWaO7o1YVt",
688
+ "outputId": "7f25c22c-deed-4aa9-f1cc-75ee299592a4"
689
+ },
690
+ "execution_count": 6,
691
+ "outputs": [
692
+ {
693
+ "output_type": "stream",
694
+ "name": "stdout",
695
+ "text": [
696
+ "Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n",
697
+ "Note: opening Chrome Inspector may crash demo inside Colab notebooks.\n",
698
+ "\n",
699
+ "To create a public link, set `share=True` in `launch()`.\n"
700
+ ]
701
+ },
702
+ {
703
+ "output_type": "display_data",
704
+ "data": {
705
+ "text/plain": [
706
+ "<IPython.core.display.Javascript object>"
707
+ ],
708
+ "application/javascript": [
709
+ "(async (port, path, width, height, cache, element) => {\n",
710
+ " if (!google.colab.kernel.accessAllowed && !cache) {\n",
711
+ " return;\n",
712
+ " }\n",
713
+ " element.appendChild(document.createTextNode(''));\n",
714
+ " const url = await google.colab.kernel.proxyPort(port, {cache});\n",
715
+ "\n",
716
+ " const external_link = document.createElement('div');\n",
717
+ " external_link.innerHTML = `\n",
718
+ " <div style=\"font-family: monospace; margin-bottom: 0.5rem\">\n",
719
+ " Running on <a href=${new URL(path, url).toString()} target=\"_blank\">\n",
720
+ " https://localhost:${port}${path}\n",
721
+ " </a>\n",
722
+ " </div>\n",
723
+ " `;\n",
724
+ " element.appendChild(external_link);\n",
725
+ "\n",
726
+ " const iframe = document.createElement('iframe');\n",
727
+ " iframe.src = new URL(path, url).toString();\n",
728
+ " iframe.height = height;\n",
729
+ " iframe.allow = \"autoplay; camera; microphone; clipboard-read; clipboard-write;\"\n",
730
+ " iframe.width = width;\n",
731
+ " iframe.style.border = 0;\n",
732
+ " element.appendChild(iframe);\n",
733
+ " })(7860, \"/\", \"100%\", 500, false, window.element)"
734
+ ]
735
+ },
736
+ "metadata": {}
737
+ },
738
+ {
739
+ "output_type": "stream",
740
+ "name": "stdout",
741
+ "text": [
742
+ "Public URL: NgrokTunnel: \"https://356b-34-141-162-119.ngrok-free.app\" -> \"http://localhost:7860\"\n"
743
+ ]
744
+ }
745
+ ]
746
+ },
747
+ {
748
+ "cell_type": "code",
749
+ "source": [],
750
+ "metadata": {
751
+ "id": "l8g-3Cpk1cWp"
752
+ },
753
+ "execution_count": null,
754
+ "outputs": []
755
+ },
756
+ {
757
+ "cell_type": "code",
758
+ "source": [],
759
+ "metadata": {
760
+ "id": "PFSD8Btm2Lh-"
761
+ },
762
+ "execution_count": null,
763
+ "outputs": []
764
+ },
765
+ {
766
+ "cell_type": "code",
767
+ "source": [],
768
+ "metadata": {
769
+ "id": "V3374UGQ2LkK"
770
+ },
771
+ "execution_count": null,
772
+ "outputs": []
773
+ },
774
+ {
775
+ "cell_type": "code",
776
+ "source": [
777
+ "# Install Git LFS if you haven't already\n",
778
+ "!apt-get install git-lfs\n",
779
+ "\n",
780
+ "# Initialize Git LFS in your repository\n",
781
+ "!git lfs install\n"
782
+ ],
783
+ "metadata": {
784
+ "colab": {
785
+ "base_uri": "https://localhost:8080/"
786
+ },
787
+ "id": "PIivpoyonz4c",
788
+ "outputId": "126add67-05e3-4f91-c1d5-d93cae19c56e"
789
+ },
790
+ "execution_count": 1,
791
+ "outputs": [
792
+ {
793
+ "output_type": "stream",
794
+ "name": "stdout",
795
+ "text": [
796
+ "Reading package lists... Done\n",
797
+ "Building dependency tree... Done\n",
798
+ "Reading state information... Done\n",
799
+ "git-lfs is already the newest version (3.0.2-1ubuntu0.2).\n",
800
+ "0 upgraded, 0 newly installed, 0 to remove and 49 not upgraded.\n",
801
+ "Git LFS initialized.\n"
802
+ ]
803
+ }
804
+ ]
805
+ },
806
+ {
807
+ "cell_type": "code",
808
+ "source": [
809
+ "# Clone the new Hugging Face Space repository (replace with your new space URL)\n",
810
+ "!git clone https://huggingface.co/spaces/Varu96/healthcare_project\n"
811
+ ],
812
+ "metadata": {
813
+ "colab": {
814
+ "base_uri": "https://localhost:8080/"
815
+ },
816
+ "id": "7sLbt8ranz6_",
817
+ "outputId": "50df417c-c0fc-452e-c134-00763dc83988"
818
+ },
819
+ "execution_count": 2,
820
+ "outputs": [
821
+ {
822
+ "output_type": "stream",
823
+ "name": "stdout",
824
+ "text": [
825
+ "Cloning into 'healthcare_project'...\n",
826
+ "remote: Enumerating objects: 4, done.\u001b[K\n",
827
+ "remote: Total 4 (delta 0), reused 0 (delta 0), pack-reused 4 (from 1)\u001b[K\n",
828
+ "Unpacking objects: 100% (4/4), 1.28 KiB | 1.28 MiB/s, done.\n"
829
+ ]
830
+ }
831
+ ]
832
+ },
833
+ {
834
+ "cell_type": "code",
835
+ "source": [
836
+ "%cd healthcare_project"
837
+ ],
838
+ "metadata": {
839
+ "colab": {
840
+ "base_uri": "https://localhost:8080/"
841
+ },
842
+ "id": "Hnuh3iyZnz9F",
843
+ "outputId": "ee7fe20f-5576-4a50-fd30-2ff649441ea6"
844
+ },
845
+ "execution_count": 3,
846
+ "outputs": [
847
+ {
848
+ "output_type": "stream",
849
+ "name": "stdout",
850
+ "text": [
851
+ "/content/healthcare_project\n"
852
+ ]
853
+ }
854
+ ]
855
+ },
856
+ {
857
+ "cell_type": "code",
858
+ "source": [
859
+ "# Copy the My_new_LLaVA folder to your Hugging Face repository directory\n",
860
+ "# Correct command to copy the My_new_LLaVA folder to your local directory\n",
861
+ "!cp -r \"/content/drive/MyDrive/Colab Notebooks/Medical_Project/My_new_LLaVA\" /content/healthcare_project\n"
862
+ ],
863
+ "metadata": {
864
+ "id": "JCAEMTAGnz_a"
865
+ },
866
+ "execution_count": 6,
867
+ "outputs": []
868
+ },
869
+ {
870
+ "cell_type": "code",
871
+ "source": [
872
+ "# Track specific binary files with Git LFS\n",
873
+ "!git lfs track \"*.safetensors\" \"*.gif\"\n"
874
+ ],
875
+ "metadata": {
876
+ "colab": {
877
+ "base_uri": "https://localhost:8080/"
878
+ },
879
+ "id": "WLhEZECAobXG",
880
+ "outputId": "96ee4c0f-3148-4aed-a0ab-bfcbc7af8831"
881
+ },
882
+ "execution_count": 7,
883
+ "outputs": [
884
+ {
885
+ "output_type": "stream",
886
+ "name": "stdout",
887
+ "text": [
888
+ "\"*.safetensors\" already supported\n",
889
+ "Tracking \"*.gif\"\n"
890
+ ]
891
+ }
892
+ ]
893
+ },
894
+ {
895
+ "cell_type": "code",
896
+ "source": [
897
+ "# Stage all files for commit\n",
898
+ "!git add .\n"
899
+ ],
900
+ "metadata": {
901
+ "colab": {
902
+ "base_uri": "https://localhost:8080/"
903
+ },
904
+ "id": "f3iktJ0wtJAu",
905
+ "outputId": "3d6b4020-3863-4845-d2ac-05fb2b8aac3e"
906
+ },
907
+ "execution_count": 8,
908
+ "outputs": [
909
+ {
910
+ "output_type": "stream",
911
+ "name": "stdout",
912
+ "text": [
913
+ "warning: adding embedded git repository: My_new_LLaVA/LLaVA\n",
914
+ "\u001b[33mhint: You've added another git repository inside your current repository.\u001b[m\n",
915
+ "\u001b[33mhint: Clones of the outer repository will not contain the contents of\u001b[m\n",
916
+ "\u001b[33mhint: the embedded repository and will not know how to obtain it.\u001b[m\n",
917
+ "\u001b[33mhint: If you meant to add a submodule, use:\u001b[m\n",
918
+ "\u001b[33mhint: \u001b[m\n",
919
+ "\u001b[33mhint: \tgit submodule add <url> My_new_LLaVA/LLaVA\u001b[m\n",
920
+ "\u001b[33mhint: \u001b[m\n",
921
+ "\u001b[33mhint: If you added this path by mistake, you can remove it from the\u001b[m\n",
922
+ "\u001b[33mhint: index with:\u001b[m\n",
923
+ "\u001b[33mhint: \u001b[m\n",
924
+ "\u001b[33mhint: \tgit rm --cached My_new_LLaVA/LLaVA\u001b[m\n",
925
+ "\u001b[33mhint: \u001b[m\n",
926
+ "\u001b[33mhint: See \"git help submodule\" for more information.\u001b[m\n"
927
+ ]
928
+ }
929
+ ]
930
+ },
931
+ {
932
+ "cell_type": "code",
933
+ "source": [
934
+ "!git status\n"
935
+ ],
936
+ "metadata": {
937
+ "colab": {
938
+ "base_uri": "https://localhost:8080/"
939
+ },
940
+ "id": "r69auF2StMOC",
941
+ "outputId": "5330400b-9067-4cd6-d23b-7e30f275e5f0"
942
+ },
943
+ "execution_count": 9,
944
+ "outputs": [
945
+ {
946
+ "output_type": "stream",
947
+ "name": "stdout",
948
+ "text": [
949
+ "On branch main\n",
950
+ "Your branch is up to date with 'origin/main'.\n",
951
+ "\n",
952
+ "Changes to be committed:\n",
953
+ " (use \"git restore --staged <file>...\" to unstage)\n",
954
+ "\t\u001b[32mmodified: .gitattributes\u001b[m\n",
955
+ "\t\u001b[32mnew file: My_new_LLaVA/.DS_Store\u001b[m\n",
956
+ "\t\u001b[32mnew file: My_new_LLaVA/Copy of llava-1.5-fine-tune-gpu-a100.ipynb\u001b[m\n",
957
+ "\t\u001b[32mnew file: My_new_LLaVA/LLaVA\u001b[m\n",
958
+ "\t\u001b[32mnew file: My_new_LLaVA/checkpoints/llava-v1.5-7b-task-lora/README.md\u001b[m\n",
959
+ "\t\u001b[32mnew file: My_new_LLaVA/checkpoints/llava-v1.5-7b-task-lora/adapter_config.json\u001b[m\n",
960
+ "\t\u001b[32mnew file: My_new_LLaVA/checkpoints/llava-v1.5-7b-task-lora/adapter_model.safetensors\u001b[m\n",
961
+ "\t\u001b[32mnew file: My_new_LLaVA/checkpoints/llava-v1.5-7b-task-lora/config.json\u001b[m\n",
962
+ "\t\u001b[32mnew file: My_new_LLaVA/checkpoints/llava-v1.5-7b-task-lora/non_lora_trainables.bin\u001b[m\n",
963
+ "\t\u001b[32mnew file: My_new_LLaVA/checkpoints/llava-v1.5-7b-task-lora/runs/Aug18_06-09-01_decfd3532853/events.out.tfevents.1723961423.decfd3532853.7960.0\u001b[m\n",
964
+ "\t\u001b[32mnew file: My_new_LLaVA/checkpoints/llava-v1.5-7b-task-lora/trainer_state.json\u001b[m\n",
965
+ "\t\u001b[32mnew file: My_new_LLaVA/llava-ftmodel/config.json\u001b[m\n",
966
+ "\t\u001b[32mnew file: My_new_LLaVA/llava-ftmodel/generation_config.json\u001b[m\n",
967
+ "\t\u001b[32mnew file: My_new_LLaVA/llava-ftmodel/model.safetensors.index.json\u001b[m\n",
968
+ "\t\u001b[32mnew file: My_new_LLaVA/llava-ftmodel/special_tokens_map.json\u001b[m\n",
969
+ "\t\u001b[32mnew file: My_new_LLaVA/llava-ftmodel/tokenizer.model\u001b[m\n",
970
+ "\t\u001b[32mnew file: My_new_LLaVA/llava-ftmodel/tokenizer_config.json\u001b[m\n",
971
+ "\t\u001b[32mnew file: My_new_LLaVA/wandb/debug-internal.log\u001b[m\n",
972
+ "\t\u001b[32mnew file: My_new_LLaVA/wandb/debug.log\u001b[m\n",
973
+ "\t\u001b[32mnew file: My_new_LLaVA/wandb/latest-run\u001b[m\n",
974
+ "\t\u001b[32mnew file: My_new_LLaVA/wandb/offline-run-20240818_061029-xy8wzk0g/files/wandb-metadata.json\u001b[m\n",
975
+ "\t\u001b[32mnew file: My_new_LLaVA/wandb/offline-run-20240818_061029-xy8wzk0g/files/wandb-summary.json\u001b[m\n",
976
+ "\t\u001b[32mnew file: My_new_LLaVA/wandb/offline-run-20240818_061029-xy8wzk0g/logs/debug-internal.log\u001b[m\n",
977
+ "\t\u001b[32mnew file: My_new_LLaVA/wandb/offline-run-20240818_061029-xy8wzk0g/logs/debug.log\u001b[m\n",
978
+ "\t\u001b[32mnew file: My_new_LLaVA/wandb/offline-run-20240818_061029-xy8wzk0g/run-xy8wzk0g.wandb\u001b[m\n",
979
+ "\n",
980
+ "Changes not staged for commit:\n",
981
+ " (use \"git add <file>...\" to update what will be committed)\n",
982
+ " (use \"git restore <file>...\" to discard changes in working directory)\n",
983
+ " (commit or discard the untracked or modified content in submodules)\n",
984
+ "\t\u001b[31mmodified: My_new_LLaVA/LLaVA\u001b[m (modified content, untracked content)\n",
985
+ "\n"
986
+ ]
987
+ }
988
+ ]
989
+ },
990
+ {
991
+ "cell_type": "code",
992
+ "source": [
993
+ "!git commit -m \"Initial commit for new Hugging Face Space with all project files\"\n"
994
+ ],
995
+ "metadata": {
996
+ "colab": {
997
+ "base_uri": "https://localhost:8080/"
998
+ },
999
+ "id": "aHvb-pEGtPgr",
1000
+ "outputId": "71c0beed-afe0-4d8d-c45a-045113af6147"
1001
+ },
1002
+ "execution_count": 12,
1003
+ "outputs": [
1004
+ {
1005
+ "output_type": "stream",
1006
+ "name": "stdout",
1007
+ "text": [
1008
+ "[main cd5e78d] Initial commit for new Hugging Face Space with all project files\n",
1009
+ " 25 files changed, 1778 insertions(+)\n",
1010
+ " create mode 100644 My_new_LLaVA/.DS_Store\n",
1011
+ " create mode 100644 My_new_LLaVA/Copy of llava-1.5-fine-tune-gpu-a100.ipynb\n",
1012
+ " create mode 160000 My_new_LLaVA/LLaVA\n",
1013
+ " create mode 100644 My_new_LLaVA/checkpoints/llava-v1.5-7b-task-lora/README.md\n",
1014
+ " create mode 100644 My_new_LLaVA/checkpoints/llava-v1.5-7b-task-lora/adapter_config.json\n",
1015
+ " create mode 100644 My_new_LLaVA/checkpoints/llava-v1.5-7b-task-lora/adapter_model.safetensors\n",
1016
+ " create mode 100644 My_new_LLaVA/checkpoints/llava-v1.5-7b-task-lora/config.json\n",
1017
+ " create mode 100644 My_new_LLaVA/checkpoints/llava-v1.5-7b-task-lora/non_lora_trainables.bin\n",
1018
+ " create mode 100644 My_new_LLaVA/checkpoints/llava-v1.5-7b-task-lora/runs/Aug18_06-09-01_decfd3532853/events.out.tfevents.1723961423.decfd3532853.7960.0\n",
1019
+ " create mode 100644 My_new_LLaVA/checkpoints/llava-v1.5-7b-task-lora/trainer_state.json\n",
1020
+ " create mode 100644 My_new_LLaVA/llava-ftmodel/config.json\n",
1021
+ " create mode 100644 My_new_LLaVA/llava-ftmodel/generation_config.json\n",
1022
+ " create mode 100644 My_new_LLaVA/llava-ftmodel/model.safetensors.index.json\n",
1023
+ " create mode 100644 My_new_LLaVA/llava-ftmodel/special_tokens_map.json\n",
1024
+ " create mode 100644 My_new_LLaVA/llava-ftmodel/tokenizer.model\n",
1025
+ " create mode 100644 My_new_LLaVA/llava-ftmodel/tokenizer_config.json\n",
1026
+ " create mode 100644 My_new_LLaVA/wandb/debug-internal.log\n",
1027
+ " create mode 100644 My_new_LLaVA/wandb/debug.log\n",
1028
+ " create mode 100644 My_new_LLaVA/wandb/latest-run\n",
1029
+ " create mode 100644 My_new_LLaVA/wandb/offline-run-20240818_061029-xy8wzk0g/files/wandb-metadata.json\n",
1030
+ " create mode 100644 My_new_LLaVA/wandb/offline-run-20240818_061029-xy8wzk0g/files/wandb-summary.json\n",
1031
+ " create mode 100644 My_new_LLaVA/wandb/offline-run-20240818_061029-xy8wzk0g/logs/debug-internal.log\n",
1032
+ " create mode 100644 My_new_LLaVA/wandb/offline-run-20240818_061029-xy8wzk0g/logs/debug.log\n",
1033
+ " create mode 100644 My_new_LLaVA/wandb/offline-run-20240818_061029-xy8wzk0g/run-xy8wzk0g.wandb\n"
1034
+ ]
1035
+ }
1036
+ ]
1037
+ },
1038
+ {
1039
+ "cell_type": "code",
1040
+ "source": [
1041
+ "!git config --global user.email \"[email protected]\"\n",
1042
+ "!git config --global user.name \"Varu96\""
1043
+ ],
1044
+ "metadata": {
1045
+ "id": "lgKdINcwtX4T"
1046
+ },
1047
+ "execution_count": 11,
1048
+ "outputs": []
1049
+ },
1050
+ {
1051
+ "cell_type": "code",
1052
+ "source": [
1053
+ "!git remote set-url origin https://Varu96:[email protected]/spaces/Varu96/healthcare_project"
1054
+ ],
1055
+ "metadata": {
1056
+ "id": "qHbfFKNntw05"
1057
+ },
1058
+ "execution_count": 14,
1059
+ "outputs": []
1060
+ },
1061
+ {
1062
+ "cell_type": "code",
1063
+ "source": [
1064
+ "!git push origin main\n"
1065
+ ],
1066
+ "metadata": {
1067
+ "colab": {
1068
+ "base_uri": "https://localhost:8080/"
1069
+ },
1070
+ "id": "oAo_ByZptgLa",
1071
+ "outputId": "b6395d97-c705-4686-d23a-1f671bc13337"
1072
+ },
1073
+ "execution_count": 15,
1074
+ "outputs": [
1075
+ {
1076
+ "output_type": "stream",
1077
+ "name": "stdout",
1078
+ "text": [
1079
+ "Uploading LFS objects: 100% (4/4), 682 MB | 96 MB/s, done.\n",
1080
+ "Enumerating objects: 37, done.\n",
1081
+ "Counting objects: 100% (37/37), done.\n",
1082
+ "Delta compression using up to 8 threads\n",
1083
+ "Compressing objects: 100% (33/33), done.\n",
1084
+ "Writing objects: 100% (35/35), 82.40 KiB | 5.49 MiB/s, done.\n",
1085
+ "Total 35 (delta 1), reused 0 (delta 0), pack-reused 0\n",
1086
+ "To https://huggingface.co/spaces/Varu96/healthcare_project\n",
1087
+ " 1a43077..cd5e78d main -> main\n"
1088
+ ]
1089
+ }
1090
+ ]
1091
+ },
1092
+ {
1093
+ "cell_type": "code",
1094
+ "source": [
1095
+ "cd ../../"
1096
+ ],
1097
+ "metadata": {
1098
+ "colab": {
1099
+ "base_uri": "https://localhost:8080/"
1100
+ },
1101
+ "id": "2WQ4ysZ-tmAc",
1102
+ "outputId": "df164b98-8e6c-410c-a806-71bf10d138f9"
1103
+ },
1104
+ "execution_count": 19,
1105
+ "outputs": [
1106
+ {
1107
+ "output_type": "stream",
1108
+ "name": "stdout",
1109
+ "text": [
1110
+ "/content/healthcare_project\n"
1111
+ ]
1112
+ }
1113
+ ]
1114
+ },
1115
+ {
1116
+ "cell_type": "code",
1117
+ "source": [
1118
+ "%cd /content/healthcare_project/My_new_LLaVA/LLaVA"
1119
+ ],
1120
+ "metadata": {
1121
+ "colab": {
1122
+ "base_uri": "https://localhost:8080/"
1123
+ },
1124
+ "id": "uKPm5Y3quXb3",
1125
+ "outputId": "b09ef2c3-ec00-404d-c2db-071ff3e63b3c"
1126
+ },
1127
+ "execution_count": 17,
1128
+ "outputs": [
1129
+ {
1130
+ "output_type": "stream",
1131
+ "name": "stdout",
1132
+ "text": [
1133
+ "/content/healthcare_project/My_new_LLaVA/LLaVA\n"
1134
+ ]
1135
+ }
1136
+ ]
1137
+ },
1138
+ {
1139
+ "cell_type": "code",
1140
+ "source": [
1141
+ "# Add the entire LLaVA folder from My_new_LLaVA\n",
1142
+ "!git add /content/healthcare_project/My_new_LLaVA/LLaVA\n"
1143
+ ],
1144
+ "metadata": {
1145
+ "id": "86pV4lQSub3d"
1146
+ },
1147
+ "execution_count": 20,
1148
+ "outputs": []
1149
+ },
1150
+ {
1151
+ "cell_type": "code",
1152
+ "source": [
1153
+ "# Check the status to see if the files are staged\n",
1154
+ "!git status\n"
1155
+ ],
1156
+ "metadata": {
1157
+ "colab": {
1158
+ "base_uri": "https://localhost:8080/"
1159
+ },
1160
+ "id": "1bFPhtK4veji",
1161
+ "outputId": "85291650-d5b4-4c8b-ecd1-c9bfcbb32e78"
1162
+ },
1163
+ "execution_count": 21,
1164
+ "outputs": [
1165
+ {
1166
+ "output_type": "stream",
1167
+ "name": "stdout",
1168
+ "text": [
1169
+ "On branch main\n",
1170
+ "Your branch is up to date with 'origin/main'.\n",
1171
+ "\n",
1172
+ "Changes not staged for commit:\n",
1173
+ " (use \"git add <file>...\" to update what will be committed)\n",
1174
+ " (use \"git restore <file>...\" to discard changes in working directory)\n",
1175
+ " (commit or discard the untracked or modified content in submodules)\n",
1176
+ "\t\u001b[31mmodified: My_new_LLaVA/LLaVA\u001b[m (modified content, untracked content)\n",
1177
+ "\n",
1178
+ "no changes added to commit (use \"git add\" and/or \"git commit -a\")\n"
1179
+ ]
1180
+ }
1181
+ ]
1182
+ },
1183
+ {
1184
+ "cell_type": "code",
1185
+ "source": [
1186
+ "# Add all files recursively in the LLaVA folder\n",
1187
+ "!git add -A My_new_LLaVA/LLaVA/\n"
1188
+ ],
1189
+ "metadata": {
1190
+ "id": "EaMzQ2O3vgQf"
1191
+ },
1192
+ "execution_count": 23,
1193
+ "outputs": []
1194
+ },
1195
+ {
1196
+ "cell_type": "code",
1197
+ "source": [
1198
+ "# 1. Remove the submodule reference\n",
1199
+ "!git rm --cached My_new_LLaVA/LLaVA\n",
1200
+ "\n",
1201
+ "# 2. Remove submodule entry from .gitmodules\n",
1202
+ "!sed -i '/My_new_LLaVA\\/LLaVA/d' .gitmodules\n",
1203
+ "\n",
1204
+ "# 3. Remove the submodule's .git directory (if it exists)\n",
1205
+ "!rm -rf My_new_LLaVA/LLaVA/.git\n",
1206
+ "\n",
1207
+ "# 4. Add the LLaVA folder as a regular folder\n",
1208
+ "!git add My_new_LLaVA/LLaVA\n",
1209
+ "\n",
1210
+ "# 5. Commit the changes\n",
1211
+ "!git commit -m \"Removed submodule and added LLaVA as a regular folder\"\n",
1212
+ "\n",
1213
+ "# 6. Push the changes\n",
1214
+ "!git push origin main\n"
1215
+ ],
1216
+ "metadata": {
1217
+ "colab": {
1218
+ "base_uri": "https://localhost:8080/"
1219
+ },
1220
+ "id": "r1vnQunYw6r1",
1221
+ "outputId": "266c25a6-9139-4eb6-c348-74d59cfc0cdc"
1222
+ },
1223
+ "execution_count": 36,
1224
+ "outputs": [
1225
+ {
1226
+ "output_type": "stream",
1227
+ "name": "stdout",
1228
+ "text": [
1229
+ "rm 'My_new_LLaVA/LLaVA'\n",
1230
+ "sed: can't read .gitmodules: No such file or directory\n",
1231
+ "[main 6a6ea70] Removed submodule and added LLaVA as a regular folder\n",
1232
+ " 154 files changed, 11488 insertions(+), 1 deletion(-)\n",
1233
+ " delete mode 160000 My_new_LLaVA/LLaVA\n",
1234
+ " create mode 100644 My_new_LLaVA/LLaVA/.devcontainer/Dockerfile\n",
1235
+ " create mode 100644 My_new_LLaVA/LLaVA/.devcontainer/devcontainer.env\n",
1236
+ " create mode 100644 My_new_LLaVA/LLaVA/.devcontainer/devcontainer.json\n",
1237
+ " create mode 100644 My_new_LLaVA/LLaVA/.devcontainer/postCreateCommand.sh\n",
1238
+ " create mode 100644 My_new_LLaVA/LLaVA/.dockerignore\n",
1239
+ " create mode 100644 My_new_LLaVA/LLaVA/.editorconfig\n",
1240
+ " create mode 100644 My_new_LLaVA/LLaVA/.gitattributes\n",
1241
+ " create mode 100644 My_new_LLaVA/LLaVA/.github/ISSUE_TEMPLATE/1-usage.yaml\n",
1242
+ " create mode 100644 My_new_LLaVA/LLaVA/.github/ISSUE_TEMPLATE/2-feature-request.yaml\n",
1243
+ " create mode 100644 My_new_LLaVA/LLaVA/.github/ISSUE_TEMPLATE/3-question.yaml\n",
1244
+ " create mode 100644 My_new_LLaVA/LLaVA/.github/ISSUE_TEMPLATE/4-discussion.yaml\n",
1245
+ " create mode 100644 My_new_LLaVA/LLaVA/.gitignore\n",
1246
+ " create mode 100644 My_new_LLaVA/LLaVA/LICENSE\n",
1247
+ " create mode 100644 My_new_LLaVA/LLaVA/README.md\n",
1248
+ " create mode 100644 My_new_LLaVA/LLaVA/cog.yaml\n",
1249
+ " create mode 100644 My_new_LLaVA/LLaVA/docs/Customize_Component.md\n",
1250
+ " create mode 100644 My_new_LLaVA/LLaVA/docs/Data.md\n",
1251
+ " create mode 100644 My_new_LLaVA/LLaVA/docs/Evaluation.md\n",
1252
+ " create mode 100644 My_new_LLaVA/LLaVA/docs/Finetune_Custom_Data.md\n",
1253
+ " create mode 100644 My_new_LLaVA/LLaVA/docs/Intel.md\n",
1254
+ " create mode 100644 My_new_LLaVA/LLaVA/docs/LLaVA_Bench.md\n",
1255
+ " create mode 100644 My_new_LLaVA/LLaVA/docs/LLaVA_from_LLaMA2.md\n",
1256
+ " create mode 100644 My_new_LLaVA/LLaVA/docs/LoRA.md\n",
1257
+ " create mode 100644 My_new_LLaVA/LLaVA/docs/MODEL_ZOO.md\n",
1258
+ " create mode 100644 My_new_LLaVA/LLaVA/docs/ScienceQA.md\n",
1259
+ " create mode 100644 My_new_LLaVA/LLaVA/docs/Windows.md\n",
1260
+ " create mode 100644 My_new_LLaVA/LLaVA/docs/macOS.md\n",
1261
+ " create mode 100644 My_new_LLaVA/LLaVA/images/demo_cli.gif\n",
1262
+ " create mode 100644 My_new_LLaVA/LLaVA/images/llava_example_cmp.png\n",
1263
+ " create mode 100644 My_new_LLaVA/LLaVA/images/llava_logo.png\n",
1264
+ " create mode 100644 My_new_LLaVA/LLaVA/images/llava_v1_5_radar.jpg\n",
1265
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/__init__.py\n",
1266
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/constants.py\n",
1267
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/conversation.py\n",
1268
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/eval_gpt_review.py\n",
1269
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/eval_gpt_review_bench.py\n",
1270
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/eval_gpt_review_visual.py\n",
1271
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/eval_pope.py\n",
1272
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/eval_science_qa.py\n",
1273
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/eval_science_qa_gpt4.py\n",
1274
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/eval_science_qa_gpt4_requery.py\n",
1275
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/eval_textvqa.py\n",
1276
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/generate_webpage_data_from_table.py\n",
1277
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/m4c_evaluator.py\n",
1278
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/model_qa.py\n",
1279
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/model_vqa.py\n",
1280
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/model_vqa_loader.py\n",
1281
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/model_vqa_mmbench.py\n",
1282
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/model_vqa_science.py\n",
1283
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/qa_baseline_gpt35.py\n",
1284
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/run_llava.py\n",
1285
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/summarize_gpt_review.py\n",
1286
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/webpage/figures/alpaca.png\n",
1287
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/webpage/figures/bard.jpg\n",
1288
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/webpage/figures/chatgpt.svg\n",
1289
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/webpage/figures/llama.jpg\n",
1290
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/webpage/figures/swords_FILL0_wght300_GRAD0_opsz48.svg\n",
1291
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/webpage/figures/vicuna.jpeg\n",
1292
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/webpage/index.html\n",
1293
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/webpage/script.js\n",
1294
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/eval/webpage/styles.css\n",
1295
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/mm_utils.py\n",
1296
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/model/__init__.py\n",
1297
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/model/apply_delta.py\n",
1298
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/model/builder.py\n",
1299
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/model/consolidate.py\n",
1300
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/model/language_model/llava_llama.py\n",
1301
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/model/language_model/llava_mistral.py\n",
1302
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/model/language_model/llava_mpt.py\n",
1303
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/model/llava_arch.py\n",
1304
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/model/make_delta.py\n",
1305
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/model/multimodal_encoder/builder.py\n",
1306
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/model/multimodal_encoder/clip_encoder.py\n",
1307
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/model/multimodal_projector/builder.py\n",
1308
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/model/utils.py\n",
1309
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/serve/__init__.py\n",
1310
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/serve/cli.py\n",
1311
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/serve/controller.py\n",
1312
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/serve/examples/extreme_ironing.jpg\n",
1313
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/serve/examples/waterview.jpg\n",
1314
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/serve/gradio_web_server.py\n",
1315
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/serve/model_worker.py\n",
1316
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/serve/register_worker.py\n",
1317
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/serve/sglang_worker.py\n",
1318
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/serve/test_message.py\n",
1319
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/train/llama_flash_attn_monkey_patch.py\n",
1320
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/train/llama_xformers_attn_monkey_patch.py\n",
1321
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/train/llava_trainer.py\n",
1322
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/train/train.py\n",
1323
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/train/train_mem.py\n",
1324
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/train/train_xformers.py\n",
1325
+ " create mode 100644 My_new_LLaVA/LLaVA/llava/utils.py\n",
1326
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/complex_reasoning/000_caps.txt\n",
1327
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/complex_reasoning/000_conv.txt\n",
1328
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/complex_reasoning/001_caps.txt\n",
1329
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/complex_reasoning/001_conv.txt\n",
1330
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/complex_reasoning/002_caps.txt\n",
1331
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/complex_reasoning/002_conv.txt\n",
1332
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/complex_reasoning/system_message.txt\n",
1333
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/conversation/000_caps.txt\n",
1334
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/conversation/000_conv.txt\n",
1335
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/conversation/001_caps.txt\n",
1336
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/conversation/001_conv.txt\n",
1337
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/conversation/system_message.txt\n",
1338
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/detail_description/000_caps.txt\n",
1339
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/detail_description/000_conv.txt\n",
1340
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/detail_description/001_caps.txt\n",
1341
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/detail_description/001_conv.txt\n",
1342
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/detail_description/002_caps.txt\n",
1343
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/detail_description/002_conv.txt\n",
1344
+ " create mode 100644 My_new_LLaVA/LLaVA/playground/data/prompts/detail_description/system_message.txt\n",
1345
+ " create mode 100644 My_new_LLaVA/LLaVA/predict.py\n",
1346
+ " create mode 100644 My_new_LLaVA/LLaVA/pyproject.toml\n",
1347
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/convert_gqa_for_eval.py\n",
1348
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/convert_mmbench_for_submission.py\n",
1349
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/convert_mmvet_for_eval.py\n",
1350
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/convert_seed_for_submission.py\n",
1351
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/convert_sqa_to_llava.py\n",
1352
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/convert_sqa_to_llava_base_prompt.py\n",
1353
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/convert_vizwiz_for_submission.py\n",
1354
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/convert_vqav2_for_submission.py\n",
1355
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/extract_mm_projector.py\n",
1356
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/finetune.sh\n",
1357
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/finetune_full_schedule.sh\n",
1358
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/finetune_lora.sh\n",
1359
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/finetune_qlora.sh\n",
1360
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/finetune_sqa.sh\n",
1361
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/merge_lora_weights.py\n",
1362
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/pretrain.sh\n",
1363
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/pretrain_xformers.sh\n",
1364
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/sqa_eval_batch.sh\n",
1365
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/sqa_eval_gather.sh\n",
1366
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/upload_pypi.sh\n",
1367
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/eval/gqa.sh\n",
1368
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/eval/llavabench.sh\n",
1369
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/eval/mmbench.sh\n",
1370
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/eval/mmbench_cn.sh\n",
1371
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/eval/mme.sh\n",
1372
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/eval/mmvet.sh\n",
1373
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/eval/pope.sh\n",
1374
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/eval/qbench.sh\n",
1375
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/eval/qbench_zh.sh\n",
1376
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/eval/seed.sh\n",
1377
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/eval/sqa.sh\n",
1378
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/eval/textvqa.sh\n",
1379
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/eval/vizwiz.sh\n",
1380
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/eval/vqav2.sh\n",
1381
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/finetune.sh\n",
1382
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/finetune_lora.sh\n",
1383
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/finetune_task.sh\n",
1384
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/finetune_task_lora.sh\n",
1385
+ " create mode 100644 My_new_LLaVA/LLaVA/scripts/v1_5/pretrain.sh\n",
1386
+ " create mode 100644 My_new_LLaVA/LLaVA/temp_image.jpg\n",
1387
+ "Uploading LFS objects: 100% (1/1), 10 MB | 0 B/s, done.\n",
1388
+ "Enumerating objects: 183, done.\n",
1389
+ "Counting objects: 100% (183/183), done.\n",
1390
+ "Delta compression using up to 8 threads\n",
1391
+ "Compressing objects: 100% (175/175), done.\n",
1392
+ "Writing objects: 100% (181/181), 1.17 MiB | 2.92 MiB/s, done.\n",
1393
+ "Total 181 (delta 26), reused 0 (delta 0), pack-reused 0\n",
1394
+ "remote: Resolving deltas: 100% (26/26), completed with 1 local object.\u001b[K\n",
1395
+ "To https://huggingface.co/spaces/Varu96/healthcare_project\n",
1396
+ " f2fb0bb..6a6ea70 main -> main\n"
1397
+ ]
1398
+ }
1399
+ ]
1400
+ },
1401
+ {
1402
+ "cell_type": "code",
1403
+ "source": [],
1404
+ "metadata": {
1405
+ "id": "z4PHI0JExTIh"
1406
+ },
1407
+ "execution_count": null,
1408
+ "outputs": []
1409
+ }
1410
+ ]
1411
+ }