Commit 1d06c07 · Update app.py
Parent: af233cd

app.py CHANGED
@@ -122,7 +122,7 @@ def process_url(url, profile, do_download=True, folder="."):
     else:
         raise gr.Error("Something went wrong in fetching CivitAI API")
 
-def create_readme(info, downloaded_files, link_civit=False, is_author=True, folder="."):
+def create_readme(info, downloaded_files, user_repo_id, link_civit=False, is_author=True, folder="."):
     readme_content = ""
     original_url = f"https://civitai.com/models/{info['modelId']}"
     link_civit_disclaimer = f'([CivitAI]({original_url}))'
@@ -131,6 +131,9 @@ def create_readme(info, downloaded_files, link_civit=False, is_author=True, fold
     civit_tags = [t for t in info["tags"] if t not in default_tags]
     tags = default_tags + civit_tags
     unpacked_tags = "\n- ".join(tags)
+
+    trained_words = info['trainedWords'] if 'trainedWords' in info and info['trainedWords'] else []
+    formatted_words = ', '.join(f'`{word}`' for word in trained_words)
 
     widget_content = ""
     for index, (prompt, image) in enumerate(zip(downloaded_files["imagePrompt"], downloaded_files["imageName"])):
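The two added lines collect the model's trigger words from the CivitAI metadata and format them for the README. A minimal sketch of how that formatting behaves, using a hypothetical `info` value (not taken from the commit):

```py
# Hypothetical CivitAI metadata, only to illustrate the formatting added above.
info = {"trainedWords": ["analog style", "film grain"]}

trained_words = info['trainedWords'] if 'trainedWords' in info and info['trainedWords'] else []
formatted_words = ', '.join(f'`{word}`' for word in trained_words)

print(formatted_words)  # -> `analog style`, `film grain`
```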
@@ -162,14 +165,39 @@ widget:
 
 {link_civit_disclaimer if link_civit else ''}
 
+## Model description
+
 {info["description"]}
 
+## Trigger words
+
+You should use {formatted_words} to trigger the image generation.
+
+## Download model
+
+Weights for this model are available in Safetensors format.
+
+[Download](/{user_repo_id}/tree/main) them in the Files & versions tab.
+
+## Use it with diffusers
+
+```py
+from diffusers import AutoPipelineForText2Image
+import torch
+
+pipeline = AutoPipelineForText2Image.from_pretrained('{info["baseModel"]}', torch_dtype=torch.float16).to("cuda")
+pipeline.load_lora_weights("{{user_repo_id}, weight_name='{downloaded_files["weightName"]}')
+image = pipeline('{prompt if prompt else (formatted_words if formatted_words else 'Your custom prompt')}').images[0]
+```
+
+For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
+
 """
-    for index, (image, prompt) in enumerate(zip(downloaded_files["imageName"], downloaded_files["imagePrompt"])):
-        if index == 1:
-            content += f"## Image examples for the model:\n\n> {prompt}\n"
-        elif index > 1:
-            content += f"\n\n> {prompt}\n"
+    #for index, (image, prompt) in enumerate(zip(downloaded_files["imageName"], downloaded_files["imagePrompt"])):
+    #    if index == 1:
+    #        content += f"## Image examples for the model:\n\n> {prompt}\n"
+    #    elif index > 1:
+    #        content += f"\n\n> {prompt}\n"
     readme_content += content + "\n"
     print(readme_content)
     with open(f"{folder}/README.md", "w") as file:
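After `create_readme` interpolates this f-string, the "Use it with diffusers" section of the generated README becomes a runnable snippet. A rough sketch of the rendered result, with hypothetical values for the base model, repo id, weight file, and prompt (none of these come from the commit); the `load_lora_weights` template line appears to have mismatched quotes as committed, so the sketch shows the presumably intended call:

```py
# Approximate rendering of the generated README snippet for a hypothetical
# LoRA repo "alice/my-lora" on top of an SDXL base model (illustrative values only).
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("alice/my-lora", weight_name="my-lora.safetensors")
image = pipeline("analog style, portrait photo of a woman").images[0]
```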
@@ -262,7 +290,8 @@ def upload_civit_to_hf(profile: Optional[gr.OAuthProfile], url, link_civit=False
     folder = str(uuid.uuid4())
     os.makedirs(folder, exist_ok=False)
     info, downloaded_files = process_url(url, profile, folder=folder)
-    create_readme(info, downloaded_files, link_civit, folder=folder)
+    user_repo_id = f"{profile.preferred_username}/{slug_name}"
+    create_readme(info, downloaded_files, user_repo_id, link_civit, folder=folder)
     try:
         api = HfApi(token=os.environ["HUGGING_FACE_HUB_TOKEN"])
         username = api.whoami()["name"]
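The call site now builds `user_repo_id` before writing the README, so the new Download section can link to the destination repo. A small sketch of that ordering with stand-in values (the profile object and slug are invented for illustration):

```py
# Stand-ins for the Gradio OAuth profile and the slugified model name used by the app.
from types import SimpleNamespace

profile = SimpleNamespace(preferred_username="alice")  # hypothetical gr.OAuthProfile
slug_name = "my-lora"                                  # hypothetical slug

user_repo_id = f"{profile.preferred_username}/{slug_name}"
print(user_repo_id)  # -> alice/my-lora
# create_readme(info, downloaded_files, user_repo_id, link_civit, folder=folder)
```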
@@ -279,7 +308,7 @@ def upload_civit_to_hf(profile: Optional[gr.OAuthProfile], url, link_civit=False
         raise gr.Error("something went wrong")
 
     transfer_repos = gr.load("multimodalart/transfer_repos", hf_token=os.environ["HUGGING_FACE_HUB_TOKEN"], src="spaces")
-    user_repo_id = f"{profile.preferred_username}/{slug_name}"
+
     response_code = transfer_repos(repo_id, user_repo_id)
     i = 0
     while response_code != "200":