multimodalart HF staff committed on
Commit
ecc48ff
·
1 Parent(s): 6a50ac3

Update script.py

Browse files
Files changed (1) hide show
  1. script.py +21 -16
script.py CHANGED
@@ -4,6 +4,7 @@ from safetensors.torch import load_file
4
  from diffusers import AutoPipelineForText2Image
5
  from datasets import load_dataset
6
  from huggingface_hub.repocard import RepoCard
 
7
  import torch
8
  import re
9
  import argparse
@@ -23,6 +24,7 @@ def do_train(script_args):
23
  subprocess.run(['python', 'trainer.py'] + script_args)
24
 
25
  def do_inference(dataset_name, output_dir, num_tokens):
 
26
  try:
27
  print("Starting inference to generate example images...")
28
  dataset = load_dataset(dataset_name)
@@ -33,7 +35,6 @@ def do_inference(dataset_name, output_dir, num_tokens):
33
  pipe.load_lora_weights(f'{output_dir}/pytorch_lora_weights.safetensors')
34
 
35
  prompts = dataset["train"]["prompt"]
36
- widget_content = []
37
  if(num_tokens > 0):
38
  tokens_sequence = ''.join(f'<s{i}>' for i in range(num_tokens))
39
  tokens_list = [f'<s{i}>' for i in range(num_tokens)]
@@ -55,7 +56,12 @@ def do_inference(dataset_name, output_dir, num_tokens):
55
  }
56
  }
57
  widget_content.append(card_dict)
58
-
 
 
 
 
 
59
  repo_id = api.create_repo(f"{username}/{output_dir}", exist_ok=True).repo_id
60
 
61
  with open(f'{output_dir}/README.md', 'r') as file:
@@ -63,21 +69,20 @@ def do_inference(dataset_name, output_dir, num_tokens):
63
  readme_content = readme_content.replace(f'{output_dir}', f'{username}/{output_dir}')
64
 
65
  card = RepoCard(readme_content)
66
- card.data["widget"] = widget_content
67
- card.save(f'{output_dir}/README.md')
68
- except Exception as e:
69
- print("Something went wrong with generating images, specifically: ", e)
70
 
71
- from huggingface_hub import HfApi
72
- api = HfApi()
73
- username = api.whoami()["name"]
74
- print("Starting upload...")
75
- api.upload_folder(
76
- folder_path=output_dir,
77
- repo_id=f"{username}/{output_dir}",
78
- repo_type="model",
79
- )
80
- print("Upload finished!")
81
 
82
  import sys
83
  import argparse
 
4
  from diffusers import AutoPipelineForText2Image
5
  from datasets import load_dataset
6
  from huggingface_hub.repocard import RepoCard
7
+ from huggingface_hub import HfApi
8
  import torch
9
  import re
10
  import argparse
 
24
  subprocess.run(['python', 'trainer.py'] + script_args)
25
 
26
  def do_inference(dataset_name, output_dir, num_tokens):
27
+ widget_content = []
28
  try:
29
  print("Starting inference to generate example images...")
30
  dataset = load_dataset(dataset_name)
 
35
  pipe.load_lora_weights(f'{output_dir}/pytorch_lora_weights.safetensors')
36
 
37
  prompts = dataset["train"]["prompt"]
 
38
  if(num_tokens > 0):
39
  tokens_sequence = ''.join(f'<s{i}>' for i in range(num_tokens))
40
  tokens_list = [f'<s{i}>' for i in range(num_tokens)]
 
56
  }
57
  }
58
  widget_content.append(card_dict)
59
+ except Exception as e:
60
+ print("Something went wrong with generating images, specifically: ", e)
61
+
62
+ try:
63
+ api = HfApi()
64
+ username = api.whoami()["name"]
65
  repo_id = api.create_repo(f"{username}/{output_dir}", exist_ok=True).repo_id
66
 
67
  with open(f'{output_dir}/README.md', 'r') as file:
 
69
  readme_content = readme_content.replace(f'{output_dir}', f'{username}/{output_dir}')
70
 
71
  card = RepoCard(readme_content)
72
+ if widget_content:
73
+ card.data["widget"] = widget_content
74
+ card.save(f'{output_dir}/README.md')
 
75
 
76
+ print("Starting upload...")
77
+ api.upload_folder(
78
+ folder_path=output_dir,
79
+ repo_id=f"{username}/{output_dir}",
80
+ repo_type="model",
81
+ )
82
+ except Exception as e:
83
+ print("Something went wrong with uploading your model, specificaly: ", e)
84
+ else:
85
+ print("Upload finished!")
86
 
87
  import sys
88
  import argparse