Commit f611983
Parent(s): 61a562d
Update script.py
script.py CHANGED
@@ -34,7 +34,8 @@ def do_inference(dataset_name, output_dir, num_tokens):
     card_string = ''
     if(num_tokens > 0):
         tokens_sequence = ''.join(f'<s{i}>' for i in range(num_tokens))
-        tokens_list =
+        tokens_list = [f'<s{i}>' for i in range(num_tokens)]
+
         state_dict = load_file(f"{output_dir}/embeddings.safetensors")
         pipe.load_textual_inversion(state_dict["clip_l"], token=tokens_list, text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
         pipe.load_textual_inversion(state_dict["clip_g"], token=tokens_list, text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
@@ -94,5 +95,6 @@ def main():
     print("Training finished!")
     do_inference(args.dataset_name, args.output_dir, args.num_new_tokens_per_abstraction)
     print("All finished!")
+
 if __name__ == "__main__":
-    main()
+    main()
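For reference, the fix restores the `tokens_list = [f'<s{i}>' for i in range(num_tokens)]` line that was left truncated as `tokens_list =`, which broke the script before the trained embeddings could be loaded. Below is a minimal, self-contained sketch of the same loading path, assuming the training run wrote {output_dir}/embeddings.safetensors with "clip_l" and "clip_g" keys as shown in the diff; the base model id, output_dir value, token count, and prompt are placeholders, not taken from this repository.

# Minimal sketch, not the repository's inference code. Assumes embeddings were
# saved as {output_dir}/embeddings.safetensors with "clip_l" and "clip_g" keys,
# as in the diff above. Model id, output_dir, num_tokens, and prompt are placeholders.
import torch
from diffusers import StableDiffusionXLPipeline
from safetensors.torch import load_file

output_dir = "trained-model"   # placeholder for args.output_dir
num_tokens = 2                 # placeholder for args.num_new_tokens_per_abstraction

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# The strings the fixed line builds: "<s0><s1>..." for prompting,
# ["<s0>", "<s1>", ...] for registering the new tokens with the tokenizers.
tokens_sequence = ''.join(f'<s{i}>' for i in range(num_tokens))
tokens_list = [f'<s{i}>' for i in range(num_tokens)]

# Load the learned embeddings into both SDXL text encoders, as the script does.
state_dict = load_file(f"{output_dir}/embeddings.safetensors")
pipe.load_textual_inversion(state_dict["clip_l"], token=tokens_list,
                            text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
pipe.load_textual_inversion(state_dict["clip_g"], token=tokens_list,
                            text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)

image = pipe(f"a photo of {tokens_sequence}").images[0]  # placeholder prompt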