Upload 3 files
Browse files- .gitattributes +37 -37
- README.md +56 -0
- model_index.json +39 -0
.gitattributes
CHANGED
@@ -1,37 +1,37 @@
|
|
1 |
-
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
-
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
-
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
-
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
-
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
-
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
-
assets/teaser.png filter=lfs diff=lfs merge=lfs -text
|
37 |
-
assets/teaser2.png filter=lfs diff=lfs merge=lfs -text
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
assets/teaser.png filter=lfs diff=lfs merge=lfs -text
|
37 |
+
assets/teaser2.png filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
base_model: stable-diffusion-xl-1.0-inpainting-0.1
|
3 |
+
tags:
|
4 |
+
- stable-diffusion-xl
|
5 |
+
- inpainting
|
6 |
+
- virtual try-on
|
7 |
+
license: cc-by-nc-sa-4.0
|
8 |
+
---
|
9 |
+
|
10 |
+
|
11 |
+
|
12 |
+
# Check out more code on our [GitHub repository](https://github.com/yisol/IDM-VTON)!
|
13 |
+
|
14 |
+
# IDM-VTON : Improving Diffusion Models for Authentic Virtual Try-on in the Wild
|
15 |
+
This is an official implementation of the paper 'Improving Diffusion Models for Authentic Virtual Try-on in the Wild'
|
16 |
+
- [paper](https://arxiv.org/abs/2403.05139)
|
17 |
+
- [project page](https://idm-vton.github.io/)
|
18 |
+
|
19 |
+
🤗 Try our huggingface [Demo](https://huggingface.co/spaces/yisol/IDM-VTON)
|
20 |
+
|
21 |
+

|
22 |
+

|
23 |
+
|
24 |
+
|
25 |
+
## TODO LIST
|
26 |
+
|
27 |
+
|
28 |
+
- [x] demo model
|
29 |
+
- [x] inference code
|
30 |
+
- [ ] training code
|
31 |
+
|
32 |
+
|
33 |
+
|
34 |
+
|
35 |
+
## Acknowledgements
|
36 |
+
|
37 |
+
For the demo, GPUs are supported from [zerogpu](https://huggingface.co/zero-gpu-explorers), and auto masking generation codes are based on [OOTDiffusion](https://github.com/levihsu/OOTDiffusion) and [DCI-VTON](https://github.com/bcmi/DCI-VTON-Virtual-Try-On).
|
38 |
+
Parts of the code are based on [IP-Adapter](https://github.com/tencent-ailab/IP-Adapter).
|
39 |
+
|
40 |
+
|
41 |
+
|
42 |
+
## Citation
|
43 |
+
```
|
44 |
+
@article{choi2024improving,
|
45 |
+
  title={Improving Diffusion Models for Authentic Virtual Try-on in the Wild},
|
46 |
+
author={Choi, Yisol and Kwak, Sangkyung and Lee, Kyungmin and Choi, Hyungwon and Shin, Jinwoo},
|
47 |
+
journal={arXiv preprint arXiv:2403.05139},
|
48 |
+
year={2024}
|
49 |
+
}
|
50 |
+
```
|
51 |
+
|
52 |
+
## License
|
53 |
+
The codes and checkpoints in this repository are under the [CC BY-NC-SA 4.0 license](https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
|
54 |
+
|
55 |
+
|
56 |
+
|
model_index.json
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_class_name": "StableDiffusionXLInpaintPipeline",
|
3 |
+
"_diffusers_version": "0.25.0",
|
4 |
+
"_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0",
|
5 |
+
"force_zeros_for_empty_prompt": true,
|
6 |
+
"requires_aesthetics_score": false,
|
7 |
+
"scheduler": [
|
8 |
+
"diffusers",
|
9 |
+
"DDPMScheduler"
|
10 |
+
],
|
11 |
+
"text_encoder": [
|
12 |
+
"transformers",
|
13 |
+
"CLIPTextModel"
|
14 |
+
],
|
15 |
+
"feature_extractor": [
|
16 |
+
"transformers",
|
17 |
+
"CLIPImageProcessor"
|
18 |
+
],
|
19 |
+
"text_encoder_2": [
|
20 |
+
"transformers",
|
21 |
+
"CLIPTextModelWithProjection"
|
22 |
+
],
|
23 |
+
"tokenizer": [
|
24 |
+
"transformers",
|
25 |
+
"CLIPTokenizer"
|
26 |
+
],
|
27 |
+
"tokenizer_2": [
|
28 |
+
"transformers",
|
29 |
+
"CLIPTokenizer"
|
30 |
+
],
|
31 |
+
"unet": [
|
32 |
+
"diffusers",
|
33 |
+
"UNet2DConditionModel"
|
34 |
+
],
|
35 |
+
"vae": [
|
36 |
+
"diffusers",
|
37 |
+
"AutoencoderKL"
|
38 |
+
]
|
39 |
+
}
|