jobs-git xiaohk commited on
Commit
0c9fe35
·
verified ·
0 Parent(s):

Duplicate from poloclub/diffusiondb

Browse files

Co-authored-by: Jay Wang <[email protected]>

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +53 -0
  2. .gitignore +3 -0
  3. README.md +404 -0
  4. diffusiondb-large-part-1/part-000001.zip +3 -0
  5. diffusiondb-large-part-1/part-000002.zip +3 -0
  6. diffusiondb-large-part-1/part-000003.zip +3 -0
  7. diffusiondb-large-part-1/part-000004.zip +3 -0
  8. diffusiondb-large-part-1/part-000005.zip +3 -0
  9. diffusiondb-large-part-1/part-000006.zip +3 -0
  10. diffusiondb-large-part-1/part-000007.zip +3 -0
  11. diffusiondb-large-part-1/part-000008.zip +3 -0
  12. diffusiondb-large-part-1/part-000009.zip +3 -0
  13. diffusiondb-large-part-1/part-000010.zip +3 -0
  14. diffusiondb-large-part-1/part-000011.zip +3 -0
  15. diffusiondb-large-part-1/part-000012.zip +3 -0
  16. diffusiondb-large-part-1/part-000013.zip +3 -0
  17. diffusiondb-large-part-1/part-000014.zip +3 -0
  18. diffusiondb-large-part-1/part-000015.zip +3 -0
  19. diffusiondb-large-part-1/part-000016.zip +3 -0
  20. diffusiondb-large-part-1/part-000017.zip +3 -0
  21. diffusiondb-large-part-1/part-000018.zip +3 -0
  22. diffusiondb-large-part-1/part-000019.zip +3 -0
  23. diffusiondb-large-part-1/part-000020.zip +3 -0
  24. diffusiondb-large-part-1/part-000021.zip +3 -0
  25. diffusiondb-large-part-1/part-000022.zip +3 -0
  26. diffusiondb-large-part-1/part-000023.zip +3 -0
  27. diffusiondb-large-part-1/part-000024.zip +3 -0
  28. diffusiondb-large-part-1/part-000025.zip +3 -0
  29. diffusiondb-large-part-1/part-000026.zip +3 -0
  30. diffusiondb-large-part-1/part-000027.zip +3 -0
  31. diffusiondb-large-part-1/part-000028.zip +3 -0
  32. diffusiondb-large-part-1/part-000029.zip +3 -0
  33. diffusiondb-large-part-1/part-000030.zip +3 -0
  34. diffusiondb-large-part-1/part-000031.zip +3 -0
  35. diffusiondb-large-part-1/part-000032.zip +3 -0
  36. diffusiondb-large-part-1/part-000033.zip +3 -0
  37. diffusiondb-large-part-1/part-000034.zip +3 -0
  38. diffusiondb-large-part-1/part-000035.zip +3 -0
  39. diffusiondb-large-part-1/part-000036.zip +3 -0
  40. diffusiondb-large-part-1/part-000037.zip +3 -0
  41. diffusiondb-large-part-1/part-000038.zip +3 -0
  42. diffusiondb-large-part-1/part-000039.zip +3 -0
  43. diffusiondb-large-part-1/part-000040.zip +3 -0
  44. diffusiondb-large-part-1/part-000041.zip +3 -0
  45. diffusiondb-large-part-1/part-000042.zip +3 -0
  46. diffusiondb-large-part-1/part-000043.zip +3 -0
  47. diffusiondb-large-part-1/part-000044.zip +3 -0
  48. diffusiondb-large-part-1/part-000045.zip +3 -0
  49. diffusiondb-large-part-1/part-000046.zip +3 -0
  50. diffusiondb-large-part-1/part-000047.zip +3 -0
.gitattributes ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ftz filter=lfs diff=lfs merge=lfs -text
6
+ *.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.h5 filter=lfs diff=lfs merge=lfs -text
8
+ *.joblib filter=lfs diff=lfs merge=lfs -text
9
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ # Audio files - uncompressed
36
+ *.pcm filter=lfs diff=lfs merge=lfs -text
37
+ *.sam filter=lfs diff=lfs merge=lfs -text
38
+ *.raw filter=lfs diff=lfs merge=lfs -text
39
+ # Audio files - compressed
40
+ *.aac filter=lfs diff=lfs merge=lfs -text
41
+ *.flac filter=lfs diff=lfs merge=lfs -text
42
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
43
+ *.ogg filter=lfs diff=lfs merge=lfs -text
44
+ *.wav filter=lfs diff=lfs merge=lfs -text
45
+ # Image files - uncompressed
46
+ *.bmp filter=lfs diff=lfs merge=lfs -text
47
+ *.gif filter=lfs diff=lfs merge=lfs -text
48
+ *.png filter=lfs diff=lfs merge=lfs -text
49
+ *.tiff filter=lfs diff=lfs merge=lfs -text
50
+ # Image files - compressed
51
+ *.jpg filter=lfs diff=lfs merge=lfs -text
52
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
53
+ *.webp filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ *.log
2
+ .DS_Store
3
+ git-add.py
README.md ADDED
@@ -0,0 +1,404 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ layout: default
3
+ title: Home
4
+ nav_order: 1
5
+ has_children: false
6
+
7
+ annotations_creators:
8
+ - no-annotation
9
+ language:
10
+ - en
11
+ language_creators:
12
+ - found
13
+ license:
14
+ - cc0-1.0
15
+ multilinguality:
16
+ - multilingual
17
+ pretty_name: DiffusionDB
18
+ size_categories:
19
+ - n>1T
20
+ source_datasets:
21
+ - original
22
+ tags:
23
+ - stable diffusion
24
+ - prompt engineering
25
+ - prompts
26
+ - research paper
27
+ task_categories:
28
+ - text-to-image
29
+ - image-to-text
30
+ task_ids:
31
+ - image-captioning
32
+ ---
33
+
34
+ # DiffusionDB
35
+
36
+ <img width="100%" src="https://user-images.githubusercontent.com/15007159/201762588-f24db2b8-dbb2-4a94-947b-7de393fc3d33.gif">
37
+
38
+ ## Table of Contents
39
+
40
+ - [DiffusionDB](#diffusiondb)
41
+ - [Table of Contents](#table-of-contents)
42
+ - [Dataset Description](#dataset-description)
43
+ - [Dataset Summary](#dataset-summary)
44
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
45
+ - [Languages](#languages)
46
+ - [Two Subsets](#two-subsets)
47
+ - [Key Differences](#key-differences)
48
+ - [Dataset Structure](#dataset-structure)
49
+ - [Data Instances](#data-instances)
50
+ - [Data Fields](#data-fields)
51
+ - [Dataset Metadata](#dataset-metadata)
52
+ - [Metadata Schema](#metadata-schema)
53
+ - [Data Splits](#data-splits)
54
+ - [Loading Data Subsets](#loading-data-subsets)
55
+ - [Method 1: Using Hugging Face Datasets Loader](#method-1-using-hugging-face-datasets-loader)
56
+ - [Method 2. Use the PoloClub Downloader](#method-2-use-the-poloclub-downloader)
57
+ - [Usage/Examples](#usageexamples)
58
+ - [Downloading a single file](#downloading-a-single-file)
59
+ - [Downloading a range of files](#downloading-a-range-of-files)
60
+ - [Downloading to a specific directory](#downloading-to-a-specific-directory)
61
+ - [Setting the files to unzip once they've been downloaded](#setting-the-files-to-unzip-once-theyve-been-downloaded)
62
+ - [Method 3. Use `metadata.parquet` (Text Only)](#method-3-use-metadataparquet-text-only)
63
+ - [Dataset Creation](#dataset-creation)
64
+ - [Curation Rationale](#curation-rationale)
65
+ - [Source Data](#source-data)
66
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
67
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
68
+ - [Annotations](#annotations)
69
+ - [Annotation process](#annotation-process)
70
+ - [Who are the annotators?](#who-are-the-annotators)
71
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
72
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
73
+ - [Social Impact of Dataset](#social-impact-of-dataset)
74
+ - [Discussion of Biases](#discussion-of-biases)
75
+ - [Other Known Limitations](#other-known-limitations)
76
+ - [Additional Information](#additional-information)
77
+ - [Dataset Curators](#dataset-curators)
78
+ - [Licensing Information](#licensing-information)
79
+ - [Citation Information](#citation-information)
80
+ - [Contributions](#contributions)
81
+
82
+ ## Dataset Description
83
+
84
+ - **Homepage:** [DiffusionDB homepage](https://poloclub.github.io/diffusiondb)
85
+ - **Repository:** [DiffusionDB repository](https://github.com/poloclub/diffusiondb)
86
+ - **Distribution:** [DiffusionDB Hugging Face Dataset](https://huggingface.co/datasets/poloclub/diffusiondb)
87
+ - **Paper:** [DiffusionDB: A Large-scale Prompt Gallery Dataset for Text-to-Image Generative Models](https://arxiv.org/abs/2210.14896)
88
+ - **Point of Contact:** [Jay Wang](mailto:[email protected])
89
+
90
+ ### Dataset Summary
91
+
92
+ DiffusionDB is the first large-scale text-to-image prompt dataset. It contains **14 million** images generated by Stable Diffusion using prompts and hyperparameters specified by real users.
93
+
94
+ DiffusionDB is publicly available at [🤗 Hugging Face Dataset](https://huggingface.co/datasets/poloclub/diffusiondb).
95
+
96
+ ### Supported Tasks and Leaderboards
97
+
98
+ The unprecedented scale and diversity of this human-actuated dataset provide exciting research opportunities in understanding the interplay between prompts and generative models, detecting deepfakes, and designing human-AI interaction tools to help users more easily use these models.
99
+
100
+ ### Languages
101
+
102
+ The text in the dataset is mostly English. It also contains other languages such as Spanish, Chinese, and Russian.
103
+
104
+ ### Two Subsets
105
+
106
+ DiffusionDB provides two subsets (DiffusionDB 2M and DiffusionDB Large) to support different needs.
107
+
108
+ |Subset|Num of Images|Num of Unique Prompts|Size|Image Directory|Metadata Table|
109
+ |:--|--:|--:|--:|--:|--:|
110
+ |DiffusionDB 2M|2M|1.5M|1.6TB|`images/`|`metadata.parquet`|
111
+ |DiffusionDB Large|14M|1.8M|6.5TB|`diffusiondb-large-part-1/` `diffusiondb-large-part-2/`|`metadata-large.parquet`|
112
+
113
+ #### Key Differences
114
+
115
+ 1. Two subsets have a similar number of unique prompts, but DiffusionDB Large has many more images. DiffusionDB Large is a superset of DiffusionDB 2M.
116
+ 2. Images in DiffusionDB 2M are stored in `png` format; images in DiffusionDB Large use a lossless `webp` format.
117
+
118
+ ## Dataset Structure
119
+
120
+ We use a modularized file structure to distribute DiffusionDB. The 2 million images in DiffusionDB 2M are split into 2,000 folders, where each folder contains 1,000 images and a JSON file that links these 1,000 images to their prompts and hyperparameters. Similarly, the 14 million images in DiffusionDB Large are split into 14,000 folders.
121
+
122
+ ```bash
123
+ # DiffusionDB 2M
124
+ ./
125
+ ├── images
126
+ │   ├── part-000001
127
+ │   │   ├── 3bfcd9cf-26ea-4303-bbe1-b095853f5360.png
128
+ │   │   ├── 5f47c66c-51d4-4f2c-a872-a68518f44adb.png
129
+ │   │   ├── 66b428b9-55dc-4907-b116-55aaa887de30.png
130
+ │   │   ├── [...]
131
+ │   │   └── part-000001.json
132
+ │   ├── part-000002
133
+ │   ├── part-000003
134
+ │   ├── [...]
135
+ │   └── part-002000
136
+ └── metadata.parquet
137
+ ```
138
+
139
+ ```bash
140
+ # DiffusionDB Large
141
+ ./
142
+ ├── diffusiondb-large-part-1
143
+ │   ├── part-000001
144
+ │   │   ├── 0a8dc864-1616-4961-ac18-3fcdf76d3b08.webp
145
+ │   │   ├── 0a25cacb-5d91-4f27-b18a-bd423762f811.webp
146
+ │   │   ├── 0a52d584-4211-43a0-99ef-f5640ee2fc8c.webp
147
+ │   │   ├── [...]
148
+ │   │   └── part-000001.json
149
+ │   ├── part-000002
150
+ │   ├── part-000003
151
+ │   ├── [...]
152
+ │   └── part-010000
153
+ ├── diffusiondb-large-part-2
154
+ │   ├── part-010001
155
+ │   │   ├── 0a68f671-3776-424c-91b6-c09a0dd6fc2d.webp
156
+ │   │   ├── 0a0756e9-1249-4fe2-a21a-12c43656c7a3.webp
157
+ │   │   ├── 0aa48f3d-f2d9-40a8-a800-c2c651ebba06.webp
158
+ │   │   ├── [...]
159
+ │   │   └── part-010001.json
160
+ │   ├── part-010002
161
+ │   ├── part-010003
162
+ │   ├── [...]
163
+ │   └── part-014000
164
+ └── metadata-large.parquet
165
+ ```
166
+
167
+ These sub-folders have names `part-0xxxxx`, and each image has a unique name generated by [UUID Version 4](https://en.wikipedia.org/wiki/Universally_unique_identifier). The JSON file in a sub-folder has the same name as the sub-folder. Each image is a `PNG` file (DiffusionDB 2M) or a lossless `WebP` file (DiffusionDB Large). The JSON file contains key-value pairs mapping image filenames to their prompts and hyperparameters.
168
+
169
+
170
+ ### Data Instances
171
+
172
+ For example, below is the image of `f3501e05-aef7-4225-a9e9-f516527408ac.png` and its key-value pair in `part-000001.json`.
173
+
174
+ <img width="300" src="https://i.imgur.com/gqWcRs2.png">
175
+
176
+ ```json
177
+ {
178
+ "f3501e05-aef7-4225-a9e9-f516527408ac.png": {
179
+ "p": "geodesic landscape, john chamberlain, christopher balaskas, tadao ando, 4 k, ",
180
+ "se": 38753269,
181
+ "c": 12.0,
182
+ "st": 50,
183
+ "sa": "k_lms"
184
+ },
185
+ }
186
+ ```
187
+
188
+ ### Data Fields
189
+
190
+ - key: Unique image name
191
+ - `p`: Prompt
192
+ - `se`: Random seed
193
+ - `c`: CFG Scale (guidance scale)
194
+ - `st`: Steps
195
+ - `sa`: Sampler
196
+
197
+ ### Dataset Metadata
198
+
199
+ To help you easily access prompts and other attributes of images without downloading all the Zip files, we include two metadata tables `metadata.parquet` and `metadata-large.parquet` for DiffusionDB 2M and DiffusionDB Large, respectively.
200
+
201
+ The shape of `metadata.parquet` is (2000000, 13) and the shape of `metadata-large.parquet` is (14000000, 13). Two tables share the same schema, and each row represents an image. We store these tables in the Parquet format because Parquet is column-based: you can efficiently query individual columns (e.g., prompts) without reading the entire table.
202
+
203
+ Below are three random rows from `metadata.parquet`.
204
+
205
+ | image_name | prompt | part_id | seed | step | cfg | sampler | width | height | user_name | timestamp | image_nsfw | prompt_nsfw |
206
+ |:-----------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------:|-----------:|-------:|------:|----------:|--------:|---------:|:-----------------------------------------------------------------|:--------------------------|-------------:|--------------:|
207
+ | 0c46f719-1679-4c64-9ba9-f181e0eae811.png | a small liquid sculpture, corvette, viscous, reflective, digital art | 1050 | 2026845913 | 50 | 7 | 8 | 512 | 512 | c2f288a2ba9df65c38386ffaaf7749106fed29311835b63d578405db9dbcafdb | 2022-08-11 09:05:00+00:00 | 0.0845108 | 0.00383462 |
208
+ | a00bdeaa-14eb-4f6c-a303-97732177eae9.png | human sculpture of lanky tall alien on a romantic date at italian restaurant with smiling woman, nice restaurant, photography, bokeh | 905 | 1183522603 | 50 | 10 | 8 | 512 | 768 | df778e253e6d32168eb22279a9776b3cde107cc82da05517dd6d114724918651 | 2022-08-19 17:55:00+00:00 | 0.692934 | 0.109437 |
209
+ | 6e5024ce-65ed-47f3-b296-edb2813e3c5b.png | portrait of barbaric spanish conquistador, symmetrical, by yoichi hatakenaka, studio ghibli and dan mumford | 286 | 1713292358 | 50 | 7 | 8 | 512 | 640 | 1c2e93cfb1430adbd956be9c690705fe295cbee7d9ac12de1953ce5e76d89906 | 2022-08-12 03:26:00+00:00 | 0.0773138 | 0.0249675 |
210
+
211
+ #### Metadata Schema
212
+
213
+ `metadata.parquet` and `metadata-large.parquet` share the same schema.
214
+
215
+ |Column|Type|Description|
216
+ |:---|:---|:---|
217
+ |`image_name`|`string`|Image UUID filename.|
218
+ |`prompt`|`string`|The text prompt used to generate this image.|
219
+ |`part_id`|`uint16`|Folder ID of this image.|
220
+ |`seed`|`uint32`| Random seed used to generate this image.|
221
+ |`step`|`uint16`| Step count (hyperparameter).|
222
+ |`cfg`|`float32`| Guidance scale (hyperparameter).|
223
+ |`sampler`|`uint8`| Sampler method (hyperparameter). Mapping: `{1: "ddim", 2: "plms", 3: "k_euler", 4: "k_euler_ancestral", 5: "k_heun", 6: "k_dpm_2", 7: "k_dpm_2_ancestral", 8: "k_lms", 9: "others"}`.
224
+ |`width`|`uint16`|Image width.|
225
+ |`height`|`uint16`|Image height.|
226
+ |`user_name`|`string`|The unique discord ID's SHA256 hash of the user who generated this image. For example, the hash for `xiaohk#3146` is `e285b7ef63be99e9107cecd79b280bde602f17e0ca8363cb7a0889b67f0b5ed0`. "deleted_account" refers to users who have deleted their accounts. None means the image had been deleted before we scraped it for the second time.|
227
+ |`timestamp`|`timestamp`|UTC Timestamp when this image was generated. None means the image had been deleted before we scraped it for the second time. Note that timestamp is not accurate for duplicate images that have the same prompt, hyperparameters, width, and height.|
228
+ |`image_nsfw`|`float32`|Likelihood of an image being NSFW. Scores are predicted by [LAION's state-of-the-art NSFW detector](https://github.com/LAION-AI/LAION-SAFETY) (range from 0 to 1). A score of 2.0 means the image has already been flagged as NSFW and blurred by Stable Diffusion.|
229
+ |`prompt_nsfw`|`float32`|Likelihood of a prompt being NSFW. Scores are predicted by the library [Detoxify](https://github.com/unitaryai/detoxify). Each score represents the maximum of `toxicity` and `sexual_explicit` (range from 0 to 1).|
230
+
231
+ > **Warning**
232
+ > Although the Stable Diffusion model has an NSFW filter that automatically blurs user-generated NSFW images, this NSFW filter is not perfect—DiffusionDB still contains some NSFW images. Therefore, we compute and provide the NSFW scores for images and prompts using the state-of-the-art models. The distribution of these scores is shown below. Please decide an appropriate NSFW score threshold to filter out NSFW images before using DiffusionDB in your projects.
233
+
234
+ <img src="https://i.imgur.com/1RiGAXL.png" width="100%">
235
+
236
+ ### Data Splits
237
+
238
+ For DiffusionDB 2M, we split 2 million images into 2,000 folders where each folder contains 1,000 images and a JSON file. For DiffusionDB Large, we split 14 million images into 14,000 folders where each folder contains 1,000 images and a JSON file.
239
+
240
+ ### Loading Data Subsets
241
+
242
+ DiffusionDB is large (1.6TB or 6.5 TB)! However, with our modularized file structure, you can easily load a desirable number of images and their prompts and hyperparameters. In the [`example-loading.ipynb`](https://github.com/poloclub/diffusiondb/blob/main/notebooks/example-loading.ipynb) notebook, we demonstrate three methods to load a subset of DiffusionDB. Below is a short summary.
243
+
244
+ #### Method 1: Using Hugging Face Datasets Loader
245
+
246
+ You can use the Hugging Face [`Datasets`](https://huggingface.co/docs/datasets/quickstart) library to easily load prompts and images from DiffusionDB. We pre-defined 16 DiffusionDB subsets (configurations) based on the number of instances. You can see all subsets in the [Dataset Preview](https://huggingface.co/datasets/poloclub/diffusiondb/viewer/all/train).
247
+
248
+ ```python
249
+ import numpy as np
250
+ from datasets import load_dataset
251
+
252
+ # Load the dataset with the `large_random_1k` subset
253
+ dataset = load_dataset('poloclub/diffusiondb', 'large_random_1k')
254
+ ```
255
+
256
+ #### Method 2. Use the PoloClub Downloader
257
+
258
+ This repo includes a Python downloader [`download.py`](https://github.com/poloclub/diffusiondb/blob/main/scripts/download.py) that allows you to download and load DiffusionDB. You can use it from your command line. Below is an example of loading a subset of DiffusionDB.
259
+
260
+ ##### Usage/Examples
261
+
262
+ The script is run using command-line arguments as follows:
263
+
264
+ - `-i` `--index` - File to download or lower bound of a range of files if `-r` is also set.
265
+ - `-r` `--range` - Upper bound of range of files to download if `-i` is set.
266
+ - `-o` `--output` - Name of custom output directory. Defaults to the current directory if not set.
267
+ - `-z` `--unzip` - Unzip the file/files after downloading
268
+ - `-l` `--large` - Download from Diffusion DB Large. Defaults to Diffusion DB 2M.
269
+
270
+ ###### Downloading a single file
271
+
272
+ The specific file to download is supplied as the number at the end of the file on HuggingFace. The script will automatically pad the number out and generate the URL.
273
+
274
+ ```bash
275
+ python download.py -i 23
276
+ ```
277
+
278
+ ###### Downloading a range of files
279
+
280
+ The upper and lower bounds of the set of files to download are set by the `-i` and `-r` flags respectively.
281
+
282
+ ```bash
283
+ python download.py -i 1 -r 2000
284
+ ```
285
+
286
+ Note that this range will download the entire dataset. The script will ask you to confirm that you have 1.7 TB free at the download destination.
287
+
288
+ ###### Downloading to a specific directory
289
+
290
+ The script will default to the location of the dataset's `part` .zip files at `images/`. If you wish to move the download location, you should move these files as well or use a symbolic link.
291
+
292
+ ```bash
293
+ python download.py -i 1 -r 2000 -o /home/$USER/datahoarding/etc
294
+ ```
295
+
296
+ Again, the script will automatically add the `/` between the directory and the file when it downloads.
297
+
298
+ ###### Setting the files to unzip once they've been downloaded
299
+
300
+ The script is set to unzip the files _after_ all files have downloaded as both can be lengthy processes in certain circumstances.
301
+
302
+ ```bash
303
+ python download.py -i 1 -r 2000 -z
304
+ ```
305
+
306
+ #### Method 3. Use `metadata.parquet` (Text Only)
307
+
308
+ If your task does not require images, then you can easily access all 2 million prompts and hyperparameters in the `metadata.parquet` table.
309
+
310
+ ```python
311
+ from urllib.request import urlretrieve
312
+ import pandas as pd
313
+
314
+ # Download the parquet table
315
+ table_url = f'https://huggingface.co/datasets/poloclub/diffusiondb/resolve/main/metadata.parquet'
316
+ urlretrieve(table_url, 'metadata.parquet')
317
+
318
+ # Read the table using Pandas
319
+ metadata_df = pd.read_parquet('metadata.parquet')
320
+ ```
321
+
322
+ ## Dataset Creation
323
+
324
+ ### Curation Rationale
325
+
326
+ Recent diffusion models have gained immense popularity by enabling high-quality and controllable image generation based on text prompts written in natural language. Since the release of these models, people from different domains have quickly applied them to create award-winning artworks, synthetic radiology images, and even hyper-realistic videos.
327
+
328
+ However, generating images with desired details is difficult, as it requires users to write proper prompts specifying the exact expected results. Developing such prompts requires trial and error, and can often feel random and unprincipled. Simon Willison analogizes writing prompts to wizards learning “magical spells”: users do not understand why some prompts work, but they will add these prompts to their “spell book.” For example, to generate highly-detailed images, it has become a common practice to add special keywords such as “trending on artstation” and “unreal engine” in the prompt.
329
+
330
+ Prompt engineering has become a field of study in the context of text-to-text generation, where researchers systematically investigate how to construct prompts to effectively solve different down-stream tasks. As large text-to-image models are relatively new, there is a pressing need to understand how these models react to prompts, how to write effective prompts, and how to design tools to help users generate images.
331
+ To help researchers tackle these critical challenges, we create DiffusionDB, the first large-scale prompt dataset with 14 million real prompt-image pairs.
332
+
333
+ ### Source Data
334
+
335
+ #### Initial Data Collection and Normalization
336
+
337
+ We construct DiffusionDB by scraping user-generated images on the official Stable Diffusion Discord server. We choose Stable Diffusion because it is currently the only open-source large text-to-image generative model, and all generated images have a CC0 1.0 Universal Public Domain Dedication license that waives all copyright and allows uses for any purpose. We choose the official [Stable Diffusion Discord server](https://discord.gg/stablediffusion) because it is public, and it has strict rules against generating and sharing illegal, hateful, or NSFW (not suitable for work, such as sexual and violent content) images. The server also disallows users to write or share prompts with personal information.
338
+
339
+ #### Who are the source language producers?
340
+
341
+ The language producers are users of the official [Stable Diffusion Discord server](https://discord.gg/stablediffusion).
342
+
343
+ ### Annotations
344
+
345
+ The dataset does not contain any additional annotations.
346
+
347
+ #### Annotation process
348
+
349
+ [N/A]
350
+
351
+ #### Who are the annotators?
352
+
353
+ [N/A]
354
+
355
+ ### Personal and Sensitive Information
356
+
357
+ The authors removed the discord usernames from the dataset.
358
+ We decide to anonymize the dataset because some prompts might include sensitive information: explicitly linking them to their creators can cause harm to creators.
359
+
360
+ ## Considerations for Using the Data
361
+
362
+ ### Social Impact of Dataset
363
+
364
+ The purpose of this dataset is to help develop a better understanding of large text-to-image generative models.
365
+ The unprecedented scale and diversity of this human-actuated dataset provide exciting research opportunities in understanding the interplay between prompts and generative models, detecting deepfakes, and designing human-AI interaction tools to help users more easily use these models.
366
+
367
+ It should be noted that we collect images and their prompts from the Stable Diffusion Discord server. The Discord server has rules against users generating or sharing harmful or NSFW (not suitable for work, such as sexual and violent content) images. The Stable Diffusion model used in the server also has an NSFW filter that blurs the generated images if it detects NSFW content. However, it is still possible that some users had generated harmful images that were not detected by the NSFW filter or removed by the server moderators. Therefore, DiffusionDB can potentially contain these images. To mitigate the potential harm, we provide a [Google Form](https://forms.gle/GbYaSpRNYqxCafMZ9) on the [DiffusionDB website](https://poloclub.github.io/diffusiondb/) where users can report harmful or inappropriate images and prompts. We will closely monitor this form and remove reported images and prompts from DiffusionDB.
368
+
369
+ ### Discussion of Biases
370
+
371
+ The 14 million images in DiffusionDB have diverse styles and categories. However, Discord can be a biased data source. Our images come from channels where early users could use a bot to use Stable Diffusion before release. As these users had started using Stable Diffusion before the model was public, we hypothesize that they are AI art enthusiasts and are likely to have experience with other text-to-image generative models. Therefore, the prompting style in DiffusionDB might not represent novice users. Similarly, the prompts in DiffusionDB might not generalize to domains that require specific knowledge, such as medical images.
372
+
373
+ ### Other Known Limitations
374
+
375
+ **Generalizability.** Previous research has shown a prompt that works well on one generative model might not give the optimal result when used in other models.
376
+ Therefore, different models can need users to write different prompts. For example, many Stable Diffusion prompts use commas to separate keywords, while this pattern is less seen in prompts for DALL-E 2 or Midjourney. Thus, we caution researchers that some research findings from DiffusionDB might not be generalizable to other text-to-image generative models.
377
+
378
+ ## Additional Information
379
+
380
+ ### Dataset Curators
381
+
382
+ DiffusionDB is created by [Jay Wang](https://zijie.wang), [Evan Montoya](https://www.linkedin.com/in/evan-montoya-b252391b4/), [David Munechika](https://www.linkedin.com/in/dmunechika/), [Alex Yang](https://alexanderyang.me), [Ben Hoover](https://www.bhoov.com), [Polo Chau](https://faculty.cc.gatech.edu/~dchau/).
383
+
384
+
385
+ ### Licensing Information
386
+
387
+ The DiffusionDB dataset is available under the [CC0 1.0 License](https://creativecommons.org/publicdomain/zero/1.0/).
388
+ The Python code in this repository is available under the [MIT License](https://github.com/poloclub/diffusiondb/blob/main/LICENSE).
389
+
390
+ ### Citation Information
391
+
392
+ ```bibtex
393
+ @article{wangDiffusionDBLargescalePrompt2022,
394
+ title = {{{DiffusionDB}}: {{A}} Large-Scale Prompt Gallery Dataset for Text-to-Image Generative Models},
395
+ author = {Wang, Zijie J. and Montoya, Evan and Munechika, David and Yang, Haoyang and Hoover, Benjamin and Chau, Duen Horng},
396
+ year = {2022},
397
+ journal = {arXiv:2210.14896 [cs]},
398
+ url = {https://arxiv.org/abs/2210.14896}
399
+ }
400
+ ```
401
+
402
+ ### Contributions
403
+
404
+ If you have any questions, feel free to [open an issue](https://github.com/poloclub/diffusiondb/issues/new) or contact [Jay Wang](https://zijie.wang).
diffusiondb-large-part-1/part-000001.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a938ffb16d7d4ccecf768445f9860df31c0f56e54511bb790685afc4fe3357f2
3
+ size 527750267
diffusiondb-large-part-1/part-000002.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a1f771aa0d64356458bb98f2f3432ac6114286eb1a0573a56edfdebb20892bbb
3
+ size 511878673
diffusiondb-large-part-1/part-000003.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:86d71d4dd5bdf9dacf86baf02cbfe1d04d7d0d981230d289313b2904995031c9
3
+ size 524201255
diffusiondb-large-part-1/part-000004.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f69a1a138992fd7aff69a1104c2ad70561573d8c9abb0f029ee47c0caccf32c7
3
+ size 450824491
diffusiondb-large-part-1/part-000005.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ddad7f4b0f85aa613a2cd1c71e87fb191fc4227f36a2d0727b003a9ec55ef8d7
3
+ size 457805663
diffusiondb-large-part-1/part-000006.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:136c29307ec5f7bef0a34f19554b0b2039c23d047534726e71c429b9ddb0ef36
3
+ size 424401911
diffusiondb-large-part-1/part-000007.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d3d83704817370b9c7cbe5280848cc82b6a2f1b89f9809fefad39e2b9bc5cbe6
3
+ size 454401098
diffusiondb-large-part-1/part-000008.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:94ab15280682c593c3d8fadc0849065200c11b63c3d6da4855f8d905922ab720
3
+ size 529703005
diffusiondb-large-part-1/part-000009.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:423b4a8c86e674aa10118017f0bc4eaf429a678060aa146f5c813a1eef7c1773
3
+ size 518939067
diffusiondb-large-part-1/part-000010.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:35b64ab610bde594b3802d60046ff6f3704683fe64cd13dcad6802894aaf2312
3
+ size 459184861
diffusiondb-large-part-1/part-000011.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:caa5887319dc8e4b60a22290b1a0e4cb85234e0d4828f71c24b51c4add51275b
3
+ size 477347514
diffusiondb-large-part-1/part-000012.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f365d94781c10c6b1084c1e9a5211c6f9c1849f8bb48e7ab676cf357306cfa3c
3
+ size 475684864
diffusiondb-large-part-1/part-000013.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c7c70161395ba325f7b4fea4306bf7404ab2be45e8a20aa51c137965163ec16f
3
+ size 386796215
diffusiondb-large-part-1/part-000014.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab2886b5914d7226b0286a87c44e4250821f2dd6393a2cc740517b578d62ba9f
3
+ size 400528244
diffusiondb-large-part-1/part-000015.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:37f291d836c8192ef14d6e95452201524ee772e8eeb0f9c0685334715ff0f693
3
+ size 441858847
diffusiondb-large-part-1/part-000016.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d71e3f149bac58748030cc630d8bbdaa9a7da67f558f486e094388c5042224f
3
+ size 416800141
diffusiondb-large-part-1/part-000017.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e6247fa53081454c6a01414cf891f49ec82bf8c30ca8702ca4a8b6ca434b274
3
+ size 432916349
diffusiondb-large-part-1/part-000018.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bceae68e24bf24a647574014612955d9fc83ac7b66dadda5597ee6ea97c30490
3
+ size 490023663
diffusiondb-large-part-1/part-000019.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c2c9d3ee1a8c52877e4653c0851bc80a93eea0aa2bf83748090da2941cfddc7
3
+ size 456780169
diffusiondb-large-part-1/part-000020.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b465a99320488b46ac6b776a87df2c0347b5db7c1ed445894f51e81c0b8f0f06
3
+ size 429276908
diffusiondb-large-part-1/part-000021.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1fe4eec07208b82ef15c6901b4831de524787d8fc14eeecc9917c1a298bb303
3
+ size 445545490
diffusiondb-large-part-1/part-000022.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac54d964a4cd0084fc2509d5f1d38101419ca8310b07214ded8aa52ae4a3fdc4
3
+ size 416667808
diffusiondb-large-part-1/part-000023.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fbf95fc75393de2d0a30754286a96319a1cfd330d1d00cbcfa77e19b7c9d39ff
3
+ size 483763394
diffusiondb-large-part-1/part-000024.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90b87152e91642fce10e1e53d62871462bfec5e8894744d7fc7b3156456d9259
3
+ size 444930184
diffusiondb-large-part-1/part-000025.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2452fb70f12d927d67e544f9e1530064580b612bfedb4a95d134fe8e4fd30292
3
+ size 473853867
diffusiondb-large-part-1/part-000026.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a7877f2eec0991672a244a07739db3cf36be5e12dc8972be77484ce18270c07
3
+ size 429063194
diffusiondb-large-part-1/part-000027.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a3cbcb15977571b7a1ec315dd3d20062928e01c4247c9f7ec5b2fb9c178c8367
3
+ size 471210385
diffusiondb-large-part-1/part-000028.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05a83829c054cd0941c07e158b2c43ead219fc6a1251d342454e1db71b7b2545
3
+ size 431512528
diffusiondb-large-part-1/part-000029.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:16855434af18767a52d088116eb994dda7f12d5d45be0b285fb9635aea29dbc2
3
+ size 411115194
diffusiondb-large-part-1/part-000030.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5006e40d4c068bc37d9fff8b6365889b931e49551944c624ef2fda9b7b1d5791
3
+ size 428026739
diffusiondb-large-part-1/part-000031.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c67f8d479781a71d68be4e4f0b42bfef4ce842d0d3680bd303ef2a409ec11aea
3
+ size 442314562
diffusiondb-large-part-1/part-000032.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a67415606cb86a25b82309e7d6c32f15aef987b088ad32f3358c2d006333d21
3
+ size 525996470
diffusiondb-large-part-1/part-000033.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba7e1ee78613e4dac9a5dd47f163c616d48e67a913b461c994c09fb52784e500
3
+ size 447843876
diffusiondb-large-part-1/part-000034.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:32ee7431a2101215c56211fd1c5db81b1e97b2808f94e56fe60da4bcb90b253e
3
+ size 441726707
diffusiondb-large-part-1/part-000035.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7de1ebb5d6e68334a36612fd504951b862db60d0ec5586097024ac60d8f28002
3
+ size 447710269
diffusiondb-large-part-1/part-000036.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c63daf48a9954c4fe7b76b622516bb4fb4b05fe60481583ab380fd72f51e5c3
3
+ size 496958176
diffusiondb-large-part-1/part-000037.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6927b5741302ff839cd6aa5f358f448e09e5109068a00da1473d7cbb00386cf
3
+ size 419824663
diffusiondb-large-part-1/part-000038.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:857018e5bc066d70495b27e31a4d29ffb71d74f0e9a3379abe9f6c7b1e40ac76
3
+ size 462915210
diffusiondb-large-part-1/part-000039.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e9a953b40cce4220366c1f7c8f64e97ba5cd0b44a416fe19f73604bcd4a33124
3
+ size 497688119
diffusiondb-large-part-1/part-000040.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e187ea729927c199cc89df943871bcdfe48cfd2f3b8ca989462a7f60c84692a
3
+ size 489732610
diffusiondb-large-part-1/part-000041.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f650d991b73806b46275aad8624d24df9e6a0d060b7f3e98ef915de3ac2778f5
3
+ size 493727265
diffusiondb-large-part-1/part-000042.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:042b87f16c3dce669ac4f7b4be5c1e216d01124940868a0f77a1f34b18781b64
3
+ size 461387388
diffusiondb-large-part-1/part-000043.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cee2c2f0836d7890a4ef39af819de76de61c51632895649d3fe327950ce94d4f
3
+ size 445859284
diffusiondb-large-part-1/part-000044.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1ea993c20eb87356e6156ef0c751429eef37de1eecaab8020032ad8fb292628d
3
+ size 476220658
diffusiondb-large-part-1/part-000045.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e6e5bed87bc3fb3437e42ba4b7b2861e3b80d1c8fc0a45f566981d363c9220d
3
+ size 471755551
diffusiondb-large-part-1/part-000046.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8575a79dd290dc646a09e16f0bbaa0972b6159d98df851a0b64a6d17ca932c23
3
+ size 482678871
diffusiondb-large-part-1/part-000047.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:21d0e79521d5c98ab04e409d95be7c84fea32d7f131a6852a0d6fe343f1259e2
3
+ size 526700668