saddam213 committed on
Commit
f380096
·
verified ·
1 Parent(s): bed9ea4

Upload 22 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,11 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Sample.png filter=lfs diff=lfs merge=lfs -text
37
+ Sample2.png filter=lfs diff=lfs merge=lfs -text
38
+ Sample3.png filter=lfs diff=lfs merge=lfs -text
39
+ Sample4.png filter=lfs diff=lfs merge=lfs -text
40
+ text_encoder_2/model.onnx.data filter=lfs diff=lfs merge=lfs -text
41
+ transformer/model.onnx.data filter=lfs diff=lfs merge=lfs -text
42
+ vae_decoder/model.onnx.data filter=lfs diff=lfs merge=lfs -text
43
+ vae_encoder/model.onnx.data filter=lfs diff=lfs merge=lfs -text
Icon.png ADDED
README.md ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ pipeline_tag: text-to-image
3
+ ---
4
+ # FLUX.1-schnell - Onnx Olive DirectML Optimized
5
+
6
+ ## Original Model
7
+ https://huggingface.co/black-forest-labs/FLUX.1-schnell
8
+
9
+
10
+ ## C# Inference Demo
11
+ https://github.com/TensorStack-AI/OnnxStack
12
+
13
+ ```csharp
14
+ // Create Pipeline
15
+ var pipeline = FluxPipeline.CreatePipeline("D:\\Models\\Flux_schnell-f16-onnx");
16
+
17
+ // Prompt
18
+ var promptOptions = new PromptOptions
19
+ {
20
+ Prompt = "A cow in a suit holding a sign that says 'OnnxStack Flux', intricate design and details, hyperrealism, photorealistic, cinematic."
21
+ };
22
+
23
+ // Scheduler Options
24
+ var schedulerOptions = pipeline.DefaultSchedulerOptions with
25
+ {
26
+ GuidanceScale = 0f,
27
+ InferenceSteps = 4,
28
+ SchedulerType = SchedulerType.FlowMatchEulerDiscrete,
29
+ };
30
+
31
+ // Run pipeline
32
+ var result = await pipeline.GenerateImageAsync(promptOptions, schedulerOptions);
33
+
34
+ // Save Image Result
35
+ await result.SaveAsync("Result.png");
36
+ ```
37
+ ## Inference Result
38
+ ![Intro Image](Sample.png)
Sample.png ADDED

Git LFS Details

  • SHA256: 0f0c400aa9c9398799f70148d92eeea6a1d4909dd37230d2d2f8840c92dce987
  • Pointer size: 132 Bytes
  • Size of remote file: 2.57 MB
Sample2.png ADDED

Git LFS Details

  • SHA256: 49d8900ce43e7d4c4ce224169f09a3503e6c179500513d745ad6eeac469fe5a3
  • Pointer size: 132 Bytes
  • Size of remote file: 2.18 MB
Sample3.png ADDED

Git LFS Details

  • SHA256: b6a0dbf2da7f781744a43e3efbbe86b4cb8ace1bae07104107091bb7471defab
  • Pointer size: 132 Bytes
  • Size of remote file: 1.65 MB
Sample4.png ADDED

Git LFS Details

  • SHA256: bc03e49fe00f56a8d07197490ea10bb0927f2f1f5927bfd551777efb3ba6dc64
  • Pointer size: 132 Bytes
  • Size of remote file: 1.82 MB
amuse_template.json ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "Id": "2C5CEBD1-2354-4042-BE7A-65F88DB31828",
3
+ "FileVersion": "1",
4
+ "Created": "2025-03-07T00:00:00",
5
+ "IsProtected": false,
6
+ "Name": "FLUX.1-Schnell",
7
+ "ImageIcon": "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/Icon.png",
8
+ "Author": "BlackForestLabs",
9
+ "Description": "FLUX.1 [schnell] is a 12-billion parameter rectified flow transformer designed to generate detailed and high-quality images directly from text descriptions. This model offers impressive capabilities in transforming prompts into vivid and accurate visual representations.",
10
+ "Rank": 200,
11
+ "Group": "Online",
12
+ "Template": "FluxSchnell",
13
+ "Category": "StableDiffusion",
14
+ "StableDiffusionTemplate": {
15
+ "PipelineType": "Flux",
16
+ "ModelType": "Base",
17
+ "SampleSize": 1024,
18
+ "TokenizerLength": 768,
19
+ "Tokenizer2Limit": 256,
20
+ "Optimization": "None",
21
+ "DiffuserTypes": [
22
+ "TextToImage",
23
+ "ImageToImage"
24
+ ],
25
+ "SchedulerDefaults": {
26
+ "SchedulerType": "FlowMatchEulerDiscrete",
27
+ "Steps": 4,
28
+ "StepsMin": 1,
29
+ "StepsMax": 50,
30
+ "Guidance": 0,
31
+ "GuidanceMin": 0,
32
+ "GuidanceMax": 0,
33
+ "TimestepSpacing": "Linspace",
34
+ "BetaSchedule": "ScaledLinear",
35
+ "BetaStart": 0.00085,
36
+ "BetaEnd": 0.011
37
+ }
38
+ },
39
+ "MemoryMin": 24,
40
+ "MemoryMax": 44,
41
+ "DownloadSize": 34,
42
+ "Website": "https://blackforestlabs.ai",
43
+ "Licence": "https://github.com/black-forest-labs/flux/blob/main/model_licenses/LICENSE-FLUX1-schnell",
44
+ "LicenceType": "Commercial",
45
+ "IsLicenceAccepted": false,
46
+ "Repository": "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse",
47
+ "RepositoryFiles": [
48
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/text_encoder/model.onnx",
49
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/text_encoder_2/model.onnx",
50
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/text_encoder_2/model.onnx.data",
51
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/tokenizer/merges.txt",
52
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/tokenizer/special_tokens_map.json",
53
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/tokenizer/vocab.json",
54
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/tokenizer_2/special_tokens_map.json",
55
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/tokenizer_2/spiece.model",
56
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/tokenizer_2/tokenizer.json",
57
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/transformer/model.onnx",
58
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/transformer/model.onnx.data",
59
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/vae_decoder/model.onnx",
60
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/vae_decoder/model.onnx.data",
61
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/vae_encoder/model.onnx",
62
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/vae_encoder/model.onnx.data",
63
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/amuse_template.json",
64
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/README.md"
65
+ ],
66
+ "PreviewImages": [
67
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/Sample.png",
68
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/Sample2.png",
69
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/Sample3.png",
70
+ "https://huggingface.co/TensorStack/FLUX.1-Schnell-amuse/resolve/main/Sample4.png"
71
+ ],
72
+ "Tags": []
73
+ }
text_encoder/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d08ac3785c984816486db824e3620804648e50536771a5c5f21945c6aaa21285
3
+ size 246372136
text_encoder_2/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d9ac8f4de28b98b1d97321261fd0ea6ca8f3c975b499d89e5455492f2ebd8d8
3
+ size 488267
text_encoder_2/model.onnx.data ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:523a4913b0170e5ac5994a557f65d7fadc893a0392b1d6ecaccc02451b08aa73
3
+ size 11537887232
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<|startoftext|>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<|endoftext|>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<|endoftext|>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_2/special_tokens_map.json ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<extra_id_0>",
4
+ "<extra_id_1>",
5
+ "<extra_id_2>",
6
+ "<extra_id_3>",
7
+ "<extra_id_4>",
8
+ "<extra_id_5>",
9
+ "<extra_id_6>",
10
+ "<extra_id_7>",
11
+ "<extra_id_8>",
12
+ "<extra_id_9>",
13
+ "<extra_id_10>",
14
+ "<extra_id_11>",
15
+ "<extra_id_12>",
16
+ "<extra_id_13>",
17
+ "<extra_id_14>",
18
+ "<extra_id_15>",
19
+ "<extra_id_16>",
20
+ "<extra_id_17>",
21
+ "<extra_id_18>",
22
+ "<extra_id_19>",
23
+ "<extra_id_20>",
24
+ "<extra_id_21>",
25
+ "<extra_id_22>",
26
+ "<extra_id_23>",
27
+ "<extra_id_24>",
28
+ "<extra_id_25>",
29
+ "<extra_id_26>",
30
+ "<extra_id_27>",
31
+ "<extra_id_28>",
32
+ "<extra_id_29>",
33
+ "<extra_id_30>",
34
+ "<extra_id_31>",
35
+ "<extra_id_32>",
36
+ "<extra_id_33>",
37
+ "<extra_id_34>",
38
+ "<extra_id_35>",
39
+ "<extra_id_36>",
40
+ "<extra_id_37>",
41
+ "<extra_id_38>",
42
+ "<extra_id_39>",
43
+ "<extra_id_40>",
44
+ "<extra_id_41>",
45
+ "<extra_id_42>",
46
+ "<extra_id_43>",
47
+ "<extra_id_44>",
48
+ "<extra_id_45>",
49
+ "<extra_id_46>",
50
+ "<extra_id_47>",
51
+ "<extra_id_48>",
52
+ "<extra_id_49>",
53
+ "<extra_id_50>",
54
+ "<extra_id_51>",
55
+ "<extra_id_52>",
56
+ "<extra_id_53>",
57
+ "<extra_id_54>",
58
+ "<extra_id_55>",
59
+ "<extra_id_56>",
60
+ "<extra_id_57>",
61
+ "<extra_id_58>",
62
+ "<extra_id_59>",
63
+ "<extra_id_60>",
64
+ "<extra_id_61>",
65
+ "<extra_id_62>",
66
+ "<extra_id_63>",
67
+ "<extra_id_64>",
68
+ "<extra_id_65>",
69
+ "<extra_id_66>",
70
+ "<extra_id_67>",
71
+ "<extra_id_68>",
72
+ "<extra_id_69>",
73
+ "<extra_id_70>",
74
+ "<extra_id_71>",
75
+ "<extra_id_72>",
76
+ "<extra_id_73>",
77
+ "<extra_id_74>",
78
+ "<extra_id_75>",
79
+ "<extra_id_76>",
80
+ "<extra_id_77>",
81
+ "<extra_id_78>",
82
+ "<extra_id_79>",
83
+ "<extra_id_80>",
84
+ "<extra_id_81>",
85
+ "<extra_id_82>",
86
+ "<extra_id_83>",
87
+ "<extra_id_84>",
88
+ "<extra_id_85>",
89
+ "<extra_id_86>",
90
+ "<extra_id_87>",
91
+ "<extra_id_88>",
92
+ "<extra_id_89>",
93
+ "<extra_id_90>",
94
+ "<extra_id_91>",
95
+ "<extra_id_92>",
96
+ "<extra_id_93>",
97
+ "<extra_id_94>",
98
+ "<extra_id_95>",
99
+ "<extra_id_96>",
100
+ "<extra_id_97>",
101
+ "<extra_id_98>",
102
+ "<extra_id_99>"
103
+ ],
104
+ "eos_token": {
105
+ "content": "</s>",
106
+ "lstrip": false,
107
+ "normalized": false,
108
+ "rstrip": false,
109
+ "single_word": false
110
+ },
111
+ "pad_token": {
112
+ "content": "<pad>",
113
+ "lstrip": false,
114
+ "normalized": false,
115
+ "rstrip": false,
116
+ "single_word": false
117
+ },
118
+ "unk_token": {
119
+ "content": "<unk>",
120
+ "lstrip": false,
121
+ "normalized": false,
122
+ "rstrip": false,
123
+ "single_word": false
124
+ }
125
+ }
tokenizer_2/spiece.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
3
+ size 791656
tokenizer_2/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
transformer/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6330d6488bd6ccf5dc7443a0c30ef559d6dd9af93acdfb1df6604c4799a7dca
3
+ size 4876083
transformer/model.onnx.data ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0cfc6c6e979aabfce5cadf23691808f46bfc9967d2a661e2894457e285d8c67
3
+ size 23782318080
vae_decoder/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d9921d5ceab77f8ce59a7b077c71dba9652722e36610c0ccd4aa817796e26b5
3
+ size 134332
vae_decoder/model.onnx.data ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7851af2be3f720d2e383974f0421a9a0ea7d5c6863cb23c1f001d5847d31c40
3
+ size 198172160
vae_encoder/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9049b13e7fad9cea48da83d9425cf68389431c57b6ec97b9fe33caabd58f27a7
3
+ size 119375
vae_encoder/model.onnx.data ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c64a1d631cba8cc459b8c35b172a308e3de308f921091b1d05982acbec8c6db
3
+ size 137088512