zerostratos committed
Commit cc787a9 · verified · 1 Parent(s): af79d87

Upload Qwen3ForCausalLM

Files changed (3)
  1. config.json +33 -3
  2. generation_config.json +1 -1
  3. model.safetensors +3 -0
config.json CHANGED
@@ -8,9 +8,39 @@
   "eos_token_id": 151645,
   "head_dim": 128,
   "hidden_act": "silu",
-  "hidden_size": 2048,
+  "hidden_size": 1024,
   "initializer_range": 0.02,
-  "intermediate_size": 6144,
+  "intermediate_size": 3072,
+  "layer_types": [
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention",
+    "full_attention"
+  ],
   "max_position_embeddings": 40960,
   "max_window_layers": 28,
   "model_type": "qwen3",
@@ -23,7 +53,7 @@
   "sliding_window": null,
   "tie_word_embeddings": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.52.4",
+  "transformers_version": "4.53.0",
   "use_cache": true,
   "use_sliding_window": false,
   "vocab_size": 151936
generation_config.json CHANGED
@@ -9,5 +9,5 @@
   "temperature": 0.6,
   "top_k": 20,
   "top_p": 0.95,
-  "transformers_version": "4.52.4"
+  "transformers_version": "4.53.0"
 }
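The only change here is the transformers_version stamp; the sampling defaults themselves (temperature 0.6, top_k 20, top_p 0.95) are untouched. A quick sketch of how these defaults are read back at generation time, again assuming a local checkout:

from transformers import GenerationConfig

# Reads generation_config.json from the current directory (local clone assumed).
gen_cfg = GenerationConfig.from_pretrained(".")
print(gen_cfg.temperature, gen_cfg.top_k, gen_cfg.top_p)  # -> 0.6 20 0.95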
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0013beb29261fb78ef2f1932eece31c07e1376794000cb11119d923330bcef1a
+size 2384234968
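These three lines are a Git LFS pointer, not the weights themselves: the actual blob (2,384,234,968 bytes, roughly 2.4 GB, consistent with about 0.6 B float32 parameters) is stored by content address and fetched on checkout. A small sketch, assuming the resolved model.safetensors sits in the current directory, for verifying a download against the pointer's oid and size:

import hashlib

EXPECTED_OID = "0013beb29261fb78ef2f1932eece31c07e1376794000cb11119d923330bcef1a"
EXPECTED_SIZE = 2384234968  # bytes, as recorded in the LFS pointer above

h = hashlib.sha256()
size = 0
with open("model.safetensors", "rb") as f:            # the resolved LFS object
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE
assert h.hexdigest() == EXPECTED_OID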