Haiyang-W committed on
Commit 27e5f68
1 Parent(s): 91ae9d9

Upload config.json

Files changed (1)
config.json +32 -0
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "architectures": [
+     "TokenFormerForCausalLM"
+   ],
+   "num_layers": 24,
+   "hidden_size": 1024,
+   "num_attention_heads": 16,
+   "qkv_slot_num": 1024,
+   "proj_slot_num": 1024,
+   "ffn_slot_num": 4096,
+   "seq_length": 2048,
+   "max_position_embeddings": 2048,
+   "pos_emb": "rotary",
+   "rotary_pct": 0.25,
+   "no_weight_tying": false,
+   "norm": "layernorm_nonparam",
+   "final_norm": "layernorm",
+   "gpt_j_residual": false,
+   "output_layer_parallelism": "column",
+   "use_bias_in_attn_linear": false,
+   "attention_config": [[["tokenformer"], 24]],
+   "norm_activation_type": "l2_norm_gelu",
+   "scaled_upper_triang_masked_softmax_fusion": false,
+   "bias_gelu_fusion": false,
+   "rope_fusion": false,
+   "layernorm_fusion": false,
+   "init_method": "normal",
+   "output_layer_init_method": "wang_init",
+   "use_cache": true,
+   "torch_dtype": "float16",
+   "transformers_version": "4.36.0"
+ }
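The uploaded file is a standard JSON model config listing the TokenFormer hyperparameters. As a minimal sketch (not part of this commit), the snippet below reads the file with Python's json module and derives a couple of quantities implied by the values above; the local path "config.json" is an assumption about where the file was downloaded.

import json

# Minimal sketch: inspect the uploaded config.json locally.
# The path "config.json" is an assumption -- point it at your downloaded copy.
with open("config.json") as f:
    cfg = json.load(f)

hidden_size = cfg["hidden_size"]                 # 1024
num_heads = cfg["num_attention_heads"]           # 16
head_dim = hidden_size // num_heads              # 64 dims per attention head
rotary_dim = int(head_dim * cfg["rotary_pct"])   # 16 rotary dims (rotary_pct = 0.25)

print(cfg["architectures"][0], "-", cfg["num_layers"], "layers")
print("head_dim:", head_dim, "rotary_dim:", rotary_dim)

Because "TokenFormerForCausalLM" is not a built-in transformers architecture, loading the model itself would rely on the repository's own modeling code (e.g. via trust_remote_code); the plain-JSON inspection above avoids that dependency.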