Upload SAE l12a_8x
- l12a_8x/cfg.json +1 -0
- l12a_8x/sae_weights.safetensors +3 -0
l12a_8x/cfg.json
ADDED
@@ -0,0 +1 @@
+{"architecture": "jumprelu", "d_in": 4096, "d_sae": 32768, "dtype": "bfloat16", "device": "cuda:0", "model_name": "meta-llama/Llama-3.1-8B", "hook_name": "blocks.12.hook_attn_out", "hook_layer": 12, "hook_head_index": null, "activation_fn_str": "relu", "activation_fn_kwargs": {}, "apply_b_dec_to_input": false, "finetuning_scaling_factor": false, "sae_lens_training_version": "5.6.1", "prepend_bos": true, "dataset_path": "cerebras/SlimPajama-627B", "dataset_trust_remote_code": true, "context_size": 1024, "normalize_activations": "none", "neuronpedia_id": null, "model_from_pretrained_kwargs": {"center_writing_weights": false}, "seqpos_slice": [null], "l1_coefficient": 0.001, "lp_norm": 1, "use_ghost_grads": false, "normalize_sae_decoder": true, "noise_scale": 0.0, "decoder_orthogonal_init": false, "init_encoder_as_decoder_transpose": false, "mse_loss_normalization": null, "decoder_heuristic_init": false, "scale_sparsity_penalty_by_decoder_norm": false, "jumprelu_init_threshold": 0.00946044921875, "jumprelu_bandwidth": 0.001}
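
For reference, a minimal sketch of loading these weights and running the SAE forward pass with safetensors and PyTorch. The tensor names (W_enc, W_dec, b_enc, b_dec, threshold) and their shapes are assumptions based on the SAELens jumprelu convention suggested by sae_lens_training_version 5.6.1 in the config; they are not confirmed by this diff.

import torch
from safetensors.torch import load_file

# Assumed SAELens jumprelu layout: W_enc (d_in, d_sae), W_dec (d_sae, d_in),
# b_enc (d_sae,), b_dec (d_in,), threshold (d_sae,), all stored in bfloat16.
params = load_file("l12a_8x/sae_weights.safetensors")
W_enc, b_enc = params["W_enc"], params["b_enc"]
W_dec, b_dec = params["W_dec"], params["b_dec"]
threshold = params["threshold"]

def sae_forward(x):
    # x: activations from blocks.12.hook_attn_out of Llama-3.1-8B, shape (..., 4096).
    # apply_b_dec_to_input is false in cfg.json, so the input is not centered first.
    pre_acts = x @ W_enc + b_enc
    acts = pre_acts * (pre_acts > threshold)  # JumpReLU: keep values above the learned threshold
    recon = acts @ W_dec + b_dec
    return acts, recon

x = torch.randn(1, 4096, dtype=W_enc.dtype)
feature_acts, reconstruction = sae_forward(x)
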
l12a_8x/sae_weights.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:099203c255ccda0962d0f5da32d5df43967415b6f1a6085bb43e37b9b6865adb
+size 537010584
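
The reported size is consistent with the config: with d_in = 4096, d_sae = 32768, and bfloat16 (2 bytes per value), the encoder and decoder matrices alone account for 2 × 4096 × 32768 × 2 = 536,870,912 bytes; adding the assumed b_enc, b_dec, and per-latent threshold vectors ((32768 + 4096 + 32768) × 2 = 139,264 bytes) gives 537,010,176 bytes, with the remaining ~400 bytes plausibly taken up by the safetensors header.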