andreuka18 committed
Commit 097bbb2 (verified)
Parent: 244b3a1

Upload SAE model.layers.12

model.layers.12/cfg.json ADDED
@@ -0,0 +1 @@
+{"model_name": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "model_class_name": "AutoModelForCausalLM", "hook_name": "model.layers.12", "hook_eval": "NOT_IN_USE", "hook_layer": 12, "hook_head_index": null, "dataset_path": "andreuka18/DeepSeek-R1-Distill-Llama-8B-lmsys-openthoughts-tokenized", "dataset_trust_remote_code": true, "streaming": false, "is_dataset_tokenized": true, "context_size": 1024, "use_cached_activations": false, "cached_activations_path": null, "architecture": "standard", "d_in": 4096, "d_sae": 65536, "b_enc_init_method": null, "b_dec_init_method": "geometric_median", "expansion_factor": 16, "activation_fn": "relu", "activation_fn_kwargs": {}, "normalize_sae_decoder": false, "noise_scale": 0.0, "from_pretrained_path": null, "apply_b_dec_to_input": false, "decoder_orthogonal_init": false, "decoder_heuristic_init": true, "decoder_constant_norm": 0.1, "init_encoder_as_decoder_transpose": true, "scale_encoder": false, "n_batches_in_buffer": 32, "training_tokens": 1024000000, "finetuning_tokens": 0, "store_batch_size_prompts": 8, "normalize_activations": "expected_average_only_in", "seqpos_slice": [null], "device": "cuda", "act_store_device": "cuda", "seed": 42, "dtype": "float32", "prepend_bos": false, "jumprelu_init_threshold": 0.001, "jumprelu_bandwidth": 0.001, "autocast": false, "autocast_lm": false, "compile_llm": false, "llm_compilation_mode": null, "compile_sae": false, "sae_compilation_mode": null, "train_batch_size_tokens": 4096, "adam_beta1": 0.9, "adam_beta2": 0.999, "mse_loss_normalization": null, "l1_coefficient": 5, "lp_norm": 1.0, "scale_sparsity_penalty_by_decoder_norm": true, "l1_warm_up_steps": 12500, "lr": 5e-05, "lr_scheduler_name": "constant", "lr_warm_up_steps": 0, "lr_end": 5e-06, "lr_decay_steps": 50000, "n_restart_cycles": 1, "finetuning_method": null, "use_ghost_grads": false, "feature_sampling_window": 1000, "dead_feature_window": 1000, "dead_feature_threshold": 0.0001, "n_eval_batches": 10, "eval_batch_size_prompts": null, "log_to_wandb": true, "log_activations_store_to_wandb": false, "log_optimizer_state_to_wandb": false, "wandb_project": "sae-lmsys-openthoughts", "wandb_id": null, "run_name": "r1-distill-llama-8b_ctx-1024_l1-5_l-12", "wandb_entity": null, "wandb_log_frequency": 30, "eval_every_n_wandb_logs": 20, "resume": false, "n_checkpoints": 5, "checkpoint_path": "sae_runs/u9g713l9", "verbose": true, "model_kwargs": {}, "model_from_pretrained_kwargs": {"torch_dtype": "bfloat16"}, "sae_lens_version": "5.5.2", "sae_lens_training_version": "5.5.2", "exclude_special_tokens": [128000, 128001], "tokens_per_buffer": 134217728}
model.layers.12/sae_weights.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:917a0c32d98ef5d74f1f9c6227a73097dc39ab834d036e10b2a600551e1ee3f1
+size 2147762504
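The LFS pointer's size field is consistent with the four tensors a standard SAE stores in float32 (W_enc, W_dec, b_enc, b_dec) at the dimensions from cfg.json; a quick arithmetic check:

```python
# Sanity check: the LFS size field matches a standard float32 SAE of this shape.
d_in, d_sae, itemsize = 4096, 65536, 4  # dtype is float32 per cfg.json
n_params = (
    d_in * d_sae    # W_enc: 4096 x 65536
    + d_sae * d_in  # W_dec: 65536 x 4096
    + d_sae         # b_enc
    + d_in          # b_dec
)
print(n_params * itemsize)  # 2147762176 bytes; the ~328-byte remainder
                            # vs. 2147762504 is the safetensors JSON header
```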
model.layers.12/sparsity.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7b823fb3343197321cc3370a5ed002289247a9901c3322e86d59a27603ccf24
+size 262224
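At 262,224 bytes, this file holds roughly one float32 per latent (65536 × 4 = 262,144 bytes plus header), i.e. a per-feature sparsity statistic tracked during training. A sketch of inspecting it; the tensor key and exact semantics are assumptions, so the code iterates over whatever keys are present:

```python
# Sketch: peek at the sparsity tensor without assuming its key name.
from safetensors.torch import load_file

tensors = load_file("model.layers.12/sparsity.safetensors")
for name, t in tensors.items():
    # Expected: one float32 value per SAE latent, shape [65536].
    print(name, t.shape, t.dtype)
# 65536 * 4 bytes = 262144; the 80-byte remainder of 262224 is the header.
```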