{
  "architectures": [
    "CustomStudentModel"
  ],
  "hidden_size": 256,
  "num_attention_heads": 4,
  "num_decoder_layers": 4,
  "num_encoder_layers": 4,
  "torch_dtype": "float32",
  "transformers_version": "4.47.0",
  "vocab_size": 250054
}