Adjust model_type, vocab_size, hidden_size, num_hidden_layers, num_attention_heads, and intermediate_size to match the target architecture; the values below follow the GPT-2 base configuration. Note that standard JSON does not permit inline comments, so the file itself must stay comment-free to parse correctly.

{
  "model_type": "gpt2",
  "model_name": "RabbitRedux",
  "version": "1.0",
  "author": "Canstralian",
  "description": "This configuration file defines the model settings for RabbitRedux.",
  "vocab_size": 50257,
  "hidden_size": 768,
  "num_hidden_layers": 12,
  "num_attention_heads": 12,
  "intermediate_size": 3072,
  "hidden_act": "gelu",
  "layer_norm_epsilon": 1e-5,
  "initializer_range": 0.02,
  "dropout": 0.1,
  "attention_probs_dropout_prob": 0.1,
  "pad_token_id": 50256,
  "eos_token_id": 50256,
  "bos_token_id": 50256,
  "training_params": {
    "batch_size": 32,
    "learning_rate": 0.001,
    "epochs": 10
  },
  "inference_params": {
    "max_length": 128,
    "temperature": 0.7
  }
}
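
As a sanity check, the file can be parsed with Python's json module and mapped onto a transformers GPT2Config. This is a minimal sketch, not part of the repository: the config.json path and the field-name mapping (hidden_size to n_embd, and so on) are assumptions, since the file uses BERT-style keys while GPT2Config expects GPT-2-style parameter names.

import json

from transformers import GPT2Config

# Parse the raw configuration file (the path is an assumption).
with open("config.json") as f:
    raw = json.load(f)

# Map the BERT-style keys above onto GPT2Config's GPT-2-style
# parameters. This mapping is illustrative, not defined by the file.
config = GPT2Config(
    vocab_size=raw["vocab_size"],
    n_embd=raw["hidden_size"],
    n_layer=raw["num_hidden_layers"],
    n_head=raw["num_attention_heads"],
    n_inner=raw["intermediate_size"],
    activation_function=raw["hidden_act"],
    layer_norm_epsilon=raw["layer_norm_epsilon"],
    initializer_range=raw["initializer_range"],
    resid_pdrop=raw["dropout"],
    attn_pdrop=raw["attention_probs_dropout_prob"],
    bos_token_id=raw["bos_token_id"],
    eos_token_id=raw["eos_token_id"],
    pad_token_id=raw["pad_token_id"],
)
print(config)

The custom training_params and inference_params blocks are not standard transformers config fields, so they are left out of the GPT2Config mapping and would be read separately (for example, raw["training_params"]["batch_size"]) by whatever training or inference script consumes them.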