name: TinyLlama 1B
model: tinyllama:1B
version: 1
# Results Preferences
stop:
- </s>
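# Note on the stop list: </s> is the Llama-family end-of-sequence token that
# TinyLlama emits when it finishes a turn; decoding halts as soon as it appears.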
top_p: 0.95
temperature: 0.7
frequency_penalty: 0
presence_penalty: 0
max_tokens: 4096 # Infer from base config.json -> max_position_embeddings
stream: true # true | false
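# Rough guide to the sampling knobs above (standard definitions, nothing
# TinyLlama-specific): top_p 0.95 keeps the smallest token set whose
# cumulative probability reaches 0.95 (nucleus sampling); temperature 0.7
# mildly sharpens the distribution relative to 1.0; both penalties at 0
# leave repetition unpenalized; stream: true returns tokens incrementally
# instead of waiting for the full completion.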
# Engine / Model Settings
ngl: 33 # Number of layers to offload to GPU; infer from base config.json -> num_hidden_layers (+1 for the output layer)
ctx_len: 4096 # Infer from base config.json -> max_position_embeddings
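# For orientation, a llama-family base config.json exposes fields shaped like
# the sketch below. The field names are the standard Hugging Face ones; the
# values are placeholders, not TinyLlama's actual numbers, so check the real
# config.json before copying them:
#
#   "max_position_embeddings": 4096   # upper bound for ctx_len / max_tokens
#   "num_hidden_layers": 32           # ngl can offload up to this many layers (+1)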
engine: llama-cpp
prompt_template: "<|system|>\n{system_message}</s>\n<|user|>\n{prompt}</s>\n<|assistant|>\n"
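# With the template above, a request with system_message
# "You are a helpful assistant." and prompt "What is YAML?" renders as the
# following text before being fed to the model:
#
#   <|system|>
#   You are a helpful assistant.</s>
#   <|user|>
#   What is YAML?</s>
#   <|assistant|>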