Interior-defect specialist model

Instruction following

from mistral_inference.transformer import Transformer
from mistral_inference.generate import generate

from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
from mistral_common.protocol.instruct.messages import UserMessage
from mistral_common.protocol.instruct.request import ChatCompletionRequest

import os

# Path to the downloaded model folder (must contain tekken.json and the
# consolidated weights). The original snippet referenced an undefined
# `mistral_models_path`, which raises NameError when run as-is; resolve it
# from the environment, falling back to the conventional download location.
mistral_models_path = os.environ.get(
    "MISTRAL_MODELS_PATH",
    os.path.join(os.path.expanduser("~"), "mistral_models", "Nemo-Instruct"),
)

# Load the Tekken tokenizer and the transformer weights from the model folder.
tokenizer = MistralTokenizer.from_file(f"{mistral_models_path}/tekken.json")
model = Transformer.from_folder(mistral_models_path)

# User prompt (Korean): roughly "There are many kinds of wallpaper defects.
# Please tell me about solutions." — kept byte-identical; it is runtime data.
prompt = "벽지하자는 종류가 많습니다. 해결방안에 대해 알려주세요."

# Wrap the prompt in the instruct chat-completion request format.
completion_request = ChatCompletionRequest(messages=[UserMessage(content=prompt)])

# Apply the instruct chat template and tokenize.
tokens = tokenizer.encode_chat_completion(completion_request).tokens

# Generate a short, low-temperature completion; stop at the tokenizer's EOS id.
out_tokens, _ = generate(
    [tokens],
    model,
    max_tokens=64,
    temperature=0.35,
    eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id,
)
result = tokenizer.decode(out_tokens[0])

print(result)
Downloads last month
29
Safetensors
Model size
12.2B params
Tensor type
F32
ยท
F16
ยท
Inference Providers NEW
This model isn't deployed by any Inference Provider. ๐Ÿ™‹ Ask for provider support