# Uploaded model
- Developed by: Dai-Osaka
- License: apache-2.0
- Finetuned from model: llm-jp/llm-jp-3-13b
This Llama-architecture model was trained 2x faster with Unsloth and Hugging Face's TRL library.
## How to run inference
The following code obtains answers for the ELYZA-tasks-100-TV benchmark. It assumes `torch`, `transformers`, `bitsandbytes`, `accelerate`, and `tqdm` are installed.
```python
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
)
import torch
from tqdm import tqdm
import json

HF_TOKEN = "your-key"

model_name = "Dai-Osaka/llm-jp-3-13b-it"

# 4-bit NF4 quantization so the 13B model fits on a single GPU.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=False,
)

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
    token=HF_TOKEN,
)

tokenizer = AutoTokenizer.from_pretrained(
    model_name,
    trust_remote_code=True,
    token=HF_TOKEN,
)
```
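For reference, NF4 4-bit quantization stores the 13B weights in roughly 6.5 GB (13B parameters × 0.5 bytes), so a single 16 GB GPU is typically sufficient; `device_map="auto"` relies on the accelerate library to place layers. A quick way to check the actual footprint (a sketch, not part of the original card):

```python
# Reports the in-memory size of the loaded (quantized) weights.
print(f"Memory footprint: {model.get_memory_footprint() / 1e9:.1f} GB")
```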
```python
# The task file may spread one JSON object over several lines,
# so accumulate lines until a closing brace completes a record.
datasets = []
with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
    item = ""
    for line in f:
        line = line.strip()
        item += line
        if item.endswith("}"):
            datasets.append(json.loads(item))
            item = ""

results = []
for data in tqdm(datasets):
    input = data["input"]

    # 指示 = instruction, 回答 = answer.
    prompt = f"""### 指示
{input}
### 回答:
"""

    tokenized_input = tokenizer.encode(
        prompt, add_special_tokens=False, return_tensors="pt"
    ).to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            tokenized_input,
            max_new_tokens=100,
            do_sample=False,  # greedy decoding for reproducible answers
            repetition_penalty=1.2,
        )[0]

    # Drop the prompt tokens and keep only the newly generated answer.
    output = tokenizer.decode(
        outputs[tokenized_input.size(1):], skip_special_tokens=True
    )

    results.append({"task_id": data["task_id"], "input": input, "output": output})
```
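The code above collects the answers but does not persist them. A minimal sketch for writing the results to a JSONL file, one record per line (the output file name is illustrative, not from the original card):

```python
# ensure_ascii=False keeps Japanese text readable in the output file.
with open("./llm-jp-3-13b-it-outputs.jsonl", "w", encoding="utf-8") as f:
    for result in results:
        json.dump(result, f, ensure_ascii=False)
        f.write("\n")
```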