- model: opt-125m
- config: IntxWeightOnlyConfig
- config version: 1
- torchao version: 0.14.dev
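
The quantized checkpoint in this repository was produced and uploaded with the script below.
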
```python
import io
import logging

import torch
from huggingface_hub import HfApi
from torchao.quantization import IntxWeightOnlyConfig
from torchao.quantization.granularity import PerGroup
from transformers import AutoModelForCausalLM, AutoTokenizer, TorchAoConfig

# Configure logging to see warnings and debug information
logging.basicConfig(
    level=logging.INFO, format="%(name)s - %(levelname)s - %(message)s"
)

# Raise verbosity on the loggers that may emit serialization warnings
logging.getLogger("transformers").setLevel(logging.INFO)
logging.getLogger("torchao").setLevel(logging.INFO)
logging.getLogger("safetensors").setLevel(logging.INFO)
logging.getLogger("huggingface_hub").setLevel(logging.INFO)

model_id = "facebook/opt-125m"

version = 1
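# int4 weight-only quantization, with one scale per group of 32 weight elements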
quant_config = IntxWeightOnlyConfig(
    weight_dtype=torch.int4,
    granularity=PerGroup(32),
    version=version
)
quantization_config = TorchAoConfig(quant_type=quant_config)
quantized_model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.bfloat16,
    quantization_config=quantization_config,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Push to hub
MODEL_NAME = model_id.split("/")[-1]
save_to = f"torchao-testing/{MODEL_NAME}-IntxWeightOnlyConfig-v{version}-0.14.0.dev"
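# The quantized weights are torchao tensor subclasses, so the model is pushed
# with pickle-based serialization rather than safetensors.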
quantized_model.push_to_hub(save_to, safe_serialization=False)
tokenizer.push_to_hub(save_to)


# Manual Testing
prompt = "Hey, are you conscious? Can you talk to me?"
print("Prompt:", prompt)
inputs = tokenizer(
    prompt,
    return_tensors="pt",
).to("cuda")

# Set temperature to 0 so the generated output is deterministic.
generated_ids = quantized_model.generate(**inputs, max_new_tokens=128, temperature=0)

api = HfApi()
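# Upload the prompt and the generated token ids as regression-test artifacts.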
buf = io.BytesIO()
torch.save(prompt, buf)
buf.seek(0)  # rewind the buffer so the upload reads the saved bytes
api.upload_file(
    path_or_fileobj=buf,
    path_in_repo="model_prompt.pt",
    repo_id=save_to,
)

buf = io.BytesIO()
torch.save(generated_ids, buf)
buf.seek(0)  # rewind before uploading
api.upload_file(
    path_or_fileobj=buf,
    path_in_repo="model_output.pt",
    repo_id=save_to,
)

output_text = tokenizer.batch_decode(
    generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print("Response:", output_text[0][len(prompt) :])