""" |
|
|
Comprehensive analysis of the GPT-OSS-120B model |
|
|
""" |
|
|
|
|
|
import logging
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def analyze_model():
    """Comprehensive model analysis."""
    model_path = "./my_model"

logger.info("=" * 60) |
|
|
logger.info("🔍 GPT-OSS-120B-MXFP4-Q4 Comprehensive Analysis") |
|
|
logger.info("=" * 60) |
|
|
|
|
|
|
|
|
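    # Model configuration: architecture metadata read from config.json.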
    config = AutoConfig.from_pretrained(model_path)
    logger.info("📊 Model Configuration:")
    logger.info(f"   Architecture: {config.architectures[0]}")
    logger.info(f"   Model type: {config.model_type}")
    logger.info(f"   Vocab size: {config.vocab_size:,}")
    logger.info(f"   Hidden size: {config.hidden_size}")
    logger.info(f"   Num hidden layers: {config.num_hidden_layers}")
    logger.info(f"   Num attention heads: {config.num_attention_heads}")
    logger.info(f"   Max position embeddings: {config.max_position_embeddings}")

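    # Tokenizer: vocabulary size and special-token configuration.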
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    logger.info("\n🔤 Tokenizer Analysis:")
    logger.info(f"   Vocabulary size: {tokenizer.vocab_size:,}")
    logger.info(f"   Special tokens: {len(tokenizer.special_tokens_map)}")
    logger.info(f"   Padding token: {tokenizer.pad_token}")
    logger.info(f"   EOS token: {tokenizer.eos_token}")

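    # Multilingual and code prompts exercise different regions of the vocabulary;
    # the final prompt probes how chat-style special-token markup is tokenized.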
    test_prompts = [
        "The capital of France is",
        "Artificial intelligence is",
        "The future of machine learning will",
        "Once upon a time",
        "import numpy as np",
        "量子コンピューティングとは",  # Japanese: "Quantum computing is"
        "El aprendizaje automático es",  # Spanish: "Machine learning is"
        "机器学习是",  # Chinese: "Machine learning is"
        "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nHello!<|im_end|>\n<|im_start|>assistant",
    ]

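    # Encode each prompt and report its token count, leading ids, and round-trip decode.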
logger.info("\n🧪 Tokenization Examples:") |
|
|
for prompt in test_prompts: |
|
|
tokens = tokenizer.encode(prompt) |
|
|
decoded = tokenizer.decode(tokens[:10]) + ("..." if len(tokens) > 10 else "") |
|
|
logger.info(f" '{prompt[:30]}{'...' if len(prompt) > 30 else ''}'") |
|
|
logger.info(f" → {len(tokens)} tokens: {tokens[:10]}{'...' if len(tokens) > 10 else ''}") |
|
|
logger.info(f" → decoded: {decoded}") |
|
|
|
|
|
|
|
|
    model_files = list(Path(model_path).glob("*.safetensors"))
    logger.info(f"\n📦 Model Files: {len(model_files)} safetensors files")

    # Back-of-envelope memory estimate: MXFP4 stores weights at roughly 4 bits
    # (~0.5 bytes) per parameter, ignoring the small per-block scale overhead.
    total_params = 120_000_000_000  # nominal 120B; the exact checkpoint count differs slightly
    bytes_per_param = 0.5
    total_memory_gb = (total_params * bytes_per_param) / (1024 ** 3)

    logger.info("\n💾 Memory Requirements (Estimated):")
    logger.info(f"   Model size (4-bit): ~{total_memory_gb:.1f} GB")
    logger.info(f"   Inference RAM: ~{total_memory_gb * 1.5:.1f} GB+")
    logger.info(f"   GPU VRAM: ~{total_memory_gb:.1f} GB+ (recommended)")

    return config, tokenizer


if __name__ == "__main__":
    analyze_model()