# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "datasets",
#     "huggingface-hub[hf_transfer]",
#     "flashinfer-python",
#     "hf-xet>=1.1.7",
#     "python-dotenv",
#     "torch",
#     "transformers",
#     "vllm",
# ]
# ///
"""
Generate responses for prompts in a dataset using vLLM for efficient GPU inference.
This script loads a dataset from Hugging Face Hub containing chat-formatted messages,
applies the model's chat template, generates responses using vLLM, and saves the
results back to the Hub with a comprehensive dataset card.
Example usage:
# Local execution with auto GPU detection
uv run generate-responses.py \\
username/input-dataset \\
username/output-dataset \\
--messages-column messages
# With custom model and sampling parameters
uv run generate-responses.py \\
username/input-dataset \\
username/output-dataset \\
--model-id meta-llama/Llama-3.1-8B-Instruct \\
--temperature 0.9 \\
--top-p 0.95 \\
--max-tokens 2048
# HF Jobs execution (see script output for full command)
hf jobs uv run --flavor a100x4 ...
"""
import argparse
import logging
import os
import sys
from datetime import datetime
from typing import Optional
from datasets import load_dataset
from dotenv import load_dotenv
from huggingface_hub import get_token, login
from torch import cuda
from tqdm.auto import tqdm
from transformers import AutoTokenizer
from vllm import LLM
# Enable HF Transfer for faster downloads
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
def check_gpu_availability() -> int:
"""Check if CUDA is available and return the number of GPUs."""
if not cuda.is_available():
logger.error("CUDA is not available. This script requires a GPU.")
logger.error(
"Please run on a machine with NVIDIA GPU or use HF Jobs with GPU flavor."
)
sys.exit(1)
num_gpus = cuda.device_count()
for i in range(num_gpus):
gpu_name = cuda.get_device_name(i)
gpu_memory = cuda.get_device_properties(i).total_memory / 1024**3
logger.info(f"GPU {i}: {gpu_name} with {gpu_memory:.1f} GB memory")
return num_gpus
def main(
src_dataset_hub_id: str,
output_dataset_hub_id: str,
model_id: str = "Qwen/Qwen3-Embedding-0.6B",
input_column: str = "text",
output_column: str = "embeddings",
gpu_memory_utilization: float = 0.90,
input_truncation_len: Optional[int] = None,
tensor_parallel_size: Optional[int] = None,
max_samples: Optional[int] = None,
hf_token: Optional[str] = None,
):
"""
    Main embedding generation pipeline.
Args:
src_dataset_hub_id: Input dataset on Hugging Face Hub
output_dataset_hub_id: Where to save results on Hugging Face Hub
model_id: Hugging Face model ID for embedding generation
input_column: Column name containing documents to embed
output_column: Column name for generated embeddings
gpu_memory_utilization: GPU memory utilization factor
        input_truncation_len: Maximum input length in tokens (None uses model default)
tensor_parallel_size: Number of GPUs to use (auto-detect if None)
max_samples: Maximum number of samples to process (None for all)
hf_token: Hugging Face authentication token
"""
generation_start_time = datetime.now().isoformat()
# GPU check and configuration
num_gpus = check_gpu_availability()
if tensor_parallel_size is None:
tensor_parallel_size = num_gpus
logger.info(
f"Auto-detected {num_gpus} GPU(s), using tensor_parallel_size={tensor_parallel_size}"
)
else:
logger.info(f"Using specified tensor_parallel_size={tensor_parallel_size}")
if tensor_parallel_size > num_gpus:
logger.warning(
f"Requested {tensor_parallel_size} GPUs but only {num_gpus} available"
)
# Authentication - try multiple methods
load_dotenv()
HF_TOKEN = hf_token or os.environ.get("HF_TOKEN") or get_token()
if not HF_TOKEN:
logger.error("No HuggingFace token found. Please provide token via:")
logger.error(" 1. --hf-token argument")
logger.error(" 2. HF_TOKEN environment variable")
logger.error(" 3. Run 'huggingface-cli login' or use login() in Python")
sys.exit(1)
logger.info("HuggingFace token found, authenticating...")
login(token=HF_TOKEN)
    # Initialize vLLM
    logger.info(f"Loading model: {model_id}")
    vllm_kwargs = {
        "model": model_id,
        "tensor_parallel_size": tensor_parallel_size,
        "gpu_memory_utilization": gpu_memory_utilization,
        "task": "embed",
    }
    if input_truncation_len is not None:
        # Leave headroom above the truncation length for special tokens added by the model
        vllm_kwargs["max_model_len"] = input_truncation_len + 128
    llm = LLM(**vllm_kwargs)
    # Load tokenizer for token-level truncation of input documents
logger.info("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(model_id)
# Load dataset
logger.info(f"Loading dataset: {src_dataset_hub_id}")
dataset = load_dataset(src_dataset_hub_id, split="train")
# Apply max_samples if specified
if max_samples is not None and max_samples < len(dataset):
logger.info(f"Limiting dataset to {max_samples} samples")
dataset = dataset.select(range(max_samples))
total_examples = len(dataset)
logger.info(f"Dataset loaded with {total_examples:,} examples")
    # Validate that the input column exists
if input_column not in dataset.column_names:
logger.error(
f"Column '{input_column}' not found. Available columns: {dataset.column_names}"
)
sys.exit(1)
logger.info(f"Using input column mode with column: '{input_column}'")
# Process documents and truncate if specified
logger.info("Preparing documents...")
all_documents = []
for example in tqdm(dataset, desc="Processing documents"):
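        # NOTE: 'title_dl' and 'source_url' are columns specific to this news dataset;
        # adjust this template if your dataset uses different metadata fields.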
document = f"# {example['title_dl']}\n\nFrom: {example['source_url']}\n\n{example[input_column]}"
# apply tokenizer to the document, then truncate using token counts
if input_truncation_len is not None:
tokens = tokenizer.encode(document)
if len(tokens) > input_truncation_len:
document = tokenizer.decode(tokens[:input_truncation_len])
        all_documents.append(document)  # all_documents is a plain list of strings
# Generate embeddings - vLLM handles batching internally
logger.info("vLLM will handle batching and scheduling automatically")
outputs = llm.embed(all_documents)
    # Extract the embedding vectors from the vLLM outputs
logger.info("Extracting generated embeddings...")
embeddings = [o.outputs.embedding for o in outputs]
    # Add embeddings to dataset
    logger.info("Adding embeddings to dataset...")
dataset = dataset.add_column(output_column, embeddings)
# Push dataset to hub
logger.info(f"Pushing dataset to: {output_dataset_hub_id}")
dataset.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)
logger.info("✅ Embedding generation complete!")
logger.info(
f"Dataset available at: https://huggingface.co/datasets/{output_dataset_hub_id}"
)
if __name__ == "__main__":
if len(sys.argv) > 1:
parser = argparse.ArgumentParser(
description="Generate responses for dataset prompts using vLLM",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
# Basic usage with default Qwen model
uv run generate-embeddings-uv-vllm.py input-dataset output-dataset
# With custom model and parameters
uv run generate-embeddings-uv-vllm.py input-dataset output-dataset \\
--model-id Qwen/Qwen3-Embedding-0.6B \\
--input-column text \\
--output-column embeddings
# Force specific GPU configuration
uv run generate-embeddings-uv-vllm.py input-dataset output-dataset \\
--tensor-parallel-size 2 \\
--gpu-memory-utilization 0.95
# Using environment variable for token
HF_TOKEN=hf_xxx uv run generate-embeddings-uv-vllm.py input-dataset output-dataset
""",
)
parser.add_argument(
"src_dataset_hub_id",
help="Input dataset on Hugging Face Hub (e.g., username/dataset-name)",
)
parser.add_argument(
"output_dataset_hub_id", help="Output dataset name on Hugging Face Hub"
)
parser.add_argument(
"--model-id",
type=str,
default="Qwen/Qwen3-Embedding-0.6B",
help="Model to use for generation (default: Qwen3-Embedding-0.6B)",
)
parser.add_argument(
"--input-column",
type=str,
default="text",
help="Column containing text to embed (default: text)",
)
parser.add_argument(
"--output-column",
type=str,
default="embeddings",
help="Column name for generated embeddings (default: embeddings)",
)
parser.add_argument(
"--max-samples",
type=int,
help="Maximum number of samples to process (default: all)",
)
parser.add_argument(
"--input-truncation-len",
type=int,
help="Maximum input length (default: model's default)",
)
parser.add_argument(
"--tensor-parallel-size",
type=int,
help="Number of GPUs to use (default: auto-detect)",
)
parser.add_argument(
"--gpu-memory-utilization",
type=float,
default=0.90,
help="GPU memory utilization factor (default: 0.90)",
)
parser.add_argument(
"--hf-token",
type=str,
help="Hugging Face token (can also use HF_TOKEN env var)",
)
args = parser.parse_args()
main(
src_dataset_hub_id=args.src_dataset_hub_id,
output_dataset_hub_id=args.output_dataset_hub_id,
model_id=args.model_id,
input_column=args.input_column,
output_column=args.output_column,
gpu_memory_utilization=args.gpu_memory_utilization,
input_truncation_len=args.input_truncation_len,
tensor_parallel_size=args.tensor_parallel_size,
max_samples=args.max_samples,
hf_token=args.hf_token,
)
else:
# Show HF Jobs example when run without arguments
print("""
vLLM Embedding Generation Script
================================

This script requires arguments. For usage information:
    uv run generate-embeddings-uv-vllm.py --help

Example HF Jobs command with multi-GPU:

    # If you're logged in with huggingface-cli, the token will be auto-detected
    hf jobs uv run \\
        --flavor l4x4 \\
        generate-embeddings-uv-vllm.py \\
        username/input-dataset \\
        username/output-dataset \\
        --input-column text \\
        --model-id Qwen/Qwen3-Embedding-0.6B \\
        --input-truncation-len 4096
""")