#!/usr/bin/env python
# coding: utf-8
# Cleaned and enhanced ViCLIP B16 evaluation script with structured logging
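"""
Sliding-window evaluation of ViCLIP-B16 on the ACT75 evaluation set.

The script downloads the BPE vocabulary to the working directory, loads the
model from the Hugging Face Hub, clones the photography-model data repository,
scores every sliding window of each video against its query phrase, writes
prediction and logit JSON files, and optionally uploads them to the results
dataset repo (requires the HF_TOKEN environment variable).

Example invocation (illustrative; the flags shown are the defaults defined
below, apart from --skip_upload):

    python ViCLIP-B16-Eval.py --num_frames 8 --output_dir output_viclip --skip_upload
"""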
import os
import sys
import subprocess
import logging
import json
import argparse
from pathlib import Path
import numpy as np
import cv2
import torch
from tqdm import tqdm
from huggingface_hub import hf_hub_download, HfApi, login
from transformers import AutoModel
# --- Constants ---
VICLIP_MODEL_NAME = "ViCLIP-B16"
VICLIP_MODEL_REPO_ID = "qingy2024/ViCLIP-B16-HF"
VICLIP_BPE_FILENAME = "bpe_simple_vocab_16e6.txt.gz"
VICLIP_DEFAULT_NUM_FRAMES = 8
VICLIP_TARGET_SIZE = (224, 224) # For ViCLIP B/16
V_MEAN = np.array([0.485, 0.456, 0.406]).reshape(1, 1, 3)
V_STD = np.array([0.229, 0.224, 0.225]).reshape(1, 1, 3)
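# Standard ImageNet per-channel mean/std, applied to RGB frames after scaling to [0, 1].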
PHOTOGRAPHY_MODEL_REPO_URL = "https://github.com/ruo2019/photography-model.git"
PHOTOGRAPHY_MODEL_DIR = Path("photography-model")
DATA_JSON_PATH = PHOTOGRAPHY_MODEL_DIR / "data/ACT75.json"
HF_RESULTS_REPO_ID = "qingy2024/InternVideo2-Data" # Target repository for uploads
# --- Logging Setup ---
def setup_logging(log_level=logging.INFO, log_file=None):
handlers = []
fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
if log_file:
log_file_path = Path(log_file)
log_file_path.parent.mkdir(parents=True, exist_ok=True)
handlers.append(logging.FileHandler(log_file_path))
handlers.append(logging.StreamHandler(sys.stdout))
logging.basicConfig(level=log_level, format=fmt, handlers=handlers)
logging.info("Logging initialized.")
# --- Command Execution ---
def run_command(cmd: str, cwd: str = None):
logging.debug(f"Running command: {cmd} (cwd={cwd})")
result = subprocess.run(cmd, shell=True, cwd=cwd, capture_output=True, text=True, check=False)
if result.returncode != 0:
logging.error(f"Command failed: {cmd}\nSTDOUT: {result.stdout}\nSTDERR: {result.stderr}")
raise RuntimeError(f"Command '{cmd}' failed (exit code {result.returncode})")
logging.debug(f"Command succeeded, output: {result.stdout.strip()}")
return result.stdout.strip()
# --- Hugging Face Utilities ---
def download_bpe_file_to_cwd(repo_id: str, filename: str, local_cwd: str = "."):
"""
Downloads the BPE file to the current working directory using huggingface-cli.
This matches the original notebook's behavior, assuming ViCLIP's custom
tokenizer code (via trust_remote_code=True) might look for it in CWD.
"""
bpe_path_in_cwd = Path(local_cwd) / filename
if not bpe_path_in_cwd.exists():
logging.info(f"Downloading {filename} to {bpe_path_in_cwd.resolve()} using huggingface-cli...")
        # Build the arguments as a list for readability; they are joined into a
        # single shell command string below because run_command uses shell=True.
cli_command = [
"huggingface-cli", "download", repo_id, filename,
"--local-dir", str(Path(local_cwd).resolve()), # Absolute path for local-dir
"--local-dir-use-symlinks", "False" # Ensure actual copy
]
# run_command expects a string if shell=True
run_command(' '.join(cli_command))
else:
logging.info(f"{filename} already exists at {bpe_path_in_cwd.resolve()}")
return bpe_path_in_cwd
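# Note: an equivalent pure-Python approach (untested sketch) would use
# hf_hub_download, already imported above, e.g.
#   hf_hub_download(repo_id=VICLIP_MODEL_REPO_ID, filename=VICLIP_BPE_FILENAME, local_dir=".")
# The CLI call is kept here to mirror the original notebook's behavior.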
def load_viclip_model_and_tokenizer(model_repo_id: str, device: torch.device):
logging.info(f"Loading ViCLIP model ({model_repo_id}) and tokenizer...")
# AutoModel with trust_remote_code=True will execute custom code from the repo.
# This custom code is expected to initialize the model and its tokenizer.
# The BPE file is assumed to be handled correctly by this custom code,
# either by finding it in CWD (if downloaded there) or from the HF cache.
model = AutoModel.from_pretrained(model_repo_id, trust_remote_code=True)
tokenizer = model.tokenizer # tokenizer is set up by the model's custom __init__ or from_pretrained
model = model.to(device)
model.eval()
logging.info(f"ViCLIP model and tokenizer loaded. Model is on {device}.")
return model, tokenizer
# --- ViCLIP Specific Helper Functions ---
def _frame_from_video(video_capture: cv2.VideoCapture):
while video_capture.isOpened():
success, frame = video_capture.read()
if success:
yield frame
else:
break
def normalize_cv2_frame(frame_bgr: np.ndarray) -> np.ndarray:
frame_rgb = frame_bgr[:, :, ::-1] # BGR to RGB
return (frame_rgb / 255.0 - V_MEAN) / V_STD
def frames_window_to_tensor(
video_frames_window: list, # List of CV2 frames (BGR) for the current window
num_expected_frames: int, # Expected number of frames in the window (equals fnum)
target_size: tuple = VICLIP_TARGET_SIZE,
device: torch.device = torch.device('cuda')
) -> torch.Tensor:
if len(video_frames_window) != num_expected_frames:
# This should ideally not happen if windows are prepared correctly.
        # If it does, it indicates an upstream issue or the need for a padding/truncation strategy.
logging.warning(
f"frames_window_to_tensor received {len(video_frames_window)} frames, "
f"but expected {num_expected_frames}. Proceeding with given frames."
)
processed_frames = [cv2.resize(frame, target_size) for frame in video_frames_window]
processed_frames = [np.expand_dims(normalize_cv2_frame(frame), axis=(0, 1)) for frame in processed_frames] # each (1,1,H,W,C)
# Concatenate along the time dimension (axis=1)
vid_tensor = np.concatenate(processed_frames, axis=1) # Shape: (1, num_frames, H, W, C)
vid_tensor = np.transpose(vid_tensor, (0, 1, 4, 2, 3)) # Shape: (1, num_frames, C, H, W)
vid_tensor = torch.from_numpy(vid_tensor).to(device, non_blocking=True).float()
return vid_tensor
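# Shape walk-through for frames_window_to_tensor above (illustrative, for the
# defaults of 8-frame windows at 224x224): each BGR frame (H, W, 3) is resized
# to (224, 224, 3), normalized, and expanded to (1, 1, 224, 224, 3);
# concatenating 8 such frames on axis=1 gives (1, 8, 224, 224, 3), and the
# final transpose yields (1, 8, 3, 224, 224) before conversion to a float32
# tensor on the target device.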
text_features_cache = {} # Module-level cache for text features
def get_text_features_viclip(texts_to_encode: list, model, tokenizer, device: torch.device) -> torch.Tensor:
global text_features_cache
all_feats = []
# Identify texts that need their features computed
texts_needing_computation = []
for text_item in texts_to_encode: # text_item is the actual string
if text_item not in text_features_cache:
texts_needing_computation.append(text_item)
# Compute features for texts not in cache
if texts_needing_computation:
for text_to_encode in texts_needing_computation:
            # The model's custom get_text_features expects the raw text string plus the tokenizer.
feat = model.get_text_features(text_to_encode, tokenizer)
# Ensure the feature tensor is on the correct device.
# The model's get_text_features might put it on model.logit_scale.device,
# which should be `device` if model was moved correctly. This is a safe check.
feat = feat.to(device)
text_features_cache[text_to_encode] = feat # Store in our script's cache
# Retrieve all required features from cache (now populated)
for text_item in texts_to_encode:
all_feats.append(text_features_cache[text_item])
if not all_feats:
# Return an empty tensor on the correct device if no texts were provided or no features generated
return torch.empty(0, device=device)
return torch.cat(all_feats, 0)
def retrieve_text_scores_viclip(
video_frames_window: list, # CV2 frames for the current window
query_texts: list, # List of text phrases to score against the window
model, # ViCLIP model
tokenizer, # ViCLIP tokenizer
num_frames_in_window: int, # Number of frames (fnum for frames_window_to_tensor)
device: torch.device,
topk: int = 1
) -> np.ndarray:
video_tensor = frames_window_to_tensor(
video_frames_window,
num_expected_frames=num_frames_in_window,
device=device
)
video_features = model.get_vid_features(video_tensor)
text_features_tensor = get_text_features_viclip(query_texts, model, tokenizer, device)
# get_predict_label computes similarity and returns topk scores and indices
# Original notebook used softmax=False
probs, _ = model.get_predict_label(video_features, text_features_tensor, top=topk, softmax=False)
return probs.cpu().numpy()[0] # Returns scores for the query_texts (shape (1, K) -> (K,))
# --- Main Processing Logic ---
def process_videos_viclip(
eval_json_path: str,
model,
tokenizer,
output_file_prefix: str, # String prefix for output files, including directory
num_frames_per_window: int,
device: torch.device
):
logging.info(f"Reading evaluation data from {eval_json_path}")
try:
with open(eval_json_path, 'r', encoding='utf-8') as f:
eval_data = json.load(f)
except Exception as e:
logging.error(f"Failed to load or parse JSON data from {eval_json_path}: {e}")
raise
all_predictions, all_logits = [], []
logging.info(f"Using window size: {num_frames_per_window} frames for ViCLIP evaluation.")
global text_features_cache # Clear cache at the start of a new dataset processing pass
text_features_cache = {}
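    # Each item in eval_data is unpacked below as (video_relative_path, phrase, _);
    # the third field (presumably the ground-truth annotation) is unused here.
    # Illustrative item (hypothetical values): ["videos/clip01.mp4", "a person jumps", 42]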
for item_idx, (video_relative_path, phrase, _) in enumerate(eval_data):
logging.info(f"\n--- Processing video {item_idx+1}/{len(eval_data)}: {video_relative_path} ---")
full_video_path = PHOTOGRAPHY_MODEL_DIR / video_relative_path
if not full_video_path.exists():
logging.error(f"Video file not found: {full_video_path}. Skipping.")
all_predictions.append(-1) # Error indicator
all_logits.append([])
continue
logging.info(f"Video: {full_video_path}, Phrase: '{phrase}'")
try:
cap = cv2.VideoCapture(str(full_video_path))
if not cap.isOpened():
logging.error(f"Cannot open video file: {full_video_path}. Skipping.")
all_predictions.append(-1); all_logits.append([]); continue
video_frames = list(_frame_from_video(cap))
except Exception as e:
logging.error(f"Error reading frames from {full_video_path}: {e}. Skipping.")
all_predictions.append(-1); all_logits.append([]); continue
finally:
if 'cap' in locals() and cap.isOpened():
cap.release()
if not video_frames:
logging.warning(f"Video {video_relative_path} is empty. Predicting frame 1 with score 0.")
all_predictions.append(1)
all_logits.append([([0.0], 1)]) # Logits format: list of ([score_for_phrase], frame_index_1_based)
continue
if len(video_frames) < num_frames_per_window:
logging.warning(
f"Video {video_relative_path} has {len(video_frames)} frames, "
f"less than window size {num_frames_per_window}. "
f"Predicting frame 1 with score 0 (no full window possible)."
)
all_predictions.append(1) # Default prediction
# Create dummy logits for all available frames with score 0
dummy_scores = [0.0] * len(video_frames)
dummy_frame_indices = list(range(1, len(video_frames) + 1))
all_logits.append(list(zip([[s] for s in dummy_scores], dummy_frame_indices))) # Ensure score is a list
continue
scores_for_this_video = []
num_possible_windows = len(video_frames) - num_frames_per_window + 1
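        # Worked example: a 100-frame video with an 8-frame window yields
        # 100 - 8 + 1 = 93 windows; window j (0-based) covers frames j .. j+7.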
for j in tqdm(range(num_possible_windows), desc=Path(video_relative_path).stem):
current_window_frames = video_frames[j : j + num_frames_per_window]
# retrieve_text_scores_viclip returns scores for [phrase]
# Since topk=1 and only one phrase, it's an array like [score_for_phrase]
scores_array = retrieve_text_scores_viclip(
video_frames_window=current_window_frames,
query_texts=[phrase], # Expects a list of texts
model=model, tokenizer=tokenizer,
num_frames_in_window=num_frames_per_window,
device=device, topk=1
)
scores_for_this_video.append(scores_array[0]) # Append the actual score
if not scores_for_this_video: # Should not happen if num_possible_windows > 0
logging.warning(f"No scores generated for {video_relative_path} (unexpected). Predicting frame 1.")
predicted_frame_index = 1
logits_for_this_video = [([0.0], 1)]
else:
predicted_frame_index = int(np.argmax(scores_for_this_video) + 1) # 1-indexed
            # Logits: list of ([score_value], frame_number_1_based) tuples
            logits_for_this_video = list(zip(
                [[float(s)] for s in scores_for_this_video],  # wrap each score in a single-element list
                range(1, len(scores_for_this_video) + 1)
            ))
all_predictions.append(predicted_frame_index)
all_logits.append(logits_for_this_video)
logging.info(f"Video result: predicted frame {predicted_frame_index}\n")
preds_file_path = Path(f"{output_file_prefix}-t{num_frames_per_window}.json")
logits_file_path = Path(f"{output_file_prefix}-logits-t{num_frames_per_window}.json")
logging.info(f"Writing predictions to {preds_file_path}")
with preds_file_path.open('w', encoding='utf-8') as f:
json.dump(all_predictions, f, indent=2)
logging.info(f"Writing logits to {logits_file_path}")
with logits_file_path.open('w', encoding='utf-8') as f:
json.dump(all_logits, f, indent=2)
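    # Illustrative output structure (assuming one query phrase per video):
    #   predictions file: one 1-based frame index per video, e.g. [12, 5, -1, ...]
    #   logits file: for each video, a list of [[score], frame_index] pairs
    #   (tuples are serialized as JSON arrays).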
return preds_file_path, logits_file_path
# --- Hugging Face Hub Upload ---
def upload_results_to_hf_hub(
hf_auth_token: str,
local_files_to_upload: list, # List of Path objects
hf_repo_id: str,
model_id_for_repo_path: str, # e.g., "ViCLIP-B16"
num_frames_setting: int
):
if not hf_auth_token:
logging.warning("HF_TOKEN not set. Skipping Hugging Face Hub upload.")
return
logging.info("Logging into Hugging Face Hub...")
try:
login(token=hf_auth_token)
except Exception as e:
logging.error(f"Hugging Face Hub login failed: {e}. Skipping upload.")
return
api = HfApi()
for local_file_path_obj in local_files_to_upload:
# Construct path_in_repo based on original script's convention V5/{model_id}/ACT75[-logits]-t{num_frames}.json
base_filename = local_file_path_obj.name # e.g., "ACT75-V5-ViCLIP-B16-t8.json"
# Determine if it's a logits file to adjust repo path correctly
if "logits" in base_filename:
repo_filename = f"V5/{model_id_for_repo_path}/ACT75-logits-t{num_frames_setting}.json"
else:
repo_filename = f"V5/{model_id_for_repo_path}/ACT75-t{num_frames_setting}.json"
logging.info(f"Uploading {local_file_path_obj} to {hf_repo_id} as {repo_filename}")
try:
api.upload_file(
path_or_fileobj=str(local_file_path_obj),
path_in_repo=repo_filename,
repo_id=hf_repo_id,
repo_type="dataset",
)
except Exception as e:
logging.error(f"Failed to upload {local_file_path_obj}: {e}")
logging.info("Upload process complete (check logs for individual file statuses).")
# --- Main Function ---
def main():
parser = argparse.ArgumentParser(description="Evaluate ViCLIP B/16 sliding-window retrieval.")
parser.add_argument(
"--num_frames", type=int, default=VICLIP_DEFAULT_NUM_FRAMES,
help=f"Number of frames per window for ViCLIP. Default: {VICLIP_DEFAULT_NUM_FRAMES}"
)
parser.add_argument(
"--log_filename", type=str, default="eval_viclip.log",
help="Name of the log file to be saved in output_dir. Set to empty to disable file logging."
)
parser.add_argument(
"--output_dir", type=str, default="output_viclip",
help="Directory to save prediction, logits, and log files."
)
parser.add_argument(
"--skip_upload", action="store_true",
help="Skip uploading results to Hugging Face Hub."
)
parser.add_argument(
"--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu",
help="Device for computation (e.g., 'cuda', 'cpu'). Default: auto-detect CUDA."
)
args = parser.parse_args()
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
log_file_path = Path(args.output_dir) / args.log_filename if args.log_filename else None
setup_logging(log_file=log_file_path)
eval_device = torch.device(args.device)
logging.info(f"Using device: {eval_device}")
# Download BPE file to CWD if ViCLIP's custom code needs it there.
# This matches the original notebook's explicit download action.
_ = download_bpe_file_to_cwd(
repo_id=VICLIP_MODEL_REPO_ID,
filename=VICLIP_BPE_FILENAME,
local_cwd="." # Download to current working directory of the script
)
model, tokenizer = load_viclip_model_and_tokenizer(VICLIP_MODEL_REPO_ID, eval_device)
if not PHOTOGRAPHY_MODEL_DIR.exists():
logging.info(f"Cloning data repo {PHOTOGRAPHY_MODEL_REPO_URL} to {PHOTOGRAPHY_MODEL_DIR}...")
run_command(f"git clone {PHOTOGRAPHY_MODEL_REPO_URL} {str(PHOTOGRAPHY_MODEL_DIR)}")
else:
logging.info(f"Data repo {PHOTOGRAPHY_MODEL_DIR} already exists.")
if not DATA_JSON_PATH.exists():
logging.error(f"Evaluation JSON file not found: {DATA_JSON_PATH}. Exiting.")
sys.exit(1)
# Output prefix string (includes directory)
output_file_prefix_str = str(Path(args.output_dir) / f"ACT75-V5-{VICLIP_MODEL_NAME}")
preds_file, logits_file = process_videos_viclip(
eval_json_path=str(DATA_JSON_PATH),
model=model, tokenizer=tokenizer,
output_file_prefix=output_file_prefix_str,
num_frames_per_window=args.num_frames,
device=eval_device
)
if not args.skip_upload:
hf_token = os.getenv('HF_TOKEN')
if not hf_token:
logging.warning("HF_TOKEN environment variable not found. Skipping Hugging Face Hub upload.")
else:
upload_results_to_hf_hub(
hf_auth_token=hf_token,
local_files_to_upload=[preds_file, logits_file],
hf_repo_id=HF_RESULTS_REPO_ID,
model_id_for_repo_path=VICLIP_MODEL_NAME,
num_frames_setting=args.num_frames
)
else:
logging.info("Skipping Hugging Face Hub upload as per --skip_upload flag.")
logging.info("ViCLIP evaluation script finished.")
if __name__ == '__main__':
main()