"""Evaluate ViCLIP-B16 sliding-window text-to-video retrieval on ACT75.

For every (video, phrase) pair in the ACT75 evaluation set, the script scores
each window of --num_frames consecutive frames against the phrase and predicts
the 1-based start frame of the best-scoring window. Predictions and per-window
scores are written to JSON and optionally uploaded to the Hugging Face Hub.
"""
import os
import sys
import subprocess
import logging
import json
import argparse
from pathlib import Path

import numpy as np
import cv2
import torch
from tqdm import tqdm
from huggingface_hub import HfApi, login
from transformers import AutoModel


# ViCLIP model assets on the Hugging Face Hub.
VICLIP_MODEL_NAME = "ViCLIP-B16"
VICLIP_MODEL_REPO_ID = "qingy2024/ViCLIP-B16-HF"
VICLIP_BPE_FILENAME = "bpe_simple_vocab_16e6.txt.gz"
VICLIP_DEFAULT_NUM_FRAMES = 8
VICLIP_TARGET_SIZE = (224, 224)

# ImageNet mean/std, shaped (1, 1, 3) to broadcast over (H, W, 3) frames.
V_MEAN = np.array([0.485, 0.456, 0.406]).reshape(1, 1, 3)
V_STD = np.array([0.229, 0.224, 0.225]).reshape(1, 1, 3)

# Repository providing the ACT75 evaluation videos and annotations.
PHOTOGRAPHY_MODEL_REPO_URL = "https://github.com/ruo2019/photography-model.git"
PHOTOGRAPHY_MODEL_DIR = Path("photography-model")
DATA_JSON_PATH = PHOTOGRAPHY_MODEL_DIR / "data/ACT75.json"

# Dataset repository that receives the result files.
HF_RESULTS_REPO_ID = "qingy2024/InternVideo2-Data"
|
|
|
|
|
def setup_logging(log_level=logging.INFO, log_file=None):
    """Configure root logging to stdout and, optionally, to a file."""
    handlers = []
    fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    if log_file:
        log_file_path = Path(log_file)
        log_file_path.parent.mkdir(parents=True, exist_ok=True)
        handlers.append(logging.FileHandler(log_file_path))
    handlers.append(logging.StreamHandler(sys.stdout))
    logging.basicConfig(level=log_level, format=fmt, handlers=handlers)
    logging.info("Logging initialized.")
|
|
|
|
|
def run_command(cmd: str, cwd: str = None):
    """Run a shell command, raising RuntimeError on a non-zero exit code."""
    logging.debug(f"Running command: {cmd} (cwd={cwd})")
    result = subprocess.run(cmd, shell=True, cwd=cwd, capture_output=True, text=True, check=False)
    if result.returncode != 0:
        logging.error(f"Command failed: {cmd}\nSTDOUT: {result.stdout}\nSTDERR: {result.stderr}")
        raise RuntimeError(f"Command '{cmd}' failed (exit code {result.returncode})")
    logging.debug(f"Command succeeded, output: {result.stdout.strip()}")
    return result.stdout.strip()
|
|
|
|
|
def download_bpe_file_to_cwd(repo_id: str, filename: str, local_cwd: str = "."):
    """
    Download the BPE vocabulary file to the current working directory using
    huggingface-cli. This matches the original notebook's behavior, assuming
    ViCLIP's custom tokenizer code (via trust_remote_code=True) might look
    for it in the CWD.
    """
    bpe_path_in_cwd = Path(local_cwd) / filename
    if not bpe_path_in_cwd.exists():
        logging.info(f"Downloading {filename} to {bpe_path_in_cwd.resolve()} using huggingface-cli...")
        cli_command = [
            "huggingface-cli", "download", repo_id, filename,
            "--local-dir", str(Path(local_cwd).resolve()),
            "--local-dir-use-symlinks", "False"
        ]
        # Note: the list is joined into a single shell string; this assumes the
        # resolved local dir contains no spaces or shell metacharacters.
        run_command(' '.join(cli_command))
    else:
        logging.info(f"{filename} already exists at {bpe_path_in_cwd.resolve()}")
    return bpe_path_in_cwd
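
# For the defaults above, the rendered command looks roughly like this
# (the /abs/path placeholder stands in for the resolved CWD):
#
#   huggingface-cli download qingy2024/ViCLIP-B16-HF bpe_simple_vocab_16e6.txt.gz \
#       --local-dir /abs/path --local-dir-use-symlinks False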
|
|
|
|
|
def load_viclip_model_and_tokenizer(model_repo_id: str, device: torch.device):
    logging.info(f"Loading ViCLIP model ({model_repo_id}) and tokenizer...")
    # trust_remote_code=True is required: the repo ships custom ViCLIP
    # modeling/tokenizer code that is not part of transformers itself.
    model = AutoModel.from_pretrained(model_repo_id, trust_remote_code=True)
    tokenizer = model.tokenizer
    model = model.to(device)
    model.eval()
    logging.info(f"ViCLIP model and tokenizer loaded. Model is on {device}.")
    return model, tokenizer
|
|
|
|
|
def _frame_from_video(video_capture: cv2.VideoCapture):
    """Yield frames (BGR, as read by OpenCV) until the stream is exhausted."""
    while video_capture.isOpened():
        success, frame = video_capture.read()
        if success:
            yield frame
        else:
            break


def normalize_cv2_frame(frame_bgr: np.ndarray) -> np.ndarray:
    """Convert a BGR uint8 frame to RGB and apply ImageNet normalization."""
    frame_rgb = frame_bgr[:, :, ::-1]
    return (frame_rgb / 255.0 - V_MEAN) / V_STD
|
|
|
def frames_window_to_tensor(
    video_frames_window: list,
    num_expected_frames: int,
    target_size: tuple = VICLIP_TARGET_SIZE,
    device: torch.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
) -> torch.Tensor:
    """Resize, normalize, and stack a window of frames into a (1, T, C, H, W) tensor."""
    if len(video_frames_window) != num_expected_frames:
        logging.warning(
            f"frames_window_to_tensor received {len(video_frames_window)} frames, "
            f"but expected {num_expected_frames}. Proceeding with given frames."
        )
    processed_frames = [cv2.resize(frame, target_size) for frame in video_frames_window]
    # Each frame becomes (1, 1, H, W, 3); concatenating on axis 1 stacks the
    # window into (1, T, H, W, 3).
    processed_frames = [np.expand_dims(normalize_cv2_frame(frame), axis=(0, 1)) for frame in processed_frames]
    vid_tensor = np.concatenate(processed_frames, axis=1)
    # Reorder to channels-first: (1, T, 3, H, W).
    vid_tensor = np.transpose(vid_tensor, (0, 1, 4, 2, 3))
    vid_tensor = torch.from_numpy(vid_tensor).to(device, non_blocking=True).float()
    return vid_tensor
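
# A minimal shape sketch of the pipeline above (hypothetical 8-frame window of
# 480x640 BGR frames):
#
#   window = [np.zeros((480, 640, 3), dtype=np.uint8)] * 8
#   clip = frames_window_to_tensor(window, 8, device=torch.device('cpu'))
#   assert clip.shape == (1, 8, 3, 224, 224)  # (batch, frames, channels, H, W)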
|
|
|
# Phrase -> text-feature tensor cache, so each query phrase is encoded only
# once per run (reset at the start of process_videos_viclip).
text_features_cache = {}


def get_text_features_viclip(texts_to_encode: list, model, tokenizer, device: torch.device) -> torch.Tensor:
    global text_features_cache
    all_feats = []
    # Encode only phrases that have not been seen before.
    texts_needing_computation = [t for t in texts_to_encode if t not in text_features_cache]
    for text_to_encode in texts_needing_computation:
        # Inference only; no gradients needed.
        with torch.no_grad():
            feat = model.get_text_features(text_to_encode, tokenizer)
        feat = feat.to(device)
        text_features_cache[text_to_encode] = feat
    # Assemble the features in the order the texts were requested.
    for text_item in texts_to_encode:
        all_feats.append(text_features_cache[text_item])
    if not all_feats:
        return torch.empty(0, device=device)
    return torch.cat(all_feats, 0)
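
# Cache behaviour sketch (hypothetical phrase): the second call is a cache hit
# and performs no text encoding.
#
#   f1 = get_text_features_viclip(["a person taking a photo"], model, tokenizer, device)
#   f2 = get_text_features_viclip(["a person taking a photo"], model, tokenizer, device)
#   assert torch.equal(f1, f2)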
|
|
|
|
|
def retrieve_text_scores_viclip(
    video_frames_window: list,
    query_texts: list,
    model,
    tokenizer,
    num_frames_in_window: int,
    device: torch.device,
    topk: int = 1
) -> np.ndarray:
    """Score one window of frames against each query text, returning raw scores."""
    video_tensor = frames_window_to_tensor(
        video_frames_window,
        num_expected_frames=num_frames_in_window,
        device=device
    )
    with torch.no_grad():
        video_features = model.get_vid_features(video_tensor)
        text_features_tensor = get_text_features_viclip(query_texts, model, tokenizer, device)
        # softmax=False keeps raw similarity scores, so windows stay comparable
        # across the whole video (a softmax over a single phrase is always 1).
        probs, _ = model.get_predict_label(video_features, text_features_tensor, top=topk, softmax=False)
    return probs.cpu().numpy()[0]
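
# Usage sketch (hypothetical phrase): with a single query text, the returned
# array holds one raw score for the window.
#
#   scores = retrieve_text_scores_viclip(window, ["a dog catching a frisbee"],
#                                        model, tokenizer, 8, device)
#   score_for_window = float(scores[0])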
|
|
|
|
|
|
|
def process_videos_viclip(
    eval_json_path: str,
    model,
    tokenizer,
    output_file_prefix: str,
    num_frames_per_window: int,
    device: torch.device
):
    logging.info(f"Reading evaluation data from {eval_json_path}")
    try:
        with open(eval_json_path, 'r', encoding='utf-8') as f:
            eval_data = json.load(f)
    except Exception as e:
        logging.error(f"Failed to load or parse JSON data from {eval_json_path}: {e}")
        raise

    all_predictions, all_logits = [], []

    logging.info(f"Using window size: {num_frames_per_window} frames for ViCLIP evaluation.")

    global text_features_cache
    text_features_cache = {}

    for item_idx, (video_relative_path, phrase, _) in enumerate(eval_data):
        logging.info(f"\n--- Processing video {item_idx+1}/{len(eval_data)}: {video_relative_path} ---")
        full_video_path = PHOTOGRAPHY_MODEL_DIR / video_relative_path

        if not full_video_path.exists():
            logging.error(f"Video file not found: {full_video_path}. Skipping.")
            all_predictions.append(-1)
            all_logits.append([])
            continue

        logging.info(f"Video: {full_video_path}, Phrase: '{phrase}'")

        try:
            cap = cv2.VideoCapture(str(full_video_path))
            if not cap.isOpened():
                logging.error(f"Cannot open video file: {full_video_path}. Skipping.")
                all_predictions.append(-1)
                all_logits.append([])
                continue
            # Note: this loads the entire video into memory, which keeps the
            # sliding window simple but can be costly for long clips.
            video_frames = list(_frame_from_video(cap))
        except Exception as e:
            logging.error(f"Error reading frames from {full_video_path}: {e}. Skipping.")
            all_predictions.append(-1)
            all_logits.append([])
            continue
        finally:
            if 'cap' in locals() and cap.isOpened():
                cap.release()

        if not video_frames:
            logging.warning(f"Video {video_relative_path} is empty. Predicting frame 1 with score 0.")
            all_predictions.append(1)
            all_logits.append([(0.0, 1)])
            continue

        if len(video_frames) < num_frames_per_window:
            logging.warning(
                f"Video {video_relative_path} has {len(video_frames)} frames, "
                f"fewer than window size {num_frames_per_window}. "
                f"Predicting frame 1 with score 0 (no full window possible)."
            )
            all_predictions.append(1)
            dummy_scores = [0.0] * len(video_frames)
            dummy_frame_indices = list(range(1, len(video_frames) + 1))
            all_logits.append(list(zip(dummy_scores, dummy_frame_indices)))
            continue

        scores_for_this_video = []
        num_possible_windows = len(video_frames) - num_frames_per_window + 1

        for j in tqdm(range(num_possible_windows), desc=Path(video_relative_path).stem):
            current_window_frames = video_frames[j : j + num_frames_per_window]
            scores_array = retrieve_text_scores_viclip(
                video_frames_window=current_window_frames,
                query_texts=[phrase],
                model=model, tokenizer=tokenizer,
                num_frames_in_window=num_frames_per_window,
                device=device, topk=1
            )
            scores_for_this_video.append(scores_array[0])

        if not scores_for_this_video:
            logging.warning(f"No scores generated for {video_relative_path} (unexpected). Predicting frame 1.")
            predicted_frame_index = 1
            logits_for_this_video = [(0.0, 1)]
        else:
            # The prediction is the 1-based start frame of the best-scoring window.
            predicted_frame_index = int(np.argmax(scores_for_this_video) + 1)
            logits_for_this_video = list(zip(
                [float(s) for s in scores_for_this_video],
                range(1, len(scores_for_this_video) + 1)
            ))

        all_predictions.append(predicted_frame_index)
        all_logits.append(logits_for_this_video)
        logging.info(f"Video result: predicted frame {predicted_frame_index}\n")

    preds_file_path = Path(f"{output_file_prefix}-t{num_frames_per_window}.json")
    logits_file_path = Path(f"{output_file_prefix}-logits-t{num_frames_per_window}.json")

    logging.info(f"Writing predictions to {preds_file_path}")
    with preds_file_path.open('w', encoding='utf-8') as f:
        json.dump(all_predictions, f, indent=2)

    logging.info(f"Writing logits to {logits_file_path}")
    with logits_file_path.open('w', encoding='utf-8') as f:
        json.dump(all_logits, f, indent=2)

    return preds_file_path, logits_file_path
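
# Output sketch for --num_frames 8 and a prefix ending in "ACT75-V5-ViCLIP-B16"
# (frame indices below are illustrative):
#
#   .../ACT75-V5-ViCLIP-B16-t8.json         -> [12, 1, 87, ...]   (one predicted
#                                              frame index per video)
#   .../ACT75-V5-ViCLIP-B16-logits-t8.json  -> [[[score, window_start], ...], ...]
#                                              (tuples serialize as JSON arrays)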
|
|
|
|
|
|
|
def upload_results_to_hf_hub(
    hf_auth_token: str,
    local_files_to_upload: list,
    hf_repo_id: str,
    model_id_for_repo_path: str,
    num_frames_setting: int
):
    if not hf_auth_token:
        logging.warning("HF_TOKEN not set. Skipping Hugging Face Hub upload.")
        return

    logging.info("Logging into Hugging Face Hub...")
    try:
        login(token=hf_auth_token)
    except Exception as e:
        logging.error(f"Hugging Face Hub login failed: {e}. Skipping upload.")
        return

    api = HfApi()
    for local_file_path_obj in local_files_to_upload:
        base_filename = local_file_path_obj.name
        # Logits and prediction files go to fixed per-model paths in the repo.
        if "logits" in base_filename:
            repo_filename = f"V5/{model_id_for_repo_path}/ACT75-logits-t{num_frames_setting}.json"
        else:
            repo_filename = f"V5/{model_id_for_repo_path}/ACT75-t{num_frames_setting}.json"

        logging.info(f"Uploading {local_file_path_obj} to {hf_repo_id} as {repo_filename}")
        try:
            api.upload_file(
                path_or_fileobj=str(local_file_path_obj),
                path_in_repo=repo_filename,
                repo_id=hf_repo_id,
                repo_type="dataset",
            )
        except Exception as e:
            logging.error(f"Failed to upload {local_file_path_obj}: {e}")
    logging.info("Upload process complete (check logs for individual file statuses).")
|
|
|
|
|
|
|
def main():
    parser = argparse.ArgumentParser(description="Evaluate ViCLIP B/16 sliding-window retrieval.")
    parser.add_argument(
        "--num_frames", type=int, default=VICLIP_DEFAULT_NUM_FRAMES,
        help=f"Number of frames per window for ViCLIP. Default: {VICLIP_DEFAULT_NUM_FRAMES}"
    )
    parser.add_argument(
        "--log_filename", type=str, default="eval_viclip.log",
        help="Name of the log file to be saved in output_dir. Set to empty to disable file logging."
    )
    parser.add_argument(
        "--output_dir", type=str, default="output_viclip",
        help="Directory to save prediction, logits, and log files."
    )
    parser.add_argument(
        "--skip_upload", action="store_true",
        help="Skip uploading results to Hugging Face Hub."
    )
    parser.add_argument(
        "--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu",
        help="Device for computation (e.g., 'cuda', 'cpu'). Default: auto-detect CUDA."
    )
    args = parser.parse_args()

    Path(args.output_dir).mkdir(parents=True, exist_ok=True)

    log_file_path = Path(args.output_dir) / args.log_filename if args.log_filename else None
    setup_logging(log_file=log_file_path)

    eval_device = torch.device(args.device)
    logging.info(f"Using device: {eval_device}")

    # The BPE vocabulary must be in the CWD before the tokenizer is built.
    _ = download_bpe_file_to_cwd(
        repo_id=VICLIP_MODEL_REPO_ID,
        filename=VICLIP_BPE_FILENAME,
        local_cwd="."
    )

    model, tokenizer = load_viclip_model_and_tokenizer(VICLIP_MODEL_REPO_ID, eval_device)

    if not PHOTOGRAPHY_MODEL_DIR.exists():
        logging.info(f"Cloning data repo {PHOTOGRAPHY_MODEL_REPO_URL} to {PHOTOGRAPHY_MODEL_DIR}...")
        run_command(f"git clone {PHOTOGRAPHY_MODEL_REPO_URL} {str(PHOTOGRAPHY_MODEL_DIR)}")
    else:
        logging.info(f"Data repo {PHOTOGRAPHY_MODEL_DIR} already exists.")

    if not DATA_JSON_PATH.exists():
        logging.error(f"Evaluation JSON file not found: {DATA_JSON_PATH}. Exiting.")
        sys.exit(1)

    output_file_prefix_str = str(Path(args.output_dir) / f"ACT75-V5-{VICLIP_MODEL_NAME}")

    preds_file, logits_file = process_videos_viclip(
        eval_json_path=str(DATA_JSON_PATH),
        model=model, tokenizer=tokenizer,
        output_file_prefix=output_file_prefix_str,
        num_frames_per_window=args.num_frames,
        device=eval_device
    )

    if not args.skip_upload:
        hf_token = os.getenv('HF_TOKEN')
        if not hf_token:
            logging.warning("HF_TOKEN environment variable not found. Skipping Hugging Face Hub upload.")
        else:
            upload_results_to_hf_hub(
                hf_auth_token=hf_token,
                local_files_to_upload=[preds_file, logits_file],
                hf_repo_id=HF_RESULTS_REPO_ID,
                model_id_for_repo_path=VICLIP_MODEL_NAME,
                num_frames_setting=args.num_frames
            )
    else:
        logging.info("Skipping Hugging Face Hub upload as per --skip_upload flag.")

    logging.info("ViCLIP evaluation script finished.")
|
|
|
|
|
if __name__ == '__main__':
    main()
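
# Example invocations (the script filename is hypothetical):
#
#   python eval_viclip.py                        # defaults: 8-frame window, auto device
#   python eval_viclip.py --num_frames 16 --output_dir runs/viclip --skip_upload
#   HF_TOKEN=hf_... python eval_viclip.py        # enables the Hub upload step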