#!/usr/bin/env python
# coding: utf-8
# Cleaned and enhanced InternVideo2 6B evaluation script with structured logging
# Source:
import os
import sys
import subprocess
import logging
import json
import argparse
from pathlib import Path
from typing import Optional
import numpy as np
import cv2
import torch
from tqdm import tqdm
from huggingface_hub import hf_hub_download, HfApi, login
def setup_logging(log_level=logging.INFO, log_file=None):
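    """Configure root logging to stdout and, optionally, to a file."""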
handlers = []
fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
if log_file:
handlers.append(logging.FileHandler(log_file))
handlers.append(logging.StreamHandler(sys.stdout))
logging.basicConfig(level=log_level, format=fmt, handlers=handlers)
logging.info("Logging initialized.")
def run_command(cmd, cwd=None):
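    """Run a shell command, returning stripped stdout; raise RuntimeError on a non-zero exit."""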
logging.debug(f"Running command: {cmd} (cwd={cwd})")
result = subprocess.run(cmd, shell=True, cwd=cwd, capture_output=True, text=True)
if result.returncode != 0:
logging.error(f"Command failed: {cmd}\nSTDOUT: {result.stdout}\nSTDERR: {result.stderr}")
raise RuntimeError(f"Command '{cmd}' failed (exit code {result.returncode})")
logging.debug(f"Command succeeded, output: {result.stdout.strip()}")
return result.stdout.strip()
def download_checkpoint(repo_id: str, filename: str) -> str:
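    """Download a checkpoint file from the Hugging Face Hub and return its local path."""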
logging.info(f"Downloading {filename} from {repo_id}...")
path = hf_hub_download(repo_id=repo_id, filename=filename)
logging.info(f"Downloaded vision checkpoint to {path}")
return path
def load_config(config_path: str, vision_ckpt_path: str):
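    """Load the InternVideo2 config and point its checkpoint fields at vision_ckpt_path."""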
from demo.config import Config, eval_dict_leaf
logging.info(f"Loading config from {config_path}")
cfg = Config.from_file(config_path)
cfg = eval_dict_leaf(cfg)
cfg.model.vision_ckpt_path = vision_ckpt_path
cfg.model.vision_encoder.pretrained = vision_ckpt_path
cfg.pretrained_path = vision_ckpt_path
logging.debug(f"Config loaded: {cfg}")
return cfg
def process_videos(
json_path: str,
model,
config,
output_prefix: str,
    num_frames_override: Optional[int] = None
):
"""
Run inference over each video, write outputs.
If num_frames_override is given, use it; otherwise use config.num_frames.
"""
from demo.utils import retrieve_text, _frame_from_video
logging.info(f"Reading evaluation data from {json_path}")
data = json.loads(Path(json_path).read_text())
preds, logits = [], []
# choose frame window size
num_frames = num_frames_override if num_frames_override is not None else config.num_frames
logging.info(f"Using window size: {num_frames} frames")
for video_path, phrase, _ in data:
logging.info("\n--- Starting new video ---")
full_video = Path("photography-model") / video_path
logging.info(f"Processing {full_video} with phrase '{phrase}'")
frames = list(_frame_from_video(cv2.VideoCapture(str(full_video))))
scores = []
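        # Slide a num_frames-wide window over the decoded frames, scoring each
        # window against the phrase; retrieve_text returns the top-k texts and
        # their probabilities (topk=1, so probs[0] is the match score).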
for j in tqdm(range(len(frames) - (num_frames - 1)), desc=Path(video_path).stem):
_, probs = retrieve_text(
frames[j : j + num_frames], [phrase],
model=model, topk=1, config=config
)
scores.append(probs[0])
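        # argmax gives the 0-based start of the best-scoring window; +1 makes it 1-based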
best_idx = int(np.argmax(scores) + 1)
preds.append(best_idx)
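        # keep every (score, 1-based window index) pair for this video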
logits.append(list(zip(map(float, scores), range(1, len(scores) + 1))))
logging.info(f"Video result: predicted frame {best_idx}\n")
preds_file = f"{output_prefix}-t{num_frames}.json"
logits_file = f"{output_prefix}-logits-t{num_frames}.json"
logging.info(f"Writing predictions to {preds_file}")
Path(preds_file).write_text(json.dumps(preds, indent=2))
logging.info(f"Writing logits to {logits_file}")
Path(logits_file).write_text(json.dumps(logits, indent=2))
return preds_file, logits_file
def upload_results(token: str, upload_files: list, repo_id: str):
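    """Log in to the Hugging Face Hub and upload each result file to the dataset repo."""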
logging.info("Logging into Hugging Face Hub...")
login(token)
api = HfApi()
for file_path in upload_files:
logging.info(f"Uploading {file_path} to {repo_id}")
api.upload_file(
path_or_fileobj=file_path,
path_in_repo=Path(file_path).name,
repo_id=repo_id,
repo_type="dataset",
)
logging.info("Upload complete.")
def main():
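    """Entry point: clone dependencies, load the 6B model, run the ACT75 evaluation, and upload results."""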
parser = argparse.ArgumentParser(
description="Evaluate InternVideo2 sliding-window retrieval."
)
parser.add_argument(
"--branch",
type=str,
default="main",
help="Branch to use for evaluation."
)
parser.add_argument(
"--num_frames",
type=int,
default=None,
help="Manually set the number of frames per window."
)
args = parser.parse_args()
setup_logging()
# ensure IV2 repo
iv2_path = Path('~/IV2').expanduser()
if not iv2_path.exists():
logging.info("Cloning IV2 repository...")
        run_command(f'git clone https://github.com/qingy1337/IV2.git {iv2_path}')
os.chdir(iv2_path / 'InternVideo2' / 'multi_modality')
sys.path.append(os.getcwd())
run_command(f'git checkout {args.branch}', cwd=os.getcwd())
MODEL_NAME = '6B'
vision_ckpt = download_checkpoint(
repo_id="OpenGVLab/InternVideo2-Stage2_6B-224p-f4",
filename="internvideo2-s2_6b-224p-f4.pt"
)
config = load_config('scripts/pretraining/stage2/6B/config.py', vision_ckpt)
from demo.utils import setup_internvideo2
model, tokenizer = setup_internvideo2(config)
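    # ACT75.json and the video paths it references live under photography-model.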
if not Path('photography-model').exists():
run_command('git clone https://github.com/ruo2019/photography-model.git')
prefix = f"ACT75-V5-InternVideo-{MODEL_NAME}"
preds_file, logits_file = process_videos(
'photography-model/data/ACT75.json',
model, config, prefix,
num_frames_override=args.num_frames
)
upload_results(
os.getenv('HF_TOKEN', ''),
[preds_file, logits_file],
'qingy2024/InternVideo2-Data'
)
if __name__ == '__main__':
main()