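# Hugging Face Hub stats collector.
#
# Pages through the public Hub API for models, datasets, spaces, posts, and
# daily papers, writes one parquet file per endpoint into a local cache
# directory, and (unless --skip-upload is passed) uploads each file to the
# <username>/hub-stats dataset repo.
#
# Third-party dependencies (inferred from the imports below): pandas, pyarrow,
# aiohttp, requests, python-dotenv, huggingface_hub, tenacity.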
import json
import os
import asyncio
import time

import pandas as pd
import aiohttp
import requests.utils
from dotenv import load_dotenv
from huggingface_hub import HfApi
from tenacity import retry, stop_after_attempt, wait_exponential

load_dotenv()

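# Local cache directory for the parquet output (and raw JSONL when --skip-upload
# is used). whoami() below resolves the authenticated user, so a valid Hub token
# must be available to huggingface_hub (for example via HF_TOKEN in the .env file).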
CACHE_DIR = ".hf_cache"
os.makedirs(CACHE_DIR, exist_ok=True)

api = HfApi()
USER_ID = api.whoami()["name"]
REPO_ID = f"{USER_ID}/hub-stats"

print("Hugging Face Hub Stats Collector")
print(f"Dataset will be uploaded to: {REPO_ID}")
print(f"User: {USER_ID}")
print("-" * 50)

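# Per-endpoint request settings: page size, extra query parameters, and an
# optional custom base_url. The expand[] lists ask the API to include the named
# metadata fields in each returned item.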
ENDPOINT_CONFIGS = {
    "models": {
        "limit": 1000,
        "params": {
            "full": "true",
            "config": "true",
            "expand[]": [
                "gguf",
                "downloadsAllTime",
                "transformersInfo",
                "cardData",
                "safetensors",
                "baseModels",
                "author",
                "likes",
                "inferenceProviderMapping",
                "downloads",
                "siblings",
                "tags",
                "pipeline_tag",
                "lastModified",
                "createdAt",
                "config",
                "library_name",
            ],
        },
    },
    "datasets": {
        "limit": 1000,
        "params": {
            "full": "true",
            "expand[]": [
                "author",
                "cardData",
                "citation",
                "createdAt",
                "disabled",
                "description",
                "downloads",
                "downloadsAllTime",
                "gated",
                "lastModified",
                "likes",
                "paperswithcode_id",
                "private",
                "siblings",
                "sha",
                "tags",
                "trendingScore",
            ],
        },
    },
    "spaces": {"limit": 1000, "params": {"full": "true"}},
    "posts": {"limit": 50, "params": {"skip": 0}},
    "daily_papers": {
        "limit": 50,
        "params": {},
        "base_url": "https://huggingface.co/api/daily_papers",
    },
}

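# Return the rel="next" URL from an HTTP Link header, or None when there is no
# further page.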
def parse_link_header(link_header):
    if not link_header:
        return None
    links = requests.utils.parse_header_links(link_header)
    for link in links:
        if link.get("rel") == "next":
            return link.get("url")
    return None

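# Serialize nested dict/list values to JSON strings (parquet columns cannot hold
# arbitrary nested Python objects); other non-null values are coerced to str,
# and None stays None.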
def to_json_string(x):
    return (
        json.dumps(x)
        if isinstance(x, (dict, list))
        else str(x) if x is not None else None
    )

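# Endpoint-specific cleanup before writing parquet: flatten the nested author
# (posts) and paper (daily_papers) objects, convert timestamps to tz-naive
# datetimes, and stringify columns that hold nested data.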
def process_dataframe(df, endpoint):
    if len(df) == 0:
        return df

    if endpoint == "posts":
        if "author" in df.columns:
            author_df = pd.json_normalize(df["author"])
            author_cols = ["avatarUrl", "followerCount", "fullname", "name"]
            for col in author_cols:
                if col in author_df.columns:
                    df[col] = author_df[col]
            df = df.drop("author", axis=1)

        for ts_col in ["publishedAt", "updatedAt"]:
            if ts_col in df.columns:
                df[ts_col] = pd.to_datetime(df[ts_col]).dt.tz_localize(None)

    elif endpoint == "daily_papers":
        if "paper" in df.columns:
            paper_df = pd.json_normalize(df["paper"], errors="ignore").add_prefix(
                "paper_"
            )
            df = pd.concat([df.drop("paper", axis=1), paper_df], axis=1)

        for ts_col in ["publishedAt", "paper_publishedAt"]:
            if ts_col in df.columns:
                df[ts_col] = pd.to_datetime(df[ts_col], errors="coerce").dt.tz_localize(
                    None
                )

    else:
        for field in ["createdAt", "lastModified"]:
            if field in df.columns:
                df[field] = pd.to_datetime(df[field], errors="coerce").dt.tz_localize(
                    None
                )

        if "gated" in df.columns:
            df["gated"] = df["gated"].astype(str)

        for col in ["cardData", "config", "gguf"]:
            if col in df.columns:
                df[col] = df[col].apply(to_json_string)

    return df

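# Thin wrapper so every parquet file is written with the same settings.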
def save_parquet(df, output_file):
    df.to_parquet(output_file, index=False, engine="pyarrow")

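# Fetch a single page of API results. tenacity retries failed requests up to 5
# times with exponential backoff; on success, returns the parsed JSON body and
# the Link header used for pagination.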
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, min=4, max=60))
async def fetch_data_page(session, url, params=None, headers=None):
    async with session.get(url, params=params, headers=headers) as response:
        response.raise_for_status()
        return await response.json(), response.headers.get("Link")

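# Page through every configured endpoint and write one parquet file per
# endpoint. With skip_upload=True the raw JSON pages are also appended to
# <endpoint>_raw.jsonl in the cache dir so the parquet files can later be
# rebuilt offline; otherwise each file is uploaded to the Hub as it is created.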
async def create_parquet_files(skip_upload=False):
    start_time = time.time()
    endpoints = ["daily_papers", "models", "spaces", "datasets", "posts"]
    created_files = []

    async with aiohttp.ClientSession() as session:
        for endpoint in endpoints:
            print(f"Processing {endpoint}...")

            config = ENDPOINT_CONFIGS[endpoint]
            base_url = config.get("base_url", f"https://huggingface.co/api/{endpoint}")
            params = {"limit": config["limit"]}
            params.update(config["params"])

            headers = {"Accept": "application/json"}
            all_data = []
            url = base_url
            page = 0

            jsonl_file = None
            if skip_upload:
                jsonl_file = os.path.join(CACHE_DIR, f"{endpoint}_raw.jsonl")
                with open(jsonl_file, "w") as f:
                    pass

            while url:
                if endpoint == "posts":
                    params["skip"] = page * params["limit"]

                try:
                    data, link_header = await fetch_data_page(
                        session, url, params, headers
                    )

                    if skip_upload and jsonl_file:
                        with open(jsonl_file, "a") as f:
                            f.write(json.dumps(data) + "\n")

                    if endpoint == "posts":
                        items = data["socialPosts"]
                        total_items = data["numTotalItems"]
                        all_data.extend(items)

                        if (page + 1) * params["limit"] >= total_items:
                            url = None
                        else:
                            url = base_url
                    else:
                        all_data.extend(data)
                        url = parse_link_header(link_header)
                        if url:
                            params = {}

                    if len(all_data) % 10000 == 0:
                        print(f" {len(all_data):,} records processed")

                    page += 1

                except Exception as e:
                    print(f"Error on page {page}: {e}")
                    await asyncio.sleep(2)
                    if page > 0:
                        url = None
                    else:
                        raise

            if skip_upload and jsonl_file and os.path.exists(jsonl_file):
                print(f" Raw data saved to {jsonl_file}")

            df = pd.DataFrame(all_data)
            df = process_dataframe(df, endpoint)

            output_file = os.path.join(CACHE_DIR, f"{endpoint}.parquet")
            save_parquet(df, output_file)
            created_files.append(output_file)

            print(f"{endpoint}: {len(df):,} rows -> {output_file}")

            if not skip_upload:
                upload_to_hub(output_file, REPO_ID)

    elapsed = time.time() - start_time
    return created_files, elapsed

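# Rebuild the parquet files from previously cached *_raw.jsonl pages without
# hitting the API (used by the --recreate flag).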
def recreate_from_jsonl():
    endpoints = ["daily_papers", "models", "spaces", "datasets", "posts"]

    for endpoint in endpoints:
        jsonl_file = os.path.join(CACHE_DIR, f"{endpoint}_raw.jsonl")
        if not os.path.exists(jsonl_file):
            print(f"{jsonl_file} not found")
            continue

        print(f"Recreating {endpoint} from {jsonl_file}...")

        all_data = []
        with open(jsonl_file, "r") as f:
            for line in f:
                data = json.loads(line.strip())
                if endpoint == "posts":
                    all_data.extend(data["socialPosts"])
                else:
                    all_data.extend(data)

        df = pd.DataFrame(all_data)
        df = process_dataframe(df, endpoint)

        output_file = os.path.join(CACHE_DIR, f"{endpoint}.parquet")
        save_parquet(df, output_file)

        print(f"{endpoint}: {len(df):,} rows -> {output_file}")

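# Upload one parquet file to the dataset repo. Errors are caught and reported
# so a single failed upload does not abort the whole run.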
def upload_to_hub(file_path, repo_id):
    try:
        api.upload_file(
            path_or_fileobj=file_path,
            path_in_repo=os.path.basename(file_path),
            repo_id=repo_id,
            repo_type="dataset",
        )
        print(f"Uploaded {os.path.basename(file_path)} to {repo_id}")
        return True
    except Exception as e:
        print(f"Failed to upload {os.path.basename(file_path)}: {e}")
        return False

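# Run a full collection pass and print a per-file summary of rows and size.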
def main(skip_upload=False):
    created_files, elapsed = asyncio.run(create_parquet_files(skip_upload=skip_upload))

    print(f"\nCompleted in {elapsed:.2f} seconds")
    print(f"Created {len(created_files)} parquet files:")

    for file in created_files:
        size = os.path.getsize(file)
        rows = len(pd.read_parquet(file))
        print(f" {os.path.basename(file)}: {rows:,} rows, {size:,} bytes")

    if skip_upload:
        print(f"\nRaw JSONL files saved to {CACHE_DIR}/ for recreation")
        print("Use 'python app.py --recreate' to recreate parquet files from JSONL")

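# CLI entry point:
#   python app.py                 -> collect everything and upload to the Hub
#   python app.py --skip-upload   -> collect and cache locally, skip the upload
#   python app.py --recreate      -> rebuild parquet files from cached JSONL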
if __name__ == "__main__":
    import sys

    if "--recreate" in sys.argv:
        recreate_from_jsonl()
    else:
        skip_upload = "--skip-upload" in sys.argv
        main(skip_upload=skip_upload)