# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "pandas==2.2.2",
#     "aiohttp",
#     "python-dotenv==1.0.1",
#     "huggingface-hub==0.24.3",
#     "tenacity==9.0.0",
#     "pyarrow==17.0.0",
#     "requests",
# ]
# ///
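"""Collect statistics from the Hugging Face Hub API (models, datasets, spaces,
posts, daily papers), write one parquet file per endpoint to .hf_cache/, and
upload the files to the <user>/hub-stats dataset repo.

Usage:
    python app.py                 # fetch, build parquet files, upload to the Hub
    python app.py --skip-upload   # fetch and cache raw JSONL locally, no upload
    python app.py --recreate      # rebuild parquet files from the cached JSONL
"""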

import json
import os
import asyncio
import time

import pandas as pd
import aiohttp
import requests.utils
from dotenv import load_dotenv
from huggingface_hub import HfApi
from tenacity import retry, stop_after_attempt, wait_exponential

load_dotenv()

CACHE_DIR = ".hf_cache"
os.makedirs(CACHE_DIR, exist_ok=True)

# HfApi() picks up credentials from the local Hugging Face login or a token
# loaded from .env by load_dotenv() above; whoami() requires a valid token.
api = HfApi()
USER_ID = api.whoami()["name"]
REPO_ID = f"{USER_ID}/hub-stats"

print(f"πŸš€ Hugging Face Hub Stats Collector")
print(f"πŸ“Š Dataset will be uploaded to: {REPO_ID}")
print(f"πŸ‘€ User: {USER_ID}")
print("-" * 50)

# Per-endpoint query configuration: page size plus the params sent to the Hub
# API. daily_papers overrides the default https://huggingface.co/api/<endpoint>
# base URL.
ENDPOINT_CONFIGS = {
    "models": {
        "limit": 1000,
        "params": {
            "full": "true",
            "config": "true",
            "expand[]": [
                "gguf",
                "downloadsAllTime",
                "transformersInfo",
                "cardData",
                "safetensors",
                "baseModels",
                "author",
                "likes",
                "inferenceProviderMapping",
                "downloads",
                "siblings",
                "tags",
                "pipeline_tag",
                "lastModified",
                "createdAt",
                "config",
                "library_name",
            ],
        },
    },
    "datasets": {
        "limit": 1000,
        "params": {
            "full": "true",
            "expand[]": [
                "author",
                "cardData",
                "citation",
                "createdAt",
                "disabled",
                "description",
                "downloads",
                "downloadsAllTime",
                "gated",
                "lastModified",
                "likes",
                "paperswithcode_id",
                "private",
                "siblings",
                "sha",
                "tags",
                "trendingScore",
            ],
        },
    },
    "spaces": {"limit": 1000, "params": {"full": "true"}},
    "posts": {"limit": 50, "params": {"skip": 0}},
    "daily_papers": {
        "limit": 50,
        "params": {},
        "base_url": "https://huggingface.co/api/daily_papers",
    },
}


def parse_link_header(link_header):
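    """Return the rel="next" URL from an HTTP Link header, or None if absent."""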
    if not link_header:
        return None
    links = requests.utils.parse_header_links(link_header)
    for link in links:
        if link.get("rel") == "next":
            return link.get("url")
    return None


def to_json_string(x):
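    """JSON-encode dicts and lists; stringify any other non-None value."""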
    return (
        json.dumps(x)
        if isinstance(x, (dict, list))
        else str(x) if x is not None else None
    )


def process_dataframe(df, endpoint):
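    """Normalize one endpoint's records: flatten nested author/paper fields,
    strip timezones from timestamp columns, and JSON-encode nested columns."""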
    if len(df) == 0:
        return df

    if endpoint == "posts":
        if "author" in df.columns:
            author_df = pd.json_normalize(df["author"])
            author_cols = ["avatarUrl", "followerCount", "fullname", "name"]
            for col in author_cols:
                if col in author_df.columns:
                    df[col] = author_df[col]
            df = df.drop("author", axis=1)

        for ts_col in ["publishedAt", "updatedAt"]:
            if ts_col in df.columns:
                df[ts_col] = pd.to_datetime(df[ts_col]).dt.tz_localize(None)

    elif endpoint == "daily_papers":
        if "paper" in df.columns:
            paper_df = pd.json_normalize(df["paper"], errors="ignore").add_prefix(
                "paper_"
            )
            df = pd.concat([df.drop("paper", axis=1), paper_df], axis=1)

        for ts_col in ["publishedAt", "paper_publishedAt"]:
            if ts_col in df.columns:
                df[ts_col] = pd.to_datetime(df[ts_col], errors="coerce").dt.tz_localize(
                    None
                )

    else:
        for field in ["createdAt", "lastModified"]:
            if field in df.columns:
                df[field] = pd.to_datetime(df[field], errors="coerce").dt.tz_localize(
                    None
                )

    # "gated" mixes booleans and strings in the API response; cast to str so
    # the column has a single parquet type.
    if "gated" in df.columns:
        df["gated"] = df["gated"].astype(str)

    # Nested dict columns are stored as JSON strings (see to_json_string).
    for col in ["cardData", "config", "gguf"]:
        if col in df.columns:
            df[col] = df[col].apply(to_json_string)

    return df


def save_parquet(df, output_file):
    df.to_parquet(output_file, index=False, engine="pyarrow")


@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, min=4, max=60))
async def fetch_data_page(session, url, params=None, headers=None):
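    """GET one page and return (parsed JSON body, raw Link header value).

    Retried with exponential backoff by the tenacity decorator above.
    """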
    async with session.get(url, params=params, headers=headers) as response:
        response.raise_for_status()
        return await response.json(), response.headers.get("Link")


async def create_parquet_files(skip_upload=False):
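    """Page through every endpoint, build a DataFrame per endpoint, write it to
    parquet, and upload it unless skip_upload is set.

    With skip_upload, raw API pages are also appended to a per-endpoint JSONL
    cache so the parquet files can be recreated offline. Returns the list of
    created files and the elapsed time in seconds.
    """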
    start_time = time.time()
    endpoints = ["daily_papers", "models", "spaces", "datasets", "posts"]
    created_files = []

    async with aiohttp.ClientSession() as session:
        for endpoint in endpoints:
            print(f"Processing {endpoint}...")

            config = ENDPOINT_CONFIGS[endpoint]
            base_url = config.get("base_url", f"https://huggingface.co/api/{endpoint}")
            params = {"limit": config["limit"]}
            params.update(config["params"])

            headers = {"Accept": "application/json"}
            all_data = []
            url = base_url
            page = 0

            jsonl_file = None
            if skip_upload:
                jsonl_file = os.path.join(CACHE_DIR, f"{endpoint}_raw.jsonl")
                # Truncate any stale cache file from a previous run.
                with open(jsonl_file, "w"):
                    pass

            while url:
                if endpoint == "posts":
                    params["skip"] = page * params["limit"]

                try:
                    data, link_header = await fetch_data_page(
                        session, url, params, headers
                    )

                    if skip_upload and jsonl_file:
                        with open(jsonl_file, "a") as f:
                            f.write(json.dumps(data) + "\n")

                    if endpoint == "posts":
                        # Posts use offset pagination (skip/limit) rather than
                        # a Link header, so completion is judged from the total.
                        items = data["socialPosts"]
                        total_items = data["numTotalItems"]
                        all_data.extend(items)

                        if (page + 1) * params["limit"] >= total_items:
                            url = None
                        else:
                            url = base_url
                    else:
                        # Other endpoints use cursor pagination: follow the
                        # rel="next" URL, which already embeds the query params.
                        all_data.extend(data)
                        url = parse_link_header(link_header)
                        if url:
                            params = {}

                    if len(all_data) % 10000 == 0:
                        print(f"  {len(all_data):,} records processed")

                    page += 1

                except Exception as e:
                    print(f"Error on page {page}: {e}")
                    await asyncio.sleep(2)
                    if page > 0:
                        # Keep the pages already collected and stop paginating.
                        url = None
                    else:
                        # Nothing collected yet: fail the whole run.
                        raise

            if skip_upload and jsonl_file and os.path.exists(jsonl_file):
                print(f"  Raw data saved to {jsonl_file}")

            df = pd.DataFrame(all_data)
            df = process_dataframe(df, endpoint)

            output_file = os.path.join(CACHE_DIR, f"{endpoint}.parquet")
            save_parquet(df, output_file)
            created_files.append(output_file)

            print(f"βœ“ {endpoint}: {len(df):,} rows -> {output_file}")

            if not skip_upload:
                upload_to_hub(output_file, REPO_ID)

    elapsed = time.time() - start_time
    return created_files, elapsed


def recreate_from_jsonl():
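    """Rebuild the parquet files from the raw JSONL pages cached by a previous
    --skip-upload run."""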
    endpoints = ["daily_papers", "models", "spaces", "datasets", "posts"]

    for endpoint in endpoints:
        jsonl_file = os.path.join(CACHE_DIR, f"{endpoint}_raw.jsonl")
        if not os.path.exists(jsonl_file):
            print(f"βœ— {jsonl_file} not found")
            continue

        print(f"Recreating {endpoint} from {jsonl_file}...")

        all_data = []
        with open(jsonl_file, "r") as f:
            for line in f:
                data = json.loads(line.strip())
                if endpoint == "posts":
                    all_data.extend(data["socialPosts"])
                else:
                    all_data.extend(data)

        df = pd.DataFrame(all_data)
        df = process_dataframe(df, endpoint)

        output_file = os.path.join(CACHE_DIR, f"{endpoint}.parquet")
        save_parquet(df, output_file)

        print(f"βœ“ {endpoint}: {len(df):,} rows -> {output_file}")


def upload_to_hub(file_path, repo_id):
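    """Upload one parquet file to the dataset repo; return True on success."""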
    try:
        api.upload_file(
            path_or_fileobj=file_path,
            path_in_repo=os.path.basename(file_path),
            repo_id=repo_id,
            repo_type="dataset",
        )
        print(f"βœ“ Uploaded {os.path.basename(file_path)} to {repo_id}")
        return True
    except Exception as e:
        print(f"βœ— Failed to upload {os.path.basename(file_path)}: {e}")
        return False


def main(skip_upload=False):
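    """Run the collection and print a summary of the created parquet files."""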
    created_files, elapsed = asyncio.run(create_parquet_files(skip_upload=skip_upload))

    print(f"\nCompleted in {elapsed:.2f} seconds")
    print(f"Created {len(created_files)} parquet files:")

    for file in created_files:
        size = os.path.getsize(file)
        rows = len(pd.read_parquet(file))
        print(f"  {os.path.basename(file)}: {rows:,} rows, {size:,} bytes")

    if skip_upload:
        print(f"\nRaw JSONL files saved to {CACHE_DIR}/ for recreation")
        print("Use 'python app.py --recreate' to recreate parquet files from JSONL")


if __name__ == "__main__":
    import sys

    if "--recreate" in sys.argv:
        recreate_from_jsonl()
    else:
        skip_upload = "--skip-upload" in sys.argv
        main(skip_upload=skip_upload)