---
dataset_info:
- config_name: issue_comments
  features:
  - name: user
    dtype: string
  - name: created_at
    dtype: timestamp[us]
  - name: body
    dtype: string
  - name: issue_number
    dtype: int64
  splits:
  - name: train
    num_bytes: 4224585
    num_examples: 8587
  download_size: 1677910
  dataset_size: 4224585
- config_name: issues
  features:
  - name: number
    dtype: int64
  - name: title
    dtype: string
  - name: user
    dtype: string
  - name: state
    dtype: string
  - name: created_at
    dtype: timestamp[us]
  - name: closed_at
    dtype: timestamp[us]
  - name: comments_count
    dtype: int64
  splits:
  - name: train
    num_bytes: 271785
    num_examples: 2550
  download_size: 159333
  dataset_size: 271785
- config_name: models
  features:
  - name: id
    dtype: string
  - name: created_at
    dtype: timestamp[us, tz=UTC]
  - name: likes
    dtype: int64
  - name: downloads
    dtype: int64
  - name: tags
    sequence: string
  splits:
  - name: train
    num_bytes: 15219515
    num_examples: 47485
  download_size: 2245503
  dataset_size: 15219515
- config_name: models_likes
  features:
  - name: user
    dtype: string
  - name: model_id
    dtype: string
  - name: liked_at
    dtype: timestamp[s, tz=UTC]
  splits:
  - name: train
    num_bytes: 340859.0
    num_examples: 5544
  download_size: 154646
  dataset_size: 340859.0
- config_name: pypi_downloads
  features:
  - name: day
    dtype: date32
  - name: num_downloads
    dtype: int64
  splits:
  - name: train
    num_bytes: 19440.0
    num_examples: 1620
  download_size: 15053
  dataset_size: 19440.0
- config_name: stargazers
  features:
  - name: starred_at
    dtype: timestamp[s, tz=UTC]
  - name: user
    dtype: string
  splits:
  - name: train
    num_bytes: 231795
    num_examples: 10842
  download_size: 224821
  dataset_size: 231795
configs:
- config_name: issue_comments
  data_files:
  - split: train
    path: issue_comments/train-*
- config_name: issues
  data_files:
  - split: train
    path: issues/train-*
- config_name: models
  data_files:
  - split: train
    path: models/train-*
- config_name: models_likes
  data_files:
  - split: train
    path: models_likes/train-*
- config_name: pypi_downloads
  data_files:
  - split: train
    path: pypi_downloads/train-*
- config_name: stargazers
  data_files:
  - split: train
    path: stargazers/train-*
---
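This dataset tracks usage metrics for [TRL](https://github.com/huggingface/trl): GitHub stars, issues and comments, PyPI downloads, and Hub models tagged `trl`. Each config can be loaded independently, for example:

```python
from datasets import load_dataset

# Load one config, e.g. the stargazers table (columns: starred_at, user)
dataset = load_dataset("qgallouedec/trl-metrics", "stargazers", split="train")
```

The sections below show how each config is produced.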

## Stars

```python
import requests
from datetime import datetime
from datasets import Dataset
import pyarrow as pa
import os

def get_stargazers(owner, repo, token):
    # Fetch stargazers page by page (100 per page)
    page = 1
    stargazers = []
    while True:
        # Construct the URL for the stargazers with pagination
        stargazers_url = f"https://api.github.com/repos/{owner}/{repo}/stargazers?page={page}&per_page=100"

        # Send the request to GitHub API with appropriate headers
        headers = {"Accept": "application/vnd.github.v3.star+json", "Authorization": "token " + token}
        response = requests.get(stargazers_url, headers=headers)

        if response.status_code != 200:
            raise Exception(f"Failed to fetch stargazers with status code {response.status_code}: {response.text}")

        stargazers_page = response.json()

        if not stargazers_page:  # Exit the loop if there are no more stargazers to process
            break

        stargazers.extend(stargazers_page)
        page += 1  # Move to the next page

    return stargazers

token = os.environ.get("GITHUB_PAT")
stargazers = get_stargazers("huggingface", "trl", token)
stargazers = {key: [stargazer[key] for stargazer in stargazers] for key in stargazers[0].keys()}
dataset = Dataset.from_dict(stargazers)

def clean(example):
    # Parse the ISO 8601 star timestamp and keep only the username
    starred_at = datetime.strptime(example["starred_at"], "%Y-%m-%dT%H:%M:%SZ")
    starred_at = pa.scalar(starred_at, type=pa.timestamp("s", tz="UTC"))
    return {"starred_at": starred_at, "user": example["user"]["login"]}

dataset = dataset.map(clean, remove_columns=dataset.column_names)
dataset.push_to_hub("qgallouedec/trl-metrics", config_name="stargazers")
```
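As a usage sketch (assuming the schema above), the cumulative star count over time can be read straight off the sorted `starred_at` column:

```python
from datasets import load_dataset

stargazers = load_dataset("qgallouedec/trl-metrics", "stargazers", split="train")

# Sorting by star date turns row rank into the cumulative star count
stargazers = stargazers.sort("starred_at")
dates = stargazers["starred_at"]
print(dates[-1], len(dates))  # date of the most recent star and the running total
```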


## PyPI downloads

```python
from datasets import Dataset
from google.cloud import bigquery
import os

os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "propane-tree-432413-4c3e2b5e6b3c.json"

# Initialize a BigQuery client
client = bigquery.Client()

# Define your query
query = """
#standardSQL
WITH daily_downloads AS (
  SELECT
    DATE(timestamp) AS day,
    COUNT(*) AS num_downloads
  FROM
    `bigquery-public-data.pypi.file_downloads`
  WHERE
    file.project = 'trl'
    -- Filter for the last 54 months
    AND DATE(timestamp) BETWEEN DATE_SUB(CURRENT_DATE(), INTERVAL 54 MONTH) AND CURRENT_DATE()
  GROUP BY
    day
)
SELECT
  day,
  num_downloads
FROM
  daily_downloads
ORDER BY
  day DESC
"""

# Execute the query
query_job = client.query(query)

# Fetch the results
results = query_job.result()

# Convert the results to a pandas DataFrame and then to a Dataset
df = results.to_dataframe()
dataset = Dataset.from_pandas(df)

dataset.push_to_hub("qgallouedec/trl-metrics", config_name="pypi_downloads")
```
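A sketch of downstream use, assuming the `day`/`num_downloads` schema above: aggregate the daily counts into monthly totals with pandas.

```python
import pandas as pd
from datasets import load_dataset

downloads = load_dataset("qgallouedec/trl-metrics", "pypi_downloads", split="train")

# Roll the daily counts up into monthly totals
df = downloads.to_pandas()
df["day"] = pd.to_datetime(df["day"])
monthly = df.groupby(df["day"].dt.to_period("M"))["num_downloads"].sum()
print(monthly.tail())
```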

## Models tagged `trl`


```python
from huggingface_hub import HfApi
from datasets import Dataset

api = HfApi()
models = api.list_models(tags="trl")
dataset_list = [
    {"id": model.id, "created_at": model.created_at, "likes": model.likes, "downloads": model.downloads, "tags": model.tags}
    for model in models
]
dataset_dict = {key: [d[key] for d in dataset_list] for key in dataset_list[0].keys()}
dataset = Dataset.from_dict(dataset_dict)
dataset.push_to_hub("qgallouedec/trl-metrics", config_name="models")
```
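For example, the most-downloaded tagged models can then be listed from the config (a sketch, assuming the schema above):

```python
from datasets import load_dataset

models = load_dataset("qgallouedec/trl-metrics", "models", split="train")

# Ten most-downloaded models tagged "trl"
for model in models.sort("downloads", reverse=True).select(range(10)):
    print(f"{model['id']}: {model['downloads']} downloads, {model['likes']} likes")
```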


## Issues and comments

```python
import requests
from datetime import datetime
import os
from datasets import Dataset
from tqdm import tqdm

token = os.environ.get("GITHUB_PAT")

def get_full_response(url, headers, params=None):
    page = 1
    output = []
    params = params or {}
    while True:
        params = {**params, "page": page, "per_page": 100}
        response = requests.get(url, headers=headers, params=params)

        if response.status_code != 200:
            raise Exception(f"Failed to fetch issues: {response.text}")

        batch = response.json()
        if len(batch) == 0:
            break
        output.extend(batch)
        page += 1
    return output

# GitHub API URL for issues, open and closed (note: this endpoint also returns pull requests)
issues_url = "https://api.github.com/repos/huggingface/trl/issues"

# Set up headers for authentication
headers = {"Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json"}

# Make the request
issues = get_full_response(issues_url, headers, params={"state": "all"})

issues_dataset_dict = {
    "number": [],
    "title": [],
    "user": [],
    "state": [],
    "created_at": [],
    "closed_at": [],
    "comments_count": [],
}
comments_dataset_dict = {
    "user": [],
    "created_at": [],
    "body": [],
    "issue_number": [],
}
for issue in tqdm(issues):
    # Extract relevant information
    issue_number = issue["number"]
    title = issue["title"]
    created_at = datetime.strptime(issue["created_at"], "%Y-%m-%dT%H:%M:%SZ")
    comments_count = issue["comments"]
    comments_url = issue["comments_url"]

    comments = get_full_response(comments_url, headers=headers)
    for comment in comments:
        comments_dataset_dict["user"].append(comment["user"]["login"])
        comments_dataset_dict["created_at"].append(datetime.strptime(comment["created_at"], "%Y-%m-%dT%H:%M:%SZ"))
        comments_dataset_dict["body"].append(comment["body"])
        comments_dataset_dict["issue_number"].append(issue_number)

    issues_dataset_dict["number"].append(issue_number)
    issues_dataset_dict["title"].append(title)
    issues_dataset_dict["user"].append(issue["user"]["login"])
    issues_dataset_dict["state"].append(issue["state"])
    issues_dataset_dict["created_at"].append(datetime.strptime(issue["created_at"], "%Y-%m-%dT%H:%M:%SZ"))
    issues_dataset_dict["closed_at"].append(datetime.strptime(issue["closed_at"], "%Y-%m-%dT%H:%M:%SZ") if issue["closed_at"] else None)
    issues_dataset_dict["comments_count"].append(comments_count)

issues_dataset = Dataset.from_dict(issues_dataset_dict)
comments_dataset = Dataset.from_dict(comments_dataset_dict)

issues_dataset.push_to_hub("qgallouedec/trl-metrics", config_name="issues")
comments_dataset.push_to_hub("qgallouedec/trl-metrics", config_name="issue_comments")
```
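As a usage sketch (assuming the schema above), the median time-to-close can be computed from the `created_at`/`closed_at` columns; `closed_at` is `None` for issues that are still open:

```python
from datasets import load_dataset

issues = load_dataset("qgallouedec/trl-metrics", "issues", split="train")

# Time-to-close for every closed issue; open issues have closed_at == None
deltas = sorted(
    closed - created
    for created, closed in zip(issues["created_at"], issues["closed_at"])
    if closed is not None
)
print("median time to close:", deltas[len(deltas) // 2])
```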