davanstrien (HF Staff) committed (verified)
Commit d760504 · 1 parent: 9b030f4

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/trackio_logo_old.png filter=lfs diff=lfs merge=lfs -text
__init__.py ADDED
@@ -0,0 +1,246 @@
import hashlib
import os
import warnings
import webbrowser
from pathlib import Path
from typing import Any

from gradio.blocks import BUILT_IN_THEMES
from gradio.themes import Default as DefaultTheme
from gradio.themes import ThemeClass
from gradio_client import Client

from trackio import context_vars, deploy, utils
from trackio.imports import import_csv, import_tf_events
from trackio.media import TrackioImage
from trackio.run import Run
from trackio.sqlite_storage import SQLiteStorage
from trackio.table import Table
from trackio.ui import demo
from trackio.utils import TRACKIO_DIR, TRACKIO_LOGO_DIR

__version__ = Path(__file__).parent.joinpath("version.txt").read_text().strip()

__all__ = [
    "init",
    "log",
    "finish",
    "show",
    "import_csv",
    "import_tf_events",
    "Image",
    "Table",
]

Image = TrackioImage


config = {}

DEFAULT_THEME = "citrus"


def init(
    project: str,
    name: str | None = None,
    space_id: str | None = None,
    dataset_id: str | None = None,
    config: dict | None = None,
    resume: str = "never",
    settings: Any = None,
) -> Run:
    """
    Creates a new Trackio project and returns a [`Run`] object.

    Args:
        project (`str`):
            The name of the project (can be an existing project to continue tracking or
            a new project to start tracking from scratch).
        name (`str` or `None`, *optional*, defaults to `None`):
            The name of the run (if not provided, a default name will be generated).
        space_id (`str` or `None`, *optional*, defaults to `None`):
            If provided, the project will be logged to a Hugging Face Space instead of
            a local directory. Should be a complete Space name like
            `"username/reponame"` or `"orgname/reponame"`, or just `"reponame"` in which
            case the Space will be created in the currently-logged-in Hugging Face
            user's namespace. If the Space does not exist, it will be created. If the
            Space already exists, the project will be logged to it.
        dataset_id (`str` or `None`, *optional*, defaults to `None`):
            If a `space_id` is provided, a persistent Hugging Face Dataset will be
            created and the metrics will be synced to it every 5 minutes. Specify a
            Dataset with name like `"username/datasetname"` or `"orgname/datasetname"`,
            or `"datasetname"` (uses currently-logged-in Hugging Face user's namespace),
            or `None` (uses the same name as the Space but with the `"_dataset"`
            suffix). If the Dataset does not exist, it will be created. If the Dataset
            already exists, the project will be appended to it.
        config (`dict` or `None`, *optional*, defaults to `None`):
            A dictionary of configuration options. Provided for compatibility with
            `wandb.init()`.
        resume (`str`, *optional*, defaults to `"never"`):
            Controls how to handle resuming a run. Can be one of:

            - `"must"`: Must resume the run with the given name, raises error if run
              doesn't exist
            - `"allow"`: Resume the run if it exists, otherwise create a new run
            - `"never"`: Never resume a run, always create a new one
        settings (`Any`, *optional*, defaults to `None`):
            Not used. Provided for compatibility with `wandb.init()`.

    Returns:
        `Run`: A [`Run`] object that can be used to log metrics and finish the run.
    """
    if settings is not None:
        warnings.warn(
            "* Warning: settings is not used. Provided for compatibility with wandb.init(). Please create an issue at: https://github.com/gradio-app/trackio/issues if you need a specific feature implemented."
        )

    if space_id is None and dataset_id is not None:
        raise ValueError("Must provide a `space_id` when `dataset_id` is provided.")
    space_id, dataset_id = utils.preprocess_space_and_dataset_ids(space_id, dataset_id)
    url = context_vars.current_server.get()

    if url is None:
        if space_id is None:
            _, url, _ = demo.launch(
                show_api=False,
                inline=False,
                quiet=True,
                prevent_thread_lock=True,
                show_error=True,
            )
        else:
            url = space_id
        context_vars.current_server.set(url)

    if (
        context_vars.current_project.get() is None
        or context_vars.current_project.get() != project
    ):
        print(f"* Trackio project initialized: {project}")

        if dataset_id is not None:
            os.environ["TRACKIO_DATASET_ID"] = dataset_id
            print(
                f"* Trackio metrics will be synced to Hugging Face Dataset: {dataset_id}"
            )
        if space_id is None:
            print(f"* Trackio metrics logged to: {TRACKIO_DIR}")
            utils.print_dashboard_instructions(project)
        else:
            deploy.create_space_if_not_exists(space_id, dataset_id)
            print(
                f"* View dashboard by going to: {deploy.SPACE_URL.format(space_id=space_id)}"
            )
    context_vars.current_project.set(project)

    client = None
    if not space_id:
        client = Client(url, verbose=False)

    if resume == "must":
        if name is None:
            raise ValueError("Must provide a run name when resume='must'")
        if name not in SQLiteStorage.get_runs(project):
            raise ValueError(f"Run '{name}' does not exist in project '{project}'")
    elif resume == "allow":
        if name is not None and name in SQLiteStorage.get_runs(project):
            print(f"* Resuming existing run: {name}")
    elif resume == "never":
        if name is not None and name in SQLiteStorage.get_runs(project):
            name = None
    else:
        raise ValueError("resume must be one of: 'must', 'allow', or 'never'")

    run = Run(
        url=url,
        project=project,
        client=client,
        name=name,
        config=config,
        space_id=space_id,
    )
    context_vars.current_run.set(run)
    globals()["config"] = run.config
    return run


def log(metrics: dict, step: int | None = None) -> None:
    """
    Logs metrics to the current run.

    Args:
        metrics (`dict`):
            A dictionary of metrics to log.
        step (`int` or `None`, *optional*, defaults to `None`):
            The step number. If not provided, the step will be incremented
            automatically.
    """
    run = context_vars.current_run.get()
    if run is None:
        raise RuntimeError("Call trackio.init() before trackio.log().")
    run.log(
        metrics=metrics,
        step=step,
    )


def finish():
    """
    Finishes the current run.
    """
    run = context_vars.current_run.get()
    if run is None:
        raise RuntimeError("Call trackio.init() before trackio.finish().")
    run.finish()


def show(project: str | None = None, theme: str | ThemeClass = DEFAULT_THEME):
    """
    Launches the Trackio dashboard.

    Args:
        project (`str` or `None`, *optional*, defaults to `None`):
            The name of the project whose runs to show. If not provided, all projects
            will be shown and the user can select one.
        theme (`str` or `ThemeClass`, *optional*, defaults to `"citrus"`):
            A Gradio Theme to use for the dashboard instead of the default `"citrus"`,
            can be a built-in theme (e.g. `'soft'`, `'default'`), a theme from the Hub
            (e.g. `"gstaff/xkcd"`), or a custom Theme class.
    """
    if theme != DEFAULT_THEME:
        # TODO: It's a little hacky to reproduce this theme-setting logic from Gradio Blocks,
        # but in Gradio 6.0, the theme will be set in `launch()` instead, which means that we
        # will be able to remove this code.
        if isinstance(theme, str):
            if theme.lower() in BUILT_IN_THEMES:
                theme = BUILT_IN_THEMES[theme.lower()]
            else:
                try:
                    theme = ThemeClass.from_hub(theme)
                except Exception as e:
                    warnings.warn(f"Cannot load {theme}. Caught Exception: {str(e)}")
                    theme = DefaultTheme()
        if not isinstance(theme, ThemeClass):
            warnings.warn("Theme should be a class loaded from gradio.themes")
            theme = DefaultTheme()
        demo.theme: ThemeClass = theme
        demo.theme_css = theme._get_theme_css()
        demo.stylesheets = theme._stylesheets
        theme_hasher = hashlib.sha256()
        theme_hasher.update(demo.theme_css.encode("utf-8"))
        demo.theme_hash = theme_hasher.hexdigest()

    _, url, share_url = demo.launch(
        show_api=False,
        quiet=True,
        inline=False,
        prevent_thread_lock=True,
        favicon_path=TRACKIO_LOGO_DIR / "trackio_logo_light.png",
        allowed_paths=[TRACKIO_LOGO_DIR],
    )

    base_url = share_url + "/" if share_url else url
    dashboard_url = base_url + f"?project={project}" if project else base_url
    print(f"* Trackio UI launched at: {dashboard_url}")
    webbrowser.open(dashboard_url)
    utils.block_except_in_notebook()
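The module above defines trackio's whole public API. A minimal usage sketch of the `init`/`log`/`finish` lifecycle it implements (the project name, run name, and metric are illustrative, not part of the commit):

```py
import trackio

# `init` launches (or connects to) the local dashboard and returns a Run.
run = trackio.init(project="my-experiments", name="baseline", config={"lr": 1e-3})

for step in range(100):
    loss = 1.0 / (step + 1)  # stand-in for a real training metric
    trackio.log({"loss": loss}, step=step)  # queued and sent in batches by the Run

trackio.finish()  # flushes queued logs and stops the background sender
```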
__pycache__/__init__.cpython-312.pyc ADDED (binary file, 10.9 kB)
__pycache__/commit_scheduler.cpython-312.pyc ADDED (binary file, 18.8 kB)
__pycache__/context_vars.cpython-312.pyc ADDED (binary file, 775 Bytes)
__pycache__/deploy.cpython-312.pyc ADDED (binary file, 6.75 kB)
__pycache__/dummy_commit_scheduler.cpython-312.pyc ADDED (binary file, 1.03 kB)
__pycache__/file_storage.cpython-312.pyc ADDED (binary file, 2.78 kB)
__pycache__/imports.cpython-312.pyc ADDED (binary file, 12.7 kB)
__pycache__/media.cpython-312.pyc ADDED (binary file, 5.79 kB)
__pycache__/run.cpython-312.pyc ADDED (binary file, 7.36 kB)
__pycache__/sqlite_storage.cpython-312.pyc ADDED (binary file, 18.7 kB)
__pycache__/table.cpython-312.pyc ADDED (binary file, 2.48 kB)
__pycache__/typehints.cpython-312.pyc ADDED (binary file, 867 Bytes)
__pycache__/ui.cpython-312.pyc ADDED (binary file, 29.9 kB)
__pycache__/utils.cpython-312.pyc ADDED (binary file, 15.3 kB)
assets/trackio_logo_dark.png ADDED
assets/trackio_logo_light.png ADDED
assets/trackio_logo_old.png ADDED
Git LFS details: SHA256 3922c4d1e465270ad4d8abb12023f3beed5d9f7f338528a4c0ac21dcf358a1c8 · pointer size 131 Bytes · remote file size 487 kB
assets/trackio_logo_type_dark.png ADDED
assets/trackio_logo_type_dark_transparent.png ADDED
assets/trackio_logo_type_light.png ADDED
assets/trackio_logo_type_light_transparent.png ADDED
cli.py ADDED
@@ -0,0 +1,32 @@
import argparse

from trackio import show


def main():
    parser = argparse.ArgumentParser(description="Trackio CLI")
    subparsers = parser.add_subparsers(dest="command")

    ui_parser = subparsers.add_parser(
        "show", help="Show the Trackio dashboard UI for a project"
    )
    ui_parser.add_argument(
        "--project", required=False, help="Project name to show in the dashboard"
    )
    ui_parser.add_argument(
        "--theme",
        required=False,
        default="citrus",
        help="A Gradio Theme to use for the dashboard instead of the default 'citrus', can be a built-in theme (e.g. 'soft', 'default'), a theme from the Hub (e.g. 'gstaff/xkcd').",
    )

    args = parser.parse_args()

    if args.command == "show":
        show(args.project, args.theme)
    else:
        parser.print_help()


if __name__ == "__main__":
    main()
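Given the argparse wiring above, the `show` subcommand is a thin wrapper over `show()` from `__init__.py`. A sketch of the equivalent direct call (the project name is illustrative, and a `trackio` console entry point is assumed from the package metadata, which is not part of this commit):

```py
# Equivalent to running `trackio show --project my-experiments --theme soft`:
from trackio import show

show(project="my-experiments", theme="soft")
```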
commit_scheduler.py ADDED
@@ -0,0 +1,391 @@
# Originally copied from https://github.com/huggingface/huggingface_hub/blob/d0a948fc2a32ed6e557042a95ef3e4af97ec4a7c/src/huggingface_hub/_commit_scheduler.py

import atexit
import logging
import os
import time
from concurrent.futures import Future
from dataclasses import dataclass
from io import SEEK_END, SEEK_SET, BytesIO
from pathlib import Path
from threading import Lock, Thread
from typing import Callable, Dict, List, Optional, Union

from huggingface_hub.hf_api import (
    DEFAULT_IGNORE_PATTERNS,
    CommitInfo,
    CommitOperationAdd,
    HfApi,
)
from huggingface_hub.utils import filter_repo_objects

logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class _FileToUpload:
    """Temporary dataclass to store info about files to upload. Not meant to be used directly."""

    local_path: Path
    path_in_repo: str
    size_limit: int
    last_modified: float


class CommitScheduler:
    """
    Scheduler to upload a local folder to the Hub at regular intervals (e.g. push to hub every 5 minutes).

    The recommended way to use the scheduler is to use it as a context manager. This ensures that the scheduler is
    properly stopped and the last commit is triggered when the script ends. The scheduler can also be stopped manually
    with the `stop` method. Check out the [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#scheduled-uploads)
    to learn more about how to use it.

    Args:
        repo_id (`str`):
            The id of the repo to commit to.
        folder_path (`str` or `Path`):
            Path to the local folder to upload regularly.
        every (`int` or `float`, *optional*):
            The number of minutes between each commit. Defaults to 5 minutes.
        path_in_repo (`str`, *optional*):
            Relative path of the directory in the repo, for example: `"checkpoints/"`. Defaults to the root folder
            of the repository.
        repo_type (`str`, *optional*):
            The type of the repo to commit to. Defaults to `model`.
        revision (`str`, *optional*):
            The revision of the repo to commit to. Defaults to `main`.
        private (`bool`, *optional*):
            Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists.
        token (`str`, *optional*):
            The token to use to commit to the repo. Defaults to the token saved on the machine.
        allow_patterns (`List[str]` or `str`, *optional*):
            If provided, only files matching at least one pattern are uploaded.
        ignore_patterns (`List[str]` or `str`, *optional*):
            If provided, files matching any of the patterns are not uploaded.
        squash_history (`bool`, *optional*):
            Whether to squash the history of the repo after each commit. Defaults to `False`. Squashing commits is
            useful to avoid degraded performances on the repo when it grows too large.
        hf_api (`HfApi`, *optional*):
            The [`HfApi`] client to use to commit to the Hub. Can be set with custom settings (user agent, token,...).
        on_before_commit (`Callable[[], None]`, *optional*):
            If specified, a function that will be called before the CommitScheduler lists files to create a commit.

    Example:
    ```py
    >>> from pathlib import Path
    >>> from huggingface_hub import CommitScheduler

    # Scheduler uploads every 10 minutes
    >>> csv_path = Path("watched_folder/data.csv")
    >>> CommitScheduler(repo_id="test_scheduler", repo_type="dataset", folder_path=csv_path.parent, every=10)

    >>> with csv_path.open("a") as f:
    ...     f.write("first line")

    # Some time later (...)
    >>> with csv_path.open("a") as f:
    ...     f.write("second line")
    ```

    Example using a context manager:
    ```py
    >>> from pathlib import Path
    >>> from huggingface_hub import CommitScheduler

    >>> with CommitScheduler(repo_id="test_scheduler", repo_type="dataset", folder_path="watched_folder", every=10) as scheduler:
    ...     csv_path = Path("watched_folder/data.csv")
    ...     with csv_path.open("a") as f:
    ...         f.write("first line")
    ...     (...)
    ...     with csv_path.open("a") as f:
    ...         f.write("second line")

    # Scheduler is now stopped and the last commit has been triggered
    ```
    """

    def __init__(
        self,
        *,
        repo_id: str,
        folder_path: Union[str, Path],
        every: Union[int, float] = 5,
        path_in_repo: Optional[str] = None,
        repo_type: Optional[str] = None,
        revision: Optional[str] = None,
        private: Optional[bool] = None,
        token: Optional[str] = None,
        allow_patterns: Optional[Union[List[str], str]] = None,
        ignore_patterns: Optional[Union[List[str], str]] = None,
        squash_history: bool = False,
        hf_api: Optional["HfApi"] = None,
        on_before_commit: Optional[Callable[[], None]] = None,
    ) -> None:
        self.api = hf_api or HfApi(token=token)
        self.on_before_commit = on_before_commit

        # Folder
        self.folder_path = Path(folder_path).expanduser().resolve()
        self.path_in_repo = path_in_repo or ""
        self.allow_patterns = allow_patterns

        if ignore_patterns is None:
            ignore_patterns = []
        elif isinstance(ignore_patterns, str):
            ignore_patterns = [ignore_patterns]
        self.ignore_patterns = ignore_patterns + DEFAULT_IGNORE_PATTERNS

        if self.folder_path.is_file():
            raise ValueError(
                f"'folder_path' must be a directory, not a file: '{self.folder_path}'."
            )
        self.folder_path.mkdir(parents=True, exist_ok=True)

        # Repository
        repo_url = self.api.create_repo(
            repo_id=repo_id, private=private, repo_type=repo_type, exist_ok=True
        )
        self.repo_id = repo_url.repo_id
        self.repo_type = repo_type
        self.revision = revision
        self.token = token

        self.last_uploaded: Dict[Path, float] = {}
        self.last_push_time: float | None = None

        if not every > 0:
            raise ValueError(f"'every' must be a positive number, not '{every}'.")
        self.lock = Lock()
        self.every = every
        self.squash_history = squash_history

        logger.info(
            f"Scheduled job to push '{self.folder_path}' to '{self.repo_id}' every {self.every} minutes."
        )
        self._scheduler_thread = Thread(target=self._run_scheduler, daemon=True)
        self._scheduler_thread.start()
        atexit.register(self._push_to_hub)

        self.__stopped = False

    def stop(self) -> None:
        """Stop the scheduler.

        A stopped scheduler cannot be restarted. Mostly for testing purposes.
        """
        self.__stopped = True

    def __enter__(self) -> "CommitScheduler":
        return self

    def __exit__(self, exc_type, exc_value, traceback) -> None:
        # Upload last changes before exiting
        self.trigger().result()
        self.stop()
        return

    def _run_scheduler(self) -> None:
        """Dumb thread waiting between each scheduled push to Hub."""
        while True:
            self.last_future = self.trigger()
            time.sleep(self.every * 60)
            if self.__stopped:
                break

    def trigger(self) -> Future:
        """Trigger a `push_to_hub` and return a future.

        This method is automatically called every `every` minutes. You can also call it manually to trigger a commit
        immediately, without waiting for the next scheduled commit.
        """
        return self.api.run_as_future(self._push_to_hub)

    def _push_to_hub(self) -> Optional[CommitInfo]:
        if self.__stopped:  # If stopped, already scheduled commits are ignored
            return None

        logger.info("(Background) scheduled commit triggered.")
        try:
            value = self.push_to_hub()
            if self.squash_history:
                logger.info("(Background) squashing repo history.")
                self.api.super_squash_history(
                    repo_id=self.repo_id, repo_type=self.repo_type, branch=self.revision
                )
            return value
        except Exception as e:
            logger.error(
                f"Error while pushing to Hub: {e}"
            )  # Depending on the setup, error might be silenced
            raise

    def push_to_hub(self) -> Optional[CommitInfo]:
        """
        Push folder to the Hub and return the commit info.

        <Tip warning={true}>

        This method is not meant to be called directly. It is run in the background by the scheduler, respecting a
        queue mechanism to avoid concurrent commits. Making a direct call to the method might lead to concurrency
        issues.

        </Tip>

        The default behavior of `push_to_hub` is to assume an append-only folder. It lists all files in the folder and
        uploads only changed files. If no changes are found, the method returns without committing anything. If you want
        to change this behavior, you can inherit from [`CommitScheduler`] and override this method. This can be useful
        for example to compress data together in a single file before committing. For more details and examples, check
        out our [integration guide](https://huggingface.co/docs/huggingface_hub/main/en/guides/upload#scheduled-uploads).
        """
        # Check files to upload (with lock)
        with self.lock:
            if self.on_before_commit is not None:
                self.on_before_commit()

            logger.debug("Listing files to upload for scheduled commit.")

            # List files from folder (taken from `_prepare_upload_folder_additions`)
            relpath_to_abspath = {
                path.relative_to(self.folder_path).as_posix(): path
                for path in sorted(
                    self.folder_path.glob("**/*")
                )  # sorted to be deterministic
                if path.is_file()
            }
            prefix = f"{self.path_in_repo.strip('/')}/" if self.path_in_repo else ""

            # Filter with pattern + filter out unchanged files + retrieve current file size
            files_to_upload: List[_FileToUpload] = []
            for relpath in filter_repo_objects(
                relpath_to_abspath.keys(),
                allow_patterns=self.allow_patterns,
                ignore_patterns=self.ignore_patterns,
            ):
                local_path = relpath_to_abspath[relpath]
                stat = local_path.stat()
                if (
                    self.last_uploaded.get(local_path) is None
                    or self.last_uploaded[local_path] != stat.st_mtime
                ):
                    files_to_upload.append(
                        _FileToUpload(
                            local_path=local_path,
                            path_in_repo=prefix + relpath,
                            size_limit=stat.st_size,
                            last_modified=stat.st_mtime,
                        )
                    )

        # Return if nothing to upload
        if len(files_to_upload) == 0:
            logger.debug("Dropping schedule commit: no changed file to upload.")
            return None

        # Convert `_FileToUpload` as `CommitOperationAdd` (=> compute file shas + limit to file size)
        logger.debug("Removing unchanged files since previous scheduled commit.")
        add_operations = [
            CommitOperationAdd(
                # TODO: Cap the file to its current size, even if the user appends data to it while a scheduled commit is happening
                # (requires an upstream fix for XET-535: `hf_xet` should support `BinaryIO` for upload)
                path_or_fileobj=file_to_upload.local_path,
                path_in_repo=file_to_upload.path_in_repo,
            )
            for file_to_upload in files_to_upload
        ]

        # Upload files (append mode expected - no need for lock)
        logger.debug("Uploading files for scheduled commit.")
        commit_info = self.api.create_commit(
            repo_id=self.repo_id,
            repo_type=self.repo_type,
            operations=add_operations,
            commit_message="Scheduled Commit",
            revision=self.revision,
        )

        for file in files_to_upload:
            self.last_uploaded[file.local_path] = file.last_modified

        self.last_push_time = time.time()

        return commit_info


class PartialFileIO(BytesIO):
    """A file-like object that reads only the first part of a file.

    Useful to upload a file to the Hub when the user might still be appending data to it. Only the first part of the
    file is uploaded (i.e. the part that was available when the filesystem was first scanned).

    In practice, only used internally by the CommitScheduler to regularly push a folder to the Hub with minimal
    disturbance for the user. The object is passed to `CommitOperationAdd`.

    Only supports `read`, `tell` and `seek` methods.

    Args:
        file_path (`str` or `Path`):
            Path to the file to read.
        size_limit (`int`):
            The maximum number of bytes to read from the file. If the file is larger than this, only the first part
            will be read (and uploaded).
    """

    def __init__(self, file_path: Union[str, Path], size_limit: int) -> None:
        self._file_path = Path(file_path)
        self._file = self._file_path.open("rb")
        self._size_limit = min(size_limit, os.fstat(self._file.fileno()).st_size)

    def __del__(self) -> None:
        self._file.close()
        return super().__del__()

    def __repr__(self) -> str:
        return (
            f"<PartialFileIO file_path={self._file_path} size_limit={self._size_limit}>"
        )

    def __len__(self) -> int:
        return self._size_limit

    def __getattribute__(self, name: str):
        if name.startswith("_") or name in (
            "read",
            "tell",
            "seek",
        ):  # only 3 public methods supported
            return super().__getattribute__(name)
        raise NotImplementedError(f"PartialFileIO does not support '{name}'.")

    def tell(self) -> int:
        """Return the current file position."""
        return self._file.tell()

    def seek(self, __offset: int, __whence: int = SEEK_SET) -> int:
        """Change the stream position to the given offset.

        Behavior is the same as a regular file, except that the position is capped to the size limit.
        """
        if __whence == SEEK_END:
            # SEEK_END => set from the truncated end
            __offset = len(self) + __offset
            __whence = SEEK_SET

        pos = self._file.seek(__offset, __whence)
        if pos > self._size_limit:
            return self._file.seek(self._size_limit)
        return pos

    def read(self, __size: Optional[int] = -1) -> bytes:
        """Read at most `__size` bytes from the file.

        Behavior is the same as a regular file, except that it is capped to the size limit.
        """
        current = self._file.tell()
        if __size is None or __size < 0:
            # Read until file limit
            truncated_size = self._size_limit - current
        else:
            # Read until file limit or __size
            truncated_size = min(__size, self._size_limit - current)
        return self._file.read(truncated_size)
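A short sketch of the guarantee `PartialFileIO` provides: reads are capped at the size recorded when the object was constructed, so bytes appended afterwards never leak into the upload. (The file name below is illustrative.)

```py
from pathlib import Path

log_file = Path("train.log")
log_file.write_text("first line\n")

# Snapshot the file at its current size.
snapshot = PartialFileIO(log_file, size_limit=log_file.stat().st_size)

with log_file.open("a") as f:
    f.write("appended after the scan\n")  # too late to be included

assert snapshot.read() == b"first line\n"  # capped at the original size
```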
context_vars.py ADDED
@@ -0,0 +1,15 @@
import contextvars
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from trackio.run import Run

current_run: contextvars.ContextVar["Run | None"] = contextvars.ContextVar(
    "current_run", default=None
)
current_project: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "current_project", default=None
)
current_server: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "current_server", default=None
)
deploy.py ADDED
@@ -0,0 +1,171 @@
import io
import os
import time
from importlib.resources import files
from pathlib import Path

import gradio
import huggingface_hub
from gradio_client import Client, handle_file
from httpx import ReadTimeout
from huggingface_hub.errors import RepositoryNotFoundError
from requests import HTTPError

from trackio.sqlite_storage import SQLiteStorage

SPACE_URL = "https://huggingface.co/spaces/{space_id}"
PERSISTENT_STORAGE_DIR = "/data/.huggingface/trackio"


def deploy_as_space(
    space_id: str,
    dataset_id: str | None = None,
):
    if (
        os.getenv("SYSTEM") == "spaces"
    ):  # in case a repo with this function is uploaded to spaces
        return

    trackio_path = files("trackio")

    hf_api = huggingface_hub.HfApi()

    try:
        huggingface_hub.create_repo(
            space_id,
            space_sdk="gradio",
            repo_type="space",
            exist_ok=True,
        )
    except HTTPError as e:
        if e.response.status_code in [401, 403]:  # unauthorized or forbidden
            print("Need 'write' access token to create a Spaces repo.")
            huggingface_hub.login(add_to_git_credential=False)
            huggingface_hub.create_repo(
                space_id,
                space_sdk="gradio",
                repo_type="space",
                exist_ok=True,
            )
        else:
            raise ValueError(f"Failed to create Space: {e}")

    with open(Path(trackio_path, "README.md"), "r") as f:
        readme_content = f.read()
    readme_content = readme_content.replace("{GRADIO_VERSION}", gradio.__version__)
    readme_buffer = io.BytesIO(readme_content.encode("utf-8"))
    hf_api.upload_file(
        path_or_fileobj=readme_buffer,
        path_in_repo="README.md",
        repo_id=space_id,
        repo_type="space",
    )

    # We can assume pandas, gradio, and huggingface-hub are already installed in a Gradio Space.
    # Make sure necessary dependencies are installed by creating a requirements.txt.
    requirements_content = """
pyarrow>=21.0
"""
    requirements_buffer = io.BytesIO(requirements_content.encode("utf-8"))
    hf_api.upload_file(
        path_or_fileobj=requirements_buffer,
        path_in_repo="requirements.txt",
        repo_id=space_id,
        repo_type="space",
    )

    huggingface_hub.utils.disable_progress_bars()
    hf_api.upload_folder(
        repo_id=space_id,
        repo_type="space",
        folder_path=trackio_path,
        ignore_patterns=["README.md"],
    )

    huggingface_hub.add_space_variable(space_id, "TRACKIO_DIR", PERSISTENT_STORAGE_DIR)
    if hf_token := huggingface_hub.utils.get_token():
        huggingface_hub.add_space_secret(space_id, "HF_TOKEN", hf_token)
    if dataset_id is not None:
        huggingface_hub.add_space_variable(space_id, "TRACKIO_DATASET_ID", dataset_id)


def create_space_if_not_exists(
    space_id: str,
    dataset_id: str | None = None,
) -> None:
    """
    Creates a new Hugging Face Space if it does not exist. If a dataset_id is provided, it will be added as a space variable.

    Args:
        space_id: The ID of the Space to create.
        dataset_id: The ID of the Dataset to add to the Space.
    """
    if "/" not in space_id:
        raise ValueError(
            f"Invalid space ID: {space_id}. Must be in the format: username/reponame or orgname/reponame."
        )
    if dataset_id is not None and "/" not in dataset_id:
        raise ValueError(
            f"Invalid dataset ID: {dataset_id}. Must be in the format: username/datasetname or orgname/datasetname."
        )
    try:
        huggingface_hub.repo_info(space_id, repo_type="space")
        print(f"* Found existing space: {SPACE_URL.format(space_id=space_id)}")
        if dataset_id is not None:
            huggingface_hub.add_space_variable(
                space_id, "TRACKIO_DATASET_ID", dataset_id
            )
        return
    except RepositoryNotFoundError:
        pass
    except HTTPError as e:
        if e.response.status_code in [401, 403]:  # unauthorized or forbidden
            print("Need 'write' access token to create a Spaces repo.")
            huggingface_hub.login(add_to_git_credential=False)
            huggingface_hub.add_space_variable(
                space_id, "TRACKIO_DATASET_ID", dataset_id
            )
        else:
            raise ValueError(f"Failed to create Space: {e}")

    print(f"* Creating new space: {SPACE_URL.format(space_id=space_id)}")
    deploy_as_space(space_id, dataset_id)


def wait_until_space_exists(
    space_id: str,
) -> None:
    """
    Blocks the current thread until the space exists.
    Raises a TimeoutError if the Space does not become available within the retry window.

    Args:
        space_id: The ID of the Space to wait for.
    """
    delay = 1
    for _ in range(10):
        try:
            Client(space_id, verbose=False)
            return
        except (ReadTimeout, ValueError):
            time.sleep(delay)
            delay = min(delay * 2, 30)
    raise TimeoutError("Waiting for space to exist took longer than expected")


def upload_db_to_space(project: str, space_id: str) -> None:
    """
    Uploads the database of a local Trackio project to a Hugging Face Space.

    Args:
        project: The name of the project to upload.
        space_id: The ID of the Space to upload to.
    """
    db_path = SQLiteStorage.get_project_db_path(project)
    client = Client(space_id, verbose=False)
    client.predict(
        api_name="/upload_db_to_space",
        project=project,
        uploaded_db=handle_file(db_path),
        hf_token=huggingface_hub.utils.get_token(),
    )
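Taken together, these helpers implement the Space-deployment path that `import_csv` and `import_tf_events` follow: create the Space if needed, wait for it to come up, then push the local database to it. A sketch of that sequence (the project name and Space ID are placeholders):

```py
project = "my-experiments"
space_id = "username/trackio-dashboard"

create_space_if_not_exists(space_id)   # no-op if the Space already exists
wait_until_space_exists(space_id)      # polls with doubling backoff, up to 10 tries
upload_db_to_space(project, space_id)  # sends the local SQLite db via the Space API
```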
dummy_commit_scheduler.py ADDED
@@ -0,0 +1,12 @@
# A dummy object to fit the interface of huggingface_hub's CommitScheduler
class DummyCommitSchedulerLock:
    def __enter__(self):
        return None

    def __exit__(self, exception_type, exception_value, exception_traceback):
        pass


class DummyCommitScheduler:
    def __init__(self):
        self.lock = DummyCommitSchedulerLock()
file_storage.py ADDED
@@ -0,0 +1,59 @@
from pathlib import Path

from PIL import Image as PILImage

try:  # absolute imports when installed
    from trackio.utils import TRACKIO_DIR
except ImportError:  # relative imports for local execution on Spaces
    from utils import TRACKIO_DIR


class FileStorage:
    @staticmethod
    def get_project_media_path(
        project: str,
        run: str | None = None,
        step: int | None = None,
        filename: str | None = None,
    ) -> Path:
        if filename is not None and step is None:
            raise ValueError("filename requires step")
        if step is not None and run is None:
            raise ValueError("step requires run")

        path = TRACKIO_DIR / "media" / project
        if run:
            path /= run
        if step is not None:
            path /= str(step)
        if filename:
            path /= filename
        return path

    @staticmethod
    def init_project_media_path(
        project: str, run: str | None = None, step: int | None = None
    ) -> Path:
        path = FileStorage.get_project_media_path(project, run, step)
        path.mkdir(parents=True, exist_ok=True)
        return path

    @staticmethod
    def save_image(
        image: PILImage.Image,
        project: str,
        run: str,
        step: int,
        filename: str,
        format: str = "PNG",
    ) -> Path:
        path = FileStorage.init_project_media_path(project, run, step) / filename
        image.save(path, format=format)
        return path

    @staticmethod
    def get_image(project: str, run: str, step: int, filename: str) -> PILImage.Image:
        path = FileStorage.get_project_media_path(project, run, step, filename)
        if not path.exists():
            raise FileNotFoundError(f"Image file not found: {path}")
        return PILImage.open(path).convert("RGBA")
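The resulting on-disk layout is `{TRACKIO_DIR}/media/{project}/{run}/{step}/{filename}`. A small save/load round trip using the class above (project, run, and file names are illustrative):

```py
from PIL import Image as PILImage

img = PILImage.new("RGB", (32, 32), color="red")
saved_path = FileStorage.save_image(
    img, "my-experiments", "baseline", step=0, filename="sample.png"
)

# Later, read it back (always returned as RGBA):
restored = FileStorage.get_image("my-experiments", "baseline", step=0, filename="sample.png")
```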
imports.py ADDED
@@ -0,0 +1,288 @@
import os
from pathlib import Path

import pandas as pd

from trackio import deploy, utils
from trackio.sqlite_storage import SQLiteStorage


def import_csv(
    csv_path: str | Path,
    project: str,
    name: str | None = None,
    space_id: str | None = None,
    dataset_id: str | None = None,
) -> None:
    """
    Imports a CSV file into a Trackio project. The CSV file must contain a `"step"`
    column, may optionally contain a `"timestamp"` column, and any other columns will be
    treated as metrics. It should also include a header row with the column names.

    TODO: call init() and return a Run object so that the user can continue to log metrics to it.

    Args:
        csv_path (`str` or `Path`):
            The str or Path to the CSV file to import.
        project (`str`):
            The name of the project to import the CSV file into. Must not be an existing
            project.
        name (`str` or `None`, *optional*, defaults to `None`):
            The name of the run to import the CSV file into. If not provided, a default
            name will be generated.
        space_id (`str` or `None`, *optional*, defaults to `None`):
            If provided, the project will be logged to a Hugging Face Space instead of a
            local directory. Should be a complete Space name like `"username/reponame"`
            or `"orgname/reponame"`, or just `"reponame"` in which case the Space will
            be created in the currently-logged-in Hugging Face user's namespace. If the
            Space does not exist, it will be created. If the Space already exists, the
            project will be logged to it.
        dataset_id (`str` or `None`, *optional*, defaults to `None`):
            If provided, a persistent Hugging Face Dataset will be created and the
            metrics will be synced to it every 5 minutes. Should be a complete Dataset
            name like `"username/datasetname"` or `"orgname/datasetname"`, or just
            `"datasetname"` in which case the Dataset will be created in the
            currently-logged-in Hugging Face user's namespace. If the Dataset does not
            exist, it will be created. If the Dataset already exists, the project will
            be appended to it. If not provided, the metrics will be logged to a local
            SQLite database, unless a `space_id` is provided, in which case a Dataset
            will be automatically created with the same name as the Space but with the
            `"_dataset"` suffix.
    """
    if SQLiteStorage.get_runs(project):
        raise ValueError(
            f"Project '{project}' already exists. Cannot import CSV into existing project."
        )

    csv_path = Path(csv_path)
    if not csv_path.exists():
        raise FileNotFoundError(f"CSV file not found: {csv_path}")

    df = pd.read_csv(csv_path)
    if df.empty:
        raise ValueError("CSV file is empty")

    column_mapping = utils.simplify_column_names(df.columns.tolist())
    df = df.rename(columns=column_mapping)

    step_column = None
    for col in df.columns:
        if col.lower() == "step":
            step_column = col
            break

    if step_column is None:
        raise ValueError("CSV file must contain a 'step' or 'Step' column")

    if name is None:
        name = csv_path.stem

    metrics_list = []
    steps = []
    timestamps = []

    numeric_columns = []
    for column in df.columns:
        if column == step_column:
            continue
        if column == "timestamp":
            continue

        try:
            pd.to_numeric(df[column], errors="raise")
            numeric_columns.append(column)
        except (ValueError, TypeError):
            continue

    for _, row in df.iterrows():
        metrics = {}
        for column in numeric_columns:
            value = row[column]
            if bool(pd.notna(value)):
                metrics[column] = float(value)

        if metrics:
            metrics_list.append(metrics)
            steps.append(int(row[step_column]))

            if "timestamp" in df.columns and bool(pd.notna(row["timestamp"])):
                timestamps.append(str(row["timestamp"]))
            else:
                timestamps.append("")

    if metrics_list:
        SQLiteStorage.bulk_log(
            project=project,
            run=name,
            metrics_list=metrics_list,
            steps=steps,
            timestamps=timestamps,
        )

    print(
        f"* Imported {len(metrics_list)} rows from {csv_path} into project '{project}' as run '{name}'"
    )
    print(f"* Metrics found: {', '.join(metrics_list[0].keys())}")

    space_id, dataset_id = utils.preprocess_space_and_dataset_ids(space_id, dataset_id)
    if dataset_id is not None:
        os.environ["TRACKIO_DATASET_ID"] = dataset_id
        print(f"* Trackio metrics will be synced to Hugging Face Dataset: {dataset_id}")

    if space_id is None:
        utils.print_dashboard_instructions(project)
    else:
        deploy.create_space_if_not_exists(space_id, dataset_id)
        deploy.wait_until_space_exists(space_id)
        deploy.upload_db_to_space(project, space_id)
        print(
            f"* View dashboard by going to: {deploy.SPACE_URL.format(space_id=space_id)}"
        )


def import_tf_events(
    log_dir: str | Path,
    project: str,
    name: str | None = None,
    space_id: str | None = None,
    dataset_id: str | None = None,
) -> None:
    """
    Imports TensorFlow Events files from a directory into a Trackio project. Each
    subdirectory in the log directory will be imported as a separate run.

    Args:
        log_dir (`str` or `Path`):
            The str or Path to the directory containing TensorFlow Events files.
        project (`str`):
            The name of the project to import the TensorFlow Events files into. Must not
            be an existing project.
        name (`str` or `None`, *optional*, defaults to `None`):
            The name prefix for runs (if not provided, will use directory names). Each
            subdirectory will create a separate run.
        space_id (`str` or `None`, *optional*, defaults to `None`):
            If provided, the project will be logged to a Hugging Face Space instead of a
            local directory. Should be a complete Space name like `"username/reponame"`
            or `"orgname/reponame"`, or just `"reponame"` in which case the Space will
            be created in the currently-logged-in Hugging Face user's namespace. If the
            Space does not exist, it will be created. If the Space already exists, the
            project will be logged to it.
        dataset_id (`str` or `None`, *optional*, defaults to `None`):
            If provided, a persistent Hugging Face Dataset will be created and the
            metrics will be synced to it every 5 minutes. Should be a complete Dataset
            name like `"username/datasetname"` or `"orgname/datasetname"`, or just
            `"datasetname"` in which case the Dataset will be created in the
            currently-logged-in Hugging Face user's namespace. If the Dataset does not
            exist, it will be created. If the Dataset already exists, the project will
            be appended to it. If not provided, the metrics will be logged to a local
            SQLite database, unless a `space_id` is provided, in which case a Dataset
            will be automatically created with the same name as the Space but with the
            `"_dataset"` suffix.
    """
    try:
        from tbparse import SummaryReader
    except ImportError:
        raise ImportError(
            "The `tbparse` package is not installed but is required for `import_tf_events`. Please install trackio with the `tensorboard` extra: `pip install trackio[tensorboard]`."
        )

    if SQLiteStorage.get_runs(project):
        raise ValueError(
            f"Project '{project}' already exists. Cannot import TF events into existing project."
        )

    path = Path(log_dir)
    if not path.exists():
        raise FileNotFoundError(f"TF events directory not found: {path}")

    # Use tbparse to read all tfevents files in the directory structure
    reader = SummaryReader(str(path), extra_columns={"dir_name"})
    df = reader.scalars

    if df.empty:
        raise ValueError(f"No TensorFlow events data found in {path}")

    total_imported = 0
    imported_runs = []

    # Group by dir_name to create separate runs
    for dir_name, group_df in df.groupby("dir_name"):
        try:
            # Determine run name based on directory name
            if dir_name == "":
                run_name = "main"  # For files in the root directory
            else:
                run_name = dir_name  # Use directory name

            if name:
                run_name = f"{name}_{run_name}"

            if group_df.empty:
                print(f"* Skipping directory {dir_name}: no scalar data found")
                continue

            metrics_list = []
            steps = []
            timestamps = []

            for _, row in group_df.iterrows():
                # Convert row values to appropriate types
                tag = str(row["tag"])
                value = float(row["value"])
                step = int(row["step"])

                metrics = {tag: value}
                metrics_list.append(metrics)
                steps.append(step)

                # Use wall_time if present, else fallback
                if "wall_time" in group_df.columns and not bool(
                    pd.isna(row["wall_time"])
                ):
                    timestamps.append(str(row["wall_time"]))
                else:
                    timestamps.append("")

            if metrics_list:
                SQLiteStorage.bulk_log(
                    project=project,
                    run=str(run_name),
                    metrics_list=metrics_list,
                    steps=steps,
                    timestamps=timestamps,
                )

                total_imported += len(metrics_list)
                imported_runs.append(run_name)

                print(
                    f"* Imported {len(metrics_list)} scalar events from directory '{dir_name}' as run '{run_name}'"
                )
                print(f"* Metrics in this run: {', '.join(set(group_df['tag']))}")

        except Exception as e:
            print(f"* Error processing directory {dir_name}: {e}")
            continue

    if not imported_runs:
        raise ValueError("No valid TensorFlow events data could be imported")

    print(f"* Total imported events: {total_imported}")
    print(f"* Created runs: {', '.join(imported_runs)}")

    space_id, dataset_id = utils.preprocess_space_and_dataset_ids(space_id, dataset_id)
    if dataset_id is not None:
        os.environ["TRACKIO_DATASET_ID"] = dataset_id
        print(f"* Trackio metrics will be synced to Hugging Face Dataset: {dataset_id}")

    if space_id is None:
        utils.print_dashboard_instructions(project)
    else:
        deploy.create_space_if_not_exists(space_id, dataset_id)
        deploy.wait_until_space_exists(space_id)
        deploy.upload_db_to_space(project, space_id)
        print(
            f"* View dashboard by going to: {deploy.SPACE_URL.format(space_id=space_id)}"
        )
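For `import_csv`, the expected input is a header row, a `step` column, an optional `timestamp` column, and numeric metric columns. A minimal sketch (the file path, project, and run name are illustrative):

```py
# metrics.csv:
# step,timestamp,loss,accuracy
# 0,2024-01-01 00:00:00,0.9,0.42
# 1,2024-01-01 00:01:00,0.7,0.55

import_csv("metrics.csv", project="imported-experiments", name="baseline")
```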
media.py ADDED
@@ -0,0 +1,100 @@
import uuid
from pathlib import Path

import numpy as np
from PIL import Image as PILImage

try:  # absolute imports when installed
    from trackio.file_storage import FileStorage
    from trackio.utils import TRACKIO_DIR
except ImportError:  # relative imports for local execution on Spaces
    from file_storage import FileStorage
    from utils import TRACKIO_DIR


class TrackioImage:
    """
    Creates an image that can be logged with trackio.

    Demo: fake-training-images
    """

    TYPE = "trackio.image"

    def __init__(
        self, value: str | np.ndarray | PILImage.Image, caption: str | None = None
    ):
        """
        Parameters:
            value: A string path to an image, a numpy array, or a PIL Image.
            caption: A string caption for the image.
        """
        self.caption = caption
        self._pil = TrackioImage._as_pil(value)
        self._file_path: Path | None = None
        self._file_format: str | None = None

    @staticmethod
    def _as_pil(value: str | np.ndarray | PILImage.Image) -> PILImage.Image:
        try:
            if isinstance(value, str):
                return PILImage.open(value).convert("RGBA")
            elif isinstance(value, np.ndarray):
                arr = np.asarray(value).astype("uint8")
                return PILImage.fromarray(arr).convert("RGBA")
            elif isinstance(value, PILImage.Image):
                return value.convert("RGBA")
        except Exception as e:
            raise ValueError(f"Failed to process image data: {value}") from e

    def _save(self, project: str, run: str, step: int = 0, format: str = "PNG") -> str:
        if not self._file_path:
            # Save image as {TRACKIO_DIR}/media/{project}/{run}/{step}/{uuid}.{ext}
            filename = f"{uuid.uuid4()}.{format.lower()}"
            path = FileStorage.save_image(
                self._pil, project, run, step, filename, format=format
            )
            self._file_path = path.relative_to(TRACKIO_DIR)
            self._file_format = format
        return str(self._file_path)

    def _get_relative_file_path(self) -> Path | None:
        return self._file_path

    def _get_absolute_file_path(self) -> Path | None:
        return TRACKIO_DIR / self._file_path

    def _to_dict(self) -> dict:
        if not self._file_path:
            raise ValueError("Image must be saved to file before serialization")
        return {
            "_type": self.TYPE,
            "file_path": str(self._get_relative_file_path()),
            "file_format": self._file_format,
            "caption": self.caption,
        }

    @classmethod
    def _from_dict(cls, obj: dict) -> "TrackioImage":
        if not isinstance(obj, dict):
            raise TypeError(f"Expected dict, got {type(obj).__name__}")
        if obj.get("_type") != cls.TYPE:
            raise ValueError(f"Wrong _type: {obj.get('_type')!r}")

        file_path = obj.get("file_path")
        if not isinstance(file_path, str):
            raise TypeError(
                f"'file_path' must be string, got {type(file_path).__name__}"
            )

        absolute_path = TRACKIO_DIR / file_path
        try:
            if not absolute_path.is_file():
                raise ValueError(f"Image file not found: {file_path}")
            pil = PILImage.open(absolute_path).convert("RGBA")
            instance = cls(pil, caption=obj.get("caption"))
            instance._file_path = Path(file_path)
            instance._file_format = obj.get("file_format")
            return instance
        except Exception as e:
            raise ValueError(f"Failed to load image from file: {absolute_path}") from e
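`TrackioImage` is exposed as `trackio.Image` in `__init__.py`, so an image is logged like any other metric and serialized via `_save`/`_to_dict` behind the scenes. A sketch (project and metric names are illustrative):

```py
import numpy as np
import trackio

run = trackio.init(project="my-experiments")
pixels = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
trackio.log({"sample": trackio.Image(pixels, caption="random noise")})
trackio.finish()
```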
py.typed ADDED
(empty file)
run.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import threading
2
+ import time
3
+
4
+ import huggingface_hub
5
+ from gradio_client import Client, handle_file
6
+
7
+ from trackio.media import TrackioImage
8
+ from trackio.sqlite_storage import SQLiteStorage
9
+ from trackio.table import Table
10
+ from trackio.typehints import LogEntry, UploadEntry
11
+ from trackio.utils import RESERVED_KEYS, fibo, generate_readable_name
12
+
13
+ BATCH_SEND_INTERVAL = 0.5
14
+
15
+
16
+ class Run:
17
+ def __init__(
18
+ self,
19
+ url: str,
20
+ project: str,
21
+ client: Client | None,
22
+ name: str | None = None,
23
+ config: dict | None = None,
24
+ space_id: str | None = None,
25
+ ):
26
+         self.url = url
+         self.project = project
+         self._client_lock = threading.Lock()
+         self._client_thread = None
+         self._client = client
+         self._space_id = space_id
+         self.name = name or generate_readable_name(
+             SQLiteStorage.get_runs(project), space_id
+         )
+         self.config = config or {}
+         self._queued_logs: list[LogEntry] = []
+         self._queued_uploads: list[UploadEntry] = []
+         self._stop_flag = threading.Event()
+
+         self._client_thread = threading.Thread(target=self._init_client_background)
+         self._client_thread.daemon = True
+         self._client_thread.start()
+
+     def _batch_sender(self):
+         """Send batched logs every BATCH_SEND_INTERVAL."""
+         while not self._stop_flag.is_set() or len(self._queued_logs) > 0:
+             # If the stop flag has been set, then just quickly send all
+             # the logs and exit.
+             if not self._stop_flag.is_set():
+                 time.sleep(BATCH_SEND_INTERVAL)
+
+             with self._client_lock:
+                 if self._queued_logs and self._client is not None:
+                     logs_to_send = self._queued_logs.copy()
+                     self._queued_logs.clear()
+                     self._client.predict(
+                         api_name="/bulk_log",
+                         logs=logs_to_send,
+                         hf_token=huggingface_hub.utils.get_token(),
+                     )
+                 if self._queued_uploads and self._client is not None:
+                     uploads_to_send = self._queued_uploads.copy()
+                     self._queued_uploads.clear()
+                     self._client.predict(
+                         api_name="/bulk_upload_media",
+                         uploads=uploads_to_send,
+                         hf_token=huggingface_hub.utils.get_token(),
+                     )
+
+     def _init_client_background(self):
+         if self._client is None:
+             fib = fibo()
+             for sleep_coefficient in fib:
+                 try:
+                     client = Client(self.url, verbose=False)
+
+                     with self._client_lock:
+                         self._client = client
+                     break
+                 except Exception:
+                     pass
+                 if sleep_coefficient is not None:
+                     time.sleep(0.1 * sleep_coefficient)
+
+         self._batch_sender()
+
+     def _process_media(self, metrics, step: int | None) -> dict:
+         """
+         Serialize media in metrics and upload to space if needed.
+         """
+         serializable_metrics = {}
+         if not step:
+             step = 0
+         for key, value in metrics.items():
+             if isinstance(value, TrackioImage):
+                 value._save(self.project, self.name, step)
+                 serializable_metrics[key] = value._to_dict()
+                 if self._space_id:
+                     # Upload local media when deploying to space
+                     upload_entry: UploadEntry = {
+                         "project": self.project,
+                         "run": self.name,
+                         "step": step,
+                         "uploaded_file": handle_file(value._get_absolute_file_path()),
+                     }
+                     with self._client_lock:
+                         self._queued_uploads.append(upload_entry)
+             else:
+                 serializable_metrics[key] = value
+         return serializable_metrics
+
+     @staticmethod
+     def _replace_tables(metrics):
+         for k, v in metrics.items():
+             if isinstance(v, Table):
+                 metrics[k] = v._to_dict()
+
+     def log(self, metrics: dict, step: int | None = None):
+         for k in metrics.keys():
+             if k in RESERVED_KEYS or k.startswith("__"):
+                 raise ValueError(
+                     f"Please do not use this reserved key as a metric: {k}"
+                 )
+         Run._replace_tables(metrics)
+
+         metrics = self._process_media(metrics, step)
+         log_entry: LogEntry = {
+             "project": self.project,
+             "run": self.name,
+             "metrics": metrics,
+             "step": step,
+         }
+
+         with self._client_lock:
+             self._queued_logs.append(log_entry)
+
+     def finish(self):
+         """Cleanup when run is finished."""
+         self._stop_flag.set()
+
+         # Wait for the batch sender to finish before joining the client thread.
+         time.sleep(2 * BATCH_SEND_INTERVAL)
+
+         if self._client_thread is not None:
+             print(
+                 f"* Run finished. Uploading logs to Trackio Space: {self.url} (please wait...)"
+             )
+             self._client_thread.join()
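Note that `Run.log` only appends to an in-memory queue; the background `_batch_sender` thread is what actually flushes entries to the server. A minimal, self-contained sketch of the same queue-and-flush pattern (the `BatchingLogger` name, the interval value, and the print-based `send_fn` below are illustrative assumptions, not part of trackio):

import threading
import time

BATCH_SEND_INTERVAL = 0.5  # assumption: the real constant lives elsewhere in run.py


class BatchingLogger:
    """Queues entries and flushes them on a daemon thread, like Run._batch_sender."""

    def __init__(self, send_fn):
        self._send_fn = send_fn  # stands in for a network call such as client.predict(...)
        self._queued = []
        self._lock = threading.Lock()
        self._stop = threading.Event()
        self._thread = threading.Thread(target=self._sender, daemon=True)
        self._thread.start()

    def log(self, entry):
        with self._lock:
            self._queued.append(entry)

    def _sender(self):
        # Keep flushing until stop is requested AND the queue is drained.
        while not self._stop.is_set() or self._queued:
            if not self._stop.is_set():
                time.sleep(BATCH_SEND_INTERVAL)
            with self._lock:
                if self._queued:
                    batch, self._queued = self._queued, []
                    self._send_fn(batch)

    def finish(self):
        self._stop.set()
        self._thread.join()


logger = BatchingLogger(send_fn=lambda batch: print(f"sent {len(batch)} entries"))
for i in range(5):
    logger.log({"step": i, "loss": 1.0 / (i + 1)})
logger.finish()

As in `Run.finish`, setting the stop event lets the sender loop drain whatever is still queued before the thread exits.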
sqlite_storage.py ADDED
@@ -0,0 +1,384 @@
+ import json
+ import os
+ import sqlite3
+ from datetime import datetime
+ from pathlib import Path
+ from threading import Lock
+
+ import huggingface_hub as hf
+ import pandas as pd
+
+ try:  # absolute imports when installed
+     from trackio.commit_scheduler import CommitScheduler
+     from trackio.dummy_commit_scheduler import DummyCommitScheduler
+     from trackio.utils import TRACKIO_DIR
+ except Exception:  # relative imports for local execution on Spaces
+     from commit_scheduler import CommitScheduler
+     from dummy_commit_scheduler import DummyCommitScheduler
+     from utils import TRACKIO_DIR
+
+
+ class SQLiteStorage:
+     _dataset_import_attempted = False
+     _current_scheduler: CommitScheduler | DummyCommitScheduler | None = None
+     _scheduler_lock = Lock()
+
+     @staticmethod
+     def _get_connection(db_path: Path) -> sqlite3.Connection:
+         conn = sqlite3.connect(str(db_path))
+         conn.row_factory = sqlite3.Row
+         return conn
+
+     @staticmethod
+     def get_project_db_filename(project: str) -> str:
+         """Get the database filename for a specific project."""
+         safe_project_name = "".join(
+             c for c in project if c.isalnum() or c in ("-", "_")
+         ).rstrip()
+         if not safe_project_name:
+             safe_project_name = "default"
+         return f"{safe_project_name}.db"
+
+     @staticmethod
+     def get_project_db_path(project: str) -> Path:
+         """Get the database path for a specific project."""
+         filename = SQLiteStorage.get_project_db_filename(project)
+         return TRACKIO_DIR / filename
+
+     @staticmethod
+     def init_db(project: str) -> Path:
+         """
+         Initialize the SQLite database with required tables.
+         If there is a dataset ID provided, copies from that dataset instead.
+         Returns the database path.
+         """
+         db_path = SQLiteStorage.get_project_db_path(project)
+         db_path.parent.mkdir(parents=True, exist_ok=True)
+         with SQLiteStorage.get_scheduler().lock:
+             with sqlite3.connect(db_path) as conn:
+                 cursor = conn.cursor()
+                 cursor.execute("""
+                     CREATE TABLE IF NOT EXISTS metrics (
+                         id INTEGER PRIMARY KEY AUTOINCREMENT,
+                         timestamp TEXT NOT NULL,
+                         run_name TEXT NOT NULL,
+                         step INTEGER NOT NULL,
+                         metrics TEXT NOT NULL
+                     )
+                 """)
+                 cursor.execute(
+                     """
+                     CREATE INDEX IF NOT EXISTS idx_metrics_run_step
+                     ON metrics(run_name, step)
+                     """
+                 )
+                 conn.commit()
+         return db_path
+
+     @staticmethod
+     def export_to_parquet():
+         """
+         Exports all projects' DB files as Parquet under the same path but with extension ".parquet".
+         """
+         # don't attempt to export (potentially wrong/blank) data before importing for the first time
+         if not SQLiteStorage._dataset_import_attempted:
+             return
+         all_paths = os.listdir(TRACKIO_DIR)
+         db_paths = [f for f in all_paths if f.endswith(".db")]
+         for db_path in db_paths:
+             db_path = TRACKIO_DIR / db_path
+             parquet_path = db_path.with_suffix(".parquet")
+             if (not parquet_path.exists()) or (
+                 db_path.stat().st_mtime > parquet_path.stat().st_mtime
+             ):
+                 with sqlite3.connect(db_path) as conn:
+                     df = pd.read_sql("SELECT * from metrics", conn)
+                     # break out the single JSON metrics column into individual columns
+                     metrics = df["metrics"].copy()
+                     metrics = pd.DataFrame(
+                         metrics.apply(json.loads).values.tolist(), index=df.index
+                     )
+                     del df["metrics"]
+                     for col in metrics.columns:
+                         df[col] = metrics[col]
+                     df.to_parquet(parquet_path)
+
+     @staticmethod
+     def import_from_parquet():
+         """
+         Imports to all DB files that have matching files under the same path but with extension ".parquet".
+         """
+         all_paths = os.listdir(TRACKIO_DIR)
+         parquet_paths = [f for f in all_paths if f.endswith(".parquet")]
+         for parquet_path in parquet_paths:
+             parquet_path = TRACKIO_DIR / parquet_path
+             db_path = parquet_path.with_suffix(".db")
+             df = pd.read_parquet(parquet_path)
+             with sqlite3.connect(db_path) as conn:
+                 # fix up df to have a single JSON metrics column
+                 if "metrics" not in df.columns:
+                     # separate other columns from metrics
+                     metrics = df.copy()
+                     other_cols = ["id", "timestamp", "run_name", "step"]
+                     df = df[other_cols]
+                     for col in other_cols:
+                         del metrics[col]
+                     # combine them all into a single metrics col
+                     metrics = json.loads(metrics.to_json(orient="records"))
+                     df["metrics"] = [json.dumps(row) for row in metrics]
+                 df.to_sql("metrics", conn, if_exists="replace", index=False)
+
+     @staticmethod
+     def get_scheduler():
+         """
+         Get the scheduler for the database based on the environment variables.
+         This applies to both local and Spaces.
+         """
+         with SQLiteStorage._scheduler_lock:
+             if SQLiteStorage._current_scheduler is not None:
+                 return SQLiteStorage._current_scheduler
+             hf_token = os.environ.get("HF_TOKEN")
+             dataset_id = os.environ.get("TRACKIO_DATASET_ID")
+             space_repo_name = os.environ.get("SPACE_REPO_NAME")
+             if dataset_id is None or space_repo_name is None:
+                 scheduler = DummyCommitScheduler()
+             else:
+                 scheduler = CommitScheduler(
+                     repo_id=dataset_id,
+                     repo_type="dataset",
+                     folder_path=TRACKIO_DIR,
+                     private=True,
+                     allow_patterns=["*.parquet", "media/**/*"],
+                     squash_history=True,
+                     token=hf_token,
+                     on_before_commit=SQLiteStorage.export_to_parquet,
+                 )
+             SQLiteStorage._current_scheduler = scheduler
+             return scheduler
+
+     @staticmethod
+     def log(project: str, run: str, metrics: dict, step: int | None = None):
+         """
+         Safely log metrics to the database. Before logging, this method will ensure the database exists
+         and is set up with the correct tables. It also uses the scheduler to lock the database so
+         that there is no race condition when logging / syncing to the Hugging Face Dataset.
+         """
+         db_path = SQLiteStorage.init_db(project)
+
+         with SQLiteStorage.get_scheduler().lock:
+             with SQLiteStorage._get_connection(db_path) as conn:
+                 cursor = conn.cursor()
+
+                 cursor.execute(
+                     """
+                     SELECT MAX(step)
+                     FROM metrics
+                     WHERE run_name = ?
+                     """,
+                     (run,),
+                 )
+                 last_step = cursor.fetchone()[0]
+                 if step is None:
+                     current_step = 0 if last_step is None else last_step + 1
+                 else:
+                     current_step = step
+
+                 current_timestamp = datetime.now().isoformat()
+
+                 cursor.execute(
+                     """
+                     INSERT INTO metrics
+                     (timestamp, run_name, step, metrics)
+                     VALUES (?, ?, ?, ?)
+                     """,
+                     (
+                         current_timestamp,
+                         run,
+                         current_step,
+                         json.dumps(metrics),
+                     ),
+                 )
+                 conn.commit()
+
+     @staticmethod
+     def bulk_log(
+         project: str,
+         run: str,
+         metrics_list: list[dict],
+         steps: list[int] | None = None,
+         timestamps: list[str] | None = None,
+     ):
+         """Bulk log metrics to the database with specified steps and timestamps."""
+         if not metrics_list:
+             return
+
+         if timestamps is None:
+             timestamps = [datetime.now().isoformat()] * len(metrics_list)
+
+         db_path = SQLiteStorage.init_db(project)
+         with SQLiteStorage.get_scheduler().lock:
+             with SQLiteStorage._get_connection(db_path) as conn:
+                 cursor = conn.cursor()
+
+                 if steps is None:
+                     steps = list(range(len(metrics_list)))
+                 elif any(s is None for s in steps):
+                     cursor.execute(
+                         "SELECT MAX(step) FROM metrics WHERE run_name = ?", (run,)
+                     )
+                     last_step = cursor.fetchone()[0]
+                     current_step = 0 if last_step is None else last_step + 1
+
+                     processed_steps = []
+                     for step in steps:
+                         if step is None:
+                             processed_steps.append(current_step)
+                             current_step += 1
+                         else:
+                             processed_steps.append(step)
+                     steps = processed_steps
+
+                 if len(metrics_list) != len(steps) or len(metrics_list) != len(
+                     timestamps
+                 ):
+                     raise ValueError(
+                         "metrics_list, steps, and timestamps must have the same length"
+                     )
+
+                 data = []
+                 for i, metrics in enumerate(metrics_list):
+                     data.append(
+                         (
+                             timestamps[i],
+                             run,
+                             steps[i],
+                             json.dumps(metrics),
+                         )
+                     )
+
+                 cursor.executemany(
+                     """
+                     INSERT INTO metrics
+                     (timestamp, run_name, step, metrics)
+                     VALUES (?, ?, ?, ?)
+                     """,
+                     data,
+                 )
+                 conn.commit()
+
+     @staticmethod
+     def get_logs(project: str, run: str) -> list[dict]:
+         """Retrieve logs for a specific run. Logs include the step count (int) and the timestamp (datetime object)."""
+         db_path = SQLiteStorage.get_project_db_path(project)
+         if not db_path.exists():
+             return []
+
+         with SQLiteStorage._get_connection(db_path) as conn:
+             cursor = conn.cursor()
+             cursor.execute(
+                 """
+                 SELECT timestamp, step, metrics
+                 FROM metrics
+                 WHERE run_name = ?
+                 ORDER BY timestamp
+                 """,
+                 (run,),
+             )
+
+             rows = cursor.fetchall()
+             results = []
+             for row in rows:
+                 metrics = json.loads(row["metrics"])
+                 metrics["timestamp"] = row["timestamp"]
+                 metrics["step"] = row["step"]
+                 results.append(metrics)
+             return results
+
+     @staticmethod
+     def load_from_dataset():
+         dataset_id = os.environ.get("TRACKIO_DATASET_ID")
+         space_repo_name = os.environ.get("SPACE_REPO_NAME")
+         if dataset_id is not None and space_repo_name is not None:
+             hfapi = hf.HfApi()
+             updated = False
+             if not TRACKIO_DIR.exists():
+                 TRACKIO_DIR.mkdir(parents=True, exist_ok=True)
+             with SQLiteStorage.get_scheduler().lock:
+                 try:
+                     files = hfapi.list_repo_files(dataset_id, repo_type="dataset")
+                     for file in files:
+                         # Download parquet and media assets
+                         if not (file.endswith(".parquet") or file.startswith("media/")):
+                             continue
+                         hf.hf_hub_download(
+                             dataset_id, file, repo_type="dataset", local_dir=TRACKIO_DIR
+                         )
+                         updated = True
+                 except hf.errors.EntryNotFoundError:
+                     pass
+                 except hf.errors.RepositoryNotFoundError:
+                     pass
+             if updated:
+                 SQLiteStorage.import_from_parquet()
+         SQLiteStorage._dataset_import_attempted = True
+
+     @staticmethod
+     def get_projects() -> list[str]:
+         """
+         Get list of all projects by scanning the database files in the trackio directory.
+         """
+         if not SQLiteStorage._dataset_import_attempted:
+             SQLiteStorage.load_from_dataset()
+
+         projects: set[str] = set()
+         if not TRACKIO_DIR.exists():
+             return []
+
+         for db_file in TRACKIO_DIR.glob("*.db"):
+             project_name = db_file.stem
+             projects.add(project_name)
+         return sorted(projects)
+
+     @staticmethod
+     def get_runs(project: str) -> list[str]:
+         """Get list of all runs for a project."""
+         db_path = SQLiteStorage.get_project_db_path(project)
+         if not db_path.exists():
+             return []
+
+         with SQLiteStorage._get_connection(db_path) as conn:
+             cursor = conn.cursor()
+             cursor.execute(
+                 "SELECT DISTINCT run_name FROM metrics",
+             )
+             return [row[0] for row in cursor.fetchall()]
+
+     @staticmethod
+     def get_max_steps_for_runs(project: str, runs: list[str]) -> dict[str, int]:
+         """Efficiently get the maximum step for multiple runs in a single query."""
+         db_path = SQLiteStorage.get_project_db_path(project)
+         if not db_path.exists():
+             return {run: 0 for run in runs}
+
+         with SQLiteStorage._get_connection(db_path) as conn:
+             cursor = conn.cursor()
+             placeholders = ",".join("?" * len(runs))
+             cursor.execute(
+                 f"""
+                 SELECT run_name, MAX(step) as max_step
+                 FROM metrics
+                 WHERE run_name IN ({placeholders})
+                 GROUP BY run_name
+                 """,
+                 runs,
+             )
+
+             results = {run: 0 for run in runs}  # Default to 0 for runs with no data
+             for row in cursor.fetchall():
+                 results[row["run_name"]] = row["max_step"]
+
+             return results
+
+     def finish(self):
+         """Cleanup when run is finished."""
+         pass
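Because every method on `SQLiteStorage` is a `@staticmethod`, the storage layer can be exercised directly without constructing anything; a hypothetical local session (project and run names made up, writing under the default `TRACKIO_DIR`) might look like:

from trackio.sqlite_storage import SQLiteStorage

# step is optional: omitted steps auto-increment per run (see `log` above)
SQLiteStorage.log(project="demo", run="run-1", metrics={"loss": 0.9})
SQLiteStorage.log(project="demo", run="run-1", metrics={"loss": 0.7})

# bulk insert with explicit steps, written with a single executemany call
SQLiteStorage.bulk_log(
    project="demo",
    run="run-2",
    metrics_list=[{"loss": 0.8}, {"loss": 0.6}],
    steps=[0, 1],
)

print(SQLiteStorage.get_runs("demo"))           # ['run-1', 'run-2']
print(SQLiteStorage.get_logs("demo", "run-1"))  # metrics dicts with "timestamp" and "step" merged in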
table.py ADDED
@@ -0,0 +1,55 @@
+ from typing import Any, Literal, Optional, Union
+
+ from pandas import DataFrame
+
+
+ class Table:
+     """
+     Initializes a Table object.
+
+     Args:
+         columns (`list[str]`, *optional*, defaults to `None`):
+             Names of the columns in the table. Optional if `data` is provided. Not
+             expected if `dataframe` is provided. Currently ignored.
+         data (`list[list[Any]]`, *optional*, defaults to `None`):
+             2D row-oriented array of values.
+         dataframe (`pandas.DataFrame`, *optional*, defaults to `None`):
+             DataFrame object used to create the table. When set, `data` and `columns`
+             arguments are ignored.
+         rows (`list[list[Any]]`, *optional*, defaults to `None`):
+             Currently ignored.
+         optional (`bool` or `list[bool]`, *optional*, defaults to `True`):
+             Currently ignored.
+         allow_mixed_types (`bool`, *optional*, defaults to `False`):
+             Currently ignored.
+         log_mode (`Literal["IMMUTABLE", "MUTABLE", "INCREMENTAL"]` or `None`, *optional*, defaults to `"IMMUTABLE"`):
+             Currently ignored.
+     """
+
+     TYPE = "trackio.table"
+
+     def __init__(
+         self,
+         columns: Optional[list[str]] = None,
+         data: Optional[list[list[Any]]] = None,
+         dataframe: Optional[DataFrame] = None,
+         rows: Optional[list[list[Any]]] = None,
+         optional: Union[bool, list[bool]] = True,
+         allow_mixed_types: bool = False,
+         log_mode: Optional[
+             Literal["IMMUTABLE", "MUTABLE", "INCREMENTAL"]
+         ] = "IMMUTABLE",
+     ):
+         # TODO: implement support for columns, dtype, optional, allow_mixed_types, and log_mode.
+         # For now (like `rows`) they are included for API compat but don't do anything.
+
+         if dataframe is None:
+             self.data = data
+         else:
+             self.data = dataframe.to_dict(orient="records")
+
+     def _to_dict(self):
+         return {
+             "_type": self.TYPE,
+             "_value": self.data,
+         }
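For reference, a short sketch of how `Table` round-trips a DataFrame into the serialized payload that `Run._replace_tables` substitutes before logging (the values below are made up):

import pandas as pd
from trackio.table import Table

df = pd.DataFrame({"epoch": [1, 2], "loss": [0.9, 0.5]})
table = Table(dataframe=df)

# to_dict(orient="records") stores rows as a list of dicts, so _to_dict() yields:
# {"_type": "trackio.table", "_value": [{"epoch": 1, "loss": 0.9}, {"epoch": 2, "loss": 0.5}]}
print(table._to_dict())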
typehints.py ADDED
@@ -0,0 +1,17 @@
+ from typing import Any, TypedDict
+
+ from gradio import FileData
+
+
+ class LogEntry(TypedDict):
+     project: str
+     run: str
+     metrics: dict[str, Any]
+     step: int | None
+
+
+ class UploadEntry(TypedDict):
+     project: str
+     run: str
+     step: int | None
+     uploaded_file: FileData
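These TypedDicts describe the payloads that `Run` queues and that the `/bulk_log` and `/bulk_upload_media` endpoints in ui.py consume; constructing one is just building a plain dict (example values are made up):

from trackio.typehints import LogEntry

entry: LogEntry = {
    "project": "demo",
    "run": "run-1",
    "metrics": {"loss": 0.42},
    "step": None,  # None lets the server auto-increment the step
}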
ui.py ADDED
@@ -0,0 +1,746 @@
+ import os
+ import re
+ import shutil
+ from typing import Any
+
+ import gradio as gr
+ import huggingface_hub as hf
+ import numpy as np
+ import pandas as pd
+
+ HfApi = hf.HfApi()
+
+ try:
+     import trackio.utils as utils
+     from trackio.file_storage import FileStorage
+     from trackio.media import TrackioImage
+     from trackio.sqlite_storage import SQLiteStorage
+     from trackio.table import Table
+     from trackio.typehints import LogEntry, UploadEntry
+ except:  # noqa: E722
+     import utils
+     from file_storage import FileStorage
+     from media import TrackioImage
+     from sqlite_storage import SQLiteStorage
+     from table import Table
+     from typehints import LogEntry, UploadEntry
+
+
+ def get_project_info() -> str | None:
+     dataset_id = os.environ.get("TRACKIO_DATASET_ID")
+     space_id = os.environ.get("SPACE_ID")
+     persistent_storage_enabled = os.environ.get(
+         "PERSISTANT_STORAGE_ENABLED"
+     )  # Space env name has a typo
+     if persistent_storage_enabled:
+         return "&#10024; Persistent Storage is enabled, logs are stored directly in this Space."
+     if dataset_id:
+         sync_status = utils.get_sync_status(SQLiteStorage.get_scheduler())
+         upgrade_message = f"New changes are synced every 5 min <span class='info-container'><input type='checkbox' class='info-checkbox' id='upgrade-info'><label for='upgrade-info' class='info-icon'>&#9432;</label><span class='info-expandable'> To avoid losing data between syncs, <a href='https://huggingface.co/spaces/{space_id}/settings' class='accent-link'>click here</a> to open this Space's settings and add Persistent Storage.</span></span>"
+         if sync_status is not None:
+             info = f"&#x21bb; Backed up {sync_status} min ago to <a href='https://huggingface.co/datasets/{dataset_id}' target='_blank' class='accent-link'>{dataset_id}</a> | {upgrade_message}"
+         else:
+             info = f"&#x21bb; Not backed up yet to <a href='https://huggingface.co/datasets/{dataset_id}' target='_blank' class='accent-link'>{dataset_id}</a> | {upgrade_message}"
+         return info
+     return None
+
+
+ def get_projects(request: gr.Request):
+     projects = SQLiteStorage.get_projects()
+     if project := request.query_params.get("project"):
+         interactive = False
+     else:
+         interactive = True
+         project = projects[0] if projects else None
+
+     return gr.Dropdown(
+         label="Project",
+         choices=projects,
+         value=project,
+         allow_custom_value=True,
+         interactive=interactive,
+         info=get_project_info(),
+     )
+
+
+ def get_runs(project) -> list[str]:
+     if not project:
+         return []
+     return SQLiteStorage.get_runs(project)
+
+
+ def get_available_metrics(project: str, runs: list[str]) -> list[str]:
+     """Get all available metrics across all runs for x-axis selection."""
+     if not project or not runs:
+         return ["step", "time"]
+
+     all_metrics = set()
+     for run in runs:
+         metrics = SQLiteStorage.get_logs(project, run)
+         if metrics:
+             df = pd.DataFrame(metrics)
+             numeric_cols = df.select_dtypes(include="number").columns
+             numeric_cols = [c for c in numeric_cols if c not in utils.RESERVED_KEYS]
+             all_metrics.update(numeric_cols)
+
+     all_metrics.add("step")
+     all_metrics.add("time")
+
+     sorted_metrics = utils.sort_metrics_by_prefix(list(all_metrics))
+
+     result = ["step", "time"]
+     for metric in sorted_metrics:
+         if metric not in result:
+             result.append(metric)
+
+     return result
+
+
+ def extract_images(logs: list[dict]) -> dict[str, list[TrackioImage]]:
+     image_data = {}
+     logs = sorted(logs, key=lambda x: x.get("step", 0))
+     for log in logs:
+         for key, value in log.items():
+             if isinstance(value, dict) and value.get("_type") == TrackioImage.TYPE:
+                 if key not in image_data:
+                     image_data[key] = []
+                 try:
+                     image_data[key].append(TrackioImage._from_dict(value))
+                 except Exception as e:
+                     print(f"Image not currently available: {key}: {e}")
+     return image_data
+
+
+ def load_run_data(
+     project: str | None,
+     run: str | None,
+     smoothing: bool,
+     x_axis: str,
+     log_scale: bool = False,
+ ) -> tuple[pd.DataFrame, dict]:
+     if not project or not run:
+         return None, None
+
+     logs = SQLiteStorage.get_logs(project, run)
+     if not logs:
+         return None, None
+
+     images = extract_images(logs)
+     df = pd.DataFrame(logs)
+
+     if "step" not in df.columns:
+         df["step"] = range(len(df))
+
+     if x_axis == "time" and "timestamp" in df.columns:
+         df["timestamp"] = pd.to_datetime(df["timestamp"])
+         first_timestamp = df["timestamp"].min()
+         df["time"] = (df["timestamp"] - first_timestamp).dt.total_seconds()
+         x_column = "time"
+     elif x_axis == "step":
+         x_column = "step"
+     else:
+         x_column = x_axis
+
+     if log_scale and x_column in df.columns:
+         x_vals = df[x_column]
+         if (x_vals <= 0).any():
+             df[x_column] = np.log10(np.maximum(x_vals, 0) + 1)
+         else:
+             df[x_column] = np.log10(x_vals)
+
+     if smoothing:
+         numeric_cols = df.select_dtypes(include="number").columns
+         numeric_cols = [c for c in numeric_cols if c not in utils.RESERVED_KEYS]
+
+         df_original = df.copy()
+         df_original["run"] = f"{run}_original"
+         df_original["data_type"] = "original"
+
+         df_smoothed = df.copy()
+         window_size = max(3, min(10, len(df) // 10))  # Adaptive window size
+         df_smoothed[numeric_cols] = (
+             df_smoothed[numeric_cols]
+             .rolling(window=window_size, center=True, min_periods=1)
+             .mean()
+         )
+         df_smoothed["run"] = f"{run}_smoothed"
+         df_smoothed["data_type"] = "smoothed"
+
+         combined_df = pd.concat([df_original, df_smoothed], ignore_index=True)
+         combined_df["x_axis"] = x_column
+         return combined_df, images
+     else:
+         df["run"] = run
+         df["data_type"] = "original"
+         df["x_axis"] = x_column
+         return df, images
+
+
+ def update_runs(project, filter_text, user_interacted_with_runs=False):
+     if project is None:
+         runs = []
+         num_runs = 0
+     else:
+         runs = get_runs(project)
+         num_runs = len(runs)
+         if filter_text:
+             runs = [r for r in runs if filter_text in r]
+     if not user_interacted_with_runs:
+         return gr.CheckboxGroup(choices=runs, value=runs), gr.Textbox(
+             label=f"Runs ({num_runs})"
+         )
+     else:
+         return gr.CheckboxGroup(choices=runs), gr.Textbox(label=f"Runs ({num_runs})")
+
+
+ def filter_runs(project, filter_text):
+     runs = get_runs(project)
+     runs = [r for r in runs if filter_text in r]
+     return gr.CheckboxGroup(choices=runs, value=runs)
+
+
+ def update_x_axis_choices(project, runs):
+     """Update x-axis dropdown choices based on available metrics."""
+     available_metrics = get_available_metrics(project, runs)
+     return gr.Dropdown(
+         label="X-axis",
+         choices=available_metrics,
+         value="step",
+     )
+
+
+ def toggle_timer(cb_value):
+     if cb_value:
+         return gr.Timer(active=True)
+     else:
+         return gr.Timer(active=False)
+
+
+ def check_auth(hf_token: str | None) -> None:
+     if os.getenv("SYSTEM") == "spaces":  # if we are running in Spaces
+         # check auth token passed in
+         if hf_token is None:
+             raise PermissionError(
+                 "Expected a HF_TOKEN to be provided when logging to a Space"
+             )
+         who = HfApi.whoami(hf_token)
+         access_token = who["auth"]["accessToken"]
+         owner_name = os.getenv("SPACE_AUTHOR_NAME")
+         repo_name = os.getenv("SPACE_REPO_NAME")
+         # make sure the token user is either the author of the space,
+         # or is a member of an org that is the author.
+         orgs = [o["name"] for o in who["orgs"]]
+         if owner_name != who["name"] and owner_name not in orgs:
+             raise PermissionError(
+                 "Expected the provided hf_token to be the user owner of the space, or be a member of the org owner of the space"
+             )
+         # reject fine-grained tokens without specific repo access
+         if access_token["role"] == "fineGrained":
+             matched = False
+             for item in access_token["fineGrained"]["scoped"]:
+                 if (
+                     item["entity"]["type"] == "space"
+                     and item["entity"]["name"] == f"{owner_name}/{repo_name}"
+                     and "repo.write" in item["permissions"]
+                 ):
+                     matched = True
+                     break
+                 if (
+                     (
+                         item["entity"]["type"] == "user"
+                         or item["entity"]["type"] == "org"
+                     )
+                     and item["entity"]["name"] == owner_name
+                     and "repo.write" in item["permissions"]
+                 ):
+                     matched = True
+                     break
+             if not matched:
+                 raise PermissionError(
+                     "Expected the provided hf_token with fine grained permissions to provide write access to the space"
+                 )
+         # reject read-only tokens
+         elif access_token["role"] != "write":
+             raise PermissionError(
+                 "Expected the provided hf_token to provide write permissions"
+             )
+
+
+ def upload_db_to_space(
+     project: str, uploaded_db: gr.FileData, hf_token: str | None
+ ) -> None:
+     check_auth(hf_token)
+     db_project_path = SQLiteStorage.get_project_db_path(project)
+     if os.path.exists(db_project_path):
+         raise gr.Error(
+             f"Trackio database file already exists for project {project}, cannot overwrite."
+         )
+     os.makedirs(os.path.dirname(db_project_path), exist_ok=True)
+     shutil.copy(uploaded_db["path"], db_project_path)
+
+
+ def bulk_upload_media(uploads: list[UploadEntry], hf_token: str | None) -> None:
+     check_auth(hf_token)
+     for upload in uploads:
+         media_path = FileStorage.init_project_media_path(
+             upload["project"], upload["run"], upload["step"]
+         )
+         shutil.copy(upload["uploaded_file"]["path"], media_path)
+
+
+ def log(
+     project: str,
+     run: str,
+     metrics: dict[str, Any],
+     step: int | None,
+     hf_token: str | None,
+ ) -> None:
+     check_auth(hf_token)
+     SQLiteStorage.log(project=project, run=run, metrics=metrics, step=step)
+
+
+ def bulk_log(
+     logs: list[LogEntry],
+     hf_token: str | None,
+ ) -> None:
+     check_auth(hf_token)
+
+     logs_by_run = {}
+     for log_entry in logs:
+         key = (log_entry["project"], log_entry["run"])
+         if key not in logs_by_run:
+             logs_by_run[key] = {"metrics": [], "steps": []}
+         logs_by_run[key]["metrics"].append(log_entry["metrics"])
+         logs_by_run[key]["steps"].append(log_entry.get("step"))
+
+     for (project, run), data in logs_by_run.items():
+         SQLiteStorage.bulk_log(
+             project=project,
+             run=run,
+             metrics_list=data["metrics"],
+             steps=data["steps"],
+         )
+
+
+ def filter_metrics_by_regex(metrics: list[str], filter_pattern: str) -> list[str]:
+     """
+     Filter metrics using regex pattern.
+
+     Args:
+         metrics: List of metric names to filter
+         filter_pattern: Regex pattern to match against metric names
+
+     Returns:
+         List of metric names that match the pattern
+     """
+     if not filter_pattern.strip():
+         return metrics
+
+     try:
+         pattern = re.compile(filter_pattern, re.IGNORECASE)
+         return [metric for metric in metrics if pattern.search(metric)]
+     except re.error:
+         return [
+             metric for metric in metrics if filter_pattern.lower() in metric.lower()
+         ]
+
+
+ def configure(request: gr.Request):
+     sidebar_param = request.query_params.get("sidebar")
+     match sidebar_param:
+         case "collapsed":
+             sidebar = gr.Sidebar(open=False, visible=True)
+         case "hidden":
+             sidebar = gr.Sidebar(open=False, visible=False)
+         case _:
+             sidebar = gr.Sidebar(open=True, visible=True)
+
+     if metrics := request.query_params.get("metrics"):
+         return metrics.split(","), sidebar
+     else:
+         return [], sidebar
+
+
+ def create_image_section(images_by_run: dict[str, dict[str, list[TrackioImage]]]):
+     with gr.Accordion(label="media"):
+         with gr.Group(elem_classes=("media-group")):
+             for run, images_by_key in images_by_run.items():
+                 with gr.Tab(label=run, elem_classes=("media-tab")):
+                     for key, images in images_by_key.items():
+                         gr.Gallery(
+                             [(image._pil, image.caption) for image in images],
+                             label=key,
+                             columns=6,
+                             elem_classes=("media-gallery"),
+                         )
+
+
+ css = """
+ #run-cb .wrap { gap: 2px; }
+ #run-cb .wrap label {
+     line-height: 1;
+     padding: 6px;
+ }
+ .logo-light { display: block; }
+ .logo-dark { display: none; }
+ .dark .logo-light { display: none; }
+ .dark .logo-dark { display: block; }
+ .dark .caption-label { color: white; }
+
+ .info-container {
+     position: relative;
+     display: inline;
+ }
+ .info-checkbox {
+     position: absolute;
+     opacity: 0;
+     pointer-events: none;
+ }
+ .info-icon {
+     border-bottom: 1px dotted;
+     cursor: pointer;
+     user-select: none;
+     color: var(--color-accent);
+ }
+ .info-expandable {
+     display: none;
+     opacity: 0;
+     transition: opacity 0.2s ease-in-out;
+ }
+ .info-checkbox:checked ~ .info-expandable {
+     display: inline;
+     opacity: 1;
+ }
+ .info-icon:hover { opacity: 0.8; }
+ .accent-link { font-weight: bold; }
+
+ .media-gallery { max-height: 325px; }
+ .media-group, .media-group > div { background: none; }
+ .media-group .tabs { padding: 0.5em; }
+ """
+
+ with gr.Blocks(theme="citrus", title="Trackio Dashboard", css=css) as demo:
+     with gr.Sidebar(open=False) as sidebar:
+         logo = gr.Markdown(
+             f"""
+             <img src='/gradio_api/file={utils.TRACKIO_LOGO_DIR}/trackio_logo_type_light_transparent.png' width='80%' class='logo-light'>
+             <img src='/gradio_api/file={utils.TRACKIO_LOGO_DIR}/trackio_logo_type_dark_transparent.png' width='80%' class='logo-dark'>
+             """
+         )
+         project_dd = gr.Dropdown(label="Project", allow_custom_value=True)
+         run_tb = gr.Textbox(label="Runs", placeholder="Type to filter...")
+         run_cb = gr.CheckboxGroup(
+             label="Runs", choices=[], interactive=True, elem_id="run-cb"
+         )
+         gr.HTML("<hr>")
+         realtime_cb = gr.Checkbox(label="Refresh metrics realtime", value=True)
+         smoothing_cb = gr.Checkbox(label="Smooth metrics", value=True)
+         x_axis_dd = gr.Dropdown(
+             label="X-axis",
+             choices=["step", "time"],
+             value="step",
+         )
+         log_scale_cb = gr.Checkbox(label="Log scale X-axis", value=False)
+         metric_filter_tb = gr.Textbox(
+             label="Metric Filter (regex)",
+             placeholder="e.g., loss|ndcg@10|gpu",
+             value="",
+             info="Filter metrics using regex patterns. Leave empty to show all metrics.",
+         )
+
+     timer = gr.Timer(value=1)
+     metrics_subset = gr.State([])
+     user_interacted_with_run_cb = gr.State(False)
+
+     gr.on([demo.load], fn=configure, outputs=[metrics_subset, sidebar])
+     gr.on(
+         [demo.load],
+         fn=get_projects,
+         outputs=project_dd,
+         show_progress="hidden",
+     )
+     gr.on(
+         [timer.tick],
+         fn=update_runs,
+         inputs=[project_dd, run_tb, user_interacted_with_run_cb],
+         outputs=[run_cb, run_tb],
+         show_progress="hidden",
+     )
+     gr.on(
+         [timer.tick],
+         fn=lambda: gr.Dropdown(info=get_project_info()),
+         outputs=[project_dd],
+         show_progress="hidden",
+     )
+     gr.on(
+         [demo.load, project_dd.change],
+         fn=update_runs,
+         inputs=[project_dd, run_tb],
+         outputs=[run_cb, run_tb],
+         show_progress="hidden",
+     )
+     gr.on(
+         [demo.load, project_dd.change, run_cb.change],
+         fn=update_x_axis_choices,
+         inputs=[project_dd, run_cb],
+         outputs=x_axis_dd,
+         show_progress="hidden",
+     )
+
+     realtime_cb.change(
+         fn=toggle_timer,
+         inputs=realtime_cb,
+         outputs=timer,
+         api_name="toggle_timer",
+     )
+     run_cb.input(
+         fn=lambda: True,
+         outputs=user_interacted_with_run_cb,
+     )
+     run_tb.input(
+         fn=filter_runs,
+         inputs=[project_dd, run_tb],
+         outputs=run_cb,
+     )
+
+     gr.api(
+         fn=upload_db_to_space,
+         api_name="upload_db_to_space",
+     )
+     gr.api(
+         fn=bulk_upload_media,
+         api_name="bulk_upload_media",
+     )
+     gr.api(
+         fn=log,
+         api_name="log",
+     )
+     gr.api(
+         fn=bulk_log,
+         api_name="bulk_log",
+     )
+
+     x_lim = gr.State(None)
+     last_steps = gr.State({})
+
+     def update_x_lim(select_data: gr.SelectData):
+         return select_data.index
+
+     def update_last_steps(project, runs):
+         """Update the last step from all runs to detect when new data is available."""
+         if not project or not runs:
+             return {}
+
+         return SQLiteStorage.get_max_steps_for_runs(project, runs)
+
+     timer.tick(
+         fn=update_last_steps,
+         inputs=[project_dd, run_cb],
+         outputs=last_steps,
+         show_progress="hidden",
+     )
+
+     @gr.render(
+         triggers=[
+             demo.load,
+             run_cb.change,
+             last_steps.change,
+             smoothing_cb.change,
+             x_lim.change,
+             x_axis_dd.change,
+             log_scale_cb.change,
+             metric_filter_tb.change,
+         ],
+         inputs=[
+             project_dd,
+             run_cb,
+             smoothing_cb,
+             metrics_subset,
+             x_lim,
+             x_axis_dd,
+             log_scale_cb,
+             metric_filter_tb,
+         ],
+         show_progress="hidden",
+     )
+     def update_dashboard(
+         project,
+         runs,
+         smoothing,
+         metrics_subset,
+         x_lim_value,
+         x_axis,
+         log_scale,
+         metric_filter,
+     ):
+         dfs = []
+         images_by_run = {}
+         original_runs = runs.copy()
+
+         for run in runs:
+             df, images_by_key = load_run_data(
+                 project, run, smoothing, x_axis, log_scale
+             )
+             if df is not None:
+                 dfs.append(df)
+                 images_by_run[run] = images_by_key
+         if dfs:
+             master_df = pd.concat(dfs, ignore_index=True)
+         else:
+             master_df = pd.DataFrame()
+
+         if master_df.empty:
+             return
+
+         x_column = "step"
+         if dfs and not dfs[0].empty and "x_axis" in dfs[0].columns:
+             x_column = dfs[0]["x_axis"].iloc[0]
+
+         numeric_cols = master_df.select_dtypes(include="number").columns
+         numeric_cols = [c for c in numeric_cols if c not in utils.RESERVED_KEYS]
+         if metrics_subset:
+             numeric_cols = [c for c in numeric_cols if c in metrics_subset]
+
+         if metric_filter and metric_filter.strip():
+             numeric_cols = filter_metrics_by_regex(list(numeric_cols), metric_filter)
+
+         nested_metric_groups = utils.group_metrics_with_subprefixes(list(numeric_cols))
+         color_map = utils.get_color_mapping(original_runs, smoothing)
+
+         metric_idx = 0
+         for group_name in sorted(nested_metric_groups.keys()):
+             group_data = nested_metric_groups[group_name]
+
+             with gr.Accordion(
+                 label=group_name,
+                 open=True,
+                 key=f"accordion-{group_name}",
+                 preserved_by_key=["value", "open"],
+             ):
+                 # Render direct metrics at this level
+                 if group_data["direct_metrics"]:
+                     with gr.Draggable(
+                         key=f"row-{group_name}-direct", orientation="row"
+                     ):
+                         for metric_name in group_data["direct_metrics"]:
+                             metric_df = master_df.dropna(subset=[metric_name])
+                             color = "run" if "run" in metric_df.columns else None
+                             if not metric_df.empty:
+                                 plot = gr.LinePlot(
+                                     utils.downsample(
+                                         metric_df,
+                                         x_column,
+                                         metric_name,
+                                         color,
+                                         x_lim_value,
+                                     ),
+                                     x=x_column,
+                                     y=metric_name,
+                                     y_title=metric_name.split("/")[-1],
+                                     color=color,
+                                     color_map=color_map,
+                                     title=metric_name,
+                                     key=f"plot-{metric_idx}",
+                                     preserved_by_key=None,
+                                     x_lim=x_lim_value,
+                                     show_fullscreen_button=True,
+                                     min_width=400,
+                                 )
+                                 plot.select(
+                                     update_x_lim,
+                                     outputs=x_lim,
+                                     key=f"select-{metric_idx}",
+                                 )
+                                 plot.double_click(
+                                     lambda: None,
+                                     outputs=x_lim,
+                                     key=f"double-{metric_idx}",
+                                 )
+                                 metric_idx += 1
+
+                 # If there are subgroups, create nested accordions
+                 if group_data["subgroups"]:
+                     for subgroup_name in sorted(group_data["subgroups"].keys()):
+                         subgroup_metrics = group_data["subgroups"][subgroup_name]
+
+                         with gr.Accordion(
+                             label=subgroup_name,
+                             open=True,
+                             key=f"accordion-{group_name}-{subgroup_name}",
+                             preserved_by_key=["value", "open"],
+                         ):
+                             with gr.Draggable(key=f"row-{group_name}-{subgroup_name}"):
+                                 for metric_name in subgroup_metrics:
+                                     metric_df = master_df.dropna(subset=[metric_name])
+                                     color = (
+                                         "run" if "run" in metric_df.columns else None
+                                     )
+                                     if not metric_df.empty:
+                                         plot = gr.LinePlot(
+                                             utils.downsample(
+                                                 metric_df,
+                                                 x_column,
+                                                 metric_name,
+                                                 color,
+                                                 x_lim_value,
+                                             ),
+                                             x=x_column,
+                                             y=metric_name,
+                                             y_title=metric_name.split("/")[-1],
+                                             color=color,
+                                             color_map=color_map,
+                                             title=metric_name,
+                                             key=f"plot-{metric_idx}",
+                                             preserved_by_key=None,
+                                             x_lim=x_lim_value,
+                                             show_fullscreen_button=True,
+                                             min_width=400,
+                                         )
+                                         plot.select(
+                                             update_x_lim,
+                                             outputs=x_lim,
+                                             key=f"select-{metric_idx}",
+                                         )
+                                         plot.double_click(
+                                             lambda: None,
+                                             outputs=x_lim,
+                                             key=f"double-{metric_idx}",
+                                         )
+                                         metric_idx += 1
+         if images_by_run and any(any(images) for images in images_by_run.values()):
+             create_image_section(images_by_run)
+
+         table_cols = master_df.select_dtypes(include="object").columns
+         table_cols = [c for c in table_cols if c not in utils.RESERVED_KEYS]
+         if metrics_subset:
+             table_cols = [c for c in table_cols if c in metrics_subset]
+         if metric_filter and metric_filter.strip():
+             table_cols = filter_metrics_by_regex(list(table_cols), metric_filter)
+         if len(table_cols) > 0:
+             with gr.Accordion("tables", open=True):
+                 with gr.Row(key="row"):
+                     for metric_idx, metric_name in enumerate(table_cols):
+                         metric_df = master_df.dropna(subset=[metric_name])
+                         if not metric_df.empty:
+                             value = metric_df[metric_name].iloc[-1]
+                             if (
+                                 isinstance(value, dict)
+                                 and "_type" in value
+                                 and value["_type"] == Table.TYPE
+                             ):
+                                 try:
+                                     df = pd.DataFrame(value["_value"])
+                                     gr.DataFrame(
+                                         df,
+                                         label=f"{metric_name} (latest)",
+                                         key=f"table-{metric_idx}",
+                                         wrap=True,
+                                     )
+                                 except Exception as e:
+                                     gr.Warning(
+                                         f"Column {metric_name} failed to render as a table: {e}"
+                                     )
+
+
+ if __name__ == "__main__":
+     demo.launch(allowed_paths=[utils.TRACKIO_LOGO_DIR], show_api=False, show_error=True)
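Most of ui.py is Gradio wiring, but `filter_metrics_by_regex` is a pure helper that is easy to sanity-check in isolation (metric names below are made up; note that importing `trackio.ui` builds the `demo` Blocks as a side effect):

from trackio.ui import filter_metrics_by_regex

metrics = ["train/loss", "train/acc", "val/loss", "gpu/mem"]

print(filter_metrics_by_regex(metrics, "loss"))     # ['train/loss', 'val/loss']
print(filter_metrics_by_regex(metrics, "^train/"))  # ['train/loss', 'train/acc']
print(filter_metrics_by_regex(metrics, "   "))      # whitespace-only pattern returns all metrics
print(filter_metrics_by_regex(metrics, "train/("))  # invalid regex falls back to substring match: []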
utils.py ADDED
@@ -0,0 +1,568 @@
+ import re
+ import sys
+ import time
+ from pathlib import Path
+ from typing import TYPE_CHECKING
+
+ import huggingface_hub
+ import numpy as np
+ import pandas as pd
+ from huggingface_hub.constants import HF_HOME
+
+ if TYPE_CHECKING:
+     from trackio.commit_scheduler import CommitScheduler
+     from trackio.dummy_commit_scheduler import DummyCommitScheduler
+
+ RESERVED_KEYS = ["project", "run", "timestamp", "step", "time", "metrics"]
+ TRACKIO_DIR = Path(HF_HOME) / "trackio"
+
+ TRACKIO_LOGO_DIR = Path(__file__).parent / "assets"
+
+
+ def generate_readable_name(used_names: list[str], space_id: str | None = None) -> str:
+     """
+     Generates a random, readable name like "dainty-sunset-0".
+     If space_id is provided, generates username-timestamp format instead.
+     """
+     if space_id is not None:
+         username = huggingface_hub.whoami()["name"]
+         timestamp = int(time.time())
+         return f"{username}-{timestamp}"
+     adjectives = [
+         "dainty", "brave", "calm", "eager", "fancy", "gentle", "happy", "jolly",
+         "kind", "lively", "merry", "nice", "proud", "quick", "hugging", "silly",
+         "tidy", "witty", "zealous", "bright", "shy", "bold", "clever", "daring",
+         "elegant", "faithful", "graceful", "honest", "inventive", "jovial", "keen",
+         "lucky", "modest", "noble", "optimistic", "patient", "quirky", "resourceful",
+         "sincere", "thoughtful", "upbeat", "valiant", "warm", "youthful", "zesty",
+         "adventurous", "breezy", "cheerful", "delightful", "energetic", "fearless",
+         "glad", "hopeful", "imaginative", "joyful", "kindly", "luminous",
+         "mysterious", "neat", "outgoing", "playful", "radiant", "spirited",
+         "tranquil", "unique", "vivid", "wise", "zany", "artful", "bubbly",
+         "charming", "dazzling", "earnest", "festive", "gentlemanly", "hearty",
+         "intrepid", "jubilant", "knightly", "lively", "magnetic", "nimble",
+         "orderly", "peaceful", "quick-witted", "robust", "sturdy", "trusty",
+         "upstanding", "vibrant", "whimsical",
+     ]
+     nouns = [
+         "sunset", "forest", "river", "mountain", "breeze", "meadow", "ocean",
+         "valley", "sky", "field", "cloud", "star", "rain", "leaf", "stone",
+         "flower", "bird", "tree", "wave", "trail", "island", "desert", "hill",
+         "lake", "pond", "grove", "canyon", "reef", "bay", "peak", "glade",
+         "marsh", "cliff", "dune", "spring", "brook", "cave", "plain", "ridge",
+         "wood", "blossom", "petal", "root", "branch", "seed", "acorn", "pine",
+         "willow", "cedar", "elm", "falcon", "eagle", "sparrow", "robin", "owl",
+         "finch", "heron", "crane", "duck", "swan", "fox", "wolf", "bear", "deer",
+         "moose", "otter", "beaver", "lynx", "hare", "badger", "butterfly", "bee",
+         "ant", "beetle", "dragonfly", "firefly", "ladybug", "moth", "spider",
+         "worm", "coral", "kelp", "shell", "pebble", "face", "boulder", "cobble",
+         "sand", "wavelet", "tide", "current", "mist",
+     ]
+     number = 0
+     name = f"{adjectives[0]}-{nouns[0]}-{number}"
+     while name in used_names:
+         number += 1
+         adjective = adjectives[number % len(adjectives)]
+         noun = nouns[number % len(nouns)]
+         name = f"{adjective}-{noun}-{number}"
+     return name
+
+
+ def block_except_in_notebook():
+     in_notebook = bool(getattr(sys, "ps1", sys.flags.interactive))
+     if in_notebook:
+         return
+     try:
+         while True:
+             time.sleep(0.1)
+     except (KeyboardInterrupt, OSError):
+         print("Keyboard interruption in main thread... closing dashboard.")
+
+
+ def simplify_column_names(columns: list[str]) -> dict[str, str]:
+     """
+     Simplifies column names to first 10 alphanumeric or "/" characters with unique suffixes.
+
+     Args:
+         columns: List of original column names
+
+     Returns:
+         Dictionary mapping original column names to simplified names
+     """
+     simplified_names = {}
+     used_names = set()
+
+     for col in columns:
+         alphanumeric = re.sub(r"[^a-zA-Z0-9/]", "", col)
+         base_name = alphanumeric[:10] if alphanumeric else f"col_{len(used_names)}"
+
+         final_name = base_name
+         suffix = 1
+         while final_name in used_names:
+             final_name = f"{base_name}_{suffix}"
+             suffix += 1
+
+         simplified_names[col] = final_name
+         used_names.add(final_name)
+
+     return simplified_names
+
+
+ def print_dashboard_instructions(project: str) -> None:
+     """
+     Prints instructions for viewing the Trackio dashboard.
+
+     Args:
+         project: The name of the project to show dashboard for.
+     """
+     YELLOW = "\033[93m"
+     BOLD = "\033[1m"
+     RESET = "\033[0m"
+
+     print("* View dashboard by running in your terminal:")
+     print(f'{BOLD}{YELLOW}trackio show --project "{project}"{RESET}')
+     print(f'* or by running in Python: trackio.show(project="{project}")')
+
+
+ def preprocess_space_and_dataset_ids(
+     space_id: str | None, dataset_id: str | None
+ ) -> tuple[str | None, str | None]:
+     if space_id is not None and "/" not in space_id:
+         username = huggingface_hub.whoami()["name"]
+         space_id = f"{username}/{space_id}"
+     if dataset_id is not None and "/" not in dataset_id:
+         username = huggingface_hub.whoami()["name"]
+         dataset_id = f"{username}/{dataset_id}"
+     if space_id is not None and dataset_id is None:
+         dataset_id = f"{space_id}-dataset"
+     return space_id, dataset_id
+
+
+ def fibo():
+     """Generator for Fibonacci backoff: 1, 1, 2, 3, 5, 8, ..."""
+     a, b = 1, 1
+     while True:
+         yield a
+         a, b = b, a + b
+
+
+ COLOR_PALETTE = [
+     "#3B82F6", "#EF4444", "#10B981", "#F59E0B", "#8B5CF6",
+     "#EC4899", "#06B6D4", "#84CC16", "#F97316", "#6366F1",
+ ]
+
+
+ def get_color_mapping(runs: list[str], smoothing: bool) -> dict[str, str]:
+     """Generate color mapping for runs, with transparency for original data when smoothing is enabled."""
+     color_map = {}
+
+     for i, run in enumerate(runs):
+         base_color = COLOR_PALETTE[i % len(COLOR_PALETTE)]
+
+         if smoothing:
+             color_map[f"{run}_smoothed"] = base_color
+             color_map[f"{run}_original"] = base_color + "4D"
+         else:
+             color_map[run] = base_color
+
+     return color_map
+
+
+ def downsample(
+     df: pd.DataFrame,
+     x: str,
+     y: str,
+     color: str | None,
+     x_lim: tuple[float, float] | None = None,
+ ) -> pd.DataFrame:
+     if df.empty:
+         return df
+
+     columns_to_keep = [x, y]
+     if color is not None and color in df.columns:
+         columns_to_keep.append(color)
+     df = df[columns_to_keep].copy()
+
+     n_bins = 100
+
+     if color is not None and color in df.columns:
+         groups = df.groupby(color)
+     else:
+         groups = [(None, df)]
+
+     downsampled_indices = []
+
+     for _, group_df in groups:
+         if group_df.empty:
+             continue
+
+         group_df = group_df.sort_values(x)
+
+         if x_lim is not None:
+             x_min, x_max = x_lim
+             before_point = group_df[group_df[x] < x_min].tail(1)
+             after_point = group_df[group_df[x] > x_max].head(1)
+             group_df = group_df[(group_df[x] >= x_min) & (group_df[x] <= x_max)]
+         else:
+             before_point = after_point = None
+             x_min = group_df[x].min()
+             x_max = group_df[x].max()
+
+         if before_point is not None and not before_point.empty:
+             downsampled_indices.extend(before_point.index.tolist())
+         if after_point is not None and not after_point.empty:
+             downsampled_indices.extend(after_point.index.tolist())
+
+         if group_df.empty:
+             continue
+
+         if x_min == x_max:
+             min_y_idx = group_df[y].idxmin()
+             max_y_idx = group_df[y].idxmax()
+             if min_y_idx != max_y_idx:
+                 downsampled_indices.extend([min_y_idx, max_y_idx])
+             else:
+                 downsampled_indices.append(min_y_idx)
+             continue
+
+         if len(group_df) < 500:
+             downsampled_indices.extend(group_df.index.tolist())
+             continue
+
+         bins = np.linspace(x_min, x_max, n_bins + 1)
+         group_df["bin"] = pd.cut(
+             group_df[x], bins=bins, labels=False, include_lowest=True
+         )
+
+         for bin_idx in group_df["bin"].dropna().unique():
+             bin_data = group_df[group_df["bin"] == bin_idx]
+             if bin_data.empty:
+                 continue
+
+             min_y_idx = bin_data[y].idxmin()
+             max_y_idx = bin_data[y].idxmax()
+
+             downsampled_indices.append(min_y_idx)
+             if min_y_idx != max_y_idx:
+                 downsampled_indices.append(max_y_idx)
+
+     unique_indices = list(set(downsampled_indices))
+
+     downsampled_df = df.loc[unique_indices].copy()
+     downsampled_df = downsampled_df.sort_values(x).reset_index(drop=True)
+     downsampled_df = downsampled_df.drop(columns=["bin"], errors="ignore")
+
+     return downsampled_df
+
+
+ def sort_metrics_by_prefix(metrics: list[str]) -> list[str]:
+     """
+     Sort metrics by grouping prefixes together for dropdown/list display.
+     Metrics without prefixes come first, then grouped by prefix.
+
+     Args:
+         metrics: List of metric names
+
+     Returns:
+         List of metric names sorted by prefix
+
+     Example:
+         Input: ["train/loss", "loss", "train/acc", "val/loss"]
+         Output: ["loss", "train/acc", "train/loss", "val/loss"]
+     """
+     groups = group_metrics_by_prefix(metrics)
+     result = []
+
+     if "charts" in groups:
+         result.extend(groups["charts"])
+
+     for group_name in sorted(groups.keys()):
+         if group_name != "charts":
+             result.extend(groups[group_name])
+
+     return result
+
+
+ def group_metrics_by_prefix(metrics: list[str]) -> dict[str, list[str]]:
+     """
+     Group metrics by their prefix. Metrics without a prefix go to the 'charts' group.
+
+     Args:
+         metrics: List of metric names
+
+     Returns:
+         Dictionary with prefix names as keys and lists of metrics as values
+
+     Example:
+         Input: ["loss", "accuracy", "train/loss", "train/acc", "val/loss"]
+         Output: {
+             "charts": ["accuracy", "loss"],
+             "train": ["train/acc", "train/loss"],
+             "val": ["val/loss"]
+         }
+     """
+     no_prefix = []
+     with_prefix = []
+
+     for metric in metrics:
+         if "/" in metric:
+             with_prefix.append(metric)
+         else:
+             no_prefix.append(metric)
+
+     no_prefix.sort()
+
+     prefix_groups = {}
+     for metric in with_prefix:
+         prefix = metric.split("/")[0]
+         if prefix not in prefix_groups:
+             prefix_groups[prefix] = []
+         prefix_groups[prefix].append(metric)
+
+     for prefix in prefix_groups:
+         prefix_groups[prefix].sort()
+
+     groups = {}
+     if no_prefix:
+         groups["charts"] = no_prefix
+
+     for prefix in sorted(prefix_groups.keys()):
+         groups[prefix] = prefix_groups[prefix]
+
+     return groups
+
+
+ def group_metrics_with_subprefixes(metrics: list[str]) -> dict:
+     """
+     Group metrics with simple 2-level nested structure detection.
+
+     Returns a dictionary where each prefix group can have:
+     - direct_metrics: list of metrics at this level (e.g., "train/acc")
+     - subgroups: dict of subgroup name -> list of metrics (e.g., "loss" -> ["train/loss/norm", "train/loss/unnorm"])
+
+     Example:
+         Input: ["loss", "train/acc", "train/loss/normalized", "train/loss/unnormalized", "val/loss"]
+         Output: {
+             "charts": {
+                 "direct_metrics": ["loss"],
+                 "subgroups": {}
+             },
+             "train": {
+                 "direct_metrics": ["train/acc"],
+                 "subgroups": {
+                     "loss": ["train/loss/normalized", "train/loss/unnormalized"]
+                 }
+             },
+             "val": {
+                 "direct_metrics": ["val/loss"],
+                 "subgroups": {}
+             }
+         }
+     """
+     result = {}
+
+     for metric in metrics:
+         if "/" not in metric:
+             if "charts" not in result:
+                 result["charts"] = {"direct_metrics": [], "subgroups": {}}
+             result["charts"]["direct_metrics"].append(metric)
+         else:
+             parts = metric.split("/")
+             main_prefix = parts[0]
+
+             if main_prefix not in result:
+                 result[main_prefix] = {"direct_metrics": [], "subgroups": {}}
+
+             if len(parts) == 2:
+                 result[main_prefix]["direct_metrics"].append(metric)
+             else:
+                 subprefix = parts[1]
+                 if subprefix not in result[main_prefix]["subgroups"]:
+                     result[main_prefix]["subgroups"][subprefix] = []
+                 result[main_prefix]["subgroups"][subprefix].append(metric)
+
+     for group_data in result.values():
+         group_data["direct_metrics"].sort()
+         for subgroup_metrics in group_data["subgroups"].values():
+             subgroup_metrics.sort()
+
+     if "charts" in result and not result["charts"]["direct_metrics"]:
+         del result["charts"]
+
+     return result
+
+
+ def get_sync_status(scheduler: "CommitScheduler | DummyCommitScheduler") -> int | None:
+     """Get the sync status from the CommitScheduler as an integer number of minutes, or None if not synced yet."""
+     if getattr(
+         scheduler, "last_push_time", None
+     ):  # DummyCommitScheduler doesn't have last_push_time
+         time_diff = time.time() - scheduler.last_push_time
+         return int(time_diff / 60)
+     else:
+         return None
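The `downsample` helper keeps the per-bin minimum and maximum of y (so spikes survive), plus the nearest point on each side of an active zoom window; a quick sketch of its effect on a large run (synthetic data, made-up column names):

import numpy as np
import pandas as pd

from trackio.utils import downsample

df = pd.DataFrame(
    {
        "step": np.arange(10_000),
        "loss": np.random.rand(10_000),
        "run": "run-1",
    }
)

# At most ~2 points per bin (100 bins) survive, plus one boundary point on
# either side of the x_lim window so zoomed lines don't end abruptly.
small = downsample(df, x="step", y="loss", color="run", x_lim=(1_000, 2_000))
print(len(df), "->", len(small))  # e.g. 10000 -> ~200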
version.txt ADDED
@@ -0,0 +1 @@
+ 0.3.2