Commit 08bea20
Parent: 0af7f58

add api updates

- objaverse_xl/smithsonian.py  +29 -15
- objaverse_xl/thingiverse.py  +218 -0
objaverse_xl/smithsonian.py
CHANGED
@@ -2,7 +2,7 @@ import multiprocessing
 import os
 from functools import partial
 from multiprocessing import Pool
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Tuple
 
 import fsspec
 import pandas as pd
@@ -47,7 +47,9 @@ def load_smithsonian_metadata(download_dir: str = "~/.objaverse") -> pd.DataFrame:
     return df
 
 
-def _download_smithsonian_object(url: str, download_dir: str = "~/.objaverse") -> Optional[str]:
+def _download_smithsonian_object(
+    url: str, download_dir: str = "~/.objaverse"
+) -> Tuple[str, Optional[str]]:
     """Downloads a Smithsonian Object from a URL.
 
     Overwrites the file if it already exists and assumes this was previously checked.
@@ -58,7 +60,9 @@ def _download_smithsonian_object(url: str, download_dir: str = "~/.objaverse") -> Optional[str]:
         Supports all file systems supported by fsspec. Defaults to "~/.objaverse".
 
     Returns:
-        str: ...
+        Tuple[str, Optional[str]]: Tuple of the URL and the path to the downloaded
+            Smithsonian Object. If the Smithsonian Object was not downloaded, the path
+            will be None.
     """
     uid = get_uid_from_str(url)
 
@@ -70,7 +74,7 @@ def _download_smithsonian_object(url: str, download_dir: str = "~/.objaverse") -> Optional[str]:
     # check if the path is valid
     if response.status_code == 404:
         logger.warning(f"404 for {url}")
-        return None
+        return url, None
 
     # write to tmp path so that we don't have a partial file
     tmp_path = f"{path}.tmp"
@@ -81,7 +85,7 @@ def _download_smithsonian_object(url: str, download_dir: str = "~/.objaverse") -> Optional[str]:
     # rename to final path
     fs.rename(tmp_path, path)
 
-    return filename
+    return url, filename
 
 
 def download_smithsonian_objects(
@@ -118,7 +122,9 @@ def download_smithsonian_objects(
 
     # get the existing glb files
     existing_glb_files = fs.glob(os.path.join(objects_dir, "*.glb"), refresh=True)
-    existing_uids = [os.path.basename(file).split(".")[0] for file in existing_glb_files]
+    existing_uids = [
+        os.path.basename(file).split(".")[0] for file in existing_glb_files
+    ]
 
     # find the urls that need to be downloaded
     out = []
@@ -130,10 +136,16 @@ def download_smithsonian_objects(
             urls_to_download.add(url)
         else:
             already_downloaded_urls.add(url)
-            out.append({"download_path": os.path.join(objects_dir, f"{uid}.glb"), "url": url})
+            out.append(
+                {"download_path": os.path.join(objects_dir, f"{uid}.glb"), "url": url}
+            )
 
-    logger.info(f"Found {len(already_downloaded_urls)} Smithsonian Objects already downloaded")
-    logger.info(f"Downloading {len(urls_to_download)} Smithsonian Objects with {processes=}")
+    logger.info(
+        f"Found {len(already_downloaded_urls)} Smithsonian Objects already downloaded"
+    )
+    logger.info(
+        f"Downloading {len(urls_to_download)} Smithsonian Objects with {processes=}"
+    )
 
     if len(urls_to_download) == 0:
         return out
@@ -143,17 +155,19 @@ def download_smithsonian_objects(
            tqdm(
                pool.imap_unordered(
                    partial(_download_smithsonian_object, download_dir=download_dir),
-                   urls_to_download
+                   urls_to_download,
                ),
                total=len(urls_to_download),
                desc="Downloading Smithsonian Objects",
            )
        )
 
-    out.extend(
-        ...
-    )
+    out.extend(
+        [
+            {"download_path": download_path, "url": url}
+            for url, download_path in results
+            if download_path is not None
+        ]
+    )
 
     return out
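The API change above makes _download_smithsonian_object return a (url, path) tuple instead of a bare path, so the pooled caller can keep URL/path pairs together even when imap_unordered scrambles completion order, and download_smithsonian_objects now reports results as dicts with 404s filtered out. A minimal sketch of the new per-object contract, assuming the module import path below; the URL is hypothetical (real ones come from load_smithsonian_metadata()):

from objaverse_xl import smithsonian

# helper now returns a (url, path) tuple; path is None when the server answered 404
url, path = smithsonian._download_smithsonian_object(
    "https://example.com/object.glb"  # hypothetical URL for illustration
)
if path is None:
    print(f"skipped {url}")
else:
    print(f"saved {url} -> {path}")

The bulk call then surfaces the same pairing as {"download_path": ..., "url": ...} dicts, dropping entries whose download failed.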
objaverse_xl/thingiverse.py
ADDED
@@ -0,0 +1,218 @@
+import multiprocessing
+import os
+import time
+from multiprocessing import Pool
+from typing import Dict, List, Optional, Tuple
+
+import fsspec
+import pandas as pd
+import requests
+from loguru import logger
+from tqdm import tqdm
+
+
+def _get_response_with_retries(
+    url: str, max_retries: int = 3, retry_delay: int = 5
+) -> Optional[requests.models.Response]:
+    """Get a response from a URL with retries.
+
+    Args:
+        url (str): The URL to get a response from.
+        max_retries (int, optional): The maximum number of retries. Defaults to 3.
+        retry_delay (int, optional): The delay between retries in seconds. Defaults to 5.
+
+    Returns:
+        Optional[requests.models.Response]: The response from the URL. If there
+            was an error, returns None.
+    """
+    for i in range(max_retries):
+        try:
+            response = requests.get(url, stream=True)
+            # 200 and 404 are definitive answers; any other status is retried
+            if response.status_code not in {200, 404}:
+                time.sleep(retry_delay)
+                continue
+            break
+        except requests.exceptions.ConnectionError:
+            if i < max_retries - 1:  # i.e. not on the last try
+                time.sleep(retry_delay)
+            else:
+                return None
+
+    return response
+
+
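A short sketch of this helper's contract as written, using a hypothetical file ID: None means connection errors exhausted the retries, while a non-200/404 status is retried but the last such response is still returned once the loop ends, so callers should check the status code as well.

response = _get_response_with_retries("https://www.thingiverse.com/download:12345")  # hypothetical ID
if response is None:
    print("connection errors exhausted the retries")
elif response.status_code == 404:
    print("file does not exist")
elif response.status_code != 200:
    print(f"gave up after retries with status {response.status_code}")
else:
    print(f"downloaded {len(response.content)} bytes")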
+def _download_item(item: Dict[str, str], download_dir: str) -> Optional[str]:
+    """Download the given item.
+
+    Args:
+        item (Dict[str, str]): The item to download. It should be a dictionary with the
+            keys "thingId" and "fileId".
+        download_dir (str): The directory to save the files to. Supports all
+            file systems supported by fsspec.
+
+    Returns:
+        Optional[str]: The path to the downloaded file. If there was an error or 404,
+            returns None.
+    """
+    file_id = item["fileId"]
+    thing_id = item["thingId"]
+
+    url = f"https://www.thingiverse.com/download:{file_id}"
+    response = _get_response_with_retries(url)
+
+    if response is None:
+        logger.error(f"{file_id=} Could not get response from {url}")
+        return None
+
+    # Check if the request was successful
+    if response.status_code == 404:
+        logger.error(f"{file_id=} (404) Could not find file with ID")
+        return None
+
+    file_path = os.path.join(download_dir, f"thing-{thing_id}-file-{file_id}.stl")
+    fs, path = fsspec.core.url_to_fs(file_path)
+
+    with fs.open(path, "wb") as file:
+        file.write(response.content)
+
+    return file_path
+
+
+def _parallel_download_item(
+    args: Tuple[Dict[str, str], str]
+) -> Tuple[Dict[str, str], Optional[str]]:
+    # Pool.imap passes a single argument, so the (item, download_dir) pair is
+    # packed into one tuple and unpacked here.
+    item, download_dir = args
+    download_path = _download_item(item=item, download_dir=download_dir)
+    return item, download_path
+
+
+def download_thingiverse_objects(
+    file_ids: Optional[List[str]] = None,
+    processes: Optional[int] = None,
+    download_dir: str = "~/.objaverse",
+) -> List[Dict[str, str]]:
+    """Download the objects from the given list of things and files.
+
+    Args:
+        file_ids (Optional[List[str]]): The list of file IDs to download. If None,
+            downloads all files. Defaults to None.
+        processes (int, optional): The number of processes to use. If None, maps to
+            use all available CPUs using multiprocessing.cpu_count(). Defaults to None.
+        download_dir (str, optional): The directory to save the files to. Supports all
+            file systems supported by fsspec. Defaults to "~/.objaverse".
+
+    Returns:
+        List[Dict[str, str]]: The list of things and files that were downloaded. Each
+            item in the list is a dictionary with the keys "thingId", "fileId",
+            "filePath", and everything else from the annotations.
+    """
+    if processes is None:
+        processes = multiprocessing.cpu_count()
+
+    # get the records of the specified fileIds
+    df = load_annotations(download_dir=download_dir)
+    if file_ids is not None:
+        # build the set only when file_ids is given; set(None) would raise
+        file_ids = set(file_ids)
+        df = df[df["fileId"].isin(file_ids)]
+    things_and_files = df.to_dict(orient="records")
+
+    # create the download directory
+    download_dir = os.path.join(download_dir, "thingiverse")
+    fs, path = fsspec.core.url_to_fs(download_dir)
+    fs.makedirs(path, exist_ok=True)
+
+    # check to filter out files that already exist
+    existing_files = fs.glob(os.path.join(download_dir, "*.stl"), refresh=True)
+    existing_file_ids = set(
+        [os.path.basename(file).split(".")[0].split("-")[-1] for file in existing_files]
+    )
+
+    # filter out existing files
+    items_to_download = []
+    already_downloaded_count = 0
+    out = []
+    for item in things_and_files:
+        if item["fileId"] in existing_file_ids:
+            already_downloaded_count += 1
+            out.append(
+                {
+                    "filePath": os.path.join(
+                        download_dir,
+                        f"thing-{item['thingId']}-file-{item['fileId']}.stl",
+                    ),
+                    **item,
+                }
+            )
+        else:
+            items_to_download.append(item)
+
+    logger.info(f"Found {already_downloaded_count} Thingiverse objects downloaded")
+    logger.info(
+        f"Downloading {len(items_to_download)} Thingiverse objects with {processes=}"
+    )
+    if len(items_to_download) == 0:
+        return out
+
+    # download the files
+    if processes == 1:
+        for item in tqdm(items_to_download):
+            file_path = _download_item(item=item, download_dir=download_dir)
+            out.append({"filePath": file_path, **item})
+    else:
+        args = [(item, download_dir) for item in items_to_download]
+        with Pool(processes=processes) as pool:
+            items_and_file_paths = list(
+                tqdm(
+                    pool.imap(_parallel_download_item, args),
+                    total=len(args),
+                    desc="Downloading Thingiverse Objects",
+                )
+            )
+            out.extend(
+                [
+                    {"filePath": file_path, **item}
+                    for item, file_path in items_and_file_paths
+                ]
+            )
+    return out
+
+
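A sketch of calling the function as defined above, assuming the annotations parquet is reachable; the sampled file IDs come from the annotations rather than being hard-coded:

annotations = load_annotations()
sample_ids = annotations["fileId"].head(n=5).tolist()
results = download_thingiverse_objects(file_ids=sample_ids, processes=1)
for result in results:
    # in this implementation "filePath" can be None when a download failed
    print(result["fileId"], "->", result["filePath"])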
+def load_annotations(download_dir: str = "~/.objaverse") -> pd.DataFrame:
+    """Load the annotations from the given directory.
+
+    Args:
+        download_dir (str, optional): The directory to load the annotations from.
+            Supports all file systems supported by fsspec. Defaults to
+            "~/.objaverse".
+
+    Returns:
+        pd.DataFrame: The annotations, which includes the columns "thingId", "fileId",
+            "filename", and "license".
+    """
+    remote_url = "https://huggingface.co/datasets/allenai/objaverse-xl/resolve/main/thingiverse/thingiverse-objects.parquet"
+    download_path = os.path.join(
+        download_dir, "thingiverse", "thingiverse-objects.parquet"
+    )
+    fs, path = fsspec.core.url_to_fs(download_path)
+
+    if not fs.exists(path):
+        fs.makedirs(os.path.dirname(path), exist_ok=True)
+        logger.info(f"Downloading {remote_url} to {download_path}")
+        response = requests.get(remote_url)
+        response.raise_for_status()
+        with fs.open(path, "wb") as file:
+            file.write(response.content)
+
+    # read the file with pandas and fsspec
+    with fs.open(path, "rb") as f:
+        annotations_df = pd.read_parquet(f)
+
+    return annotations_df
+
+
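Because the annotations carry a "license" column, downloads can be restricted by license before calling the downloader. The license string below is an assumption for illustration, not a value documented in this diff:

annotations = load_annotations()
# hypothetical license value; check annotations["license"].unique() for the real ones
permissive = annotations[annotations["license"] == "Creative Commons - Attribution"]
file_ids = permissive["fileId"].tolist()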
+if __name__ == "__main__":
+    # example usage
+    annotations = load_annotations()
+    file_ids = annotations.head(n=100)["fileId"].tolist()
+    download_thingiverse_objects(file_ids=file_ids, processes=5)