Spaces: Running on Zero
Commit · b97cb4f
1 Parent(s): 4838762

get more info HF space
app.py CHANGED
@@ -12,6 +12,8 @@ from zea import init_device
 
 from main import Config, init, run
 from utils import load_image
+import torch
+import subprocess
 
 CONFIG_PATH = "configs/semantic_dps.yaml"
 SLIDER_CONFIG_PATH = "configs/slider_params.yaml"
@@ -285,9 +287,30 @@ with gr.Blocks() as demo:
     DEVICE = init_device()
 except:
     print("Could not initialize device using `zea.init_device()`")
-print(f"JAX version: {jax.__version__}")
-print(f"JAX devices: {jax.devices()}")
 print(f"KERAS version: {keras.__version__}")
+try:
+    print(f"JAX version: {jax.__version__}")
+    print(f"JAX devices: {jax.devices()}")
+except Exception as e:
+    print(f"Could not get JAX info: {e}")
+
+try:
+    print(f"PyTorch version: {torch.__version__}")
+    print(f"PyTorch CUDA available: {torch.cuda.is_available()}")
+    print(f"PyTorch CUDA device count: {torch.cuda.device_count()}")
+    print(f"PyTorch devices: {[torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())]}")
+    print(f"PyTorch CUDA version: {torch.version.cuda}")
+    print(f"PyTorch cuDNN version: {torch.backends.cudnn.version()}")
+except Exception as e:
+    print(f"Could not get PyTorch info: {e}")
+
+try:
+    cuda_version = subprocess.getoutput("nvcc --version")
+    print(f"nvcc version:\n{cuda_version}")
+    nvidia_smi = subprocess.getoutput("nvidia-smi")
+    print(f"nvidia-smi output:\n{nvidia_smi}")
+except Exception as e:
+    print(f"Could not get CUDA/nvidia-smi info: {e}")
 
 config, diffusion_model = initialize_model()
 ready_msg = gr.update(
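The added block is best-effort diagnostics: each probe (JAX, PyTorch, CUDA tooling) sits in its own try/except so that a missing backend or driver only logs a message instead of crashing the Space at startup. A minimal standalone sketch of the same pattern, assuming only that jax and torch may or may not be installed (the report_environment helper name is illustrative, not part of the commit):

import subprocess

def report_environment():
    # Best-effort diagnostics: each probe is isolated so one failure
    # does not prevent the remaining information from being printed.
    try:
        import jax
        print(f"JAX version: {jax.__version__}")
        print(f"JAX devices: {jax.devices()}")
    except Exception as e:
        print(f"Could not get JAX info: {e}")

    try:
        import torch
        print(f"PyTorch version: {torch.__version__}")
        print(f"PyTorch CUDA available: {torch.cuda.is_available()}")
    except Exception as e:
        print(f"Could not get PyTorch info: {e}")

    try:
        # subprocess.getoutput captures stdout/stderr and does not raise on
        # a non-zero exit code, so a missing nvidia-smi just shows up as text.
        print(subprocess.getoutput("nvidia-smi"))
    except Exception as e:
        print(f"Could not get nvidia-smi info: {e}")

if __name__ == "__main__":
    report_environment()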