ford442 committed (verified)
Commit cdf4bc3 · 1 Parent(s): a9a8fda

Update app.py

Files changed (1):
  1. app.py +3 -3
app.py CHANGED
@@ -76,12 +76,12 @@ import os
 import torch
 import paramiko
 
-torch.backends.cuda.matmul.allow_tf32 = True
+torch.backends.cuda.matmul.allow_tf32 = False
 torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
 torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
-torch.backends.cudnn.allow_tf32 = True
+torch.backends.cudnn.allow_tf32 = False
 torch.backends.cudnn.deterministic = False
-torch.backends.cudnn.benchmark = True
+torch.backends.cudnn.benchmark = False
 torch.backends.cuda.preferred_blas_library="cublas"
 torch.backends.cuda.preferred_linalg_library="cusolver"
 torch.set_float32_matmul_precision("highest")
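
For context, the three flipped flags all trade speed for numerical strictness: with allow_tf32 set to False, FP32 matmuls and cuDNN convolutions run in full IEEE FP32 instead of TensorFloat-32, and with cudnn.benchmark set to False cuDNN skips its autotuning pass and selects algorithms heuristically. Below is a minimal sketch (not part of the commit) of applying and verifying the stricter configuration; the CUDA availability check and the sample matmul are illustrative assumptions, not code from app.py.

import torch

# Disable TensorFloat-32 so FP32 matmuls and cuDNN convolutions keep full FP32
# precision (mirrors the settings introduced by this commit).
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
# Skip cuDNN's autotuning benchmark pass; algorithm choice stays fixed across runs.
torch.backends.cudnn.benchmark = False
# "highest" tells float32 matmuls to use full FP32 internally.
torch.set_float32_matmul_precision("highest")

print(torch.backends.cuda.matmul.allow_tf32)   # False
print(torch.get_float32_matmul_precision())    # "highest"

if torch.cuda.is_available():
    a = torch.randn(512, 512, device="cuda")
    b = torch.randn(512, 512, device="cuda")
    # With TF32 disabled, this matmul runs in full FP32 on Ampere and newer GPUs.
    c = a @ b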