Spaces:
Sleeping
Sleeping
Donald Winkelman
committed on
Commit
·
6dd3836
1
Parent(s):
47d849b
Updating Side-By-Side Space
Browse files- app.py +4 -4
- requirements.txt +1 -2
app.py
CHANGED
@@ -33,7 +33,7 @@ def load_models(progress=None):
|
|
33 |
|
34 |
try:
|
35 |
# Load base model
|
36 |
-
if progress:
|
37 |
progress(0.2, desc="Loading base model... This may take a few minutes")
|
38 |
|
39 |
print(f"Loading base model from {BASE_MODEL_PATH}")
|
@@ -44,7 +44,7 @@ def load_models(progress=None):
|
|
44 |
)
|
45 |
|
46 |
# Load novel model
|
47 |
-
if progress:
|
48 |
progress(0.7, desc="Loading novel model... This may take a few minutes")
|
49 |
|
50 |
print(f"Loading novel model from {NOVEL_MODEL_PATH}")
|
@@ -54,7 +54,7 @@ def load_models(progress=None):
|
|
54 |
n_threads=4 # Number of CPU threads to use
|
55 |
)
|
56 |
|
57 |
-
if progress:
|
58 |
progress(1.0, desc="Models loaded successfully!")
|
59 |
|
60 |
print("Models loaded successfully!")
|
@@ -62,7 +62,7 @@ def load_models(progress=None):
|
|
62 |
|
63 |
except Exception as e:
|
64 |
print(f"Error loading models: {str(e)}")
|
65 |
-
if progress:
|
66 |
progress(1.0, desc=f"Error loading models: {str(e)}")
|
67 |
return False
|
68 |
|
|
|
33 |
|
34 |
try:
|
35 |
# Load base model
|
36 |
+
if progress is not None:
|
37 |
progress(0.2, desc="Loading base model... This may take a few minutes")
|
38 |
|
39 |
print(f"Loading base model from {BASE_MODEL_PATH}")
|
|
|
44 |
)
|
45 |
|
46 |
# Load novel model
|
47 |
+
if progress is not None:
|
48 |
progress(0.7, desc="Loading novel model... This may take a few minutes")
|
49 |
|
50 |
print(f"Loading novel model from {NOVEL_MODEL_PATH}")
|
|
|
54 |
n_threads=4 # Number of CPU threads to use
|
55 |
)
|
56 |
|
57 |
+
if progress is not None:
|
58 |
progress(1.0, desc="Models loaded successfully!")
|
59 |
|
60 |
print("Models loaded successfully!")
|
|
|
62 |
|
63 |
except Exception as e:
|
64 |
print(f"Error loading models: {str(e)}")
|
65 |
+
if progress is not None:
|
66 |
progress(1.0, desc=f"Error loading models: {str(e)}")
|
67 |
return False
|
68 |
|
requirements.txt
CHANGED
@@ -5,5 +5,4 @@ torch==2.7.0
|
|
5 |
accelerate==1.6.0
|
6 |
sentencepiece==0.2.0
|
7 |
protobuf==6.30.2
|
8 |
-
llama-cpp-python==0.3.9
|
9 |
-
--extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu
|
|
|
5 |
accelerate==1.6.0
|
6 |
sentencepiece==0.2.0
|
7 |
protobuf==6.30.2
|
8 |
+
llama-cpp-python==0.3.9
|
|