Gregor Betz
committed on
logging
app.py CHANGED
@@ -88,9 +88,9 @@ CHATBOT_INSTRUCTIONS = (
 with open("config.yaml") as stream:
     try:
         demo_config = yaml.safe_load(stream)
-        logging.
+        logging.info(f"Config: {demo_config}")
     except yaml.YAMLError as exc:
-        logging.
+        logging.error(f"Error loading config: {exc}")
         gr.Error("Error loading config: {exc}")

 try:
@@ -111,10 +111,10 @@ try:
         duration=-1
     )
 except Exception as exc:
-    logging.
+    logging.error(f"Error processing config: {exc}")
     gr.Error(f"Error processing config: {exc}")

-logging.
+logging.info(f"Reasoning guide expert model is {guide_kwargs['expert_model']}.")



@@ -128,7 +128,7 @@ def setup_client_llm(**client_kwargs) -> LogitsModel | None:
     try:
         llm = create_logits_model(**client_kwargs)
     except Exception as e:
-        logging.
+        logging.error(f"When setting up client llm: Error: {e}")
         return False
     return llm

@@ -162,13 +162,14 @@ async def bot(
     if len(history_langchain_format) <= 1:

         # health check
+        gr.Info("Checking availability and health of inference endpoints ...", duration=6)
         health_check = await guide.health_check()

         if health_check.get("status", None) != "ok":
             health_msg = " | ".join([f"{k}: {v}" for k, v in health_check.items()])
-            logging.
+            logging.error(f"Guide health check failed: {health_msg}")
             gr.Error(f"LLM availability / health check failed: {health_msg}")
-        logging.
+        logging.info(f"Health check: {health_check}")

     message = history[-1][0]

@@ -176,9 +177,9 @@ async def bot(
         artifacts = {}
         progress_step = 0
         async for otype, ovalue in guide.guide(message):
-            logging.
+            logging.info(f"Guide output: {otype.value} - {ovalue}")
             if otype.value == "progress":
-                logging.
+                logging.info(f"Progress: {ovalue}")
                 gr.Info(ovalue, duration=12)
                 progress((progress_step,4))
                 progress_step += 1
@@ -186,7 +187,6 @@ async def bot(
             artifacts[otype.value] = ovalue
         else:
             break
-        asyncio.sleep(0.1)
     except asyncio.TimeoutError:
         msg = "Guided reasoning process took too long. Please try again."
         raise gr.Error(msg)
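Note that the module-level logging.info()/logging.error() calls added in this commit only produce output if the root logger is configured somewhere before they run: with the standard library's defaults, an unconfigured root logger is set up at WARNING level, so the new INFO-level records would be silently dropped. A minimal sketch of the kind of setup app.py would need (the level and format below are assumptions, not shown in the commit):

import logging

# Configure the root logger once, before any logging calls, so that the
# INFO-level records added in this commit are actually emitted.
# Level and format here are assumptions; the commit does not show this setup.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s",
)

logging.info("Logging configured.")  # written to stderr by default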
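The dropped asyncio.sleep(0.1) in the last hunk was a no-op as written: calling a coroutine function without await only creates a coroutine object (CPython emits a "coroutine ... was never awaited" RuntimeWarning) and introduces no delay, so removing it does not change the loop's timing. A self-contained sketch of the distinction:

import asyncio

async def main() -> None:
    asyncio.sleep(0.1)        # no-op: coroutine created but never awaited (RuntimeWarning)
    await asyncio.sleep(0.1)  # actually suspends this task for ~100 ms

asyncio.run(main())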
|