kimhyunwoo committed (verified)
Commit 8a7a11f · 1 Parent(s): c9ceb74

Update app.py

Files changed (1)
  app.py (+77 -69)
app.py CHANGED
@@ -28,6 +28,7 @@ print("This might take a few minutes, especially on the first launch...")
 model = None
 tokenizer = None
 load_successful = False
+stop_token_ids_list = []  # Initialize stop_token_ids_list
 
 try:
     start_load_time = time.time()
@@ -35,11 +36,11 @@ try:
         MODEL_ID,
         torch_dtype=torch.float32,
         device_map="cpu",
-        # force_download=True # 주석 처리. μΊμ‹œ λ¬Έμ œκ°€ μ—†λ‹€λ©΄ λΆˆν•„μš”
+        # force_download=True # Keep commented unless cache issues reappear
     )
     tokenizer = AutoTokenizer.from_pretrained(
         MODEL_ID,
-        # force_download=True # 주석 처리
+        # force_download=True # Keep commented
     )
     model.eval()
     load_time = time.time() - start_load_time
@@ -48,14 +49,14 @@ try:
 
     # --- Stop Token Configuration ---
     stop_token_strings = ["<|endofturn|>", "<|stop|>"]
-    stop_token_ids_list = [tokenizer.convert_tokens_to_ids(token) for token in stop_token_strings]
+    temp_stop_ids = [tokenizer.convert_tokens_to_ids(token) for token in stop_token_strings]
 
-    if tokenizer.eos_token_id is not None and tokenizer.eos_token_id not in stop_token_ids_list:
-        stop_token_ids_list.append(tokenizer.eos_token_id)
+    if tokenizer.eos_token_id is not None and tokenizer.eos_token_id not in temp_stop_ids:
+        temp_stop_ids.append(tokenizer.eos_token_id)
     elif tokenizer.eos_token_id is None:
         print("Warning: tokenizer.eos_token_id is None. Cannot add to stop tokens.")
 
-    stop_token_ids_list = [tid for tid in stop_token_ids_list if tid is not None]
+    stop_token_ids_list = [tid for tid in temp_stop_ids if tid is not None] # Assign to the global scope variable
 
     if not stop_token_ids_list:
         print("Warning: Could not find any stop token IDs. Using default EOS if available, otherwise generation might not stop correctly.")
@@ -63,7 +64,7 @@ try:
            stop_token_ids_list = [tokenizer.eos_token_id]
        else:
            print("Error: No stop tokens found, including default EOS. Generation may run indefinitely.")
-           # ν•„μš”μ‹œ μ—λŸ¬ 처리 λ˜λŠ” κΈ°λ³Έκ°’ μ„€μ •
+           # Consider raising an error or setting a default if this is critical
 
    print(f"Using Stop Token IDs: {stop_token_ids_list}")
 
@@ -72,7 +73,7 @@ except Exception as e:
    if 'model' in locals() and model is not None: del model
    if 'tokenizer' in locals() and tokenizer is not None: del tokenizer
    gc.collect()
-   # μ•± μ‹€ν–‰ 전에 λ‘œλ”© μ‹€νŒ¨ μ‹œ Gradio μ—λŸ¬ λŒ€μ‹  ν”„λ‘œμ„ΈμŠ€ μ’…λ£Œ λ˜λŠ” λ‹€λ₯Έ 처리 κ³ λ €
+   # Raise Gradio error to display in the Space UI if loading fails
    raise gr.Error(f"Failed to load the model {MODEL_ID}. Cannot start the application. Error: {e}")
 
 
@@ -81,7 +82,7 @@ def get_system_prompt():
    current_date = datetime.datetime.now().strftime("%Y-%m-%d (%A)")
    return (
        f"- AI μ–Έμ–΄λͺ¨λΈμ˜ 이름은 \"CLOVA X\" 이며 λ„€μ΄λ²„μ—μ„œ λ§Œλ“€μ—ˆλ‹€.\n"
-       # f"- μ˜€λŠ˜μ€ {current_date}이닀.\n" # ν•„μš”μ‹œ 주석 ν•΄μ œ
+       # f"- μ˜€λŠ˜μ€ {current_date}이닀.\n" # Uncomment if needed
        f"- μ‚¬μš©μžμ˜ μ§ˆλ¬Έμ— λŒ€ν•΄ μΉœμ ˆν•˜κ³  μžμ„Έν•˜κ²Œ ν•œκ΅­μ–΄λ‘œ λ‹΅λ³€ν•΄μ•Ό ν•œλ‹€."
    )
 
@@ -109,16 +110,22 @@ def warmup_model():
            return_tensors="pt"
        ).to("cpu")
 
+       # Check if stop_token_ids_list is empty and handle appropriately
+       gen_kwargs = {
+           "max_new_tokens": 10,
+           "pad_token_id": tokenizer.eos_token_id if tokenizer.eos_token_id is not None else tokenizer.pad_token_id,
+           "do_sample": False
+       }
+       if stop_token_ids_list:
+           gen_kwargs["eos_token_id"] = stop_token_ids_list
+       else:
+           print("Warmup Warning: No stop tokens defined for generation.")
+
+
        with torch.no_grad():
-           output_ids = model.generate(
-               **inputs,
-               max_new_tokens=10, # 짧게 μƒμ„±ν•˜μ—¬ μ‹œκ°„ μ ˆμ•½
-               eos_token_id=stop_token_ids_list,
-               pad_token_id=tokenizer.eos_token_id if tokenizer.eos_token_id is not None else tokenizer.pad_token_id,
-               do_sample=False # Warm-up μ‹œμ—λŠ” μƒ˜ν”Œλ§ λΆˆν•„μš”
-           )
-
-       # κ²°κ³Ό λ””μ½”λ”© (선택 사항, ν™•μΈμš©)
+           output_ids = model.generate(**inputs, **gen_kwargs)
+
+       # Optional: Decode warmup response for verification
        # response = tokenizer.decode(output_ids[0, inputs['input_ids'].shape[1]:], skip_special_tokens=True)
        # print(f"Warm-up response (decoded): {response}")
 
@@ -130,40 +137,43 @@ def warmup_model():
 
    except Exception as e:
        print(f"!!! Error during model warm-up: {e}")
-       # μ›œμ—… μ‹€νŒ¨κ°€ μ•± 싀행을 λ§‰μ§€λŠ” μ•Šλ„λ‘ 처리
    finally:
-       gc.collect() # Ensure cleanup even if warmup fails
-
+       gc.collect()
 
 # --- Inference Function ---
 def predict(message, history):
    """
-   Generates response using HyperCLOVAX based on user message and chat history.
-   Handles chat formatting, generation, decoding, and memory management.
-   Assumes 'history' is in the Gradio 'messages' format: List[List[str | None | Tuple]] or List[Dict]
+   Generates response using HyperCLOVAX.
+   Assumes 'history' is in the Gradio 'messages' format: List[Dict].
    """
    if model is None or tokenizer is None:
        return "였λ₯˜: λͺ¨λΈμ΄ λ‘œλ“œλ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€."
 
    system_prompt = get_system_prompt()
 
-   # history ν˜•μ‹μ΄ List[Dict] ('messages' format)라고 κ°€μ •ν•˜κ³  처리
+   # Start with system prompt
    chat_history_formatted = [
-       {"role": "tool_list", "content": ""},
+       {"role": "tool_list", "content": ""}, # As required by model card
        {"role": "system", "content": system_prompt}
    ]
-   # historyλŠ” [{'role': 'user', 'content': '...'}, {'role': 'assistant', 'content': '...'}] ν˜•νƒœ
-   for turn in history:
-       # history의 각 μš”μ†Œκ°€ λ”•μ…”λ„ˆλ¦¬ ν˜•νƒœμΈμ§€ 확인 (더 μ•ˆμ „ν•˜κ²Œ)
-       if isinstance(turn, dict) and "role" in turn and "content" in turn:
-           chat_history_formatted.append(turn)
-       else:
-           # μ˜ˆμƒμΉ˜ λͺ»ν•œ ν˜•μ‹μ΄ λ“€μ–΄μ˜¬ 경우 κ²½κ³  좜λ ₯ (λ””λ²„κΉ…μš©)
-           print(f"Warning: Unexpected history format item: {turn}")
-           # ν•„μš”ν•˜λ‹€λ©΄ μ—¬κΈ°μ„œ μ—λŸ¬ 처리 λ˜λŠ” λ³€ν™˜ 둜직 μΆ”κ°€
-
-
-   # Add the latest user message
+
+   # Append history (List of {'role': 'user'/'assistant', 'content': '...'})
+   if isinstance(history, list): # Check if history is a list
+       for turn in history:
+           # Validate turn format
+           if isinstance(turn, dict) and "role" in turn and "content" in turn:
+               chat_history_formatted.append(turn)
+           # Handle potential older tuple format if necessary (though less likely now)
+           elif isinstance(turn, (list, tuple)) and len(turn) == 2:
+               print(f"Warning: Received history item in tuple format: {turn}. Converting to messages format.")
+               chat_history_formatted.append({"role": "user", "content": turn[0]})
+               if turn[1]: # Ensure assistant message exists
+                   chat_history_formatted.append({"role": "assistant", "content": turn[1]})
+           else:
+               print(f"Warning: Skipping unexpected history format item: {turn}")
+
+
+   # Append the latest user message
    chat_history_formatted.append({"role": "user", "content": message})
 
    inputs = None
@@ -175,41 +185,47 @@ def predict(message, history):
            add_generation_prompt=True,
            return_dict=True,
            return_tensors="pt"
-       ).to("cpu") # Explicitly send to CPU
+       ).to("cpu")
        input_length = inputs['input_ids'].shape[1]
        print(f"\nInput tokens: {input_length}")
 
    except Exception as e:
        print(f"!!! Error applying chat template: {e}")
-       # Provide feedback to the user
        return f"였λ₯˜: μž…λ ₯ ν˜•μ‹μ„ μ²˜λ¦¬ν•˜λŠ” 쀑 λ¬Έμ œκ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€. ({e})"
 
    try:
        print("Generating response...")
        generation_start_time = time.time()
+
+       # Prepare generation arguments, handling empty stop_token_ids_list
+       gen_kwargs = {
+           "max_new_tokens": MAX_NEW_TOKENS,
+           "pad_token_id": tokenizer.eos_token_id if tokenizer.eos_token_id is not None else tokenizer.pad_token_id,
+           "do_sample": True,
+           "temperature": 0.7,
+           "top_p": 0.9,
+       }
+       if stop_token_ids_list:
+           gen_kwargs["eos_token_id"] = stop_token_ids_list
+       else:
+           print("Generation Warning: No stop tokens defined.")
+
+
        with torch.no_grad():
-           output_ids = model.generate(
-               **inputs,
-               max_new_tokens=MAX_NEW_TOKENS,
-               eos_token_id=stop_token_ids_list,
-               pad_token_id=tokenizer.eos_token_id if tokenizer.eos_token_id is not None else tokenizer.pad_token_id,
-               do_sample=True,
-               temperature=0.7,
-               top_p=0.9,
-           )
+           output_ids = model.generate(**inputs, **gen_kwargs)
+
        generation_time = time.time() - generation_start_time
        print(f"Generation complete in {generation_time:.2f} seconds.")
 
    except Exception as e:
        print(f"!!! Error during model generation: {e}")
-       # Clean up potentially large tensors in case of error
        if inputs is not None: del inputs
        if output_ids is not None: del output_ids
        gc.collect()
        return f"였λ₯˜: 응닡을 μƒμ„±ν•˜λŠ” 쀑 λ¬Έμ œκ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€. ({e})"
 
    # Decode the response
-   response = "였λ₯˜: 응닡 생성에 μ‹€νŒ¨ν–ˆμŠ΅λ‹ˆλ‹€." # κΈ°λ³Έκ°’
+   response = "였λ₯˜: 응닡 생성에 μ‹€νŒ¨ν–ˆμŠ΅λ‹ˆλ‹€."
    if output_ids is not None:
        try:
            new_tokens = output_ids[0, input_length:]
@@ -220,7 +236,6 @@ def predict(message, history):
            print(f"!!! Error decoding response: {e}")
            response = "였λ₯˜: 응닡을 λ””μ½”λ”©ν•˜λŠ” 쀑 λ¬Έμ œκ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€."
 
-
    # Clean up memory
    if inputs is not None: del inputs
    if output_ids is not None: del output_ids
@@ -232,13 +247,8 @@ def predict(message, history):
 # --- Gradio Interface Setup ---
 print("--- Setting up Gradio Interface ---")
 
-# type='messages'λ₯Ό λͺ…μ‹œν•˜μ—¬ UserWarning ν•΄κ²° 및 μ΅œμ‹  ν˜•μ‹ μ‚¬μš©
-chatbot_component = gr.Chatbot(
-    label="HyperCLOVA X SEED (0.5B) λŒ€ν™”",
-    bubble_full_width=False,
-    height=600,
-    type='messages' # 이 뢀뢄을 λͺ…μ‹œν•˜μ—¬ ChatInterfaceμ™€μ˜ ν˜Έν™˜μ„± 확보
-)
+# No need to create a separate Chatbot component beforehand
+# chatbot_component = gr.Chatbot(...) # REMOVED
 
 examples = [
    ["넀이버 ν΄λ‘œλ°”XλŠ” λ¬΄μ—‡μΈκ°€μš”?"],
@@ -247,34 +257,32 @@ examples = [
    ["μ œμ£Όλ„ μ—¬ν–‰ κ³„νšμ„ μ„Έμš°κ³  μžˆλŠ”λ°, 3λ°• 4일 μΆ”μ²œ μ½”μŠ€ μ’€ μ§œμ€„λž˜?"],
 ]
 
-# ChatInterface 생성 μ‹œ λΆˆν•„μš”ν•œ 인자 제거됨
+# Let ChatInterface manage its own internal Chatbot component
+# Remove the chatbot=... argument
 demo = gr.ChatInterface(
-    fn=predict, # 예츑 ν•¨μˆ˜ μ—°κ²°
-    chatbot=chatbot_component, # Chatbot μ»΄ν¬λ„ŒνŠΈ μ‚¬μš© (type='messages' 섀정됨)
+    fn=predict, # Link the prediction function
+    # chatbot=chatbot_component, # REMOVED
    title="🇰🇷 넀이버 HyperCLOVA X SEED (0.5B) 데λͺ¨",
    description=(
        f"**λͺ¨λΈ:** {MODEL_ID}\n"
        f"**ν™˜κ²½:** Hugging Face 무료 CPU (16GB RAM)\n"
-       f"**주의:** CPUμ—μ„œ μ‹€ν–‰λ˜λ―€λ‘œ 응닡 생성에 λ‹€μ†Œ μ‹œκ°„μ΄ 걸릴 수 μžˆμŠ΅λ‹ˆλ‹€. (μ›œμ—… μ‹œλ„λ¨)\n"
+       f"**주의:** CPUμ—μ„œ μ‹€ν–‰λ˜λ―€λ‘œ 응닡 생성에 λ‹€μ†Œ μ‹œκ°„μ΄ 걸릴 수 μžˆμŠ΅λ‹ˆλ‹€. (μ›œμ—… μ™„λ£Œ)\n"
        f"μ΅œλŒ€ 생성 토큰 μˆ˜λŠ” {MAX_NEW_TOKENS}개둜 μ œν•œλ©λ‹ˆλ‹€."
    ),
    examples=examples,
-   cache_examples=False, # 무료 ν‹°μ–΄ 캐싱 λΉ„ν™œμ„±ν™”
+   cache_examples=False,
    theme="soft",
-   # retry_btn, undo_btn, clear_btn 등은 μ΅œμ‹  λ²„μ „μ—μ„œ 직접 μ§€μ›ν•˜μ§€ μ•ŠμŒ
 )
 
 # --- Application Launch ---
 if __name__ == "__main__":
-   # λͺ¨λΈ λ‘œλ”© 성곡 μ‹œμ—λ§Œ μ›œμ—… μ‹€ν–‰
    if load_successful:
        warmup_model()
    else:
        print("Skipping warm-up because model loading failed.")
 
    print("--- Launching Gradio App ---")
-   # queue()λŠ” μ—¬λŸ¬ μ‚¬μš©μž 처리 및 κΈ΄ μž‘μ—… 관리에 유용
    demo.queue().launch(
-       # share=True # 곡개 링크 생성 μ‹œ ν•„μš” (둜그인 ν•„μš”ν•  수 있음)
-       # server_name="0.0.0.0" # 둜컬 λ„€νŠΈμ›Œν¬μ—μ„œ μ ‘κ·Ό ν—ˆμš© μ‹œ
+       # share=True # Uncomment for public link
+       # server_name="0.0.0.0" # Uncomment for local network access
    )
 
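
For reviewers, here is a minimal standalone sketch of the pattern this commit adopts in both warmup_model() and predict(): collect the model.generate() keyword arguments in a dict and attach eos_token_id only when stop-token IDs were actually resolved. The build_gen_kwargs helper and its parameters are illustrative only and are not part of app.py.

from typing import List, Optional

def build_gen_kwargs(tokenizer, stop_token_ids: Optional[List[int]],
                     max_new_tokens: int, sample: bool) -> dict:
    """Illustrative helper: assemble generate() kwargs, skipping eos_token_id when no stop IDs exist."""
    gen_kwargs = {
        "max_new_tokens": max_new_tokens,
        # Fall back to pad_token_id when the tokenizer defines no EOS token.
        "pad_token_id": tokenizer.eos_token_id if tokenizer.eos_token_id is not None else tokenizer.pad_token_id,
        "do_sample": sample,
    }
    if sample:
        # Sampling settings mirror the values used in predict().
        gen_kwargs.update(temperature=0.7, top_p=0.9)
    if stop_token_ids:  # Only pass eos_token_id when the list is non-empty.
        gen_kwargs["eos_token_id"] = stop_token_ids
    return gen_kwargs

# Usage, assuming `model`, `tokenizer`, `inputs`, and `stop_token_ids_list` as defined in app.py:
# output_ids = model.generate(**inputs, **build_gen_kwargs(tokenizer, stop_token_ids_list, 10, sample=False))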