tommytracx committed
Commit bb1d7fe · verified · 1 Parent(s): 6b05f97

Update handler.py

Files changed (1)
1. handler.py +41 -7
handler.py CHANGED
@@ -34,10 +34,46 @@ model, tokenizer = load_model()
 print(f"Model loaded on {DEVICE} with dtype {TORCH_DTYPE}")
 
 # Default system prompt for the model
-DEFAULT_SYSTEM_PROMPT = """You are OGAI, a helpful assistant specialized in oil and gas engineering.
-You provide informative, accurate, and detailed responses related to drilling, completions,
-reservoir management, production optimization, and other oil and gas topics.
-You aim to be technically precise while remaining accessible to users with varying levels of expertise."""
+DEFAULT_SYSTEM_PROMPT = """You are OGAI, an expert assistant in oil and gas engineering.
+
+You provide **technically accurate, structured, and detailed** responses to inquiries related to **drilling, reservoir engineering, completions, production optimization, and oilfield calculations**. Your goal is to offer step-by-step explanations, precise calculations, and practical industry insights.
+
+### **Guidelines for Responses:**
+- **Use Markdown formatting** for better readability.
+- **Explain formulas step-by-step**, defining each variable.
+- **Ensure numerical consistency** in calculations.
+- **Use real-world examples** where applicable.
+- **Provide unit conversions** if relevant.
+
+### **Example Format:**
+#### **Q: How do you calculate bottomhole pressure?**
+
+Bottomhole pressure (BHP) can be determined from the hydrostatic pressure equation, written here in field units:
+
+\[ BHP = P_s + (0.052 \cdot \rho \cdot h) \]
+
+Where:
+- \( BHP \) = Bottomhole Pressure (psi)
+- \( P_s \) = Surface Pressure (psi)
+- \( \rho \) = Mud Density (lb/gal)
+- \( h \) = True Vertical Depth (ft)
+- \( 0.052 \) = Conversion factor (psi/ft per lb/gal) that folds gravitational acceleration and unit conversions into the field-unit form
+
+**Example Calculation:**
+If:
+- \( P_s = 500 \) psi
+- \( \rho = 9.5 \) lb/gal
+- \( h = 10,000 \) ft
+
+Convert density to a pressure gradient:
+\[ \rho' = 0.052 \times \rho = 0.052 \times 9.5 = 0.494 \text{ psi/ft} \]
+
+Calculate BHP:
+\[ BHP = 500 + (0.494 \times 10,000) = 5,440 \text{ psi} \]
+
+Thus, **BHP is approximately 5,440 psi.**
+
+Ensure all responses maintain technical precision, and clarify assumptions if necessary."""
 
 def format_prompt(messages: List[Dict[str, str]], system_prompt: Optional[str] = None) -> str:
     """Format the conversation messages into a prompt the model can understand."""
@@ -49,7 +85,6 @@ def format_prompt(messages: List[Dict[str, str]], system_prompt: Optional[str] =
         role = message["role"]
         content = message["content"]
 
-        # First message is already included above
         if i == 0 and role == "user":
             formatted_prompt += f"{content}\n<|assistant|>\n"
         else:
@@ -57,7 +92,6 @@ def format_prompt(messages: List[Dict[str, str]], system_prompt: Optional[str] =
                 formatted_prompt += f"<|user|>\n{content}\n<|assistant|>\n"
             elif role == "assistant":
                 formatted_prompt += f"{content}\n"
-        # Ignore system messages after the first one
 
     return formatted_prompt
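
Pieced together, the two hunks above imply a simple tag-based chat template. A minimal self-contained sketch of how the visible fragments fit, with the opening line (the `<|system|>` tag and the first `<|user|>` tag) assumed, since the start of the function falls outside the diff:

```python
from typing import Dict, List, Optional

# Abbreviated stand-in; the full prompt is defined earlier in handler.py.
DEFAULT_SYSTEM_PROMPT = "You are OGAI, an expert assistant in oil and gas engineering."

def format_prompt(messages: List[Dict[str, str]], system_prompt: Optional[str] = None) -> str:
    """Sketch of the template implied by the hunks above (header line assumed)."""
    formatted_prompt = f"<|system|>\n{system_prompt or DEFAULT_SYSTEM_PROMPT}\n<|user|>\n"
    for i, message in enumerate(messages):
        role, content = message["role"], message["content"]
        if i == 0 and role == "user":
            # The first user turn's <|user|> tag was already emitted above.
            formatted_prompt += f"{content}\n<|assistant|>\n"
        elif role == "user":
            formatted_prompt += f"<|user|>\n{content}\n<|assistant|>\n"
        elif role == "assistant":
            formatted_prompt += f"{content}\n"
    return formatted_prompt

print(format_prompt([
    {"role": "user", "content": "How do you calculate bottomhole pressure?"},
    {"role": "assistant", "content": "BHP = P_s + 0.052 * rho * h."},
    {"role": "user", "content": "And for a deviated well?"},
]))
```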
 
@@ -196,4 +230,4 @@ if __name__ == "__main__":
     else:
         # Stream response
         for chunk in response:
-            print(json.loads(chunk)["generated_text"], end="")
+            print(json.loads(chunk)["generated_text"], end="")
 
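The last hunk shows the `__main__` smoke test consuming a streamed response, where each chunk is a JSON string carrying a `generated_text` field. A minimal sketch of that consumption pattern; the generator here is a stand-in, since the handler call itself is not shown in the diff:

```python
import json

# Stand-in for the streaming generator returned by the handler; the real
# object and its chunk schema come from handler.py, which this diff only
# shows in part.
def fake_stream():
    for piece in ["Bottomhole ", "pressure is ", "5,440 psi."]:
        yield json.dumps({"generated_text": piece})

response = fake_stream()

# Mirrors the loop in the hunk above: decode each JSON chunk and print the
# incremental text with no trailing newline so the pieces join seamlessly.
for chunk in response:
    print(json.loads(chunk)["generated_text"], end="")
print()  # newline once the stream is exhausted
```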