Commit 875e2f3 by zulissimeta · Parent: 341c716

add backoff, use direct requests instead of inference client

Files changed (2):
  1. hf_calculator.py +39 -16
  2. requirements.txt +2 -3
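The change swaps huggingface_hub's InferenceClient for plain requests.post calls wrapped in retries. As a reading aid for the diff below, here is a minimal standalone sketch of that retry pattern, assuming only the backoff and requests libraries; the function name and token handling are illustrative and not part of the repository.

import os

import backoff
import requests


@backoff.on_exception(
    backoff.expo,                          # exponential waits (roughly 1s, 2s, 4s, ...)
    requests.exceptions.RequestException,  # covers connection errors and raise_for_status() HTTPErrors
    max_tries=10,
    jitter=backoff.full_jitter,            # randomize each wait to avoid synchronized retries
)
def post_with_backoff(url: str, payload: dict) -> requests.Response:
    headers = {"Authorization": f"Bearer {os.environ['HF_TOKEN']}"}  # assumes HF_TOKEN is set
    response = requests.post(url, headers=headers, json=payload)
    response.raise_for_status()  # surface 4xx/5xx as HTTPError so the decorator can retry
    return response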
hf_calculator.py CHANGED
@@ -6,17 +6,19 @@ LICENSE file in the root directory of this source tree.
 """
 
 import hashlib
-import json
 import os
 from pathlib import Path
 
 import ase
+import backoff
 import gradio as gr
 import huggingface_hub as hf_hub
+import requests
 from ase.calculators.calculator import Calculator
 from ase.db.core import now
 from ase.db.row import AtomsRow
 from ase.io.jsonio import decode, encode
+from requests.exceptions import HTTPError
 
 
 def hash_save_file(atoms: ase.Atoms, task_name, path: Path | str):
@@ -65,37 +67,56 @@ class HFEndpointCalculator(Calculator):
                 "You need to log in to HF and have gated model access to UMA before running your own simulations!"
             )
 
-        self.client = hf_hub.InferenceClient(
-            model=endpoint_url, token=os.environ["HF_TOKEN"]
-        )
+        self.url = endpoint_url
+        self.token = os.environ["HF_TOKEN"]
         self.atoms = atoms
         self.task_name = task_name
 
         super().__init__(*args, **kwargs)
 
+    @staticmethod
+    @backoff.on_exception(
+        backoff.expo,
+        (requests.exceptions.RequestException,),
+        max_tries=10,
+        jitter=backoff.full_jitter,
+    )
+    def _post_with_backoff(url, headers, payload):
+        response = requests.post(url, headers=headers, json=payload)
+        response.raise_for_status()
+        return response
+
     def calculate(self, atoms, properties, system_changes):
         Calculator.calculate(self, atoms, properties, system_changes)
 
         task_name = self.task_name.lower()
 
-        # Run inference via a post request to the endpoint
+        payload = {
+            "inputs": atoms_to_json(atoms, data=atoms.info),
+            "properties": properties,
+            "system_changes": system_changes,
+            "task_name": task_name,
+        }
+
+        headers = {
+            "Accept": "application/json",
+            "Authorization": f"Bearer {self.token}",
+            "Content-Type": "application/json",
+        }
+
+        print(payload)
+
         try:
-            response = self.client.post(
-                json={
-                    "inputs": atoms_to_json(atoms, data=atoms.info),
-                    "properties": properties,
-                    "system_changes": system_changes,
-                    "task_name": task_name,
-                }
-            )
-        except hf_hub.errors.BadRequestError:
+            response = self._post_with_backoff(self.url, headers, payload)
+            response_dict = response.json()
+        except HTTPError as error:
             hash_save_file(atoms, task_name, "/data/custom_inputs/errors/")
             raise gr.Error(
-                "Backend failure during your calculation; if you have continued issues please file an issue in the main FAIR chemistry repo (https://github.com/facebookresearch/fairchem)."
+                f"Backend failure during your calculation; if you have continued issues please file an issue in the main FAIR chemistry repo (https://github.com/facebookresearch/fairchem).\n{error}"
             )
 
         # Load the response and store the results in the calc and atoms object
-        response_dict = decode(json.loads(response))
+        response_dict = decode(response_dict)
         self.results = response_dict["results"]
         atoms.info = response_dict["info"]
 
@@ -118,6 +139,8 @@ def atoms_to_json(atoms, data=None):
 
     if data:
         dct["data"] = data
+    else:
+        dct["data"] = {}
 
     constraints = row.get("constraints")
     if constraints:
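For context, a minimal usage sketch of the updated calculator with ASE, assuming the constructor arguments (endpoint_url, atoms, task_name) read off the diff; the endpoint URL and task name below are placeholders, and HF_TOKEN must be set in the environment with gated UMA model access.

from ase.build import molecule

from hf_calculator import HFEndpointCalculator

atoms = molecule("H2O")
atoms.calc = HFEndpointCalculator(
    endpoint_url="https://<your-endpoint>.endpoints.huggingface.cloud",  # placeholder endpoint
    atoms=atoms,
    task_name="omol",  # example task name; calculate() lowercases it before sending
)
energy = atoms.get_potential_energy()  # triggers calculate(), which POSTs via _post_with_backoff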
requirements.txt CHANGED
@@ -1,6 +1,5 @@
 gradio<5.30.0
 numpy
 ase
-
-# Pinned since huggingface_hub>=0.31 does not support custom inference endpoint posts
-huggingface_hub<0.31
+huggingface_hub
+backoff