Leyogho committed
Commit 6b4e8f4 · 1 Parent(s): 0357478

Revert "Add header html"

This reverts commit 0a69bef22e614b2855a40c48545ae16ce3119ee1.

Files changed (4):
  1. app.py +3 -2
  2. explanation.py +0 -15
  3. requirements.txt +1 -1
  4. translator.py +7 -0
app.py CHANGED
@@ -88,9 +88,10 @@ def init_distributed(opt):
 def setup_model():
     """Initialize the model on CPU without CUDA initialization."""
     opt = load_opt_from_config_files(["configs/biomedparse_inference.yaml"])
+    opt['CUDA'] = opt.get('CUDA', True) and torch.cuda.is_available()  # Check CUDA availability
     opt = init_distributed(opt)
     pretrained_path = init_huggingface()
-    model = BaseModel(opt, build_model(opt)).from_pretrained(pretrained_path).eval()
+    model = BaseModel(opt, build_model(opt)).from_pretrained(pretrained_path)
     return model
 
 @torch.no_grad()
@@ -180,7 +181,7 @@ def process_image(image, prompts, model):
 def setup_gradio_interface(model):
     """Configure the Gradio interface."""
     return gr.Interface(
-        theme=gr.Theme.from_hub("lone17/kotaemon"),
+        theme=gr.Theme.from_hub("JohnSmith9982/small_and_pretty"),
         fn=lambda img, txt: process_image(img, txt, model),
         inputs=[
             gr.Image(type="numpy", label="Image médicale"),
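For context on the first hunk: the restored line disables CUDA in the config whenever no GPU is actually present, so init_distributed never tries to initialize a device the host lacks. A minimal sketch of that gating pattern in isolation (the opt dict below is an illustrative stand-in; the real one comes from the BiomedParse YAML config):

    import torch

    # Illustrative stand-in for the config loaded from
    # configs/biomedparse_inference.yaml (assumption: it carries a 'CUDA' flag).
    opt = {'CUDA': True}

    # Keep CUDA enabled only if the config requests it AND a GPU is visible;
    # .get('CUDA', True) defaults to True when the key is absent.
    opt['CUDA'] = opt.get('CUDA', True) and torch.cuda.is_available()

    device = 'cuda' if opt['CUDA'] else 'cpu'
    print(f"Inference device: {device}")

Note the revert also drops the .eval() call, so callers needing deterministic inference behavior (dropout off, batch norm in eval mode) would have to call model.eval() themselves; the @torch.no_grad() decorator only disables gradient tracking.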
explanation.py DELETED
@@ -1,15 +0,0 @@
-import openai
-
-client = openai.Client()
-
-def explain(model, input_text, max_tokens=100):
-    response = client.Completion.create(
-        model=model,
-        prompt=input_text,
-        max_tokens=max_tokens,
-        temperature=0,
-        top_p=1,
-        frequency_penalty=0,
-        presence_penalty=0
-    )
-    return response.choices[0].text.strip()
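An aside on the code being deleted: it mixes the two OpenAI SDK styles. openai.Client() is the v1 client, but Completion.create (capitalized) belongs to the pre-v1 module-level API, so the helper as written would most likely raise an AttributeError. For reference, a minimal sketch of the same call against the v1 client (the model name is a placeholder):

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    def explain(model, input_text, max_tokens=100):
        # In the v1 SDK, legacy text completions live under the
        # lowercase client.completions attribute.
        response = client.completions.create(
            model=model,  # e.g. "gpt-3.5-turbo-instruct" (placeholder)
            prompt=input_text,
            max_tokens=max_tokens,
            temperature=0,
        )
        return response.choices[0].text.strip()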
requirements.txt CHANGED
@@ -1,4 +1,5 @@
 pillow==9.4.0
+accelerate
 opencv-python==4.8.1.78
 pyyaml==6.0.1
 json_tricks==3.17.3
@@ -33,7 +34,6 @@ open-clip-torch==2.26.1
 nibabel==5.1.0
 git+https://github.com/facebookresearch/detectron2
 gradio
-openai
 #torch==2.3.1 #2.0.1
 #torchvision==0.15.2
 #torchaudio==2.0.2
translator.py CHANGED
@@ -29,3 +29,10 @@ def translate_text(text, source_lang, target_lang):
         print(f"An error occurred: {e}")
         return text
 
+if __name__ == "__main__":
+    text_to_translate = "Dans cette image donne moi l'œdème"
+    source_language = "fr"
+    target_language = "en"
+    translation = translate_text(text_to_translate, source_language, target_language)
+    if translation:
+        print(translation)
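The appended block gives translator.py a standalone smoke test: running python translator.py should print the English translation of the French sample prompt ("In this image, give me the edema"), or echo the input unchanged if translate_text hits an error. The hunk only shows the error-handling tail of translate_text; a function with that shape might look like the sketch below (the deep-translator backend is an assumption, confirmed neither by the diff nor by requirements.txt):

    from deep_translator import GoogleTranslator  # assumed backend, not shown in the diff

    def translate_text(text, source_lang, target_lang):
        try:
            # GoogleTranslator accepts ISO 639-1 codes such as "fr" and "en".
            return GoogleTranslator(source=source_lang, target=target_lang).translate(text)
        except Exception as e:
            print(f"An error occurred: {e}")
            return text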