thankrandomness committed on
Commit
f8a90e7
·
1 Parent(s): c41ff99

comment out the debug messages

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -128,12 +128,12 @@ def evaluate_efficiency(dataset_split, similarity_threshold=0.1):
128
  # Predictions (limit to length of true annotations to avoid mismatch)
129
  y_pred.extend(retrieved_codes[:len(annotations_list)])
130
 
131
- for result in retrieved_results:
132
- print(f" Code: {result['code']}, Similarity Score: {result['similarity_score']:.2f}")
133
 
134
  # Debugging output to check for mismatches and understand results
135
- print("Sample y_true:", y_true[:10])
136
- print("Sample y_pred:", y_pred[:10])
137
 
138
  if total_items > 0:
139
  avg_similarity = total_similarity / total_items
@@ -175,7 +175,7 @@ metrics = f"Accuracy: {avg_similarity:.2f}"
175
 
176
  with gr.Blocks() as interface:
177
  gr.Markdown("# Automated Medical Coding POC")
178
- gr.Markdown(metrics)
179
  with gr.Row():
180
  with gr.Column():
181
  text_input = gr.Textbox(label="Input Text")
 
128
  # Predictions (limit to length of true annotations to avoid mismatch)
129
  y_pred.extend(retrieved_codes[:len(annotations_list)])
130
 
131
+ # for result in retrieved_results:
132
+ # print(f" Code: {result['code']}, Similarity Score: {result['similarity_score']:.2f}")
133
 
134
  # Debugging output to check for mismatches and understand results
135
+ # print("Sample y_true:", y_true[:10])
136
+ # print("Sample y_pred:", y_pred[:10])
137
 
138
  if total_items > 0:
139
  avg_similarity = total_similarity / total_items
 
175
 
176
  with gr.Blocks() as interface:
177
  gr.Markdown("# Automated Medical Coding POC")
178
+ # gr.Markdown(metrics)
179
  with gr.Row():
180
  with gr.Column():
181
  text_input = gr.Textbox(label="Input Text")