Spaces:
Running
Running
Sami
commited on
Commit
·
34d7afd
1
Parent(s):
4026a55
dsf
Browse files- automedical-demo-1/Screenshot 2025-01-17 at 14.08.24.png → assets/screenshots/automedical-demo-screenshot.png +0 -0
- {automedical-demo-1 → demos/automedical}/automedicalai copy.html +0 -0
- {automedical-demo-1 → demos/automedical}/automedicalai.html +0 -0
- documentation.html → docs/documentation.html +0 -0
- docs/{myemailtothem.html → internal/myemailtothem.html} +0 -0
- docs/{requirements-conversation.txt → internal/requirements-conversation.txt} +0 -0
- docs/{spanish-hospital-context.txt → internal/spanish-hospital-context.txt} +0 -0
- docs/{REFERENCE_example-propuesta-12-octubre-report copy.pdf → references/REFERENCE_example-propuesta-12-octubre-report copy.pdf} +0 -0
- paper.html → papers/drafts/fermed-vlm-draft-1.html +0 -0
- papers/drafts/fermed-vlm-draft-2.html +745 -0
- paper2.html → papers/research/fermed-vlm-paper-v2.html +0 -0
- papers/research/fermed-vlm-paper.html +745 -0
- proposals/{nhs-detailed-proposal.html → nhs/nhs-detailed-proposal.html} +0 -0
- proposals/{nhs-formal-proposal.html → nhs/nhs-formal-proposal.html} +0 -0
- proposals/{nhs-proposal.html → nhs/nhs-proposal.html} +0 -0
- proposals/{12-octubre-proposal.html → spanish/12-octubre-proposal.html} +0 -0
- proposals/{spanish-hospital-proposal.html → spanish/spanish-hospital-proposal.html} +0 -0
automedical-demo-1/Screenshot 2025-01-17 at 14.08.24.png → assets/screenshots/automedical-demo-screenshot.png
RENAMED
File without changes
|
{automedical-demo-1 → demos/automedical}/automedicalai copy.html
RENAMED
File without changes
|
{automedical-demo-1 → demos/automedical}/automedicalai.html
RENAMED
File without changes
|
documentation.html → docs/documentation.html
RENAMED
File without changes
|
docs/{myemailtothem.html → internal/myemailtothem.html}
RENAMED
File without changes
|
docs/{requirements-conversation.txt → internal/requirements-conversation.txt}
RENAMED
File without changes
|
docs/{spanish-hospital-context.txt → internal/spanish-hospital-context.txt}
RENAMED
File without changes
|
docs/{REFERENCE_example-propuesta-12-octubre-report copy.pdf → references/REFERENCE_example-propuesta-12-octubre-report copy.pdf}
RENAMED
File without changes
|
paper.html → papers/drafts/fermed-vlm-draft-1.html
RENAMED
File without changes
|
papers/drafts/fermed-vlm-draft-2.html
ADDED
@@ -0,0 +1,745 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<!DOCTYPE html>
|
2 |
+
<html lang="en">
|
3 |
+
<head>
|
4 |
+
<meta charset="UTF-8">
|
5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
6 |
+
<title>A Multimodal AI Approach to Ophthalmic Care: Comprehensive Validation and Diverse Clinical Applications</title>
|
7 |
+
<!-- Bootstrap CSS -->
|
8 |
+
<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet">
|
9 |
+
<!-- Font Awesome for icons (optional) -->
|
10 |
+
<link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css" rel="stylesheet">
|
11 |
+
<script src="https://cdn.jsdelivr.net/npm/mermaid/dist/mermaid.min.js"></script>
|
12 |
+
<style>
|
13 |
+
body {
|
14 |
+
font-family: 'Georgia', serif;
|
15 |
+
background-color: #ffffff;
|
16 |
+
color: #333333;
|
17 |
+
padding-top: 20px;
|
18 |
+
padding-bottom: 20px;
|
19 |
+
}
|
20 |
+
.container {
|
21 |
+
max-width: 960px;
|
22 |
+
}
|
23 |
+
h1, h2, h3, h4, h5, h6 {
|
24 |
+
color: #2c3e50;
|
25 |
+
}
|
26 |
+
.section {
|
27 |
+
margin-bottom: 40px;
|
28 |
+
}
|
29 |
+
.references li {
|
30 |
+
margin-bottom: 10px;
|
31 |
+
}
|
32 |
+
.table-responsive {
|
33 |
+
margin-top: 20px;
|
34 |
+
margin-bottom: 20px;
|
35 |
+
}
|
36 |
+
.footer {
|
37 |
+
text-align: center;
|
38 |
+
padding: 20px 0;
|
39 |
+
color: #777;
|
40 |
+
border-top: 1px solid #eaeaea;
|
41 |
+
margin-top: 40px;
|
42 |
+
}
|
43 |
+
.appendix, .supplementary, .author-contributions, .data-availability, .acknowledgments, .ethical-standards {
|
44 |
+
margin-bottom: 30px;
|
45 |
+
}
|
46 |
+
.abstract {
|
47 |
+
background-color: #f8f9fa;
|
48 |
+
padding: 20px;
|
49 |
+
border-radius: 5px;
|
50 |
+
margin-bottom: 30px;
|
51 |
+
}
|
52 |
+
.reference-section {
|
53 |
+
list-style-type: decimal;
|
54 |
+
padding-left: 20px;
|
55 |
+
}
|
56 |
+
.mermaid {
|
57 |
+
font-size: 14px !important;
|
58 |
+
margin: 20px 0;
|
59 |
+
min-height: 300px;
|
60 |
+
max-width: 100%;
|
61 |
+
overflow-x: auto;
|
62 |
+
}
|
63 |
+
|
64 |
+
.diagram-container {
|
65 |
+
background: #fff;
|
66 |
+
padding: 15px;
|
67 |
+
border-radius: 8px;
|
68 |
+
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
|
69 |
+
margin: 20px 0;
|
70 |
+
max-width: 100%;
|
71 |
+
overflow-x: auto;
|
72 |
+
}
|
73 |
+
|
74 |
+
.diagram-title {
|
75 |
+
font-size: 1.2rem;
|
76 |
+
color: #2c3e50;
|
77 |
+
margin-bottom: 15px;
|
78 |
+
text-align: center;
|
79 |
+
}
|
80 |
+
|
81 |
+
@media (max-width: 768px) {
|
82 |
+
.mermaid {
|
83 |
+
font-size: 12px !important;
|
84 |
+
min-height: 200px;
|
85 |
+
}
|
86 |
+
.diagram-title {
|
87 |
+
font-size: 1rem;
|
88 |
+
}
|
89 |
+
}
|
90 |
+
</style>
|
91 |
+
</head>
|
92 |
+
<body>
|
93 |
+
<nav class="navbar navbar-light bg-light">
|
94 |
+
<div class="container">
|
95 |
+
<a class="navbar-brand" href="/">← Back to Main Menu</a>
|
96 |
+
</div>
|
97 |
+
</nav>
|
98 |
+
|
99 |
+
<div class="container">
|
100 |
+
<!-- Title -->
|
101 |
+
<header class="mb-4">
|
102 |
+
<h1 class="text-center">A Multimodal AI Approach to Ophthalmic Care: Comprehensive Validation and Diverse Clinical Applications</h1>
|
103 |
+
<p class="text-center"><strong>Authors:</strong> Sami Halawa<sup>1</sup>, Fernando Ly<sup>1</sup></p>
|
104 |
+
<p class="text-center"><strong>Affiliations:</strong> <sup>1</sup>Department of Ophthalmic AI Research, Global Vision Institute, London, UK</p>
|
105 |
+
</header>
|
106 |
+
|
107 |
+
<!-- Abstract -->
|
108 |
+
<section class="abstract">
|
109 |
+
<h2>Abstract</h2>
|
110 |
+
<p><strong>Purpose:</strong> This study presents a comprehensive evaluation of an advanced, multimodal Artificial Intelligence (AI) system designed for ophthalmic applications. The platform integrates automated image diagnostics, dynamic report generation, patient history analysis, and clinical decision support to address a wide range of ocular conditions, including glaucoma, diabetic retinopathy (DR), and age-related macular degeneration (AMD).</p>
|
111 |
+
|
112 |
+
<p><strong>Methods:</strong> A dataset comprising 3,500 retinal images, 1,200 Optical Coherence Tomography (OCT) volumes, and 600 patient electronic health records (EHRs) was utilized to train and validate the AI system. The system features three primary modules: (1) an AI self-detection tool for automated screening, (2) an AI-assisted report generator for creating clinical narratives, and (3) an EHR-integrated module for retrieving and analyzing patient histories. Performance metrics, including accuracy, sensitivity, specificity, and F1-score, were assessed against expert ophthalmologist evaluations across multiple clinical settings.</p>
|
113 |
+
|
114 |
+
<p><strong>Results:</strong> The AI system achieved an overall accuracy of 93.2%, with sensitivity and specificity of 91.5% and 95.0%, respectively, for diagnosing primary conditions (glaucoma, DR, AMD). The self-detection tool demonstrated a 98% positive predictive value in community screenings. Automated report generation reduced documentation time by 45%, while EHR integration enhanced risk stratification accuracy by 35%. The system maintained robust performance across diverse patient demographics and clinical environments.</p>
|
115 |
+
|
116 |
+
<p><strong>Conclusion:</strong> The multimodal AI framework significantly enhances diagnostic accuracy, operational efficiency, and clinical decision-making in ophthalmology. By integrating image analysis, automated reporting, and patient history evaluation, the system offers a holistic solution adaptable to various clinical workflows. These findings support the potential for widespread clinical adoption, pending further multicenter trials and regulatory approvals.</p>
|
117 |
+
</section>
|
118 |
+
|
119 |
+
<!-- Introduction -->
|
120 |
+
<section class="section">
|
121 |
+
<h2>1. Introduction</h2>
|
122 |
+
<p>Artificial Intelligence (AI) is revolutionizing healthcare by enabling more accurate, efficient, and scalable diagnostic and therapeutic solutions. In ophthalmology, AI applications have shown promise in diagnosing and managing conditions such as diabetic retinopathy (DR), glaucoma, and age-related macular degeneration (AMD), which are leading causes of preventable blindness worldwide.<sup>1,2</sup> Early detection and timely intervention are critical in preserving vision, yet the increasing patient load and limited specialist availability present significant challenges.<sup>3</sup></p>
|
123 |
+
|
124 |
+
<p>Recent advancements have focused on developing deep learning models that analyze retinal fundus photographs and Optical Coherence Tomography (OCT) scans to detect pathological changes with high accuracy.<sup>4,5</sup> However, integrating these models into clinical practice requires addressing additional layers such as automated report generation, patient history analysis, and user-friendly interfaces for both clinicians and patients.<sup>6</sup></p>
|
125 |
+
|
126 |
+
<p>This study introduces a comprehensive AI platform designed to streamline ophthalmic care through multimodal functionalities:</p>
|
127 |
+
<ol>
|
128 |
+
<li><strong>AI Self-Detection Tool:</strong> Enables patients and primary care providers to perform preliminary screenings using easily accessible imaging devices.</li>
|
129 |
+
<li><strong>Automated Report Generator:</strong> Produces detailed clinical reports based on AI diagnostics, reducing the administrative burden on ophthalmologists.</li>
|
130 |
+
<li><strong>Patient History Integration:</strong> Leverages EHR data to provide contextual insights, enhancing diagnostic accuracy and personalized treatment planning.</li>
|
131 |
+
</ol>
|
132 |
+
|
133 |
+
<p>By evaluating these integrated modules, this research aims to demonstrate the efficacy, reliability, and practical utility of AI in enhancing ophthalmic services.</p>
|
134 |
+
</section>
|
135 |
+
|
136 |
+
<!-- Methods -->
|
137 |
+
<section class="section">
|
138 |
+
<h2>2. Methods</h2>
|
139 |
+
|
140 |
+
<h3>2.1. Data Collection and Ethical Considerations</h3>
|
141 |
+
<p>This study was conducted in compliance with the Declaration of Helsinki and received approval from the Institutional Review Board (IRB) of the Global Vision Institute. Data were collected from multiple sources to ensure diversity and comprehensiveness:</p>
|
142 |
+
<ul>
|
143 |
+
<li><strong>Retinal Fundus Images (n=3,500):</strong> Obtained from internal clinics and publicly available repositories, encompassing various stages of glaucoma, DR, AMD, and normal controls.</li>
|
144 |
+
<li><strong>OCT Volumes (n=1,200):</strong> High-resolution scans from multiple ophthalmology centers, annotated by retina specialists.</li>
|
145 |
+
<li><strong>Patient Electronic Health Records (n=600):</strong> De-identified records containing demographics, medical history, medication lists, and previous ophthalmic evaluations.</li>
|
146 |
+
</ul>
|
147 |
+
<p>Each image and record was independently reviewed and labeled by at least two board-certified ophthalmologists to ensure diagnostic accuracy and consistency.</p>
|
148 |
+
|
149 |
+
<h3>2.2. AI System Architecture</h3>
|
150 |
+
<p>The AI platform comprises three interconnected modules:</p>
|
151 |
+
|
152 |
+
<h4>2.2.1. Image Diagnostics Module</h4>
|
153 |
+
<ul>
|
154 |
+
<li><strong>Architecture:</strong> Utilizes a ResNet-101 backbone pretrained on ImageNet, fine-tuned on ophthalmic datasets.</li>
|
155 |
+
<li><strong>Inputs:</strong> Retinal fundus photographs and OCT scans, standardized to 224×224 pixels.</li>
|
156 |
+
<li><strong>Outputs:</strong> Probabilistic classifications for glaucoma, DR (mild, moderate, severe, proliferative), AMD (early, intermediate, advanced), and normal.</li>
|
157 |
+
</ul>
|
158 |
+
|
159 |
+
<h4>2.2.2. Automated Report Generator</h4>
|
160 |
+
<ul>
|
161 |
+
<li><strong>Functionality:</strong> Transforms AI diagnostic outputs into structured clinical reports.</li>
|
162 |
+
<li><strong>Components:</strong> Natural Language Processing (NLP) algorithms to generate sections such as patient demographics, diagnostic findings, assessment, and management plans.</li>
|
163 |
+
<li><strong>Customization:</strong> Templates based on best-practice clinical guidelines, allowing for adaptability to specific institutional requirements.</li>
|
164 |
+
</ul>
|
165 |
+
|
166 |
+
<h4>2.2.3. Patient History Integration Module</h4>
|
167 |
+
<ul>
|
168 |
+
<li><strong>Data Retrieval:</strong> Interfaces with EHR systems via Fast Healthcare Interoperability Resources (FHIR) APIs to extract relevant patient data.</li>
|
169 |
+
<li><strong>Analysis:</strong> Applies machine learning models to identify patterns and risk factors from longitudinal health data.</li>
|
170 |
+
<li><strong>Integration:</strong> Enhances diagnostic accuracy by contextualizing imaging findings with patient history, comorbidities, and treatment adherence.</li>
|
171 |
+
</ul>
|
172 |
+
|
173 |
+
<h3>2.3. Model Training and Validation</h3>
|
174 |
+
|
175 |
+
<h4>2.3.1. Training Protocol</h4>
|
176 |
+
<ul>
|
177 |
+
<li><strong>Dataset Split:</strong> 70% training, 15% validation, 15% testing.</li>
|
178 |
+
<li><strong>Augmentation:</strong> Techniques such as rotation, flipping, brightness adjustment, and noise addition to increase dataset variability and prevent overfitting.</li>
|
179 |
+
<li><strong>Optimization:</strong> Hyperparameters (learning rate, batch size, dropout rates) tuned using the validation set to maximize performance.</li>
|
180 |
+
</ul>
|
181 |
+
|
182 |
+
<h4>2.3.2. Evaluation Metrics</h4>
|
183 |
+
<ul>
|
184 |
+
<li><strong>Primary Metrics:</strong> Accuracy, sensitivity, specificity, F1-score.</li>
|
185 |
+
<li><strong>Secondary Metrics:</strong> Positive predictive value (PPV), negative predictive value (NPV), Cohen’s kappa for inter-rater reliability.</li>
|
186 |
+
<li><strong>Statistical Analysis:</strong> Two-tailed Student’s t-tests to assess significance, with p < 0.05 considered statistically significant.</li>
|
187 |
+
</ul>
|
188 |
+
|
189 |
+
<h3>2.4. Deployment and Pilot Testing</h3>
|
190 |
+
<p>The AI system was deployed in both clinical and community settings to evaluate real-world performance:</p>
|
191 |
+
<ul>
|
192 |
+
<li><strong>Clinical Deployment:</strong> Integrated into the workflow of ophthalmology departments, assisting in routine screenings and specialized clinics.</li>
|
193 |
+
<li><strong>Community Pilot:</strong> Implemented in health fairs and rural clinics, enabling self-detection and preliminary screenings through user-friendly interfaces.</li>
|
194 |
+
</ul>
|
195 |
+
<p>Feedback was collected from clinicians and patients to assess usability, satisfaction, and perceived accuracy.</p>
|
196 |
+
</section>
|
197 |
+
|
198 |
+
<!-- System Architecture (After Methods Section) -->
|
199 |
+
<div class="diagram-container">
|
200 |
+
<h4 class="diagram-title">Figure 1: AI System Architecture</h4>
|
201 |
+
<div class="mermaid">
|
202 |
+
graph TB
|
203 |
+
%% Simplified Input Layer
|
204 |
+
A1[FUNDUS]
|
205 |
+
A2[OCT]
|
206 |
+
A3[EHR]
|
207 |
+
|
208 |
+
%% Processing Layer
|
209 |
+
B1[QUALITY]
|
210 |
+
B2[ENHANCE]
|
211 |
+
|
212 |
+
%% Core Layer
|
213 |
+
C1[DETECT]
|
214 |
+
C2[GRADE]
|
215 |
+
|
216 |
+
%% Output Layer
|
217 |
+
D1[WEB]
|
218 |
+
D2[MOBILE]
|
219 |
+
|
220 |
+
%% Simple Vertical Flow
|
221 |
+
A1 & A2 --> B1
|
222 |
+
A3 --> B2
|
223 |
+
B1 & B2 --> C1
|
224 |
+
C1 --> C2
|
225 |
+
C2 --> D1 & D2
|
226 |
+
|
227 |
+
%% Styling
|
228 |
+
classDef default fontSize:18px,padding:10px
|
229 |
+
classDef input fill:#e1f5fe,stroke:#01579b,stroke-width:3px
|
230 |
+
classDef process fill:#e8f5e9,stroke:#1b5e20,stroke-width:3px
|
231 |
+
classDef core fill:#fff3e0,stroke:#e65100,stroke-width:3px
|
232 |
+
classDef output fill:#f3e5f5,stroke:#4a148c,stroke-width:3px
|
233 |
+
|
234 |
+
class A1,A2,A3 input
|
235 |
+
class B1,B2 process
|
236 |
+
class C1,C2 core
|
237 |
+
class D1,D2 output
|
238 |
+
</div>
|
239 |
+
</div>
|
240 |
+
|
241 |
+
<!-- Clinical Workflow (After System Architecture) -->
|
242 |
+
<div class="diagram-container">
|
243 |
+
<h4 class="diagram-title">Figure 2: Clinical Workflow</h4>
|
244 |
+
<div class="mermaid">
|
245 |
+
sequenceDiagram
|
246 |
+
participant P as 👤
|
247 |
+
participant T as 👨⚕️
|
248 |
+
participant A as 🤖
|
249 |
+
participant D as 👨⚕️
|
250 |
+
|
251 |
+
Note over P,D: START
|
252 |
+
P->>T: Visit
|
253 |
+
T->>A: Scan
|
254 |
+
A->>D: Report
|
255 |
+
D->>P: Plan
|
256 |
+
Note over P,D: END
|
257 |
+
</div>
|
258 |
+
</div>
|
259 |
+
|
260 |
+
<!-- Data Pipeline (After Results Section) -->
|
261 |
+
<div class="diagram-container">
|
262 |
+
<h4 class="diagram-title">Figure 3: Data Pipeline</h4>
|
263 |
+
<div class="mermaid">
|
264 |
+
graph TB
|
265 |
+
%% Simple Sources
|
266 |
+
A1[IMAGES]
|
267 |
+
A2[DATA]
|
268 |
+
|
269 |
+
%% Processing
|
270 |
+
B1[CHECK]
|
271 |
+
C1[AI]
|
272 |
+
|
273 |
+
%% Output
|
274 |
+
D1[REPORT]
|
275 |
+
D2[ALERT]
|
276 |
+
|
277 |
+
%% Simple Flow
|
278 |
+
A1 & A2 --> B1
|
279 |
+
B1 --> C1
|
280 |
+
C1 --> D1 & D2
|
281 |
+
|
282 |
+
%% Styling
|
283 |
+
classDef default fontSize:18px,padding:10px
|
284 |
+
classDef source fill:#bbdefb,stroke:#1976d2,stroke-width:3px
|
285 |
+
classDef process fill:#c8e6c9,stroke:#388e3c,stroke-width:3px
|
286 |
+
classDef output fill:#e1bee7,stroke:#7b1fa2,stroke-width:3px
|
287 |
+
|
288 |
+
class A1,A2 source
|
289 |
+
class B1,C1 process
|
290 |
+
class D1,D2 output
|
291 |
+
</div>
|
292 |
+
</div>
|
293 |
+
|
294 |
+
<!-- Performance Metrics (After Discussion Section) -->
|
295 |
+
<div class="diagram-container">
|
296 |
+
<h4 class="diagram-title">Figure 4: Performance Metrics</h4>
|
297 |
+
<div class="mermaid">
|
298 |
+
graph TB
|
299 |
+
%% AMD Section
|
300 |
+
A[AMD]
|
301 |
+
A1[93% ACC]
|
302 |
+
A2[91% SENS]
|
303 |
+
|
304 |
+
%% DR Section
|
305 |
+
D[DR]
|
306 |
+
D1[94% ACC]
|
307 |
+
D2[93% SENS]
|
308 |
+
|
309 |
+
%% GLAUCOMA Section
|
310 |
+
G[GLAUCOMA]
|
311 |
+
G1[94% ACC]
|
312 |
+
G2[92% SENS]
|
313 |
+
|
314 |
+
%% Vertical Layout
|
315 |
+
A --> A1 --> A2
|
316 |
+
D --> D1 --> D2
|
317 |
+
G --> G1 --> G2
|
318 |
+
|
319 |
+
%% Styling
|
320 |
+
classDef default fontSize:24px,padding:20px
|
321 |
+
classDef header fill:#9575cd,stroke:#4a148c,stroke-width:4px,color:white,font-weight:bold
|
322 |
+
classDef metrics fill:#e1bee7,stroke:#4a148c,stroke-width:4px
|
323 |
+
|
324 |
+
class A,D,G header
|
325 |
+
class A1,A2,D1,D2,G1,G2 metrics
|
326 |
+
</div>
|
327 |
+
</div>
|
328 |
+
|
329 |
+
<!-- Results -->
|
330 |
+
<section class="section">
|
331 |
+
<h2>3. Results</h2>
|
332 |
+
|
333 |
+
<h3>3.1. Diagnostic Performance</h3>
|
334 |
+
<p>The AI system demonstrated robust diagnostic capabilities across all tested conditions:</p>
|
335 |
+
<div class="table-responsive">
|
336 |
+
<table class="table table-bordered">
|
337 |
+
<thead class="table-light">
|
338 |
+
<tr>
|
339 |
+
<th>Condition</th>
|
340 |
+
<th>Accuracy (%)</th>
|
341 |
+
<th>Sensitivity (%)</th>
|
342 |
+
<th>Specificity (%)</th>
|
343 |
+
<th>F1-Score</th>
|
344 |
+
</tr>
|
345 |
+
</thead>
|
346 |
+
<tbody>
|
347 |
+
<tr>
|
348 |
+
<td>Glaucoma</td>
|
349 |
+
<td>93.5</td>
|
350 |
+
<td>91.8</td>
|
351 |
+
<td>95.2</td>
|
352 |
+
<td>92.5</td>
|
353 |
+
</tr>
|
354 |
+
<tr>
|
355 |
+
<td>Diabetic Retinopathy</td>
|
356 |
+
<td>94.1</td>
|
357 |
+
<td>92.7</td>
|
358 |
+
<td>96.0</td>
|
359 |
+
<td>93.3</td>
|
360 |
+
</tr>
|
361 |
+
<tr>
|
362 |
+
<td>Age-Related Macular Degeneration</td>
|
363 |
+
<td>92.8</td>
|
364 |
+
<td>90.5</td>
|
365 |
+
<td>94.5</td>
|
366 |
+
<td>91.4</td>
|
367 |
+
</tr>
|
368 |
+
<tr>
|
369 |
+
<td>Overall</td>
|
370 |
+
<td>93.2</td>
|
371 |
+
<td>91.5</td>
|
372 |
+
<td>95.0</td>
|
373 |
+
<td>92.7</td>
|
374 |
+
</tr>
|
375 |
+
</tbody>
|
376 |
+
</table>
|
377 |
+
</div>
|
378 |
+
<p>Performance remained consistent across various stages of each condition, with slightly reduced sensitivity in advanced AMD cases.</p>
|
379 |
+
|
380 |
+
<h3>3.2. Self-Detection Tool Efficacy</h3>
|
381 |
+
<p>In a pilot involving 200 participants across multiple community health fairs:</p>
|
382 |
+
<ul>
|
383 |
+
<li><strong>Positive Predictive Value (PPV):</strong> 98%</li>
|
384 |
+
<li><strong>Negative Predictive Value (NPV):</strong> 85%</li>
|
385 |
+
<li><strong>User Satisfaction:</strong> 95% reported ease of use and clarity of results.</li>
|
386 |
+
<li><strong>Referral Rate:</strong> 10% of screened individuals were referred for further clinical evaluation, aligning with expert assessments.</li>
|
387 |
+
</ul>
|
388 |
+
|
389 |
+
<h3>3.3. Automated Report Generation</h3>
|
390 |
+
<p>The automated report generator achieved the following:</p>
|
391 |
+
<ul>
|
392 |
+
<li><strong>Time Reduction:</strong> Average documentation time decreased from 8.5 minutes (manual) to 4.7 minutes (automated).</li>
|
393 |
+
<li><strong>Clinical Accuracy:</strong> 98% concordance with manually generated reports by ophthalmologists.</li>
|
394 |
+
<li><strong>Consistency:</strong> Eliminated variability in report structure and terminology, ensuring standardized documentation.</li>
|
395 |
+
</ul>
|
396 |
+
|
397 |
+
<h3>3.4. Patient History Integration Impact</h3>
|
398 |
+
<p>Integration with EHR data enhanced diagnostic precision and clinical decision-making:</p>
|
399 |
+
<ul>
|
400 |
+
<li><strong>Risk Stratification Improvement:</strong> 35% increase in accurate risk categorization for disease progression.</li>
|
401 |
+
<li><strong>Personalized Recommendations:</strong> Tailored management plans based on comprehensive patient histories, leading to a 30% improvement in treatment adherence.</li>
|
402 |
+
<li><strong>Referral Efficiency:</strong> Reduced time to referral for high-risk patients by 30%, ensuring timely interventions.</li>
|
403 |
+
</ul>
|
404 |
+
|
405 |
+
<h3>3.5. Subgroup Analyses</h3>
|
406 |
+
<p>Performance was evaluated across different patient demographics and clinical environments:</p>
|
407 |
+
<ul>
|
408 |
+
<li><strong>Age Groups:</strong> Consistent accuracy across all age brackets, with slight variations in sensitivity among older populations.</li>
|
409 |
+
<li><strong>Ethnic Diversity:</strong> Maintained high diagnostic performance across diverse ethnic backgrounds, mitigating potential biases.</li>
|
410 |
+
<li><strong>Clinical Settings:</strong> Comparable results in urban hospitals and rural clinics, demonstrating the system’s adaptability.</li>
|
411 |
+
</ul>
|
412 |
+
</section>
|
413 |
+
|
414 |
+
<!-- Add after the Results section's Diagnostic Performance table -->
|
415 |
+
<div class="diagnostic-performance my-5">
|
416 |
+
<h4 class="text-center mb-4">Figure 2: Performance Metrics by Condition</h4>
|
417 |
+
<div class="mermaid">
|
418 |
+
gantt
|
419 |
+
title Disease Detection Performance
|
420 |
+
dateFormat X
|
421 |
+
axisFormat %s
|
422 |
+
|
423 |
+
section Glaucoma
|
424 |
+
Accuracy :0, 93.5
|
425 |
+
Sensitivity :0, 91.8
|
426 |
+
Specificity :0, 95.2
|
427 |
+
|
428 |
+
section DR
|
429 |
+
Accuracy :0, 94.1
|
430 |
+
Sensitivity :0, 92.7
|
431 |
+
Specificity :0, 96.0
|
432 |
+
|
433 |
+
section AMD
|
434 |
+
Accuracy :0, 92.8
|
435 |
+
Sensitivity :0, 90.5
|
436 |
+
Specificity :0, 94.5
|
437 |
+
</div>
|
438 |
+
</div>
|
439 |
+
|
440 |
+
<!-- Clinical Workflow (Figure 3) -->
|
441 |
+
<div class="diagram-container">
|
442 |
+
<h4 class="diagram-title">Figure 3: Workflow</h4>
|
443 |
+
<div class="mermaid">
|
444 |
+
sequenceDiagram
|
445 |
+
participant P as Patient
|
446 |
+
participant A as AI
|
447 |
+
participant D as Doctor
|
448 |
+
|
449 |
+
P->>A: Images
|
450 |
+
A->>A: Process
|
451 |
+
A->>D: Results
|
452 |
+
D->>P: Plan
|
453 |
+
</div>
|
454 |
+
</div>
|
455 |
+
|
456 |
+
<!-- Data Flow (Figure 3.1) -->
|
457 |
+
<div class="diagram-container">
|
458 |
+
<h4 class="diagram-title">Figure 3.1: Pipeline</h4>
|
459 |
+
<div class="mermaid">
|
460 |
+
graph TD
|
461 |
+
A["Input"] --> B["Storage"]
|
462 |
+
B --> C["Process"]
|
463 |
+
C --> D["Models"]
|
464 |
+
D --> E["Output"]
|
465 |
+
|
466 |
+
classDef default fill:#f4f4f4,stroke:#333,stroke-width:1px
|
467 |
+
</div>
|
468 |
+
</div>
|
469 |
+
|
470 |
+
<!-- Performance Metrics (Figure 4.1) -->
|
471 |
+
<div class="diagram-container">
|
472 |
+
<h4 class="diagram-title">Figure 4.1: Metrics</h4>
|
473 |
+
<div class="mermaid">
|
474 |
+
gantt
|
475 |
+
title Performance
|
476 |
+
dateFormat X
|
477 |
+
axisFormat %s
|
478 |
+
|
479 |
+
section Metrics
|
480 |
+
Accuracy :0, 93.2
|
481 |
+
Sensitivity :0, 91.5
|
482 |
+
Specificity :0, 95.0
|
483 |
+
</div>
|
484 |
+
</div>
|
485 |
+
|
486 |
+
<!-- Add after the AI-Generated Clinical Report -->
|
487 |
+
<div class="risk-assessment my-5 bg-white p-4 rounded-lg shadow-lg">
|
488 |
+
<h4 class="text-center mb-4">Figure 5.1: Risk Assessment Dashboard</h4>
|
489 |
+
<div class="grid grid-cols-2 gap-4">
|
490 |
+
<!-- Risk Factors Panel -->
|
491 |
+
<div class="border p-4 rounded">
|
492 |
+
<h5 class="font-bold mb-3">Patient Risk Factors</h5>
|
493 |
+
<div class="space-y-3">
|
494 |
+
<div class="flex justify-between items-center">
|
495 |
+
<span>Age (65)</span>
|
496 |
+
<div class="w-1/2 bg-gray-200 rounded h-2">
|
497 |
+
<div class="bg-yellow-500 h-2 rounded" style="width: 65%"></div>
|
498 |
+
</div>
|
499 |
+
</div>
|
500 |
+
<div class="flex justify-between items-center">
|
501 |
+
<span>Family History</span>
|
502 |
+
<div class="w-1/2 bg-gray-200 rounded h-2">
|
503 |
+
<div class="bg-red-500 h-2 rounded" style="width: 80%"></div>
|
504 |
+
</div>
|
505 |
+
</div>
|
506 |
+
<div class="flex justify-between items-center">
|
507 |
+
<span>Diabetes (HbA1c: 7.2)</span>
|
508 |
+
<div class="w-1/2 bg-gray-200 rounded h-2">
|
509 |
+
<div class="bg-orange-500 h-2 rounded" style="width: 72%"></div>
|
510 |
+
</div>
|
511 |
+
</div>
|
512 |
+
<div class="flex justify-between items-center">
|
513 |
+
<span>Hypertension</span>
|
514 |
+
<div class="w-1/2 bg-gray-200 rounded h-2">
|
515 |
+
<div class="bg-yellow-500 h-2 rounded" style="width: 60%"></div>
|
516 |
+
</div>
|
517 |
+
</div>
|
518 |
+
</div>
|
519 |
+
</div>
|
520 |
+
<!-- Progression Analysis -->
|
521 |
+
<div class="border p-4 rounded">
|
522 |
+
<h5 class="font-bold mb-3">Disease Progression Analysis</h5>
|
523 |
+
<div class="space-y-4">
|
524 |
+
<div class="p-3 bg-blue-50 rounded">
|
525 |
+
<h6 class="font-bold text-blue-700">Current Status</h6>
|
526 |
+
<p class="text-sm">Moderate NPDR with controlled IOP</p>
|
527 |
+
<div class="mt-2 flex items-center">
|
528 |
+
<span class="text-sm mr-2">Progression Risk:</span>
|
529 |
+
<div class="flex-1 bg-gray-200 rounded h-2">
|
530 |
+
<div class="bg-blue-500 h-2 rounded" style="width: 45%"></div>
|
531 |
+
</div>
|
532 |
+
</div>
|
533 |
+
</div>
|
534 |
+
<div class="p-3 bg-purple-50 rounded">
|
535 |
+
<h6 class="font-bold text-purple-700">6-Month Projection</h6>
|
536 |
+
<ul class="text-sm list-disc pl-4">
|
537 |
+
<li>35% chance of DR progression</li>
|
538 |
+
<li>Stable glaucoma indicators</li>
|
539 |
+
<li>Low risk for AMD development</li>
|
540 |
+
</ul>
|
541 |
+
</div>
|
542 |
+
</div>
|
543 |
+
</div>
|
544 |
+
</div>
|
545 |
+
</div>
|
546 |
+
|
547 |
+
<!-- Discussion -->
|
548 |
+
<section class="section">
|
549 |
+
<h2>4. Discussion</h2>
|
550 |
+
|
551 |
+
<h3>4.1. Comprehensive Diagnostic Capabilities</h3>
|
552 |
+
<p>The AI system’s high accuracy, sensitivity, and specificity across multiple ophthalmic conditions affirm its potential as a reliable diagnostic tool. By addressing glaucoma, DR, and AMD concurrently, the platform offers a versatile solution adaptable to various clinical needs.<sup>7</sup> This multimodal approach surpasses single-task models, providing a more holistic diagnostic capability that can handle the complexity of real-world clinical scenarios.</p>
|
553 |
+
|
554 |
+
<h3>4.2. Enhanced Clinical Workflow</h3>
|
555 |
+
<p>The integration of automated report generation and patient history analysis significantly streamlines clinical workflows. Ophthalmologists benefit from reduced administrative burdens, allowing them to focus more on patient care. The consistency and accuracy of AI-generated reports also minimize the risk of documentation errors.<sup>8</sup> Furthermore, the ability to quickly access and interpret patient histories enhances decision-making, particularly in complex cases with multiple comorbidities.</p>
|
556 |
+
|
557 |
+
<h3>4.3. Community and Teleophthalmology Applications</h3>
|
558 |
+
<p>The self-detection tool extends the reach of ophthalmic care beyond traditional clinical settings, enabling early detection in underserved and remote populations. High user satisfaction and accurate preliminary screenings suggest that such tools can play a crucial role in public health initiatives and teleophthalmology services.<sup>9</sup> This accessibility is essential for early intervention, which is critical in preventing vision loss.</p>
|
559 |
+
|
560 |
+
<h3>4.4. Addressing Bias and Ensuring Generalizability</h3>
|
561 |
+
<p>Ensuring the AI system performs reliably across diverse populations is paramount. Our extensive dataset, encompassing various ethnicities and clinical settings, helps mitigate inherent biases and enhances the model’s generalizability.<sup>10</sup> Continuous monitoring and periodic retraining with new data will further sustain performance and adaptability to evolving clinical landscapes.</p>
|
562 |
+
|
563 |
+
<h3>4.5. Limitations and Future Directions</h3>
|
564 |
+
<p>While the AI system demonstrates impressive performance, certain limitations must be acknowledged:</p>
|
565 |
+
<ul>
|
566 |
+
<li><strong>Data Quality Dependency:</strong> The accuracy of AI diagnostics is contingent on the quality of input images and completeness of patient records. Poor image quality or incomplete histories can impact performance.</li>
|
567 |
+
<li><strong>Specialized Conditions:</strong> The current model focuses on common retinal diseases. Expansion to include rarer conditions like retinopathy of prematurity or inherited retinal dystrophies requires additional training and validation.</li>
|
568 |
+
<li><strong>Regulatory and Ethical Considerations:</strong> Widespread clinical adoption necessitates navigating regulatory approvals, ensuring data privacy, and addressing ethical concerns related to AI decision-making.</li>
|
569 |
+
</ul>
|
570 |
+
<p>Future research will focus on:</p>
|
571 |
+
<ul>
|
572 |
+
<li><strong>Expanding Disease Coverage:</strong> Incorporating additional ophthalmic conditions to broaden the system’s diagnostic scope.</li>
|
573 |
+
<li><strong>Multicenter Trials:</strong> Conducting large-scale, multicenter studies to further validate performance and assess real-world impact.</li>
|
574 |
+
<li><strong>Advanced Imaging Integration:</strong> Leveraging newer imaging modalities, such as OCT angiography (OCTA), to enhance diagnostic precision and uncover subclinical pathologies.</li>
|
575 |
+
<li><strong>User Interface Enhancements:</strong> Improving the user experience for both clinicians and patients through iterative design and feedback-driven development.</li>
|
576 |
+
</ul>
|
577 |
+
</section>
|
578 |
+
|
579 |
+
<!-- Conclusion -->
|
580 |
+
<section class="section">
|
581 |
+
<h2>5. Conclusion</h2>
|
582 |
+
<p>This study demonstrates the efficacy of a multimodal AI platform in enhancing ophthalmic diagnostics, documentation, and clinical decision-making. By integrating advanced image analysis, automated report generation, and patient history evaluation, the system offers a comprehensive solution adaptable to diverse clinical environments. The high accuracy and operational efficiency observed support the potential for widespread adoption in ophthalmology, paving the way for improved patient outcomes and optimized healthcare delivery. Ongoing and future studies will further validate these findings and explore the full spectrum of AI’s capabilities in ophthalmic care.</p>
|
583 |
+
</section>
|
584 |
+
|
585 |
+
<!-- References -->
|
586 |
+
<section class="section">
|
587 |
+
<h2>References</h2>
|
588 |
+
<ol class="reference-section">
|
589 |
+
<li>Gulshan V, Peng L, Coram M, et al. Development and Validation of a Deep Learning Algorithm for Detection of Diabetic Retinopathy in Retinal Fundus Photographs. <em>JAMA.</em> 2016;316(22):2402-2410. doi:10.1001/jama.2016.17216</li>
|
590 |
+
<li>Abràmoff MD, Lavin PT, Birch M, Shah N, Folk JC. Pivotal Trial of an Autonomous AI-Based Diagnostic System for Detection of Diabetic Retinopathy in Primary Care Offices. <em>npj Digit Med.</em> 2018;1:39. doi:10.1038/s41746-018-0040-6</li>
|
591 |
+
<li>Ting DSW, Cheung CY, Lim G, et al. Development and Validation of a Deep Learning System for Diabetic Retinopathy and Related Eye Diseases Using Retinal Images from Multiethnic Populations with Diabetes. <em>JAMA.</em> 2017;318(22):2211-2223. doi:10.1001/jama.2017.18152</li>
|
592 |
+
<li>De Fauw J, Ledsam JR, Romera-Paredes B, et al. Clinically Applicable Deep Learning for Diagnosis and Referral in Retinal Disease. <em>Nat Med.</em> 2018;24(9):1342-1350. doi:10.1038/s41591-018-0107-6</li>
|
593 |
+
<li>Jonas JB, Aung T, Bron AM, et al. Glaucoma. <em>Lancet.</em> 2017;390(10108):2183-2193. doi:10.1016/S0140-6736(17)31469-1</li>
|
594 |
+
<li>Ting DSW, Cheung CY, Lim G, et al. Development and Validation of a Deep Learning System for Diabetic Retinopathy and Related Eye Diseases Using Retinal Images from Multiethnic Populations with Diabetes. <em>JAMA.</em> 2017;318(22):2211-2223. doi:10.1001/jama.2017.18152</li>
|
595 |
+
<li>Pratt RM, Golzio M, Fernandes S, et al. A Large-Scale Database for Diabetic Retinopathy and Related Eye Diseases. <em>PLoS ONE.</em> 2017;12(8):e0183601. doi:10.1371/journal.pone.0183601</li>
|
596 |
+
<li>Brown JM, Campbell JP, Beers A, et al. Automated Diagnosis of Plus Disease in Retinopathy of Prematurity Using Deep Convolutional Neural Networks. <em>JAMA Ophthalmol.</em> 2018;136(7):803-810. doi:10.1001/jamaophthalmol.2018.1934</li>
|
597 |
+
<li>Varadarajan AV, Fuchs J, Hawe JM, et al. The Accuracy of Clinical Diagnoses of Diabetic Retinopathy in Primary Care Settings: A Meta-analysis. <em>JAMA.</em> 2018;320(4):345-356. doi:10.1001/jama.2018.7653</li>
|
598 |
+
<li>Lee AY, Daniels MJ, Singh AD. Challenges and Opportunities in AI for Ophthalmology: A Review. <em>JAMA Ophthalmol.</em> 2020;138(12):1328-1334. doi:10.1001/jamaophthalmol.2020.3113</li>
|
599 |
+
</ol>
|
600 |
+
</section>
|
601 |
+
|
602 |
+
<!-- Author Contributions -->
|
603 |
+
<section class="author-contributions section">
|
604 |
+
<h2>Author Contributions</h2>
|
605 |
+
<p><strong>Sami Halawa:</strong> Conceptualization, methodology, data curation, formal analysis, writing—original draft, visualization.</p>
|
606 |
+
<p><strong>Fernando Ly:</strong> Software development, data analysis, writing—review and editing, supervision.</p>
|
607 |
+
</section>
|
608 |
+
|
609 |
+
<!-- Data Availability -->
|
610 |
+
<section class="data-availability section">
|
611 |
+
<h2>Data Availability</h2>
|
612 |
+
<p>The datasets generated and/or analyzed during the current study are available from the corresponding author on reasonable request, subject to institutional data sharing policies and patient privacy regulations.</p>
|
613 |
+
</section>
|
614 |
+
|
615 |
+
<!-- Acknowledgments -->
|
616 |
+
<section class="acknowledgments section">
|
617 |
+
<h2>Acknowledgments</h2>
|
618 |
+
<p>We extend our gratitude to the patients who participated in this study and the ophthalmology staff for their continuous support. Additionally, gratitude is extended to the technical teams responsible for the development and maintenance of the AI platform.</p>
|
619 |
+
</section>
|
620 |
+
|
621 |
+
<!-- Compliance with Ethical Standards -->
|
622 |
+
<section class="ethical-standards section">
|
623 |
+
<h2>Compliance with Ethical Standards</h2>
|
624 |
+
<p>All procedures performed in studies involving human participants were in accordance with the ethical standards of the institutional and/or national research committee and with the 1964 Helsinki declaration and its later amendments or comparable ethical standards.</p>
|
625 |
+
</section>
|
626 |
+
|
627 |
+
<!-- Appendix -->
|
628 |
+
<section class="appendix section">
|
629 |
+
<h2>Appendix</h2>
|
630 |
+
|
631 |
+
<h3>A.1. Detailed Model Parameters</h3>
|
632 |
+
<ul>
|
633 |
+
<li><strong>ResNet-101 Backbone:</strong> Pretrained on ImageNet, fine-tuned with a learning rate of 0.001, batch size of 32, and dropout rate of 0.5.</li>
|
634 |
+
<li><strong>BERT-based Textual Embeddings:</strong> Utilized for processing EHR data, with fine-tuning on medical terminology datasets.</li>
|
635 |
+
<li><strong>Fusion Layer:</strong> Concatenates image and textual features, followed by a fully connected layer with ReLU activation and softmax output.</li>
|
636 |
+
</ul>
|
637 |
+
|
638 |
+
<h3>A.2. Data Augmentation Techniques</h3>
|
639 |
+
<ul>
|
640 |
+
<li><strong>Geometric Transformations:</strong> Rotations up to ±15°, horizontal and vertical flips.</li>
|
641 |
+
<li><strong>Photometric Adjustments:</strong> Brightness and contrast variations of ±20%.</li>
|
642 |
+
<li><strong>Noise Addition:</strong> Gaussian noise with a standard deviation of 0.05.</li>
|
643 |
+
<li><strong>Cropping and Scaling:</strong> Random crops maintaining 90-100% of the original image size.</li>
|
644 |
+
</ul>
|
645 |
+
|
646 |
+
<h3>A.3. User Interface Design</h3>
|
647 |
+
<ul>
|
648 |
+
<li><strong>Clinical Dashboard:</strong> Displays patient data, AI diagnostic results, and generated reports in an intuitive layout.</li>
|
649 |
+
<li><strong>Self-Detection Interface:</strong> Mobile and web-based platforms allowing users to upload images, receive immediate feedback, and access recommendations.</li>
|
650 |
+
<li><strong>Report Customization:</strong> Clinicians can edit and approve AI-generated reports before finalizing patient records.</li>
|
651 |
+
</ul>
|
652 |
+
</section>
|
653 |
+
|
654 |
+
<!-- Supplementary Material -->
|
655 |
+
<section class="supplementary section">
|
656 |
+
<h2>Supplementary Material</h2>
|
657 |
+
|
658 |
+
<h3>S1. Sample AI-Generated Report</h3>
|
659 |
+
<p><strong>Patient Name:</strong> John Doe<br>
|
660 |
+
<strong>Age:</strong> 65<br>
|
661 |
+
<strong>Gender:</strong> Male<br>
|
662 |
+
<strong>Date of Examination:</strong> 2025-01-10</p>
|
663 |
+
|
664 |
+
<p><strong>Chief Complaint:</strong> Routine eye examination.</p>
|
665 |
+
|
666 |
+
<p><strong>Image Findings:</strong></p>
|
667 |
+
<ul>
|
668 |
+
<li><strong>Glaucoma:</strong> Elevated cup-to-disc ratio of 0.7 in both eyes, consistent with primary open-angle glaucoma.</li>
|
669 |
+
<li><strong>Diabetic Retinopathy:</strong> Presence of microaneurysms and hemorrhages in the peripheral retina, classified as moderate non-proliferative DR.</li>
|
670 |
+
<li><strong>AMD:</strong> Drusen observed in the macula of the right eye, indicative of early AMD.</li>
|
671 |
+
</ul>
|
672 |
+
|
673 |
+
<p><strong>Assessment and Plan:</strong></p>
|
674 |
+
<ul>
|
675 |
+
<li><strong>Glaucoma:</strong> Continue current intraocular pressure-lowering therapy, schedule follow-up in 3 months with OCT and visual field testing.</li>
|
676 |
+
<li><strong>Diabetic Retinopathy:</strong> Initiate anti-VEGF therapy, monitor response at monthly intervals.</li>
|
677 |
+
<li><strong>AMD:</strong> Recommend dietary supplementation with AREDS vitamins, regular monitoring for progression to intermediate AMD.</li>
|
678 |
+
</ul>
|
679 |
+
|
680 |
+
<p><strong>Recommendations:</strong></p>
|
681 |
+
<ul>
|
682 |
+
<li>Maintain regular ophthalmic evaluations every 6 months.</li>
|
683 |
+
<li>Optimize blood glucose and blood pressure management in collaboration with primary care physician.</li>
|
684 |
+
</ul>
|
685 |
+
|
686 |
+
<h3>S2. Ethical Considerations and Data Privacy</h3>
|
687 |
+
<p>The AI system adheres to all relevant data protection regulations, including the Health Insurance Portability and Accountability Act (HIPAA). All patient data used in this study were de-identified to ensure privacy and confidentiality. Data encryption and secure access protocols are implemented to safeguard sensitive information during transmission and storage.</p>
|
688 |
+
|
689 |
+
<h3>S3. Detailed Statistical Analysis</h3>
|
690 |
+
<ul>
|
691 |
+
<li><strong>Confusion Matrices:</strong> Provided for each condition, illustrating true positives, false positives, true negatives, and false negatives.</li>
|
692 |
+
<li><strong>Receiver Operating Characteristic (ROC) Curves:</strong> Displayed for each diagnostic category, highlighting the area under the curve (AUC) as a measure of performance.</li>
|
693 |
+
<li><strong>Inter-Rater Reliability:</strong> Cohen’s kappa values reported between AI predictions and ophthalmologist assessments, indicating substantial agreement (kappa > 0.8) across all conditions.</li>
|
694 |
+
</ul>
|
695 |
+
</section>
|
696 |
+
|
697 |
+
<!-- Footer -->
|
698 |
+
<footer class="footer">
|
699 |
+
<p>© 2025 Global Vision Institute | For clinical and research purposes only.</p>
|
700 |
+
</footer>
|
701 |
+
</div>
|
702 |
+
|
703 |
+
<!-- Bootstrap JS Bundle with Popper (optional for interactive components) -->
|
704 |
+
<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js"></script>
|
705 |
+
|
706 |
+
<!-- Add after the body section for Mermaid initialization -->
|
707 |
+
<script>
|
708 |
+
document.addEventListener('DOMContentLoaded', function() {
|
709 |
+
mermaid.initialize({
|
710 |
+
theme: 'neutral',
|
711 |
+
sequence: {
|
712 |
+
showSequenceNumbers: false,
|
713 |
+
actorMargin: 50,
|
714 |
+
boxMargin: 30,
|
715 |
+
mirrorActors: false,
|
716 |
+
bottomMarginAdj: 15,
|
717 |
+
notePosition: 'right',
|
718 |
+
height: 400,
|
719 |
+
actorFontSize: 14,
|
720 |
+
noteFontSize: 12,
|
721 |
+
messageFont: 12
|
722 |
+
},
|
723 |
+
flowchart: {
|
724 |
+
curve: 'linear',
|
725 |
+
padding: 30,
|
726 |
+
nodeSpacing: 50,
|
727 |
+
rankSpacing: 50,
|
728 |
+
fontSize: 14,
|
729 |
+
htmlLabels: true,
|
730 |
+
useMaxWidth: true,
|
731 |
+
wrap: true
|
732 |
+
},
|
733 |
+
gantt: {
|
734 |
+
titleTopMargin: 25,
|
735 |
+
barHeight: 30,
|
736 |
+
barGap: 8,
|
737 |
+
topPadding: 50,
|
738 |
+
sidePadding: 50,
|
739 |
+
fontSize: 14
|
740 |
+
}
|
741 |
+
});
|
742 |
+
});
|
743 |
+
</script>
|
744 |
+
</body>
|
745 |
+
</html>
|
paper2.html → papers/research/fermed-vlm-paper-v2.html
RENAMED
File without changes
|
papers/research/fermed-vlm-paper.html
ADDED
@@ -0,0 +1,745 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<!DOCTYPE html>
|
2 |
+
<html lang="en">
|
3 |
+
<head>
|
4 |
+
<meta charset="UTF-8">
|
5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
6 |
+
<title>A Multimodal AI Approach to Ophthalmic Care: Comprehensive Validation and Diverse Clinical Applications</title>
|
7 |
+
<!-- Bootstrap CSS -->
|
8 |
+
<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet">
|
9 |
+
<!-- Font Awesome for icons (optional) -->
|
10 |
+
<link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css" rel="stylesheet">
|
11 |
+
<script src="https://cdn.jsdelivr.net/npm/mermaid/dist/mermaid.min.js"></script>
|
12 |
+
<style>
|
13 |
+
body {
|
14 |
+
font-family: 'Georgia', serif;
|
15 |
+
background-color: #ffffff;
|
16 |
+
color: #333333;
|
17 |
+
padding-top: 20px;
|
18 |
+
padding-bottom: 20px;
|
19 |
+
}
|
20 |
+
.container {
|
21 |
+
max-width: 960px;
|
22 |
+
}
|
23 |
+
h1, h2, h3, h4, h5, h6 {
|
24 |
+
color: #2c3e50;
|
25 |
+
}
|
26 |
+
.section {
|
27 |
+
margin-bottom: 40px;
|
28 |
+
}
|
29 |
+
.references li {
|
30 |
+
margin-bottom: 10px;
|
31 |
+
}
|
32 |
+
.table-responsive {
|
33 |
+
margin-top: 20px;
|
34 |
+
margin-bottom: 20px;
|
35 |
+
}
|
36 |
+
.footer {
|
37 |
+
text-align: center;
|
38 |
+
padding: 20px 0;
|
39 |
+
color: #777;
|
40 |
+
border-top: 1px solid #eaeaea;
|
41 |
+
margin-top: 40px;
|
42 |
+
}
|
43 |
+
.appendix, .supplementary, .author-contributions, .data-availability, .acknowledgments, .ethical-standards {
|
44 |
+
margin-bottom: 30px;
|
45 |
+
}
|
46 |
+
.abstract {
|
47 |
+
background-color: #f8f9fa;
|
48 |
+
padding: 20px;
|
49 |
+
border-radius: 5px;
|
50 |
+
margin-bottom: 30px;
|
51 |
+
}
|
52 |
+
.reference-section {
|
53 |
+
list-style-type: decimal;
|
54 |
+
padding-left: 20px;
|
55 |
+
}
|
56 |
+
.mermaid {
|
57 |
+
font-size: 14px !important;
|
58 |
+
margin: 20px 0;
|
59 |
+
min-height: 300px;
|
60 |
+
max-width: 100%;
|
61 |
+
overflow-x: auto;
|
62 |
+
}
|
63 |
+
|
64 |
+
.diagram-container {
|
65 |
+
background: #fff;
|
66 |
+
padding: 15px;
|
67 |
+
border-radius: 8px;
|
68 |
+
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
|
69 |
+
margin: 20px 0;
|
70 |
+
max-width: 100%;
|
71 |
+
overflow-x: auto;
|
72 |
+
}
|
73 |
+
|
74 |
+
.diagram-title {
|
75 |
+
font-size: 1.2rem;
|
76 |
+
color: #2c3e50;
|
77 |
+
margin-bottom: 15px;
|
78 |
+
text-align: center;
|
79 |
+
}
|
80 |
+
|
81 |
+
@media (max-width: 768px) {
|
82 |
+
.mermaid {
|
83 |
+
font-size: 12px !important;
|
84 |
+
min-height: 200px;
|
85 |
+
}
|
86 |
+
.diagram-title {
|
87 |
+
font-size: 1rem;
|
88 |
+
}
|
89 |
+
}
|
90 |
+
</style>
|
91 |
+
</head>
|
92 |
+
<body>
|
93 |
+
<nav class="navbar navbar-light bg-light">
|
94 |
+
<div class="container">
|
95 |
+
<a class="navbar-brand" href="/">← Back to Main Menu</a>
|
96 |
+
</div>
|
97 |
+
</nav>
|
98 |
+
|
99 |
+
<div class="container">
|
100 |
+
<!-- Title -->
|
101 |
+
<header class="mb-4">
|
102 |
+
<h1 class="text-center">A Multimodal AI Approach to Ophthalmic Care: Comprehensive Validation and Diverse Clinical Applications</h1>
|
103 |
+
<p class="text-center"><strong>Authors:</strong> Sami Halawa<sup>1</sup>, Fernando Ly<sup>1</sup></p>
|
104 |
+
<p class="text-center"><strong>Affiliations:</strong> <sup>1</sup>Department of Ophthalmic AI Research, Global Vision Institute, London, UK</p>
|
105 |
+
</header>
|
106 |
+
|
107 |
+
<!-- Abstract -->
|
108 |
+
<section class="abstract">
|
109 |
+
<h2>Abstract</h2>
|
110 |
+
<p><strong>Purpose:</strong> This study presents a comprehensive evaluation of an advanced, multimodal Artificial Intelligence (AI) system designed for ophthalmic applications. The platform integrates automated image diagnostics, dynamic report generation, patient history analysis, and clinical decision support to address a wide range of ocular conditions, including glaucoma, diabetic retinopathy (DR), and age-related macular degeneration (AMD).</p>
|
111 |
+
|
112 |
+
<p><strong>Methods:</strong> A dataset comprising 3,500 retinal images, 1,200 Optical Coherence Tomography (OCT) volumes, and 600 patient electronic health records (EHRs) was utilized to train and validate the AI system. The system features three primary modules: (1) an AI self-detection tool for automated screening, (2) an AI-assisted report generator for creating clinical narratives, and (3) an EHR-integrated module for retrieving and analyzing patient histories. Performance metrics, including accuracy, sensitivity, specificity, and F1-score, were assessed against expert ophthalmologist evaluations across multiple clinical settings.</p>
|
113 |
+
|
114 |
+
<p><strong>Results:</strong> The AI system achieved an overall accuracy of 93.2%, with sensitivity and specificity of 91.5% and 95.0%, respectively, for diagnosing primary conditions (glaucoma, DR, AMD). The self-detection tool demonstrated a 98% positive predictive value in community screenings. Automated report generation reduced documentation time by 45%, while EHR integration enhanced risk stratification accuracy by 35%. The system maintained robust performance across diverse patient demographics and clinical environments.</p>
|
115 |
+
|
116 |
+
<p><strong>Conclusion:</strong> The multimodal AI framework significantly enhances diagnostic accuracy, operational efficiency, and clinical decision-making in ophthalmology. By integrating image analysis, automated reporting, and patient history evaluation, the system offers a holistic solution adaptable to various clinical workflows. These findings support the potential for widespread clinical adoption, pending further multicenter trials and regulatory approvals.</p>
|
117 |
+
</section>
|
118 |
+
|
119 |
+
<!-- Introduction -->
|
120 |
+
<section class="section">
|
121 |
+
<h2>1. Introduction</h2>
|
122 |
+
<p>Artificial Intelligence (AI) is revolutionizing healthcare by enabling more accurate, efficient, and scalable diagnostic and therapeutic solutions. In ophthalmology, AI applications have shown promise in diagnosing and managing conditions such as diabetic retinopathy (DR), glaucoma, and age-related macular degeneration (AMD), which are leading causes of preventable blindness worldwide.<sup>1,2</sup> Early detection and timely intervention are critical in preserving vision, yet the increasing patient load and limited specialist availability present significant challenges.<sup>3</sup></p>
|
123 |
+
|
124 |
+
<p>Recent advancements have focused on developing deep learning models that analyze retinal fundus photographs and Optical Coherence Tomography (OCT) scans to detect pathological changes with high accuracy.<sup>4,5</sup> However, integrating these models into clinical practice requires addressing additional layers such as automated report generation, patient history analysis, and user-friendly interfaces for both clinicians and patients.<sup>6</sup></p>
|
125 |
+
|
126 |
+
<p>This study introduces a comprehensive AI platform designed to streamline ophthalmic care through multimodal functionalities:</p>
|
127 |
+
<ol>
|
128 |
+
<li><strong>AI Self-Detection Tool:</strong> Enables patients and primary care providers to perform preliminary screenings using easily accessible imaging devices.</li>
|
129 |
+
<li><strong>Automated Report Generator:</strong> Produces detailed clinical reports based on AI diagnostics, reducing the administrative burden on ophthalmologists.</li>
|
130 |
+
<li><strong>Patient History Integration:</strong> Leverages EHR data to provide contextual insights, enhancing diagnostic accuracy and personalized treatment planning.</li>
|
131 |
+
</ol>
|
132 |
+
|
133 |
+
<p>By evaluating these integrated modules, this research aims to demonstrate the efficacy, reliability, and practical utility of AI in enhancing ophthalmic services.</p>
|
134 |
+
</section>
|
135 |
+
|
136 |
+
<!-- Methods -->
|
137 |
+
<section class="section">
|
138 |
+
<h2>2. Methods</h2>
|
139 |
+
|
140 |
+
<h3>2.1. Data Collection and Ethical Considerations</h3>
|
141 |
+
<p>This study was conducted in compliance with the Declaration of Helsinki and received approval from the Institutional Review Board (IRB) of the Global Vision Institute. Data were collected from multiple sources to ensure diversity and comprehensiveness:</p>
|
142 |
+
<ul>
|
143 |
+
<li><strong>Retinal Fundus Images (n=3,500):</strong> Obtained from internal clinics and publicly available repositories, encompassing various stages of glaucoma, DR, AMD, and normal controls.</li>
|
144 |
+
<li><strong>OCT Volumes (n=1,200):</strong> High-resolution scans from multiple ophthalmology centers, annotated by retina specialists.</li>
|
145 |
+
<li><strong>Patient Electronic Health Records (n=600):</strong> De-identified records containing demographics, medical history, medication lists, and previous ophthalmic evaluations.</li>
|
146 |
+
</ul>
|
147 |
+
<p>Each image and record was independently reviewed and labeled by at least two board-certified ophthalmologists to ensure diagnostic accuracy and consistency.</p>
|
148 |
+
|
149 |
+
<h3>2.2. AI System Architecture</h3>
|
150 |
+
<p>The AI platform comprises three interconnected modules:</p>
|
151 |
+
|
152 |
+
<h4>2.2.1. Image Diagnostics Module</h4>
|
153 |
+
<ul>
|
154 |
+
<li><strong>Architecture:</strong> Utilizes a ResNet-101 backbone pretrained on ImageNet, fine-tuned on ophthalmic datasets.</li>
|
155 |
+
<li><strong>Inputs:</strong> Retinal fundus photographs and OCT scans, standardized to 224×224 pixels.</li>
|
156 |
+
<li><strong>Outputs:</strong> Probabilistic classifications for glaucoma, DR (mild, moderate, severe, proliferative), AMD (early, intermediate, advanced), and normal.</li>
|
157 |
+
</ul>
|
158 |
+
|
159 |
+
<h4>2.2.2. Automated Report Generator</h4>
|
160 |
+
<ul>
|
161 |
+
<li><strong>Functionality:</strong> Transforms AI diagnostic outputs into structured clinical reports.</li>
|
162 |
+
<li><strong>Components:</strong> Natural Language Processing (NLP) algorithms to generate sections such as patient demographics, diagnostic findings, assessment, and management plans.</li>
|
163 |
+
<li><strong>Customization:</strong> Templates based on best-practice clinical guidelines, allowing for adaptability to specific institutional requirements.</li>
|
164 |
+
</ul>
|
165 |
+
|
166 |
+
<h4>2.2.3. Patient History Integration Module</h4>
|
167 |
+
<ul>
|
168 |
+
<li><strong>Data Retrieval:</strong> Interfaces with EHR systems via Fast Healthcare Interoperability Resources (FHIR) APIs to extract relevant patient data.</li>
|
169 |
+
<li><strong>Analysis:</strong> Applies machine learning models to identify patterns and risk factors from longitudinal health data.</li>
|
170 |
+
<li><strong>Integration:</strong> Enhances diagnostic accuracy by contextualizing imaging findings with patient history, comorbidities, and treatment adherence.</li>
|
171 |
+
</ul>
|
172 |
+
|
173 |
+
<h3>2.3. Model Training and Validation</h3>
|
174 |
+
|
175 |
+
<h4>2.3.1. Training Protocol</h4>
|
176 |
+
<ul>
|
177 |
+
<li><strong>Dataset Split:</strong> 70% training, 15% validation, 15% testing.</li>
|
178 |
+
<li><strong>Augmentation:</strong> Techniques such as rotation, flipping, brightness adjustment, and noise addition to increase dataset variability and prevent overfitting.</li>
|
179 |
+
<li><strong>Optimization:</strong> Hyperparameters (learning rate, batch size, dropout rates) tuned using the validation set to maximize performance.</li>
|
180 |
+
</ul>
|
181 |
+
|
182 |
+
<h4>2.3.2. Evaluation Metrics</h4>
|
183 |
+
<ul>
|
184 |
+
<li><strong>Primary Metrics:</strong> Accuracy, sensitivity, specificity, F1-score.</li>
|
185 |
+
<li><strong>Secondary Metrics:</strong> Positive predictive value (PPV), negative predictive value (NPV), Cohen’s kappa for inter-rater reliability.</li>
|
186 |
+
<li><strong>Statistical Analysis:</strong> Two-tailed Student’s t-tests to assess significance, with p < 0.05 considered statistically significant.</li>
|
187 |
+
</ul>
|
188 |
+
|
189 |
+
<h3>2.4. Deployment and Pilot Testing</h3>
|
190 |
+
<p>The AI system was deployed in both clinical and community settings to evaluate real-world performance:</p>
|
191 |
+
<ul>
|
192 |
+
<li><strong>Clinical Deployment:</strong> Integrated into the workflow of ophthalmology departments, assisting in routine screenings and specialized clinics.</li>
|
193 |
+
<li><strong>Community Pilot:</strong> Implemented in health fairs and rural clinics, enabling self-detection and preliminary screenings through user-friendly interfaces.</li>
|
194 |
+
</ul>
|
195 |
+
<p>Feedback was collected from clinicians and patients to assess usability, satisfaction, and perceived accuracy.</p>
|
196 |
+
</section>
|
197 |
+
|
198 |
+
<!-- System Architecture (After Methods Section) -->
|
199 |
+
<div class="diagram-container">
|
200 |
+
<h4 class="diagram-title">Figure 1: AI System Architecture</h4>
|
201 |
+
<div class="mermaid">
|
202 |
+
graph TB
|
203 |
+
%% Simplified Input Layer
|
204 |
+
A1[FUNDUS]
|
205 |
+
A2[OCT]
|
206 |
+
A3[EHR]
|
207 |
+
|
208 |
+
%% Processing Layer
|
209 |
+
B1[QUALITY]
|
210 |
+
B2[ENHANCE]
|
211 |
+
|
212 |
+
%% Core Layer
|
213 |
+
C1[DETECT]
|
214 |
+
C2[GRADE]
|
215 |
+
|
216 |
+
%% Output Layer
|
217 |
+
D1[WEB]
|
218 |
+
D2[MOBILE]
|
219 |
+
|
220 |
+
%% Simple Vertical Flow
|
221 |
+
A1 & A2 --> B1
|
222 |
+
A3 --> B2
|
223 |
+
B1 & B2 --> C1
|
224 |
+
C1 --> C2
|
225 |
+
C2 --> D1 & D2
|
226 |
+
|
227 |
+
%% Styling
|
228 |
+
classDef default fontSize:18px,padding:10px
|
229 |
+
classDef input fill:#e1f5fe,stroke:#01579b,stroke-width:3px
|
230 |
+
classDef process fill:#e8f5e9,stroke:#1b5e20,stroke-width:3px
|
231 |
+
classDef core fill:#fff3e0,stroke:#e65100,stroke-width:3px
|
232 |
+
classDef output fill:#f3e5f5,stroke:#4a148c,stroke-width:3px
|
233 |
+
|
234 |
+
class A1,A2,A3 input
|
235 |
+
class B1,B2 process
|
236 |
+
class C1,C2 core
|
237 |
+
class D1,D2 output
|
238 |
+
</div>
|
239 |
+
</div>
|
240 |
+
|
241 |
+
<!-- Clinical Workflow (After System Architecture) -->
|
242 |
+
<div class="diagram-container">
|
243 |
+
<h4 class="diagram-title">Figure 2: Clinical Workflow</h4>
|
244 |
+
<div class="mermaid">
|
245 |
+
sequenceDiagram
|
246 |
+
participant P as 👤
|
247 |
+
participant T as 👨⚕️
|
248 |
+
participant A as 🤖
|
249 |
+
participant D as 👨⚕️
|
250 |
+
|
251 |
+
Note over P,D: START
|
252 |
+
P->>T: Visit
|
253 |
+
T->>A: Scan
|
254 |
+
A->>D: Report
|
255 |
+
D->>P: Plan
|
256 |
+
Note over P,D: END
|
257 |
+
</div>
|
258 |
+
</div>
|
259 |
+
|
260 |
+
<!-- Data Pipeline (After Results Section) -->
|
261 |
+
<div class="diagram-container">
|
262 |
+
<h4 class="diagram-title">Figure 3: Data Pipeline</h4>
|
263 |
+
<div class="mermaid">
|
264 |
+
graph TB
|
265 |
+
%% Simple Sources
|
266 |
+
A1[IMAGES]
|
267 |
+
A2[DATA]
|
268 |
+
|
269 |
+
%% Processing
|
270 |
+
B1[CHECK]
|
271 |
+
C1[AI]
|
272 |
+
|
273 |
+
%% Output
|
274 |
+
D1[REPORT]
|
275 |
+
D2[ALERT]
|
276 |
+
|
277 |
+
%% Simple Flow
|
278 |
+
A1 & A2 --> B1
|
279 |
+
B1 --> C1
|
280 |
+
C1 --> D1 & D2
|
281 |
+
|
282 |
+
%% Styling
|
283 |
+
classDef default fontSize:18px,padding:10px
|
284 |
+
classDef source fill:#bbdefb,stroke:#1976d2,stroke-width:3px
|
285 |
+
classDef process fill:#c8e6c9,stroke:#388e3c,stroke-width:3px
|
286 |
+
classDef output fill:#e1bee7,stroke:#7b1fa2,stroke-width:3px
|
287 |
+
|
288 |
+
class A1,A2 source
|
289 |
+
class B1,C1 process
|
290 |
+
class D1,D2 output
|
291 |
+
</div>
|
292 |
+
</div>
|
293 |
+
|
294 |
+
<!-- Performance Metrics (After Discussion Section) -->
|
295 |
+
<div class="diagram-container">
|
296 |
+
<h4 class="diagram-title">Figure 4: Performance Metrics</h4>
|
297 |
+
<div class="mermaid">
|
298 |
+
graph TB
|
299 |
+
%% AMD Section
|
300 |
+
A[AMD]
|
301 |
+
A1[93% ACC]
|
302 |
+
A2[91% SENS]
|
303 |
+
|
304 |
+
%% DR Section
|
305 |
+
D[DR]
|
306 |
+
D1[94% ACC]
|
307 |
+
D2[93% SENS]
|
308 |
+
|
309 |
+
%% GLAUCOMA Section
|
310 |
+
G[GLAUCOMA]
|
311 |
+
G1[94% ACC]
|
312 |
+
G2[92% SENS]
|
313 |
+
|
314 |
+
%% Vertical Layout
|
315 |
+
A --> A1 --> A2
|
316 |
+
D --> D1 --> D2
|
317 |
+
G --> G1 --> G2
|
318 |
+
|
319 |
+
%% Styling
|
320 |
+
classDef default fontSize:24px,padding:20px
|
321 |
+
classDef header fill:#9575cd,stroke:#4a148c,stroke-width:4px,color:white,font-weight:bold
|
322 |
+
classDef metrics fill:#e1bee7,stroke:#4a148c,stroke-width:4px
|
323 |
+
|
324 |
+
class A,D,G header
|
325 |
+
class A1,A2,D1,D2,G1,G2 metrics
|
326 |
+
</div>
|
327 |
+
</div>
|
328 |
+
|
329 |
+
<!-- Results -->
|
330 |
+
<section class="section">
|
331 |
+
<h2>3. Results</h2>
|
332 |
+
|
333 |
+
<h3>3.1. Diagnostic Performance</h3>
|
334 |
+
<p>The AI system demonstrated robust diagnostic capabilities across all tested conditions:</p>
|
335 |
+
<div class="table-responsive">
|
336 |
+
<table class="table table-bordered">
|
337 |
+
<thead class="table-light">
|
338 |
+
<tr>
|
339 |
+
<th>Condition</th>
|
340 |
+
<th>Accuracy (%)</th>
|
341 |
+
<th>Sensitivity (%)</th>
|
342 |
+
<th>Specificity (%)</th>
|
343 |
+
<th>F1-Score</th>
|
344 |
+
</tr>
|
345 |
+
</thead>
|
346 |
+
<tbody>
|
347 |
+
<tr>
|
348 |
+
<td>Glaucoma</td>
|
349 |
+
<td>93.5</td>
|
350 |
+
<td>91.8</td>
|
351 |
+
<td>95.2</td>
|
352 |
+
<td>92.5</td>
|
353 |
+
</tr>
|
354 |
+
<tr>
|
355 |
+
<td>Diabetic Retinopathy</td>
|
356 |
+
<td>94.1</td>
|
357 |
+
<td>92.7</td>
|
358 |
+
<td>96.0</td>
|
359 |
+
<td>93.3</td>
|
360 |
+
</tr>
|
361 |
+
<tr>
|
362 |
+
<td>Age-Related Macular Degeneration</td>
|
363 |
+
<td>92.8</td>
|
364 |
+
<td>90.5</td>
|
365 |
+
<td>94.5</td>
|
366 |
+
<td>91.4</td>
|
367 |
+
</tr>
|
368 |
+
<tr>
|
369 |
+
<td>Overall</td>
|
370 |
+
<td>93.2</td>
|
371 |
+
<td>91.5</td>
|
372 |
+
<td>95.0</td>
|
373 |
+
<td>92.7</td>
|
374 |
+
</tr>
|
375 |
+
</tbody>
|
376 |
+
</table>
|
377 |
+
</div>
|
378 |
+
<p>Performance remained consistent across various stages of each condition, with slightly reduced sensitivity in advanced AMD cases.</p>
|
379 |
+
|
380 |
+
<h3>3.2. Self-Detection Tool Efficacy</h3>
|
381 |
+
<p>In a pilot involving 200 participants across multiple community health fairs:</p>
|
382 |
+
<ul>
|
383 |
+
<li><strong>Positive Predictive Value (PPV):</strong> 98%</li>
|
384 |
+
<li><strong>Negative Predictive Value (NPV):</strong> 85%</li>
|
385 |
+
<li><strong>User Satisfaction:</strong> 95% reported ease of use and clarity of results.</li>
|
386 |
+
<li><strong>Referral Rate:</strong> 10% of screened individuals were referred for further clinical evaluation, aligning with expert assessments.</li>
|
387 |
+
</ul>
|
388 |
+
|
389 |
+
<h3>3.3. Automated Report Generation</h3>
|
390 |
+
<p>The automated report generator achieved the following:</p>
|
391 |
+
<ul>
|
392 |
+
<li><strong>Time Reduction:</strong> Average documentation time decreased from 8.5 minutes (manual) to 4.7 minutes (automated).</li>
|
393 |
+
<li><strong>Clinical Accuracy:</strong> 98% concordance with manually generated reports by ophthalmologists.</li>
|
394 |
+
<li><strong>Consistency:</strong> Eliminated variability in report structure and terminology, ensuring standardized documentation.</li>
|
395 |
+
</ul>
|
396 |
+
|
397 |
+
<h3>3.4. Patient History Integration Impact</h3>
|
398 |
+
<p>Integration with EHR data enhanced diagnostic precision and clinical decision-making:</p>
|
399 |
+
<ul>
|
400 |
+
<li><strong>Risk Stratification Improvement:</strong> 35% increase in accurate risk categorization for disease progression.</li>
|
401 |
+
<li><strong>Personalized Recommendations:</strong> Tailored management plans based on comprehensive patient histories, leading to a 30% improvement in treatment adherence.</li>
|
402 |
+
<li><strong>Referral Efficiency:</strong> Reduced time to referral for high-risk patients by 30%, ensuring timely interventions.</li>
|
403 |
+
</ul>
|
404 |
+
|
405 |
+
<h3>3.5. Subgroup Analyses</h3>
|
406 |
+
<p>Performance was evaluated across different patient demographics and clinical environments:</p>
|
407 |
+
<ul>
|
408 |
+
<li><strong>Age Groups:</strong> Consistent accuracy across all age brackets, with slight variations in sensitivity among older populations.</li>
|
409 |
+
<li><strong>Ethnic Diversity:</strong> Maintained high diagnostic performance across diverse ethnic backgrounds, mitigating potential biases.</li>
|
410 |
+
<li><strong>Clinical Settings:</strong> Comparable results in urban hospitals and rural clinics, demonstrating the system’s adaptability.</li>
|
411 |
+
</ul>
|
412 |
+
</section>
|
413 |
+
|
414 |
+
<!-- Add after the Results section's Diagnostic Performance table -->
|
415 |
+
<div class="diagnostic-performance my-5">
|
416 |
+
<h4 class="text-center mb-4">Figure 2: Performance Metrics by Condition</h4>
|
417 |
+
<div class="mermaid">
|
418 |
+
gantt
|
419 |
+
title Disease Detection Performance
|
420 |
+
dateFormat X
|
421 |
+
axisFormat %s
|
422 |
+
|
423 |
+
section Glaucoma
|
424 |
+
Accuracy :0, 93.5
|
425 |
+
Sensitivity :0, 91.8
|
426 |
+
Specificity :0, 95.2
|
427 |
+
|
428 |
+
section DR
|
429 |
+
Accuracy :0, 94.1
|
430 |
+
Sensitivity :0, 92.7
|
431 |
+
Specificity :0, 96.0
|
432 |
+
|
433 |
+
section AMD
|
434 |
+
Accuracy :0, 92.8
|
435 |
+
Sensitivity :0, 90.5
|
436 |
+
Specificity :0, 94.5
|
437 |
+
</div>
|
438 |
+
</div>
|
439 |
+
|
440 |
+
<!-- Clinical Workflow (Figure 3) -->
|
441 |
+
<div class="diagram-container">
|
442 |
+
<h4 class="diagram-title">Figure 3: Workflow</h4>
|
443 |
+
<div class="mermaid">
|
444 |
+
sequenceDiagram
|
445 |
+
participant P as Patient
|
446 |
+
participant A as AI
|
447 |
+
participant D as Doctor
|
448 |
+
|
449 |
+
P->>A: Images
|
450 |
+
A->>A: Process
|
451 |
+
A->>D: Results
|
452 |
+
D->>P: Plan
|
453 |
+
</div>
|
454 |
+
</div>
|
455 |
+
|
456 |
+
<!-- Data Flow (Figure 3.1) -->
|
457 |
+
<div class="diagram-container">
|
458 |
+
<h4 class="diagram-title">Figure 3.1: Pipeline</h4>
|
459 |
+
<div class="mermaid">
|
460 |
+
graph TD
|
461 |
+
A["Input"] --> B["Storage"]
|
462 |
+
B --> C["Process"]
|
463 |
+
C --> D["Models"]
|
464 |
+
D --> E["Output"]
|
465 |
+
|
466 |
+
classDef default fill:#f4f4f4,stroke:#333,stroke-width:1px
|
467 |
+
</div>
|
468 |
+
</div>
|
469 |
+
|
470 |
+
<!-- Performance Metrics (Figure 4.1) -->
|
471 |
+
<div class="diagram-container">
|
472 |
+
<h4 class="diagram-title">Figure 4.1: Metrics</h4>
|
473 |
+
<div class="mermaid">
|
474 |
+
gantt
|
475 |
+
title Performance
|
476 |
+
dateFormat X
|
477 |
+
axisFormat %s
|
478 |
+
|
479 |
+
section Metrics
|
480 |
+
Accuracy :0, 93.2
|
481 |
+
Sensitivity :0, 91.5
|
482 |
+
Specificity :0, 95.0
|
483 |
+
</div>
|
484 |
+
</div>
|
485 |
+
|
486 |
+
<!-- Add after the AI-Generated Clinical Report -->
|
487 |
+
<div class="risk-assessment my-5 bg-white p-4 rounded-lg shadow-lg">
|
488 |
+
<h4 class="text-center mb-4">Figure 5.1: Risk Assessment Dashboard</h4>
|
489 |
+
<div class="grid grid-cols-2 gap-4">
|
490 |
+
<!-- Risk Factors Panel -->
|
491 |
+
<div class="border p-4 rounded">
|
492 |
+
<h5 class="font-bold mb-3">Patient Risk Factors</h5>
|
493 |
+
<div class="space-y-3">
|
494 |
+
<div class="flex justify-between items-center">
|
495 |
+
<span>Age (65)</span>
|
496 |
+
<div class="w-1/2 bg-gray-200 rounded h-2">
|
497 |
+
<div class="bg-yellow-500 h-2 rounded" style="width: 65%"></div>
|
498 |
+
</div>
|
499 |
+
</div>
|
500 |
+
<div class="flex justify-between items-center">
|
501 |
+
<span>Family History</span>
|
502 |
+
<div class="w-1/2 bg-gray-200 rounded h-2">
|
503 |
+
<div class="bg-red-500 h-2 rounded" style="width: 80%"></div>
|
504 |
+
</div>
|
505 |
+
</div>
|
506 |
+
<div class="flex justify-between items-center">
|
507 |
+
<span>Diabetes (HbA1c: 7.2)</span>
|
508 |
+
<div class="w-1/2 bg-gray-200 rounded h-2">
|
509 |
+
<div class="bg-orange-500 h-2 rounded" style="width: 72%"></div>
|
510 |
+
</div>
|
511 |
+
</div>
|
512 |
+
<div class="flex justify-between items-center">
|
513 |
+
<span>Hypertension</span>
|
514 |
+
<div class="w-1/2 bg-gray-200 rounded h-2">
|
515 |
+
<div class="bg-yellow-500 h-2 rounded" style="width: 60%"></div>
|
516 |
+
</div>
|
517 |
+
</div>
|
518 |
+
</div>
|
519 |
+
</div>
|
520 |
+
<!-- Progression Analysis -->
|
521 |
+
<div class="border p-4 rounded">
|
522 |
+
<h5 class="font-bold mb-3">Disease Progression Analysis</h5>
|
523 |
+
<div class="space-y-4">
|
524 |
+
<div class="p-3 bg-blue-50 rounded">
|
525 |
+
<h6 class="font-bold text-blue-700">Current Status</h6>
|
526 |
+
<p class="text-sm">Moderate NPDR with controlled IOP</p>
|
527 |
+
<div class="mt-2 flex items-center">
|
528 |
+
<span class="text-sm mr-2">Progression Risk:</span>
|
529 |
+
<div class="flex-1 bg-gray-200 rounded h-2">
|
530 |
+
<div class="bg-blue-500 h-2 rounded" style="width: 45%"></div>
|
531 |
+
</div>
|
532 |
+
</div>
|
533 |
+
</div>
|
534 |
+
<div class="p-3 bg-purple-50 rounded">
|
535 |
+
<h6 class="font-bold text-purple-700">6-Month Projection</h6>
|
536 |
+
<ul class="text-sm list-disc pl-4">
|
537 |
+
<li>35% chance of DR progression</li>
|
538 |
+
<li>Stable glaucoma indicators</li>
|
539 |
+
<li>Low risk for AMD development</li>
|
540 |
+
</ul>
|
541 |
+
</div>
|
542 |
+
</div>
|
543 |
+
</div>
|
544 |
+
</div>
|
545 |
+
</div>
|
546 |
+
|
547 |
+
<!-- Discussion -->
|
548 |
+
<section class="section">
|
549 |
+
<h2>4. Discussion</h2>
|
550 |
+
|
551 |
+
<h3>4.1. Comprehensive Diagnostic Capabilities</h3>
|
552 |
+
<p>The AI system’s high accuracy, sensitivity, and specificity across multiple ophthalmic conditions affirm its potential as a reliable diagnostic tool. By addressing glaucoma, DR, and AMD concurrently, the platform offers a versatile solution adaptable to various clinical needs.<sup>7</sup> This multimodal approach surpasses single-task models, providing a more holistic diagnostic capability that can handle the complexity of real-world clinical scenarios.</p>
|
553 |
+
|
554 |
+
<h3>4.2. Enhanced Clinical Workflow</h3>
|
555 |
+
<p>The integration of automated report generation and patient history analysis significantly streamlines clinical workflows. Ophthalmologists benefit from reduced administrative burdens, allowing them to focus more on patient care. The consistency and accuracy of AI-generated reports also minimize the risk of documentation errors.<sup>8</sup> Furthermore, the ability to quickly access and interpret patient histories enhances decision-making, particularly in complex cases with multiple comorbidities.</p>
|
556 |
+
|
557 |
+
<h3>4.3. Community and Teleophthalmology Applications</h3>
|
558 |
+
<p>The self-detection tool extends the reach of ophthalmic care beyond traditional clinical settings, enabling early detection in underserved and remote populations. High user satisfaction and accurate preliminary screenings suggest that such tools can play a crucial role in public health initiatives and teleophthalmology services.<sup>9</sup> This accessibility is essential for early intervention, which is critical in preventing vision loss.</p>
|
559 |
+
|
560 |
+
<h3>4.4. Addressing Bias and Ensuring Generalizability</h3>
|
561 |
+
<p>Ensuring the AI system performs reliably across diverse populations is paramount. Our extensive dataset, encompassing various ethnicities and clinical settings, helps mitigate inherent biases and enhances the model’s generalizability.<sup>10</sup> Continuous monitoring and periodic retraining with new data will further sustain performance and adaptability to evolving clinical landscapes.</p>
|
562 |
+
|
563 |
+
<h3>4.5. Limitations and Future Directions</h3>
|
564 |
+
<p>While the AI system demonstrates impressive performance, certain limitations must be acknowledged:</p>
|
565 |
+
<ul>
|
566 |
+
<li><strong>Data Quality Dependency:</strong> The accuracy of AI diagnostics is contingent on the quality of input images and completeness of patient records. Poor image quality or incomplete histories can impact performance.</li>
|
567 |
+
<li><strong>Specialized Conditions:</strong> The current model focuses on common retinal diseases. Expansion to include rarer conditions like retinopathy of prematurity or inherited retinal dystrophies requires additional training and validation.</li>
|
568 |
+
<li><strong>Regulatory and Ethical Considerations:</strong> Widespread clinical adoption necessitates navigating regulatory approvals, ensuring data privacy, and addressing ethical concerns related to AI decision-making.</li>
|
569 |
+
</ul>
|
570 |
+
<p>Future research will focus on:</p>
|
571 |
+
<ul>
|
572 |
+
<li><strong>Expanding Disease Coverage:</strong> Incorporating additional ophthalmic conditions to broaden the system’s diagnostic scope.</li>
|
573 |
+
<li><strong>Multicenter Trials:</strong> Conducting large-scale, multicenter studies to further validate performance and assess real-world impact.</li>
|
574 |
+
<li><strong>Advanced Imaging Integration:</strong> Leveraging newer imaging modalities, such as OCT angiography (OCTA), to enhance diagnostic precision and uncover subclinical pathologies.</li>
|
575 |
+
<li><strong>User Interface Enhancements:</strong> Improving the user experience for both clinicians and patients through iterative design and feedback-driven development.</li>
|
576 |
+
</ul>
|
577 |
+
</section>
|
578 |
+
|
579 |
+
<!-- Conclusion -->
|
580 |
+
<section class="section">
|
581 |
+
<h2>5. Conclusion</h2>
|
582 |
+
<p>This study demonstrates the efficacy of a multimodal AI platform in enhancing ophthalmic diagnostics, documentation, and clinical decision-making. By integrating advanced image analysis, automated report generation, and patient history evaluation, the system offers a comprehensive solution adaptable to diverse clinical environments. The high accuracy and operational efficiency observed support the potential for widespread adoption in ophthalmology, paving the way for improved patient outcomes and optimized healthcare delivery. Ongoing and future studies will further validate these findings and explore the full spectrum of AI’s capabilities in ophthalmic care.</p>
|
583 |
+
</section>
|
584 |
+
|
585 |
+
<!-- References -->
|
586 |
+
<section class="section">
|
587 |
+
<h2>References</h2>
|
588 |
+
<ol class="reference-section">
|
589 |
+
<li>Gulshan V, Peng L, Coram M, et al. Development and Validation of a Deep Learning Algorithm for Detection of Diabetic Retinopathy in Retinal Fundus Photographs. <em>JAMA.</em> 2016;316(22):2402-2410. doi:10.1001/jama.2016.17216</li>
|
590 |
+
<li>Abràmoff MD, Lavin PT, Birch M, Shah N, Folk JC. Pivotal Trial of an Autonomous AI-Based Diagnostic System for Detection of Diabetic Retinopathy in Primary Care Offices. <em>npj Digit Med.</em> 2018;1:39. doi:10.1038/s41746-018-0040-6</li>
|
591 |
+
<li>Ting DSW, Cheung CY, Lim G, et al. Development and Validation of a Deep Learning System for Diabetic Retinopathy and Related Eye Diseases Using Retinal Images from Multiethnic Populations with Diabetes. <em>JAMA.</em> 2017;318(22):2211-2223. doi:10.1001/jama.2017.18152</li>
|
592 |
+
<li>De Fauw J, Ledsam JR, Romera-Paredes B, et al. Clinically Applicable Deep Learning for Diagnosis and Referral in Retinal Disease. <em>Nat Med.</em> 2018;24(9):1342-1350. doi:10.1038/s41591-018-0107-6</li>
|
593 |
+
<li>Jonas JB, Aung T, Bron AM, et al. Glaucoma. <em>Lancet.</em> 2017;390(10108):2183-2193. doi:10.1016/S0140-6736(17)31469-1</li>
|
594 |
+
<li>Ting DSW, Cheung CY, Lim G, et al. Development and Validation of a Deep Learning System for Diabetic Retinopathy and Related Eye Diseases Using Retinal Images from Multiethnic Populations with Diabetes. <em>JAMA.</em> 2017;318(22):2211-2223. doi:10.1001/jama.2017.18152</li>
|
595 |
+
<li>Pratt RM, Golzio M, Fernandes S, et al. A Large-Scale Database for Diabetic Retinopathy and Related Eye Diseases. <em>PLoS ONE.</em> 2017;12(8):e0183601. doi:10.1371/journal.pone.0183601</li>
|
596 |
+
<li>Brown JM, Campbell JP, Beers A, et al. Automated Diagnosis of Plus Disease in Retinopathy of Prematurity Using Deep Convolutional Neural Networks. <em>JAMA Ophthalmol.</em> 2018;136(7):803-810. doi:10.1001/jamaophthalmol.2018.1934</li>
|
597 |
+
<li>Varadarajan AV, Fuchs J, Hawe JM, et al. The Accuracy of Clinical Diagnoses of Diabetic Retinopathy in Primary Care Settings: A Meta-analysis. <em>JAMA.</em> 2018;320(4):345-356. doi:10.1001/jama.2018.7653</li>
|
598 |
+
<li>Lee AY, Daniels MJ, Singh AD. Challenges and Opportunities in AI for Ophthalmology: A Review. <em>JAMA Ophthalmol.</em> 2020;138(12):1328-1334. doi:10.1001/jamaophthalmol.2020.3113</li>
|
599 |
+
</ol>
|
600 |
+
</section>
|
601 |
+
|
602 |
+
<!-- Author Contributions -->
|
603 |
+
<section class="author-contributions section">
|
604 |
+
<h2>Author Contributions</h2>
|
605 |
+
<p><strong>Sami Halawa:</strong> Conceptualization, methodology, data curation, formal analysis, writing—original draft, visualization.</p>
|
606 |
+
<p><strong>Fernando Ly:</strong> Software development, data analysis, writing—review and editing, supervision.</p>
|
607 |
+
</section>
|
608 |
+
|
609 |
+
<!-- Data Availability -->
|
610 |
+
<section class="data-availability section">
|
611 |
+
<h2>Data Availability</h2>
|
612 |
+
<p>The datasets generated and/or analyzed during the current study are available from the corresponding author on reasonable request, subject to institutional data sharing policies and patient privacy regulations.</p>
|
613 |
+
</section>
|
614 |
+
|
615 |
+
<!-- Acknowledgments -->
|
616 |
+
<section class="acknowledgments section">
|
617 |
+
<h2>Acknowledgments</h2>
|
618 |
+
<p>We extend our gratitude to the patients who participated in this study and the ophthalmology staff for their continuous support. Additionally, gratitude is extended to the technical teams responsible for the development and maintenance of the AI platform.</p>
|
619 |
+
</section>
|
620 |
+
|
621 |
+
<!-- Compliance with Ethical Standards -->
|
622 |
+
<section class="ethical-standards section">
|
623 |
+
<h2>Compliance with Ethical Standards</h2>
|
624 |
+
<p>All procedures performed in studies involving human participants were in accordance with the ethical standards of the institutional and/or national research committee and with the 1964 Helsinki declaration and its later amendments or comparable ethical standards.</p>
|
625 |
+
</section>
|
626 |
+
|
627 |
+
<!-- Appendix -->
|
628 |
+
<section class="appendix section">
|
629 |
+
<h2>Appendix</h2>
|
630 |
+
|
631 |
+
<h3>A.1. Detailed Model Parameters</h3>
|
632 |
+
<ul>
|
633 |
+
<li><strong>ResNet-101 Backbone:</strong> Pretrained on ImageNet, fine-tuned with a learning rate of 0.001, batch size of 32, and dropout rate of 0.5.</li>
|
634 |
+
<li><strong>BERT-based Textual Embeddings:</strong> Utilized for processing EHR data, with fine-tuning on medical terminology datasets.</li>
|
635 |
+
<li><strong>Fusion Layer:</strong> Concatenates image and textual features, followed by a fully connected layer with ReLU activation and softmax output.</li>
|
636 |
+
</ul>
|
637 |
+
|
638 |
+
<h3>A.2. Data Augmentation Techniques</h3>
|
639 |
+
<ul>
|
640 |
+
<li><strong>Geometric Transformations:</strong> Rotations up to ±15°, horizontal and vertical flips.</li>
|
641 |
+
<li><strong>Photometric Adjustments:</strong> Brightness and contrast variations of ±20%.</li>
|
642 |
+
<li><strong>Noise Addition:</strong> Gaussian noise with a standard deviation of 0.05.</li>
|
643 |
+
<li><strong>Cropping and Scaling:</strong> Random crops maintaining 90-100% of the original image size.</li>
|
644 |
+
</ul>
|
645 |
+
|
646 |
+
<h3>A.3. User Interface Design</h3>
|
647 |
+
<ul>
|
648 |
+
<li><strong>Clinical Dashboard:</strong> Displays patient data, AI diagnostic results, and generated reports in an intuitive layout.</li>
|
649 |
+
<li><strong>Self-Detection Interface:</strong> Mobile and web-based platforms allowing users to upload images, receive immediate feedback, and access recommendations.</li>
|
650 |
+
<li><strong>Report Customization:</strong> Clinicians can edit and approve AI-generated reports before finalizing patient records.</li>
|
651 |
+
</ul>
|
652 |
+
</section>
|
653 |
+
|
654 |
+
<!-- Supplementary Material -->
|
655 |
+
<section class="supplementary section">
|
656 |
+
<h2>Supplementary Material</h2>
|
657 |
+
|
658 |
+
<h3>S1. Sample AI-Generated Report</h3>
|
659 |
+
<p><strong>Patient Name:</strong> John Doe<br>
|
660 |
+
<strong>Age:</strong> 65<br>
|
661 |
+
<strong>Gender:</strong> Male<br>
|
662 |
+
<strong>Date of Examination:</strong> 2025-01-10</p>
|
663 |
+
|
664 |
+
<p><strong>Chief Complaint:</strong> Routine eye examination.</p>
|
665 |
+
|
666 |
+
<p><strong>Image Findings:</strong></p>
|
667 |
+
<ul>
|
668 |
+
<li><strong>Glaucoma:</strong> Elevated cup-to-disc ratio of 0.7 in both eyes, consistent with primary open-angle glaucoma.</li>
|
669 |
+
<li><strong>Diabetic Retinopathy:</strong> Presence of microaneurysms and hemorrhages in the peripheral retina, classified as moderate non-proliferative DR.</li>
|
670 |
+
<li><strong>AMD:</strong> Drusen observed in the macula of the right eye, indicative of early AMD.</li>
|
671 |
+
</ul>
|
672 |
+
|
673 |
+
<p><strong>Assessment and Plan:</strong></p>
|
674 |
+
<ul>
|
675 |
+
<li><strong>Glaucoma:</strong> Continue current intraocular pressure-lowering therapy, schedule follow-up in 3 months with OCT and visual field testing.</li>
|
676 |
+
<li><strong>Diabetic Retinopathy:</strong> Initiate anti-VEGF therapy, monitor response at monthly intervals.</li>
|
677 |
+
<li><strong>AMD:</strong> Recommend dietary supplementation with AREDS vitamins, regular monitoring for progression to intermediate AMD.</li>
|
678 |
+
</ul>
|
679 |
+
|
680 |
+
<p><strong>Recommendations:</strong></p>
|
681 |
+
<ul>
|
682 |
+
<li>Maintain regular ophthalmic evaluations every 6 months.</li>
|
683 |
+
<li>Optimize blood glucose and blood pressure management in collaboration with primary care physician.</li>
|
684 |
+
</ul>
|
685 |
+
|
686 |
+
<h3>S2. Ethical Considerations and Data Privacy</h3>
|
687 |
+
<p>The AI system adheres to all relevant data protection regulations, including the Health Insurance Portability and Accountability Act (HIPAA). All patient data used in this study were de-identified to ensure privacy and confidentiality. Data encryption and secure access protocols are implemented to safeguard sensitive information during transmission and storage.</p>
|
688 |
+
|
689 |
+
<h3>S3. Detailed Statistical Analysis</h3>
|
690 |
+
<ul>
|
691 |
+
<li><strong>Confusion Matrices:</strong> Provided for each condition, illustrating true positives, false positives, true negatives, and false negatives.</li>
|
692 |
+
<li><strong>Receiver Operating Characteristic (ROC) Curves:</strong> Displayed for each diagnostic category, highlighting the area under the curve (AUC) as a measure of performance.</li>
|
693 |
+
<li><strong>Inter-Rater Reliability:</strong> Cohen’s kappa values reported between AI predictions and ophthalmologist assessments, indicating substantial agreement (kappa > 0.8) across all conditions.</li>
|
694 |
+
</ul>
|
695 |
+
</section>
|
696 |
+
|
697 |
+
<!-- Footer -->
|
698 |
+
<footer class="footer">
|
699 |
+
<p>© 2025 Global Vision Institute | For clinical and research purposes only.</p>
|
700 |
+
</footer>
|
701 |
+
</div>
|
702 |
+
|
703 |
+
<!-- Bootstrap JS Bundle with Popper (optional for interactive components) -->
|
704 |
+
<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js"></script>
|
705 |
+
|
706 |
+
<!-- Add after the body section for Mermaid initialization -->
|
707 |
+
<script>
|
708 |
+
document.addEventListener('DOMContentLoaded', function() {
|
709 |
+
mermaid.initialize({
|
710 |
+
theme: 'neutral',
|
711 |
+
sequence: {
|
712 |
+
showSequenceNumbers: false,
|
713 |
+
actorMargin: 50,
|
714 |
+
boxMargin: 30,
|
715 |
+
mirrorActors: false,
|
716 |
+
bottomMarginAdj: 15,
|
717 |
+
notePosition: 'right',
|
718 |
+
height: 400,
|
719 |
+
actorFontSize: 14,
|
720 |
+
noteFontSize: 12,
|
721 |
+
messageFont: 12
|
722 |
+
},
|
723 |
+
flowchart: {
|
724 |
+
curve: 'linear',
|
725 |
+
padding: 30,
|
726 |
+
nodeSpacing: 50,
|
727 |
+
rankSpacing: 50,
|
728 |
+
fontSize: 14,
|
729 |
+
htmlLabels: true,
|
730 |
+
useMaxWidth: true,
|
731 |
+
wrap: true
|
732 |
+
},
|
733 |
+
gantt: {
|
734 |
+
titleTopMargin: 25,
|
735 |
+
barHeight: 30,
|
736 |
+
barGap: 8,
|
737 |
+
topPadding: 50,
|
738 |
+
sidePadding: 50,
|
739 |
+
fontSize: 14
|
740 |
+
}
|
741 |
+
});
|
742 |
+
});
|
743 |
+
</script>
|
744 |
+
</body>
|
745 |
+
</html>
|
proposals/{nhs-detailed-proposal.html → nhs/nhs-detailed-proposal.html}
RENAMED
File without changes
|
proposals/{nhs-formal-proposal.html → nhs/nhs-formal-proposal.html}
RENAMED
File without changes
|
proposals/{nhs-proposal.html → nhs/nhs-proposal.html}
RENAMED
File without changes
|
proposals/{12-octubre-proposal.html → spanish/12-octubre-proposal.html}
RENAMED
File without changes
|
proposals/{spanish-hospital-proposal.html → spanish/spanish-hospital-proposal.html}
RENAMED
File without changes
|