diff --git "a/0NAzT4oBgHgl3EQftv0k/content/tmp_files/load_file.txt" "b/0NAzT4oBgHgl3EQftv0k/content/tmp_files/load_file.txt" new file mode 100644--- /dev/null +++ "b/0NAzT4oBgHgl3EQftv0k/content/tmp_files/load_file.txt" @@ -0,0 +1,546 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NAzT4oBgHgl3EQftv0k/content/2301.01679v1.pdf,len=545 +page_content='COVID-NET USPRO: AN OPEN-SOURCE EXPLAINABLE FEW-SHOT DEEP PROTOTYPICAL NETWORK TO MONITOR AND DETECT COVID-19 INFECTION FROM POINT-OF-CARE ULTRASOUND IMAGES Jessy Song Department of Systems Design Engineering University of Waterloo Waterloo, ON N2L 3G1, Canada Ashkan Ebadi Digital Technologies Research Centre National Research Council Canada Toronto, ON M5T 3J1, Canada ashkan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NAzT4oBgHgl3EQftv0k/content/2301.01679v1.pdf'} +page_content='ebadi@nrc-cnrc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NAzT4oBgHgl3EQftv0k/content/2301.01679v1.pdf'} +page_content='gc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NAzT4oBgHgl3EQftv0k/content/2301.01679v1.pdf'} +page_content='ca Adrian Florea Department of Emergency Medicine McGill University Montreal,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NAzT4oBgHgl3EQftv0k/content/2301.01679v1.pdf'} +page_content=' QC H4A 3J1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NAzT4oBgHgl3EQftv0k/content/2301.01679v1.pdf'} +page_content=' Canada Pengcheng Xi,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NAzT4oBgHgl3EQftv0k/content/2301.01679v1.pdf'} +page_content=' Stéphane Tremblay Digital Technologies Research Centre National Research Council Canada Ottawa,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NAzT4oBgHgl3EQftv0k/content/2301.01679v1.pdf'} +page_content=' ON K1A 0R6,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NAzT4oBgHgl3EQftv0k/content/2301.01679v1.pdf'} +page_content=' Canada Alexander Wong Department of Systems Design Engineering University of Waterloo Waterloo,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NAzT4oBgHgl3EQftv0k/content/2301.01679v1.pdf'} +page_content=' ON N2L 3G1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NAzT4oBgHgl3EQftv0k/content/2301.01679v1.pdf'} +page_content=' Canada ABSTRACT As the Coronavirus Disease 2019 (COVID-19) continues to impact many aspects of life and the global healthcare systems,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NAzT4oBgHgl3EQftv0k/content/2301.01679v1.pdf'} +page_content=' the adoption of rapid and effective screening methods to prevent further spread of the virus and lessen the burden on healthcare providers is a necessity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NAzT4oBgHgl3EQftv0k/content/2301.01679v1.pdf'} +page_content=' As a cheap and widely accessible medical image modality, point-of-care ultrasound (POCUS) imaging allows radiologists to identify symptoms and assess severity through visual inspection of the chest ultrasound images.' 
Combined with recent advances in computer science, applications of deep learning techniques in medical image analysis have shown promising results, demonstrating that artificial intelligence-based solutions can accelerate the diagnosis of COVID-19 and lower the burden on healthcare professionals. However, the lack of a large amount of well-annotated data poses a challenge in building effective deep neural networks in the case of novel diseases and pandemics. Motivated by this, we present COVID-Net USPro, an explainable few-shot deep prototypical network that monitors and detects COVID-19 positive cases with high precision and recall from minimal ultrasound images. COVID-Net USPro achieves 99.65% overall accuracy, 99.7% recall, and 99.67% precision for COVID-19 positive cases when trained with only 5 shots. The analytic pipeline and results were verified by our contributing clinician with extensive experience in POCUS interpretation, ensuring that the network makes decisions based on actual patterns.

Keywords: Ultrasonic imaging · Lung · COVID-19 · Few-shot learning · Deep explainable architecture

1 Introduction

The Coronavirus Disease 2019 (COVID-19), caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), has been continuously impacting individuals' well-being and the global healthcare systems [1].
Despite vaccination efforts and the policies and regulations in place, the rapid transmission of the virus and waves of rising cases mean that the development of effective screening and risk stratification methods remains a critical need in controlling the disease [2]. Various types of diagnostic tools, including reverse transcription-polymerase chain reaction (RT-PCR), rapid antigen detection tests, and antibody tests, have been developed and adopted globally to increase the rate of screening. While RT-PCR has been the gold-standard test for diagnosing COVID-19, the technique requires substantial labour and laboratory resources and is time-consuming [3]. Rapid antigen tests and antibody tests with varying sensitivity are also less reliable in comparison to RT-PCR tests [3]. For people with significant respiratory symptoms, medical imaging is used to identify the disease and assess the severity of its progression [4]. Under this protocol, a computed tomography (CT) scan, chest X-ray (CXR), or point-of-care ultrasound (POCUS) imaging can be performed and used clinically as an alternative diagnostic tool [2]. To make a diagnosis, acute care physicians and radiologists visually inspect the radiographic images to find patterns related to symptoms and to assess the severity of COVID-19 infection and deformation [3]. During times of high COVID-19 transmission, a large influx of patients increases the burden on clinicians and radiologists. Medical image processing and artificial intelligence (AI) can assist in reducing this burden and accelerate the diagnostic and decision-making process, as existing models and algorithms continue to improve and the amount of available medical image data continues to grow [5, 6, 7]. Different imaging modalities, including CT, X-ray, and ultrasound, may be used in the diagnosis of COVID-19 and offer varying diagnostic value [8]. Chest CT is the most sensitive imaging modality in the initial diagnosis and management of confirmed cases, but it is more expensive and time-consuming [8, 5].
In contrast, ultrasound imaging is more accessible, portable, cheap, and safer, as no radiation is involved during the examination; these are desirable properties, especially in resource-limited settings [8]. Deep learning usually requires a large set of training examples [9, 7, 4]. However, for novel diseases, the availability of such a large amount of well-annotated data poses a great challenge to the learning algorithms. Few-shot learning is an approach in which a model is trained to classify new data based on a limited number of samples seen in training [10]. This resembles how humans learn, as we can recognize new object classes from very few instances, in contrast to other machine learning techniques that require large amounts of data to achieve similar performance [10]. Since a few-shot model requires less data to train, the computational cost of the process is also significantly reduced [10]. These properties make it an appropriate and promising approach for COVID-19 or rare-disease diagnosis. One approach to few-shot learning is metric-based learning. As a few-shot metric-based learning approach, prototypical networks (PN) perform classification by computing distances to prototype representations of each class [10]. PN has shown state-of-the-art (SOTA) results on other datasets and domains (e.g., [11, 12, 13]), demonstrating that simple design decisions can yield significant improvements over more complicated architectures and meta-learning approaches [10].
Motivated by the need for fast and effective alternative screening solutions, and considering the advantages of ultrasound imaging, we present an open-source explainable deep prototypical network, called COVID-Net USPro, that learns to detect COVID-19 positive cases with high precision and recall from a very limited number of lung ultrasound (LUS) images. When trained with only 5 shots, COVID-Net USPro classifies between positive and negative COVID-19 cases with 99.65% overall accuracy, 99.7% recall, and 99.67% precision for COVID-19 positive cases. Intensive experimentation was conducted (e.g., testing different image encoders and varying the training conditions and number of classes to optimize the network) to assess the performance of COVID-Net USPro. To ensure the network's fairness and accountability, it benefits from an explainability module that assesses decisions with visual explanation tools, i.e., Grad-CAM [14] and GSInquire [15]. Moreover, our contributing clinician (A.F.) carefully verified and validated the pipeline and the produced results to ensure the validity of the proposed solution from the clinical perspective.

1.1 Related Work

Several studies aim to apply deep learning to the screening and detection of COVID-19 positive cases.
As an open-source and open-access initiative, COVID-Net [16, 5, 9, 7] includes research on the application of deep neural networks to a multitude of image modalities, such as CT, X-ray, and ultrasound images. Multiple works have demonstrated the effectiveness of deep learning in the classification of CT and X-ray images. For example, COVID-Net CXR [17], a tailored deep convolutional neural network (DCNN) for the detection of COVID-19 cases from chest X-ray images, achieved an overall accuracy of 98.3% and 97.5% sensitivity for COVID-19 cases. Another work, by Ozturk et al., proposed a DCNN based on the DarkNet model used in the "you only look once" (YOLO) real-time object detection system to classify X-ray images, achieving 98.08% accuracy for binary COVID-19 case detection [18]. Research by Afshar et al. proposed a capsule CNN-based network called COVID-CAPS [19], which achieved over 98% accuracy and specificity using a limited amount of X-ray images. COVID-Net CT [6], a deep neural network for the detection of COVID-19 from CT images, scored 96.2% sensitivity and 99% specificity for COVID-19 cases.
The potential of including both CT and X-ray images for classification has also been explored, with research by Thakur and Kumar demonstrating a DCNN-based model that achieves over 99% accuracy and precision for COVID-19 detection using images of both modalities [20]. For ultrasound images, custom neural networks such as COVID-Net US [7] were constructed and tailored to COVID-19 case detection. The network achieved an area under the receiver operating characteristic curve (AUC) of over 98% when trained with COVID-19 positive and normal negative case images. Research by Diaz-Escobar et al. [21] also leveraged pre-trained neural networks such as VGG19 [22], InceptionV3 [23], and ResNet50 [24] for the detection of COVID-19 using ultrasound images, achieving 89.1% accuracy and an AUC of 97.1%. One limitation of the custom deep neural networks used in most of the existing research is the need for a large amount of training data; in the works mentioned above, the datasets all surpassed 10,000 total images [7, 9]. The application of few-shot learning techniques has also been investigated. For example, MetaCOVID, proposed by Shorfuzzaman et al. [25], is a Siamese neural network framework with contrastive loss for few-shot diagnosis of COVID-19 infection using CXR images. Its best network achieved an accuracy of 95.6% and an AUC of 97% when trained in a 3-way setting and tested in a 10-shot setting [25]. In [26], a deep Siamese convolutional network, called COVID-Net FewSE, detects COVID-19 positive cases with 90% recall and 99.7% accuracy when provided with only 50 observations in the training phase.
In the work by Karnes et al. [27], the possibility of using adaptive few-shot learning for ultrasound COVID-19 detection is examined, and the performance gain with an increasing number of shots is investigated. Although the feasibility of adopting few-shot learning techniques for COVID-19 detection from medical imaging has already been investigated, analysis of network interpretability is either missing or inadequate and lacks clinician validation, which limits full understanding of the network and of whether its data interpretation process aligns with real clinical settings. Our contribution is at least three-fold: 1) we present a high-performing network (99.65% accuracy) trained with only 5 shots, while other works achieving similar performance require larger numbers of training examples; 2) COVID-Net USPro is an explainable network, as demonstrated by analysis with two explainability visualization tools and clinician validation; and 3) COVID-Net USPro is open-sourced and available to the public, which helps promote the reproducibility and accessibility of AI in healthcare and encourages further innovation. The remainder of this paper is organized as follows. Section 2 explains the data, techniques, and experiments conducted to assess the network performance in detail. Section 3 presents findings from the analysis. The findings are then discussed in Section 4, where some limitations of the research and future directions are also presented.

2 Data and Methodology

2.1 Data

The COVIDx-US dataset v1.4 [1] is used for this study.
COVIDx-US is an open-access benchmark dataset of lung ultrasound imaging data that contains 242 videos and 29,651 processed images of patients with COVID-19 infection, non-COVID-19 infection, other lung conditions, and normal control cases. The dataset provides LUS images captured with two kinds of probe: a linear probe, which produces a square or rectangular image, or a convex probe, which allows for a wider field of view [28]. Due to the difference in field of view and the low number of COVID-19 positive examples captured with the linear probe, combining linear and convex probe data in training may increase noise and degrade the performance of the network; hence, linear probe data are excluded in this study. A total of 25,262 convex LUS images are then randomly split into a train set containing 90% of the images in each class (7,687 COVID-19, 1,907 normal, 7,397 other, and 5,753 pneumonia images) and a test set with the remaining 10% (860, 204, 825, and 650 images, respectively), ensuring that all frames from each video are either in the train or the test set to avoid data leakage. All images are rescaled to 224 × 224 pixels to keep the images consistent across the entire dataset. The dataset is further augmented by rotating each image by 90°, 180°, and 270°, resulting in a total of 101,048 images (25,262 × 4). This rotation technique is an appropriate method for increasing the dataset size, as it keeps the images and the areas of interest for clinical decisions unaltered and in-bound [29].
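As a concrete illustration of the preprocessing above, the minimal sketch below rescales a single LUS frame to 224 × 224 pixels and generates the three rotated copies; the PIL-based tooling and the file-path argument are illustrative assumptions, not taken from the paper's released code.

    # Minimal preprocessing sketch, assuming frames are stored as image files.
    from PIL import Image

    def preprocess_and_augment(path):
        """Rescale one LUS frame to 224x224, then add 90/180/270-degree copies."""
        img = Image.open(path).convert("RGB").resize((224, 224))
        # Rotation keeps the areas of interest unaltered and in-bound, so each
        # original frame contributes four training images (25,262 x 4 in total).
        return [img.rotate(angle) for angle in (0, 90, 180, 270)]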
2.2 Methodology

COVID-Net USPro is a prototypical few-shot learning network that trains in an episodic learning setting, using a distance metric to assess similarities between a set of unlabelled data, i.e., the query set, and labelled data, i.e., the support set. The labelled data are used to compute a single prototype representation of each class, and unlabelled data are assigned to the class of the prototype they are closest to. A prototypical network [10] is based on the idea that there exists an embedding in which the points of a class cluster around a single prototype representation of that class. During the training phase, a neural network learns a non-linear mapping of the inputs to an embedding space, and a class prototype is computed as the mean of its support set data in the embedding space. Classification is then done by finding the nearest class prototype for each query point based on a specified distance metric. An episodic approach is used to train the model: in each training episode, the few-shot task is simulated by sampling the data points in mini-batches to make the training process consistent with the testing environment. The performance of the network is evaluated using the test dataset, and both quantitative analysis, based on accuracy, precision, and recall, and qualitative explainability analysis are conducted. A high-level conceptual flow of the analysis is presented in Figure 1.

Figure 1: High-level conceptual flow of the analysis (data source, data preparation, data splitting, model construction, and model evaluation).

We define the classification problem as a K-way N-shot episodic task, where K denotes the number of classes present in the dataset and N denotes the number of available image examples for each class in each episode. For a given dataset, N images from each of the K classes are sampled to form the support set, and another M images from each class are sampled to form the query set.
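A minimal sketch of such an episode sampler follows, assuming the data are held in a dictionary mapping each class label to its list of image tensors (a hypothetical structure, not the paper's code):

    # Hypothetical K-way N-shot episode sampler.
    import random

    def sample_episode(images_by_class, k, n_support, m_query):
        """Sample one episode: N support and M query images for each of K classes."""
        classes = random.sample(sorted(images_by_class), k)  # pick K classes
        support, query = [], []
        for label, cls in enumerate(classes):
            frames = random.sample(images_by_class[cls], n_support + m_query)
            support += [(x, label) for x in frames[:n_support]]
            query += [(x, label) for x in frames[n_support:]]
        return support, query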
The network then aims to classify the images of the query set based on the K × N total images presented in the support set. In this work, we formulate the problem as a 2-way, 3-way, and 4-way classification problem; details are given in Section 2.3.3. Few-shot classification with a prototypical network can be summarized in three steps: 1) encoding the images, 2) generating class prototypes, and 3) assigning labels to query samples based on their distances to the class prototypes. Let S = {(x_(1,s), y_(1,s)), ..., (x_(N,s), y_(N,s))} and Q = {(x_(1,q), y_(1,q)), ..., (x_(N,q), y_(N,q))} be the support and query sets respectively, where each x_i ∈ R^D is a D-dimensional example feature vector and y_i ∈ {1, ..., K} is the label of the example. The prototypical network embodies an image encoder f_φ : R^D → R^H that maps each image x_i onto an H-dimensional embedding space in which images of the same class cluster together. A class prototype is then generated for each class by averaging the embedded image vectors of its support set, where v_k = (1/N) Σ_{i=1}^{N} f_φ(x_(i,s)^(k)) denotes the prototype of class k [10].
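Under this definition, prototype generation reduces to a per-class mean over the support embeddings; a minimal sketch, assuming the encoder outputs are stacked class-by-class in a single tensor:

    import torch

    def class_prototypes(support_embeddings, k, n):
        """v_k = mean of the N support embeddings of class k.

        support_embeddings: tensor of shape (K*N, H), ordered class-by-class.
        """
        return support_embeddings.view(k, n, -1).mean(dim=1)  # shape (K, H)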
To classify a query image, a distance metric is used: the distances between the embedding vector of the query image and each of the class prototypes are computed. In this work, the squared Euclidean distance d(v, q) = ‖v − q‖² = Σ_j (v_j − q_j)² is used, where q is the embedding vector of the query image and v is the embedding vector of a prototype. After the distances are computed, a SoftMax function is applied over the negated distances to the prototypes to compute the probabilities of the query image belonging to each class. The class with the highest probability is then assigned to the query image. In the training phase, the network learns by minimizing a loss function, i.e., the negative log-SoftMax probability J = −log p(y = k | x) of the true class k, using an Adam optimizer with an initial learning rate of 0.001, which is reduced if the loss has not improved after 3 epochs. In each episode, a subset of data points is randomly selected, forming the support and query sets, and the loss term is calculated at the end of the episode. To facilitate an effective training process and prevent over-fitting, early stopping is implemented, halting training when the loss has not improved after 5 epochs. A total of 10 epochs is set for all training processes, and 200 episodes are run in each training epoch.
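The per-episode classification and loss computation can be sketched as follows, assuming PyTorch tensors for the prototypes, query embeddings, and true labels; the distances are negated before the SoftMax so that closer prototypes receive higher probability:

    import torch
    import torch.nn.functional as F

    def episode_loss(prototypes, queries, labels):
        """Nearest-prototype classification and loss for one episode.

        prototypes: (K, H), queries: (M, H), labels: (M,) true class indices.
        """
        d = torch.cdist(queries, prototypes) ** 2  # squared Euclidean, (M, K)
        log_p = F.log_softmax(-d, dim=1)           # SoftMax over negated distances
        loss = F.nll_loss(log_p, labels)           # J = -log p(y = k | x)
        preds = log_p.argmax(dim=1)                # label of the nearest prototype
        return loss, preds

In training, the returned loss would be backpropagated and stepped with an Adam optimizer (e.g., torch.optim.Adam(encoder.parameters(), lr=0.001)), with the learning rate reduced on plateau as described above.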
Figure 2 presents an overview of the COVID-Net USPro network architecture design.

Figure 2: COVID-Net USPro network architecture design. Support and query sets are passed through the encoder to obtain embeddings; prototypes are generated and distances calculated to produce predictions, and in training the loss is backpropagated.

The trained model's performance is evaluated quantitatively and qualitatively. In the quantitative analysis, the model's accuracy, precision, and recall for each class are reported. In the qualitative analysis, model explainability is investigated and visualized. Explainable Artificial Intelligence (XAI) has become an important criterion when assessing whether neural networks can be applied in real clinical settings [30].
While AI-driven systems may show high accuracy and precision in analyzing medical images, a lack of reasonable explainability will spark criticism of the network's adoption [30]. COVID-Net USPro's explainability is assessed using two approaches, i.e., Gradient-weighted Class Activation Mapping (Grad-CAM) [14] and GSInquire [15], on a selected dataset containing correctly classified COVID-19 and normal cases with high confidence (i.e., > 99.9% probability) as well as falsely predicted COVID-19 and normal cases. Grad-CAM generates a visual explanation of the input image using the gradient information flowing into the last convolutional layer of the convolutional neural network (CNN) encoder and assigns importance values to each neuron for a given classification decision [14]. The output is a heatmap-overlaid image that shows the regions that impact the particular classification decision made by the network [14]. The other tool, GSInquire, identifies the critical factors in an input image that are shown to be integral to the decisions made by the network, using a generative synthesis approach [15]. The result is an annotated image highlighting the critical region, which, if removed, drastically changes the classification result [15]. Results from both tools are reviewed by a clinician with experience in the analysis of ultrasound images to assess whether clinically important patterns are captured by the network.
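A minimal Grad-CAM sketch in PyTorch is shown below: it hooks the last convolutional layer of a CNN encoder, weights that layer's activations by the spatially pooled gradients of a class score, and returns a normalized heatmap. The hook-based implementation and the class_score_fn argument (for a prototypical network, e.g., the negated distance to the predicted class prototype) are illustrative assumptions, not the authors' implementation.

    import torch
    import torch.nn.functional as F

    def grad_cam(model, target_layer, image, class_score_fn):
        """Return a normalized Grad-CAM heatmap for one batch of input images."""
        acts, grads = {}, {}
        h1 = target_layer.register_forward_hook(
            lambda mod, inp, out: acts.update(a=out))
        h2 = target_layer.register_full_backward_hook(
            lambda mod, gin, gout: grads.update(g=gout[0]))
        score = class_score_fn(model(image))  # scalar score of the target class
        model.zero_grad()
        score.backward()
        h1.remove(); h2.remove()
        # Channel weights: gradients averaged over the spatial dimensions.
        w = grads["g"].mean(dim=(2, 3), keepdim=True)
        cam = F.relu((w * acts["a"]).sum(dim=1))  # (B, H', W') importance map
        return cam / (cam.max() + 1e-8)           # normalize to [0, 1]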
2.3 Experiment Settings

We comprehensively assess the performance of COVID-Net USPro in detecting COVID-19 positive cases from ultrasound images by testing various training conditions, such as image encoders, the number of shots available for training, and classification task types. Details are discussed in this section.

2.3.1 Image Encoders

To leverage the power of transfer learning, multiple encoders were tried, including but not limited to ResNet- and VGG-based models [24, 22]. Pre-trained models refer to using model parameters pre-trained on ImageNet [31]. Here, we report the 4 best encoders with respect to our research objectives:

ResNet18L1: Pre-trained ResNet18 [24], with trainable parameters on the final connected layer and out-features set to the number of classes. This model is regarded as the baseline encoder, as it contains the fewest layers and retrained parameters.

ResNet18L5: Pre-trained ResNet18 [24], with trainable parameters on the last 4 convolutional layers and the final connected layer. Out-features set to the number of classes.

ResNet50L1: Pre-trained ResNet50 [24], with trainable parameters on the final connected layer and out-features set to the number of classes.

ResNet50L4: Pre-trained ResNet50 [24], with trainable parameters on the last 3 convolutional layers and the final connected layer. Out-features set to the number of classes.
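For illustration, a ResNet18L1-style encoder could be assembled as in the sketch below, freezing all ImageNet-pretrained parameters and leaving only the replaced final connected layer trainable; the torchvision calls are an assumption about tooling, not taken from the paper.

    import torch.nn as nn
    from torchvision import models

    def build_resnet18l1(num_classes):
        """ImageNet-pretrained ResNet18 with only the final layer trainable."""
        encoder = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
        for p in encoder.parameters():
            p.requires_grad = False  # freeze all pretrained layers
        # Replace the head; out-features are set to the number of classes.
        encoder.fc = nn.Linear(encoder.fc.in_features, num_classes)
        return encoder

The deeper variants would follow the same pattern, additionally unfreezing the last few convolutional layers.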
2.3.2 Number of Training Shots

The optimal number of shots for maximal performance is determined by training models under 5, 10, 20, 30, 40, 50, 75, and 100-shot scenarios. For selected models showing a steady increase of performance with increasing shots, 150- and 200-shot conditions are also tested to verify that the maximum performance is reached at 100 shots. To ensure the training process is faithful to the testing environment, the number of example shots per class presented in each episode is the same in the support and query sets in both training and testing. For example, in the 5-shot scenario, 5 images of each class are presented in both the support set and the query set in training, and the same follows in testing.

2.3.3 Problem Formulation

As the ability of the model to correctly identify COVID-19 positive cases is valued most, the classification problem was formulated in 3 different scenarios, in ascending order of data complexity:

2-way classification: Data from all 3 other classes, namely the 'normal', 'non-COVID-19', and 'other' classes, are viewed as a combined COVID-19 negative class. The network learns from COVID-19 positive and COVID-19 negative data in this setting.

3-way classification: As the 'other' class contains data from multiple different lung conditions, it has the highest variation and may disrupt the network's learning process due to the lack of uniformity in the data compared with the COVID-19, normal, and non-COVID-19 classes. In the 3-way classification, the 'other' class is excluded, and the network is trained to classify the remaining three classes.
4-way classification: As the dataset contains four classes, the 4-way classification setting retains all of them, and the network is trained to classify the 'COVID-19', 'normal', 'non-COVID-19', and 'other' classes.

3 Results

This section summarizes the quantitative performance results for all combinations of the experiment settings listed in Section 2.3, as well as the results of the network explainability analysis.

3.1 Quantitative Performance Analysis

The performance of COVID-Net USPro is evaluated using the overall accuracy and the precision and recall for each class. As the model's ability to diagnose COVID-19 positive cases is the most important for the current clinical use case, only the precision and recall for the COVID-19 class are reported below. To reduce table size, Table 1 only summarizes the performance of the network under the 5-shot and 100-shot scenarios for encoders that scored over 80% across all evaluated metrics. For the full performance results for all shot settings, and the precision and recall for all classes, please refer to the project repository: [www.anonymous]. Across all classification types and models, performance is better under the 100-shot training scenario than under 5-shot, with performance metrics increasing from 5 shots and plateauing after 75 shots, as shown in Figure 3. The ResNet networks classify COVID-19 with precision and recall consistently above 87% under the 5-shot condition and above 99% under the 100-shot condition. As seen in Table 1, the added classes in the 3-way and 4-way classification types reduce the performance of the network, as classification is more complex with a larger number of classes.
Across all classification types and models, performance is better under the 100-shot training scenario than under 5-shot, with performance metrics increasing from 5-shot and plateauing after 75-shot, as shown in Figure 3. ResNet networks consistently classify COVID-19 with precision and recall above 87% under the 5-shot condition and above 99% under the 100-shot condition. As seen in Table 1, the larger number of classes in the 3-way and 4-way classification types reduces the performance of the network, as the classification problem becomes more complex. However, this performance difference among the three classification types shrinks as the number of shots increases, since more training examples improve the network's ability to distinguish between multiple classes. Among the four models, the deeper models (i.e., those with ResNet50 as the encoder) perform better in all classification types and shot conditions. In addition, models whose final convolutional layer parameters are re-trained on the ultrasound images (ResNet18L5 and ResNet50L4) achieve higher accuracy, precision, and recall. Therefore, while using pre-trained parameters and simpler models reduces computational complexity and space, tailoring the parameters of the final 3-4 convolutional layers to the ultrasound images, together with deeper image encoding, boosted performance to above 99%. In the 2-way and 3-way classification, the precision and recall for classes other than COVID-19 reach a magnitude similar to that of the COVID-19 class. In the 4-way case, the precision and recall for the 'other' class are around 2-3% lower than those for the 'non-COVID-19', 'normal' and 'COVID-19' classes. This is expected, since the 'other' class covers various lung conditions/diseases that encompass a larger range of image features and variations. Overall, with precision and recall of similar magnitude for all classes in the 2-way, 3-way and 4-way classification, the network also demonstrates the ability to distinguish between multiple diseases. In comparison to the studies outlined in Section 1.1, the performance of the COVID-Net USPro networks tailored to ultrasound images with re-trained parameters is improved. The accuracy of ResNet18L5 and ResNet50L4 exceeds 98% under the 4-way, 5-shot setting, while other work such as MetaCOVID [25], which also applied a few-shot approach, achieved 95.6% accuracy under a 3-way, 10-shot setting. Additionally, the sensitivity of COVID-Net USPro for COVID-19 cases is higher than that of networks trained on other image modalities such as X-ray or CT, where the best performing case scored 97.5% [6].
Table 1: Summary of classification results for 5-shot and 100-shot conditions.

Scenario  No. shots  Model       Accuracy  Precision  Recall
2-way     5          ResNet18L1  0.9420    0.9486     0.9460
2-way     5          ResNet18L5  0.9930    0.9925     0.9950
2-way     5          ResNet50L1  0.9525    0.9570     0.9560
2-way     5          ResNet50L4  0.9965    0.9967     0.9970
2-way     100        ResNet18L1  0.9758    0.9764     0.9755
2-way     100        ResNet18L5  1.0000    1.0000     1.0000
2-way     100        ResNet50L1  0.9963    0.9964     0.9962
2-way     100        ResNet50L4  0.9999    0.9999     1.0000
3-way     5          ResNet18L1  0.9570    0.9606     0.9510
3-way     5          ResNet18L5  0.9987    0.9992     0.9970
3-way     5          ResNet50L1  0.9945    0.9508     0.9660
3-way     5          ResNet50L4  0.9947    0.9942     0.9940
3-way     100        ResNet18L1  0.9867    0.9833     0.9853
3-way     100        ResNet18L5  1.0000    1.0000     1.0000
3-way     100        ResNet50L1  0.9977    0.9970     0.9975
3-way     100        ResNet50L4  1.0000    1.0000     1.0000
4-way     5          ResNet18L1  0.8627    0.9281     0.8710
4-way     5          ResNet18L5  0.9817    0.9975     0.9970
4-way     5          ResNet50L1  0.9392    0.9640     0.9540
4-way     5          ResNet50L4  0.9850    0.9917     0.9930
4-way     100        ResNet18L1  0.9385    0.9742     0.9704
4-way     100        ResNet18L5  0.9884    1.0000     1.0000
4-way     100        ResNet50L1  0.9813    0.9947     0.9955
4-way     100        ResNet50L4  0.9902    1.0000     1.0000
Figure 3: Performance results with increasing shots, trained under the 4-way condition: (a) pre-trained ResNet18 with trainable parameters on the final connected layer, with the number of output features set to the number of classes (ResNet18L1); (b) pre-trained ResNet50 with trainable parameters on the last 3 convolutional layers and the final connected layer (ResNet50L4). [Figure: two line plots of accuracy, COVID-19 precision, and COVID-19 recall versus number of shots (0-200), for (a) ResNet18L1 and (b) ResNet50L4.]
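The two encoder variants in Figure 3 differ only in which pre-trained parameters are left trainable. The sketch below shows one plausible way to configure them with torchvision; the variant names, the use of layer4 as a stand-in for "the last convolutional layers", and the linear head are assumptions for illustration, not the released implementation.

```python
import torch.nn as nn
from torchvision import models

def build_encoder(variant: str, num_classes: int) -> nn.Module:
    """Configure a pre-trained ResNet, freezing all but selected layers.

    'resnet18_l1': only the final fully connected layer is trainable,
                   with out_features equal to the number of classes.
    'resnet50_l4': the last residual stage (layer4, approximating the last
                   convolutional layers) and the final fully connected
                   layer are trainable.
    """
    if variant == "resnet18_l1":
        net = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
        trainable = ("fc",)
    elif variant == "resnet50_l4":
        net = models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V1)
        trainable = ("layer4", "fc")
    else:
        raise ValueError(f"unknown variant: {variant}")
    net.fc = nn.Linear(net.fc.in_features, num_classes)
    for name, param in net.named_parameters():
        # Freeze everything except the layers selected for re-training.
        param.requires_grad = name.startswith(trainable)
    return net
```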
Figure 4: COVID-19 positive case examples correctly classified by COVID-Net USPro with high confidence: (a) an example of a decision based on wrong decision factors; (b) an example of a decision made based on disease-related patterns.

3.2 Clinical Validation and Network Explainability Analysis

In addition to the intensive quantitative performance analysis, we clinically validated the network output to ensure that the network captures important patterns in the ultrasound images. For this purpose, our contributing clinician (A.F.) reviewed a randomly selected set of images and reported his findings and observations.
Our contributing clinician (A.F.) is an Assistant Professor in the Department of Emergency Medicine and the ultrasound co-director for undergraduate medical students at McGill University. He practices Emergency Medicine full-time at Saint Mary's Hospital in Montreal. Figure 4 presents two selected ultrasound images of COVID-19 positive cases that were reviewed, annotated by Grad-CAM and GSInquire. As seen, the annotated images contain the lung pleura region at the top of the image, while the second example (Figure 4-b) also marks the bottom region with high importance. B-lines, the light comet-tail artifacts extending from the pleura to the bottom of the image, and the presence of dark regions interspacing the B-lines at the bottom of the image, corresponding to signs of lung consolidation, are indicators of abnormality [32]. Hence, the visual annotations for the second example (Figure 4-b) are more representative of disease-related patterns within the ultrasound image. Figure 4-a is one of the examples where the model considers the rib, which is not an abnormality, as a structure of interest, leading it to classify the image as a COVID-19 positive case. Hence, although the model correctly classified the image, the decision was made based on invalid clinical factors.
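For reference, the sketch below shows one common way to produce Grad-CAM heatmaps [14] like those in Figure 4, using forward and backward hooks on the encoder's last convolutional stage. The layer choice and model handle are assumptions for illustration, not the authors' exact pipeline, and GSInquire follows a different, machine-centric strategy [15].

```python
import torch
import torch.nn.functional as F

def grad_cam(model, image, target_layer, class_idx):
    """Compute a Grad-CAM heatmap for one image (1, C, H, W) and one class.

    Weights each activation channel of target_layer by the mean gradient
    of the class score with respect to that channel, then ReLUs the sum.
    """
    acts, grads = {}, {}
    h1 = target_layer.register_forward_hook(
        lambda m, i, o: acts.update(a=o))
    h2 = target_layer.register_full_backward_hook(
        lambda m, gi, go: grads.update(g=go[0]))
    try:
        model.eval()
        score = model(image)[0, class_idx]
        model.zero_grad()
        score.backward()
    finally:
        h1.remove()
        h2.remove()
    weights = grads["g"].mean(dim=(2, 3), keepdim=True)  # GAP over H, W
    cam = F.relu((weights * acts["a"]).sum(dim=1, keepdim=True))
    cam = F.interpolate(cam, size=image.shape[-2:], mode="bilinear",
                        align_corners=False)
    return (cam / cam.max().clamp(min=1e-8)).squeeze()

# Example with hypothetical handles: heatmap over the last ResNet stage.
# cam = grad_cam(net, img_tensor, net.layer4, class_idx=0)
```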
We implement two strategies to address these issues and improve classification explainability. The first is to exclude images with low image quality, such as insufficient image depth or a lack of representative features. A severity grade introduced in COVIDx-US dataset v1.4, called the lung ultrasound score (LUSS), rates each ultrasound video on a scale of 0 to 3, where 0 corresponds to the presence of only normal features and 3 corresponds to the presence of severe disease artifacts [33]. Therefore, in the first attempt to improve the network, images from videos with a score of 0 for the normal case, and images from videos with scores of 2 and 3 for the COVID-19 case, are used to train a binary classification version of the network. Observing the annotated images, the network then focuses more on the bottom regions of the images, though cases where it focuses on the top pleura region are still present. The second strategy to further improve model explainability is to exclude the regions above the pleura (i.e., soft tissue), so that the network focuses on the disease-defining features, present mostly at the bottom of the images below the lung pleura. Our experiments confirm the effectiveness of this strategy. Hence, combining the first and second strategies, a binary model is trained with LUSS-filtered and cropped images. Figure 5 shows examples from the cropped-image analysis. As suggested by the annotated examples and confirmed by our contributing clinician (A.F.), clinically determinant artifacts such as B-lines and lung consolidation are clearly identified in the COVID-19 positive images by COVID-Net USPro.

Figure 5: Four cropped COVID-19 positive examples predicted correctly with high confidence by COVID-Net USPro (a-d), while recognizing disease artifacts, e.g., extended B-lines.
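A minimal sketch of this two-step preprocessing follows, assuming a per-video LUSS lookup and a fixed fractional crop below the pleura; the table entries and the 25% crop fraction are illustrative assumptions, not values from the paper.

```python
from PIL import Image

# Hypothetical per-video LUSS lookup: video_id -> (class_label, luss_score)
LUSS = {"vid_001": ("normal", 0), "vid_002": ("COVID-19", 3)}

def keep_frame(video_id: str) -> bool:
    """Strategy 1: keep normal frames only from LUSS-0 videos, and
    COVID-19 frames only from LUSS-2/3 videos."""
    label, score = LUSS[video_id]
    if label == "normal":
        return score == 0
    if label == "COVID-19":
        return score in (2, 3)
    return False

def crop_below_pleura(img: Image.Image, top_fraction: float = 0.25) -> Image.Image:
    """Strategy 2: discard the top portion of the frame (soft tissue above
    the pleura) so the network sees the region where B-lines and
    consolidation appear. The 25% cut-off is an assumed placeholder."""
    w, h = img.size
    return img.crop((0, int(h * top_fraction), w, h))
```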
4 Conclusions

Deep neural network architectures have shown promising results in a wide range of tasks, including predictive and diagnostic tasks. However, such networks require a massive amount of labelled data to train, which runs against the nature of new pandemics and novel diseases, where no or very few data samples are available, especially in the initial stages. As part of the COVID-Net initiative, and using a diverse, complex benchmark dataset, i.e., COVIDx-US, in this work we introduced the COVID-Net USPro network, tailored to detect COVID-19 infection with high accuracy from very few ultrasound images. The proposed deep prototypical network leverages pre-trained models with tailored parameters on the final layers to reduce computational complexity and achieve high classification performance when only 5 examples from each class are available for training. Accuracy, precision and recall for the best performing network are over 99%, comparable to or outperforming other existing work [7, 27]. These properties are highly crucial not only for the control of the COVID-19 pandemic but also for screening patients in new diseases and pandemics, for which the proposed network can be easily tuned. We intensively assessed the explainability of the network and clinically validated its performance.
Experimental results demonstrate that COVID-Net USPro not only achieves high performance in terms of accuracy, precision, and recall, but also shows predictive behaviour that is consistent with clinical interpretation, as validated by our contributing clinician (A.F.). In addition, as part of the explainability-driven performance validation process, we proposed and implemented two strategies to further improve the network's performance in accordance with background clinical knowledge for identifying COVID-19 positive and negative cases. Overall, we believe the simplicity and effectiveness of COVID-Net USPro make it a promising tool to aid the COVID-19 screening process using ultrasound images. We hope the open-source release of COVID-Net USPro helps researchers and clinical data scientists accelerate innovations in the fight against the COVID-19 pandemic that can ultimately benefit the larger society.

Several future research directions can be explored to further improve the network. First, additional data augmentation and preparation steps can be taken to improve data quality and dataset size. In this work, ultrasound images captured with a linear probe are excluded due to differences in the clinical interpretation of linear-probe and convex-probe images; more image augmentation and preparation techniques could be explored to include linear-probe data and increase the dataset size. Second, we used simple cropping to filter out the region above the pleura; a more procedural image segmentation step could be added to include only the clinically relevant areas of the images, to further improve network performance from the explainability standpoint.
Lastly, we used COVIDx-US, a public dataset that includes data of various sources and quality. Network training could be improved by using only high-quality input ultrasound data, collected systematically, that contains clear, representative image artifacts with sufficient and specific image depth. For this purpose, a data collection protocol might be required to capture ultrasound images in a standardized manner from a set of consented participants.

References

[1] Ashkan Ebadi, Pengcheng Xi, Alexander MacLean, Adrian Florea, Stéphane Tremblay, Sonny Kohli, and Alexander Wong. COVIDx-US: An open-access benchmark dataset of ultrasound imaging data for AI-driven COVID-19 analytics. Frontiers in Bioscience-Landmark, 27(7), 2022.
[2] Marco Cascella, Michael Rajnik, Abdul Aleem, Scott C. Dulebohn, and Raffaela Di Napoli. Features, evaluation, and treatment of coronavirus (COVID-19), May 2022.
[3] Jacqueline Dinnes, Jonathan J. Deeks, Sarah Berhane, Melissa Taylor, Ada Adriano, Clare Davenport, Sabine Dittrich, Devy Emperador, Yemisi Takwoingi, Jane Cunningham, Sophie Beese, Julie Domen, Janine Dretzke, Lavinia Ferrante di Ruffano, Isobel M. Harris, Malcolm J. Price, Sian Taylor-Phillips, Lotty Hooft, Mariska M. G. Leeflang, Matthew D. F. McInnes, René Spijker, Ann Van den Bruel, and the Cochrane COVID-19 Diagnostic Test Accuracy Group. Rapid, point-of-care antigen and molecular-based tests for diagnosis of SARS-CoV-2 infection. Cochrane Database of Systematic Reviews, 2021.
[4] Dandi Yang, Cristhian Martinez, Lara Visuña, Hardev Khandhar, Chintan Bhatt, and Jesus Carretero. Detection and analysis of COVID-19 in medical images using deep learning techniques. Scientific Reports, 11, 2021.
[5] Linda Wang, Zhong Qiu Lin, and Alexander Wong. COVID-Net: a tailored deep convolutional neural network design for detection of COVID-19 cases from chest X-ray images. Scientific Reports, 10(1):19549, Nov 2020.
[6] Hayden Gunraj, Ali Sabri, David Koff, and Alexander Wong. COVID-Net CT-2: Enhanced deep neural networks for detection of COVID-19 from chest CT images through bigger, more diverse learning. Frontiers in Medicine, 8:729287, 2022.
[7] Alexander MacLean, Saad Abbasi, Ashkan Ebadi, Andy Zhao, Maya Pavlova, Hayden Gunraj, Pengcheng Xi, Sonny Kohli, and Alexander Wong. COVID-Net US: A tailored, highly efficient, self-attention deep convolutional neural network design for detection of COVID-19 patient cases from point-of-care ultrasound imaging. In FAIR-MICCAI'21, 2021.
[8] Hany Kasban. A comparative study of medical imaging techniques. International Journal of Information Science and Intelligent System, 4:37–58, 03 2015.
[9] Hayden Gunraj, Linda Wang, and Alexander Wong. COVIDNet-CT: A tailored deep convolutional neural network design for detection of COVID-19 cases from chest CT images. Frontiers in Medicine, 7:1025, 2020.
[10] Jake Snell, Kevin Swersky, and Richard Zemel. Prototypical networks for few-shot learning. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017.
[11] Gen Li, Varun Jampani, Laura Sevilla-Lara, Deqing Sun, Jonghyun Kim, and Joongkyu Kim. Adaptive prototype learning and allocation for few-shot segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8334–8343, June 2021.
[12] Shengli Sun, Qingfeng Sun, Kevin Zhou, and Tengchao Lv. Hierarchical attention prototypical networks for few-shot text classification. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 476–485, Hong Kong, China, November 2019. Association for Computational Linguistics.
[13] Jessica Deuschel, Daniel Firmbach, Carol I. Geppert, Markus Eckstein, Arndt Hartmann, Volker Bruns, Petr Kuritcyn, Jakob Dexl, David Hartmann, Dominik Perrin, Thomas Wittenberg, and Michaela Benz. Multi-prototype few-shot learning in histopathology. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) Workshops, pages 620–628, October 2021.
[14] Ramprasaath R. Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-CAM: Visual explanations from deep networks via gradient-based localization. In 2017 IEEE International Conference on Computer Vision (ICCV), pages 618–626, 2017.
[15] Zhong Qiu Lin, Mohammad Javad Shafiee, Stanislav Bochkarev, Michael St. Jules, Xiaoyu Wang, and Alexander Wong. Do explanations reflect decisions? A machine-centric strategy to quantify the performance of explainability algorithms. CoRR, abs/1910.07387, 2019.
[16] Alexander Wong. COVID-Net open initiative.
[17] Hossein Aboutalebi, Maya Pavlova, Hayden Gunraj, Mohammad Javad Shafiee, Ali Sabri, Amer Alaref, and Alexander Wong. MEDUSA: Multi-scale encoder-decoder self-attention deep neural network architecture for medical image analysis, 2021.
[18] Tulin Ozturk, Muhammed Talo, Eylul Azra Yildirim, Ulas Baran Baloglu, Ozal Yildirim, and U. Rajendra Acharya. Automated detection of COVID-19 cases using deep neural networks with X-ray images. Computers in Biology and Medicine, 121:103792, 2020.
[19] Parnian Afshar, Shahin Heidarian, Farnoosh Naderkhani, Anastasia Oikonomou, Konstantinos N. Plataniotis, and Arash Mohammadi. COVID-CAPS: A capsule network-based framework for identification of COVID-19 cases from X-ray images. Pattern Recognition Letters, 138:638–643, 2020.
[20] Samritika Thakur and Aman Kumar. X-ray and CT-scan-based automated detection and classification of COVID-19 using convolutional neural networks (CNN). Biomedical Signal Processing and Control, 69:102920, 2021.
[21] Julia Diaz-Escobar, Nelson E. Ordóñez-Guillén, Salvador Villarreal-Reyes, Alejandro Galaviz-Mosqueda, Vitaly Kober, Raúl Rivera-Rodriguez, and Jose E. Lozano Rizk. Deep-learning based detection of COVID-19 using lung ultrasound imagery. PLOS ONE, 16(8), 2021.
[22] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. In International Conference on Learning Representations, 2015.
[23] Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. CoRR, abs/1512.00567, 2015.
[24] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. CoRR, abs/1512.03385, 2015.
[25] Mohammad Shorfuzzaman and M. Shamim Hossain. MetaCOVID: A Siamese neural network framework with contrastive loss for n-shot diagnosis of COVID-19 patients. Pattern Recognition, 113:107700, 2021.
[26] A. Ebadi, H. Azimi, P. Xi, S. Tremblay, and A. Wong. COVID-Net FewSE: An open-source deep Siamese convolutional network model for few-shot detection of COVID-19 infection from X-ray images. Journal of Computational Vision and Imaging Systems, 7(1):16–18, 2021.
[27] Michael Karnes, Shehan Perera, Srikar Adhikari, and Alper Yilmaz. Adaptive few-shot learning PoC ultrasound COVID-19 diagnostic system, 2021.
[28] Wonseok Lee and Yongrae Roh. Ultrasonic transducers for medical diagnostic imaging. Biomedical Engineering Letters, 7(2):91–97, 2017.
[29] Zeshan Hussain, Francisco Gimenez, Darvin Yi, and Daniel Rubin. Differential data augmentation techniques for medical imaging classification tasks, Apr 2018.
[30] Julia Amann, Alessandro Blasimme, Effy Vayena, Dietmar Frey, and Vince I. Madai. Explainability for artificial intelligence in healthcare: A multidisciplinary perspective. BMC Medical Informatics and Decision Making, 20(1), 2020.
[31] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248–255. IEEE, 2009.
[32] Daniel A. Lichtenstein, Gilbert A. Mezière, Jean-François Lagoueyte, Philippe Biderman, Ivan Goldstein, and Agnès Gepner. A-lines and B-lines: lung ultrasound as a bedside tool for predicting pulmonary artery occlusion pressure in the critically ill. Chest, 136(4):1014–1020, 2009.
[33] Ashkan Ebadi, Pengcheng Xi, Alexander MacLean, Stéphane Tremblay, Sonny Kohli, and Alexander Wong. COVIDx-US - an open-access benchmark dataset of ultrasound imaging data for AI-driven COVID-19 analytics. arXiv:2103.10003, 2021.