diff --git "a/2tE2T4oBgHgl3EQfjAfh/content/tmp_files/load_file.txt" "b/2tE2T4oBgHgl3EQfjAfh/content/tmp_files/load_file.txt" new file mode 100644--- /dev/null +++ "b/2tE2T4oBgHgl3EQfjAfh/content/tmp_files/load_file.txt" @@ -0,0 +1,1160 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf,len=1159 +page_content='1 BiCurNet: Pre-Movement EEG based Neural Decoder for Biceps Curl Trajectory Estimation Manali Saini*, Anant Jain*, Lalan Kumar, Suriya Prakash Muthukrishnan, Shubhendu Bhasin and Sitikantha Roy Abstract—Kinematic parameter (KP) estimation from early electroencephalogram (EEG) signals is essential for positive augmentation using wearable robot.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' However, work related to early estimation of KPs from surface EEG is sparse.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' In this work, a deep learning-based model, BiCurNet, is presented for early estimation of biceps curl using collected EEG signal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' The model utilizes light-weight architecture with depth-wise separable convolution layers and customized attention module.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' The feasibility of early estimation of KPs is demonstrated using brain source imaging.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' Computationally efficient EEG features in spherical and head harmonics domain is utilized for the first time for KP prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' The best Pearson correlation coefficient (PCC) between estimated and actual trajectory of 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='7 is achieved when combined EEG features (spatial and harmonics domain) in delta band is utilized.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' Robustness of the proposed network is demonstrated for subject-dependent and subject-independent training, using EEG signals with artifacts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' Index Terms—Brain-computer interface, Electroencephalo- gram, Deep learning, Kinematic parameter estimation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' INTRODUCTION Brain-computer interface (BCI) is an integration of the measurement, decoding, and translation of the activity of central nervous system (CNS) into imitative output that rein- states, augments, or rehabilitates the natural CNS output [1].' 
Manali Saini and Anant Jain have contributed equally to this work. This work was supported in part by the DRDO - JATC project with project number RP04191G. This work involved human subjects or animals in its research. Approval of all ethical and experimental procedures and protocols was granted by the Institute Ethics Committee, All India Institute of Medical Sciences, New Delhi, India, with reference number IEC-751/07.08.2020, RP-06/2020.

Manali Saini is with the Department of Electrical Engineering, Indian Institute of Technology Delhi, New Delhi 110016, India (e-mail: manaliigit@gmail.com). Anant Jain is with the Department of Electrical Engineering, Indian Institute of Technology Delhi, New Delhi 110016, India (e-mail: anantjain@ee.iitd.ac.in). Lalan Kumar is with the Department of Electrical Engineering, Bharti School of Telecommunication, and Yardi School of Artificial Intelligence, Indian Institute of Technology Delhi, New Delhi 110016, India (e-mail: lkumar@ee.iitd.ac.in).
Suriya Prakash Muthukrishnan is with the Department of Physiology, All India Institute of Medical Sciences, New Delhi 110016, India (e-mail: dr.suriyaprakash@aiims.edu). Shubhendu Bhasin is with the Department of Electrical Engineering, Indian Institute of Technology Delhi, New Delhi 110016, India (e-mail: sbhasin@ee.iitd.ac.in). Sitikantha Roy is with the Department of Applied Mechanics, Indian Institute of Technology Delhi, New Delhi 110016, India (e-mail: sroy@am.iitd.ac.in).

I. INTRODUCTION

A brain-computer interface (BCI) is an integration of the measurement, decoding, and translation of the activity of the central nervous system (CNS) into an imitative output that reinstates, augments, or rehabilitates the natural CNS output [1]. This creates an interface between the CNS and its external environment. BCI-based systems are rapidly emerging on account of the recent advancements in signal processing and artificial intelligence [2], [3]. These systems are useful in neuro-rehabilitation to assist users with motor impairments [4]-[7]. For real-time operability of these systems, continuous signal decoding is required for the extraction of kinematic parameters (KPs) such as motion trajectory, velocity, acceleration, etc. [8]-[10]. In view of these aspects, electroencephalogram (EEG)-based BCI systems have gained popularity in recent years, owing to the non-invasiveness, low cost, and excellent temporal resolution of EEG signals [11], [12].
A. Motivation and Related Work

The literature explores machine learning and deep learning-based paradigms for upper limb kinematic parameter estimation (KPE), and for movement intention detection and classification, from the low-frequency components of EEG signals. For instance, in [13], sparse multinomial logistic regression is utilized to classify EEG signals during reach intention and actual movement, based on multiple hand-crafted features extracted from EEG signals filtered in the range of 1-40 Hz. In that work, independent component analysis (ICA) and dipole fitting are applied to remove movement artifacts from the recorded EEG signals, in order to obtain low classification error rates [13]. Researchers in [14] have explored EEG current source dipole (CSD) data, using standardized low resolution brain electromagnetic tomography (sLORETA), to decode actual and imagined arm joint trajectories based on multiple linear regression (mLR). The most useful time lags are observed to be between 80-150 ms prior to the movement, and the low β and γ bands are shown to be more effective in movement decoding, with a correlation of 0.67. Similarly, mLR is utilized in [15] for estimating the 3D trajectories of arm movements with variable velocities using EEG segments filtered in the range of 0.1-40 Hz. The researchers reported a high correlation between the movement velocities and the EEG activity above the motor cortex in the fronto-central and parietal areas [15]. mLR is also utilized in [16] with the α and β band powers of EEG signals during the motor planning and execution phases to predict the upcoming peak tangential speed and acceleration of hand movement. This study demonstrates the prominence of the occipital and parietal-occipital regions for the α band, and of the frontal and frontal-central regions for the β band, in the movement planning and execution phases.
In a recent study, researchers have explored the feasibility of a commercial EEG headset for motor decoding and classification with the use of a Kalman filter and spatio-spectral features extracted from EEG signals [9]. An overall correlation of 0.58 is achieved in that work. Besides mLR, sparse LR is investigated for predicting the circular trajectories of the upper limb during the movement of bottles with varying masses [17]. In that work, a wide range of EEG frequencies, i.e., 0-150 Hz, is used, and channels over the motor cortex are shown to contribute more prominently to the prediction. In [18], movement intent is decoded from movement related cortical potentials (MRCPs) using narrow-band EEG in the range of 0.1-1 Hz to train a support vector machine (SVM)-based classifier. The selection of a single channel, i.e., Cz, for movement onset decoding, with an accuracy of 91% using the low-frequency (0-5 Hz) Teager-Kaiser energy operator and threshold-based classification, is demonstrated in [19]. Despite the effectiveness of conventional machine learning-based paradigms in EEG-based movement decoding, there is a need to extract high-level features that can enhance the performance. To overcome this, researchers have proposed deep learning-based paradigms.
For example, a convolutional neural network (CNN) is proposed with the use of pre-movement raw spatio-temporal multi-channel EEG for hand movement and force level classification, with an accuracy of 84% [20]. That work demonstrates early classification of hand movement, i.e., 100-1600 ms in advance. A CNN is also utilized in [21], along with a bidirectional long short-term memory (Bi-LSTM)-based network, to predict the velocities of arm reaching tasks using pre-processed EEG signals. An overall correlation between 0.4 and 0.6 is achieved in that work, and the feasibility of robotic arm control based on real-time EEG is demonstrated [21]. Recently, the deep learning-based three-dimensional (3D) hand movement trajectory during grasp-and-lift movements has been estimated using a public EEG database in [10], [22], [23]. In [22], wavelet packet decomposition (WPD) based time-lagged EEG sub-bands are used to train a CNN-LSTM network for prediction of the hand position/trajectory with a high correlation of 0.86. That work explores source-aware EEG features and demonstrates the relevance of the low-frequency bands (δ, θ, and α) in movement estimation; however, it has limited feasibility for real-time hardware implementation. Early estimation of this trajectory is demonstrated in [10] with a high correlation of 0.79 using the δ band of EEG. Further, researchers in [23] demonstrate the feasibility of a brain-inspired spiking neural network (Bi-SNN) along with the mid-frequency and high-frequency EEG bands,
i.e., α, β, and γ, toward the same trajectory estimation with a correlation of 0.7.

Based on the aforementioned description of the literature, it can be asserted that many of these works focus on the classification of upper limb movements, rather than the prediction/estimation of the related kinematic parameters. Timely extraction of kinematic parameters from EEG data during upper limb movement is imperative for different real-time exosuit control-based BCI applications. Further, a few existing machine learning-based regression algorithms are able to estimate the KPs in advance of the actual movement; however, only an average correlation is achieved. Although the existing deep learning-based networks outperform these ML-based paradigms, only a few of them have explored early estimation of KPs. Further, these networks use slightly complex architectures after pre-processing, which may not be feasible on hand-held processors for real-time BCI systems. Most importantly, the performance of the existing paradigms for KP estimation is highly subject-specific, which further adds to the complexity, since the networks need to be trained for each subject.

Fig. 1: Experimental setup for biceps-curl task.

B. Objective and Key Contributions

In view of the aforementioned challenges of the existing works, this work proposes a deep learning-based model for upper limb motion trajectory prediction/estimation from preceding EEG,
i.e., BiCurNet, for early estimation toward exosuit control-based BCI applications. Further, the proposed network is demonstrated to be subject-independent and robust against artifacts, unlike the existing works. To the best of our awareness, this is the first work which focuses on early estimation of kinematic parameters from both subject-dependent and subject-independent EEG signals and further analyses the noise-robustness of the proposed network. The key contributions of this work are listed as follows.

- A low-complexity deep learning-based architecture is proposed for early estimation of the upper limb motion trajectory.
- In-house recording of multi-channel EEG signals during an upper limb biceps-curl experiment.
- Spherical harmonics and head harmonics domain EEG feature-based motion trajectory estimation is explored for the first time.
- Demonstration of the subject-adaptability and noise-robustness of the proposed network.

The rest of this paper is organized as follows. Section II describes the experimental recording and data acquisition procedures. Section III presents the proposed methodology for BiCurNet. Section IV discusses the experimental evaluation results for the proposed work. Finally, Section V concludes this work with the major advantages, shortcomings, and future directions.

Fig. 2: Block diagram depicting the proposed methodology for biceps-curl trajectory estimation.
II. EXPERIMENT AND DATA ACQUISITION

The key objective of the study is to investigate the viability of using EEG signals for elbow joint angle decoding during biceps-curl motion. For this purpose, we designed a synchronous EEG and joint angle recording system. The experimental paradigm and data acquisition are elucidated in the subsequent sections.

A. Subjects and Equipment

The experiment was performed in the Multichannel Signal Processing Laboratory, Department of Electrical Engineering, Indian Institute of Technology Delhi, New Delhi. This research was authorized by the Institutional Review Board of the All India Institute of Medical Sciences, New Delhi. EEG data and joint angle data were recorded from 5 healthy subjects (all male, age 29 ± 2.61 years, all right-handed) while performing the biceps-curl task. Each subject performed 300 trials of the biceps-curl task. EEG data was recorded using 16-channel dry active electrodes (actiCAP Xpress Twist, Brain Products, Gilching, Germany) with a wireless EEG amplifier (LiveAmp-16, Brain Products, Gilching, Germany). The EEG sensors were arranged according to the 10-20 international system of EEG electrode placement, namely, Fp1, Fz, F3, C3, T7, Pz, P3, O1, Oz, O2, P4, Cz, C4, T8, F4 and Fp2. The EEG data was acquired at a 500 Hz sampling frequency. A marker-based camera system (Noraxon NiNOX 125 Camera System) was used for elbow joint angle measurement. The NiNOX 125 camera system was connected to the Noraxon myoResearch platform (MR 3.16) for recording the biceps-curl hand motion.
The camera system was placed in the sagittal plane, 2 m away from the subject. The elbow joint angle was calculated using the myoResearch software in the post-processing step. The 3-point angle measurement tool was utilized to compute the 2D joint angle by tracking reflective markers in the video recording. The joint angle data was sampled at a sampling frequency of 125 Hz. The EEG and joint angle data were synchronized using the Noraxon myoSync device.

B. Experimental Setup and Paradigm

Concurrent EEG and motion data were collected from the users during the biceps-curl task. At the beginning of the experiment, participants were in a standing position, holding a 2 kg dumbbell in their right hand. A monitor was positioned 1.8 m away in front of them for showing the experimental paradigm. Participants stood in a balanced upright posture with the dumbbell in their right hand. The experiment was designed in PsychoPy [24] for instructing the participant to initiate the biceps-curl movement. Each trial began with a cross appearing at the center of the screen along with a beep sound, indicating the start of the trial. After a couple of seconds, a visual cue appeared on the screen to instruct the participant to initiate the biceps-curl. The biceps-curl was performed in the motion execution phase. Each trial ended with a resting phase of two seconds. Before the actual data acquisition, each participant performed a practice run for executing the task correctly. This practice run was not included in any subsequent analysis. We recorded 30 runs with 10 trials each for the biceps-curl task. Inter-run rest was given to the participants to avoid muscle fatigue.
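To make the trial timeline concrete, a minimal PsychoPy sketch of one run is given below; the window settings, stimulus text, and exact durations are illustrative assumptions and not the study's actual script.

```python
from psychopy import core, sound, visual

# Hypothetical single-run script mirroring the paradigm described above:
# fixation cross + beep, visual cue, motion-execution window, two-second rest.
win = visual.Window(fullscr=True, color="black")
fixation = visual.TextStim(win, text="+", height=0.2)
cue = visual.TextStim(win, text="Curl", height=0.2)
beep = sound.Sound("A", secs=0.2)

for trial in range(10):                 # one run of 10 trials
    fixation.draw(); win.flip(); beep.play()
    core.wait(2.0)                      # fixation period (assumed duration)
    cue.draw(); win.flip()
    core.wait(3.0)                      # motion execution phase (assumed duration)
    win.flip()                          # blank screen
    core.wait(2.0)                      # resting phase
win.close()
```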
III. PROPOSED METHODOLOGY

This section elaborates the proposed deep learning-based methodology for early prediction of the upper limb motion trajectory from EEG, as illustrated in Fig. 2. It consists of three major modules: EEG recording, pre-processing and feature extraction, and a depth-wise separable convolutional neural network with a customized attention module. The modules are described in the subsequent sub-sections.
size=5,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' stride=1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' noise removal activation-ReLu stride=2 activation=ReLu (Threshold: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='5 Hz).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' dwConv1D Dense + ReLu layer Common average referencing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' ICA-based artifact rejection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' Amplitude normalization [v].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' DWT-based sub- bands extraction: [AA:"A:A:*A] DWT-Spherical harmonics (SH) (C3-Ks 1) × 32 C4 × 32 features: C4 C5 DWT-Head harmonics (H2) (N-Ks+1) × 32 (C,-Ks+1) × 32 (C,/2) × 32 C5 × 1 8×1 N×1 NxNC .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' features: Predicted c C2 C3 trajectory EEG recording4 extraction, and depth-wise separable convolutional neural net- work with a customized attention module.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' The modules are described in the subsequent sub-sections.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' EEG recording In this work, the EEG signals are acquired using LiveAmp 16 Brain Products system as described in the previous section.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' Prior to be used for the proposed BiCurNet, these signals are pre-processed as detailed in the ensuing sub-section.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' Pre-processing The recorded EEG signals are pre-processed in EEGLAB [25] and MATLAB for feature extraction prior to be fed to the proposed BiCurNet, as shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' After recording and re-sampling the EEG signals, low frequency (below 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='5 Hz) baseline wander noise (BWN) suppression is done using discrete Fourier transform (DFT).' 
For this purpose, the DFT coefficients corresponding to frequencies below 0.5 Hz are estimated. The DFT coefficient index k corresponding to a frequency fq is computed as k = ⌊fq Nd / fqs⌋, where fq is the frequency in Hz, fqs is the sampling frequency, and Nd is the number of DFT points. These DFT coefficients are thresholded to zero for suppression of the BWN, and the EEG signal after BWN suppression is synthesized by the inverse DFT of the thresholded coefficients. For a recorded EEG signal v[n], the procedure is described by the following DFT pair:

V[k] = \sum_{n=0}^{N_d-1} v[n]\, e^{-j 2\pi k n / N_d}    (1)

\tilde{v}[n] = \frac{1}{N_d} \sum_{k=0}^{N_d-1} \tilde{V}[k]\, e^{j 2\pi k n / N_d}    (2)

where \tilde{V} denotes the DFT coefficient vector after thresholding, i.e., \tilde{V} = [0, \ldots, 0, V[k+1], \ldots, V[N_d - k - 1], 0, \ldots, 0]. All signals are amplitude-normalized to the range [-1, 1] as \tilde{v}[n] / \max_n |\tilde{v}[n]|.
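A minimal NumPy sketch of this suppression and normalization step, assuming a single-channel trace v sampled at fs, is given below; zeroing the mirrored high-index bins keeps the reconstructed signal real-valued.

```python
import numpy as np

def remove_baseline_wander(v, fs, f_cut=0.5):
    """Zero the DFT bins below f_cut Hz (and their conjugate-symmetric
    counterparts), take the inverse DFT, and normalize to [-1, 1]."""
    Nd = len(v)
    V = np.fft.fft(v, Nd)
    k = int(np.floor(f_cut * Nd / fs))   # cut-off bin index, k = floor(fq*Nd/fqs)
    V[: k + 1] = 0.0                     # bins 0..k (includes DC)
    if k > 0:
        V[Nd - k:] = 0.0                 # mirrored bins Nd-k..Nd-1
    v_clean = np.real(np.fft.ifft(V, Nd))
    return v_clean / np.max(np.abs(v_clean))
```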
In this work, the recorded EEG signals are analyzed for the estimation of motion trajectory with and without artifact suppression. Independent component analysis (ICA) is utilized for artifact suppression. It estimates the sources corresponding to the cerebral and non-cerebral activities that give rise to the scalp EEG [26]. EEGLAB is used in this work for ICA-based decomposition of the EEG signals obtained after BWN removal. The decomposed independent sources with more than 70% artifactual components are rejected, and the artifact-free EEG signal is reconstructed.
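The study performs this step in EEGLAB; purely for illustration, an equivalent ICA-based clean-up can be sketched in MNE-Python as below. The file name and the excluded component indices are placeholders, and the LiveAmp recordings are assumed to be in BrainVision format.

```python
import mne
from mne.preprocessing import ICA

# Hypothetical file name; LiveAmp data is assumed to be saved as BrainVision.
raw = mne.io.read_raw_brainvision("biceps_curl_s01.vhdr", preload=True)
raw.filter(l_freq=1.0, h_freq=None)     # ICA is usually fit on high-passed data

ica = ICA(n_components=15, method="fastica", random_state=0)
ica.fit(raw)
ica.exclude = [0, 3]                    # indices judged artifactual (placeholders)
raw_clean = ica.apply(raw.copy())       # reconstruct artifact-free EEG
```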
C. Brain source imaging

Brain source imaging (BSI) is performed to select the relevant pre-movement EEG segment prior to feature extraction. Numerical Boundary Element Method (BEM) based forward modeling is utilized for this purpose. The head model utilizes the ICBM MRI template [27] in the OpenMEEG toolbox [28]. The spatio-temporal dynamics of the brain cortical sources are obtained using inverse modeling. In particular, standardized low-resolution electromagnetic tomography (sLORETA) [29] is utilized to solve the under-determined inverse problem. Under the constraint of a smooth source distribution, standardized current density maps are utilized for localization inference. Source localization plots for a right-hand biceps-curl activity are illustrated in Fig. 3. The analysis shown corresponds to a single trial of biceps-curl. The subject was instructed to focus their vision on the fixation cross. A visual cue for movement onset was presented at 0 ms, and the subject executed the biceps-curl activity 410 ms after the visual cue was given. A constant activation may be observed in the occipital lobe up to 60 ms; the information starts getting transferred to the left motor cortex thereafter. All such pre-movement EEG [Fig. 3(c)-(g)] carries the upcoming motion trajectory. It may be noted that the left motor cortex region was activated at 220-240 ms [Fig. 3(e)], corresponding to the right-hand biceps-curl activity, and motor activity was observed thereafter up to 320 ms. The subject executed the biceps-curl activity at 400-450 ms after the visual cue was given. It may be concluded that the motor neural information corresponding to the biceps-curl activity is present approximately 250 ms prior to the motor execution. This information is utilized for selecting the time-lag window for the elbow joint-angle trajectory. The selected EEG data was utilized for the training and testing of the proposed neural decoder.
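As a rough stand-in for the ICBM/OpenMEEG pipeline described above, the sketch below uses MNE-Python's bundled fsaverage template for the BEM forward model and sLORETA for the inverse solution; the `epochs` argument is assumed to hold the recorded EEG with a standard 10-20 montage and average reference already applied.

```python
import os.path as op

import mne
from mne.datasets import fetch_fsaverage
from mne.minimum_norm import apply_inverse, make_inverse_operator

def localize_sources(epochs):
    """sLORETA source estimate on the fsaverage template (a substitute for
    the ICBM/OpenMEEG head model used in the paper)."""
    fs_dir = fetch_fsaverage(verbose=False)
    src = op.join(fs_dir, "bem", "fsaverage-ico-5-src.fif")
    bem = op.join(fs_dir, "bem", "fsaverage-5120-5120-5120-bem-sol.fif")

    fwd = mne.make_forward_solution(epochs.info, trans="fsaverage", src=src,
                                    bem=bem, eeg=True, mindist=5.0)
    cov = mne.compute_covariance(epochs, tmax=0.0)   # pre-cue noise covariance
    inv = make_inverse_operator(epochs.info, fwd, cov)
    return apply_inverse(epochs.average(), inv, lambda2=1.0 / 9.0,
                         method="sLORETA")
```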
D. Feature extraction

The pre-processed EEG signals are analyzed with different transform-domain techniques for significant feature extraction. This work explores time-frequency features using the discrete wavelet transform (DWT) in: the spatial domain; the spatio-temporal domain using the spherical Fourier transform (SFT); and the spatio-temporal domain using the head harmonics transform.

1) Discrete wavelet transform-based features: The discrete wavelet transform is utilized to decompose the EEG signals into constituent sub-bands/rhythms. It makes use of high-pass and low-pass filters for decomposing the signals into a pre-defined number of levels based on the sampling frequency [30]. The DWT of a single-channel EEG signal v[n] is given by

V_{j,r} = \sum_{n \in \mathbb{Z}} v[n]\, \psi^{*}_{j,r}[n]    (3)

where \psi_{j,r} is the translated and scaled version of the mother wavelet \psi_{0,0}, defined as

\psi_{j,r}[n] = 2^{-j/2}\, \psi_{0,0}\!\left(2^{-j}(n - r)\right)    (4)

The procedure for DWT-based decomposition follows a tree-like structure, as demonstrated in Fig. 4. At each decomposition level, the wavelet coefficients are down-sampled to remove redundant information [31]. In this work, since the sampling frequency used is 125 Hz, the decomposed sub-bands are obtained as delta (δ: 0.5-3.9 Hz), theta (θ: 3.9-7.8 Hz), alpha (α: 7.8-15.6 Hz), beta (β: 15.6-31.2 Hz), and gamma (γ: > 31.2 Hz), denoted by V_δ, V_θ, V_α, V_β, and V_γ, respectively.
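A short PyWavelets sketch of this four-level decomposition at 125 Hz is given below; each sub-band signal is reconstructed by zeroing all other coefficient arrays before the inverse transform. The choice of the db4 mother wavelet is an assumption made for illustration.

```python
import numpy as np
import pywt

def dwt_subbands(v, wavelet="db4", level=4):
    """Split a single-channel EEG trace sampled at 125 Hz into the five
    rhythms: A4 ~ delta, D4 ~ theta, D3 ~ alpha, D2 ~ beta, D1 ~ gamma."""
    coeffs = pywt.wavedec(v, wavelet, level=level)   # [cA4, cD4, cD3, cD2, cD1]
    names = ["delta", "theta", "alpha", "beta", "gamma"]
    bands = {}
    for i, name in enumerate(names):
        kept = [c if j == i else np.zeros_like(c) for j, c in enumerate(coeffs)]
        bands[name] = pywt.waverec(kept, wavelet)[: len(v)]
    return bands

# Example: v_delta = dwt_subbands(eeg_channel)["delta"]
```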
Fig. 3: Brain source localization using sLORETA at different time stamps: (a) 0 ms (b) 60 ms (c) 120 ms (d) 180 ms (e) 240 ms (f) 300 ms (g) 360 ms (h) 420 ms.

Fig. 4: Four-level DWT-based decomposition to obtain the approximation and detail bands, with the frequency ranges at level j given by [0, 2^{-j-1} F_s] and [2^{-j-1} F_s, 2^{-j} F_s], respectively.

2) DWT-Spherical harmonics-based features: To extract the spatio-temporal features of the EEG signal and the corresponding DWT-based sub-bands obtained above, the spherical Fourier transform (SFT) is explored in this work. Since the human head is assumed to be spherical in shape [32], spherical Fourier basis functions have been widely employed in the literature. The decomposition of a multi-channel EEG signal V in the spherical Fourier domain (SFD) is obtained as

V^{SH}_{lm}[n] = \int_{\Omega} V(\Omega, n)\, Y^{m}_{l}(\Omega)\, d\Omega    (5)

where V(\Omega, n) denotes the potential at \Omega = (r, θ, φ) on the scalp at time instant n. Here, r represents the radius of the head, θ denotes the angle of elevation measured downward from the positive Z-axis (θ ∈ [0, π]), and φ denotes the azimuth angle measured anticlockwise from the positive X-axis, as shown in Fig. 5. The real-valued Y^{m}_{l}(\Omega) of l-th order and m-th degree constitutes an orthonormal set of basis functions defined over a spherical array. For a finite-order system, l ∈ [0, L] and m ∈ [-l, l]; therefore, (L+1)^2 distinct spherical harmonics are obtained in total.

Fig. 5: The total potential at a channel is a contribution of each active equivalent dipole.
Since the number of spatial sampling points S should be at least (L+1)^2, the order is limited to L \le \sqrt{S} - 1. In this work, 16 electrodes are used for recording, i.e., S = 16, so the upper limit on L is 3. Here, L = 2 is used, giving a total of 9 distinct spherical harmonics. The corresponding features are stored in V^{SH}_{lm} with a dimension of 9 \times N. Each EEG sub-band is also decomposed using the spherical Fourier basis functions, and the corresponding features are obtained as V^{SH}_{\delta lm}, V^{SH}_{\theta lm}, V^{SH}_{\alpha lm}, V^{SH}_{\beta lm}, and V^{SH}_{\gamma lm}.
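A minimal sketch of computing such harmonic-domain coefficients is given below; it assumes illustrative electrode angles, builds the real-valued basis up to L = 2 with SciPy, and projects the 16-channel data by least squares rather than the weighted quadrature of (5).

```python
import numpy as np
from scipy.special import sph_harm

def real_sph_harm(l, m, elev, azim):
    """Real-valued Y_l^m at elevation elev (from +Z) and azimuth azim."""
    Y = sph_harm(abs(m), l, azim, elev)   # SciPy convention: sph_harm(m, l, azimuth, colatitude)
    if m > 0:
        return np.sqrt(2) * (-1) ** m * Y.real
    if m < 0:
        return np.sqrt(2) * (-1) ** m * Y.imag
    return Y.real

rng = np.random.default_rng(0)
S = 16                                    # number of electrodes
elev = rng.uniform(0, 2 * np.pi / 3, S)   # placeholder electrode elevations
azim = rng.uniform(0, 2 * np.pi, S)       # placeholder electrode azimuths

L = 2
basis = np.column_stack([real_sph_harm(l, m, elev, azim)
                         for l in range(L + 1)
                         for m in range(-l, l + 1)])      # S x (L+1)^2 = 16 x 9

V = rng.standard_normal((S, 1000))        # stand-in EEG matrix (channels x samples)
V_SH, *_ = np.linalg.lstsq(basis, V, rcond=None)          # 9 x N coefficient features
```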
3) DWT-head harmonics-based features: More recently, head harmonics (H2) basis functions have been proposed for a more adequate representation of EEG signals based on the geometry of the human head [32]. Since the EEG sensors placed on the head form a surface between a sphere and a hemisphere, H2 basis functions are shown to be more efficient for representing data sampled over the head. The decomposition of an EEG signal matrix V in the H2 domain is given as

V^{H2}_{lm} = \int_{\Omega} V(\Omega, n) \, H_l^m(\Omega) \, d\Omega \approx \sum_{w=1}^{S} z_w \, V(\Omega_w, n) \, H_l^m(\Omega_w)    (6)

where z_w denotes the sampling weight and \Omega_w = (\theta_w, \phi_w) is the location of channel w. Here, the elevation angle \theta lies in the range [0, 2\pi/3], as per the head geometry shown in Fig. 6. The real-valued H_l^m(\Omega) of order l and degree m constitute an orthonormal set of basis functions defined over the human head.

Fig. 6: Geometry of the human head with the parameters: perimeter = 40 cm, radius = 10 cm [32].

The corresponding features are stored in V^{H2}_{lm} with a dimension of 9 \times N, similar to that obtained with the SFT. Each EEG sub-band is also decomposed using the H2 basis functions, and the corresponding features are obtained as V^{H2}_{\delta lm}, V^{H2}_{\theta lm}, V^{H2}_{\alpha lm}, V^{H2}_{\beta lm}, and V^{H2}_{\gamma lm}.

E. Proposed BiCurNet

After pre-processing and feature extraction, the EEG data is given as input to the proposed BiCurNet model. The proposed deep learning model is illustrated in Fig. 2.
The constituent layers of the proposed model include a depth-wise separable one-dimensional convolution layer (DWSConv1D), a conv1D layer, a max-pooling (maxpool1D) layer, a customized attention module, a flatten layer, three dense layers, and an output layer for regression/prediction.

Depth-wise separable convolution layer (DWSConv1D): The first layer of the network is a conv1D layer that performs a depth-wise separable convolution of the received input data with the kernels/filters of this layer. It receives the input EEG data in the form of an N \times N_c matrix, as shown in Fig. 2, where N denotes the number of samples and N_c is the number of channels. The convolution operation in this layer is split into two parts, depth-wise and point-wise [33]. Depth-wise convolution is performed on each channel separately, and point-wise convolution is performed as a 1 \times 1 convolution. It is computationally efficient with respect to a standard convolution layer, making it suitable for lightweight scenarios. Convolution of a filter f[n] with an input v[n] is written as

v[n] * f[n] = \sum_{i=0}^{k_s - 1} v[i] \, f[n - i]    (7)

where '*' represents the convolution operation and k_s denotes the filter width. In this layer, 32 filters are used, each with a width k_s of 5.
In general, the z-th convolution output, i.e., the feature map of layer l_r, is given as [34]

c^{l_r}_z = \sigma\left( b^{l_r}_z + \sum_j c^{l_r - 1}_j \times f^{l_r}_{zj} \right)    (8)

where c^{l_r}_z is the z-th feature in the l_r-th layer, c^{l_r - 1}_j is the j-th feature in the preceding layer, f^{l_r}_{zj} represents the filter that links feature z to feature j, b^{l_r}_z represents the corresponding bias vector, and \sigma denotes the activation function, which is the rectified linear unit (ReLU) in this layer, defined as \sigma(t) = \max(0, t). A stride of one is used in this layer. He uniform initialization is used for the kernel weights and zero initialization for the bias vector. These parameters produce an output dimension of C_1: (N - k_s + 1) \times 32, as shown in Fig. 2. L2 regularization with a factor of 0.001 is also used in this layer to reduce over-fitting.

Conv1D layer: The second layer is a conventional convolution layer, which operates on all input channels at a time. This layer uses the same parameters as the previous layer, and its output dimension is (C_1 - k_s + 1) \times 32.

Max-pooling layer (Maxpool1D): The convolution layer output is reduced in dimensionality using a 1-D max-pooling layer, which retains the highest feature value within each segment of a given pool size [35]. This layer helps in low-level feature extraction.
The corresponding process can be interpreted as [34]

c^{h_x}_{m_x} = \max_{b \in ar_m} c^{h_x - 1}_b    (9)

where ar_m denotes the pool area with index m. In this work, a pool size and a stride of 2 are selected, which results in an output dimension of C_3 \times 32, where C_3 = (C_1 - k_s + 1)/2, as shown in Fig. 2.

Customized attention module (CAM): The feature maps of the previous layer are further transformed to intensify the more relevant features and restrain the less relevant ones. A CAM is utilized for this purpose, which uses a dense layer with 32 units and a multiply layer, as shown in Fig. 2. This module works on the attention principle, which enhances the relevant features and diminishes the less significant ones [33]. An element-wise multiplication is performed between the outputs of the dense layer and the maxpool1D layer. This produces higher product values where both the maxpool1D and dense layer outputs are high, thereby enhancing the more intense features. Similarly, the less significant features are further restrained by the low product values where both layer outputs are low. The input dimension of the dense layer is C_3 \times 32, and the dot product between the 32 \times 32 weight matrix of the dense layer and its input preserves this dimension.

TABLE I: Training hyper-parameters (after hyper-tuning).

Ncl   Nk   Dr    ks   sr   lr     Bt   ec
3     32   0.40  5    1    0.001  15   100

Ncl: number of convolution layers, Nk: number of kernels/filters, Dr: dropout rate, ks: kernel width, sr: stride/shift, lr: learning rate, Bt: batch size, ec: number of training epochs.
Flatten layer: This layer transforms the output of the CAM, of dimension C_3 \times 32, into a 1-D vector of dimension C_4 \times 1 (C_4 = 32 C_3), as shown in Fig. 2. A dropout with a factor of 0.4 is applied after this layer to prevent the model from over-fitting [36].

Dense layers: Three dense layers with 8 units each are used after the flatten layer. In this work, the swish activation function is used in these layers, given as

f(x) = x \cdot \mathrm{sigmoid}(x)    (10)

Output layer: The final layer is a dense layer for regression that maps the features from the preceding layers to the predicted trajectory of dimension N \times 1, as shown in Fig. 2. A dense layer implements a dot product between its input and the kernel. A linear activation function is used in this layer, given by

f(x) = x    (11)

The aforementioned layers and hyper-parameters are used to create the proposed network. For training, 80% of the EEG signals with different durations/window lengths are taken from the recorded database; the remaining 20% is divided into 10% test and 10% validation data. Information about the optimal training hyper-parameter selection and the selected values is provided in the next section. The proposed network is built using the Keras deep learning framework with TensorFlow version 2.2.1 as the backend in Python.
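A minimal Keras sketch of this layer stack, under the stated hyper-parameters, is given below; the input/output sizes (`n_samples`, `n_channels`) are placeholders, and the exact implementation details may differ from the authors' code.

```python
import tensorflow as tf
from tensorflow.keras import layers, regularizers, Model

def build_bicurnet(n_samples, n_channels):
    """Sketch of the BiCurNet layer stack described in the text."""
    inp = layers.Input(shape=(n_samples, n_channels))

    # Depth-wise separable 1-D convolution: 32 filters of width 5, ReLU,
    # He-uniform initialization and L2(0.001) regularization
    x = layers.SeparableConv1D(
        32, 5, activation='relu',
        depthwise_initializer='he_uniform', pointwise_initializer='he_uniform',
        bias_initializer='zeros',
        depthwise_regularizer=regularizers.l2(0.001),
        pointwise_regularizer=regularizers.l2(0.001))(inp)

    # Conventional 1-D convolution with the same settings
    x = layers.Conv1D(32, 5, activation='relu',
                      kernel_initializer='he_uniform',
                      kernel_regularizer=regularizers.l2(0.001))(x)

    # Max pooling with pool size and stride of 2
    x = layers.MaxPooling1D(pool_size=2, strides=2)(x)

    # Customized attention module: dense layer + element-wise multiply
    attn = layers.Dense(32)(x)
    x = layers.Multiply()([x, attn])

    x = layers.Flatten()(x)
    x = layers.Dropout(0.4)(x)

    # Three dense layers with swish activation
    for _ in range(3):
        x = layers.Dense(8, activation=tf.nn.swish)(x)

    # Linear regression output: the N-sample predicted trajectory
    out = layers.Dense(n_samples, activation='linear')(x)
    return Model(inp, out)

model = build_bicurnet(n_samples=200, n_channels=16)  # placeholder sizes
```

With 'valid' padding and unit stride, this reproduces the output dimensions C_1, C_1 - k_s + 1, and the pooled length C_3 noted above.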
In this work, data augmentation is utilized to increase the number of training examples and avoid over-fitting. It makes the proposed network more robust by creating new and varied training examples from which it can learn the variations encountered in the real world. For this purpose, random flipping and rolling operations are used within the Python Keras framework.

IV. RESULTS AND DISCUSSION

In this section, the performance evaluation of the proposed BiCurNet on the recorded EEG signals is presented with respect to different parameters, along with detailed interpretations of the results.

A. Hyper-parameters for training BiCurNet

The parameters used for training the proposed network are presented herein. For assessing the regression/prediction performance, 10% of the EEG signals from the recorded database are used for testing. The data from each subject is used for training, testing, and validation, i.e., subject-dependent training is performed. The network is trained using a batch size of 15, 100 epochs, and the Adam optimizer with a learning rate of 0.001.
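A sketch of this training setup is shown below, assuming the `build_bicurnet` helper from the previous sketch and synthetic stand-in data; the flip/roll augmentation is only an illustration of the operations mentioned above, implemented here with plain NumPy.

```python
import numpy as np
import tensorflow as tf

# Stand-in data: EEG windows (trials x samples x channels) and trajectories
X = np.random.randn(300, 200, 16).astype('float32')
y = np.random.randn(300, 200).astype('float32')

# Illustrative flip/roll augmentation, applied consistently to inputs and targets
X_aug = np.concatenate([X, np.flip(X, axis=1), np.roll(X, 20, axis=1)])
y_aug = np.concatenate([y, np.flip(y, axis=1), np.roll(y, 20, axis=1)])

model = build_bicurnet(n_samples=200, n_channels=16)   # from the sketch above
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss='mse')
model.fit(X_aug, y_aug, batch_size=15, epochs=100, validation_split=0.1)
```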
To curtail the statistical unreliability in the computation of test loss due to the small database, ten-fold cross-validation is employed for performance evaluation. Mean square error (MSE) is used as the loss function for regression. Table I presents the training hyper-parameters, which are selected using the KerasTuner framework in Python, an optimization framework for hyper-parameter tuning based on search-and-selection criteria. The final selected set of optimal hyper-parameters is listed in the table.

B. Regression metric

In this work, time-lagged and windowed EEG signals are used to estimate the motion trajectory in advance. In particular, EEG data preceding the motion by different time lags (8-240 ms) is used to train, test, and validate the proposed network. Additionally, the performance is evaluated with varying EEG window sizes (320-1600 ms), with a 95% overlap between adjacent windows. The Pearson correlation coefficient (PCC) is utilized for analysing the performance of the proposed network for upper-limb motion trajectory estimation. The PCC between the true/measured trajectory A and the predicted/estimated trajectory P, each with N samples, is given as

\Pi(A, P) = \frac{1}{N-1} \sum_{i=1}^{N} \left( \frac{A_i - m_A}{\sigma_A} \right) \left( \frac{P_i - m_P}{\sigma_P} \right)    (12)

where m is the mean and \sigma denotes the standard deviation. This normalized covariance measure takes values between -1 and 1.
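Equation (12) is the sample Pearson correlation and can be computed directly, for example with NumPy:

```python
import numpy as np

def pcc(a, p):
    """Pearson correlation between measured (a) and estimated (p) trajectories."""
    a, p = np.asarray(a, float), np.asarray(p, float)
    za = (a - a.mean()) / a.std(ddof=1)   # z-score with sample standard deviation
    zp = (p - p.mean()) / p.std(ddof=1)
    return float(np.sum(za * zp) / (len(a) - 1))

# Equivalent to np.corrcoef(a, p)[0, 1] up to floating-point error
```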
C. Subject-dependent PCC analysis

The proposed model is trained and tested for each subject separately for subject-dependent (SD) performance analysis. The PCC values, averaged across all trials and subjects, are presented in Table II for varying time lags, window sizes, and EEG features. The EEG bands are considered in the spatial (V), spherical harmonics (V^{SH}_{\delta lm}), and head harmonics (V^{H2}_{\delta lm}) domains. It may be noted that the transformed-domain features (V^{SH}_{\delta lm} and V^{H2}_{\delta lm}) give PCC values similar to their spatial-domain counterparts at reduced computational cost, as detailed in Section III-D2. Additionally, the \delta band gives higher PCC values while the \gamma band has the lowest, indicating the pertinence of the low-frequency \delta band for motion trajectory decoding using EEG. The best correlation is observed when V_\delta, V^{SH}_{\delta lm}, and V^{H2}_{\delta lm} are combined; the highest correlation achieved is 0.7 with a 1600 ms EEG window preceding the motion by 240 ms. This demonstrates the feasibility of early estimation of the motion trajectory using the proposed network.

D. Subject-independent performance analysis

To further explore the adaptability of the proposed network, a subject-independent (SI) analysis is presented herein using a leave-one-out scheme. A comparison of the SI and SD cases in terms of PCC is presented in Fig. 7.
The PCC values are averaged over all subjects and lags. A slight decrease in PCC may be noted in the SI case; however, it remains within ±0.05, which indicates the robustness of the proposed network against subject variability.

TABLE II: Pearson correlation coefficient (PCC) for different EEG segments and lags of data (mean over subjects). Each group of five columns lists the lags of 8, 40, 80, 160, and 240 ms for one EEG segment (window) length.

EEG Features    8     40    80    160   240     8     40    80    160   240     8     40    80    160   240     8     40    80    160   240
V               0.25  0.25  0.26  0.26  0.26    0.35  0.35  0.36  0.35  0.26    0.42  0.42  0.42  0.42  0.43    0.55  0.55  0.55  0.55  0.56
V_δ             0.34  0.33  0.33  0.34  0.36    0.41  0.41  0.42  0.42  0.42    0.48  0.48  0.48  0.48  0.49    0.61  0.61  0.61  0.66  0.67
V_θ             0.24  0.23  0.23  0.24  0.26    0.38  0.38  0.38  0.38  0.38    0.44  0.44  0.44  0.44  0.45    0.55  0.55  0.55  0.56  0.57
V_α             0.22  0.22  0.22  0.22  0.21    0.36  0.36  0.37  0.36  0.36    0.39  0.39  0.39  0.39  0.39    0.51  0.51  0.51  0.51  0.53
V_β             0.18  0.18  0.17  0.17  0.17    0.29  0.29  0.29  0.30  0.30    0.32  0.32  0.32  0.32  0.33    0.39  0.39  0.39  0.39  0.39
V_γ             0.10  0.10  0.10  0.10  0.10    0.17  0.17  0.17  0.18  0.18    0.27  0.27  0.27  0.27  0.28    0.29  0.29  0.29  0.29  0.29
V^SH_lm         0.25  0.25  0.25  0.25  0.26    0.34  0.35  0.35  0.35  0.36    0.41  0.41  0.41  0.41  0.42    0.54  0.54  0.54  0.54  0.55
V^SH_δlm        0.34  0.33  0.34  0.35  0.35    0.41  0.41  0.41  0.41  0.41    0.47  0.47  0.47  0.48  0.48    0.61  0.61  0.61  0.66  0.66
V^SH_θlm        0.23  0.22  0.22  0.22  0.23    0.37  0.37  0.37  0.37  0.38    0.44  0.44  0.44  0.44  0.45    0.55  0.55  0.55  0.55  0.56
V^SH_αlm        0.20  0.20  0.19  0.20  0.20    0.34  0.34  0.34  0.34  0.36    0.38  0.38  0.38  0.38  0.38    0.50  0.50  0.50  0.50  0.51
V^SH_βlm        0.17  0.17  0.16  0.16  0.17    0.29  0.29  0.29  0.29  0.29    0.33  0.33  0.33  0.33  0.34    0.40  0.40  0.40  0.40  0.41
V^SH_γlm        0.09  0.10  0.10  0.10  0.10    0.18  0.18  0.18  0.18  0.18    0.28  0.28  0.28  0.28  0.30    0.30  0.30  0.30  0.30  0.31
V^H2_lm         0.25  0.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='25 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='26 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='26 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='26 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='35 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='35 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='35 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='35 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='36 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='42 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='42 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='42 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='42 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='43 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='55 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='55 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='55 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='55 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='57 V H2 δnm 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='34 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='33 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='34 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='34 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='35 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='41 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='41 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='41 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='41 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='41 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='48 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='48 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='48 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='48 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='49 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='62 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='62 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='62 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='62 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='65 V H2 θnm 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='25 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='24 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='24 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='23 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='23 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='38 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='38 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='38 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='39 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='39 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='44 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='44 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='44 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='44 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='45 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='53 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='53 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='53 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='53 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='55 V H2 αnm 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='22 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='22 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='36 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='36 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='36 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='36 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='36 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='38 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='38 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='38 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='38 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='39 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='51 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='51 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='51 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='51 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='51 V H2 βnm 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='18 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='18 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='18 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='18 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='18 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='28 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='28 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='28 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='28 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='29 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='33 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='33 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='33 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='33 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='34 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='39 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='39 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='39 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='39 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='4 V H2 γnm 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='11 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='11 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='11 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='11 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='11 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='16 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='16 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='16 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='16 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='17 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='19 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='19 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='19 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='19 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='19 Vcom 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='36 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='36 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='36 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='36 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='37 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='43 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='43 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='44 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='44 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='44 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='51 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='52 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='67 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='67 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='67 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='68 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='70 ■: 320 ms window, ■: 800 ms window, ■: 1200 ms window , ■: 1600 ms window;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' Note: Vcom : [Vδ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' V SH δnm;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' V H2 δnm] Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' 7: Average PCC values w.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='r.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' subject dependent (SD) and subject-independent (SI) training of the proposed network at different window sizes (320 ms to 1600 ms).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' Robustness analysis The robustness of the proposed network is analyzed herein using artifactual EEG signals.' 
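The combined feature noted above is the delta-band representations from the three domains stacked into a single input. The following minimal sketch illustrates such a concatenation; the array names, channel counts, harmonic-coefficient counts, and the 500 Hz sampling rate are assumptions for illustration only, not the authors' recording or feature settings.

```python
import numpy as np

# Illustrative shapes for one EEG window (all sizes are assumptions):
#   v_delta     : delta-band scalp signal, (n_channels, n_samples)
#   v_sh_delta  : delta-band spherical-harmonics coefficients, (n_sh_coeffs, n_samples)
#   v_h2_delta  : delta-band head-harmonics coefficients, (n_h2_coeffs, n_samples)
rng = np.random.default_rng(0)
n_samples = 160                                   # e.g., a 320 ms window at an assumed 500 Hz
v_delta = rng.standard_normal((64, n_samples))
v_sh_delta = rng.standard_normal((25, n_samples))
v_h2_delta = rng.standard_normal((25, n_samples))

# Vcom = [Vδ; V SH δnm; V H2 δnm]: stack the three delta-band representations
# along the feature axis while keeping the shared time axis.
v_com = np.concatenate([v_delta, v_sh_delta, v_h2_delta], axis=0)
print(v_com.shape)                                # (114, 160)
```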
E. Robustness analysis
The robustness of the proposed network is analyzed herein using artifactual EEG signals. In particular, the pre-processing did not include ICA decomposition-based artifact removal, and the network is trained and tested on such signals. Mean PCC values obtained using the without-artifact (WOA) and with-artifact (WA) EEG signals are presented in Fig. 8. Only a small decrease of 0.06 in the PCC values is observed for the with-artifact case, which indicates the robustness of the proposed model.
Fig. 8: Subject-dependent average PCC values utilizing EEG data with and without artifacts for different window sizes.
F. Trajectory estimation curves
The proposed BiCurNet model is additionally evaluated herein using actual motion trajectories. Fig. 9 illustrates the estimated and actual trajectories for subject 1 with window sizes varying between 800 ms and 1600 ms. An overlap of 95% is considered between two adjacent windows. It may be observed from the figure that the correlation improves considerably as the window size increases, resulting in estimated trajectories closer to the ground truth. The ability of the proposed network to follow the trajectory pattern for all window sizes indicates its learning capability.
Fig. 9: Actual and predicted trajectories of subject 1 (early prediction, before 40 ms).
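For concreteness, the windowed evaluation described here (adjacent windows with 95% overlap, and PCC between estimated and actual trajectories) can be sketched as below. The sampling rate, window length, and the synthetic trajectories are placeholders; the sketch only illustrates the overlap scheme and the metric, not the authors' exact evaluation pipeline.

```python
import numpy as np

def window_starts(n_total, win_len, overlap=0.95):
    """Start indices of windows of length win_len with the given fractional overlap."""
    stride = max(1, int(round(win_len * (1.0 - overlap))))
    return range(0, n_total - win_len + 1, stride)

def windowed_pcc(actual, predicted, fs=500, win_ms=800, overlap=0.95):
    """Mean Pearson correlation between actual and predicted trajectories over overlapping windows."""
    win_len = int(round(fs * win_ms / 1000.0))
    pccs = [np.corrcoef(actual[s:s + win_len], predicted[s:s + win_len])[0, 1]
            for s in window_starts(len(actual), win_len, overlap)]
    return float(np.mean(pccs))

# Toy usage: a synthetic elbow-angle trajectory and a noisy estimate of it
# (fs = 500 Hz and win_ms = 800 ms are assumed values).
t = np.linspace(0.0, 10.0, 5000)
actual = np.sin(0.5 * np.pi * t)
predicted = actual + 0.2 * np.random.default_rng(1).standard_normal(t.size)
print(round(windowed_pcc(actual, predicted), 2))
```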
V. CONCLUSION
A deep learning-based paradigm for early estimation of upper-limb motion trajectory using EEG signals is proposed in this work. The EEG is collected while performing the biceps curl movement. The proposed BiCurNet model is built using a light-weight architecture with depth-wise separable convolution layers and a customized attention module. The input features to the model are taken in the computationally more efficient spherical-harmonics and head-harmonics domains, in addition to the spatio-temporal data.
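For illustration only, the two building blocks named above (a depth-wise separable convolution and a channel-attention gate) can be sketched as follows. The layer sizes, activation choices, and the squeeze-and-excitation style gate are assumptions; this is not the actual BiCurNet architecture or its hyper-parameters.

```python
import torch
import torch.nn as nn

class DepthwiseSeparableConv1d(nn.Module):
    """Depth-wise convolution (one filter per input channel) followed by a 1x1 point-wise convolution."""
    def __init__(self, in_ch, out_ch, kernel_size=7):
        super().__init__()
        self.depthwise = nn.Conv1d(in_ch, in_ch, kernel_size,
                                   padding=kernel_size // 2, groups=in_ch)
        self.pointwise = nn.Conv1d(in_ch, out_ch, kernel_size=1)
        self.act = nn.ELU()

    def forward(self, x):
        return self.act(self.pointwise(self.depthwise(x)))

class ChannelAttention(nn.Module):
    """Squeeze-and-excitation style gate that re-weights feature channels."""
    def __init__(self, channels, reduction=4):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction), nn.ReLU(),
            nn.Linear(channels // reduction, channels), nn.Sigmoid())

    def forward(self, x):
        w = self.fc(self.pool(x).squeeze(-1)).unsqueeze(-1)   # per-channel weights in (0, 1)
        return x * w

# Toy forward pass: a batch of 8 feature windows with 114 rows and 160 time samples (assumed sizes).
x = torch.randn(8, 114, 160)
block = nn.Sequential(DepthwiseSeparableConv1d(114, 32), ChannelAttention(32))
print(block(x).shape)   # torch.Size([8, 32, 160])
```

The depth-wise separable factorization keeps the parameter count low, which is what makes such blocks attractive for light-weight EEG decoders.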
The extensive performance evaluation of the proposed network on in-house recorded EEG signals demonstrates its effectiveness in early estimation. The evaluation includes both subject-dependent and subject-independent studies. The robustness (noise awareness) of the proposed network is also demonstrated by training it with artifactual EEG signals. The proposed network, being computationally efficient and noise-aware, is suitable for use in real-time BCI applications. Real-time implementation of the proposed network for exosuit control is currently being explored.
ACKNOWLEDGMENT
This research work was supported in part by the DRDO - JATC project with project number RP04191G.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' Roy, “Source aware deep learning framework for hand kinematic reconstruction using EEG signal,” IEEE Transactions on Cybernetics, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' [23] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' Kumarasinghe, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' Kasabov, and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' Taylor, “Brain-inspired spiking neural networks for decoding and understanding muscle activity and kinematics from electroencephalography signals during hand move- ments,” Scientific reports, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' 11, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' 1, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' 1–15, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' [24] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' Peirce, “PsychoPy—psychophysics software in Python,” Journal of neuroscience methods, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' 162, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' 1-2, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' 8–13, 2007.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' [25] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' Delorme and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' Makeig, “EEGLAB: an open source toolbox for analysis of single-trial EEG dynamics including independent component analysis,” Journal of neuroscience methods, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' 134, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' 1, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/2tE2T4oBgHgl3EQfjAfh/content/2301.03965v1.pdf'} +page_content=' 9–21, 2004.' 
[Figure: true vs. predicted trajectory (angle vs. samples) for analysis window sizes of 800 ms, 1200 ms, and 1600 ms, with the PCC annotated for each window size.]
[26] A. K. Maddirala and K. C. Veluvolu, "ICA with CWT and k-means for eye-blink artifact removal from fewer channel EEG," IEEE Transactions on Neural Systems and Rehabilitation Engineering, 2022.
[27] J. Mazziotta, A. Toga, A. Evans, P. Fox, J. Lancaster, K. Zilles, R. Woods, T. Paus, G. Simpson, B. Pike et al., "A probabilistic atlas and reference system for the human brain: International Consortium for Brain Mapping (ICBM)," Philosophical Transactions of the Royal Society of London. Series B: Biological Sciences, vol. 356, no. 1412, pp. 1293–1322, 2001.
[28] A. Gramfort, T. Papadopoulo, E. Olivi, and M. Clerc, "OpenMEEG: opensource software for quasistatic bioelectromagnetics," Biomedical Engineering Online, vol. 9, no. 1, pp. 1–20, 2010.
[29] R. D. Pascual-Marqui et al., "Standardized low-resolution brain electromagnetic tomography (sLORETA): technical details," Methods Find Exp Clin Pharmacol, vol. 24, no. Suppl D, pp. 5–12, 2002.
[30] M. Saini, U. Satija, and M. D. Upadhayay, "Discriminatory features based on wavelet energy for effective analysis of electroencephalogram during mental tasks," Circuits, Systems, and Signal Processing, pp. 1–29, 2022.
[31] S. G. Mallat, "A theory for multiresolution signal decomposition: the wavelet representation," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 11, no. 7, pp. 674–693, 1989.
[32] A. Giri, L. Kumar, and T. K. Gandhi, "Brain source localization in head harmonics domain," IEEE Transactions on Instrumentation and Measurement, vol. 70, pp. 1–10, 2020.
[33] S. Woo, J. Park, J.-Y. Lee, and I. S. Kweon, "CBAM: Convolutional block attention module," in Proceedings of the European Conference on Computer Vision (ECCV), 2018, pp. 3–19.
[34] M. Pérez-Enciso and L. M. Zingaretti, "A guide on deep learning for complex trait genomic prediction," Genes, vol. 10, no. 7, p. 553, 2019.
[35] Z. Jiao, X. Gao, Y. Wang, J. Li, and H. Xu, "Deep convolutional neural networks for mental load classification based on EEG data," Pattern Recognition, vol. 76, pp. 582–595, 2018.
[36] H. Wu and X. Gu, "Towards dropout training for convolutional neural networks," Neural Networks, vol. 71, pp. 1–10, 2015.