diff --git "a/1dAyT4oBgHgl3EQfofiA/content/tmp_files/load_file.txt" "b/1dAyT4oBgHgl3EQfofiA/content/tmp_files/load_file.txt" new file mode 100644--- /dev/null +++ "b/1dAyT4oBgHgl3EQfofiA/content/tmp_files/load_file.txt" @@ -0,0 +1,630 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf,len=629 +page_content='EMOGATOR: A NEW OPEN SOURCE VOCAL BURST DATASET WITH BASELINE MACHINE LEARNING CLASSIFICATION METHODOLOGIES Fred W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' Buhl University of Florida fredbuhl@ufl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content='edu January 3, 2023 ABSTRACT Vocal Bursts – short, non-speech vocalizations that convey emotions, such as laughter, cries, sighs, moans, and groans – are an often-overlooked aspect of speech emotion recognition, but an important aspect of human vocal communication.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' One barrier to study of these interesting vocalizations is a lack of large datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' I am pleased to introduce the EmoGator dataset, which consists of 32,040 samples from 365 speakers, 16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content='91 hours of audio;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' each sample classified into one of 30 distinct emotion categories by the speaker.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' Several different approaches to construct classifiers to identify emotion categories will be discussed, and directions for future research will be suggested.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' Data set is available for download from https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content='com/fredbuhl/EmoGator.' 
Keywords: speech emotion recognition; vocal bursts; affect bursts; nonverbal vocalizations; affective computing; machine learning; dataset

1 Introduction

Emotions are central to human experience; they motivate and inform much of what we do. Recognizing emotions in others has been a longstanding area of interest. Perhaps the first scientific study of emotion recognition was the work of Duchenne [1] in 1862, who collected photographs of facial expressions elicited by electrically stimulating facial muscles. The question of how many emotions there are remains open. Duchenne identified 13 primary emotions, and 60 combinations, from facial expression. A recent study by Cowen & Keltner found that humans were able to reliably identify 28 distinct emotions from facial expression [2]. Another recent study by the same team [3] indicated that humans self-report as many as 27 distinct emotions; these responses were collected from subjects reacting to short video clips. The emotion categories presented as gradients, which occasionally overlapped with other emotion categories; multiple emotions were elicited to varying degrees by a given stimulus. Humans often express emotion vocally by varying speech prosody, the audio characteristics of speech.
One study [4] found that 12 distinct emotions could be recognized from speech prosody across two cultures; a previous study [5] had found cross-cultural emotion recognition with subjects across five nations, although an in-group advantage was noted.

Humans also express emotion via brief, non-speech sounds called vocal bursts, also referred to as "affect bursts", "emotional vocalizations", or "nonverbal vocalizations": sounds like laughter, cries, sighs, moans, and groans; vocalizations that are not speech, and likely predate it, evolutionarily speaking. In [6] humans were found to be able to distinguish 14 emotional states from these vocal bursts, and a recent paper [7] by Cowen, Keltner, and others showed the ability to distinguish 24 emotional states from these brief vocalizations.

The ability to detect and express emotion via human vocalization appears early in human development [8, 9, 10, 11, 12]. It is important to language and social development; people who have difficulties in discerning emotions in others, due to brain injury or conditions like Autism Spectrum Disorder, experience difficulties communicating effectively. People with auditory affective agnosia [13] cannot discern emotional cues in speech, though they can still understand words, while people afflicted with dysprosody [14] speak in a monotone, without intonation or emotional affect; this can also appear in people with Parkinson's disease [15]. Any impairment of these abilities has a severe effect on communication and socialization with others, underlining the importance of evoking and understanding emotional expression.
1.1 The Problem at Hand

Interaction with computers via speech recognition is now commonplace through "smart speakers" and their associated virtual assistants such as Siri, Alexa, and Google Assistant. Currently, none of these systems are capable of detecting emotion from the speech audio signal; the signal is converted to text (sometimes with comic results) via speech-to-text deep learning models, but any emotional content present in the speech's prosody is ignored. For some applications, where how a word is said may be as important as (or more important than) what word was said, this could be a severe limitation. And, given their non-speech nature, vocal bursts are completely ignored by these systems.

Computers capable of emotion recognition from speech have numerous applications: more life-like responses from non-player characters in video games, for example. In early childhood education, awareness of the young user's emotional state would be helpful to gauge interest, frustration, or boredom; it could also be used to assess and improve the child's emotional intelligence (or "EQ") [16]. The ability to detect emotion could reveal signs of loneliness, agitation, or depression [17], a special concern for isolated people such as aging-in-place seniors. Social robots (robots designed to interact closely with humans) benefit from emotion recognition [18]; such systems can even be used to gauge the robot's appeal to its human users [19]. The argument has been made that we will never claim human-level performance in speech recognition until we can achieve human-level speech emotion recognition, since humans are capable of both [20].
(It should be noted that this area is just one aspect of the larger field of Affective Computing pioneered by Rosalind Picard [21], which involves not only emotion recognition, but also emotional expression and emotionally-aware decision making.)

Despite the limitations of current commercial products, Speech Emotion Recognition (SER) is an area of longstanding interest in computer science [22]. In 1996, Cowie et al. [23] developed a technique for automatically detecting landmarks in a speech signal and collecting summary statistics, which were then used to quantify speech characteristics for four emotion categories. Various approaches have been used in speech emotion recognition over the years [24]: Mel-Frequency Cepstrum Coefficients (MFCC), Gaussian Mixture Models (GMM), Support Vector Machines (SVM), Hidden Markov Models (HMM), and neural network techniques such as LSTM [25]; more recently, deep learning neural networks have been used.

The research described here examines the largely-neglected area of vocal bursts, enabled by a newly-collected dataset. A number of machine learning techniques will be explored, with varying levels of performance, along with suggested directions for future research.

The primary inspiration for this work was [7]; the vocal burst dataset, which the authors graciously provide to other researchers, was the largest vocal burst dataset available when released. That dataset consisted of 2,032 vocal burst samples with 30 emotion categories; as mentioned, humans were able to reliably distinguish 24 categories. The fundamental question at the basis of the current work: if humans can distinguish 24 emotion categories from vocal bursts, can machines do so as well?
While the Cowen et al. dataset was the largest available at the time, it was still relatively small, and the categories were not evenly represented; most machine learning approaches benefit greatly from larger numbers of samples and balanced categories. This author determined that a larger dataset would need to be collected, and several different approaches evaluated, to find the best-performing emotion classifier.

2 The dataset, and a spectrum of deep learning and other methodologies for classification

2.1 The Dataset

The EmoGator dataset consists of 32,130 vocal bursts produced by 357 speakers, providing 16.9654 hours of audio; the average sample length is 1.901 seconds. Each speaker recorded three samples for each of 30 emotion categories, providing 90 samples per speaker; this provided an equal number of samples for each category and for each speaker, assuring equal representation in the dataset. The emotion categories were the same 30 categories used in [7]: Adoration, Amusement, Anger, Awe, Confusion, Contempt, Contentment, Desire, Disappointment, Disgust, Distress, Ecstasy, Elation, Embarrassment, Fear, Guilt, Interest, Neutral, Pain, Pride, Realization, Relief, Romantic Love, Sadness, Serenity, Shame, Surprise (Negative), Surprise (Positive), Sympathy, and Triumph. The speakers were provided text prompts with scenarios to help elicit the emotional response; the prompts used were a modified and expanded version of those used by [7], and are listed in the online supplemental materials1.
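For reference, the 30 categories can be kept in a simple lookup structure. The sketch below lists the categories in the order given above and checks the per-speaker and whole-dataset counts; note that mapping the two-digit emotion IDs used in the file names to this particular ordering is an assumption, not something specified here.

```python
# The 30 EmoGator emotion categories, in the order listed in the text.
EMOTION_CATEGORIES = [
    "Adoration", "Amusement", "Anger", "Awe", "Confusion", "Contempt",
    "Contentment", "Desire", "Disappointment", "Disgust", "Distress",
    "Ecstasy", "Elation", "Embarrassment", "Fear", "Guilt", "Interest",
    "Neutral", "Pain", "Pride", "Realization", "Relief", "Romantic Love",
    "Sadness", "Serenity", "Shame", "Surprise (Negative)",
    "Surprise (Positive)", "Sympathy", "Triumph",
]

# Assumption: emotion IDs 1-30 follow the order above (verify against the data).
ID_TO_CATEGORY = {i + 1: name for i, name in enumerate(EMOTION_CATEGORIES)}

SAMPLES_PER_CATEGORY_PER_SPEAKER = 3
NUM_SPEAKERS = 357

assert len(EMOTION_CATEGORIES) == 30
assert len(EMOTION_CATEGORIES) * SAMPLES_PER_CATEGORY_PER_SPEAKER == 90   # per speaker
assert NUM_SPEAKERS * 90 == 32130                                         # whole dataset
```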
Data was collected from unpaid volunteers, and also from crowd-sourced workers via Mechanical Turk; a website was created where speakers could record and play back their samples using their own computer or mobile device. The audio files were originally recorded at 44100 or 48000 Hz, depending on the participant's hardware, and stored as mp3 files. Each individual recording file is named with a six-digit non-sequential user ID, a two-digit emotion ID (1-30), and a single-digit recording number (1, 2, 3). Since the files are labeled by user ID, researchers can break any train, test, or validation set by speaker, ensuring a given speaker's submission appears in only one of the sets. (Efforts were taken to avoid a speaker providing more than one contribution, though this cannot be 100% guaranteed.) All participants provided informed consent, and all aspects of the study procedures and design were approved by the University of Florida's Institutional Review Board (IRB).

Quality assurance was a major part of the data collection process; there were entire submissions that were silent recordings, or that only contained random background noise. Some contributors apparently misunderstood the assignment, recording themselves reading the names of the categories, or phrases related to the categories. Many speakers provided a large number of high-quality samples but also submitted problematic ones, usually due to audio issues such as background noises (for example, phone chimes or background traffic sounds); another issue was excessive breath noise picked up by the microphone. In these instances, speakers were asked to re-record the problematic samples in order to maintain the same number of samples per speaker.
In addition, some speakers did not seem to be able to produce evocative speech from the prompts; their responses did not convey distinct emotions. This last group was omitted from the dataset. As a result of all these factors, this dataset will almost certainly have a bias toward the emotional expressions of North American English-speaking people, as the author, and sole evaluator, shares that personal history. The dataset will be publicly available at the following URL: https://github.com/fredbuhl/EmoGator.

Several different steps were evaluated to preprocess the data. Normalizing the data so that each audio sample was within a [-1, 1] range was universally used (for training, validation, and testing). Denoising audio files and trimming silence from the beginning and end of audio files were evaluated as well. Augmenting data by creating pitch- and time-shifted variants of each sample was also explored.

While this dataset was being collected, a company named Hume AI collected their own vocal burst dataset, a subset of which was made available for the ICML 2022 Expressive Vocalizations Workshop and Competition [26] as the Hume-VB dataset. This dataset consists of 59,201 vocalizations from 1,702 speakers, with 10 emotion categories (Amusement, Awe, Awkwardness, Distress, Excitement, Fear, Horror, Sadness, Surprise, and Triumph). Each sample has been rated by reviewers, with [0:100] intensity scores for every emotion category provided for each sample. The Hume-VB dataset was also used for the ACII 2022 Affective Vocal Bursts Workshop and Competition [27]. There are several differences between the EmoGator dataset and the Hume-VB dataset:
1. EmoGator has 30 distinct emotion categories, with each sample belonging to a single category determined by the speaker's intent. Hume-VB has 0-100 ratings for all 10 of its categories, provided by reviewers for each sample; these reflect the listener's interpretation, which may in some cases be very different from the speaker's intent.

2. EmoGator contributors were provided text prompts describing situations that would elicit a given category of vocal burst. Hume-VB contributors were provided "seed" vocal burst audio samples to imitate, which could reduce the range of expression for a given category.

3. EmoGator only permitted one 90-sample submission per speaker; Hume-VB allowed for multiple submissions per speaker.

4. EmoGator has balanced categories; each emotion category has exactly 1,071 samples. In Hume-VB, this varies; for example, "there are fewer samples that differentially convey Triumph" [26, p. 2].

5. While Hume-VB has nearly twice as many samples as EmoGator, the dataset is only provided for use in the two sponsored competitions, and requires signing an End User License Agreement (EULA)2; EmoGator is freely available under an open-source license.

At the time of publication, EmoGator appears to be the largest vocal burst dataset publicly available.
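Because each file name encodes the speaker, emotion, and take, a speaker-disjoint train/validation/test split can be derived directly from the file names. The sketch below assumes names of the form <userid>_<emotionid>_<take>.mp3 (e.g. 123456_07_2.mp3); the underscore separator and exact extension are assumptions for illustration, not part of the naming description above.

```python
import random
from pathlib import Path
from collections import defaultdict

def parse_filename(path):
    """Split a recording file name into (user_id, emotion_id, take).

    Assumes names like 123456_07_2.mp3: six-digit user ID, two-digit
    emotion ID (1-30), single-digit recording number (1-3).
    """
    user_id, emotion_id, take = Path(path).stem.split("_")
    return user_id, int(emotion_id), int(take)

def speaker_disjoint_split(files, val_frac=0.15, test_frac=0.15, seed=0):
    """Assign whole speakers to train/val/test so no speaker spans two sets."""
    by_speaker = defaultdict(list)
    for f in files:
        user_id, _, _ = parse_filename(f)
        by_speaker[user_id].append(f)

    speakers = sorted(by_speaker)
    random.Random(seed).shuffle(speakers)
    n_val = int(len(speakers) * val_frac)
    n_test = int(len(speakers) * test_frac)
    val_ids = set(speakers[:n_val])
    test_ids = set(speakers[n_val:n_val + n_test])

    split = {"train": [], "val": [], "test": []}
    for spk, spk_files in by_speaker.items():
        key = "val" if spk in val_ids else "test" if spk in test_ids else "train"
        split[key].extend(spk_files)
    return split
```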
1 https://supp.apa.org/psycarticles/supplemental/amp0000399/amp0000399_Supplemental-Materials.docx
2 https://www.competitions.hume.ai/exvo2022

2.2 Classification Methodologies

A number of different techniques used in speech emotion recognition, sound classification, and elsewhere have been applied to these sorts of audio classification problems.

2.3 Spectrogram approaches

Some approaches to audio classification involve creating a time-frequency spectrogram (or spectrogram-like) representation of the audio signals, which can be created in a number of ways. Typically, the Short-Time Fourier Transform, or STFT [28], is used, which provides the amplitude of different frequencies over time; a variant, the Mel spectrogram, maps the frequencies to the Mel scale [29], which closely matches human perception of differences in pitch. MFCCs provide a spectrum-like "cepstrum" [30] which, while using Mel frequencies, provides the log of the amplitude in decibels over the phase shift, instead of the time domain used for spectrograms. The resulting spectrograms or cepstrograms are used as features for other machine learning approaches.
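As an illustration of these representations, the short sketch below uses the librosa library to compute a log-scaled Mel spectrogram and MFCCs for one clip; the specific parameter values (128 Mel bands, 40 coefficients) are arbitrary choices for the example, not settings taken from this work.

```python
import numpy as np
import librosa

def spectrogram_features(path, n_mels=128, n_mfcc=40):
    """Compute a log-Mel spectrogram and MFCCs for one audio file."""
    y, sr = librosa.load(path, sr=None)          # keep the native sample rate
    y = y / np.max(np.abs(y))                    # normalize to [-1, 1]

    mel = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=n_mels)
    log_mel = librosa.power_to_db(mel, ref=np.max)   # log-scaled Mel spectrogram

    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc)
    return log_mel, mfcc
```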
2.4 1D CNN training on raw waveforms

In [31], Dai et al. use a direct approach to sound classification: one-dimensional CNNs that work with the raw input waveforms, without using spectrograms or some other representation as an intermediate-step feature detector. Networks consisting of layers of one-dimensional convolutions (1D CNNs) [32] were used for this. [31] worked on the UrbanSound8k dataset [33], which, with its 10 categories and 8,732 samples, is a bit smaller than the EmoGator dataset. Testing various architectures, they reported up to 71.68% accuracy with an 18-layer model, which is competitive with CNNs using spectrograms of the same dataset. For the EmoGator dataset, we developed an 18-layer network as in [31], and added dropout layers after each 1D convolution to help prevent overfitting.
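A minimal PyTorch sketch of this kind of raw-waveform classifier is shown below. It follows the general pattern of [31] (stacked 1D convolutions with pooling, ending in a global pool and a linear classifier) with dropout after each convolution, but the layer count, channel widths, and kernel sizes here are illustrative assumptions rather than the exact configuration used in the experiments.

```python
import torch
import torch.nn as nn

class RawWaveformCNN(nn.Module):
    """Small 1D CNN over raw audio: conv blocks + global pooling + linear head."""

    def __init__(self, n_classes=30, dropout=0.07):
        super().__init__()
        layers, in_ch = [], 1
        for out_ch in (64, 64, 128, 128, 256, 256):   # illustrative widths
            layers += [
                nn.Conv1d(in_ch, out_ch, kernel_size=9, padding=4),
                nn.BatchNorm1d(out_ch),
                nn.ReLU(),
                nn.Dropout(dropout),                   # dropout after each conv
                nn.MaxPool1d(4),
            ]
            in_ch = out_ch
        self.features = nn.Sequential(*layers)
        self.head = nn.Linear(in_ch, n_classes)

    def forward(self, x):            # x: (batch, 1, num_samples)
        h = self.features(x)
        h = h.mean(dim=-1)           # global average pool over time
        return self.head(h)

# Example: a batch of two one-second clips at 16 kHz.
logits = RawWaveformCNN()(torch.randn(2, 1, 16000))
```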
2.5 Random forests

Random forest classifiers [34] were also explored. A random forest is constructed by generating multiple random decision trees, each built from a random subset of the dataset using a random subset of each sample's features. Once constructed, each tree in the forest casts a single vote for a class, and the class with the most votes is chosen as the winner. This approach can be used on raw data or with spectrogram-like representations.

2.6 Large pre-trained speech models

Several teams in the 2022 ICML Expressive Vocalizations Workshop and Competition made use of large pre-trained speech models [35], [36], [37], [38], [39], [40]. Two models were used frequently: WavLM [41] and HuBERT [42]. Both of these are self-supervised speech representation models built using transformer architectures [43]; transformers have been applied successfully to a large number of domains. They are typically very large models, trained on large datasets for significant amounts of time, and having access to these pre-trained models can produce better results than can be achieved by training on other (usually smaller) datasets in isolation. WavLM is a large-scale self-supervised pre-trained speech model; the "Large" version of WavLM was trained on 94k hours of speech and has 316.62M parameters. HuBERT is a similar model; the "Large" version has 317M parameters and was trained on 60k hours of audio on 128 Graphics Processing Units (GPUs). Both WavLM and HuBERT are built upon wav2vec 2.0 [44], a "contrastive learning" self-supervised speech model, which itself is trained on 64 GPUs; the output of wav2vec is used as the input to HuBERT or WavLM, providing them higher-level features to build and train upon.

WavLM experiments were run by first passing the EmoGator training, validation, and test data through a pre-trained WavLM model and storing the last hidden layer as a new representation for each sample, using a 70% / 15% / 15% train-validation-test split. The hidden layers from the training data were then used as input to train a single fully connected network, using the validation data to find the appropriate stopping point; once the ideal models were determined, they were run on the test data. The HuBERT model was used in an identical fashion, using the last hidden layer of the HuBERT model instead of WavLM as the input to the fully-connected layer.
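The sketch below shows one way to extract such last-hidden-state representations with the HuggingFace transformers library and feed a pooled version to a single linear layer. The checkpoint names ("microsoft/wavlm-large", "facebook/hubert-large-ll60k"), the 16 kHz input, and the mean pooling over time are assumptions made for the example, not details taken from the experiments described here.

```python
import torch
import torch.nn as nn
from transformers import AutoFeatureExtractor, WavLMModel

# Assumed checkpoint; HuBERT (e.g. "facebook/hubert-large-ll60k") works the same way.
CKPT = "microsoft/wavlm-large"
extractor = AutoFeatureExtractor.from_pretrained(CKPT)
wavlm = WavLMModel.from_pretrained(CKPT).eval()

@torch.no_grad()
def embed(waveform_16k):
    """Return a fixed-size embedding: mean of the last hidden state over time."""
    inputs = extractor(waveform_16k, sampling_rate=16000, return_tensors="pt")
    hidden = wavlm(**inputs).last_hidden_state        # (1, frames, hidden_dim)
    return hidden.mean(dim=1).squeeze(0)              # (hidden_dim,)

# A single fully connected layer maps the embedding to the 30 emotion categories.
classifier = nn.Linear(wavlm.config.hidden_size, 30)
logits = classifier(embed(torch.randn(16000).numpy()))
```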
Incorporating WavLM and HuBERT in this work was greatly aided by the HuggingFace transformers libraries [45], which, while initially covering natural language processing, have now expanded into many other areas. The benefit of being able to incorporate a large pre-trained language model with a few lines of code cannot be overstated.

2.7 Ensemble Methods

Ensemble methods attempt to improve performance by combining the outputs of multiple models, with suitable training and weighting; the aggregate often outperforms the individual models. Two approaches were used for the EmoGator data: Ensemble A took the n-length outputs (where n was the number of emotion categories) produced by the WavLM and HuBERT single-layer models and averaged them together, using the resulting average to pick the most likely emotion category. Ensemble B concatenated the last hidden layers from WavLM and HuBERT, and then trained a single fully-connected layer on those inputs.

2.8 Platform & Hardware Requirements

Most work on this project was performed on the University of Florida's HiperGator-AI cluster, which uses 80 GB A100 GPUs; one A100 should be sufficient to run all the models included, but the code may not run directly on systems with lower-memory GPUs unless parameters such as batch size are adjusted.
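As a rough illustration of the two ensembling strategies, the sketch below assumes precomputed per-sample class scores (for Ensemble A) and precomputed hidden-state embeddings (for Ensemble B) from the two backbone models; the names and shapes are illustrative only.

```python
import numpy as np
import torch
import torch.nn as nn

# Ensemble A: average the per-category outputs of the two single-layer models.
def ensemble_a(scores_wavlm, scores_hubert):
    """scores_*: (num_samples, num_categories) arrays of model outputs."""
    avg = (scores_wavlm + scores_hubert) / 2.0
    return np.argmax(avg, axis=1)            # predicted category per sample

# Ensemble B: concatenate the two hidden representations, then train one
# fully-connected layer on the concatenated features.
class EnsembleB(nn.Module):
    def __init__(self, dim_wavlm, dim_hubert, n_classes=30):
        super().__init__()
        self.fc = nn.Linear(dim_wavlm + dim_hubert, n_classes)

    def forward(self, h_wavlm, h_hubert):     # (batch, dim_*) embeddings
        return self.fc(torch.cat([h_wavlm, h_hubert], dim=-1))
```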
3 Results

3.1 1D CNN training on raw waveforms

For one-dimensional convolutional neural networks, the best results against the full dataset were obtained with a 70% / 15% / 15% train/validation/test split, using an 18-layer 1D CNN based on [31], but with dropout layers after each convolution. A relatively low dropout rate of 0.07 was optimal. All experiments were run with a batch size of 128 and an Adam optimizer with a learning rate of 0.001. Several statistics were calculated; for the full 30-category dataset, the average F1 score was 0.270. F1 scores and other accuracy metrics, with breakdowns by category, are shown in Table 1; a confusion matrix is provided in Figure 1, based on the run with the highest F1 score.

The experiments above were all run with normalized audio data, but without denoising the audio signal or trimming silence from the beginning and end; earlier experiments with a 70%/30% train/test split revealed that denoising or trimming the audio signal reduced performance. Data augmentation was also explored; two-to-three-times-larger "stretched" versions of the 70% / 15% / 15% training set were produced by creating new samples via independent pitch and tempo shifts of the audio samples. However, the stretched training sets produced lower performance than the original training set, despite adjustments to the amount of pitch and tempo scaling.
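A simple way to produce such pitch- and tempo-shifted variants is shown below using librosa; the shift ranges are arbitrary example values, not the ones evaluated here.

```python
import random
import librosa

def augment(y, sr, max_steps=2.0, max_stretch=0.1):
    """Return a pitch-shifted and time-stretched copy of a waveform.

    max_steps: pitch shift range in semitones; max_stretch: +/- tempo fraction.
    (Example ranges only.)
    """
    n_steps = random.uniform(-max_steps, max_steps)
    rate = 1.0 + random.uniform(-max_stretch, max_stretch)
    y_aug = librosa.effects.pitch_shift(y, sr=sr, n_steps=n_steps)
    y_aug = librosa.effects.time_stretch(y_aug, rate=rate)
    return y_aug
```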
In reviewing these results, it is clear that some categories are much harder (or easier) to identify than others; for example, the F1 score for Embarrassment (0.056), the worst-performing category, is much lower than that of the highest-performing category, Amusement (0.627). The confusion matrix illustrates the problem well; it shows that certain types of vocal bursts are simply difficult to place in the correct category. Per the confusion matrix, Embarrassment (with only 7 samples correctly identified) was more likely to be interpreted as Shame (16) or Guilt (10), all closely related concepts that can produce similar vocalizations. This is an inherently difficult problem, which helps explain why humans could only reliably distinguish 24 emotion categories in [7].

By selectively removing emotion categories that performed poorly, overall performance would be expected to improve. Using the F1 score as a metric, the lowest-scoring categories were removed, creating 24-count, 16-count, and 10-count subsets of the dataset. Interestingly, three of the bottom-scoring six categories removed to make the 24-count subset were also not identifiable by humans in [7]; two other categories unidentifiable by humans were removed in the 16-count subset, showing some commonality between the two datasets, and also illustrating the difficulties humans and algorithms have with certain emotion categories, even across studies. The same 1D CNN model architecture, hyperparameters, and validation approach were used. Results are in Table 2; we do see improvement as the more ambiguous categories are eliminated.
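A sketch of this pruning step, under the assumption that predictions from a full-dataset run are already available, might look like the following.

```python
import numpy as np
from sklearn.metrics import f1_score

def keep_top_categories(y_true, y_pred, labels, keep=24):
    """Rank categories by per-class F1 and return the best `keep` label IDs."""
    per_class_f1 = f1_score(y_true, y_pred, labels=labels, average=None)
    order = np.argsort(per_class_f1)[::-1]            # best first
    return [labels[i] for i in order[:keep]]

def filter_dataset(samples, targets, kept_labels):
    """Keep only the samples whose label is in the retained category subset."""
    kept = set(kept_labels)
    pairs = [(x, y) for x, y in zip(samples, targets) if y in kept]
    return [x for x, _ in pairs], [y for _, y in pairs]
```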
By creating binary 1D CNN classifiers, with one classifier for each possible pair of emotion categories, we can determine which pairs are the easiest to distinguish. Using the same model architecture and 70%/15%/15% split, and using the F1 score as a similarity metric (on a [0, 1] scale, where 1 is least similar), a similarity matrix was created from the 435 pairings of the 30 categories, and a dendrogram displaying the relationships between the categories was generated from that matrix (Figure 2). The dendrogram illustrates the most easily confused or distinguished categories. For example, it shows how easily the Amusement category is distinguished from all other categories, and shows Realization and Contempt as the most similar, and therefore most confused, categories, despite being very different emotions.
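A sketch of turning those pairwise F1 scores into a dendrogram with SciPy is shown below; treating the pairwise F1 directly as a distance and using average linkage are assumptions for the example, since the clustering settings are not spelled out here.

```python
import numpy as np
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import linkage, dendrogram
import matplotlib.pyplot as plt

def plot_category_dendrogram(pairwise_f1, labels):
    """pairwise_f1: symmetric (30, 30) matrix of binary-classifier F1 scores,
    where a higher score means the pair is easier to tell apart (less similar)."""
    dist = np.array(pairwise_f1, dtype=float)
    np.fill_diagonal(dist, 0.0)                 # a category is identical to itself
    condensed = squareform(dist, checks=False)  # condensed distance vector
    tree = linkage(condensed, method="average") # assumed linkage method
    dendrogram(tree, labels=labels, leaf_rotation=90)
    plt.tight_layout()
    plt.show()
```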
Table 1: Precision, Recall, and F1 scores from a best run of the 18-layer 1D CNN, with dropout layers.

                     Precision  Recall  F1 score  Support
Adoration            0.407      0.488   0.444     162
Amusement            0.561      0.710   0.627     162
Anger                0.405      0.327   0.362     162
Awe                  0.220      0.296   0.253     162
Confusion            0.354      0.574   0.438     162
Contempt             0.236      0.296   0.263     162
Contentment          0.193      0.272   0.226     162
Desire               0.253      0.309   0.278     162
Disappointment       0.144      0.093   0.113     162
Disgust              0.376      0.580   0.456     162
Distress             0.243      0.111   0.153     162
Ecstasy              0.187      0.123   0.149     162
Elation              0.190      0.074   0.107     162
Embarrassment        0.078      0.043   0.056     162
Fear                 0.341      0.179   0.235     162
Guilt                0.175      0.105   0.131     162
Interest             0.288      0.420   0.342     162
Neutral              0.397      0.568   0.467     162
Pain                 0.276      0.438   0.339     162
Pride                0.175      0.086   0.116     162
Realization          0.351      0.241   0.286     162
Relief               0.294      0.432   0.350     162
Romantic Love        0.121      0.074   0.092     162
Sadness              0.355      0.302   0.327     162
Serenity             0.209      0.191   0.200     162
Shame                0.197      0.154   0.173     162
Surprise (Negative)  0.296      0.364   0.327     162
Surprise (Positive)  0.248      0.198   0.220     162
Sympathy             0.233      0.370   0.286     162
Triumph              0.378      0.228   0.285     162
Accuracy                                0.288     4860
Macro Average        0.273      0.288   0.270     4860
Weighted Average     0.273      0.288   0.270     4860

Table 2: 1D CNN runs with 24-, 16-, and 10-category subsets of the EmoGator dataset, compared to the 30-category full dataset.

1D CNN Dataset size    F1 score (avg.)
30-Count Full Dataset  0.267
24-Count Subset        0.344
16-Count Subset        0.459
10-Count Subset        0.597

Figure 1: The confusion matrix generated by the 18-layer 1D CNN with dropout layers.
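For readers reproducing these tables, per-category precision, recall, and F1 (as in Table 1) and a confusion matrix plot (as in Figure 1) can be generated directly from the held-out test predictions with scikit-learn; the sketch below is illustrative, and y_true, y_pred, and the truncated name list are placeholders rather than outputs of the released code.

# Minimal sketch: Table 1-style per-class report and Figure 1-style confusion matrix.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix, ConfusionMatrixDisplay

emotion_names = ["Adoration", "Amusement", "Anger"]  # all 30 category names in practice
y_true = np.array([0, 1, 2, 1, 0])                   # placeholder test labels
y_pred = np.array([0, 1, 1, 1, 2])                   # placeholder model predictions

# Precision, recall, F1, and support for every category.
print(classification_report(y_true, y_pred, target_names=emotion_names, digits=3))

# Rows are true categories, columns are predicted categories.
cm = confusion_matrix(y_true, y_pred)
ConfusionMatrixDisplay(cm, display_labels=emotion_names).plot(xticks_rotation="vertical")
plt.tight_layout()
plt.savefig("confusion_matrix.png")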
3.2 Random Forests

As shown in [34], the Random Forest approach has been applied to a number of datasets with small sample counts and few categories, which suggested it might be an apt choice for the EmoGator dataset. The classifier (included in the scikit-learn library [46]) was trained on Mel-Frequency Cepstral Coefficients (MFCCs) of the audio data; runs were completed for the full 30-category dataset, along with 24-, 16-, and 10-category subsets. All of these results under-performed the 1D CNN results, however (see Table 3).

Table 3: Random Forest runs with 24-, 16-, and 10-category subsets of the EmoGator dataset, compared to the 30-category full dataset, using MFCCs.

Random Forest Dataset size  F1 score (avg.)
30-Count Full Dataset       0.146
24-Count Subset             0.180
16-Count Subset             0.256
10-Count Subset             0.345
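A minimal sketch of this baseline appears below; librosa is assumed for MFCC extraction (the feature-extraction library and the exact forest settings are not specified here, so the parameter values are illustrative), with fixed-length features obtained by averaging each clip's MFCCs over time.

# Minimal sketch: Random Forest baseline on time-averaged MFCC features.
import numpy as np
import librosa                                   # assumed MFCC extractor
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score

def mfcc_features(path, sr=16000, n_mfcc=40):
    """Load one clip and summarize it as a per-coefficient mean MFCC vector."""
    audio, _ = librosa.load(path, sr=sr)
    mfcc = librosa.feature.mfcc(y=audio, sr=sr, n_mfcc=n_mfcc)  # (n_mfcc, frames)
    return mfcc.mean(axis=1)                                    # fixed-length vector

# Placeholder file lists and integer emotion labels for the train/test splits.
train_paths, train_labels = ["burst_0001.wav"], [0]
test_paths, test_labels = ["burst_0002.wav"], [0]

X_train = np.stack([mfcc_features(p) for p in train_paths])
X_test = np.stack([mfcc_features(p) for p in test_paths])

clf = RandomForestClassifier(n_estimators=500, random_state=0)
clf.fit(X_train, train_labels)
print("macro F1:", f1_score(test_labels, clf.predict(X_test), average="macro"))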
3.3 Large pre-trained speech models

Results were calculated using the last hidden layer of the WavLM and HuBERT models connected to a single fully-connected network layer. A variant of Ensemble B (defined in Section 3.4) incorporated two fully-connected layers (labeled "2-layer FC"), which resulted in a moderate improvement. These results are presented, along with the others, in Table 4.

3.4 Ensemble Methods

Results were calculated using the averaged outputs of the trained fully-connected layers appended to the WavLM and HuBERT model runs (Ensemble A), and using the concatenated last-hidden-layer outputs from both models, which were then used to train a single fully-connected layer (Ensemble B). The WavLM and HuBERT single fully-connected layers that had the highest average F1 scores on the validation dataset were used, to keep the test data from tainting the ensemble model. Results for the ensemble methods are presented in Table 4, along with summary data from all of the EmoGator experiments.
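To illustrate the recipe shared by Sections 3.3 and 3.4, the sketch below extracts time-averaged last-hidden-layer embeddings from WavLM and HuBERT via Hugging Face transformers, concatenates them as in Ensemble B, and trains a single fully-connected layer; the checkpoint names, batch contents, and optimizer settings are assumed stand-ins rather than the exact configuration behind Table 4.

# Minimal sketch: last-hidden-layer embeddings from WavLM and HuBERT,
# concatenated (Ensemble B style) and fed to one fully-connected layer.
import torch
import torch.nn as nn
from transformers import AutoFeatureExtractor, WavLMModel, HubertModel

wavlm = WavLMModel.from_pretrained("microsoft/wavlm-base-plus").eval()     # assumed checkpoint
hubert = HubertModel.from_pretrained("facebook/hubert-base-ls960").eval()  # assumed checkpoint
extractor = AutoFeatureExtractor.from_pretrained("microsoft/wavlm-base-plus")

def embed(model, clips, sr=16000):
    """Return one time-averaged last-hidden-layer vector per clip."""
    inputs = extractor(clips, sampling_rate=sr, return_tensors="pt", padding=True)
    with torch.no_grad():
        hidden = model(inputs.input_values).last_hidden_state  # (batch, frames, dim)
    return hidden.mean(dim=1)                                  # (batch, dim)

clips = [torch.randn(16000).numpy(), torch.randn(24000).numpy()]  # placeholder 16 kHz audio
labels = torch.tensor([0, 1])                                     # placeholder emotion labels

features = torch.cat([embed(wavlm, clips), embed(hubert, clips)], dim=1)
head = nn.Linear(features.shape[1], 30)                           # 30 emotion categories
optimizer = torch.optim.Adam(head.parameters(), lr=1e-3)
loss = nn.functional.cross_entropy(head(features), labels)
loss.backward()
optimizer.step()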
4 Discussion

Returning to our research question, whether machines, like humans, can reliably identify 24 emotion categories: the results achieved for the 24-category runs did not approach assumed human proficiency, with a top F1 score of only 0.344, obtained with the 1D CNN method on a 24-category subset. Results for the 24-, 16-, and 10-category subsets were better than those for the full 30-category runs, with the 10-category runs performing best, again using the 1D CNN approach, at an F1 score of 0.597. (To put these results in perspective, a random guess on a 24-category subset would be right only about 4.2% of the time, and a random guess on a 10-category subset only 10% of the time, so these results are much better than pure chance.)

One potential use of this dataset would be to measure how accurate human performance is for vocal bursts, that is, whether the category the speaker intended to convey is correctly identified by listeners. Other studies have used gradient rating scales for each category provided by the listener, without necessarily linking back to the ground truth of the speaker intent.

Figure 2: The dendrogram generated from F1 scores (range [0,1]) between pairs of emotion categories.

Table 4: All results from the various approaches and dataset subsets used.

Approach                 # Categories  F1 score
1D CNN                   30            0.267
1D CNN                   24            0.344
1D CNN                   16            0.459
1D CNN                   10            0.597
Random Forest            30            0.146
Random Forest            24            0.180
Random Forest            16            0.256
Random Forest            10            0.345
WavLM                    30            0.255
WavLM                    10            0.563
HuBERT                   10            0.531
Ensemble A               10            0.571
Ensemble B               10            0.591
Ensemble B (2-layer FC)  10            0.593
Another question is whether collecting vocal bursts inspired by text-based prompts is better or worse than trying to capture them "in the wild" from recorded conversations, or than eliciting them with other sorts of prompts. Collecting more data would no doubt improve these results; this vocal burst dataset, while (currently) the largest publicly available, is still small by machine learning standards. Evaluating subsets of the dataset makes the situation even worse: when looking at, say, the 10-category subsets, only one third of the dataset is used.

Using more complex ensemble methods seems a promising way forward; while the ensemble results here did not exceed the 1D CNN results, it is possible that incorporating more individual models could increase accuracy beyond what we have been able to achieve.

One topic not explored here is generating vocal bursts; the author will next be exploring methods such as Generative Adversarial Networks (GANs) and Stable Diffusion models for generating vocal bursts. Ideally, these could be tailored to an individual speaker by providing a few audio samples (the ICML competition included this as one of its challenges).

More data will help, but it may be that audio data alone is insufficient to properly classify vocal bursts. Datasets and models incorporating video as well as audio (not only to look at facial expressions, but also at any visual cues that might evoke a vocal burst) could improve accuracy. The words spoken by the utterer, and by others around them, before or after a vocal burst may also aid in identification.
(It may be, however, that there are inherent limits far short of certainty for vocal burst classification, regardless of any additional information that can be gathered; cries of sadness and of amusement often sound the same, and people sometimes say they are not sure "whether they should laugh or cry.")

Another area to explore is the demographics of the speakers; their age, gender, place of origin, and cultural background could all come into play in classifying bursts. These demographic concerns also extend to the person evaluating the quality of a sample; ideally, the demographic attributes of the reviewer should match those of the submitter for best quality. Beyond demographics, each individual's unique character and personality certainly come into play when they generate vocal bursts, so prior experience with the utterer could be key to improving accuracy, especially if the model's weights can be fine-tuned based on that experience.

It is hoped that the EmoGator dataset will introduce researchers to the fascinating area of vocal bursts, and that other researchers will incorporate this dataset into still-larger collections in the future, "paying it forward" by making those datasets publicly available.

Acknowledgement

My thanks to Anand Rangarajan for our helpful discussions about the project.

References
[1] G. B. Duchenne, G. B. D. de Boulogne, R. A. Cuthbertson, A. S. R. Manstead, and K. Oatley. The Mechanism of Human Facial Expression. Cambridge Books Online. Cambridge University Press, 1990.
[2] Alan S. Cowen and Dacher Keltner. What the face displays: Mapping 28 emotions conveyed by naturalistic expression. American Psychologist, 2019.
[3] Alan S. Cowen and Dacher Keltner. Self-report captures 27 distinct categories of emotion bridged by continuous gradients. Proceedings of the National Academy of Sciences, 114(38):E7900–E7909, September 2017.
[4] Alan S. Cowen, Petri Laukka, Hillary Anger Elfenbein, Runjing Liu, and Dacher Keltner. The primacy of categories in the recognition of 12 emotions in speech prosody across two cultures. Nature Human Behaviour, 3(4):369–382, April 2019.
[5] Petri Laukka, Hillary Anger Elfenbein, Nutankumar S. Thingujam, Thomas Rockstuhl, Frederick K. Iraki, Wanda Chui, and Jean Althoff. The expression and recognition of emotions in the voice across five nations: A lens model analysis based on acoustic features. Journal of Personality and Social Psychology, 111(5):686–705, November 2016.
[6] Emiliana R. Simon-Thomas, Dacher J. Keltner, Disa Sauter, Lara Sinicropi-Yao, and Anna Abramson. The voice conveys specific emotions: Evidence from vocal burst displays. Emotion, 9(6):838–846, 2009.
[7] Alan S. Cowen, Hillary Anger Elfenbein, Petri Laukka, and Petri Keltner. Mapping 24 emotions conveyed by brief human vocalization. American Psychologist, 74(6):698, 2019.
[8] Elena Lyakso and Olga Frolova. Emotion State Manifestation in Voice Features: Chimpanzees, Human Infants, Children, Adults. In Andrey Ronzhin, Rodmonga Potapova, and Nikos Fakotakis, editors, Speech and Computer, Lecture Notes in Computer Science, pages 201–208, Cham, 2015. Springer International Publishing.
[9] Mariana Vaillant-Molina, Lorraine E. Bahrick, and Ross Flom. Young Infants Match Facial and Vocal Emotional Expressions of Other Infants. Infancy, 18(Suppl 1), August 2013.
[10] Amaya Palama, Jennifer Malsert, and Edouard Gentaz. Are 6-month-old human infants able to transfer emotional information (happy or angry) from voices to faces? An eye-tracking study. PLOS ONE, 13(4):e0194579, April 2018.
[11] Lois Bloom and Richard Beckwith. Talking with Feeling: Integrating Affective and Linguistic Expression in Early Language Development. Cognition and Emotion, 3(4):313–342, October 1989.
[12] Yang Wu, Paul Muentener, and Laura E. Schulz. One- to four-year-olds connect diverse positive emotional vocalizations to their probable causes. Proceedings of the National Academy of Sciences, 114(45):11896–11901, November 2017.
[13] K. M. Heilman, R. Scholes, and R. T. Watson. Auditory affective agnosia. Disturbed comprehension of affective speech. Journal of Neurology, Neurosurgery & Psychiatry, 38(1):69–72, January 1975.
[14] G. H. Monrad-Krohn. Dysprosody or altered "melody of language". Brain: A Journal of Neurology, 70:405–415, 1947.
[15] Sabine Skodda, Heiko Rinsche, and Uwe Schlegel. Progression of dysprosody in Parkinson's disease over time—A longitudinal study. Movement Disorders, 24(5):716–722, 2009.
[16] Tsai-Hsuan Tsai, Hsien-Tsung Chang, Shin-Da Liao, Hui-Fang Chiu, Ko-Chun Hung, Chun-Yi Kuo, and Chih-Wei Yang. Employing a Voice-Based Emotion-Recognition Function in a Social Chatbot to Foster Social and Emotional Learning Among Preschoolers. In Constantine Stephanidis, editor, HCI International 2019 – Late Breaking Papers, Lecture Notes in Computer Science, pages 341–356, Cham, 2019. Springer International Publishing.
[17] Young-Shin Lee and Won-Hyung Park. Diagnosis of Depressive Disorder Model on Facial Expression Based on Fast R-CNN. Diagnostics, 12(2):317, January 2022.
[18] Cynthia Breazeal. Emotion and sociable humanoid robots. International Journal of Human-Computer Studies, 59(1):119–155, July 2003.
[19] Jekaterina Novikova, Christian Dondrup, Ioannis Papaioannou, and Oliver Lemon. Sympathy Begins with a Smile, Intelligence Begins with a Word: Use of Multimodal Features in Spoken Human-Robot Interaction. arXiv:1706.02757 [cs], June 2017.
[20] D. O'Shaughnessy. Speech Communications: Human and Machine. Wiley, 2000.
[21] Rosalind W. Picard. Affective Computing. The MIT Press, 2000.
[22] Shashidhar G. Koolagudi and K. Sreenivasa Rao. Emotion recognition from speech: a review. International Journal of Speech Technology, 15(2):99–117, June 2012.
[23] R. Cowie and E. Douglas-Cowie. Automatic statistical analysis of the signal and prosodic signs of emotion in speech. In Proceedings of the Fourth International Conference on Spoken Language Processing (ICSLP '96), volume 3, pages 1989–1992, October 1996.
[24] Akanksha Gadikar, Omkar Gokhale, Subodh Wagh, Anjali Wankhede, and P. Joshi. A Survey on Speech Emotion Recognition by Using Neural Networks. International Journal of Research and Analytical Reviews, 7(3), September 2020.
[25] Sepp Hochreiter and Jürgen Schmidhuber. Long Short-Term Memory. Neural Computation, 9(8):1735–1780, 1997.
[26] Alice Baird, Panagiotis Tzirakis, Gauthier Gidel, Marco Jiralerspong, Eilif B. Muller, Kory Mathewson, Björn Schuller, Erik Cambria, Dacher Keltner, and Alan Cowen. The ICML 2022 Expressive Vocalizations Workshop and Competition: Recognizing, Generating, and Personalizing Vocal Bursts. arXiv:2205.01780 [cs, eess], July 2022.
[27] Alice Baird, Panagiotis Tzirakis, Jeffrey A. Brooks, Christopher B. Gregory, Björn Schuller, Anton Batliner, Dacher Keltner, and Alan Cowen. The ACII 2022 Affective Vocal Bursts Workshop & Competition: Understanding a critically understudied modality of emotional expression. arXiv:2207.03572 [cs, eess], July 2022.
[28] E. Jacobsen and R. Lyons. The sliding DFT. IEEE Signal Processing Magazine, 20(2):74–80, March 2003.
[29] S. S. Stevens, J. Volkmann, and E. B. Newman. A Scale for the Measurement of the Psychological Magnitude Pitch. The Journal of the Acoustical Society of America, 8(3):185–190, January 1937.
[30] B. Bogert. The quefrency analysis of time series for echoes: cepstrum, pseudo-autocovariance, cross-cepstrum and saphe cracking. In Proceedings of the Symposium on Time Series Analysis, pages 209–243, 1963.
[31] Wei Dai, Chia Dai, Shuhui Qu, Juncheng Li, and Samarjit Das. Very Deep Convolutional Neural Networks for Raw Waveforms. arXiv:1610.00087 [cs], October 2016.
[32] S. Kiranyaz, T. Ince, R. Hamila, and M. Gabbouj. Convolutional Neural Networks for patient-specific ECG classification. In 2015 37th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC), pages 2608–2611, August 2015.
[33] Justin Salamon, Christopher Jacoby, and Juan Pablo Bello. A Dataset and Taxonomy for Urban Sound Research. In Proceedings of the 22nd ACM International Conference on Multimedia (MM '14), pages 1041–1044, Orlando, Florida, USA, November 2014. Association for Computing Machinery.
[34] Leo Breiman. Random Forests. Machine Learning, 45(1):5–32, October 2001.
[35] Detai Xin, Shinnosuke Takamichi, and Hiroshi Saruwatari. Exploring the Effectiveness of Self-supervised Learning and Classifier Chains in Emotion Recognition of Nonverbal Vocalizations. arXiv:2206.10695 [cs, eess], June 2022.
[36] Chin-Cheng Hsu. Synthesizing Personalized Non-speech Vocalization from Discrete Speech Representations. arXiv:2206.12662 [cs, eess], June 2022.
[37] Josh Belanich, Krishna Somandepalli, Brian Eoff, and Brendan Jou. Multitask vocal burst modeling with ResNets and pre-trained paralinguistic Conformers. arXiv:2206.12494 [cs, eess], June 2022.
[38] Roshan Sharma, Tyler Vuong, Mark Lindsey, Hira Dhamyal, Rita Singh, and Bhiksha Raj. Self-supervision and Learnable STRFs for Age, Emotion, and Country Prediction. arXiv:2206.12568 [cs, eess], June 2022.
[39] Tilak Purohit, Imen Ben Mahmoud, Bogdan Vlasenko, and Mathew Magimai Doss. Comparing supervised and self-supervised embedding for ExVo Multi-Task learning track. arXiv:2206.11968 [cs, eess], June 2022.
[40] Atijit Anuchitanukul and Lucia Specia. Burst2Vec: An Adversarial Multi-Task Approach for Predicting Emotion, Age, and Origin from Vocal Bursts. arXiv:2206.12469 [cs, eess], June 2022.
[41] Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Xiangzhan Yu, and Furu Wei. WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing. arXiv:2110.13900 [cs, eess], June 2022.
[42] Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, and Abdelrahman Mohamed. HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units. arXiv:2106.07447 [cs, eess], June 2021.
[43] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is All you Need. In 31st Conference on Neural Information Processing Systems (NIPS 2017), 2017.
[44] Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, and Michael Auli. wav2vec 2.
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content='0: A Framework for Self- Supervised Learning of Speech Representations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' arXiv:2006.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content='11477 [cs, eess], June 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' arXiv: 2006.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content='11477.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' [45] Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, and Jamie Brew.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' HuggingFace’s Transformers: State-of-the-art Natural Language Processing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' arXiv:1910.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content='03771 [cs], October 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' arXiv: 1910.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content='03771.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' [46] Fabian Pedregosa, Gaël Varoquaux, Alexandre Gramfort, Vincent Michel, Bertrand Thirion, Olivier Grisel, Mathieu Blondel, Peter Prettenhofer, Ron Weiss, Vincent Dubourg, Jake Vanderplas, Alexandre Passos, David Cournapeau, Matthieu Brucher, Matthieu Perrot, and Édouard Duchesnay.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' Scikit-learn: Machine Learning in Python.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' Mach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' Learn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' Res.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=', 12:2825–2830, November 2011.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'} +page_content=' 12' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1dAyT4oBgHgl3EQfofiA/content/2301.00508v1.pdf'}