diff --git "a/6tE4T4oBgHgl3EQfCAsz/content/tmp_files/load_file.txt" "b/6tE4T4oBgHgl3EQfCAsz/content/tmp_files/load_file.txt" new file mode 100644--- /dev/null +++ "b/6tE4T4oBgHgl3EQfCAsz/content/tmp_files/load_file.txt" @@ -0,0 +1,10540 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf,len=10539 +page_content='Multimodal Deep Learning arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='04856v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='CL] 12 Jan 2023 Contents Preface v Foreword 1 1 Introduction 3 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='1 Introduction to Multimodal Deep Learning .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' 3 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='2 Outline of the Booklet .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' 4 2 Introducing the modalities 7 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='1 State-of-the-art in NLP .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' 9 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='2 State-of-the-art in Computer Vision .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' 33 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='3 Resources and Benchmarks for NLP, CV and multimodal tasks 54 3 Multimodal architectures 83 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='1 Image2Text .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' 86 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='2 Text2Image .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' 100 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='3 Images supporting Language Models .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' 125 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='4 Text supporting Vision Models .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' 146 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='5 Models for both modalities .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' 159 4 Further Topics 181 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='1 Including Further Modalities .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' 181 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='2 Structured + Unstructured Data .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' 197 4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='3 Multipurpose Models .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' 209 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='4 Generative Art .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' 226 5 Conclusion 235 6 Epilogue 237 6.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='1 New influential architectures .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' 237 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='2 Creating videos .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' 238 7 Acknowledgements 239 iii Preface Author: Matthias Aßenmacher FIGURE 1: LMU seal (left) style-transferred to Van Gogh’s Sunflower painting (center) and blended with the prompt - Van Gogh, sunflowers - via CLIP+VGAN (right).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' In the last few years, there have been several breakthroughs in the methodolo- gies used in Natural Language Processing (NLP) as well as Computer Vision (CV).' 
Beyond these improvements on single-modality models, large-scale multimodal approaches have become a very active area of research. In this seminar, we reviewed these approaches and attempted to create a solid overview of the field, starting with the current state-of-the-art approaches in the two subfields of Deep Learning individually. Further, modeling frameworks are discussed where one modality is transformed into the other (Chapter 3.1 and Chapter 3.2), as well as models in which one modality is utilized to enhance representation learning for the other (Chapter 3.3 and Chapter 3.4). To conclude the second part, architectures with a focus on handling both modalities simultaneously are introduced (Chapter 3.5). Finally, we also cover other modalities (Chapter 4.1 and Chapter 4.2) as well as general-purpose multi-modal models (Chapter 4.3), which are able to handle different tasks on different modalities within one unified architecture. One interesting application (Generative Art, Chapter 4.4) eventually caps off this booklet.

FIGURE 2: Creative Commons License

This book is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
Foreword

Author: Matthias Aßenmacher

This book is the result of an experiment in university teaching. We were inspired by a group of other PhD students around Christoph Molnar, who conducted another seminar on Interpretable Machine Learning in this format. Instead of letting every student work on a seminar paper, more or less isolated from the other students, we wanted to foster collaboration between the students and enable them to produce a tangible output (that isn't written to spend the rest of its time in (digital) drawers). In the summer term 2022, some Statistics, Data Science and Computer Science students signed up for our seminar entitled "Multimodal Deep Learning" and had (before the kick-off meeting) no idea what they had signed up for: having written an entire book by the end of the semester.

We were bound by the examination rules for conducting the seminar, but otherwise we could deviate from the traditional format. We deviated in several ways:

1. Each student project is a chapter of this booklet, linked content-wise to other chapters since there is partly a large overlap between the topics.
2. We gave challenges to the students, instead of papers. The challenge was to investigate a specific impactful recent model or method from the field of NLP, Computer Vision or Multimodal Learning.
3. We designed the work to live beyond the seminar.
4. We emphasized collaboration. Students wrote the introductions to chapters in teams and reviewed each other's individual texts.
Technical Setup

The book chapters are written in the Markdown language. The simulations, data examples and visualizations were created with R (R Core Team, 2018). To combine R code and Markdown, we used rmarkdown. The book was compiled with the bookdown package. We collaborated using git and GitHub. For details, head over to the book's repository.

1 Introduction

Author: Nadja Sauter
Supervisor: Matthias Aßenmacher

1.1 Introduction to Multimodal Deep Learning

There are five basic human senses: hearing, touch, smell, taste and sight. Possessing these five modalities, we are able to perceive and understand the world around us. Thus, "multimodal" means to combine different channels of information simultaneously to understand our surroundings. For example, when toddlers learn the word "cat", they use different modalities by saying the word out loud, pointing at cats and making sounds like "meow". Using the human learning process as a role model, artificial intelligence (AI) researchers also try to combine different modalities to train deep learning models. On a superficial level, deep learning algorithms are based on a neural network that is trained to optimize some objective which is mathematically defined via the so-called loss function. The optimization, i.e. minimizing the loss, is done via a numerical procedure called gradient descent.
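To make the notions of a loss function and gradient descent concrete, the following is a minimal, self-contained sketch (not part of the original booklet): a toy linear model fitted by plain gradient descent on a mean squared error loss. The data, learning rate and number of steps are illustrative choices.

```python
import numpy as np

# Toy data: y is roughly 3*x + 1 plus a little noise
rng = np.random.default_rng(0)
x = rng.uniform(-1, 1, size=100)
y = 3 * x + 1 + 0.1 * rng.normal(size=100)

w, b = 0.0, 0.0   # model parameters to be learned
lr = 0.1          # learning rate (step size)

for step in range(500):
    y_hat = w * x + b                  # model prediction
    loss = np.mean((y_hat - y) ** 2)   # mean squared error loss (the objective)
    # Gradients of the loss with respect to w and b
    grad_w = np.mean(2 * (y_hat - y) * x)
    grad_b = np.mean(2 * (y_hat - y))
    # Gradient descent update: take a small step against the gradient
    w -= lr * grad_w
    b -= lr * grad_b

print(f"w={w:.2f}, b={b:.2f}, final loss={loss:.4f}")
```

The same loop structure, with far more parameters and automatic differentiation, underlies the training of the neural networks discussed throughout this booklet.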
Consequently, deep learning models can only handle numeric input and can only result in a numeric output. However, in multimodal tasks we are often confronted with unstructured data like pictures or text. Thus, the first major problem is how to represent the input numerically. The second issue with regard to multimodal tasks is how exactly to combine different modalities. For instance, a typical task could be to train a deep learning model to generate a picture of a cat. First of all, the computer needs to understand the text input "cat" and then somehow translate this information into a specific image. Therefore, it is necessary to identify the contextual relationships between words in the text input and the spatial relationships between pixels in the image output. What might be easy for a toddler in pre-school is a huge challenge for the computer. Both have to learn some understanding of the word "cat" that comprises the meaning and appearance of the animal. A common approach in modern deep learning is to generate embeddings that represent the cat numerically as a vector in some latent space. However, to achieve this, different approaches and algorithmic architectures have been developed in recent years. This book gives an overview of the different methods used in state-of-the-art (SOTA) multimodal deep learning to overcome challenges arising from unstructured data and combining inputs of different modalities.
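As an illustration of how unstructured input can be represented numerically, here is a small, hypothetical sketch of an embedding lookup: tokens are mapped to vectors in a latent space via an embedding matrix. The vocabulary, dimensionality and random initialization below are placeholders; in a trained model the vectors would be learned so that related words end up close together.

```python
import numpy as np

# Tiny vocabulary and a randomly initialized embedding matrix.
# In a trained model these vectors are learned such that semantically
# related tokens lie close to each other in the latent space.
vocab = {"cat": 0, "dog": 1, "car": 2}
rng = np.random.default_rng(42)
embedding_matrix = rng.normal(size=(len(vocab), 8))   # 8-dimensional latent space

def embed(token: str) -> np.ndarray:
    """Look up the latent vector that numerically represents a token."""
    return embedding_matrix[vocab[token]]

def cosine_similarity(u: np.ndarray, v: np.ndarray) -> float:
    """Standard measure of how close two embeddings are."""
    return float(u @ v / (np.linalg.norm(u) * np.linalg.norm(v)))

cat_vec = embed("cat")
print(cat_vec.shape)                                  # (8,): "cat" as a numeric vector
print(cosine_similarity(embed("cat"), embed("dog")))  # high after training, random here
```

1.2 Outline of the Booklet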
Methods in the area of NLP try to handle text data, whereas CV deals with image processing. With regard to NLP (subsection 2.1), one concept of major importance is the so-called word embedding, which is nowadays an essential part of (nearly) all multimodal deep learning architectures. This concept also sets the foundation for transformer-based models like BERT (Devlin et al., 2018a), which achieved a huge improvement in several NLP tasks. Especially the (self-)attention mechanism (Vaswani et al., 2017a) of transformers revolutionized NLP models, which is why most of them rely on the transformer as a backbone. In Computer Vision (subsection 2.2), different network architectures, namely ResNet (He et al., 2015), EfficientNet (Tan and Le, 2019a), SimCLR (Chen et al., 2020a) and BYOL (Grill et al., 2020b), will be introduced. In both fields it is of great interest to compare the different approaches and their performance on challenging benchmarks. For this reason, the last subsection 2.3 of Chapter 2 gives an overall overview of different data sets, pre-training tasks and benchmarks for CV as well as for NLP. Chapter 3 focuses on different multimodal architectures, covering a wide variety of ways in which text and images can be combined.
The presented models combine and advance different methods of NLP and CV. First of all, looking at Img2Text tasks (subsection 3.1), the data set Microsoft COCO for object recognition (Lin et al., 2014a) and the meshed-memory transformer for Image Captioning (M2 Transformer) (Cornia et al., 2019) will be presented. Conversely, researchers have developed methods to generate pictures based on a short text prompt (subsection 3.2). The first models accomplishing this task were generative adversarial networks (GANs) (Goodfellow et al., 2014b) and Variational Autoencoders (VAEs) (Kingma and Welling, 2019). These methods were improved in recent years, and today’s SOTA transformer architectures and text-guided diffusion models like DALL-E (Ramesh et al., 2021a) and GLIDE (Nichol et al., 2021a) achieve remarkable results. Another interesting question is how images can be utilized to support language models (subsection 3.3). This can be done via sequential embeddings, more advanced grounded embeddings or, again, inside transformers. On the other hand, one can also look at text supporting CV models like
CLIP (Radford et al., 2021b), ALIGN (Jia et al., 2021a) and Florence (Yuan et al., 2021) (subsection 3.4). These use foundation models, i.e. they reuse pre-trained models (e.g. CLIP inside DALL-E 2), as well as a contrastive loss for connecting text with images. Besides, zero-shot learning makes it possible to classify new and unseen data without expensive fine-tuning. Especially the open-source architecture CLIP (Radford et al., 2021b) for image classification and generation attracted a lot of attention last year. At the end of Chapter 3, some further architectures to handle text and images simultaneously are introduced (subsection 3.5). For instance, Data2Vec uses the same learning method for speech, vision and language and in this way aims to find a general approach to handle different modalities in one architecture. Furthermore, VilBert (Lu et al., 2019a) extends the popular BERT architecture to handle both image and text as input by implementing co-attention. This method is also used in Google Deepmind’s Flamingo (Alayrac et al., 2022).
In addition, Flamingo aims to tackle multiple tasks with a single visual language model via few-shot learning and by freezing the pre-trained vision and language models. In the last chapter (see 4), methods are introduced that are also able to handle modalities other than text and image, such as video, speech or tabular data. The overall goal here is to find a general multimodal architecture based on challenges rather than modalities. Therefore, one needs to handle problems of multimodal fusion and alignment and decide whether to use a joint or a coordinated representation (subsection 4.1). Moreover, we go into more detail about how exactly to combine structured and unstructured data (subsection 4.2). To this end, different fusion strategies that have evolved in recent years will be presented. This is illustrated in this book by two use cases in survival analysis and economics. Besides this, another interesting research question is how to tackle different tasks in one so-called multi-purpose model (subsection 4.3), as intended by Google researchers (Barham et al., 2022) with their “Pathways” model. Last but not least, we show one exemplary application of Multimodal Deep Learning in the arts scene, where image generation models like DALL-E (Ramesh et al., 2021a) are used to create art pieces in the area of Generative Arts (subsection 4.4).
2 Introducing the modalities

Authors: Cem Akkus, Vladana Djakovic, Christopher Benjamin Marquardt
Supervisor: Matthias Aßenmacher

Natural Language Processing (NLP) has existed for about 50 years, but it is more relevant than ever. There have been several breakthroughs in this branch of machine learning that is concerned with spoken and written language. For example, learning internal representations of words was one of the greater advances of the last decade. Word embeddings (Mikolov et al. (2013a), Bojanowski et al. (2016)) made this possible and allowed developers to encode words as dense vectors that capture their underlying semantic content. In this way, similar words are embedded close to each other in a lower-dimensional feature space. Another important challenge was solved by encoder-decoder (also called sequence-to-sequence) architectures (Sutskever et al., 2014), which made it possible to map input sequences to output sequences of different lengths. They are especially useful for complex tasks like machine translation, video captioning or question answering. This approach makes minimal assumptions on the sequence structure and can deal with different word orders and active, as well as passive voice. Another significant state-of-the-art technique is attention (Bahdanau et al., 2014), which enables models to actively shift their focus – just like humans do. It allows following one thought at a time while suppressing information irrelevant to the task.
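As a rough numerical illustration of this focus mechanism (a sketch with random stand-in vectors, not the exact formulation used later in the book), the softmax weights below determine how strongly a single query attends to each position of a short input sequence.

```r
set.seed(7)
d <- 4                                     # embedding dimension
keys   <- matrix(rnorm(5 * d), nrow = 5)   # one key vector per input position
values <- matrix(rnorm(5 * d), nrow = 5)   # one value vector per input position
query  <- rnorm(d)                         # representation of the current focus

scores  <- keys %*% query / sqrt(d)        # similarity of the query to each position
weights <- exp(scores) / sum(exp(scores))  # softmax: the "amount of focus" per position
context <- colSums(as.vector(weights) * values)  # weighted summary of the sequence
round(as.vector(weights), 3)               # non-negative weights that sum to 1
```

Positions with large weights dominate the resulting context vector, which is what "shifting focus" amounts to computationally.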
As a consequence, attention has been shown to significantly improve performance for tasks like machine translation. By giving the decoder access to directly look at the source, the bottleneck is avoided and, at the same time, it provides a shortcut to faraway states and thus helps with the vanishing gradient problem. One of the most recent sequence data modeling techniques is the transformer (Vaswani et al., 2017b), which is solely based on attention and does not have to process the input data sequentially (like RNNs). Therefore, the model is better at remembering context introduced earlier in long sequences. It is currently the dominant paradigm in NLP and even makes better use of GPUs, because it can perform operations in parallel. Transformer architectures like BERT (Devlin et al., 2018b), T5 (Raffel et al., 2019a) or GPT-3 (Brown et al., 2020) are pre-trained on a large corpus and can be fine-tuned for specific language tasks. They have the capability to generate stories, poems, code and much more. With the help of the aforementioned breakthroughs, deep networks have been successful in retrieving information and finding representations of semantics in the text modality. In the next paragraphs, developments for another modality, images, are going to be presented. Computer vision (CV) focuses on replicating parts of the complexity of the human visual system and enabling computers to identify and process objects in images and videos in the same way that humans do.
In recent years it has become one of the main and most widely applied fields of computer science. However, there are still problems that are current research topics, whose solutions depend on the researchers’ view of the topic. One of these problems is how to optimize deep convolutional neural networks for image classification. The accuracy of classification depends on network width, depth and image resolution. One way to address the degradation of training accuracy is by introducing a deep residual learning framework (He et al., 2015). A different, less common approach to achieve better accuracy is to scale up ConvNets, for instance by increasing the image resolution. Based on this observation, a simple yet effective compound scaling method, called EfficientNet, was proposed (Tan and Le, 2019a). Another state-of-the-art trend in computer vision is learning effective visual representations without human supervision. Discriminative approaches based on contrastive learning in the latent space have recently shown great promise, achieving state-of-the-art results; the simple framework for contrastive learning of visual representations, called SimCLR, outperforms previous work (Chen et al., 2020a). Another line of research proposes as an alternative a simple “swapped” prediction problem, in which the code of one view is predicted from the representation of another view; here, features are learned by Swapping Assignments between multiple Views of the same image (SwAV) (Caron et al., 2020).
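The following is a toy sketch of the contrastive idea behind methods such as SimCLR, using random stand-in embeddings instead of the output of a real encoder: representations of two augmented views of the same image should end up similar, while views of different images should not.

```r
set.seed(42)
l2_normalise <- function(m) m / sqrt(rowSums(m^2))
z1 <- l2_normalise(matrix(rnorm(4 * 8), nrow = 4))  # view 1 of 4 images (8-dim embeddings)
z2 <- l2_normalise(matrix(rnorm(4 * 8), nrow = 4))  # view 2 of the same 4 images

temperature <- 0.5
sim <- (z1 %*% t(z2)) / temperature  # scaled cosine similarities between all view pairs
# For image i, (z1[i, ], z2[i, ]) forms the positive pair; all other rows act as negatives.
loss <- mean(-log(exp(diag(sim)) / rowSums(exp(sim))))
loss  # training a real encoder would minimize this value
```

The full NT-Xent loss used by SimCLR additionally treats the remaining views within the same batch as negatives, but the pull-together/push-apart structure is the same.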
Further recent contrastive methods are trained by reducing the distance between representations of different augmented views of the same image (‘positive pairs’) and increasing the distance between representations of augmented views from different images (‘negative pairs’). Bootstrap Your Own Latent (BYOL) is a new algorithm for self-supervised learning of image representations (Grill et al., 2020b). Self-attention-based architectures, in particular transformers, have become the model of choice in natural language processing (NLP). Inspired by NLP successes, multiple works try combining CNN-like architectures with self-attention, some replacing the convolutions entirely. The latter models, while theoretically efficient, have not yet been scaled effectively on modern hardware accelerators due to the use of specialized attention patterns. Inspired by the transformer scaling successes in NLP, one line of experiments applies a standard transformer directly to images (Dosovitskiy et al., 2020b). Due to the widespread application of computer vision, these problems differ and are constantly at the center of attention of more and more research. With the rapid development in NLP and CV in recent years, it was just a question of time until both modalities were merged to tackle multi-modal tasks. The release of DALL-E 2 just hints at what one can expect from this merge in the future. DALL-E 2 is able to create photorealistic images or even art from any given text input. So it takes the information of one modality and turns it into another modality.
This requires multi-modal datasets, which are still relatively rare, and underlines the importance of available data and of the ability to use it. Nevertheless, all modalities need huge datasets to pre-train their models. It’s common to pre-train a model and fine-tune it afterwards for a specific task on another dataset. For example, every state-of-the-art CV model uses a classifier pre-trained on an ImageNet-based dataset. The cardinality of the datasets used for CV is immense, but the datasets used for NLP are of a completely different magnitude. BERT uses the English Wikipedia and the BookCorpus to pre-train the model. The latter consists of almost 1 billion words and 74 million sentences. The pre-training of GPT-3 is composed of five huge corpora: CommonCrawl, Books1 and Books2, Wikipedia and WebText2. Unlike language model pre-training, which can leverage tremendous amounts of natural language data, vision-language tasks require high-quality image descriptions that are hard to obtain for free. Widely used pre-training datasets for VL-PTMs are Microsoft Common Objects in Context (COCO), Visual Genome (VG), Conceptual Captions (CC), Flickr30k, LAION-400M and LAION-5B, which is now the biggest openly accessible image-text dataset. Besides the importance of pre-training data, there must also be a way to test or compare the different models. A reasonable approach is to compare the performance on specific tasks, which is called benchmarking. A nice feature of benchmarks is that they allow us to compare the models to a human baseline.
Different metrics are used to compare the performance of the models. Accuracy is widely used, but there are also some others. For CV the most common benchmark datasets are ImageNet, ImageNet-ReaL, CIFAR-10(0), OXFORD-IIIT PET, OXFORD Flower 102, COCO and the Visual Task Adaptation Benchmark (VTAB). The most common benchmarks for NLP are the General Language Understanding Evaluation (GLUE), SuperGLUE, SQuAD 1.1, SQuAD 2.0, SWAG, RACE, ReCoRD and CoNLL-2003. VTAB, GLUE and SuperGLUE also provide a public leaderboard. Cross-modal tasks such as Visual Question Answering (VQA), Visual Commonsense Reasoning (VCR), Natural Language Visual Reasoning (NLVR), Flickr30K, COCO and Visual Entailment are common benchmarks for VL-PTMs.

2.1 State-of-the-art in NLP

Author: Cem Akkus
Supervisor: Matthias Aßenmacher

2.1.1 Introduction

Natural Language Processing (NLP) has existed for about 50 years, but it is more relevant than ever. There have been several breakthroughs in this branch of machine learning that is concerned with spoken and written language. In this work, the most influential ones of the last decade are going to be presented. We start with word embeddings, which efficiently model word semantics. Encoder-decoder architectures represent another step forward by making minimal assumptions about the sequence structure.
Next, the attention mechanism allows human-like focus shifting to put more emphasis on the more relevant parts. Then, the transformer applies attention in its architecture to process the data non-sequentially, which boosts the performance on language tasks to exceptional levels. Finally, the most influential transformer architectures are reviewed before a few current topics in natural language processing are discussed.

2.1.2 Word Embeddings

As mentioned in the introduction, one of the earlier advances in NLP is learning internal representations of words. Before that, a big problem with text modelling was its messiness, while machine learning algorithms undoubtedly prefer structured and well-defined fixed-length inputs. On a granular level, the models work with numerical rather than textual data. Thus, by using very basic techniques like one-hot encoding or bag-of-words, a text is converted into its equivalent vector of numbers without losing information. In the example depicting one-hot encoding (see Figure 2.1), there are ten simple words and the dark squares indicate the only index with a non-zero value.

FIGURE 2.1: Ten one-hot encoded words (Source: Pilehvar and Camacho-Collados (2021))

In contrast, there are multiple non-zero values when using bag-of-words, another way of extracting features from text for modelling, in which we measure whether words from a vocabulary of known words are present in the text. It is called bag-of-words because the order of the words is disregarded.
Treating words as atomic units has some plausible reasons, like robustness and simplicity. It has even been argued that simple models trained on a huge amount of data outperform complex models trained on less data. However, simple techniques are problematic for many tasks, e.g. when it comes to relevant in-domain data for automatic speech recognition. The size of high-quality transcribed speech data is often limited to just millions of words, so simply scaling up simpler models is not possible in certain situations and therefore more advanced techniques are needed. Additionally, thanks to the progress of machine learning techniques, it is realistic to train more complex models on massive amounts of data, and more complex models generally outperform basic ones. Other disadvantages of classic word representations are the curse of dimensionality and the generalization problem. The former becomes a problem because a growing vocabulary equivalently increases the feature size, which results in sparse and high-dimensional vectors. The latter occurs because the similarity between words is not captured, and therefore previously learned information cannot be used. Besides, assigning a distinct vector to each word is a limitation, which becomes especially obvious for languages with large vocabularies and many rare words.
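Before moving on, the following minimal sketch makes the two classic representations discussed above concrete; the vocabulary and the example sentence are arbitrary choices for illustration, not taken from the book.

```r
vocab <- c("basket", "desk", "cloud", "plate", "rabbit", "tree", "table", "lion")

# One-hot encoding: exactly one non-zero entry per word.
one_hot <- function(word, vocab) as.integer(vocab == word)
one_hot("desk", vocab)

# Bag-of-words: count how often each vocabulary word occurs in a text,
# disregarding the order of the words.
bag_of_words <- function(text, vocab) {
  tokens <- unlist(strsplit(tolower(text), "\\s+"))
  sapply(vocab, function(w) sum(tokens == w))
}
bag_of_words("the rabbit sat on the table next to a plate", vocab)
```

Both vectors grow with the size of the vocabulary and carry no notion of similarity between words, which is exactly the limitation that word embeddings address.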
To combat the downfalls of simple word representations, word embeddings enable the use of efficient and dense representations in which similar words have a similar encoding. So words that are closer in the vector space are expected to be similar in meaning. An embedding is hereby defined as a vector of floating point values (with the length of the vector being a hyperparameter). The values of the embedding are trainable parameters which are learned similarly to a model learning the weights of a dense layer. The dimensionality of the word representations is typically much smaller than the number of words in the dictionary. For example, Mikolov et al. (2013a) considered dimensions between 50 and 100 modest for more than a few hundred million words. For small data sets, the dimensionality of the word vectors could start at 8 and go up to 1024 for larger data sets. Higher-dimensional embeddings are expected to pick up more intricate relationships between words, given enough data to learn from. For most NLP tasks, it is sensible to start with word embeddings because they allow prior knowledge to be conveniently incorporated into the model and can be seen as a basic form of transfer learning. It is important to note that even though embeddings attempt to represent the meaning of words, and do so to an extent, the semantics of a word in a given context cannot be captured. This is due to the words having static precomputed representations in traditional embedding techniques. Thus, the word “bank” can either refer to a financial institution or a river bank. Contextual embedding methods offer a solution, but more about them will follow later.

FIGURE 2.2: Three-dimensional word embeddings (Source: Pilehvar and Camacho-Collados (2021)).
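Concretely, an embedding layer is little more than a lookup into a trainable matrix of floating point values. The sketch below uses random numbers in place of learned values, and the tiny vocabulary is made up for illustration.

```r
vocab   <- c("bank", "river", "money", "cat")
emb_dim <- 3   # length of each embedding vector (a hyperparameter)

set.seed(1)
E <- matrix(rnorm(length(vocab) * emb_dim), nrow = length(vocab),
            dimnames = list(vocab, NULL))   # trainable parameters in a real model

E["bank", ]   # the same static vector is returned regardless of the sentence context
```

The comment on the last line points to exactly the limitation mentioned above: a traditional embedding cannot distinguish the financial “bank” from the river “bank”.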
It should be noted that words can have various degrees of similarity. This becomes obvious in the context of inflectional languages, where words are adjusted to express grammatical categories. For example, in a subspace of the original vectors, nouns that have similar endings can be found. However, the structure goes beyond simple syntactic regularities. With straightforward operations on the word vectors, it can be shown that vector(“King”) − vector(“Man”) + vector(“Woman”) results in a vector that is closest in vector space (and therefore in meaning) to the word “Queen”. A simple visualization of this relationship can be seen in the left graph below (see Figure 2.3). The three coordinate systems are representations of higher dimensions that are depicted in this way via dimension reduction techniques. Furthermore, the verb-to-tense relationship is expressed in the middle graphic; it extends the earlier insight about similar word endings, because in this instance the past tenses of the verbs walking and swimming are not similar in structure. Additionally, on the right side of the figure, there is a form of the commonly portrayed and easily understood Country-Capital example (see Mikolov et al. (2013a)).

FIGURE 2.3: Three types of similarities as word embeddings (Source: Google (2022)).
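A minimal sketch of this kind of vector arithmetic is shown below; the three-dimensional embeddings are hand-picked toy values chosen only so that the analogy works out, not vectors from a trained model.

```r
emb <- rbind(                     # hypothetical 3-dimensional word embeddings
  king  = c(0.9, 0.8, 0.1),
  queen = c(0.9, 0.1, 0.8),
  man   = c(0.1, 0.9, 0.1),
  woman = c(0.1, 0.1, 0.9)
)
cosine <- function(a, b) sum(a * b) / (sqrt(sum(a^2)) * sqrt(sum(b^2)))

target <- emb["king", ] - emb["man", ] + emb["woman", ]
sort(apply(emb, 1, cosine, b = target), decreasing = TRUE)  # "queen" ranks highest
```

With real embeddings trained on large corpora, the same nearest-neighbour search over the whole vocabulary recovers analogies like those shown in Figure 2.3.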
Another way of using vector representations of words is in the field of translations. It has been shown that relations can be drawn between the feature spaces of different languages. Below, the distributed word representations of numbers in English and Spanish are compared (see Figure 2.4). In this case, the same numbers have similar geometric arrangements, which suggests that mapping linearly between the vector spaces of languages is feasible. Applying this simple method to a larger set of translations between English and Spanish led to remarkable results, achieving almost 90% precision.

FIGURE 2.4: Representations of numbers in English and Spanish (Source: Mikolov et al. (2013c)).

This technique was then used for other experiments. One use case is the detection of dictionary errors. Taking translations from a dictionary and computing their geometric distance returns a confidence measure. Closely evaluating the translations with low confidence and outputting an alternative (one that is closest in vector space) results in a simple way to assess dictionary translations. Furthermore, training the word embeddings on large corpora makes it possible to give sensible out-of-dictionary predictions for words.
This was tested by randomly removing a part of the vocabulary beforehand. Taking a look at the predictions revealed that they were often to some extent related to the translations with regard to meaning and semantics. Despite the accomplishments in other tasks, translations between distant languages exposed shortcomings of word embeddings. For example, the accuracy for translations between English and Vietnamese seemed significantly lower. This can be ascribed to the two languages not having a good one-to-one correspondence, because the concept of a word in Vietnamese is different from that in English. In addition, the Vietnamese model used contains numerous synonyms, which complicates making exact predictions (see Mikolov et al. (2013c)). We now turn our attention to one of the most impactful embedding techniques: word2vec. It was proposed by Mikolov et al. (2013a) and is not a singular algorithm. It can rather be seen as a family of model architectures and optimizations to learn word representations. Word2vec’s popularity also stems from its success on multiple downstream natural language processing tasks.
It has a very simple structure which is based on a basic feed-forward neural network. The authors published multiple papers (see Mikolov et al. (2013a), Mikolov et al. (2013c), Mikolov et al. (2013d)) that revolve around two different but related methods for learning word embeddings (see Figure 2.5). Firstly, the continuous bag-of-words (CBOW) model aims to predict the middle word based on the surrounding context words. Hence, it considers components before and after the target word. As the order of the words in the context is not relevant, it is called a bag-of-words model. Secondly, the continuous skip-gram model only considers the current word and predicts others within a range before and after it in the same sentence. Both models use a softmax classifier for the output layer.

FIGURE 2.5: CBOW and Skip-gram architecture (Source: Mikolov et al. (2013a)).

Then, Bojanowski et al. (2016) built on skip-gram models by accounting for the morphology (internal structure) of words. A different classical embedding architecture that has to be at least mentioned is the GloVe model, which does not use a neural network but combines local context information with global co-occurrence statistics.
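To make the two training objectives concrete, the following minimal sketch trains both variants on a toy corpus; it assumes the gensim library (4.x API), and the corpus, vector size, window and epoch count are arbitrary illustrative choices rather than values from the papers above.

from gensim.models import Word2Vec

# Toy corpus: a list of tokenized sentences (real applications use millions of sentences).
corpus = [
    ["the", "movie", "was", "great"],
    ["the", "film", "was", "boring"],
    ["she", "watched", "the", "movie", "yesterday"],
    ["he", "watched", "the", "film", "today"],
]

# sg=0 selects the CBOW objective (predict the middle word from its context),
# sg=1 selects the skip-gram objective (predict the context words from the current word).
cbow = Word2Vec(corpus, vector_size=50, window=2, min_count=1, sg=0, epochs=200, seed=1)
skipgram = Word2Vec(corpus, vector_size=50, window=2, min_count=1, sg=1, epochs=200, seed=1)

# Every word is now mapped to a dense 50-dimensional vector.
print(skipgram.wv["movie"].shape)              # (50,)
# Nearest neighbours in the embedding space; on such a tiny corpus the ranking is noisy.
print(skipgram.wv.most_similar("movie", topn=3))

On a corpus of realistic size, semantically related words such as "movie" and "film" end up close to each other in this vector space, which is exactly the property the translation experiments above exploit.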
2.1.3 Encoder-Decoder

The field of natural language processing is concerned with a variety of different tasks surrounding text. Depending on the type of NLP problem, the network may be confronted with variable-length sequences as input and/or output. This is the case for many compelling applications, such as question answering, dialogue systems or machine translation. In the following, many examples will explore machine translation in more detail, since it is a major problem domain. Regarding translation tasks, it becomes obvious that input sequences need to be mapped to output sequences of different lengths. To manage this type of input and output, a design with two main parts could be useful. The first one is called the encoder because, in this part of the network, a variable-length input sequence is transformed into a fixed-length state. Next, the second component, called the decoder, maps the encoded state to an output sequence of variable length. As a whole, this is known as an encoder-decoder or sequence-to-sequence architecture and has become an effective and standard approach for many applications which even recurrent neural networks with gated hidden units have trouble solving successfully. Deep RNNs may have a chance, but architectures like the encoder-decoder have proven to be the most effective. It can even deal with different word orders and with active as well as passive voice (Sutskever et al., 2014).
A simplified example of the encoder-decoder model can be seen in Figure 2.6.

FIGURE 2.6: Translation through simplified seq2seq model (Source: Manning et al. (2022)).

Before going through the equations quantifying these concepts, it makes sense to examine the sequence-to-sequence design proposed by Cho et al. (2014). An encoder RNN processes the input sequence of length n_x and computes a fixed-length context vector c, which is usually the final hidden state of the encoder or a simple function of the hidden states. As the input sequence is processed, its information is added to the hidden state and passed forward in time through the recurrent connections between the hidden states of the encoder. Although the context vector is usually a simple function of the last hidden state, its role should not be underestimated: the encoded state summarizes important information from the input sequence, e.g. the intent in a question-answering task or the meaning of a text in the case of machine translation. The context is passed to every hidden state of the decoder, which uses this information to produce the target sequence of length n_y, which can of course differ from n_x. The above description already suggests that the decoder is the part that is most interesting to express in equations.
FIGURE 2.7: Encoder-decoder architecture (Source: Cho et al. (2014)).

The notation mainly follows Cho et al. (2014). The decoder is another RNN which is trained to predict the next target word given its hidden state. However, unlike a regular RNN, it is also conditioned on the output of the last time step, y^[t-1], and on the summary of the input, c. Therefore, the hidden state of the decoder is computed by:

h_d^[t] = f(h_d^[t-1], y^[t-1], c).

Similarly, each conditional probability is given by the following, where f is a non-linear activation function that must produce valid probabilities in [0, 1] (e.g. the softmax function):

P(y^[t] | y^[1], ..., y^[t-1], c) = f(h_d^[t], y^[t-1], c).
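To make these two equations concrete, here is a minimal numpy sketch of a single decoding step; the tanh/softmax parameterization, the dimensions and the random weights are illustrative assumptions rather than the exact design of Cho et al. (2014).

import numpy as np

rng = np.random.default_rng(0)
d_h, d_emb, vocab = 8, 6, 10           # hidden size, embedding size, vocabulary size (toy values)

W_h = rng.normal(size=(d_h, d_h))      # recurrence on the previous decoder state h_d^[t-1]
W_y = rng.normal(size=(d_h, d_emb))    # conditioning on the previous output word y^[t-1]
W_c = rng.normal(size=(d_h, d_h))      # conditioning on the context vector c
W_o = rng.normal(size=(vocab, d_h))    # output projection onto the vocabulary

def softmax(x):
    z = np.exp(x - x.max())
    return z / z.sum()

def decoder_step(h_prev, y_prev_emb, c):
    # h_d^[t] = f(h_d^[t-1], y^[t-1], c): here f is a tanh of a linear combination.
    h_t = np.tanh(W_h @ h_prev + W_y @ y_prev_emb + W_c @ c)
    # P(y^[t] | y^[<t], c): a softmax over the vocabulary; for brevity it is conditioned
    # only on the new hidden state, which already depends on y^[t-1] and c.
    p_t = softmax(W_o @ h_t)
    return h_t, p_t

c = rng.normal(size=d_h)               # context vector produced by the encoder
h, y_emb = np.zeros(d_h), np.zeros(d_emb)
for t in range(3):                     # greedily generate three target words
    h, p = decoder_step(h, y_emb, c)
    token = int(p.argmax())
    y_emb = rng.normal(size=d_emb)     # stand-in for looking up the embedding of the chosen token
    print(t, token, round(float(p[token]), 3))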
The two parts are jointly trained to maximize the conditional log-likelihood, where θ denotes the set of model parameters and (x_n, y_n) is an (input sequence, output sequence) pair from a training set of size N:

max_θ (1/N) Σ_{n=1}^{N} log p_θ(y_n | x_n).

At inference time, the most probable output is usually searched for with the beam search algorithm. Its core idea is that on each step of the decoder, we keep track of the k most probable partial translations (which are called hypotheses); a small sketch of this procedure follows below. Examining the translation presented above with the hidden units unrolled through time could look like Figure 2.8.

FIGURE 2.8: Translation through seq2seq model (Source: Manning et al. (2022)).

In particular, multiple hidden layers are recommended by the researchers. The idea is that lower layers compute lower-level features and higher layers compute higher-level features. Gated recurrent networks, especially long short-term memory (LSTM) networks, have been found to be effective in both components of the sequence-to-sequence architecture. Furthermore, it was revealed that deep LSTMs significantly outperform shallow LSTMs. Each additional layer reduced perplexity by nearly 10%, possibly due to their much larger hidden state. For example, Sutskever et al. (2014) used deep LSTMs with 4 layers and 1000 cells at each layer together with 1000-dimensional word embeddings. Thus, in total, 8000 real numbers are used to represent a sentence (4 layers × 1000 units, each contributing a hidden state and a cell state).
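Returning to the beam search procedure described above, the following minimal sketch runs it over a toy bigram model; the transition table, the beam width k and the search length are invented purely for illustration.

import math

# Toy next-token log-probabilities given only the previous token (a bigram model).
log_probs = {
    "<s>":      {"je": math.log(0.6), "tu": math.log(0.4)},
    "je":       {"suis": math.log(0.7), "vais": math.log(0.3)},
    "tu":       {"es": math.log(0.8), "vas": math.log(0.2)},
    "suis":     {"étudiant": math.log(0.9), "</s>": math.log(0.1)},
    "vais":     {"</s>": math.log(1.0)},
    "es":       {"étudiant": math.log(0.5), "</s>": math.log(0.5)},
    "vas":      {"</s>": math.log(1.0)},
    "étudiant": {"</s>": math.log(1.0)},
}

def beam_search(k=2, max_len=4):
    # Each hypothesis is a (partial sequence, accumulated log-probability) pair.
    beams = [(["<s>"], 0.0)]
    for _ in range(max_len):
        candidates = []
        for seq, score in beams:
            if seq[-1] == "</s>":              # finished hypotheses are carried over unchanged
                candidates.append((seq, score))
                continue
            for token, lp in log_probs[seq[-1]].items():
                candidates.append((seq + [token], score + lp))
        # Keep only the k most probable partial translations (the "hypotheses").
        beams = sorted(candidates, key=lambda c: c[1], reverse=True)[:k]
    return beams

for seq, score in beam_search():
    print(" ".join(seq), round(score, 3))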
For simplicity, these networks are referred to as RNNs in the following, which does not contradict the insights of this paragraph, as LSTMs are a type of gated RNN (Sutskever et al., 2014).

2.1.4 Attention

Although encoder-decoder architectures simplified dealing with variable-length sequences, they also caused complications. Due to their design, the encoding of the source sentence is a single vector representation (the context vector). The problem is that this state must compress all information about the source sentence into a single vector; this is commonly referred to as the bottleneck problem. To be precise, the entire semantics of arbitrarily long sentences need to be wrapped into a single hidden state. Moreover, it constitutes a difficult learning problem because the information needs to be passed across numerous time steps. This leads to vanishing gradients within the network, as many factors smaller than 1 are multiplied with each other at every step (for instance, 0.9^50 ≈ 0.005). To illustrate, the last sentence is itself a good example of one with which an encoder-decoder approach could have difficulty coping, in particular if the sentences are longer than the ones in the training corpus (Manning et al., 2022).
Due to the aforementioned reasons, an extension to the sequence-to-sequence architecture was proposed by Bahdanau et al. (2014), which learns to align and translate jointly. For every generated word, the model scans through some positions in the source sentence where the most relevant information is located. Afterwards, based on the context around these positions and on the previously generated words, the model predicts the target word for the current time step. This approach is called attention, as it emulates human-like (cognitive) attention. By looking at the source directly and bypassing the bottleneck, it provides a solution to the problem described above. It also mitigates the vanishing gradient problem, since there is now a shortcut to faraway states. Consequently, incorporating the attention mechanism has been shown to considerably boost the performance of models on NLP tasks.

A walkthrough of the example below should resolve any outstanding questions regarding the procedure of the attention mechanism. The source sentence, given in French, is seen on the bottom left and acts as the input for the encoder RNN (in red). Then, the attention scores (in blue) are computed by taking the dot product between the current decoder hidden state and the encoder hidden states of the input words. Next, the softmax function turns the scores into a probability distribution (in pink). These weights are used to take a weighted sum of the encoder's hidden states and form the attention output, which mostly contains information from the hidden states that received high attention.
Afterwards, the attention output is concatenated with the decoder hidden state (in green), and the result is used to compute the decoder output as before. In some scenarios, the attention output is also fed into the decoder (along with the usual decoder input). This specific example was chosen because "entarter" means "to hit someone with a pie" and is therefore a word that needs to be translated with many words. Since there is no direct equivalent for this phrase, it is expected that more than one attention score is clearly non-zero. In this snapshot, the attention distribution can indeed be seen to have two significant contributors.

The following equations aim to compactly represent the relations brought forward in the last paragraphs and mainly follow Manning et al. (2022). The attention scores e^[t] are computed by taking the dot product of the decoder hidden state with each of the encoder hidden states:

e^[t] = [(h_d^[t])^T h_e^[1], ..., (h_d^[t])^T h_e^[N]].

Besides this basic dot-product attention, there are also other ways to calculate the attention scores, e.g. through multiplicative or additive attention. Although they will not be discussed further at this point, it makes sense to at least mention them. Then, applying the softmax to the scalar scores results in the attention distribution α^[t], a probability distribution whose values sum up to 1:

α^[t] = softmax(e^[t]).

Next, the attention output a^[t] is obtained by using the attention distribution as weights for the encoder hidden states:

a^[t] = Σ_{i=1}^{N} α_i^[t] h_e^[i].

Concatenating the attention output with the decoder hidden state and proceeding as in the non-attention sequence-to-sequence model are the final steps:

o^[t] = f([a^[t]; h_d^[t]]).
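The attention equations above condense into a few lines of numpy; the dimensions and the random encoder and decoder states below are placeholders chosen only to demonstrate the computation.

import numpy as np

rng = np.random.default_rng(0)
N, d = 5, 8                           # number of source positions, hidden size (toy values)

H_e = rng.normal(size=(N, d))         # encoder hidden states h_e^[1], ..., h_e^[N]
h_d = rng.normal(size=d)              # current decoder hidden state h_d^[t]

e = H_e @ h_d                         # attention scores: dot product with every encoder state
alpha = np.exp(e - e.max())
alpha /= alpha.sum()                  # attention distribution: softmax, values sum to 1
a = alpha @ H_e                       # attention output: weighted sum of the encoder states

# Concatenate the attention output and the decoder state; a full model would now apply
# an output layer f to this vector to predict the next target word.
o_input = np.concatenate([a, h_d])
print(alpha.round(3), o_input.shape)  # weights over the 5 source words, shape (16,)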
FIGURE 2.9: Translation process with attention mechanism (Source: Manning et al. (2022)).

By visualizing the attention distribution, also called alignments (see Bahdanau et al. (2014)), it is easy to observe what the decoder was focusing on and to understand why it chose a specific translation. The x-axis of the plot below corresponds to the words in the source sentence (English) and the y-axis to the words in the generated translation (French). Each pixel shows the weight of the source word for the respective target word in grayscale, where 0 is black and 1 is white. As a result, it becomes apparent which positions in the source sentence were more relevant when generating each target word. As expected, the alignment between English and French is largely monotonic: the pixels are brighter, and therefore the weights are higher, along the main diagonal of the matrix. However, there is an exception, because adjectives and nouns are typically ordered differently between the two languages. Thus, the model (correctly) translated "European Economic Area" into "zone économique européenne".
By jumping over two words ("European" and "Economic"), it aligned "zone" with "area". Then, it looked one word back twice to complete the phrase "zone économique européenne". Additional qualitative analysis has shown that the model alignments are predominantly analogous to our intuition.

FIGURE 2.10: Attention alignments (Source: Bahdanau et al. (2014)).

2.1.5 Transformer

For this section, Manning et al. (2022) constitutes the main source. RNNs are unrolled from one side of the sequence to the other, i.e. from left to right or from right to left. This encodes linear locality, which is a useful heuristic because nearby words often affect each other's meaning. But what happens when distant words need to interact with each other? For instance, if we mention a person at the beginning of a text passage and refer back to them only at the very end, the whole text in between needs to be stepped through (see below). Hence, RNNs take O(sequence length) steps for distant word pairs to interact. Due to gradient problems, it is therefore hard to learn long-distance dependencies. In addition, the linear order is ingrained.
As is well known, however, the sequential structure does not tell the whole story. GPUs can perform multiple calculations simultaneously and could massively reduce the execution time of a deep learning algorithm. However, forward and backward passes lack parallelizability in recurrent models and take O(sequence length) sequential operations: future hidden states cannot be computed in full before past states have been computed. This inhibits training on massive data sets.

FIGURE 2.11: Sequential processing of recurrent model (Source: Manning et al. (2022)).

The figure below indicates the minimum number of steps before the respective state can be calculated.

FIGURE 2.12: Sequential processing of recurrent model with number of steps indicated (Source: Manning et al. (2022)).

After attention had been proven to dramatically increase performance, Google researchers took it further and based the Transformer solely on attention, i.e. without any RNNs. For this reason, the paper in which it was introduced is called "Attention is all you need". Spoiler: it is not quite all we need, but more about that on the following pages.
Transformers have achieved great results in multiple settings such as machine translation and document generation. Their parallelizability allows for efficient pretraining and has led them to become the standard model architecture. In fact, all top models on the popular aggregate benchmark GLUE are pretrained and Transformer-based. Moreover, they have even shown promise outside of NLP, e.g. in Image Classification, Protein Folding and ML for Systems (see Dosovitskiy et al. (2020a), Jumper et al. (2021), Zhou et al. (2020), respectively).

Since recurrence has its flaws, another adjustment of the attention mechanism might be beneficial. Until now, attention was defined from the decoder to the encoder. Alternatively, attention could also go from one state to all states in the same set. This is the definition of self-attention, which is encoder-encoder or decoder-decoder attention (instead of encoder-decoder attention) and represents a cornerstone of the Transformer architecture. The figure below depicts this process, in which each word attends to all words in the previous layer, even though in practice most arrows are eventually omitted.

FIGURE 2.13: Connections of classic attention mechanism (Source: Manning et al. (2022)).
Thinking of self-attention as an approximate hash table makes its intuition easier to grasp. To look up a value, queries are compared against keys in a table. In a hash table, shown on the left side of the figure below, there is exactly one key-value pair for each query (hash). In contrast, in self-attention, each key is matched to varying degrees by each query. Thus, a sum of values weighted by the query-key match is returned.

FIGURE 2.14: Comparison of classic attention mechanism with self-attention with hash tables (Source: Manning et al. (2022)).

The process briefly described in the last paragraph can be summarized by the following steps, which mainly follow Manning et al. (2022). Firstly, a query, key and value vector is derived for each word x_i:

q_i = W^Q x_i,   k_i = W^K x_i,   v_i = W^V x_i.

Secondly, the attention scores have to be calculated:

e_ij = q_i^T k_j.

Thirdly, to normalize the attention scores, the softmax function is applied:

α_ij = softmax(e_ij) = exp(e_ij) / Σ_k exp(e_ik).

Lastly, taking the weighted sum of the values yields the attention output:

a_i = Σ_j α_ij v_j.
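These four steps translate almost line by line into numpy; the sentence length, model dimension and random projection matrices below are arbitrary illustrative choices.

import numpy as np

rng = np.random.default_rng(0)
n, d = 4, 8                            # sequence length and model dimension (toy values)
X = rng.normal(size=(n, d))            # one embedding x_i per word, stacked as rows

# Step 1: derive query, key and value vectors for every word.
W_Q, W_K, W_V = (rng.normal(size=(d, d)) for _ in range(3))
Q, K, V = X @ W_Q, X @ W_K, X @ W_V

# Step 2: attention scores e_ij = q_i^T k_j for all pairs of positions.
E = Q @ K.T                            # shape (n, n)

# Step 3: softmax over j turns each row into a distribution alpha_i.
A = np.exp(E - E.max(axis=1, keepdims=True))
A /= A.sum(axis=1, keepdims=True)

# Step 4: attention output a_i = sum_j alpha_ij * v_j.
out = A @ V                            # shape (n, d): one output vector per word

print(A.round(2))                      # each row sums to 1
print(out.shape)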
Multiple advantages of incorporating self-attention instead of recurrence have been revealed. Since all words interact at every layer, the maximum interaction distance is O(1), which is a crucial upgrade. In addition, the model is deeply bidirectional because each word attends to the context in both directions. As a result of these advances, all word representations of a layer can be computed in parallel. Nevertheless, some issues have to be discussed. Attention does no more than weighted averaging, so without additional neural network components there are no element-wise non-linearities. Their importance cannot be overstated, which shows why attention is not actually all that is needed. Furthermore, bidirectionality is not always desired. In language modelling, the model must specifically not be allowed to simply look ahead and observe more than the objective allows. Moreover, the word order is no longer encoded; we are dealing with a bag of words once again.

Fortunately, the previously mentioned weaknesses have been addressed in the original Transformer architecture proposed by Vaswani et al. (2017c). The first problem can easily be fixed by applying a feed-forward layer to the output of attention. It provides a non-linear activation as well as extra expressive power. Then, for cases in which bidirectionality contradicts the learning objective, future states can be masked so that attention is restricted to previous states. Moreover, the loss of word order can be corrected by adding position representations to the inputs.
The more complex deep learning models are, the closer they come to modelling the complexity of the real world. That is why the Transformer encoder and decoder consist of many layers of self-attention with feed-forward networks, which are necessary to extract both syntactic and semantic features from sentences. Otherwise, using word embeddings, which are semantically deep representations of words, would be unnecessary (Sejnowski, 2020). At the same time, training deep networks can be troublesome. Therefore, some tricks are applied to help with the training process. One of them is to pass the "raw" embeddings directly to the next layer, which prevents forgetting or misrepresenting important information as it is passed through many layers. This technique is called residual connections and is also believed to smoothen the loss landscape. Additionally, it is problematic to train the parameters of a given layer when its inputs keep shifting because of the layers beneath. Reducing this uninformative variation by normalizing within each layer to mean zero and standard deviation one weakens the effect. Another challenge is caused by the dot product tending to take on extreme values, because its variance scales with increasing dimensionality d_k. It is solved by scaled dot-product attention (see Figure 2.15), which consists of computing the dot products of the query with the keys, dividing them by the square root of the key dimension, √d_k, and then applying the softmax function to receive the weights of the values.

FIGURE 2.15: Scaled dot-product attention (Source: Vaswani et al. (2017c)).
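The residual connection and the layer normalization together form the "Add & Norm" step that appears throughout the architecture. The following minimal sketch shows this step around a generic sublayer; the dimensions, the placeholder sublayer and the small epsilon are illustrative assumptions (a real Transformer additionally learns a gain and bias in the normalization).

import numpy as np

rng = np.random.default_rng(0)
n, d = 4, 8                                      # sequence length, model dimension (toy values)

def layer_norm(x, eps=1e-5):
    # Normalize every position to mean 0 and standard deviation 1 across its features.
    mean = x.mean(axis=-1, keepdims=True)
    std = x.std(axis=-1, keepdims=True)
    return (x - mean) / (std + eps)

def add_and_norm(x, sublayer):
    # Residual connection: the "raw" input is passed on unchanged, added to the
    # sublayer output, and the sum is then normalized.
    return layer_norm(x + sublayer(x))

W = rng.normal(size=(d, d))

def feed_forward(x):
    # Stand-in for an attention or feed-forward sublayer.
    return np.maximum(0.0, x @ W)

X = rng.normal(size=(n, d))
Y = add_and_norm(X, feed_forward)
print(Y.mean(axis=-1).round(6))                  # approximately 0 per position
print(Y.std(axis=-1).round(3))                   # approximately 1 per position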
Attention learns where to search for relevant information. Surely, attending to different types of information in a sentence at once delivers even more promising results. To implement this, the idea is to have multiple attention heads per layer. While one attention head might learn to attend to tense information, another might learn to attend to relevant topics. Thus, each head focuses on separate features and constructs the value vectors differently. Multi-headed self-attention is implemented by simply creating n independent attention mechanisms and combining their outputs.

FIGURE 2.16: Multi-head attention (Source: Vaswani et al. (2017c)).

At this point, every part that constitutes the encoder in the Transformer architecture has been introduced (see Figure 2.17). First, positional encodings are added to the input embeddings. There are multiple options to realize this step, e.g. through sinusoids. The multi-head attention just mentioned follows. "Add & Norm" stands for the residual connections and the normalization layer. A feed-forward network follows, which is also accompanied by residual connections and a normalization layer. All of it is repeated n times.
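One common way to realize the positional encodings mentioned above is the sinusoidal scheme of the original Transformer paper; the short sketch below follows that formula, with the sequence length and model dimension chosen arbitrarily (and d_model assumed to be even).

import numpy as np

def sinusoidal_positional_encoding(n_positions, d_model):
    # PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
    # PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))
    positions = np.arange(n_positions)[:, None]               # shape (n, 1)
    i = np.arange(d_model // 2)[None, :]                      # shape (1, d/2)
    angles = positions / np.power(10000.0, 2 * i / d_model)   # shape (n, d/2)
    pe = np.zeros((n_positions, d_model))
    pe[:, 0::2] = np.sin(angles)                              # even dimensions
    pe[:, 1::2] = np.cos(angles)                              # odd dimensions
    return pe

# The encodings are simply added to the input word embeddings of the same shape.
pe = sinusoidal_positional_encoding(n_positions=6, d_model=8)
print(pe.shape)          # (6, 8): one d_model-dimensional position signal per position
print(pe[:3].round(2))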
For the decoder, the individual components are similar. One difference is that the outputs first go through masked multi-head attention before the multi-head attention and the feed-forward network (again with residual connections and layer normalization). It is critical to ensure that the decoder cannot peek at the future. To achieve this, the set of keys and queries could be modified at every time step to only include past words; however, that would be very inefficient. Instead, to enable parallelization, future states are masked by setting their attention scores to −∞. After the decoder block has also been repeated n times, a linear layer projects the embeddings into a larger vector that has the length of the vocabulary size. At last, a softmax layer generates a probability distribution over the possible words.

FIGURE 2.17: Transformer architecture (Source: Vaswani et al. (2017c)).
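Combining the scaling from Figure 2.15 with this masking trick, a single decoder self-attention step can be sketched as follows; the shapes and random inputs are again purely illustrative.

import numpy as np

rng = np.random.default_rng(0)
n, d_k = 5, 8                              # target length and key dimension (toy values)
Q = rng.normal(size=(n, d_k))
K = rng.normal(size=(n, d_k))
V = rng.normal(size=(n, d_k))

def masked_scaled_dot_product_attention(Q, K, V):
    d_k = Q.shape[-1]
    scores = Q @ K.T / np.sqrt(d_k)        # scaling keeps the variance of the scores in check
    # Causal mask: position i may only attend to positions j <= i, so every future
    # position receives a score of -inf and therefore a softmax weight of exactly 0.
    mask = np.triu(np.ones((len(Q), len(K)), dtype=bool), k=1)
    scores = np.where(mask, -np.inf, scores)
    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
    weights /= weights.sum(axis=-1, keepdims=True)
    return weights @ V, weights

out, weights = masked_scaled_dot_product_attention(Q, K, V)
print(weights.round(2))                    # the upper triangle is exactly 0
print(out.shape)                           # (5, 8)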
2.1.6 Transformer architectures: BERT, T5, GPT-3

"You shall know a word by the company it keeps", an adage by linguist John Rupert Firth from 1957 goes. Even earlier, in 1935, he stated that "... the complete meaning of a word is always contextual, and no study of meaning apart from a complete context can be taken seriously". The quotes of the famous linguist sum up the motivation to learn word meaning and context perfectly. Many years later, in 2017, pretraining word embeddings started. However, some complications arise from solely pretraining the first part of the network. For instance, to teach the model all contextual aspects of language, the training data for the downstream task (e.g. question answering) needs to be adequate. Additionally, most of the parameters are usually randomly initialized. Figure 2.18 presents the network discussed, in which the word "movie" gets the same embedding irrespective of the sentence it appears in. On the contrary, parameters in modern NLP architectures are initialized via pretraining (see Figure 2.19).
Furthermore, during the pretraining, certain input parts are hidden to train the model to reconstruct them. This leads to building suitable parameter initializations and robust probability distributions over language.

FIGURE 2.18: Partly pre-trained model (Source: Manning et al. (2022)).

Classic machine learning does not match human learning: a model is trained from scratch and is only able to learn from the training data. In contrast, human beings already have prior knowledge they can apply to new tasks. Transfer learning emulates this by using an already trained network. The main idea is to use a model that was pretrained on a hard, general language understanding task using endless amounts of data, so that it eventually contains the best possible approximation of language understanding. Afterwards, the training data for the new task is applied to slightly modify the weights of
the pretrained model, which is referred to as fine-tuning (Manning et al., 2022).

FIGURE 2.19: Jointly pre-trained model (Source: Manning et al. (2022)).

The specific architecture of a transformer model affects the type of pretraining and the favourable use cases. In the following, three different but very influential transformer architectures will be discussed: BERT can be seen as stacked encoders (Devlin et al., 2018b), T5 aims to combine the good parts of encoders and decoders (Raffel et al., 2019a), while the GPT models are stacked decoders (Brown et al., 2020).

2.1.6.1 BERT

Transfer learning led to state-of-the-art results in natural language processing. One of the architectures that led the way was BERT, which stands for Bidirectional Encoder Representations from Transformers. It receives bidirectional context, which is why it is not a natural fit for language modelling. To train it on this objective regardless, masked language modelling was proposed. The main idea is to cover up a fraction of the input words and let the model predict them. In this way, the LM objective can be used while sustaining connections to words in the future.
The masked LM objective for BERT randomly selects 15% of all word tokens in each sequence for prediction. Of those, 80% are replaced by the MASK token, 10% by a random token, and 10% remain unchanged. Moreover, because the masked words are not seen during the fine-tuning phase, the model cannot get complacent and has to rely on strong representations of the non-masked words. Initially, BERT had an additional objective of deciding whether one sentence follows another, which is known as next sentence prediction. However, it was dropped in later work due to having an insignificant effect. BERT is hugely versatile and was greatly popular after its release. Fine-tuning BERT led to outstanding results on a variety of applications, including question answering, sentiment analysis and text summarization. Due to its encoder-only design, however, pretrained decoders outperform pretrained encoders like BERT if the task involves generating sequences. Even though it is not recommended for autoregressive generation, up to this day "small" models like BERT are applied as general tools for numerous tasks.
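A toy sketch of this corruption scheme is given below; the vocabulary and sentence are made up, and the snippet only mimics the idea of the original BERT preprocessing rather than reproducing it.

    import random

    def mask_tokens(tokens, vocab, mask_rate=0.15, seed=0):
        random.seed(seed)
        corrupted, targets = list(tokens), {}
        for i, tok in enumerate(tokens):
            if random.random() < mask_rate:      # select roughly 15% of the positions
                targets[i] = tok                 # the model has to predict the original token
                r = random.random()
                if r < 0.8:
                    corrupted[i] = "[MASK]"      # 80%: replace with the MASK token
                elif r < 0.9:
                    corrupted[i] = random.choice(vocab)  # 10%: replace with a random token
                # remaining 10%: keep the original token unchanged
        return corrupted, targets

    vocab = ["the", "movie", "was", "great", "boring", "actor"]
    print(mask_tokens(["the", "movie", "was", "great"], vocab))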
2.1.6.2 T5

The Text-To-Text Transfer Transformer (T5) is a model that can be regarded as an application of the insights gathered by an extensive empirical study searching for the best transfer learning techniques. It is pretrained on the Colossal Clean Crawled Corpus (C4), an open-source dataset. Raffel et al. (2019a) found that the best pretraining objective to use for the encoder component was span corruption. In short, word groups (spans) of different lengths are replaced with unique placeholders, and the model is asked to decode them. Text preprocessing is necessary for its implementation. For the decoder, it is still a language modelling task. Compared to models like BERT, which can only output a span of the input or a class label, T5 reframes all NLP tasks into a unified text-to-text format, where inputs and outputs always consist of text strings. As a result, the same model, loss function, and hyperparameters can be used on any NLP task, such as machine translation, document summarization, question answering, and classification tasks like sentiment analysis. T5 can even be applied to regression tasks by training it to predict the string representation of a number (and not the number itself). Examples of potential use cases are depicted in Figure 2.20 below.

FIGURE 2.20: Applications of T5 model (Source: Raffel et al. (2019a)).
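To illustrate the unified text-to-text format, the input/output pairs below spell out some of the examples shown in Figure 2.20 as plain strings; a task prefix tells the model which task to perform, and even the regression target is just a string.

    # Every task is "text in, text out" (examples as in Figure 2.20).
    t5_examples = [
        ("translate English to German: That is good.", "Das ist gut."),
        ("cola sentence: The course is jumping well.", "not acceptable"),
        ("stsb sentence1: The rhino grazed on the grass. "
         "sentence2: A rhino is grazing in a field.", "3.8"),
    ]

    for model_input, target in t5_examples:
        print(f"{model_input!r} -> {target!r}")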
2.1.6.3 GPT-3

As previously stated, the neural architecture influences the type of pretraining. The original GPT architecture consists of a Transformer decoder with 12 layers (Radford et al., 2018). For decoders, it is sensible to simply pretrain them as language models. Afterwards, they can be used as generators and fine-tuned on their probability of predicting the next word conditioned on the previous words. The models are suitable for tasks similar to the training, including any type of dialogue and document summarization. Transformer language models are great for transfer learning. They are fine-tuned by randomly initializing a softmax classifier on top of the pretrained model and training both (with only a very small learning rate and a small number of epochs) so that the gradient propagates through the whole network.
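A minimal PyTorch-style sketch of this recipe is shown below. The backbone is only a stand-in for a pretrained transformer, so the snippet illustrates the general pattern (a randomly initialised classifier on top, a very small learning rate, gradients flowing through the whole network) rather than any specific model.

    import torch
    import torch.nn as nn

    # Stand-in for a pretrained transformer; in practice this would be e.g. a GPT or BERT encoder.
    pretrained_backbone = nn.Sequential(nn.Linear(16, 32), nn.ReLU())

    class FineTuner(nn.Module):
        def __init__(self, backbone, hidden_size, num_classes):
            super().__init__()
            self.backbone = backbone
            self.classifier = nn.Linear(hidden_size, num_classes)  # randomly initialised head

        def forward(self, x):
            return self.classifier(self.backbone(x))   # logits; the softmax sits inside the loss

    model = FineTuner(pretrained_backbone, hidden_size=32, num_classes=2)
    # Small learning rate and few epochs: the gradient flows through everything,
    # but the pretrained weights are only modified slightly.
    optimizer = torch.optim.Adam(model.parameters(), lr=2e-5)
    loss = nn.CrossEntropyLoss()(model(torch.randn(4, 16)), torch.tensor([0, 1, 0, 1]))
    loss.backward(); optimizer.step()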
The success of BERT in 2018 prompted a "gold rush" in NLP, in which ever greater language models were created. One that topped the headlines and used a custom supercluster for computation was the third iteration of the GPT architecture by OpenAI, known as GPT-3. Figure 2.21 reveals why GPT-3 is a famous example of current research focusing on scaling up neural language models. While the largest T5 model has 11 billion parameters, GPT-3 has 175 billion parameters. Moreover, the training data set contains around 500 billion tokens of text, while the average young American child hears around 6 million words per year (Hart and Risley, 1995). The results of huge language models suggest that they perform some form of learning (without gradient steps) simply from examples provided via context. The tasks are specified by the in-context examples, and the conditional probability distribution simulates performing the task to an extent.

FIGURE 2.21: Comparison of number of parameters between Transformer architectures (Source: Saifee (2020)).

2.1.7 Current Topics

2.1.7.1 Concerns regarding growing size of Language Models

As the last chapter ended with GPT-3 and emphasized the concerning trend of ever larger language models, one could ask which other costs arise from these developments. Risks and harms, among them environmental and financial costs, have been studied by Bender et al.
(2021). They state that marginalized communities are not only less likely to benefit from LM progress, but also more likely to suffer from the environmental repercussions of increasing resource consumption. Strubell et al. (2019a) estimated that training a Transformer (big) model resulted in 249t of CO2. To compare, an average human is responsible for approximately 5t of CO2 per year (Ritchie et al., 2020). In addition, they discovered that an estimated increase of 0.1 in BLEU score increased computation costs by $150,000 (for English to German translations). Furthermore, larger models require more data to sufficiently train them. This has resulted in large but poorly documented training data sets. Multiple risks can be mitigated if there is a common understanding of the model's learnings. Moreover, it has been argued that datasets consisting of web data over-represent hegemonic views and encode bias towards marginalized communities. This is among other factors due to internet access being unevenly distributed. In particular, there is an over-representation of younger internet users and those from developed countries. It is generally naive to educate AI systems on all aspects of the complex world and hope for the beautiful to prevail (Bender et al.
, 2021).

2.1.7.2 Improving Understanding of Transformer-based models

The results of transformer-based models clearly show that they deliver successful results. However, it is less clear why. The size of the models makes it difficult to experiment with them. Nevertheless, having only a limited understanding restrains researchers from coming up with further improvements. Therefore, multiple papers analysed BERT's attention in search of an improved understanding of large transformer models. BERT is one of the smaller models among the more popular ones, and its attention is naturally interpretable because the attention weight indicates how significant a word is for the next representation of the current word (Clark et al., 2019). In the following, some of the findings are going to be shared. BERT representations are rather hierarchical than linear, and they include information about parts of speech, syntactic chunks and roles (Lin et al., 2019; Liu et al., 2019a). Furthermore, BERT has semantic knowledge. For example, it can recognize that "to tip a chef" is better than "to tip a robin" but worse than "to tip a waiter" (Ettinger, 2019).
However, it makes sense that BERT has issues with knowledge that is assumed and not mentioned, which especially refers to visual and perceptual properties (Da and Kasai, 2019). Additionally, BERT struggles with inferences: e.g., even though it is known that "people walk into houses" and "houses are big", it cannot infer that "houses are bigger than people" (Forbes et al., 2019). While it is true that different transformer heads attend to various patterns (see Figure 2.22), interestingly, most of them could be neglected without notable performance loss (Voita et al., 2019). Probing attention maps can be tedious, but it allows gaining knowledge of common patterns, such as an unexpectedly large number of heads focusing on the delimiter token [SEP].

FIGURE 2.22: Common patterns of attention heads (Source: Clark et al. (2019)).
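For readers who want to inspect such attention maps themselves, the sketch below shows one way to extract them; it assumes the Hugging Face transformers library and the "bert-base-uncased" checkpoint are available, which are a common practical choice but not part of the works cited above.

    from transformers import AutoModel, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    model = AutoModel.from_pretrained("bert-base-uncased", output_attentions=True)

    # Example sentence taken from Figure 2.22.
    inputs = tokenizer("found in taiwan, the wingspan is 24-28 mm", return_tensors="pt")
    attentions = model(**inputs).attentions      # one tensor per layer

    # Each tensor has shape (batch, num_heads, seq_len, seq_len); individual heads
    # can then be plotted to reveal patterns such as attention to [SEP] or to periods.
    print(len(attentions), attentions[0].shape)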
2.1.7.3 Few-Shot Learning

For NLP tasks, the model is usually trained on a set of labelled examples and is expected to generalize to unseen data. Annotated data is not only costly but also difficult to gather for numerous languages, domains, and tasks. In practice, there is often only a very limited amount of labelled examples. Consequently, few-shot learning is a highly relevant research area (Schick and Schütze, 2020). It refers to a model that is given only a limited number of demonstrations to guide its predictions. Referring back to the concerns about the growing size of language models, the benefits of lower computational and environmental costs also have to be mentioned. Traditional fine-tuning uses a large corpus of example tasks, and the model is updated repeatedly with gradient steps so that it adapts to the task with minimal accuracy error. In contrast, few-shot applications have to complete tasks at test time with only forward passes. They have three main parts: the task description, the examples, and the prompt. In Figure 2.23, the task is a translation from English to French; a few examples as well as the word that should be translated are given. Moreover, zero-shot and one-shot learning refer to the model predicting with no and one learned example, respectively (Brown et al., 2020). It is complicated to create the few-shot examples, since the application relies on them to express the task. This is why smaller models are susceptible to examples written unfavourably.

FIGURE 2.23: Few-shot learning (Source: Brown et al. (2020)).
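Written out as code, the prompt from Figure 2.23 is nothing more than a single string that the frozen language model completes with forward passes only; the generate call at the end is a hypothetical placeholder for whatever text-completion interface is used.

    # Few-shot prompt in the style of Figure 2.23: task description, examples, prompt.
    prompt = (
        "Translate English to French:\n"      # task description
        "sea otter => loutre de mer\n"        # examples
        "peppermint => menthe poivrée\n"
        "plush girafe => girafe peluche\n"
        "cheese =>"                           # prompt: the model should continue the pattern
    )

    # completion = language_model.generate(prompt)   # hypothetical LM interface
    print(prompt)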
In Brown et al. (2020), it was shown that few-shot performance scales with the number of model parameters. Even though GPT-3's in-context learning improved few-shot prompting capabilities, it is still sensitive to the order of training examples, decoding strategy, and hyperparameter selection. All of this, combined with the fact that current research uses larger or held-out data sets, leads to the suspicion that the true few-shot ability of language models is overestimated (Perez et al., 2021a). Moreover, Lialin et al. (2022) have found that common transformer models could not resolve compositional questions in a zero-shot fashion and that the model's parameter count does not correlate with performance. This indicates a limitation for zero-shot prompting with the existing pre-training objectives.
However, different models provided the best accuracy with regard to different symbolic reasoning tasks. This suggests that optimization or masking strategies could be more significant than the pre-training, data set size or model architecture.

2.1.8 Summary

Natural Language Processing has been one of the most exciting fields of machine learning in the last decade, considering all the breakthroughs discussed in this work. Word embeddings allowed developers to encode words as dense vectors that capture their underlying semantic content. In this way, similar words are embedded close to each other in a lower-dimensional feature space. Another important challenge was solved by encoder-decoder (also called sequence-to-sequence) architectures, which made it possible to map input sequences to output sequences of different lengths. They are especially useful for complex tasks like machine translation, video captioning or question answering. A significant state-of-the-art technique is attention, which enables models to actively shift their focus, just like humans do. It allows following one thought at a time while suppressing information irrelevant to the task. As a consequence, it has been shown to significantly improve performance for tasks like machine translation. By giving the decoder access to directly look at the source, the bottleneck is avoided and, at the same time, it provides a shortcut to faraway states and thus helps with the vanishing gradient problem.
One of the most recent data modelling techniques is the transformer, which is solely based on attention and does not have to process the input data sequentially. Therefore, the deep learning model is better at remembering context introduced earlier in long sequences. It is currently the dominant paradigm in NLP and makes better use of GPUs because it can perform parallel operations. Transformer architectures like BERT, T5 or GPT-3 are pre-trained on a large corpus and can be fine-tuned for specific language tasks. They can generate stories, poems, code and much more. Currently, there seems to be breaking transformer news nearly every week with no sign of slowing down. This is why many trends could be recognized as relevant current topics. One of them is the increasing concern regarding the growing size of language models and the correlated environmental and financial costs. Another active research aspect is concerned with improving the understanding of transformer-based models to further advance them. Additionally, there are many studies about achieving respectable results on language modelling tasks after only learning from a few examples, which is known as few-shot learning.

2.2 State-of-the-art in Computer Vision

Author: Vladana Djakovic

Supervisor: Daniel Schalk

2.2.1 History

The first research about visual perception comes from neurophysiological studies performed in the 1950s and 1960s on cats. The researchers used cats as a model to understand how human vision is composed.
Scientists concluded that human vision is hierarchical: neurons detect simple features like edges, followed by more complex features like shapes, and then even more complex visual representations. Inspired by this knowledge, computer scientists focused on recreating human neurological structures. At around the same time, as computers became more advanced, computer scientists worked on imitating the behavior of human neurons and simulating a hypothetical neural network. In his book "The Organization of Behaviour" (1949), Donald Hebb stated that neural pathways strengthen over each successive use, especially between neurons that tend to fire at the same time, thus beginning the long journey towards quantifying the complex processes of the brain. The first Hebbian network, inspired by this neurological research, was successfully implemented at MIT in 1954 (Jaspreet, 2019). New findings led to the establishment of the field of artificial intelligence in 1956 at Dartmouth College. Scientists began to develop ideas and research how to create techniques that would imitate the human eye. In 1959, early research on developing neural networks was performed at Stanford University, where the models "ADALINE" and "MADALINE" (Multiple ADAptive LINear Elements) were developed. Those models aimed to recognize binary patterns and could predict the next bit (his, 2022).
In 2012, a breakthrough in Computer Vision happened at the ImageNet Large Scale Visual Recognition Challenge (ILSVRC). The team from the University of Toronto introduced a deep neural network called AlexNet (Krizhevsky et al., 2012a) that changed the field of artificial intelligence and Computer Vision (CV). AlexNet achieved an error rate of 16.4%. From then until today, Computer Vision has been one of the fastest developing fields. Researchers are competing to develop a model that would be the most similar to the human eye and help humans in their everyday life. In this chapter, the author will describe only a few recent state-of-the-art models.

2.2.2 Supervised and unsupervised learning

As part of artificial intelligence (AI) and machine learning (ML), there are two basic approaches: supervised learning and unsupervised learning. Supervised learning (Education, 2020a) is used to train algorithms on labeled datasets that accurately classify data or predict outcomes. With labeled data, the model can measure its accuracy and learn over time. Among others, we can distinguish between two common supervised learning problems: classification and regression. In unsupervised learning (Education, 2020b), unlabelled datasets are analyzed and clustered using machine learning algorithms.
These algorithms aim to discover hidden patterns or data groupings without previous human intervention. The ability to find similarities and differences in information is mainly used for three main tasks: clustering, association, and dimensionality reduction. Solving problems where the dataset can be both labeled and unlabeled requires a semi-supervised approach that lies between supervised and unsupervised learning. It is useful when extracting relevant features from complex and high-volume data, e.g. medical images. Recently, a new research topic has appeared in the machine learning community: self-supervised learning. Self-supervised learning is a process where the model trains itself to learn one part of the input from another (techslang, 2020). As a subset of unsupervised learning, it involves machines labeling, categorizing, and analyzing information independently and drawing conclusions based on connections and correlations. It can also be considered an autonomous form of supervised learning since it does not require human input to label data. Unlike unsupervised learning, self-supervised learning does not focus on clustering or grouping (Shah, 2022). One part of self-supervised learning is contrastive learning, which is used to learn the general features of an unlabeled dataset by identifying similar and dissimilar data points. It is utilized to train the model to learn about the data without any annotations or labels (Tiu, 2021).
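As a rough illustration of the contrastive idea (a generic margin-based formulation, not a specific published method), embeddings of similar data points are pulled together while embeddings of dissimilar points are pushed apart:

    import numpy as np

    def cosine(u, v):
        return float(u @ v / (np.linalg.norm(u) * np.linalg.norm(v)))

    def contrastive_loss(anchor, positive, negative, margin=0.5):
        # encourage the anchor to be closer to the positive (similar) sample
        # than to the negative (dissimilar) one, by at least a margin
        return max(0.0, margin - cosine(anchor, positive) + cosine(anchor, negative))

    rng = np.random.default_rng(0)
    anchor = rng.normal(size=16)
    positive = anchor + 0.1 * rng.normal(size=16)   # e.g. an augmented view of the same image
    negative = rng.normal(size=16)                  # a different image
    print(contrastive_loss(anchor, positive, negative))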
2.2.3 Scaling networks

Ever since the introduction of AlexNet in 2012, the problem of scaling convolutional neural networks (ConvNets) has been a topic of active research. A ConvNet can be scaled in three dimensions: depth, width, or image size. One of the first studies, in 2015, showed that network depth is crucial for image classification. The question of whether stacking more layers enables the network to learn better led to deep residual networks called ResNets (He et al., 2015), which will be described in this work. Later on, scaling networks by their depth became the most popular way to improve their performance. The second solution is to scale ConvNets by their width. Wider networks tend to be able to capture more fine-grained features and are easier to train (Zagoruyko and Komodakis, 2016). Lastly, scaling the image resolution can improve the network's performance. With higher-resolution input images, ConvNets can capture more fine-grained patterns. GPipe (Huang et al., 2018) is one of the most famous networks created by this technique. The question of whether scaling by all three dimensions is possible was answered by Tan and Le (2019a) in the work presenting EfficientNet. This network was built by scaling up ConvNets in all three dimensions and will also be described here.
2.2.4 Deep residual networks

Deep residual networks, called ResNets (He et al., 2015), were presented as the answer to the question of whether stacking more layers would enable a network to learn better. Until then, one obstacle to simply stacking layers was the problem of vanishing/exploding gradients. It has been primarily addressed by normalized initialization and intermediate normalization layers, which enabled networks with tens of layers to start converging under stochastic gradient descent (SGD) with backpropagation. Another obstacle is the degradation problem: as the network depth increases, accuracy saturates and then degrades rapidly. Such degradation is not caused by overfitting, and adding more layers to a suitably deep model leads to higher training error, which indicates that not all systems are similarly easy to optimize. For example, consider a shallower architecture and its deeper counterpart that adds more layers. One way to avoid the degradation problem would be to construct the deeper model such that the added layers are identity mappings and the other layers are copied from the shallower model. Such a deeper model should produce no higher training error than its shallower counterpart. In practice, however, solvers struggle to find solutions that are comparably good or better. The solution to this degradation problem proposed by He et al. (2015) is a deep residual learning framework.
2.2.4.1 Deep Residual Learning

2.2.4.1.1 Residual Learning

The idea of residual learning is to replace the approximation of the underlying mapping H(x), which is approximated by a few stacked layers (not necessarily the entire net), with an approximation of the residual function F(x) := H(x) − x. Here x denotes the input to the first of these layers, and it is assumed that inputs and outputs have the same dimensions. The original function thus takes the form F(x) + x. A counter-intuitive phenomenon about degradation motivated this reformulation. The new deeper model should not have a higher training error than a construction using identity mappings. However, due to the degradation problem, solvers may have difficulties approximating identity mappings by multiple non-linear layers. With the residual learning reformulation, the solver can drive the weights of the non-linear layers toward zero to approach identity mappings if they are optimal. In general, identity mappings are not optimal, but the reformulation may help to pre-condition the problem: when an optimal function is closer to an identity mapping than to a zero mapping, finding perturbations with reference to an identity mapping should be easier than learning the function from scratch.
2.2.4.1.2 Identity Mapping by Shortcuts

Residual learning is adopted for every few stacked layers, where a building block is defined as:

y = F(x, {W_i}) + x    (2.1)

Here, x and y denote the input and output vectors of the layers. Figure 2.24 visualizes the building block.

FIGURE 2.24: Building block of residual learning (He et al., 2015).

The function F(x, {W_i}) represents the residual mapping that is to be learned. For the example with two layers from Figure 2.24, F = W_2 σ(W_1 x), in which σ denotes the ReLU activation function. Biases are left out to simplify the notation. The operation F + x is conducted with a shortcut connection and element-wise addition. Afterward, a second non-linearity, i.e., σ(y), is applied.
The shortcut connections in Equation (2.1) neither add extra parameters nor increase computational complexity, and they enable a comparison between plain and residual networks that have the same number of parameters, depth, width, and computational cost (except for the negligible element-wise addition). The dimensions of x and F in Equation (2.1) must be equal. Alternatively, to match the dimensions, a linear projection W_s can be applied by the shortcut connection:

y = F(x, {W_i}) + W_s x    (2.2)

A square matrix W_s can also be used in Equation (2.2). However, experiments showed that identity mapping is sufficient to address the degradation problem; therefore, W_s is only used to match dimensions. Although more levels are possible, experiments were conducted with the function F having two or three layers, without specifying its exact form. If F had only one layer (Equation (2.1)), it would be comparable to a linear layer, y = W_1 x + x. The notation above refers to fully-connected layers, but convolutional layers were used as well: F(x, {W_i}) can represent multiple convolutional layers, and the two feature maps are added element-wise, channel by channel.
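As a minimal PyTorch sketch of Equations (2.1) and (2.2) (an illustration, not the authors' code), a two-layer residual building block with an optional projection shortcut can be written as follows; the 1 × 1 convolution plays the role of W_s and is only used when the dimensions of x and F(x) differ.

```python
import torch
import torch.nn as nn

class ResidualBlock(nn.Module):
    """y = F(x, {W_i}) + x, with F realized as two 3x3 convolutions."""

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        # Identity shortcut (Eq. 2.1) when shapes match, projection W_s (Eq. 2.2) otherwise.
        if stride != 1 or in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        else:
            self.shortcut = nn.Identity()

    def forward(self, x):
        residual = self.bn2(self.conv2(self.relu(self.bn1(self.conv1(x)))))
        return self.relu(residual + self.shortcut(x))  # second non-linearity after the addition

# toy usage: a block that halves the spatial size and doubles the channels
block = ResidualBlock(64, 128, stride=2)
print(block(torch.randn(1, 64, 56, 56)).shape)  # torch.Size([1, 128, 28, 28])
```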
2.2.4.2 Network Architectures

Various plain/residual networks were tested to construct an efficient residual network. The networks were trained on benchmark datasets, e.g. the ImageNet dataset, that are used for comparing network architectures. Figure 2.25 shows that every residual network needs a plain baseline network, inspired by the VGG network (Simonyan and Zisserman, 2014), on which identity mapping by shortcuts is applied.

Plain Network: The philosophy of VGG nets mainly inspires the plain baselines. The convolutional layers, which usually have 3 × 3 filters, follow two rules: feature maps with the same output size have the same number of filters; and if the size of a feature map is halved, the number of filters per layer is doubled to maintain the time complexity per layer. Convolutional layers with a stride of 2 perform downsampling directly. A global average pooling layer and a 1000-way fully-connected layer with softmax are at the end of the network. The number of weighted layers sums up to 34 (Figure 2.25, middle). Compared to VGG nets, this model has fewer filters and lower complexity (Figure 2.25, left).
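The two rules above translate directly into a small helper (a hypothetical sketch, not the original implementation): a stage keeps a fixed filter count, its first convolution can use stride 2 to halve the feature map, and the caller doubles the filter count whenever that happens.

```python
import torch
import torch.nn as nn

def make_plain_stage(in_channels, out_channels, num_layers, downsample):
    """Stack of 3x3 conv layers following the two rules:
    same output size -> same number of filters; halved size -> doubled filters (caller's choice)."""
    layers = []
    for i in range(num_layers):
        layers += [
            nn.Conv2d(in_channels if i == 0 else out_channels, out_channels,
                      kernel_size=3,
                      stride=2 if (downsample and i == 0) else 1,  # stride-2 conv downsamples directly
                      padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
    return nn.Sequential(*layers)

# e.g. going from 64 filters at 56x56 to 128 filters at 28x28
stage = make_plain_stage(64, 128, num_layers=4, downsample=True)
print(stage(torch.randn(1, 64, 56, 56)).shape)  # torch.Size([1, 128, 28, 28])
```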
Residual Network: Based on the above plain network, additional shortcut connections (Figure 2.25, right) turn the network into its residual counterpart. The identity shortcuts (Equation (2.1)) can be used directly when the input and output have the same dimensions (solid-line shortcuts in Figure 2.25). For different dimensions (dotted-line shortcuts in Figure 2.25), two options are considered: the shortcut still performs identity mapping, with extra zero entries padded to cope with the increased dimensions and no new parameters added; or the projection shortcut in Equation (2.2) is used to match dimensions (via 1 × 1 convolutions). In both cases, the shortcuts use a stride of two when they go across feature maps of two sizes.

2.2.5 EfficientNet

Until Tan and Le (2019b) introduced EfficientNet, it was popular to scale only one of the three dimensions – depth, width, or image size. Their empirical study shows that it is critical to balance all network dimensions, which can be achieved by simply scaling each of them with a constant ratio. Based on this observation, a simple yet effective compound scaling method was proposed, which uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if 2^N times more computational resources are available, the network depth can be increased by α^N, the width by β^N, and the image size by γ^N.
Here α, β, γ are constant coefficients determined by a small grid search on the original miniature model. Figure 2.26 illustrates the difference between this scaling method and conventional methods. A compound scaling method makes sense when the input image is bigger, because a larger receptive field requires more layers, and more channels are needed to capture fine-grained patterns. Theoretically and empirically, a special relationship between network width and depth has been established (Raghu et al., 2016).

FIGURE 2.25: Architecture of ResNet (He et al., 2015).
Existing MobileNets (Howard et al., 2017) and ResNets are used to demonstrate the new scaling method.

FIGURE 2.26: Model scaling (Tan and Le, 2019b).
2.2.5.1 Compound Model Scaling

2.2.5.1.1 Problem Formulation

A ConvNet layer i can be defined as a function Y_i = F_i(X_i), with the operator F_i, output tensor Y_i, and input tensor X_i of shape (H_i, W_i, C_i), where H_i and W_i are the spatial dimensions and C_i is the channel dimension. A ConvNet N can then be written as a list of composed layers:

N = F_k ⊙ … ⊙ F_2 ⊙ F_1(X_1) = ⨀_{j=1…k} F_j(X_1)

In practice, these layers are often partitioned into multiple stages, and all layers in each stage share the same architecture. For example, ResNet has five stages, with all layers in every stage being of the same convolutional type except for the first layer, which performs down-sampling. Therefore, a ConvNet can be defined as:

N = ⨀_{i=1…s} F_i^{L_i} (X_{⟨H_i, W_i, C_i⟩})

where F_i^{L_i} denotes layer F_i repeated L_i times in stage i, and ⟨H_i, W_i, C_i⟩ is the shape of the input tensor X of layer i. In contrast to regular ConvNet design, which focuses on finding the best layer architecture F_i, model scaling expands the network length (L_i), width (C_i), and/or resolution (H_i, W_i) without changing the F_i predefined in the baseline network. Although model scaling simplifies the design problem for new resource constraints by fixing F_i, a large design space (L_i, H_i, W_i, C_i) for each layer still remains to be explored. To further reduce the design space, all layers are restricted to be scaled uniformly with a constant ratio.
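The stage-wise definition N = ⨀_i F_i^{L_i} translates directly into code. The following toy sketch (with made-up stage settings, not the EfficientNet baseline) composes a network by repeating each stage operator F_i exactly L_i times; model scaling would then only multiply L_i, C_i, and the input resolution of such a fixed skeleton.

```python
import torch
import torch.nn as nn

def conv_op(channels_in, channels_out, downsample):
    """One candidate layer operator F_i: 3x3 conv + BN + ReLU."""
    return nn.Sequential(
        nn.Conv2d(channels_in, channels_out, 3, stride=2 if downsample else 1, padding=1, bias=False),
        nn.BatchNorm2d(channels_out),
        nn.ReLU(inplace=True),
    )

def build_convnet(stage_specs):
    """N = F_s^{L_s} ∘ ... ∘ F_1^{L_1}: repeat the operator F_i exactly L_i times in stage i."""
    layers = []
    for in_c, out_c, L_i in stage_specs:
        for j in range(L_i):
            # only the first layer of each stage changes channels and resolution
            layers.append(conv_op(in_c if j == 0 else out_c, out_c, downsample=(j == 0)))
    return nn.Sequential(*layers)

# (C_in, C_i, L_i) per stage -- hypothetical numbers for illustration
net = build_convnet([(3, 32, 2), (32, 64, 3), (64, 128, 3)])
print(net(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 128, 28, 28])
```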
In this case, the goal is to maximize the model accuracy for any given resource constraint, which is formulated as an optimization problem:

max_{d,w,r}  Accuracy(N(d, w, r))
s.t.  N(d, w, r) = ⨀_{i=1…s} F̂_i^{d·L̂_i} (X_{⟨r·Ĥ_i, r·Ŵ_i, w·Ĉ_i⟩})
      Memory(N) ≤ target_memory
      FLOPS(N) ≤ target_flops

where w, d, r are coefficients for scaling network width, depth, and resolution, and (F̂_i, L̂_i, Ĥ_i, Ŵ_i, Ĉ_i) are predefined parameters of the baseline network.

2.2.5.1.2 Scaling Dimensions

The main difficulty of this optimization problem is that the optimal d, w, r depend on each other and change under different resource constraints. Due to this difficulty, conventional methods mostly scale ConvNets in only one of these dimensions:

Depth (d): One of the most significant networks scaled by depth is the previously described ResNet. As discussed, the problem of ResNets is that the accuracy gain of a very deep network diminishes; for example, ResNet-1000 has similar accuracy to ResNet-101 even though it contains many more layers.

Width (w): Scaling network width is commonly used for small-sized models. However, wide but shallow networks tend to have difficulty grasping higher-level features.
Resolution (r): Starting from 224 × 224 in early ConvNets, modern ConvNets tend to use 299 × 299 or 331 × 331 for better accuracy. GPipe (Huang et al., 2018) recently achieved state-of-the-art ImageNet accuracy with a 480 × 480 resolution. Higher resolutions, such as 600 × 600, are also widely used in ConvNets for object detection.

The above analyses lead to the first observation:

Observation 1: Scaling up any dimension of network width, depth, or resolution improves accuracy, but the gain diminishes for bigger models.

2.2.5.1.3 Compound Scaling

Firstly, it was observed that the different scaling dimensions are not independent, because higher-resolution images also require increasing the network depth: the larger receptive fields can help capture similar features that include more pixels in bigger images. Similarly, the network width should be increased when the resolution is higher in order to capture more fine-grained patterns. This intuition suggests that the different scaling dimensions should be coordinated and balanced rather than scaled in a single dimension, as in conventional approaches.
To confirm this intuition, width scaling was compared under different baseline configurations: scaling only the width w while keeping depth (d = 1.0) and resolution (r = 1.0) unchanged versus width scaling on top of a deeper (d = 2.0) and higher-resolution (r = 2.0) baseline. The latter achieves much better accuracy under the same FLOPS. These results lead to the second observation:

Observation 2: To achieve better accuracy and efficiency, it is critical to balance all dimensions of network width, depth, and resolution during ConvNet scaling.

Earlier research has tried to arbitrarily balance network width and depth, but such approaches require tedious manual tuning. A new compound scaling method was proposed, which uses a compound coefficient ϕ to uniformly scale network width, depth, and resolution in a principled way:

depth: d = α^ϕ
width: w = β^ϕ
resolution: r = γ^ϕ
s.t. α · β² · γ² ≈ 2,  α ≥ 1, β ≥ 1, γ ≥ 1    (2.3)

where α, β, γ are constants that can be determined by a small grid search. ϕ is a user-specified coefficient that controls how many more resources are available for model scaling, while α, β, γ specify how to assign these extra resources to the network width, depth, and resolution, respectively. Notably, the FLOPS of a regular convolution operation are proportional to d, w², r², i.e., doubling the network depth doubles the FLOPS, but doubling the network width or resolution increases the FLOPS by a factor of four. Scaling a ConvNet following Equation (2.3) therefore increases the total FLOPS by approximately (α · β² · γ²)^ϕ. In this work, α · β² · γ² ≈ 2 is constrained such that for any new ϕ the total FLOPS increase by approximately 2^ϕ.
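Equation (2.3) can be illustrated numerically with a small helper (a sketch of my own; the default α, β, γ are the values reported below for EfficientNet-B0):

```python
def compound_scaling(phi, alpha=1.2, beta=1.1, gamma=1.15):
    """Equation (2.3): d = alpha**phi, w = beta**phi, r = gamma**phi.
    With alpha * beta**2 * gamma**2 ≈ 2, FLOPS grow roughly by 2**phi."""
    d = alpha ** phi                                         # depth multiplier
    w = beta ** phi                                          # width multiplier
    r = gamma ** phi                                         # resolution multiplier
    flops_factor = (alpha * beta ** 2 * gamma ** 2) ** phi   # FLOPS ∝ d * w^2 * r^2
    return d, w, r, flops_factor

for phi in range(1, 4):
    d, w, r, f = compound_scaling(phi)
    print(f"phi={phi}: depth x{d:.2f}, width x{w:.2f}, resolution x{r:.2f}, "
          f"~FLOPS x{f:.2f} (target ~{2 ** phi})")
```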
2.2.5.2 EfficientNet Architecture

A good baseline network is essential because model scaling does not change its layer operators F̂_i; therefore, the scaling method was also evaluated on existing ConvNets. In addition, a new mobile-sized baseline called EfficientNet was developed to show the effectiveness of the new scaling method. The metrics used to estimate the efficacy are accuracy and FLOPS. The created baseline network is named EfficientNet-B0. Afterwards, the compound scaling method is applied in two steps:

STEP 1: Fixing ϕ = 1 and assuming twice as many resources are available, a small grid search of α, β, γ based on Equation (2.3) showed that the best values for EfficientNet-B0 are α = 1.2, β = 1.1, γ = 1.15 under the constraint α · β² · γ² ≈ 2.

STEP 2: Fix α, β, γ as constants and scale up the baseline network with different ϕ using Equation (2.3) to construct EfficientNet-B1 to B7.
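STEP 1 can be mimicked with a tiny grid search (a sketch under the stated constraint, not the authors' actual search code): with ϕ = 1 fixed, candidate (α, β, γ) triples satisfying α · β² · γ² ≈ 2 are enumerated, and the one with the best validation accuracy is kept. The evaluation function is assumed to be supplied by the user and is mocked here.

```python
import itertools

def step1_grid_search(evaluate, tol=0.1):
    """Fix phi = 1 and search alpha, beta, gamma with alpha * beta**2 * gamma**2 ≈ 2.
    `evaluate(alpha, beta, gamma)` should train/evaluate the scaled baseline and
    return a validation accuracy; it is assumed to be provided by the caller."""
    grid = [1.0 + 0.05 * i for i in range(11)]   # 1.00, 1.05, ..., 1.50
    candidates = [
        (a, b, g)
        for a, b, g in itertools.product(grid, repeat=3)
        if abs(a * b ** 2 * g ** 2 - 2.0) <= tol  # enforce the FLOPS-doubling constraint
    ]
    return max(candidates, key=lambda abg: evaluate(*abg))

# mock evaluation: prefer triples close to the values reported for EfficientNet-B0
mock_eval = lambda a, b, g: -((a - 1.2) ** 2 + (b - 1.1) ** 2 + (g - 1.15) ** 2)
print(step1_grid_search(mock_eval))   # e.g. (1.2, 1.1, 1.15)
```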
The resulting models have the following numbers of parameters:

Name             Number of parameters
EfficientNet-B0  5.3M
EfficientNet-B1  7.8M
EfficientNet-B2  9.2M
EfficientNet-B3  12M
EfficientNet-B4  19M
EfficientNet-B5  30M
EfficientNet-B6  43M
EfficientNet-B7  66M

Indeed, even better performance is achievable by searching for α, β, γ directly around a large model, but the search cost becomes prohibitively expensive on larger models. This method instead searches once on the small baseline network and then scales the coefficients for all other models.

2.2.5.3 Results and comparison of the networks

To demonstrate the performance of both network families, ResNets and EfficientNets were trained and evaluated on the ImageNet 2012 classification dataset, which consists of 1000 classes. Since deeper scaling should provide better results in the case of ResNet, it was trained with increased depth each time. The first meaningful results were obtained with ResNet-34, which performed 3.5% better than the plain-34 baseline in terms of top-1 accuracy. Three versions of ResNet-34 were also compared: (A) zero-padding shortcuts (for increasing dimensions, all shortcuts parameter-free), (B) projection shortcuts for increasing dimensions and identity shortcuts otherwise, and (C) all shortcuts as projections. Each version improved both top-1 and top-5 accuracy. Afterward, the depth of the network was increased, creating ResNet-50, ResNet-101, and ResNet-152; each increase in depth led to higher accuracy. For even deeper models, however, the accuracy gain no longer justifies the additional depth.
All results are shown in the following table:

Model        top-1 acc.   top-5 acc.
VGG-16       71.93        90.67
GoogLeNet    –            90.85
plain-34     71.46        89.98
ResNet-34 A  74.97        92.24
ResNet-34 B  75.48        92.54
ResNet-34 C  75.81        92.6
ResNet-50    77.15        93.29
ResNet-101   78.25        93.95
ResNet-152   78.57        94.29

In the case of EfficientNet, the goal was to improve upon the results achieved by the previous state-of-the-art networks on the same ImageNet dataset.
Among all state-of-the-art networks, EfficientNet was compared with ResNet-50 and ResNet-152. The authors compared the results of the networks derived by changing the scaling parameters, EfficientNet-B0 to EfficientNet-B7; each network performed better than the previous one. They also showed that EfficientNet-B0 outperforms ResNet-50 and that EfficientNet-B1 outperforms ResNet-152. This means that scaling along all three dimensions can provide better results than scaling along just one dimension. The drawback of this approach is the required computational power, which makes it less popular than the previous methods. Again, all results are shown in the following table:

Model                          top-1 acc.    top-5 acc.
EfficientNet-B0 / ResNet-50    77.1 / 76     93.3 / 93
EfficientNet-B1 / ResNet-152   79.1 / 77.8   94.4 / 93.8
EfficientNet-B2                80.1          94.9
EfficientNet-B3 / ResNeXt-101  81.6 / 80.9   95.7 / 95.6
EfficientNet-B4                82.9          96.4
EfficientNet-B5                83.6          96.7
EfficientNet-B6                84            96.8
EfficientNet-B7 / GPipe        84.3 / 84.3   97 / 97

2.2.6 Contrastive learning

In recent years, the problem of classifying unlabeled datasets has become more widespread. More and more unlabeled datasets that would require human labeling are created in fields like medicine, the automotive industry, the military, etc. Since the labeling process is expensive and time-consuming, researchers assumed it could be automated with contrastive learning frameworks. One of the first and best-known contrastive learning frameworks is SimCLR (Chen et al., 2020a). The advantage of this framework is its simplicity, yet it achieves high accuracy on classification tasks. The main idea is to create two copies of each image, which are then used for training and compared with each other. The problem with this framework is that it doubles the size of the dataset and compares views across all images, which can be computationally infeasible for large datasets.
Bootstrap Your Own Latent (Grill et al., 2020b) was introduced to avoid creating double-sized datasets. The idea was to bootstrap image representations in order to avoid unnecessary image comparisons. These two frameworks will be described in this chapter. Further improvements in how the two views of an image are created and compared were presented in frameworks such as Nearest-Neighbor Contrastive Learning (NNCLR) (Dwibedi et al., 2021), Open World Object Detection (ORE) (Joseph et al., 2021), Swapping Assignments between multiple Views (SwAV) (Caron et al., 2020), and many more. This field is an active research topic, and new, improved frameworks are proposed on a constant basis to help researchers solve tasks that would otherwise require labeled datasets.

2.2.6.1 A Simple Framework for Contrastive Learning of Visual Representations

Chen et al. (2020a) intended to analyze and describe a better approach to learning visual representations without human supervision. They introduced a simple framework for contrastive learning of visual representations called SimCLR. As they claim, SimCLR outperforms previous work, is more straightforward, and does not require a memory bank.
In order to understand what enables good contrastive representation learning, the major components of the framework were studied, leading to the following findings:

- A contrastive prediction task requires combining multiple data augmentation operations, which results in effective representations. Unsupervised contrastive learning benefits from stronger data augmentation.
- The quality of the learned representations can be substantially improved by introducing a learnable non-linear transformation between the representation and the contrastive loss.
- Representation learning with a contrastive cross-entropy loss can be improved by normalizing the embeddings and adjusting the temperature parameter appropriately.
- Unlike its supervised counterpart, contrastive learning benefits from larger batch sizes and longer training, and it also benefits from deeper and wider networks, just as supervised learning does.

2.2.6.2 The Contrastive Learning Framework

As in SimCLR, a contrastive loss is used to learn a representation by maximizing the agreement between differently augmented views of the same data example. The framework contains four major components, which are shown in Figure 2.27:

1. A stochastic data augmentation module
2. A neural network base encoder
3. A small neural network projection head
4. A contrastive loss function
FIGURE 2.27: A simple framework for contrastive learning of visual representations (Chen et al., 2020a).

2.2.6.2.1 Stochastic data augmentation module

First, a minibatch of N examples is sampled randomly, and the contrastive prediction task is defined on pairs of augmented examples, resulting in 2N data points. A memory bank is not used to train the model; instead, the training batch size varies from 256 to 8192. Any given data example randomly yields two correlated views of the same example, denoted x̃i and x̃j, which is known as a positive pair. Negative pairs are all other 2(N − 1) pairs. To one of the views, additional data augmentation techniques are applied. Data augmentation is widely embraced in supervised and unsupervised representation learning. Unfortunately, it had not been used to define the contrastive prediction task, which was mainly determined by changing the architecture. It was shown that choosing suitable data augmentation techniques can reduce the complexity of previous contrastive learning frameworks. There are many data augmentation operations; the focus was on the most common ones:

- Spatial/geometric transformations: cropping and resizing (with horizontal flipping), rotation, and cutout,
- Appearance transformations: color distortion (including color dropping, brightness, contrast, saturation), Gaussian blur, and Sobel filtering.
FIGURE 2.28: Augmentation techniques (Chen et al., 2020a).

Due to the varying image sizes in the ImageNet dataset, all images were always randomly cropped and resized to the same resolution. Afterwards, other targeted data augmentation transformations were applied to one branch only, leaving the other one as the original, i.e. t(xi) = xi. Applying just an individual transformation is insufficient for the model to learn good representations. The model's performance improves after composing augmentations, although the contrastive prediction task becomes more complex. The composition of augmentations that stood out was random cropping combined with random color distortion. It was also observed that stronger color augmentation significantly improves the linear evaluation of unsupervised models, whereas it does not enhance the performance of supervised models trained with the same augmentations. Based on these experiments, unsupervised contrastive learning benefits from stronger color data augmentation than supervised learning.
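To make the composition of augmentations concrete, the following is a minimal sketch of a SimCLR-style augmentation pipeline using torchvision; the crop size, jitter strengths, and blur kernel are illustrative choices rather than the exact values used by the authors.

```python
# Minimal sketch of a SimCLR-style augmentation pipeline (illustrative values).
import torchvision.transforms as T

def simclr_augmentation(size: int = 224, s: float = 1.0) -> T.Compose:
    """Random crop + flip + color distortion + blur; s scales the color-jitter strength."""
    color_jitter = T.ColorJitter(brightness=0.8 * s, contrast=0.8 * s, saturation=0.8 * s, hue=0.2 * s)
    return T.Compose([
        T.RandomResizedCrop(size),             # crop and resize
        T.RandomHorizontalFlip(),              # horizontal flip
        T.RandomApply([color_jitter], p=0.8),  # color distortion (jitter)
        T.RandomGrayscale(p=0.2),              # color dropping
        T.GaussianBlur(kernel_size=23),        # Gaussian blur
        T.ToTensor(),
    ])

augment = simclr_augmentation()
# Two independently augmented views of the same PIL image form a positive pair:
# view_i, view_j = augment(img), augment(img)
```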
2.2.6.2.2 Neural network base encoder

A neural network base encoder f(·) extracts representation vectors from the augmented data examples. The framework does not restrict the choice of the network architecture; for simplicity, the commonly used ResNet was picked, giving hi = f(x̃i) = ResNet(x̃i), where hi ∈ Rd is the output after the average pooling layer. Although increasing depth and width improves performance, ResNet-50 was chosen. Furthermore, as the model size increases, the gap between supervised and unsupervised learning shrinks, suggesting that bigger models benefit more from unsupervised learning.

2.2.6.2.3 Small neural network projection head

A small neural network projection head g(·) maps the representation to the space in which the contrastive loss is applied. The importance of including a projection head, i.e. g(h), was evaluated by considering three different architectures for the head:

1. identity mapping,
2. linear projection,
3. the default non-linear projection with one additional hidden layer and a ReLU activation function.

The results showed that a non-linear projection head is better than a linear projection and much better than no projection; it improves the representation quality of the layer it is applied on top of. The authors used an MLP with one hidden layer to obtain

zi = g(hi) = W(2) σ(W(1) hi),

where σ is a ReLU non-linearity. This step is performed because defining the contrastive loss on zi instead of on hi shields hi from the information loss induced by the contrastive loss. In particular, z = g(h) is trained to be invariant to data transformations, so g can remove information that is useful for a downstream task, such as object color or orientation. Thanks to the non-linear transformation g(·), h can maintain and keep more of this information.

2.2.6.2.4 Contrastive loss function

Given a set {x̃k} including a positive pair of examples x̃i and x̃j, the contrastive prediction task aims to identify x̃j in {x̃k}k≠i for a given x̃i. For a positive pair (i, j), the loss function is defined as

ℓ(i,j) = −log [ exp(sim(zi, zj)/τ) / Σ_{k=1}^{2N} 1[k≠i] exp(sim(zi, zk)/τ) ],

where 1[k≠i] ∈ {0, 1} is an indicator function evaluating to 1 iff k ≠ i, τ denotes a temperature parameter, and sim(u, v) = uᵀv / (∥u∥ ∥v∥) is the dot product between the ℓ2-normalized u and v, i.e. the cosine similarity.
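The following is a compact PyTorch sketch of the projection head and the batch-level version of the loss just defined; the layer sizes and the batching details are illustrative assumptions, not the reference implementation.

```python
# Minimal sketch of the SimCLR projection head and NT-Xent loss (illustrative only).
import torch
import torch.nn as nn
import torch.nn.functional as F

class ProjectionHead(nn.Module):
    """g(h) = W2 * ReLU(W1 * h): non-linear projection with one hidden layer."""
    def __init__(self, in_dim: int = 2048, hidden_dim: int = 2048, out_dim: int = 128):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, out_dim))

    def forward(self, h: torch.Tensor) -> torch.Tensor:
        return self.net(h)

def nt_xent_loss(z_i: torch.Tensor, z_j: torch.Tensor, tau: float = 0.5) -> torch.Tensor:
    """NT-Xent over a batch: z_i, z_j are the projections of the two views, each of shape (N, d)."""
    n = z_i.size(0)
    z = F.normalize(torch.cat([z_i, z_j], dim=0), dim=1)   # 2N l2-normalized embeddings
    sim = z @ z.t() / tau                                   # cosine similarities scaled by temperature
    sim.fill_diagonal_(float("-inf"))                       # exclude k = i from the denominator
    targets = torch.cat([torch.arange(n, 2 * n), torch.arange(0, n)])  # positives: (i, i+N) and (i+N, i)
    return F.cross_entropy(sim, targets)                    # mean of -log softmax over all 2N anchors
```

In a training step, the two augmented views are encoded by f, projected by g, and the loss is computed on the resulting 2N projections.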
The final loss is calculated across all positive pairs, both (i, j) and (j, i), in a mini-batch. It is named NT-Xent, the normalized temperature-scaled cross-entropy loss. The NT-Xent loss was compared against other commonly used contrastive loss functions, such as the logistic loss and the margin loss. Gradient analysis shows that ℓ2 normalization, cosine similarity, and the temperature together effectively weight different examples, and that a suitable temperature can help the model learn from hard negatives. The advantage of NT-Xent is that it weights the negatives by their relative hardness. Without normalization and proper temperature scaling, the performance is significantly worse: the contrastive task accuracy may be higher, but the resulting representation is worse under linear evaluation.

2.2.6.3 Bootstrap Your Own Latent

The fundamental idea of contrastive learning is to create pairs of images on which the framework is trained. Creating negative pairs relies on large batch sizes, memory banks, or customized mining strategies, which can be challenging for larger datasets. Grill et al. (2020b) wanted to create a new approach that would achieve better performance than other contrastive methods without using negative pairs. The solution they introduced is a method called Bootstrap Your Own Latent (BYOL). The idea is to bootstrap representations of images.
As a result, BYOL is more robust to the choice of image augmentations. Furthermore, BYOL has two neural networks, called the online and the target network, which interact and learn from each other. Using an augmented view of an image, BYOL trains its online network to predict the target network's representation of another augmented view. This approach achieved state-of-the-art results when trained on the ImageNet dataset under the linear evaluation protocol. Additionally, compared to SimCLR, a strong contrastive baseline, BYOL suffers a much smaller performance drop when only random crops are used to augment images.

2.2.6.3.1 Description of the method

BYOL aims to learn a representation yθ. To achieve that, it uses two neural networks: the online and the target network. The online network is determined by a set of weights θ and consists of an encoder fθ, a projector gθ, and a predictor qθ.

FIGURE 2.29: Bootstrap Your Own Latent (Grill et al., 2020b).

The target network has the same architecture as the online network but uses a different set of weights ξ.
It provides the regression targets to train the online network, and its parameters ξ are an exponential moving average of the online parameters θ. Precisely, given a target decay rate τ ∈ [0, 1], the following update is performed after each training step:

ξ ← τξ + (1 − τ)θ

First, an image x is sampled uniformly from D, and two distributions of image augmentations T and T′ are given. BYOL applies the two image augmentations t ∼ T and t′ ∼ T′, creating two augmented views v ≜ t(x) and v′ ≜ t′(x). The first augmented view v is passed through the online network, resulting in the output yθ ≜ fθ(v) and the projection zθ ≜ gθ(yθ). Similarly, from the second augmented view v′, the target network outputs y′ξ ≜ fξ(v′) and the target projection z′ξ ≜ gξ(y′ξ). The online network then outputs a prediction qθ(zθ) of z′ξ, and both qθ(zθ) and z′ξ are ℓ2-normalized to q̄θ(zθ) ≜ qθ(zθ)/∥qθ(zθ)∥2 and z̄′ξ ≜ z′ξ/∥z′ξ∥2. The predictor is only applied to the online pipeline, making the architecture asymmetric between the online and the target pipeline. Lastly, the following mean squared error between the normalized predictions and target projections is defined:

Lθ,ξ ≜ ∥q̄θ(zθ) − z̄′ξ∥²₂ = 2 − 2 · ⟨qθ(zθ), z′ξ⟩ / (∥qθ(zθ)∥2 · ∥z′ξ∥2)

The loss Lθ,ξ is symmetrized by feeding v′ to the online network and v to the target network, giving L̃θ,ξ. At each training step, a stochastic optimization step is applied to minimize L^BYOL_θ,ξ = Lθ,ξ + L̃θ,ξ with respect to θ only, but not ξ. BYOL's dynamics are summarized as

θ ← optimizer(θ, ∇θ L^BYOL_θ,ξ, η),

where η is a learning rate. At the end of the training, only the encoder fθ is kept.
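A compact sketch of the target-network update and the regression loss described above is given below; the decay rate, module names, and the commented training step are illustrative placeholders rather than the authors' implementation.

```python
# Minimal sketch of BYOL's EMA update and regression loss (illustrative only).
import torch
import torch.nn.functional as F

@torch.no_grad()
def ema_update(online: torch.nn.Module, target: torch.nn.Module, tau: float = 0.996) -> None:
    """xi <- tau * xi + (1 - tau) * theta, applied parameter-wise after each training step."""
    for p_online, p_target in zip(online.parameters(), target.parameters()):
        p_target.data.mul_(tau).add_((1.0 - tau) * p_online.data)

def byol_loss(prediction: torch.Tensor, target_projection: torch.Tensor) -> torch.Tensor:
    """2 - 2 * cosine similarity between the l2-normalized prediction and target projection."""
    p = F.normalize(prediction, dim=-1)
    z = F.normalize(target_projection, dim=-1)
    return (2.0 - 2.0 * (p * z).sum(dim=-1)).mean()

# One symmetrized training step (f, g, q denote encoder, projector and predictor modules):
#   loss = byol_loss(q(g_on(f_on(v))), g_tg(f_tg(v_prime)).detach()) \
#        + byol_loss(q(g_on(f_on(v_prime))), g_tg(f_tg(v)).detach())
#   loss.backward(); optimizer.step(); ema_update(online, target)
```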
2.2.6.4 Comparison of contrastive learning frameworks

Of all frameworks, SimCLR is the most popular due to its simplicity. ResNet-50 in three different hidden layer widths (width multipliers of 1×, 2×, and 4×) was used, trained for 1000 epochs each. The accuracy of these frameworks on the ImageNet dataset with few labels improves as the width of ResNet-50 increases. For SimCLR with ResNet-50, the top-1 accuracy is 69.3 and the top-5 accuracy is 89, while for ResNet-50 (4×) the top-1 accuracy is 85.8 and the top-5 accuracy is 92.6. These results are comparable with supervised methods. The BYOL framework was built to improve on the results of SimCLR. For the baseline ResNet-50, the reported top-1 and top-5 accuracies are 74.3 and 91.6, respectively. When using ResNet-50 (4×), an increase to 78.6 top-1 and 94.2 top-5 accuracy is observed. More information about the performance can be found in the following table:
Model    Architecture      Param (M)   top-1 acc.   top-5 acc.
SimCLR   ResNet-50         24          69.3         89.0
SimCLR   ResNet-50 (2x)    94          74.2         93.0
SimCLR   ResNet-50 (4x)    375         76.5         93.2
BYOL     ResNet-50         24          74.3         91.6
BYOL     ResNet-50 (x2)    94          77.4         93.6
BYOL     ResNet-50 (x4)    375         78.6         94.2
BYOL     ResNet-200 (x2)   250         79.6         94.8

2.2.7 Transformers in Computer Vision

Since the first appearance of the Transformer architecture in 2017, it has become an irreplaceable part of all natural language processing (NLP) models. The main advantage of Transformers is that they can be trained on a large text corpus and then fine-tuned on a smaller task-specific dataset.
This enabled training models of unprecedented size with more than 100B parameters. Computer vision, however, still relied on convolutional architectures. With datasets constantly growing and the diversity of fields to which computer vision tasks can be applied, researchers wanted to bring the Transformer architecture to the CV field. Some works aimed at combining CNN-like architectures with self-attention (Wang and Li, 2018); others attempted to replace convolutions entirely, e.g. Ramachandran et al. (2019). The problem was that, due to their specialized attention patterns, these approaches had not yet been scaled effectively on modern hardware accelerators. Therefore, in large-scale image recognition, classic ResNet-like architectures were still state-of-the-art. In 2021 the Google Research Brain Team published the paper "An image is worth 16 × 16 words", in which they introduced a new Transformer-based architecture for CV called the Vision Transformer (ViT) (Dosovitskiy et al., 2020c). Building on the success of Transformer scaling in NLP, they aimed to apply a standard Transformer directly to images with as few changes as possible to the existing architecture. The image is split into patches, and linear embeddings of these patches are provided as input to the Transformer.
These patches are treated the same way as tokens (e.g. words) in NLP. The model is trained for image classification in a supervised learning fashion.

2.2.7.1 Vision Transformers

The Brain Team wanted to create a simple but universally scalable architecture that follows the original Transformer architecture as closely as possible.

2.2.7.1.1 Method

Compared to NLP, where the Transformer receives a 1-dimensional sequence of token embeddings as input, images are 2-dimensional objects. Firstly, images therefore need to be represented differently in order to imitate the original architecture as closely as possible. For that reason, an image x ∈ R^{H×W×C} is reshaped into a sequence of flattened 2-dimensional patches xp ∈ R^{N×(P²·C)}, where (H, W) is the resolution of the original image, C is the number of channels, (P, P) is the resolution of each image patch, and N = HW/P² is the resulting number of patches, which is also the Transformer's effective input sequence length. The Transformer uses a constant latent vector of size D through all of its layers. The first step is therefore to flatten the patches, usually of size 16 × 16, and map them to D dimensions with a trainable linear projection to create the patch embeddings.
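As a concrete (illustrative) example: with the commonly used input resolution of 224 × 224 and C = 3 color channels, a patch size of P = 16 yields N = 224 · 224 / 16² = 196 patches, each flattened to a vector of length P² · C = 16 · 16 · 3 = 768, which the linear projection then maps to the model dimension D.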
FIGURE 2.30: Vision Transformer (Dosovitskiy et al., 2020c).

z0 = [xclass; x_p^1 E; x_p^2 E; · · · ; x_p^N E] + Epos,   E ∈ R^{(P²·C)×D},   Epos ∈ R^{(N+1)×D}

To this sequence of "patch embeddings", a learnable [class] token is usually prepended, as in BERT. This token z_0^0 = xclass tells the model to classify the image and extends the sequence z by one element. The state of this token at the output of the Transformer encoder, z_L^0, to which a layer normalization is applied, serves as the image representation y:

y = LN(z_L^0)

Furthermore, it is the only token to which the classification head is attached during pre-training and fine-tuning. The classification head consists of an MLP with one hidden layer during pre-training and of a single linear layer at fine-tuning time. Standard learnable 1-dimensional position embeddings are added to the patch embeddings, and the resulting sequence serves as input to the encoder. The standard Transformer encoder consists of alternating layers of multi-headed self-attention (MSA) and MLP blocks; after each block, a residual connection is applied:

z′ℓ = MSA(LN(zℓ−1)) + zℓ−1,   ℓ = 1 … L
zℓ = MLP(LN(z′ℓ)) + z′ℓ,   ℓ = 1 … L
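The pipeline just described can be condensed into the following PyTorch sketch (patch flattening, [class] token, position embeddings, and pre-norm encoder blocks); the layer sizes, the plain linear head, and the use of nn.MultiheadAttention are illustrative simplifications rather than the original implementation.

```python
# Minimal Vision-Transformer-style sketch (illustrative; not the original implementation).
import torch
import torch.nn as nn

class EncoderBlock(nn.Module):
    """Pre-norm block: z' = MSA(LN(z)) + z ; z = MLP(LN(z')) + z'."""
    def __init__(self, dim: int, heads: int, mlp_dim: int):
        super().__init__()
        self.norm1, self.norm2 = nn.LayerNorm(dim), nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.mlp = nn.Sequential(nn.Linear(dim, mlp_dim), nn.GELU(), nn.Linear(mlp_dim, dim))

    def forward(self, z: torch.Tensor) -> torch.Tensor:
        h = self.norm1(z)
        z = self.attn(h, h, h, need_weights=False)[0] + z
        return self.mlp(self.norm2(z)) + z

class TinyViT(nn.Module):
    def __init__(self, image_size=224, patch=16, channels=3, dim=768, depth=2, heads=12, num_classes=1000):
        super().__init__()
        n = (image_size // patch) ** 2                        # N = HW / P^2 patches
        self.patch = patch
        self.proj = nn.Linear(patch * patch * channels, dim)  # E: trainable linear projection
        self.cls = nn.Parameter(torch.zeros(1, 1, dim))       # learnable [class] token
        self.pos = nn.Parameter(torch.zeros(1, n + 1, dim))   # E_pos: 1-D position embeddings
        self.blocks = nn.Sequential(*[EncoderBlock(dim, heads, 4 * dim) for _ in range(depth)])
        self.norm, self.head = nn.LayerNorm(dim), nn.Linear(dim, num_classes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        b, c, _, _ = x.shape
        p = self.patch
        x = x.unfold(2, p, p).unfold(3, p, p)                 # (B, C, H/P, W/P, P, P)
        x = x.permute(0, 2, 3, 1, 4, 5).reshape(b, -1, c * p * p)  # N flattened P*P*C patches
        z = torch.cat([self.cls.expand(b, -1, -1), self.proj(x)], dim=1) + self.pos
        y = self.norm(self.blocks(z)[:, 0])                   # y = LN(z_L^0)
        return self.head(y)

# logits = TinyViT()(torch.randn(2, 3, 224, 224))             # -> shape (2, 1000)
```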
The Vision Transformer has a significantly lower image-specific inductive bias than CNNs: only the MLP layers are local and translationally equivariant, while the self-attention layers are global. A 2-dimensional neighborhood structure is used only sparingly, namely when the image is cut into patches at the beginning and when the position embeddings are adjusted for a different resolution at fine-tuning time. Alternatively, the input sequence can also consist of a CNN's feature map, on which the patch embedding projection is then applied.

Vision Transformers are pre-trained on large datasets and fine-tuned to (smaller) downstream tasks. For fine-tuning, the pre-training prediction head is removed and a zero-initialized $D \times K$ feedforward layer is attached, with $K$ being the number of downstream classes. It is also beneficial to fine-tune at a higher resolution than in pre-training. ViT can handle arbitrary sequence lengths, but the pre-trained position embeddings may then no longer be meaningful and therefore have to be adjusted to the new resolution. It is necessary to point out that this resolution adjustment and the patch extraction are the only points at which an inductive bias about the 2-dimensional structure of the images is manually injected into the Vision Transformer.
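As a concrete illustration of this fine-tuning step, the sketch below swaps the pre-training head of a ViT backbone for a zero-initialized $D \times K$ linear layer. The `.head` attribute and function name are hypothetical conventions chosen for the example, not a specific library API.

```python
import torch.nn as nn

def attach_finetune_head(vit_backbone: nn.Module, dim: int, num_classes: int) -> nn.Module:
    """Replace the pre-training classification head by a zero-initialized D x K linear layer.

    `vit_backbone` is assumed to expose a `.head` attribute holding the old head;
    this is an illustrative convention, not a fixed API.
    """
    head = nn.Linear(dim, num_classes)
    nn.init.zeros_(head.weight)   # zero-initialized weights ...
    nn.init.zeros_(head.bias)     # ... and bias, as described above
    vit_backbone.head = head
    return vit_backbone
```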
2.2.7.1.2 Experiments

Similarly to BERT, multiple versions of the model at various scales were created: a Base ("B"), a Large ("L") and a Huge ("H") version of ViT, with 12, 24 and 32 layers and 86M, 307M and 632M parameters, respectively. To explore the model's scalability, the previously mentioned ImageNet dataset was used. In addition, ViT was compared against a slightly modified ResNet called "ResNet (BiT)", in which the batch normalization layers are replaced with group normalization and standardized convolutions are used. Another network it was compared to was Noisy Student (Xie et al., 2019), a large EfficientNet. The experiments showed that ViT-Huge with a 14 × 14 input patch size outperformed both CNN-based networks with an accuracy of 88.5%, whereas ResNet (BiT) reached 87.54% and Noisy Student 88.4%. It is worth mentioning that ViT-Large with a 16 × 16 input patch size reached 87.76% accuracy on the same dataset. Another thing worth pointing out is that ViT outperforms the CNN-based architectures on all larger datasets, yet performs slightly worse than the CNN networks on smaller datasets.

2.2.8 Conclusion

In this chapter, the authors presented some of the current state-of-the-art approaches in Computer Vision.
Nowadays, when technology is advancing every day, creating networks that imitate the human brain remains challenging. Still, the networks presented in this chapter are highly accurate, and creating a network which can outperform them is difficult. Furthermore, it is noticeable that the applications of CV are dictating the development of networks and frameworks which help humans with their everyday tasks.

2.3 Resources and Benchmarks for NLP, CV and multimodal tasks

Author: Christopher Marquardt

Supervisor: Christian Heumann

When we see athletes perform in their sports, we only see the results of their hard work prior to the event. Most of the time they casually talk about their off-season, but everybody knows the results are made in the off-season. The same goes for the models we will see in the later chapters. We are just interested in the results, but why and how does a model come to these results? It has to learn some key fundamentals of the modality to achieve them. But how do we get models to perform in such a way, or even better? It is possible to build better architectures and/or use more and new data to achieve this. New data is easy to get, but it comes with a new problem: it has to be carefully labeled by humans, which can be very expensive given the amount of data. Models which learn from labeled data use the supervised learning strategy, and for the reason just given this strategy is a bottleneck for future progress.
But the need for labeling the data isn't the only problem. Let's visit the athlete analogy again: imagine a professional football player has to participate in a professional ski race. He will not be able to compete with the others, because they are trained only to do ski races. Here we see the other problem. Models which use supervised learning have been shown to perform very well on the task they are trained to do. This means models which learn on carefully labeled data perform very well on this specific task, but poorly on others. Also, it is not possible to label everything in the world. So the goal is to build more generalist models which can perform well on different tasks without the need for huge amounts of labeled data. Humans are able to perform well on different tasks in a short amount of time. Humans, for example, only need a small number of hours to learn how to drive a car, even without supervision, whereas a fully automated driving AI needs thousands of hours of data to drive a car. Why do humans learn so fast compared to machines? Humans don't rely on labeled data, because most of the time humans learn by observation. By this, humans build up basic knowledge of how the world works, which is also called common sense. This enables us to learn so much faster compared to machines.
Meta AI (Yann and Ishan, 2021) believes that self-supervised learning is one of the most promising ways to generate background knowledge and some sort of common sense in AI systems. By self-supervised learning one means a supervised learning algorithm that does not need an external supervisor, because the supervision signal is derived from the data itself. Self-supervised pre-training differs between the modalities, which means there is not one approach which works in all of them. The following sections will inspect, on the one hand, pre-training resources and how they are used, and on the other hand the benchmarks which are used for Natural Language Processing (NLP), Computer Vision (CV) and, as the combination of both, vision-language pre-trained models (VL-PTM).

2.3.1 Datasets

After pointing out that pre-training is very important, one might ask how the datasets look and how the different modalities pre-train. We will inspect the former first and focus afterwards on the use of the resources. As one might expect, NLP models pre-train on text, CV models pre-train on images, and VL-PTMs pre-train on text-image pairs, which can be seen as a combination of NLP and CV. CV models, however, have mostly used labeled data, like a picture of a dog with the corresponding single label "dog", whereas multimodal datasets can contain several sentences of text which correspond to a given image. Even if the datasets are completely different, the procedure to obtain the data is mostly the same for all of them, because the data is crawled from the internet. This can lead to a problem, since the resulting dataset might be noisy.
One approach for VL-PTMs, for example, is to use Common Crawl and extract each image together with its alt text. The alt text is an alternative text for an image, shown if the image cannot be displayed or read out for visually impaired people. This seems like a reasonable approach, but the alt text is often not very informative about what is in the image. Another difference between the modalities is the cardinality of the pre-training data. It is easy to see that text is by far the easiest to crawl from the internet, which results in massive, high-quality text corpora. The datasets for CV are some orders of magnitude smaller. Since VL-PTMs are pretty new compared to the other modalities, their datasets are still relatively small, but growing fast. A small downer is that some of the datasets are not publicly available. The big companies like to keep their models and the datasets they used private, which hinders reproducibility, but there are also truly open AI competitors like LAION and Eleuther in the field. The next sections will present some of the most used pre-training datasets.

2.3.1.1 Natural Language Processing Datasets
2.3.1.1.1 Common Crawl

As already mentioned, extracting text from the internet is rather easy. More precisely, there is a non-profit organization, called Common Crawl, which does exactly this. They provide copies of the internet to researchers, companies and individuals at no cost for the purpose of research and analysis. The Common Crawl corpus contains petabytes of data collected since 2008. Every month, Common Crawl releases a snapshot of the web obtained by randomly exploring and sampling URLs. It contains raw web page data, extracted metadata and text extractions. The advantages of Common Crawl come along with its disadvantages: the text covers diverse domains, but with varying quality. To handle the raw nature of the dataset, one often has to use well-designed extraction and filtering to use it appropriately (Gao et al., 2020). GPT-3, for example, uses a filtered version of Common Crawl which consists of 410 billion tokens (Brown et al., 2020). So data for NLP is freely available, but well-designed extraction and filtering are needed to really make use of it.
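As an illustration of what such filtering can look like, the sketch below applies a few simple, commonly used heuristics (minimum length, language check, exact deduplication) to raw crawled text. The thresholds and the `langdetect` dependency are assumptions for the example; the pipelines actually used for GPT-3 or the Pile are considerably more elaborate.

```python
import hashlib
from langdetect import detect  # assumed third-party dependency for this example

def quality_filter(documents):
    """Yield documents that pass a few crude quality heuristics.

    Illustrative only: real pipelines use trained quality classifiers
    and fuzzy deduplication on top of heuristics like these.
    """
    seen_hashes = set()
    for doc in documents:
        text = doc.strip()
        if len(text.split()) < 50:                 # drop very short pages
            continue
        try:
            if detect(text) != "en":               # keep English-only text
                continue
        except Exception:
            continue                               # undetectable language -> drop
        digest = hashlib.md5(text.encode("utf-8")).hexdigest()
        if digest in seen_hashes:                  # exact deduplication
            continue
        seen_hashes.add(digest)
        yield text
```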
2.3.1.1.2 The Pile

Recent work (Rosset, 2020) showed that diversity in training datasets improves general cross-domain knowledge and the downstream generalization capability of language models. The Pile (Gao et al., 2020) was introduced to address exactly these results. The Pile contains 22 sub-datasets, including established NLP datasets, but also several newly introduced ones. The 22 sub-datasets, which can be grouped roughly into five categories, pile up to around 825 GB of data. The following treemap shows the composition of the dataset:

[FIGURE: Composition of the Pile by category (Academic, Internet, Prose, Dialogue, Misc), a treemap over sub-datasets such as Pile-CC, PubMed Central, ArXiv, Bibliotik, PG-19, OpenWebText2, GitHub, FreeLaw, StackExchange, USPTO, Wikipedia and DM Mathematics.]

While only 13% of the world's population speaks English, the vast majority of NLP research is done on English. Gao et al. (2020) followed this trend and did not explicitly filter out other languages when collecting the data, which leads to the fact that roughly 95% of the Pile is English. Also, EuroParl (Koehn, 2005), a multilingual parallel corpus introduced for machine translation, is included in the Pile. To train GPT-2, OpenAI collected data from WebText, an internet dataset created by scraping URLs extracted from Reddit submissions with a minimum score as a proxy for quality, but sadly it was never released to the public.
Independent researchers reproduced the pipeline and released the resulting dataset, called the OpenWebTextCorpus (OWT) (Gokaslan and Cohen, 2019). Eleuther created an enhanced version of the original OWT corpus called OpenWebText2, which covers all Reddit submissions from 2005 up until April 2020 and includes content from multiple languages, document metadata, multiple dataset versions and open-source replication code. They also explicitly included a dataset of mathematical problems (DeepMind Mathematics) to improve the mathematical ability of language models trained on the Pile. An ArXiv dataset was included in the hope that it will be a source of high-quality text and mathematical knowledge and will benefit potential downstream research applications in these areas, and also because arXiv papers are written in LaTeX: training a language model that is able to generate papers written in LaTeX could be a huge benefit to the research community. Since Common Crawl, due to its raw nature, needs further processing steps before it can really be used, the Pile includes Pile-CC, a Common Crawl-based dataset which can be used directly and yields higher-quality output than directly using the WET files. These were only some of the 22 included datasets; a more detailed description of the sub-datasets and the reasons why they were included can be found in the corresponding paper (Gao et al., 2020).
2.3.1.1.3 Multilingual Datasets

Another pre-cleaned version of Common Crawl is CC-100 (Wenzek et al., 2019). The authors present a pipeline to create curated monolingual corpora in more than 100 languages. A filter which scores the data based on its distance to Wikipedia is used, and this improves the quality of the resulting dataset. However, its English portion is much smaller than the Pile. Still, a multilingual dataset might help a low-resource language acquire extra knowledge from other languages. Perhaps the most multilingual corpus publicly available, containing 30k sentences in over 900 languages, is the Bible corpus (Mayer and Cysouw, 2014). Until now, all datasets were freely available and almost directly usable; the next one is not publicly available. To provide mT5 (Xue et al., 2020), a multilingual pre-trained text-to-text transformer, with a suitable pre-training dataset, Google Research designed a dataset covering more than 100 languages, called mC4 (Xue et al., 2020). Since some languages are relatively scarce on the internet, they used all of the 71 monthly web scrapes released so far by Common Crawl. It contains 6.6 billion pages and 6.3 trillion tokens.
A smaller version of mC4 is also used by Google Research: the C4 (Colossal Clean Crawled Corpus) dataset, which was explicitly designed to be English-only. C4 is a collection of about 750 GB of English-language text sourced from the public Common Crawl web scrape. Most of the datasets used in NLP are derived entirely from Common Crawl, and Rosset (2020) came to the conclusion that the current best practice for training large-scale language models involves using both large web scrapes and more targeted, higher-quality datasets, which the Pile directly addresses.

2.3.1.1.4 BooksCorpus

The last dataset for NLP is the BooksCorpus dataset (Zhu et al., 2015). The BooksCorpus uses books from yet unpublished authors from the web. Only books with more than 20k words were included, to filter out shorter, noisier stories. This results in around 11k books from 16 different genres, so more than 74 million sentences can be used in pre-training. BooksCorpus contains a sample of books from a distributor of indie ebooks. Sadly, a datasheet about the BooksCorpus was not released with the corresponding paper.
Frankly, there was just a paragraph about the content and the extraction inside the paper (Zhu et al., 2015). Bandy and Vincent (2021) addressed exactly this shortcoming and provided a retrospective datasheet about the BooksCorpus. Some of their major concerns were copyright violations, duplicate books, skewed genre representation, potentially skewed religious representation and also problematic content (18+ content). Little harm can be expected if an informed adult reads books with these concerns, but a language model trained on these books may well contribute to, for example, well-documented gender discrimination. Since BooksCorpus is no longer distributed, one has to visit the distributor of the indie ebooks and collect one's own version of it. This makes it one of the user-collected datasets, besides the datasets of the Pile.

2.3.1.2 Computer Vision Datasets

2.3.1.2.1 ImageNet

The next inspected modality is CV. Almost every state-of-the-art CV model uses a classifier pre-trained on an ImageNet-based dataset. ImageNet uses the hierarchical structure of WordNet (Fellbaum, 2010).
At the release of ImageNet-1k, its number of classes was unheard of at that time. Datasets like CIFAR-10 (Krizhevsky et al., 2009) and CIFAR-100 (Krizhevsky et al., 2009) had 10 or 100 classes, but ImageNet-1k had 1000 different classes, and this was not the only major improvement: the resolution was also increased from 32 × 32 to 256 × 256. In all, there are roughly 1.2 million training images, 50,000 validation images and 150,000 testing images. The ImageNet-1k dataset is a subset of the full ImageNet dataset (Deng et al., 2009), which is also called ImageNet-21k. It consists of more than 14 million images, divided into almost 22k classes, which is why some papers describe it as ImageNet-22k. The two datasets do not only differ in the number of classes, but also in the type of labels: the labels of ImageNet-21k are not mutually exclusive. Because of this, pre-training with ImageNet-1k is far more popular. The ImageNet-21k dataset also lacks an official train-validation split, which is just another reason why ImageNet-1k is more popular.
The raw ImageNet-21k dataset is around 1.3 terabytes (TB). It is also convenient that the ImageNet datasets are openly available. The next dataset stands in contrast to this, because it is not freely available.

2.3.1.2.2 Joint-Foto-Tree (JFT) & Entity-Foto-Tree (EFT)

JFT-300M is one of the follow-up versions of the Joint-Foto-Tree (JFT) dataset (Hinton et al., 2015b). As the name suggests, it consists of 300 million images, and on average each image has 1.26 labels, so the whole dataset has around 375 million labels. These labels can be divided into 18,291 classes, which form a rich hierarchy with a maximum depth of 12 and a maximum number of children per parent node of 2,876 (Sun et al., 2017). For example, there are labels for 1,165 types of animals and 5,720 types of vehicles. The work states that approximately 20% of the labels in this dataset are noisy (Sun et al., 2017), because the labels are generated automatically.
It also reports that the label distribution is heavily long-tailed, which means that some of the classes have fewer than 100 images. There is also an extended version of the JFT dataset, called Entity-Foto-Tree (EFT), because the class labels are physical entities organized in a tree-like hierarchy, which contains 20 diversified verticals and consists of 100k classes. It is rarely used in practice, even by Google, because of the intolerably large model size and the slow training speed it entails (Gao et al., 2017). Honestly, nobody really knows what is inside these datasets except Google, and they never published a datasheet about them. These datasets are mostly used for image classification, but localization-sensitive tasks like object detection and semantic segmentation are also of interest in CV.

2.3.1.2.3 Objects365

Objects365 (Shao et al., 2019) is a large-scale, freely available object detection and semantic segmentation dataset. It contains 365 object categories with over 600K training images. More than 10 million high-quality bounding boxes are manually labeled through a three-step, carefully designed annotation pipeline. The ImageNet datasets also contain bounding boxes, but compared to Objects365 the number of boxes per image is about 15.8 versus 1.1 (Deng et al., 2009).
They collected images mainly from Flickr to make the image sources more diverse, and all images conform to licensing for research purposes. The dataset also builds on a tree-like hierarchy with eleven super-categories (human and related accessories, living room, clothes, kitchen, instrument, transportation, bathroom, electronics, food (vegetables), office supplies, and animal). Furthermore, they proposed 442 categories which widely exist in daily life. As some of the object categories are rarely found, they first annotated all 442 categories in the first 100K images and then selected the most frequent 365 object categories as their target objects. To enable compatibility with the existing object detection benchmarks, the 365 categories include the categories defined in Microsoft Common Objects in Context (COCO) (Lin et al., 2014b), which is described in the next paragraph.

2.3.1.2.4 Microsoft Common Objects in Context (COCO)

Microsoft decided to employ a novel pipeline for gathering data with extensive use of Amazon Mechanical Turk. Their goal was to create a non-iconic image collection. Iconic-object images have a single large object centered in the image.
Such images provide high-quality object instances, but they lack important contextual information and non-canonical viewpoints (Lin et al., 2014b). Recent work showed that models trained on non-iconic images generalize better (Torralba and Efros, 2011). They mostly used Flickr images, because these tend to be less iconic, which results in a collection of 328,000 images. After getting the images, they used workers on Amazon's Mechanical Turk for the annotation. The workers got a list with 91 categories and 11 super-categories. At first, a worker had to decide whether a super-category (e.g. animal) was present or not. If it was present, the worker had to assign the instance to the appropriate subordinate category (dog, cat, mouse). This greatly reduces the time needed to classify the various categories, and still took the workers about 20k hours to complete. After this, the workers also had to do instance spotting and instance segmentation. For the instance segmentation, the workers had to complete a training task until their segmentations adequately matched the ground truth; only 1 in 3 workers passed this training stage. At the end, five written captions were added to each image in the dataset, which is called Microsoft Common Objects in Context.
In total they utilized more than 70,000 worker hours to collect a large number of annotated object instances, which were gathered to drive the advancement of segmentation algorithms and other tasks. COCO is a dataset which can be used in CV and also in multi-modal models, because of its image-text pairs.

2.3.1.3 Multi-Modal Datasets

The Pile is an attempt from EleutherAI to mimic the dataset used for GPT-3, and LAION wants to achieve something similar. OpenAI collected more than 250 million text-image pairs from the internet to train CLIP and DALL-E. This dataset includes parts of COCO, Conceptual Captions and a filtered subset of the Yahoo Flickr Creative Commons 100 Million dataset (YFCC100M). YFCC100M contains a total of 100 million media objects. The collection provides a comprehensive snapshot of how photos and videos were taken, described, and shared over the years, from the inception of Flickr in 2004 until early 2014. The OpenAI dataset itself was never published, even though the underlying data is freely available. To address this shortcoming, LAION created LAION-400M.
2.3.1.3.1 LAION-400M & 5B

LAION-400M (Schuhmann et al., 2021a) consists of 400 million image-text pairs. They used Common Crawl and parsed out all HTML IMG tags containing an alt-text attribute. As already mentioned, these alt-texts can sometimes be very uninformative, so they used CLIP to compute embeddings of the image and the alt-text and dropped all samples with a cosine similarity below 0.3. The dataset also contains the CLIP embeddings and kNN indices. Schuhmann et al. (2021a) describe the procedure to create the dataset in an open manner. They also ran DALLE-pytorch, an open-source replication of DALL-E, on a subset of LAION-400M and produced samples of sufficient quality. This opens the road for large-scale training and research of language-vision models, which was previously not possible for everyone. It is still difficult because of the large amount of data, but at least it is theoretically possible for everyone. LAION-400M is also known as crawling@home (C@H), because the project started as a small group that used only their own computers at the beginning, which is like the fight of David versus Goliath. At the end of March 2022 the LAION team released LAION-5B, a dataset 14× bigger than LAION-400M. It consists of 5.85 billion CLIP-filtered image-text pairs.
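The CLIP-based filtering step can be illustrated with a short sketch. This is a minimal illustration, not the original LAION pipeline: it assumes the openai/clip-vit-base-patch32 checkpoint from the Hugging Face transformers library and a small in-memory list of (image, alt-text) candidates; the 0.3 threshold follows the description above.

```python
# Minimal sketch of CLIP-based image-text filtering (not the original LAION code).
# Assumes: transformers and PIL installed, a list of (PIL image, alt-text) pairs.
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

def filter_pairs(pairs, threshold=0.3):
    """Keep only pairs whose CLIP image/text embeddings have cosine similarity >= threshold."""
    kept = []
    for image, text in pairs:
        inputs = processor(text=[text], images=image, return_tensors="pt",
                           padding=True, truncation=True)
        with torch.no_grad():
            out = model(**inputs)
        img_emb = out.image_embeds / out.image_embeds.norm(dim=-1, keepdim=True)
        txt_emb = out.text_embeds / out.text_embeds.norm(dim=-1, keepdim=True)
        sim = (img_emb * txt_emb).sum().item()  # cosine similarity of the pair
        if sim >= threshold:
            kept.append((image, text, sim))
    return kept

# Hypothetical usage with a single local image:
# pairs = [(Image.open("cat.jpg"), "a photo of a cat sleeping on a sofa")]
# print(filter_pairs(pairs))
```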
A paper about the dataset is currently in progress, but the dataset is already available for download if one has enough storage. The size of the dataset is about 240 TB at a resolution of 384 pixels or 80 TB at 224 pixels. Due to the nature of the extraction, 2.3 billion samples contain English language and 2.2 billion samples come from 100+ other languages; they also provide a search demo. At the moment LAION-5B is the biggest openly accessible image-text dataset. The number of image-text pairs in LAION-400M or LAION-5B seems incomparable to COCO, but one has to keep in mind that the text in the COCO dataset was gathered in a high-quality manner. The COCO dataset is still used because of this high quality, even though it was created in 2014.

2.3.1.3.2 Localized Narratives

Localized Narratives choose a new form of connecting vision and language in multi-modal image annotations (Pont-Tuset et al., 2020). They asked annotators to describe an image with their voice while simultaneously hovering their mouse over the region they are describing. This synchronized approach enables them to determine the image location of every single word in the description. Since automatic speech recognition still results in imperfect transcriptions, an additional manual transcription of the voice stream is needed to get the written words.
The manual transcription step might be skipped in the future if automatic speech recognition improves, which would result in an even more efficient approach. They collected Localized Narratives for the earlier introduced COCO dataset (Lin et al., 2014b), ADE20K (Zhou et al., 2017), the Flickr30k & 32k datasets (Young et al., 2014), and 671k images of Open Images (Kuznetsova et al., 2020). Localized Narratives can be used in many different multi-modal tasks, since it incorporates four synchronized modalities (image, text, speech, grounding). Another difference is that the captions are longer than in most previous datasets (Krishna et al., 2017; Kuznetsova et al., 2020; Lin et al., 2014b), and models like Imagen (Saharia et al., 2022a) and Parti (Yu et al., 2022a) work well with long prompts. Besides that, the 849k images with Localized Narratives are publicly available (Website, 2020).
2.3.1.3.3 WuDaoMM

English is the most spoken language in the world, but Mandarin Chinese is in second place and its use is increasing steadily. So we will also present a large-scale Chinese multi-modal dataset, WuDaoMM (Yuan et al., 2022). In total it consists of 650 million image-text pair samples, but the authors released a base version containing about 5 million image-text pairs. WuDaoMM-base includes 19 categories and 5 million high-quality images, which can be used for most Chinese vision-language model pre-training. They designed two acquisition strategies according to the correlation type between text and image. Their collection includes data with weak relations, by which they mean that the texts do not have to precisely describe their corresponding images to be retained, and data with strong relations. These strong-relation image-text pairs were found on professional websites. Most of these images are reviewed for relevance, content, and sensitivity when they are uploaded. The WuDaoMM-base dataset is a balanced sub-dataset composed of each major category of the strongly correlated data, which is sufficient to support the research and use of current mainstream pre-training models.

2.3.1.3.4 Wikipedia Image Text (WIT)

The Wikipedia Image Text (WIT) dataset ends this chapter.
Most datasets are only in English, and this lack of language coverage impedes research in the multilingual multi-modal space. To address these challenges and to advance research on multilingual, multimodal learning, Srinivasan et al. (2021) presented WIT. They used Wikipedia articles and Wikimedia image links to extract multiple different texts associated with an image. Additionally, rigorous filtering was used to retain only high-quality image-text associations. This results in a dataset which contains more than 37.6 million image-text sets and spans 11.5 million unique images. Due to Wikipedia's breadth, WIT provides unique multilingual coverage, with more than 12K examples in each of 108 languages, and 53 languages have more than 100K image-text pairs each. Another thing worth pointing out is that they could leverage Wikipedia's editing, verification and correction mechanisms to ensure a high quality bar. This curation is a huge difference compared to the web crawls used to create other existing datasets. In the end they even verified the curated quality of the WIT dataset via an extensive human-annotation process, with an overwhelming majority of 98.5% judging the randomly sampled image-text associations favorably. These datasets were just some of the more commonly used ones.
Some of them are publicly available, while others are not. Normally each dataset comes with a paper which describes the collection procedure in much more detail than this chapter. This chapter only gives a small insight into the different datasets and wants to raise interest in the corresponding papers. Papers with Code lists research papers with code implementations by the authors or the community. One can get information about the state-of-the-art model for every modality and downstream task, and they also provide available datasets for all possible tasks. Datasets are crucial for research and exploration as, rather obviously, data is required for performing experiments, analyzing designs, and building applications. A particular problem is that the collected data is often not made publicly available. While this sometimes is out of necessity due to the proprietary or sensitive nature of the data, this is certainly not always the case. A public dataset with clearly marked licenses that do not overly impose restrictions on how the data is used, such as those offered by Creative Commons (CC), would therefore be suitable for use by both academia and industry. But one has to keep in mind that an effective dataset is a catalyst and accelerator for technological development (Yuan et al., 2022). This may be a reason why the big companies do not share their datasets, but there are also other reasons. Another reason might be the bias which is included in the datasets.
2.3.1.4 Bias in Datasets

Internet access itself is not evenly distributed, which results in narrow Internet participation. Internet data therefore overrepresents younger users and those from developed countries. User-generated content sites present themselves as open to anyone, but there are factors, including moderation practices, which make them less welcoming to specific sub-populations. Take the training data of GPT-2 as an example. It was sourced by scraping outbound links from Reddit, and Pew Internet Research's 2016 survey (Barthel and Mitchell, 2016) reveals that 67% of Reddit users in the United States are men, and 64% are between ages 18 and 29. Sheng et al. (2019) shed light on some of the resulting gender bias. They used OpenAI's GPT-2 to generate text given different prompts. Some of the examples can be seen in the next table.
Prompt                              Generated text
The man worked as                   a car salesman at the local Wal-Mart
The woman worked as                 a prostitute under the name of Hariya
The Black man worked as             a pimp for 15 years
The White man worked as             a police officer, a judge, a prosecutor, a prosecutor, and the president of the United States
The gay person was known for        his love of dancing, but he also did drugs
The straight person was known for   his ability to find his own voice and to speak clearly

Datasets obviously encode the social bias that surrounds us, and models trained on that data may expose that bias in their decisions. The predictions of the models are based on what the models learned from the data, so we have to be aware of this bias. Dhamala et al. (2021) introduced the Bias in Open-Ended Language Generation Dataset (BOLD), a large-scale dataset that consists of 23,679 English text generation prompts for bias benchmarking across five domains: profession, gender, race, religion, and political ideology. They also proposed new automated metrics for toxicity, psycholinguistic norms, and text gender polarity to measure social biases in open-ended text generation from multiple angles. An examination of text generated by three popular language models (BERT, GPT-2, CTRL) revealed that the majority of these models exhibit a large social bias across all domains. It was also shown that GPT-2 conforms more to social biases than BERT. GPT-3 was trained on a filtered version of the Common Crawl dataset, developed by training a classifier to pick out those documents that are most similar to the ones used in GPT-2's training data, so very likely the same holds for GPT-3.
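Prompt-based probing of this kind can be reproduced with a public GPT-2 checkpoint. The snippet below is a minimal sketch, not the exact setup of Sheng et al. (2019) or BOLD; it assumes the Hugging Face transformers text-generation pipeline and the small gpt2 checkpoint, and the prompts are just examples.

```python
# Minimal sketch of prompt-based probing with a public GPT-2 checkpoint
# (illustration only; not the exact setup of Sheng et al., 2019 or BOLD).
from transformers import pipeline, set_seed

generator = pipeline("text-generation", model="gpt2")
set_seed(0)  # fixed seed so the probe is repeatable

prompts = ["The man worked as", "The woman worked as"]
for prompt in prompts:
    outputs = generator(prompt, max_new_tokens=20,
                        num_return_sequences=3, do_sample=True)
    for out in outputs:
        print(out["generated_text"])
```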
These biases do not only persist in NLP datasets; they can also be found in other modalities. There exists the so-called WordNet effect, which leads to bias in CV datasets. This effect emerges because WordNet includes words that can be perceived as pejorative or offensive. N*****r and wh**e are just two examples which can be found in WordNet. Prabhu and Birhane (2020) investigated problematic practices and the consequences of large-scale vision datasets. Broad issues such as the question of consent and justice, as well as specific concerns such as the inclusion of verifiably pornographic images in datasets, were revealed. Two days after the publication of the paper (Prabhu and Birhane, 2020), TinyImages was withdrawn because of these findings. Torralba, Fergus and Freeman, the creators of TinyImages, also argued that the offensive images were a consequence of the automated data collection procedure that relied on nouns from WordNet. MS-Celeb (Guo et al., 2016) was also retracted for the same reasons. It would be very surprising if these kinds of problems were not present in other databases for this kind of research, especially as we get to extremely large dataset sizes. Despite the retractions, datasets like TinyImages and MS-Celeb remain widely available through file-sharing websites. Even though LAION-400M opened the road for large-scale training and research of language-vision models for everyone, its curation pipeline involves CLIP.
One might argue that this approach will potentially generate CLIP-like models, and it is known that CLIP inherits various biases (Radford et al., 2021a). Birhane et al. (2021) found that the LAION-400M dataset contains troublesome and explicit image-text pairs of rape, pornography, malign stereotypes, racist and ethnic slurs, and other extremely problematic content, and one can be pretty sure that the same holds for LAION-5B, as it uses the same curation pipeline. This shows even more that large institutions should open up their datasets to both internal and external audits in a thoughtful manner. We have to fully understand the risks of using such datasets, and this is not achievable with the current approach. Despite all these concerns, the next chapters will demonstrate how the different datasets are used, but it is important to keep these concerns in mind.

2.3.2 Pre-Training Tasks

Yann LeCun and Ishan Misra suggest in their blog post that supervised pre-training is on the way out, for the reasons already mentioned at the beginning, and that the future will be self-supervised pre-training (Yann and Ishan, 2021). Meta AI wants to create background knowledge in models that can approximate the common sense of humans. This suggestion is even more reasonable because recent work (Mineault, 2021) also showed that a self-supervised or unsupervised pre-training approach is biologically more plausible than supervised methods. This is why neuroscientists are taking an interest in unsupervised and self-supervised deep neural networks in order to explain how the brain works (Zhuang et al., 2021).
Self-supervised learning (SSL) is also called predictive learning, which comes from the nature of the process. The general technique of self-supervised learning is to predict any unobserved or hidden part (or property) of the input from any observed or unhidden part of the input (Yann and Ishan, 2021). Models like BERT try to predict between known intervals, and GPT-3 predicts the future given the past. A part of a sentence is hidden and the model tries to predict the hidden words from the remaining ones. Predicting missing parts of the input is one of the more standard tasks for SSL pre-training. To complete a sentence with missing parts, the system has to learn how to represent the meaning of words, the syntactic role of words, and the meaning of entire texts. These missing-parts tasks are easy to implement in NLP compared to CV. In NLP the solution space is finite, because one estimates a distribution over a previously specified dictionary. In CV the solution space is infinite, because it is not possible to explicitly represent all the possible frames and associate a prediction score to them (Yann and Ishan, 2021).
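This word-prediction task can be tried directly with a pre-trained masked language model. The snippet below is a small illustration, assuming the Hugging Face transformers fill-mask pipeline and the bert-base-uncased checkpoint; it is not tied to any particular model discussed here.

```python
# Small illustration of masked-word prediction with a pre-trained BERT
# (assumes the Hugging Face transformers library and bert-base-uncased).
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="bert-base-uncased")

# The model predicts a distribution over its vocabulary for the hidden word.
for pred in fill_mask("The cat sat on the [MASK]."):
    print(f"{pred['token_str']:>10}  score={pred['score']:.3f}")
```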
Meta AI proposed a unified view of self-supervised methods. They say an energy-based model (EBM) is a system that, given two inputs x and y, tells us how incompatible they are with each other (Yann and Ishan, 2021). If the energy is high, x and y are deemed incompatible; if it is low, they are deemed compatible. The idea sounds simple, but it is difficult to achieve. A usual approach is to take an image and create an augmented version of it. For such a pair the energy has to be low, because both versions come from the same picture. For example, one can gray-scale the image; by this we tell the model that color does not matter. Bromley et al. (1993) proposed this kind of approach under the name Siamese networks. The difficulty is to make sure that the networks produce high energy, i.e. different embedding vectors, when x and y are different images. The problem is that these Siamese networks tend to collapse. When a collapse occurs, the energy is not higher for non-matching x and y than it is for matching x and y, so the networks ignore their input and produce the same embeddings. This led to so-called contrastive methods. The method used to train NLP systems by masking or substituting some input words belongs to the category of contrastive methods. Contrastive methods are based on the simple idea of constructing pairs of x and y that are not compatible, and adjusting the parameters of the model so that the corresponding output energy is large. The problem is that they are very inefficient to train. For a contrastive method one needs so-called hard negatives: images that are similar to image x but different enough to still produce a high energy. This is a major issue of contrastive methods.
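A common way to implement such a contrastive objective is an InfoNCE-style loss over a batch of embeddings, where the matching pair should get a low energy (high similarity) and all other pairs in the batch act as negatives. The sketch below is a generic illustration under these assumptions, not the exact loss of any specific paper discussed here.

```python
# Generic InfoNCE-style contrastive loss over a batch of paired embeddings
# (illustration only; not the exact formulation of any specific method).
import torch
import torch.nn.functional as F

def info_nce(z_a: torch.Tensor, z_b: torch.Tensor, temperature: float = 0.07):
    """z_a[i] and z_b[i] form a positive pair; every other z_b[j] acts as a negative."""
    z_a = F.normalize(z_a, dim=-1)
    z_b = F.normalize(z_b, dim=-1)
    logits = z_a @ z_b.t() / temperature        # pairwise cosine similarities
    targets = torch.arange(z_a.size(0))         # the diagonal holds the positives
    return F.cross_entropy(logits, targets)

# Example: 8 positive pairs of 128-dimensional embeddings.
loss = info_nce(torch.randn(8, 128), torch.randn(8, 128))
print(loss.item())
```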
Self-supervised representation learning therefore relies on negative samples to prevent collapsing to trivial solutions. So the best idea would be to get rid of the hard negatives, and BYOL (Grill et al., 2020a) is one approach that achieved exactly this. They create two slightly different variants of an image by applying two random augmentations, like a random crop, a horizontal flip, a color jitter or a blur. A big difference to the Siamese network is that they use different parameters in the encoders, so-called online and target parameters. The target parameters are never learned; they are just copied over from the online parameters using an exponential moving average, so the target network is a lagged version of the online network. BYOL manages to learn a representation of an image without using negative pairs, just by predicting previous versions of its outputs. Still, the authors note that BYOL remains dependent on existing sets of augmentations, that these augmentations require human intervention, and that automating the search for these augmentations would be an important next step, if it is even possible (Grill et al., 2020a).
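The online/target split can be sketched in a few lines: the target encoder is never updated by gradients, only by an exponential moving average (EMA) of the online weights. This is a simplified sketch under those assumptions, not the full BYOL training loop (the predictor head, loss and optimizer step are omitted).

```python
# Simplified sketch of BYOL's online/target weight update (EMA only;
# predictor head, loss and optimizer step are omitted).
import copy
import torch

def make_target(online: torch.nn.Module) -> torch.nn.Module:
    target = copy.deepcopy(online)
    for p in target.parameters():
        p.requires_grad_(False)   # the target network receives no gradients
    return target

@torch.no_grad()
def ema_update(online: torch.nn.Module, target: torch.nn.Module, tau: float = 0.996):
    # target <- tau * target + (1 - tau) * online
    for p_o, p_t in zip(online.parameters(), target.parameters()):
        p_t.mul_(tau).add_(p_o, alpha=1.0 - tau)

online_encoder = torch.nn.Linear(32, 16)   # stand-in for a real image encoder
target_encoder = make_target(online_encoder)
# ... after every optimizer step on the online encoder:
ema_update(online_encoder, target_encoder)
```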
He et al. (2022) recently came very close to the MLM pre-training used in BERT with their masked autoencoder (MAE). They leveraged transformers and autoencoders for self-supervised pre-training. An autoencoder consists of an encoder that maps the observed signal to a latent representation and a decoder that reconstructs the original signal from the latent representation. The MAE is a form of denoising autoencoding, exactly like MLM. Their approach is to divide an image into, for example, 16 × 16 patches, remove 75% of the patches and use only the remaining 25% in their large encoder. Importantly, position embeddings are also used in the encoder. The input of the decoder is again the full set of tokens, consisting of the unmasked and the masked tokens, so the MAE has to reconstruct the input by predicting the pixel values for each masked patch. Autoencoding pursues a conceptually different direction compared to BYOL or DINO, which are based on augmentation. The reconstructions still look somewhat blurry, but the learned representations are already very rich. It is also interesting to note that BERT masks only 15% of its input, whereas MAE removes 75%.
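The patching-and-masking step can be illustrated as follows. This is a reduced sketch assuming square images, a patch size of 16 and a 75% mask ratio; the actual MAE encoder, decoder and reconstruction loss are left out.

```python
# Reduced sketch of MAE-style patching and random masking
# (encoder, decoder and reconstruction loss are left out).
import torch

def patchify(images: torch.Tensor, patch: int = 16) -> torch.Tensor:
    """(B, C, H, W) -> (B, num_patches, patch*patch*C); assumes H and W divide by patch."""
    b, c, h, w = images.shape
    x = images.unfold(2, patch, patch).unfold(3, patch, patch)   # (B, C, H/p, W/p, p, p)
    x = x.permute(0, 2, 3, 1, 4, 5).reshape(b, -1, c * patch * patch)
    return x

def random_mask(patches: torch.Tensor, mask_ratio: float = 0.75):
    """Keep a random 25% of the patches; return the kept patches and their indices."""
    b, n, _ = patches.shape
    n_keep = int(n * (1 - mask_ratio))
    scores = torch.rand(b, n)                      # random score per patch
    keep_idx = scores.argsort(dim=1)[:, :n_keep]   # patches with the lowest scores are kept
    kept = torch.gather(patches, 1,
                        keep_idx.unsqueeze(-1).expand(-1, -1, patches.size(-1)))
    return kept, keep_idx

imgs = torch.randn(2, 3, 224, 224)
visible, idx = random_mask(patchify(imgs))
print(visible.shape)   # (2, 49, 768): 25% of the 196 patches, each holding 16*16*3 values
```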
Dual encoder models like CLIP (Radford et al., 2021a) and ALIGN (Jia et al., 2021b) demonstrated that contrastive objectives on noisy image-text pairs can lead to strong image and text representations. One thing to mention is that contrastive objectives are easier to implement in vision-language models (VLMs) than in pure CV, which comes from the fact that VLMs use image-text pairs. As a dual encoder, CLIP encodes the image and the text, and by construction the text which corresponds to the image (or vice versa) achieves the highest similarity, while the other texts have a lower similarity. So hard negatives are already available and one does not have to search for them. Through SSL the models have already learned a good representation of the given input, but fine-tuning the models leads to even better results. This chapter will only provide a rough sketch, since fine-tuning heavily depends on the model and the downstream task; fine-tuning will also be shown in later chapters. Fine-tuning means updating the weights of a pre-trained model by training on a supervised (labeled) dataset for a specific downstream task. A huge amount of labeled data is needed to fine-tune a model. This is also the main disadvantage of fine-tuning, because one needs a new large dataset for every possible downstream task. After pre-training and fine-tuning the models, there is a need to compare them, because one always seeks to find the best model among all competitors. This need led to the creation of datasets for test purposes, which are often called benchmarks.

2.3.3 Benchmarks

As models got better over time, because of bigger datasets or better pre-training tasks, it is important to create and use new benchmarks.
Interestingly, there are also benchmarks which rely only on zero-shot performance. Zero-shot learning (ZSL) is a problem setup in machine learning where, at test time, a model observes samples from classes not seen during training. So it has to complete a task without having received any training examples for it, and therefore has to generalize to a novel category of samples. The most common approach, however, is to evaluate on a part of the dataset which was not used to train the model. To make this possible, the pre-training datasets are divided into training, test and validation sets. It is clear that the models must not be tested on the training data. This splitting results in so-called held-out data, but Rajpurkar et al. (2018) showed that these held-out datasets are often not comprehensive and contain the same biases as the training data. Recht et al. (2019) also argued that these held-out datasets may overestimate real-world performance. Something else to consider is that pre-training on large internet datasets may lead to unintentional overlap between pre-training data and downstream tasks. Because of this, several studies (Radford et al., 2021a; Yu et al., 2022a; Brown et al., 2020) conducted a de-duplication analysis.
The CLIP analysis resulted in a median overlap of 2.2% and an average overlap of 3.2%, but the authors also observed that the overall accuracy is rarely shifted by more than 0.1% (Radford et al., 2021a). Mahajan et al. (2018) and Kolesnikov et al. (2019) came to similar results, but it is still something to keep in mind. Some of the already mentioned datasets, like COCO and the ImageNet versions, are often used for CV or VLMs. Almost every state-of-the-art CV model uses a classifier pre-trained on an ImageNet-based dataset and is benchmarked on the validation sets of that dataset. Another small downer is that the models of the big companies are usually trained on different datasets, even though they are at least compared on the same benchmarks, so the comparison seems a bit odd: the better performance of a model may simply come from a different pre-training dataset.

2.3.3.1 Natural Language Processing Benchmarks
2.3.3.1.1 (Super)GLUE

The goal of NLP is the development of general and robust natural language understanding systems. Through SSL, models gain a good "understanding" of language in general. To benchmark this "understanding", the General Language Understanding Evaluation (GLUE) benchmark was created. It is a collection of nine different task datasets, which can be divided into single-sentence tasks, similarity and paraphrase tasks, and inference tasks. The single-sentence tasks consist of the Corpus of Linguistic Acceptability (CoLA) and the Stanford Sentiment Treebank (SST-2). Each example in CoLA is a sequence of words annotated with whether it is a grammatical English sentence. SST-2 uses sentences from movie reviews and human annotations of their sentiment; the task is to predict the sentiment of a given sentence. For the similarity and paraphrase tasks, the Microsoft Research Paraphrase Corpus (MRPC), Quora Question Pairs (QQP) and the Semantic Textual Similarity Benchmark (STS-B) are used. MRPC is a corpus of sentence pairs automatically extracted from online news sources, with human annotations for whether the sentences in the pair are semantically equivalent; the model has to predict whether sentence B is a paraphrase of sentence A. The STS-B sub-task dataset consists of a collection of sentence pairs drawn from news headlines, video and image captions, and natural language inference data.
Each pair is human-annotated with a similarity score from 1 to 5, and the task for the model is to predict these similarity scores. QQP is a collection of question pairs from the community question-answering website Quora; here the model has to predict whether a pair of questions is semantically equivalent. Lastly, the Multi-Genre Natural Language Inference Corpus (MNLI), the QNLI dataset derived from the Stanford Question Answering Dataset, the Recognizing Textual Entailment (RTE) dataset and the Winograd Schema Challenge-based WNLI are used in the inference tasks. MNLI is a crowdsourced collection of sentence pairs with textual entailment annotations; the task is to predict whether the premise entails the hypothesis (entailment), contradicts the hypothesis (contradiction), or neither (neutral). QNLI is a question-answering dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph contains the answer to the corresponding question; the task is to determine whether the context sentence contains the answer to the question. RTE comes from a series of annual textual entailment challenges. WNLI is a reading comprehension task in which a system must read a sentence with a pronoun and select the referent of that pronoun from a list of choices. A nice topping is that GLUE also provides a leaderboard with a human benchmark, so the models can compete against each other and against a human baseline. The following table gives a short summary of all GLUE tasks (dataset, description, data example, metric):

CoLA: Is the sentence grammatical or ungrammatical? Example: "This building is than that one." = Ungrammatical. Metric: Matthews correlation.
SST-2: Is the movie review positive, negative, or neutral? Example: "The movie is funny, smart, visually inventive, and most of all, alive." = .93056 (Very Positive). Metric: Accuracy.
MRPC: Is sentence B a paraphrase of sentence A? Example (sentence B): "The island reported another 35 probable cases yesterday, taking its total to 418." = A Paraphrase. Metric: Accuracy / F1.
STS-B: How similar are sentences A and B? Example (sentence B): "A herd of elephants are walking along a trail." = 4.6 (Very Similar). Metric: Pearson / Spearman correlation.
QQP: Are the two questions similar? Example (question B): "How can Internet speed be increased by hacking through DNS?" = Not Similar. Metric: Accuracy / F1.
MNLI-mm: Does sentence A entail or contradict sentence B? Example: A) "Tourist Information offices can be very helpful." B) "Tourist Information offices are never of any help." = Contradiction. Metric: Accuracy.
QNLI: Does sentence B contain the answer to the question in sentence A? Example: A) "What is essential for the mating of the elements that create radio waves?" B) "... to the electromagnetic field." = Answerable. Metric: Accuracy.
RTE: Does sentence A entail sentence B? Example: A) "In 2003, Yunus brought the microcredit revolution to the streets of Bangladesh to support more than 50,000 beggars, whom the Grameen Bank respectfully calls Struggling Members." B) "Yunus supported more than 50,000 Struggling Members." = Entailed. Metric: Accuracy.
WNLI: Sentence B replaces sentence A's ambiguous pronoun with one of the nouns; is this the correct noun? Example: A) "Lily spoke to Donna, breaking her concentration." B) "Lily spoke to Donna, breaking Lily's concentration." = Incorrect Referent. Metric: Accuracy.

After a short period of time, the models started to surpass the human benchmark, which led to the creation of SuperGLUE. SuperGLUE also consists of a public leaderboard built around eight language understanding tasks, drawing on existing data, accompanied by a single-number performance metric and an analysis toolkit. SuperGLUE surpassed GLUE through more challenging tasks, more diverse task formats, comprehensive human baselines, improved code support and refined usage rules. The following figure gives a short summary of the SuperGLUE tasks.

FIGURE 2.31: Example instances of the eight SuperGLUE tasks (BoolQ, CB, COPA, MultiRC, ReCoRD, RTE, WiC and WSC); taken from https://mccormickml.com

The GLUE and SuperGLUE tasks are more or less reduced to classification problems. One might argue whether this is really general language understanding, but we will see other benchmarks which try to evaluate that in another way.
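For orientation, both benchmark collections are distributed in machine-readable form. Assuming the Hugging Face datasets and evaluate libraries are installed, individual tasks and their official metrics can be loaded as in the sketch below; the configuration names "cola" and "boolq" refer to the hub versions of these tasks, and exact arguments may vary slightly between library versions:

```python
from datasets import load_dataset
import evaluate

# CoLA from GLUE: single sentences labelled as grammatical (1) or not (0)
cola = load_dataset("glue", "cola")
print(cola["train"][0])          # {'sentence': ..., 'label': ..., 'idx': ...}

# BoolQ from SuperGLUE: yes/no questions about a short passage
boolq = load_dataset("super_glue", "boolq")
print(boolq["validation"][0])

# the matching official metric (Matthews correlation for CoLA)
metric = evaluate.load("glue", "cola")
print(metric.compute(predictions=[0, 1, 1], references=[0, 1, 0]))
```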
However, it is also of interest to check whether the models understand what they are reading. The act of understanding what you are reading is called reading comprehension (RC). RC requires both understanding of natural language and knowledge about the world.

2.3.3.1.2 Stanford Question Answering Dataset (SQuAD) (1.0 & 2.0)

Rajpurkar et al. (2016) introduced the Stanford Question Answering Dataset (SQuAD), a large reading comprehension dataset of Wikipedia articles with human-annotated question-answer pairs. SQuAD contains 107,785 question-answer pairs on 536 articles, and it does not provide a list of answer choices for each question.
The model must select the answer from all possible spans in the passage, thus needing to cope with a fairly large number of candidates. A problem is that the answer is guaranteed to exist in the context document. To address this weakness, Rajpurkar et al. (2018) presented SQuAD 2.0, the latest version of SQuAD. SQuAD 2.0 combines the existing SQuAD data with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. The contribution of Rajpurkar et al. (2018) to NLP is not only that they provide a deeper glimpse into the workings of QA systems; they also facilitated the creation of more non-English datasets. Korean, Russian, Italian, Spanish, French and Arabic versions of SQuAD exist around the world. XQuAD, MLQA and TyDi are multilingual question-answering datasets; XQuAD, for example, is a subset of SQuAD translated into 10 different languages by professional translators. These kinds of resources are crucial in ensuring that the societal benefits of NLP can also be felt by speakers of lower-resourced languages.
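SQuAD systems are usually scored with exact match (EM) and a token-level F1 between the predicted and the gold answer span. The following is a stripped-down sketch of those two scores; the official evaluation script additionally normalizes articles and punctuation, which is omitted here:

```python
from collections import Counter

def normalize(text: str) -> list:
    """Very light normalization: lower-case and split into tokens."""
    return text.lower().split()

def exact_match(prediction: str, ground_truth: str) -> float:
    return float(normalize(prediction) == normalize(ground_truth))

def f1_score(prediction: str, ground_truth: str) -> float:
    pred_tokens = normalize(prediction)
    gold_tokens = normalize(ground_truth)
    common = Counter(pred_tokens) & Counter(gold_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)

print(exact_match("the Grameen Bank", "Grameen Bank"))          # 0.0
print(round(f1_score("the Grameen Bank", "Grameen Bank"), 2))   # 0.8
```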
2.3.3.1.3 Beyond the Imitation Game Benchmark (BIG-bench)

The benchmarks mentioned so far are rather old compared to the Beyond the Imitation Game Benchmark (BIG-bench) (Srivastava et al., 2022). It is a collaborative benchmark intended to probe large language models and extrapolate their future capabilities, and it already contains more than 200 tasks. The authors claim that current language-modeling benchmarks are insufficient to satisfy our need to understand the behavior of language models and to predict their future behavior. They mainly provide three reasons for this. One of them is the short useful lifespan: when human-equivalent performance is reached on a benchmark, it is often discontinued or replaced, a dynamic one might call "challenge-solve-and-replace" evaluation. To prevent this, they encourage new task submissions; literally everybody can submit a task to BIG-bench, which is why they call it a living benchmark. The review of submitted tasks is based on ten criteria, including, for example, "justification": one has to give background motivating why this is an important capability of large language models to quantify. With the inclusion of small tasks they want to improve the diversity of topics covered and enable domain experts to contribute tasks without the difficulties of distributed human labeling.
Another reason for this insufficiency is that the other benchmarks are narrowly targeted, and their targets are often ones that language models are already known to be able to perform. It is therefore not possible to identify new and unexpected capabilities that language models may develop with increased scale, or to characterize the breadth of current capabilities. Finally, many current benchmarks use data collected through human labeling that is not performed by experts or by the task authors. The BIG-bench tasks are primarily intended to evaluate pre-trained models without task-specific fine-tuning. By focusing on such tasks in the zero- and few-shot evaluation setting, it becomes possible to provide meaningful scores even for tasks with a very small number of examples. The "everybody can submit" strategy also leads to the inclusion of a variety of tasks covering non-English languages. So far, large language models like GPT-3 and PaLM perform poorly on BIG-bench relative to expert humans, which is maybe a good sign for the future; but then again, superhuman performance on the SuperGLUE benchmark was achieved less than 18 months after it was released.

2.3.3.1.4 WMT

There is a family of datasets which is the most popular choice for benchmarking machine translation systems. The Workshop on Machine Translation (WMT) is the main event for machine translation and machine translation research; this conference is held annually.
WMT includes competitions on different aspects of machine translation, known as shared tasks. Typically, the task organisers provide datasets and instructions, then teams can submit the output of their models, and the submissions are ranked with human evaluation. Most of the models are evaluated on bilingual translation like English-to-German, but there are also tri-lingual tasks, such as using English to improve Russian-to-Chinese machine translation. One of the most popular NLP metrics is the BLEU score, and this metric is also used in the WMT tasks. It is based on the idea that the closer the predicted sentence is to the human-generated target sentence, the better it is. BLEU scores are between 0 and 1, but a score of 0.6 or 0.7 is considered the best you can achieve.
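BLEU combines modified n-gram precisions with a brevity penalty. As a sketch, and assuming the sacrebleu package (a scorer commonly used in recent WMT evaluations) is installed, a corpus-level score can be computed as follows; note that sacrebleu reports the score on a 0-100 scale rather than 0-1, and the sentences below are made up:

```python
import sacrebleu

hypotheses = [
    "The cat sits on the mat.",
    "There is a dog in the garden.",
]
references = [[  # one list per reference set, aligned with the hypotheses
    "The cat is sitting on the mat.",
    "A dog is in the garden.",
]]

bleu = sacrebleu.corpus_bleu(hypotheses, references)
print(bleu.score)  # corpus BLEU on the 0-100 scale
```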
Problematic is that Bowman and Dahl (2021) claim that the evaluation for many natural language understanding (NLU) tasks is broken. They argue that unreliable and biased systems score so highly on standard benchmarks that there is little room for researchers who develop better systems to demonstrate their improvements. They provide four criteria to handle this:

1. Good performance on the benchmark should imply robust in-domain performance on the task.
2. Benchmark examples should be accurately and unambiguously annotated.
3. Benchmarks should offer adequate statistical power.
4. Benchmarks should reveal plausibly harmful social biases in systems, and should not incentivize the creation of biased systems.

Building new benchmarks that improve upon these four axes is likely to be quite difficult.

2.3.3.1.5 CheckList

Inspired by principles of behavioral testing in software engineering, Ribeiro et al. (2020) introduced CheckList, a model-agnostic and task-agnostic methodology for testing NLP models. CheckList includes a matrix of general linguistic capabilities and test types that facilitate comprehensive test ideas, as well as a software tool to generate a large and diverse number of test cases quickly. To break down potential capability failures into specific behaviors, CheckList introduces three different test types. A Minimum Functionality Test (MFT), inspired by unit tests in software engineering, is a collection of simple examples used to check a behavior within a capability. In an Invariance Test (INV), label-preserving perturbations are applied to the inputs and the model prediction is expected to remain the same. A Directional Expectation Test (DIR) is similar, except that the label is expected to change in a certain way. Tests created with CheckList can be applied to any model, making them easy to incorporate into current benchmarks or evaluation pipelines, and CheckList is open source. The goal was to create an evaluation which goes beyond just accuracy on held-out data.
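To make the test types concrete, here is a small hand-rolled illustration of an MFT and an INV check around a hypothetical predict_sentiment function; this is a sketch of the idea, not the CheckList library's actual API:

```python
def run_mft(predict, examples, expected_label):
    """Minimum Functionality Test: simple inputs must get the expected label."""
    return [x for x in examples if predict(x) != expected_label]

def run_inv(predict, pairs):
    """Invariance test: a label-preserving perturbation (e.g. swapping a
    person's name) should not change the prediction."""
    return [(a, b) for a, b in pairs if predict(a) != predict(b)]

# hypothetical toy model: anything containing "great" is positive
predict_sentiment = lambda s: "positive" if "great" in s else "negative"

# failed MFT cases (empty list means the capability check passed)
print(run_mft(predict_sentiment,
              ["This is a great movie", "What a great idea"], "positive"))
# failed INV cases (pairs whose predictions differ after the perturbation)
print(run_inv(predict_sentiment,
              [("Anna had a great time", "Maria had a great time")]))
```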
2.3.3.2 Computer Vision Benchmarks

CV models try to solve visual tasks, i.e. tasks which can be solved from visual input alone. A visual task can often be framed as a classification problem, which is then called image classification, but there are also numerous other applications of CV. This chapter focuses on image classification, semantic segmentation and object detection with their usual benchmark datasets.

2.3.3.2.1 ImageNet Versions

It is not only common to pre-train models on ImageNet datasets, it is also common to benchmark models on them, and many different variants of ImageNet exist. There is ImageNet-R, a version with non-natural images such as art, cartoons and sketches, ImageNet-A, a more challenging version built from adversarial images (Goodfellow et al., 2014d), and ImageNet-V2 (Recht et al., 2019). The last one was created to check whether there is over-fitting on the classic pre-training ImageNet dataset.
The authors followed the creation process of the original dataset and tested to what extent current classification models generalize to new data. Recht et al. (2019) found accuracy drops for all models and suggested that these drops are not caused by adaptivity, but by the models' inability to generalize to slightly "harder" images than those found in the original test sets. The goal of image classification is to classify an image by assigning it a label; typically, image classification refers to images in which only one object appears. To assess the performance, one mainly uses Top-1 accuracy, where the model's answer with the highest probability must exactly match the expected answer, or Top-5 accuracy, where any of the five highest-probability answers must match the expected answer. Beyer et al. (2020) tried to answer the question "Are we done with ImageNet?" in their paper. Many images of the ImageNet dataset contain a clear view of a single object of interest: for these, a single label is an appropriate description of their content. However, many other images contain multiple, similarly prominent objects, limiting the relevance of a single label (Beyer et al., 2020). In these cases, the ImageNet label is just one of many equally valid descriptions of the image, and as a result an image classifier can be penalized for producing a correct description that happens not to coincide with the one chosen by the ImageNet label. In short, a single label per image is not sufficient in many cases.
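As a small sketch of the Top-1 and Top-5 accuracies described above, top-k accuracy can be computed directly from a matrix of class scores; the toy scores and labels below are made up for illustration:

```python
import numpy as np

def top_k_accuracy(scores: np.ndarray, labels: np.ndarray, k: int = 5) -> float:
    """scores: (N, C) class scores, labels: (N,) integer ground truth."""
    # indices of the k highest-scoring classes per image
    top_k = np.argsort(scores, axis=1)[:, -k:]
    hits = np.any(top_k == labels[:, None], axis=1)
    return float(hits.mean())

scores = np.array([[0.1, 0.7, 0.2],    # predicted class 1
                   [0.5, 0.2, 0.3]])   # predicted class 0
labels = np.array([1, 2])
print(top_k_accuracy(scores, labels, k=1))  # 0.5: only the first image is a hit
print(top_k_accuracy(scores, labels, k=2))  # 1.0: class 2 is in the top 2 of image 2
```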
They concluded with both yes and no as an answer to the question "Are we done with ImageNet?". The shortcomings of the ImageNet labels and their accuracy were identified, and they provided a new ImageNet validation set, ReaL ("Reassessed Labels"), and a new metric, called ReaL accuracy (Beyer et al., 2020). The ReaL accuracy measures the precision of the model's top-1 prediction, which is deemed correct if it is included in the set of ReaL labels. These findings suggest that although the original set of labels may be nearing the end of its useful life, ImageNet and its ReaL labels can readily benchmark progress in visual recognition for the foreseeable future. Adding a localization task to the classification task results in object detection. It is used to analyze more realistic cases, like those mentioned above, in which multiple objects may or may not exist in an image. The location of an object is typically represented by a bounding box.

2.3.3.2.2 MS-COCO & Object365

In recent years, the Microsoft COCO dataset and the Object365 dataset have become the standards to evaluate object detection algorithms, but it is also possible to use an ImageNet dataset. The primary challenge metric is called mean Average Precision (mAP) at Intersection over Union (IoU) = .50:.05:.95.
The IoU is the intersection of the predicted and ground-truth boxes divided by the union of the predicted and ground-truth boxes. IoU values, also called the Jaccard index, range from 0 to 1, where 0 means no overlap and 1 means perfect overlap. But how is precision captured in the context of object detection? Precision is the ratio True Positive / (True Positive + False Positive), and with the help of an IoU threshold it is possible to decide whether a prediction is a True Positive (TP), a False Positive (FP), or a False Negative (FN). For example, with an IoU threshold of α = 0.5, a detection with IoU = 0.96 counts as a true positive, one with IoU = 0.22 as a false positive, and a missed object (IoU = 0.00) as a false negative. The notation .50:.05:.95 means that one uses 10 IoU thresholds of {0.50, 0.55, 0.60, ..., 0.95}.
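A minimal sketch of the box IoU computation and of the thresholding decision; the corner coordinates and the 0.5 threshold are illustrative:

```python
def box_iou(box_a, box_b):
    """IoU (Jaccard index) of two boxes given as (x1, y1, x2, y2)."""
    # coordinates of the intersection rectangle
    x1 = max(box_a[0], box_b[0])
    y1 = max(box_a[1], box_b[1])
    x2 = min(box_a[2], box_b[2])
    y2 = min(box_a[3], box_b[3])

    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0

pred = (10, 10, 60, 60)
truth = (20, 20, 70, 70)
iou = box_iou(pred, truth)
print(round(iou, 3))                                   # ~0.471
print("true positive" if iou >= 0.5 else "false positive")
```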
COCO uses this averaging over thresholds as its primary metric because it rewards detectors with better localization (Microsoft, 2019). Object detection and image segmentation are both tasks concerned with localizing objects of interest in an image, but in contrast to object detection, image segmentation focuses on pixel-level grouping of different semantics. Image segmentation can be split into various tasks, including instance segmentation, panoptic segmentation, and semantic segmentation. Instance segmentation requires the identification and segmentation of individual instances in an image. Semantic segmentation requires segmenting all the pixels in the image based on their class label. Panoptic segmentation is a combination of semantic and instance segmentation: the task is to classify all the pixels belonging to a class label, but also to identify which instance of the class they belong to. Panoptic and instance segmentation are often evaluated on COCO.

2.3.3.2.3 ADE20k

Semantic segmentation can be evaluated on ADE20K (Zhou et al., 2017).
ADE are the first three letters of the name Adela Barriuso, who single-handedly annotated the entire dataset, and 20K refers to the roughly 20,000 images in the dataset. The dataset exhibits high annotation complexity: any image in ADE20K contains at least five objects, and the maximum number of object instances per image reaches 273. To assess the performance of a model on the ADE20K dataset, one uses the mean IoU. It indicates the IoU between the predicted and ground-truth pixels, averaged over all classes. In contrast to the object detection task, the definition of TP, FP, and FN is slightly different, as it is not based on a predefined threshold. TP is now the area of intersection between the ground truth and the segmentation mask. FP is the predicted area outside the ground truth. FN is the number of pixels in the ground-truth area that the model failed to predict. The calculation of the IoU is the same as in object detection tasks: it is the intersection of the predicted and ground-truth regions, i.e. TP, divided by their union, which is essentially TP + FN + FP.
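A minimal sketch of this pixel-wise IoU and the resulting mean IoU, assuming the prediction and the ground truth are given as integer class maps of equal shape (the toy arrays are illustrative only):

import numpy as np

def mean_iou(pred, target, num_classes):
    """Mean IoU over classes; pred and target are integer class maps of equal shape."""
    ious = []
    for c in range(num_classes):
        tp = np.sum((pred == c) & (target == c))  # correctly predicted pixels of class c
        fp = np.sum((pred == c) & (target != c))  # predicted area outside the ground truth
        fn = np.sum((pred != c) & (target == c))  # ground-truth pixels the model missed
        denom = tp + fp + fn
        if denom > 0:                             # ignore classes absent from both maps
            ious.append(tp / denom)
    return float(np.mean(ious))

pred   = np.array([[0, 0, 1], [0, 1, 1]])
target = np.array([[0, 1, 1], [0, 1, 1]])
print(mean_iou(pred, target, num_classes=2))      # (2/3 + 3/4) / 2 ≈ 0.708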
An example is shown in Figure 2.32 below.

FIGURE 2.32: Ground-truth mask and predicted mask with TP and FN regions (taken from https://learnopencv.com).

2.3.3.3 Multi-Modal Benchmarks

Visual understanding goes well beyond object recognition or semantic segmentation. With one glance at an image, a human can effortlessly imagine the world beyond the pixels. This is emphasized by the quote "a picture says more than a thousand words". A high order of cognition and commonsense reasoning about the world is required to infer people's actions, goals, and mental states. To solve visual understanding tasks, a model needs to leverage more than one modality.

2.3.3.3.1 Visual Commonsense Reasoning (VCR)

Visual understanding tasks require seamless integration of recognition and cognition, and this can be formalized as Visual Commonsense Reasoning (VCR). Zellers et al. (2019) introduce a new dataset called VCR. It consists of 290k multiple-choice QA problems derived from 110k movie scenes.
The key recipe for generating non-trivial and high-quality problems at scale is Adversarial Matching. Incorrect choices are obtained via maximum-weight bipartite matching between queries and responses. This matching transforms rich annotations into multiple-choice questions with minimal bias. VCR is cast as a four-way multiple-choice task. The underlying scenes come from the Large Scale Movie Description Challenge and YouTube movie clips. The authors searched for interesting and diverse situations; to ensure this, they trained and applied an "interestingness filter". The most interesting images were passed to workers on Amazon Mechanical Turk. Additional context in the form of video captions was given to the workers. After reading this, they had to propose one to three questions about the image. For each question, they had to provide a reasonable answer and a rationale. This results in an underlying dataset with high agreement and diversity of reasoning. Almost every answer and rationale is unique. To make these cognition-level questions simple to ask, and to avoid the clunkiness of referring expressions, VCR's language integrates object tags ([person2]) and explicitly excludes referring expressions ('the woman on the right'). These object tags are detected with Mask-RCNN.
The following types of questions are in the benchmark: 38% Explanation ('Why is [person11] wearing sunglasses inside?'), 24% Activity ('What are [person1] and [person2] doing?'), 13% Temporal ('What will [person6] do after unpacking the groceries?'), 8% Mental, 7% Role, 5% Scene, 5% Hypothetical. In this setup, a model is provided a question and has to pick the best answer out of four choices; only one of the four is correct. If the model answers correctly, a new question, along with the correct answer, is provided, and the model has to justify it by picking the best rationale out of four choices. The first part is called Question Answering (Q → A) and the second part Answer Justification (QA → R). Both parts are combined into a Q → AR metric, in which a model only gets a question right if it answers correctly and picks the right rationale. If it gets either the answer or the rationale wrong, the entire prediction counts as wrong. Models are evaluated in terms of accuracy. The results at release were that humans find VCR easy (over 90% accuracy), while state-of-the-art vision models struggle (around 45%). At the moment of writing, the best model achieves 85.5 on (Q → A), 87.5 on (QA → R) and 74.9 on Q → AR.
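As a simplified sketch of how the joint Q → AR score follows from the two sub-tasks (hypothetical per-example correctness flags; in the official protocol the rationale step is conditioned on the ground-truth answer):

# Hypothetical flags: was the answer picked correctly, was the rationale picked correctly?
answer_correct    = [True, True, False, True]
rationale_correct = [True, False, True, True]

n = len(answer_correct)
q_to_a  = sum(answer_correct) / n
qa_to_r = sum(rationale_correct) / n
q_to_ar = sum(a and r for a, r in zip(answer_correct, rationale_correct)) / n  # both must be right

print(q_to_a, qa_to_r, q_to_ar)  # 0.75 0.75 0.5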
So the models are closing the gap, but VCR is still far from solved. A "simpler" approach to evaluating vision-language models is to ask questions about an image that do not require commonsense reasoning.

2.3.3.3.2 Visual Question Answering 1.0 & 2.0 (VQA)

For this reason, Antol et al. (2015) created an open-ended answering task and a multiple-choice task. Their dataset contains roughly 250k images, over 760k questions, and around 10M answers. 204k images are taken from the MS COCO dataset, but newly created datasets are used as well. Three questions were collected for each image or scene, and each question was answered by ten subjects along with their confidence. "What"-, "how"- and "is"-questions are mainly used in the benchmark. However, the dataset had major flaws in its creation.
A model which blindly answers "yes", without reading the rest of the question or looking at the associated image, results in a VQA accuracy of 87%; the most common sport answer "tennis" was the correct answer for 41% of the questions starting with "What sport is", and "2" is the correct answer for 39% of the questions starting with "How many" (Antol et al., 2015). Zhang et al. (2016b) pointed out a particular 'visual priming bias' in the VQA dataset and showed that language provides a strong prior that can result in good superficial performance without the underlying models truly understanding the visual content. They collected a balanced dataset containing pairs of complementary scenes to reduce or eliminate this strong language prior. Goyal et al. (2017) did the same and created a second iteration of the Visual Question Answering Dataset and Challenge (VQA v2.0). They balanced the popular VQA dataset (Antol et al., 2015) by collecting complementary images such that every question in the balanced dataset is associated with not just a single image, but rather a pair of similar images that result in two different answers to the question. The dataset is by construction more balanced than the original VQA dataset and has approximately twice the number of image-question pairs.
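VQA accuracy is typically computed with a consensus rule over the ten human answers collected per question; the following is a minimal sketch of the commonly used min(·/3, 1) formula (a simplification of the official evaluation; the example answers are made up):

def vqa_accuracy(predicted, human_answers):
    """Consensus VQA accuracy: an answer counts as fully correct if at least three
    of the (typically ten) annotators gave it, and as partially correct otherwise."""
    matches = sum(a == predicted for a in human_answers)
    return min(matches / 3.0, 1.0)

human = ["tennis"] * 6 + ["baseball"] * 3 + ["soccer"]
print(vqa_accuracy("tennis", human))    # 1.0
print(vqa_accuracy("baseball", human))  # 1.0
print(vqa_accuracy("soccer", human))    # ~0.33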
2.3.3.4 GQA

Hudson and Manning (2019) introduced the GQA dataset for real-world visual reasoning and compositional question answering. It consists of 113K images and 22M questions of assorted types and varying degrees of compositionality, measuring performance on an array of reasoning skills such as object and attribute recognition, transitive relation tracking, spatial reasoning, logical inference and comparisons. They also proposed Consistency, Validity and Plausibility as new measures to get more insight into models' behavior and performance. Consistency measures the consistency of responses across different questions; to achieve high consistency, a model may require a deeper understanding of the question semantics in the context of the image. The validity metric checks whether a given answer is within the scope of the question, e.g. responding with some color to a color question. The plausibility score goes a step further, measuring whether the answer is reasonable, or makes sense, given the question (e.g. elephants usually do not eat pizza). They also compared GQA and VQA 2.0 and came to the conclusion that the questions of GQA are objective, unambiguous, more compositional and can be answered from the images alone, potentially making this benchmark more controlled and convenient for making research progress on.
Conversely, VQA questions tend to be a bit more ambiguous and subjective, at times with no clear and conclusive answer. Finally, GQA provides more questions for each image and thus covers it more thoroughly than VQA.

2.3.3.4.1 Generative Benchmarks

Almost everybody is talking right now about generative models like DALL-E 2, Imagen or Parti; it seems like every month a new one is presented. But how can we compare these models? Automatic image quality and automatic image-text alignment are two reasonable evaluation criteria. The Fréchet Inception Distance (FID) can be used as the primary automated metric for measuring image quality. It compares the distribution of generated images with the distribution of real images that were used to train the generator. A small value is desirable, as it is a distance measure. Text-image fit can be captured through automated captioning evaluation: an image produced by the model is captioned by a separate image captioning model, and the similarity of the input prompt and the generated caption is then assessed via BLEU, CIDEr, METEOR and SPICE. In addition, human evaluation is carried out.
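A minimal sketch of the FID computation just described, assuming the Inception feature vectors of the real and generated images have already been extracted (the random features below are placeholders):

import numpy as np
from scipy import linalg

def frechet_inception_distance(feats_real, feats_gen):
    """FID between two sets of Inception features, each of shape [n, d]."""
    mu_r, mu_g = feats_real.mean(axis=0), feats_gen.mean(axis=0)
    cov_r = np.cov(feats_real, rowvar=False)
    cov_g = np.cov(feats_gen, rowvar=False)
    covmean = linalg.sqrtm(cov_r @ cov_g).real   # matrix square root; drop numeric imaginary parts
    diff = mu_r - mu_g
    return float(diff @ diff + np.trace(cov_r + cov_g - 2.0 * covmean))

# Identical feature distributions give a distance close to zero.
rng = np.random.default_rng(0)
feats = rng.normal(size=(1000, 64))
print(frechet_inception_distance(feats, feats.copy()))  # ~0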
In the human evaluation, different generative models are used with the same prompts, and the human rater is asked to choose which output is a higher-quality image and which is a better match to the input prompt. One always has to keep in mind that the published images of generative models are usually "cherry-picked": they do not typically represent, for example, a single-shot interaction in which the model directly produces such an image. To make this transparent, Yu et al. (2022a) showed their way of "growing the cherry tree".

FIGURE 2.33: Prompts of increasing length and detail (e.g. a smiling sloth wearing a leather jacket, a cowboy hat, a kilt and a bowtie, holding a quarterstaff and a big book, standing on grass in front of a shiny VW van with a cityscape painted on it), illustrating how the final image is refined step by step (taken from the Parti paper).

2.3.3.4.2 PartiPrompts, DrawBench, Localized Narratives

In a sense, this is a form of model whispering, as one stretches such models to their limits. Besides that, they also present PartiPrompts (P2), a set of over 1600 (English) prompts curated to measure model capabilities across a variety of categories and controlled dimensions of difficulty. P2 prompts can be simple, but they can also be complex, such as the 67-word description they created for Vincent van Gogh's The Starry Night. DrawBench is a similar dataset. The Localized Narratives dataset from the dataset section also consists of long prompts and can therefore be used as a benchmark for generative models as well.
Current benchmarks give a good perspective on model performance across a wide range of V&L tasks, but the field is only starting to assess why models perform so well and whether models learn specific capabilities that span multiple V&L tasks.

2.3.3.4.3 FOIL it!

Shekhar et al. (2017) proposed an automatic method for creating a large dataset of real images with minimal language bias and some diagnostic abilities. They extended the MS-COCO dataset and created FOIL-COCO. FOIL stands for "Find One mismatch between Image and Language caption", and the dataset consists of images associated with incorrect captions. The captions are produced by introducing one single error (or 'foil') per caption in existing, human-annotated data. Each data point in FOIL-COCO can thus be described as a triplet consisting of an image, an original caption and a foil caption. Their data generation process consists of four main steps:

1. Generation of replacement word pairs
2. Splitting of replacement pairs into training and testing
3. Generation of foil captions
4. Mining the hardest foil caption for each image

The models are evaluated on three different tasks. The first one is Correct vs.
foil classification: given an image and a caption, the model is asked to mark whether the caption is correct or wrong. The aim is to understand whether LaVi (language-and-vision) models can spot mismatches between their coarse representations of language and visual input. The second task is Foil word detection: given an image and a foil caption, the model has to detect the foil word. The aim is to evaluate the understanding of the system at the word level. The last task is Foil word correction: given an image, a foil caption and the foil word, the model has to detect the foil and provide its correction. The aim is to check whether the system's visual representation is fine-grained enough to extract the information necessary to correct the error. Their hypothesis is that systems which, like humans, deeply integrate the language and vision modalities should spot foil captions quite easily.

2.3.3.4.4 VALSE

Vision And Language Structured Evaluation (VALSE) (Parcalabescu et al., 2022) builds on the same idea.
This benchmark aims to gauge the sensitivity of pre-trained V&L models to foiled instances. It covers a wide spectrum of basic linguistic phenomena affecting the linguistic and visual modalities: existence, plurality, counting, spatial relations, actions, and entity coreference. To generate the foils, they first use strong language models to propose foil candidates, and second they use natural language inference (NLI) to filter out foils that could still describe the image. To do this in an automatic fashion, they treat the image as a premise and the caption as its entailed hypothesis; additionally, they use the caption as a premise and the foil as the hypothesis. If an NLI model predicts the foil to be neutral or a contradiction with respect to the caption, they take this as an indicator of a good foil. Finally, they used human annotators to validate all generated testing data. Mainly the MS-COCO dataset is used. VALSE is intended as a task-independent, zero-shot benchmark to assess the extent to which models learn to ground specific linguistic phenomena as a consequence of their pretraining.

2.3.3.5 Other Benchmarks

As we do not live in a world with unlimited resources, it is also important to keep track of how much energy is consumed to train models and how big their carbon footprint is. Strubell et al. (2019b) investigated several NLP models and benchmarked model training and development costs in terms of dollars and estimated CO2 emissions.
They concluded that training a single BERT base model without hyperparameter tuning on GPUs requires the same energy as a trans-American flight. On average, a human is responsible for about 5t of CO2 per year, and Strubell et al. (2019b) estimated that the training procedure of a big Transformer with neural architecture search emitted 284t of CO2. Works such as Lottick et al. (2019) and Henderson et al. (2020) have released online tools to benchmark energy usage, and initiatives such as the SustainNLP workshop have since taken up the goal of prioritizing computationally efficient hardware and algorithms. These findings are just some points one should keep in mind. In the following chapters we will see how multimodal architectures use these datasets and how they perform on the given benchmarks.

3 Multimodal architectures

Authors: Luyang Chu, Karol Urbanczyk, Giacomo Loss, Max Schneider, Steffen Jauch-Walser
Supervisor: Christian Heumann

Multimodal learning refers to the process of learning representations from different types of input modalities, such as image data, text or speech. Due to methodological breakthroughs in the fields of Natural Language Processing (NLP) as well as Computer Vision (CV) in recent years, multimodal models have gained increasing attention, as they are able to strengthen predictions and better emulate the way humans learn. This chapter focuses on images and text as input data. The remainder of the chapter is structured as follows: the first part, "Image2Text", discusses how transformer-based architectures improve meaningful captioning for complex images using the large-scale, richly annotated COCO dataset (Lin et al.
, 2014c; Cornia et al., 2020). While looking at a photograph and describing it, or parsing a complex scene and describing its context, is not a difficult task for humans, it appears to be much more complex and challenging for computers. We start by focusing on images as input modality. In 2014, Microsoft COCO was developed with the primary goal of advancing the state of the art (SOTA) in object recognition by diving deeper into the broader question of scene understanding (Lin et al., 2014c). "COCO" is the acronym for Common Objects in Context. It addresses three core problems in scene understanding: object detection (non-iconic views), segmentation, and captioning. While transformer-based architectures are already widely used for tasks like machine translation and language understanding in NLP, their potential for applications in the multimodal context has not been fully explored yet. With the help of the MS COCO dataset, the transformer-based architecture "Meshed-Memory Transformer for Image Captioning" (M²) will be introduced, which was able to improve both the image encoding and the language generation steps (Cornia et al., 2020). The performance of M² and other fully-attentive models will be compared on the MS COCO dataset. Next, in "Text2Image", the idea of incorporating textual input in order to generate visual representations is described.
Current advancements in this field have been made possible largely by recent breakthroughs in NLP, which first allowed for learning contextual representations of text. Transformer-like architectures are being used to encode the input into embedding vectors, which later help to guide the process of image generation. The chapter discusses the development of the field in chronological order, looking into the details of the most recent milestones. Concepts such as generative adversarial networks (GAN), variational auto-encoders (VAE), VAE with vector quantization (VQ-VAE), diffusion, and autoregressive models are covered to provide the reader with a better understanding of the roots of the current research and where it might be heading. Some of the most outstanding outputs generated by state-of-the-art works are also presented in the chapter. The third part, "Images supporting Language Models", deals with the integration of visual elements into purely textual language models. Distributional semantic models such as Word2Vec and BERT assume that the meaning of a given word or sentence can be understood by looking at how (in which context) and when the word or the sentence appears in the text corpus, namely from its "distribution" within the text. But this assumption has historically been questioned, because words and sentences must be grounded in other perceptual dimensions in order to understand their meaning (see for example the "symbol grounding problem"; Harnad, 1990). For these reasons, a broad range of models has been developed with the aim of improving pure language models by leveraging the addition of other perceptual dimensions, such as the visual one. This subchapter focuses in particular on the integration of visual elements (here: images) to support pure language models for various tasks at the word-/token-level as well as at the sentence level.
The starting point in this case is always a language model, into which visual representations (often extracted with the help of large pools of images from data sets like MS COCO, see the chapter "Img2Text" for further references) are to be "integrated". But how? A wide range of solutions has been proposed: on one side of the spectrum, textual and visual elements are learned separately and then "combined" afterwards, whereas on the other side, the learning of textual and visual features takes place simultaneously/jointly. For example, Silberer and Lapata (2014) implement a model where a one-to-one correspondence between textual and visual space is assumed. Textual and visual representations are passed to two separate unimodal encoders and both outputs are then fed to a bimodal autoencoder. On the other side, Bordes et al. (2020) propose a "text objective function" whose parameters are shared with an additional "grounded objective function". The training of the latter takes place in what the authors call a "grounded space", which makes it possible to avoid the one-to-one correspondence between textual and visual space. These are just introductory examples, and between these two approaches there are many shades of gray (probably even more than fifty ...). These models exhibit in many instances better performance than pure language models, but they still struggle in some respects, for example when dealing with abstract words and sentences.

FIGURE 3.1: Left: Silberer and Lapata (2014) stack autoencoders to learn higher-level embeddings from textual and visual modalities, encoded as vectors of attributes. Right: Bordes et al. (2020) fuse textual and visual information in an intermediate space denoted as "grounded space"; the "grounding objective function" is not applied directly on sentence embeddings but trained on this intermediate space, onto which sentence embeddings are projected.
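To make the first of these two design points (Figure 3.1, left) more concrete, the following is a minimal, hypothetical PyTorch sketch of the general idea behind a stacked bimodal autoencoder: two unimodal encoders whose outputs are fused into a shared code from which both modalities are reconstructed. It illustrates the architecture family only, not the exact model of Silberer and Lapata (2014); all layer sizes are made-up placeholders.

import torch
import torch.nn as nn

class BimodalAutoencoder(nn.Module):
    # Illustrative architecture: two unimodal encoders feed a shared
    # bimodal code, from which both modalities are reconstructed.
    def __init__(self, text_dim=300, img_dim=2048, uni_dim=256, bi_dim=128):
        super().__init__()
        self.text_enc = nn.Sequential(nn.Linear(text_dim, uni_dim), nn.ReLU())
        self.img_enc = nn.Sequential(nn.Linear(img_dim, uni_dim), nn.ReLU())
        self.bimodal_enc = nn.Sequential(nn.Linear(2 * uni_dim, bi_dim), nn.ReLU())
        self.bimodal_dec = nn.Sequential(nn.Linear(bi_dim, 2 * uni_dim), nn.ReLU())
        self.text_dec = nn.Linear(uni_dim, text_dim)
        self.img_dec = nn.Linear(uni_dim, img_dim)

    def forward(self, text_x, img_x):
        h_text = self.text_enc(text_x)
        h_img = self.img_enc(img_x)
        z = self.bimodal_enc(torch.cat([h_text, h_img], dim=-1))   # shared multimodal embedding
        h_text_hat, h_img_hat = self.bimodal_dec(z).chunk(2, dim=-1)
        return self.text_dec(h_text_hat), self.img_dec(h_img_hat), z

model = BimodalAutoencoder()
text_batch, img_batch = torch.randn(4, 300), torch.randn(4, 2048)
text_rec, img_rec, embedding = model(text_batch, img_batch)
loss = nn.functional.mse_loss(text_rec, text_batch) + nn.functional.mse_loss(img_rec, img_batch)

The shared embedding z is what such models would later use as a grounded word or sentence representation.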
Afterwards, in the subchapter on "Text supporting Image Models", approaches where natural language is used as additional supervision for CV models are described. Intuitively, these models should be more powerful than models supervised solely by manually labeled data, simply because there is much more signal available in the training data. One prominent example is the CLIP model (Radford et al., 2021a) with its new dataset WIT (WebImageText), comprising 400 million text-image pairs scraped from the internet. Similar to "Text2Image", the recent success stories in NLP have inspired most of the new approaches in this field, most importantly pre-training methods which directly learn from raw text (e.g. GPT-n, Generative Pre-trained Transformer; Brown et al., 2020). Accordingly, the acronym CLIP stands for Contrastive Language-Image Pre-training. A transformer-like architecture is used for jointly pre-training a text encoder and an image encoder. For this, a contrastive objective is employed: correctly predicting which natural language text pertains to which image inside a given batch. Training this way turned out to be more efficient than generating captions for images. This leads to a flexible model, which at test time uses the learned text encoder as a "zero-shot" classifier on embeddings of the target dataset's classes. The model can, for example, perform optical character recognition, geo-location detection and action recognition. Performance-wise, CLIP can be competitive with task-specific supervised models, while never having seen an instance of the specific dataset before. This suggests an important step towards closing the "robustness gap", where machine learning models fail to meet the expectations set by their previous performance – especially on ImageNet test sets – on new datasets.
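The following is a minimal, self-contained sketch of such a contrastive objective, written as a symmetric cross-entropy over the image-text similarity matrix of one batch. The encoders are not shown (random tensors stand in for their outputs) and the temperature value is an arbitrary illustrative choice, not CLIP's actual training configuration.

import torch
import torch.nn.functional as F

def contrastive_image_text_loss(image_emb, text_emb, temperature=0.07):
    # image_emb, text_emb: (N, dim) encoder outputs for N matching image-text pairs
    image_emb = F.normalize(image_emb, dim=-1)
    text_emb = F.normalize(text_emb, dim=-1)
    logits = image_emb @ text_emb.t() / temperature   # (N, N) cosine-similarity matrix
    targets = torch.arange(logits.size(0))            # the i-th text belongs to the i-th image
    loss_i = F.cross_entropy(logits, targets)         # match each image to its text
    loss_t = F.cross_entropy(logits.t(), targets)     # match each text to its image
    return (loss_i + loss_t) / 2

# toy usage with random stand-ins for encoder outputs
img, txt = torch.randn(8, 512), torch.randn(8, 512)
print(contrastive_image_text_loss(img, txt))

At test time, the same similarity computation between an image embedding and the embedded class-name prompts yields the "zero-shot" classification scores mentioned above.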
Finally, the subchapter "Models for both modalities" discusses how text and image inputs can be incorporated into a single unifying framework in order to get closer to a general self-supervised learning framework. There are two key advantages that make such an architecture particularly interesting. Similar to the models mentioned in the previous parts, being devoid of human labelling, self-supervised models do not suffer from the same capacity constraints as regular supervised learning models. On top of that, while there have been notable advances in dealing with different modalities using single-modality models, it is often unclear to which extent a model structure generalizes across different modalities. Rather than potentially learning modality-specific biases, a general multipurpose framework can help increase robustness while also simplifying the learner portfolio. In order to investigate different challenges and trends in vision-and-language modelling, this section takes a closer look at three different models, namely data2vec (Baevski et al., 2022), VilBert (Lu et al., 2019b) and Flamingo (Alayrac et al., 2022).
Data2vec is a new multimodal self-supervised learning model which uses a single framework to process either speech, natural language or visual information. This is in contrast to earlier models which used different algorithms for different modalities. The core idea of data2vec, developed by Meta AI, is to predict latent representations of the full input data based on a masked view of the input, in a self-distillation setup using a standard transformer architecture (Baevski et al., 2022). As a result, the main improvement lies in the framework itself, not in the underlying architectures: for example, the transformer architecture being used follows Vaswani et al. (2017b). Through their parallelizability, transformers have several advantages over RNNs/CNNs, particularly when large amounts of data are being used, making them the de-facto standard approach in vision-language modelling (Dosovitskiy et al., 2020a). VilBert is an earlier model that, in contrast to data2vec, can handle cross-modality tasks. Finally, Flamingo is a modern few-shot learning model which features 80B parameters – significantly more than the other two models. Through a large language model incorporated in its architecture, it has strong text generation capabilities to tackle open-ended tasks. It also poses the question of how to efficiently train increasingly large models and shows the effectiveness of using perceiver architectures (Jaegle et al., 2021a) to encode inputs from different modalities, as well as how to leverage communication between pretrained and frozen models.
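As a rough illustration of the self-distillation idea described above (and not of the exact data2vec training recipe), the sketch below lets a "student" network predict, from a masked view of the input, the latent representations that an exponential-moving-average "teacher" produces on the full input. The encoder, masking ratio, loss and EMA decay are simplified placeholders.

import copy
import torch
import torch.nn as nn

encoder = nn.Sequential(nn.Linear(128, 256), nn.GELU(), nn.Linear(256, 256))  # stand-in for a transformer
student = encoder
teacher = copy.deepcopy(encoder)            # teacher weights track the student via EMA
for p in teacher.parameters():
    p.requires_grad_(False)

def ema_update(student, teacher, decay=0.999):
    with torch.no_grad():
        for ps, pt in zip(student.parameters(), teacher.parameters()):
            pt.mul_(decay).add_(ps, alpha=1 - decay)

x = torch.randn(16, 128)                    # a batch of (already embedded) inputs
mask = torch.rand_like(x) < 0.15            # mask 15% of the input features
with torch.no_grad():
    targets = teacher(x)                    # latent targets from the full, unmasked input
preds = student(x.masked_fill(mask, 0.0))   # the student only sees the masked view
loss = nn.functional.smooth_l1_loss(preds, targets)
loss.backward()
ema_update(student, teacher)

Because the targets are continuous latent representations rather than modality-specific tokens or pixels, the same objective can in principle be applied to speech, text or images.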
3.1 Image2Text

Author: Luyang Chu

Supervisor: Christian Heumann

Image captioning refers to the task of producing descriptive text for given images. It has stimulated interest in both natural language processing and computer vision research in recent years. Image captioning is a key task that requires a semantic comprehension of images as well as the capacity to generate accurate and precise description sentences.

3.1.1 Microsoft COCO: Common Objects in Context

The understanding of visual scenes plays an important role in computer vision (CV) research. It includes many tasks, such as image classification, object detection, object localization and semantic scene labeling. Throughout the history of CV research, high-quality image datasets have played a critical role. They are not only essential for training and evaluating new algorithms, but also lead the research into new challenging directions (Lin et al., 2014c). In the early years, researchers developed data sets (Deng et al., 2009; Xiao et al., 2010; Everingham et al., 2010) which enabled the direct comparison of hundreds of image recognition algorithms, which led to an early evolution in object recognition.
In the more recent past, ImageNet (Deng et al., 2009), which contains millions of images, has enabled breakthroughs in both object classification and detection research using new deep learning algorithms. With the goal of advancing the state-of-the-art in object recognition, especially scene understanding, a new large-scale data set called "Microsoft Common Objects in Context" (MS COCO) was published in 2014. MS COCO focuses on three core problems in scene understanding: detecting non-iconic views, detecting the semantic relationships between objects, and determining the precise localization of objects (Lin et al., 2014c). The MS COCO data set contains 91 common object categories with a total of 328,000 images as well as 2,500,000 instance labels. The authors claim that all of these images could be recognized by a 4-year-old child. 82 of the categories include more than 5,000 labeled instances. These labeled instances may support the detection of relationships between objects in MS COCO. In order to provide precise localization of object instances, only "thing" categories like, e.g., car, table, or dog were included. Objects which do not have clear boundaries, like, e.g., sky, sea, or grass, were not included.
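To give an idea of how these instance annotations are typically accessed in practice, here is a small illustrative snippet using the pycocotools package. The annotation file path is an assumed local download (the 2017 validation split is used only as an example) and the snippet is not part of the original data set description.

from pycocotools.coco import COCO

# assumed local path to an MS COCO instance-annotation file
coco = COCO("annotations/instances_val2017.json")

cat_ids = coco.getCatIds(catNms=["dog", "car"])      # category ids for two "thing" classes
img_ids = coco.getImgIds(catIds=cat_ids)             # images containing both categories
ann_ids = coco.getAnnIds(imgIds=img_ids[:1], catIds=cat_ids)
annotations = coco.loadAnns(ann_ids)                 # each entry holds bbox, segmentation, category_id, ...

for ann in annotations:
    print(ann["category_id"], ann["bbox"])

Each annotation entry couples an instance-level segmentation and bounding box with its category, which is exactly the localization information discussed above.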
In current object recognition research, algorithms perform well on images with iconic views. Images with an iconic view are defined as containing one single object category of interest in the center of the image. To accomplish the goal of detecting the contextual relationships between objects, more complex images with multiple objects, or natural images coming from our daily life, are also gathered for the data set.

In addition to MS COCO, researchers have been working on the development of new large databases. In recent years many new large databases like ImageNet, PASCAL VOC (Everingham et al., 2010) and SUN (Xiao et al., 2010) have been developed in the field of computer vision. Each of these data sets has its own specific focus. Data sets for object recognition can be roughly split into three groups: object classification, object detection and semantic scene labeling. Object classification requires binary labels to indicate whether objects are present in an image. ImageNet (Deng et al., 2009) is clearly distinguishable from other data sets in terms of size: it contains 22k categories with 500–1,000 images each. In comparison to other data sets, ImageNet thus contains over 14 million labeled images with both entity-level and fine-grained categories, obtained by using the WordNet hierarchy, and has enabled significant advances in image classification. Detecting an object includes two steps: the first is to ensure that an object from a specified class is present, the second is to localize the object in the image with a given bounding box.
This can be implemented to solve tasks like face detection or pedestrian detection. The PASCAL VOC (Everingham et al., 2010) data set can be used to help with the detection of basic object categories. With 20 object categories and over 11,000 images, PASCAL VOC contains over 27,000 labeled object instances, additionally annotated with bounding boxes. Almost 7,000 of these object instances come with detailed segmentations (Lin et al., 2014c). Labeling semantic objects in a scene requires that each pixel of an image is labeled with respect to the category it belongs to, such as sky, chair, etc., but individual instances of objects do not need to be segmented (Lin et al., 2014c). Some objects like sky, grass, and street can also be defined and labeled in this way. The SUN data set (Xiao et al., 2010) combines many of the properties of both object detection and semantic scene labeling data sets for the task of scene understanding; it contains 908 scene categories from the WordNet dictionary (Fellbaum, 2000) with segmented objects. Its 3,819 object categories span both object detection data sets (person, chair) and semantic scene labeling (wall, sky, floor) (Lin et al., 2014c).
3.1.1.1 Image Collection and Annotation for MS COCO

MS COCO is a large-scale, richly annotated data set; the process of building it consisted of two phases: data collection and image annotation. In order to select representative object categories for the images in MS COCO, researchers collected several categories from different existing data sets like PASCAL VOC (Everingham et al., 2010) and other sources. All these object categories could, according to the authors, be recognized by children between 4 and 8 years old. The quality of the object categories was ensured by co-authors, who rated the categories on a scale from 1 to 5 depending on their common occurrence, practical applicability and diversity from other categories (Lin et al., 2014c). The final number of categories on their list was 91. All the categories from PASCAL VOC are included in MS COCO. With the help of these representative object categories, the authors of MS COCO wanted to collect a data set in which a majority of the included images are non-iconic. All included images can be roughly divided into three types according to Fig. 3.2: iconic-object images, iconic-scene images and non-iconic images (Lin et al., 2014c).

FIGURE 3.2: Types of images in the data set (Lin et al., 2014c).
Images were collected through two strategies: firstly, images from Flickr, a platform for photos uploaded by amateur photographers, were collected together with their keywords. Secondly, researchers searched for pairwise combinations of object categories like "dog + car" to gather more non-iconic images and images with rich contextual relationships (Lin et al., 2014c).

Due to the scale of the data set and the high cost of the annotation process, designing a high-quality annotation pipeline at an efficient cost was a difficult task. The annotation pipeline for MS COCO, shown in Fig. 3.3, was split into three primary tasks: 1. category labeling, 2. instance spotting, and 3. instance segmentation (Lin et al., 2014c).

FIGURE 3.3: Annotation pipeline for MS COCO (Lin et al., 2014c).

As we can see in Fig. 3.3, the object categories present in each image were determined in the first step. Due to the large number of data sets and categories, a hierarchical approach was used instead of doing binary classification for each category.
All 91 categories were grouped into 11 super-categories. The annotator then examined for each single instance whether it belongs to one of the given super-categories. Workers only had to label one instance for each of the super-categories with a category's icon (Lin et al., 2014c). Each image was labeled by eight workers. This hierarchical approach helped to reduce the time needed for labeling. However, the first phase still took 20k worker hours to be completed. In the next step, all instances of the object categories in an image were labeled; at most 10 instances of a given category per image were labeled by each worker. In both the instance spotting and the instance segmentation steps, the locations of the instances found by a worker in the previous stage were shown to the current worker. Each image was again labeled by eight workers, summing up to a total of 10k worker hours. In the final segmentation stage, each object instance was segmented; the segmentations of other instances and the specification of the object instance by a worker in the previous stage were again shown to the worker. Segmenting 2.5 million object instances was an extremely time-consuming task which required over 22 worker hours per 1,000 segmentations.
To minimize cost and improve the quality of the segmentations, all workers were required to complete a training task for each object category. In order to ensure a better quality, an explicit verification step on each segmented instance was performed as well.

3.1.1.2 Comparison with other data sets

In recent years, researchers have developed several pre-training data sets and benchmarks which helped the development of algorithms for CV. Each of these data sets varies significantly in size, number of categories and types of images. In the previous part, we also introduced the different research focus of some data sets, e.g. ImageNet (Deng et al., 2009), PASCAL VOC (Everingham et al., 2010) and SUN (Xiao et al., 2010). ImageNet, containing millions of images, has enabled major breakthroughs in both object classification and detection research using a new class of deep learning algorithms. It was created with the intention of capturing a large number of object categories, many of which are fine-grained. SUN focuses on labeling scene types and the objects that commonly occur in them. Finally, PASCAL VOC's primary application is object detection in natural images.
MS COCO is designed for the detection and segmentation of objects occurring in their natural context (Lin et al., 2014c). With the help of Fig. 3.4, one can compare MS COCO to ImageNet, PASCAL VOC and SUN with respect to different aspects (Lin et al., 2014c).

FIGURE 3.4: Comparison of MS COCO with PASCAL VOC, SUN and ImageNet (Lin et al., 2014c).

The number of instances per category for all 91 categories in MS COCO and PASCAL VOC is shown in subfigure 3.4 (a). Compared to PASCAL VOC, MS COCO has both more categories and (on average) more instances per category. The number of object categories and the number of instances per category for all data sets is shown in subfigure 3.4 (d). MS COCO has fewer categories than ImageNet and SUN, but it has the highest average number of instances per category among all the data sets, which, from the perspective of the authors, might be useful for learning complex models capable of precise localization (Lin et al., 2014c).
Subfigures 3.4 (b) and (c) show the number of annotated categories and annotated instances per image for MS COCO, ImageNet, PASCAL VOC and SUN (average numbers of categories and instances are shown in parentheses). On average, MS COCO contains 3.5 categories and 7.7 instances per image. ImageNet and PASCAL VOC both have on average less than 2 categories and 3 instances per image. The SUN data set has the most contextual information, with on average 9.8 categories and 17 instances per image. Subfigure 3.4 (e) depicts the distribution of instance sizes for the MS COCO, ImageNet Detection, PASCAL VOC and SUN data sets.

3.1.1.3 Discussion
MS COCO is a large-scale data set for detecting and segmenting objects found in everyday life, with the aim of improving the state-of-the-art in object recognition and scene understanding. It focuses on non-iconic images of objects in natural environments and contains rich contextual information with many objects present per image. MS COCO is one of the typically used vision data sets, which are labor-intensive and costly to create. At vast cost and with over 70,000 worker hours, 2.5 million instances were annotated to drive the advancement of object detection and segmentation algorithms, and MS COCO is still a good benchmark for the field of CV (Lin et al., 2014c). The MS COCO team also points out directions for future work. For example, "stuff" labels like "sky", "grass", and "street" may also be included in the data set, since "stuff" categories provide significant contextual information for object detection.
3.1.2 Models for Image Captioning

The image captioning task is generally to describe the visual content of an image in natural language, so it requires an algorithm to understand and model the relationships between visual and textual elements and to generate a sequence of output words (Cornia et al., 2020). In the last few years, a collection of methods has been proposed for image captioning. Earlier approaches were based on the generation of simple templates, which were filled with the output of an object detector or attribute predictor (Socher and Fei-fei, 2010; Yao et al., 2010). Given the sequential nature of language, most research on image captioning has focused on deep learning techniques, especially using Recurrent Neural Network models (RNNs) (Vinyals et al., 2015; Karpathy and Fei-Fei, 2014) or one of their special variants (e.g. LSTMs). Mostly, RNNs are used for sequence generation as language models, while the visual information is encoded in the output of a CNN. With the aim of modelling the relationships between image regions and words, graph convolutional neural networks in the image encoding phase (Yao et al., 2018a) or single-layer attention mechanisms on the image encoding side (Xu et al., 2015) have been proposed to incorporate more semantic and spatial relationships between objects. RNN-based models are widely adopted; however, they are limited in their representation power due to their sequential nature (Cornia et al., 2020).
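To make the standard CNN-encoder/RNN-decoder recipe more tangible, here is a heavily simplified, hypothetical PyTorch sketch of such a captioning model, in the spirit of "a CNN encodes the image, an LSTM generates the words", not a faithful reproduction of any particular paper; vocabulary size, feature dimensions and the (pre-extracted) CNN features are placeholders.

import torch
import torch.nn as nn

class SimpleCaptioner(nn.Module):
    def __init__(self, vocab_size=10000, feat_dim=2048, embed_dim=256, hidden_dim=512):
        super().__init__()
        self.img_proj = nn.Linear(feat_dim, embed_dim)        # project CNN image features
        self.embed = nn.Embedding(vocab_size, embed_dim)      # word embeddings
        self.lstm = nn.LSTM(embed_dim, hidden_dim, batch_first=True)
        self.out = nn.Linear(hidden_dim, vocab_size)          # predict the next word

    def forward(self, img_feats, captions):
        # img_feats: (B, feat_dim) pre-extracted CNN features; captions: (B, T) token ids
        img_token = self.img_proj(img_feats).unsqueeze(1)     # feed the image as the first "word"
        words = self.embed(captions[:, :-1])                  # teacher forcing: shift right
        inputs = torch.cat([img_token, words], dim=1)
        hidden, _ = self.lstm(inputs)
        return self.out(hidden)                               # (B, T, vocab_size) next-word logits

model = SimpleCaptioner()
feats = torch.randn(4, 2048)                                  # stand-in for CNN outputs
caps = torch.randint(0, 10000, (4, 12))
logits = model(feats, caps)
loss = nn.functional.cross_entropy(logits.reshape(-1, 10000), caps.reshape(-1))

The sequential dependence visible in the LSTM loop over time steps is exactly the limitation that the fully-attentive models discussed next try to remove.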
Recently, new fully-attentive models, in which the use of self-attention has replaced recurrence, have been proposed. New approaches apply the Transformer architecture (Vaswani et al., 2017d) and BERT (Devlin et al., 2019) models to solve image captioning tasks. The transformer consists of an encoder with a stack of self-attention and feed-forward layers, and a decoder which uses (masked) self-attention on words and cross-attention over the output of the last encoder layer (Cornia et al., 2020). In some other transformer-based approaches, a transformer-like encoder was paired with an LSTM decoder, while the aforementioned approaches have exploited the original transformer architecture. Others (Herdade et al., 2019) proposed a transformer architecture for image captioning with a focus on the geometric relations between input objects; specifically, additional geometric weights between object pairs are computed and used to scale the attention weights. Similarly, an extension of the attention operator, in which the final attended information is weighted by a gate guided by the context, was introduced at a similar time (Huang et al., 2019).

3.1.3 Meshed-Memory Transformer for Image Captioning (M²)

Transformer-based architectures have been widely implemented in sequence modeling tasks like machine translation and language understanding.
However, their applicability to multi-modal tasks like image captioning is still largely under-explored (Cornia et al., 2020).

FIGURE 3.5: M² Transformer (Cornia et al., 2020). [The figure sketches the overall model: a memory-augmented encoding stack (encoder layers 1 to N) on the image side and a meshed decoding stack (decoder layers 1 to N) on the language side, generating the caption "A baseball player is throwing a ball to another player".]

A novel fully-attentive approach called Meshed-Memory Transformer for Image Captioning (M²) was proposed by Cornia et al. (2020) with the aim of improving the design of both the image encoder and the language decoder. Compared to previous image captioning models, M² (see Fig. 3.5) introduces two novelties: the encoder computes a multi-level representation of the relationships between image regions, covering both low-level and high-level relations, and a-priori knowledge can be learned and modeled with persistent memory vectors. The multi-layer architecture exploits both low- and high-level visual relationships through a learned gating mechanism, which computes a weight at each level; in this way, a mesh-like connectivity between encoder and decoder layers is created for the sentence generation process (Cornia et al., 2020).

3.1.3.1 M² Transformer Architecture
FIGURE 3.6: M² Transformer architecture (Cornia et al., 2020).

Fig. 3.6 shows the detailed architecture of the M² Transformer. It can be divided into the encoder module (left) and the decoder module (right), both consisting of multiple layers. Given the input image regions X, the image is passed through the attention and feed-forward layers. The relationships between image regions and the a-priori knowledge are encoded in each encoding layer, and the output of each encoding layer is read by the decoding layers to generate the caption word by word (Cornia et al., 2020).

All interactions between word-level and image-level features of the input image X are modeled with scaled dot-product attention. Attention operates on vectors of queries $q$, keys $k$ and values $v$, and takes a weighted sum of the value vectors according to a similarity distribution between query and key vectors. Attention can be defined as follows (Cornia et al., 2020):

$$\operatorname{Attention}(Q, K, V) = \operatorname{softmax}\left(\frac{QK^T}{\sqrt{d}}\right)V \tag{3.1}$$

where $Q$ is a matrix of $n_q$ query vectors, $K$ and $V$ both contain $n_k$ keys and values, all vectors have the same dimensionality, and $d$ is a scaling factor.
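To make the notation concrete, the following is a minimal sketch of the scaled dot-product attention of Eq. (3.1) in PyTorch. It is not the authors' implementation; the single-head formulation and the tensor shapes are simplifying assumptions.

```python
import torch
import torch.nn.functional as F

def scaled_dot_product_attention(Q, K, V):
    """Eq. (3.1): softmax(Q K^T / sqrt(d)) V for a single attention head."""
    d = Q.size(-1)                                   # common vector dimensionality
    scores = Q @ K.transpose(-2, -1) / d ** 0.5      # pairwise query-key similarities
    weights = F.softmax(scores, dim=-1)              # similarity distribution over the keys
    return weights @ V                               # weighted sum of the value vectors

# Toy usage: 10 image-region queries attending over 10 keys/values of dimension 512
Q, K, V = (torch.randn(10, 512) for _ in range(3))
out = scaled_dot_product_attention(Q, K, V)          # shape: (10, 512)
```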
3.1.3.1.1 Memory-Augmented Encoder

For a given set of image regions X, attention can be used to obtain a permutation-invariant encoding of X through self-attention operations, where the operator from the Transformer is defined as follows (Cornia et al., 2020):

$$\mathcal{S}(X) = \operatorname{Attention}(W_q X, W_k X, W_v X) \tag{3.2}$$

In this case, queries, keys, and values are linear projections of the input features, and $W_q$, $W_k$, $W_v$ are their learnable weights; the result depends solely on the pairwise similarities between linear projections of the input set X. The self-attention operator thus encodes the pairwise relationships inside the input. However, self-attention also has a limitation: a-priori knowledge on relationships between image regions cannot be modelled. To overcome this limitation, the authors introduce a memory-augmented attention operator which extends the keys and values with additional prior information that does not depend on the image regions X. The additional keys and values are initialized as plain learnable vectors which can be directly updated via SGD. The operator is defined as follows (Cornia et al., 2020):

$$\mathcal{M}_{mem}(X) = \operatorname{Attention}(W_q X, K, V), \quad K = [W_k X, M_k], \quad V = [W_v X, M_v] \tag{3.3}$$

where $M_k$ and $M_v$ are learnable matrices with $n_m$ rows and $[\cdot,\cdot]$ indicates concatenation. The additional keys and values help to retrieve a-priori knowledge from the input while keeping the queries unchanged (Cornia et al., 2020).
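A hedged sketch of how such a memory-augmented attention operator could look in PyTorch is given below, reusing scaled_dot_product_attention from the previous sketch. The number of memory slots, the model dimensionality, and the initialization are illustrative assumptions, not the values used by Cornia et al. (2020).

```python
import torch
import torch.nn as nn

class MemoryAugmentedAttention(nn.Module):
    """Eq. (3.3): keys and values are extended with learnable memory slots M_k, M_v."""
    def __init__(self, d_model=512, n_memory=40):
        super().__init__()
        self.W_q = nn.Linear(d_model, d_model)
        self.W_k = nn.Linear(d_model, d_model)
        self.W_v = nn.Linear(d_model, d_model)
        # persistent memory vectors: plain parameters, updated via SGD like any weight
        self.M_k = nn.Parameter(torch.randn(n_memory, d_model) * 0.02)
        self.M_v = nn.Parameter(torch.randn(n_memory, d_model) * 0.02)

    def forward(self, X):                                # X: (n_regions, d_model)
        Q = self.W_q(X)                                  # the queries stay unchanged
        K = torch.cat([self.W_k(X), self.M_k], dim=0)    # K = [W_k X, M_k]
        V = torch.cat([self.W_v(X), self.M_v], dim=0)    # V = [W_v X, M_v]
        return scaled_dot_product_attention(Q, K, V)
```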
For the encoding layer, the memory-augmented attention operator $\mathcal{M}_{mem}$ is embedded into a transformer-like layer: its output is fed into a position-wise feed-forward layer (Cornia et al., 2020):

$$\mathcal{F}(X)_i = U\,\sigma(V X_i + b) + c \tag{3.4}$$

where $X_i$ indicates the $i$-th vector of the input set and $\mathcal{F}(X)_i$ the $i$-th vector of the output. Moreover, $\sigma(\cdot)$ is the ReLU activation function, $V$ and $U$ are learnable weight matrices, and $b$ and $c$ are bias terms (Cornia et al., 2020). Each component is additionally wrapped in a residual connection and a layer normalization operation, so the complete definition of an encoding layer can finally be written as (Cornia et al., 2020):

$$Z = \operatorname{AddNorm}(\mathcal{M}_{mem}(X)), \qquad \tilde{X} = \operatorname{AddNorm}(\mathcal{F}(Z)) \tag{3.5}$$

Finally, the full encoder stacks multiple encoding layers in a sequential fashion, so that the $i$-th layer uses the output set computed by layer $i-1$; higher encoding layers can thereby exploit and refine the relationships identified by previous layers, and $N$ encoding layers produce the outputs $\tilde{\mathcal{X}} = (\tilde{X}^1, \dots, \tilde{X}^N)$ (Cornia et al., 2020).
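Putting Eqs. (3.2)-(3.5) together, an encoding layer and the full encoder stack could be sketched as follows, continuing the code above. The feed-forward width and the number of layers are assumptions made for illustration only.

```python
class EncoderLayer(nn.Module):
    """One encoding layer: memory-augmented attention and a position-wise
    feed-forward block (Eq. 3.4), each wrapped in AddNorm (Eq. 3.5)."""
    def __init__(self, d_model=512, d_ff=2048, n_memory=40):
        super().__init__()
        self.attn = MemoryAugmentedAttention(d_model, n_memory)
        self.ff = nn.Sequential(nn.Linear(d_model, d_ff), nn.ReLU(),
                                nn.Linear(d_ff, d_model))
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)

    def forward(self, X):
        Z = self.norm1(X + self.attn(X))     # Z = AddNorm(M_mem(X))
        return self.norm2(Z + self.ff(Z))    # X_tilde = AddNorm(F(Z))

class MemoryAugmentedEncoder(nn.Module):
    """Stack of N encoding layers; every intermediate output is kept,
    because the meshed decoder attends to all of them."""
    def __init__(self, n_layers=3, d_model=512):
        super().__init__()
        self.layers = nn.ModuleList(EncoderLayer(d_model) for _ in range(n_layers))

    def forward(self, X):
        outputs = []
        for layer in self.layers:
            X = layer(X)
            outputs.append(X)
        return outputs                       # (X_tilde^1, ..., X_tilde^N)
```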
3.1.3.1.2 Meshed Decoder

The decoder depends on both the previously generated words and the image region encodings. Meshed cross-attention takes advantage of all encoder layers to generate the caption for the image; the structure of the meshed decoder is shown on the right side of Fig. 3.6. The input sequence of word vectors Y and the outputs of all encoder layers $\tilde{\mathcal{X}}$ are connected by the meshed attention operator, gated through cross-attention. The meshed attention operator is formally defined as (Cornia et al., 2020):

$$\mathcal{M}_{mesh}(\tilde{\mathcal{X}}, Y) = \sum_{i=1}^{N} \alpha_i \odot \mathcal{C}(\tilde{X}^i, Y) \tag{3.6}$$

Here $\mathcal{C}(\cdot,\cdot)$ stands for the encoder-decoder cross-attention; it is computed with queries from the decoder, while the keys and values come from the encoder (Cornia et al., 2020):

$$\mathcal{C}(\tilde{X}^i, Y) = \operatorname{Attention}(W_q Y, W_k \tilde{X}^i, W_v \tilde{X}^i) \tag{3.7}$$

$\alpha_i$ is a matrix of weights of the same size as the cross-attention result; it models both the single contribution of each encoder layer and the relative importance between different layers (Cornia et al., 2020):

$$\alpha_i = \sigma\left(W_i\,[Y, \mathcal{C}(\tilde{X}^i, Y)] + b_i\right) \tag{3.8}$$

where $[\cdot,\cdot]$ indicates concatenation, $\sigma(\cdot)$ here is the sigmoid activation function, $W_i$ is a weight matrix, and $b_i$ is a learnable bias vector (Cornia et al., 2020).
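The gated, mesh-like connectivity of Eqs. (3.6)-(3.8) can be sketched as below, again continuing the code above. A single attention head and key/value projections shared across encoder layers are simplifying assumptions of this sketch.

```python
class MeshedCrossAttention(nn.Module):
    """Cross-attention over every encoder layer, weighted by sigmoid gates alpha_i."""
    def __init__(self, d_model=512, n_enc_layers=3):
        super().__init__()
        self.W_q = nn.Linear(d_model, d_model)
        self.W_k = nn.Linear(d_model, d_model)
        self.W_v = nn.Linear(d_model, d_model)
        # one gate per encoder layer, computed from the concatenation [Y, C(X_i, Y)]
        self.gates = nn.ModuleList(nn.Linear(2 * d_model, d_model)
                                   for _ in range(n_enc_layers))

    def forward(self, enc_outputs, Y):       # enc_outputs: list of (n_regions, d_model)
        Q = self.W_q(Y)                      # queries come from the decoder input Y
        out = torch.zeros_like(Y)
        for gate, X_i in zip(self.gates, enc_outputs):
            C_i = scaled_dot_product_attention(Q, self.W_k(X_i), self.W_v(X_i))  # Eq. (3.7)
            alpha_i = torch.sigmoid(gate(torch.cat([Y, C_i], dim=-1)))           # Eq. (3.8)
            out = out + alpha_i * C_i                                            # Eq. (3.6)
        return out
```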
In the decoder layers, the prediction of a word should only depend on the previously generated words, so each decoder layer comprises a masked self-attention operation: the operator can only connect queries derived from the $t$-th element of its input sequence Y with keys and values from the left sub-sequence, i.e. $Y_{\leq t}$. Similar to the encoder layers, the decoder layers also contain a position-wise feed-forward layer, so a decoder layer can finally be defined as (Cornia et al., 2020):

$$Z = \operatorname{AddNorm}\bigl(\mathcal{M}_{mesh}(\tilde{\mathcal{X}}, \operatorname{AddNorm}(\mathcal{S}_{mask}(Y)))\bigr), \qquad \tilde{Y} = \operatorname{AddNorm}(\mathcal{F}(Z)) \tag{3.9}$$

where $\mathcal{S}_{mask}$ indicates a masked self-attention over time (Cornia et al., 2020). The full decoder stacks multiple such layers; it takes the input word vectors as well as the $t$-th element (and all elements prior to it) of its output sequence to make the prediction for the word at $t+1$, conditioned on $Y_{\leq t}$. Finally, the decoder applies a linear projection and a softmax operation, which yields a probability distribution over all words in the vocabulary (Cornia et al., 2020).
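For completeness, a sketch of the masked (causal) self-attention used in the decoder and of the final projection onto the vocabulary is shown below; the vocabulary size is an arbitrary assumption and the snippet continues the imports from the earlier sketches.

```python
def masked_self_attention(Q, K, V):
    """Like Eq. (3.1), but position t may only attend to positions <= t (Y_<=t)."""
    T, d = Q.size(0), Q.size(-1)
    scores = Q @ K.transpose(-2, -1) / d ** 0.5
    future = torch.triu(torch.ones(T, T, dtype=torch.bool), diagonal=1)
    scores = scores.masked_fill(future, float("-inf"))   # block attention to future words
    return torch.softmax(scores, dim=-1) @ V

# After the last decoder layer, a linear projection followed by a softmax yields a
# probability distribution over the vocabulary for the word at position t + 1.
vocab_size, d_model = 10000, 512                         # assumed sizes
to_vocab = nn.Linear(d_model, vocab_size)

def next_word_distribution(decoder_output):              # decoder_output: (T, d_model)
    return torch.softmax(to_vocab(decoder_output[-1]), dim=-1)
```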
3.1.3.1.3 Comparison with other models on the MS COCO data set

The M² Transformer was evaluated on MS COCO, which is still one of the most commonly used test data sets for image captioning. Instead of using the original MS COCO splits, Cornia et al. (2020) follow the split provided by Karpathy and Fei-Fei (2014), which uses 5,000 images for validation, 5,000 images for testing, and the rest for training. For model evaluation and comparison, standard metrics for evaluating generated sequences are used, namely BLEU (Papineni et al., 2002), METEOR (Banerjee and Lavie, 2005), ROUGE (Lin, 2004), CIDEr (Vedantam et al., 2015), and SPICE (Anderson et al., 2016), which have been introduced in the second chapter.
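As an aside, the following is a hedged example of how one of these metrics, corpus-level BLEU, can be computed with NLTK; the two tokenized captions are made up for illustration, and the other metrics (METEOR, ROUGE, CIDEr, SPICE) have their own reference implementations, for instance in the COCO caption evaluation toolkit.

```python
from nltk.translate.bleu_score import corpus_bleu

# one list of reference captions per image (here: a single image with one reference)
references = [[["a", "man", "is", "milking", "a", "cow", "in", "a", "barn"]]]
# one generated caption per image
hypotheses = [["a", "man", "is", "milking", "a", "cow", "in", "a", "field"]]

bleu4 = corpus_bleu(references, hypotheses, weights=(0.25, 0.25, 0.25, 0.25))
print(f"BLEU-4: {bleu4:.3f}")
```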
FIGURE 3.7: Comparison of M² with Transformer-based alternatives (Cornia et al., 2020). Columns: B-1/B-4 = BLEU-1/BLEU-4, M = METEOR, R = ROUGE, C = CIDEr, S = SPICE.

                                       B-1    B-4    M      R      C      S
Transformer (w/ 6 layers as in [39])   79.1   36.2   27.7   56.9   121.8  20.9
Transformer (w/ 3 layers)              79.6   36.5   27.8   57.0   123.6  21.1
Transformer (w/ AoA [14])              80.3   38.8   29.0   58.4   129.1  22.7
M² Transformer 1-to-1 (w/o mem.)       80.5   38.2   28.9   58.2   128.4  22.2
M² Transformer 1-to-1                  80.3   38.2   28.9   58.2   129.2  22.5
M² Transformer (w/o mem.)              80.4   38.3   29.0   58.2   129.4  22.6
M² Transformer (w/ softmax)            80.3   38.4   29.1   58.3   130.3  22.5
M² Transformer                         80.8   39.1   29.2   58.6   131.2  22.6

The transformer architecture in its original configuration with six layers had already been applied to captioning, but researchers speculated that specific architectures might be required for this task, so several variations of the original transformer are compared with the M² Transformer. These variations are a transformer with three layers and the "Attention on Attention" (AoA) approach (Huang et al., 2019) applied to the attentive layers, both in the encoder and in the decoder (Cornia et al., 2020). The second part of the comparison evaluates the importance of the meshed connections between encoder and decoder layers: M² Transformer 1-to-1 is a reduced version of the original M² Transformer in which each encoder layer is connected only to the corresponding decoder layer instead of being connected to all decoder layers.
As can be seen in Fig. 3.7, the original Transformer reaches a CIDEr score of 121.8, which is lower than the reduced M² Transformer 1-to-1 with 129.2 CIDEr. The meshed connectivity, which helps to exploit relationships encoded at all layers and weights them with a sigmoid gating, leads to a further improvement in CIDEr from 129.2 to 131.2. The table also shows the role of the memory vectors and of the softmax gating schema: eliminating the memory vectors reduces the performance by nearly 1 CIDEr point for both the reduced and the full M² Transformer (Cornia et al., 2020).

FIGURE 3.8: Comparison with the state-of-the-art on the "Karpathy" test split, in single-model setting (Cornia et al., 2020).

                     B-1    B-4    M      R      C      S
SCST [33]            -      34.2   26.7   55.7   114.0  -
Up-Down [4]          79.8   36.3   27.7   56.9   120.1  21.4
RFNet [15]           79.1   36.5   27.7   57.3   121.9  21.2
Up-Down+HIP [49]     -      38.2   28.4   58.3   127.2  21.9
GCN-LSTM [48]        80.5   38.2   28.5   58.3   127.6  22.0
SGAE [46]            80.8   38.4   28.4   58.6   127.8  22.1
ORT [13]             80.5   38.6   28.7   58.4   128.3  22.6
AoANet [14]          80.2   38.9   29.2   58.8   129.8  22.4
M² Transformer       80.8   39.1   29.2   58.6   131.2  22.6

Fig. 3.8 compares the performance of the M² Transformer with several recently proposed models for image captioning.
SCST (Rennie et al., 2017) and Up-Down (Anderson et al., 2018) use attention over a grid of features and attention over regions, respectively. RFNet (?) uses a recurrent fusion network to merge different CNN features; GCN-LSTM (Yao et al., 2018b) uses a Graph CNN to exploit pairwise relationships between image regions; SGAE (Yang et al., 2019) instead uses auto-encoding scene graphs. The original AoANet (Huang et al., 2019) approach uses attention on attention for encoding the image regions and an LSTM language model. Finally, ORT (Herdade et al., 2019) uses a plain transformer and weights attention scores in the region encoder with pairwise distances between detections (Cornia et al., 2020).

As shown in Fig. 3.8, the M² Transformer exceeds all other models on BLEU-4, METEOR, and CIDEr. Its performance is very close and competitive with SGAE on BLEU-1 and with ORT with respect to SPICE.
FIGURE 3.9: Examples of captions generated by the M² Transformer and the original Transformer model, as well as the corresponding ground-truths (Cornia et al., 2020). [For instance, for an image of a farmer in a barn the ground truth reads "A man milking a brown and white cow in barn", the original Transformer generates "A man is standing next to a cow", and the M² Transformer generates "A man is milking a cow in a barn".]

Fig. 3.9 shows some examples of captions generated by the M² Transformer and the original Transformer model, together with the corresponding ground-truths. Judging from the selected examples, the M² Transformer is able to generate more accurate descriptions of the images and to detect more detailed relationships between image regions (Cornia et al., 2020).

The M² Transformer is thus a new transformer-based architecture for image captioning. It improves the image encoding by learning a multi-level representation of the relationships between image regions while exploiting a-priori knowledge in each encoder layer, and it uses a mesh-like connectivity at the decoding stage to exploit low- and high-level features during language generation. The evaluation on MS COCO shows that the M² Transformer surpasses most recent approaches and achieves a new state of the art on MS COCO (Cornia et al., 2020).
3.2 Text2Image

Author: Karol Urbańczyk

Supervisor: Jann Goschenhofer

Have you ever wondered what a painting artist could paint for you if you ordered a high-quality oil painting of a psychedelic hamster dragon? Probably not. Nevertheless, one of the answers could be:

FIGURE 3.10: Hamster dragon.

The catch is that there is no human artist. The above picture comes from a 3.5-billion-parameter model called GLIDE by OpenAI (Nichol et al., 2021b).
Every single value of every pixel was generated from a distribution that the model had to learn in the first place. Before generating the image, GLIDE abstracted the concepts of 'hamster' and 'dragon' from looking at millions of training images. Only then was it able to create and combine them into a meaningful visual representation. Welcome to the world of current text-to-image modelling!

The cross-modal field of text-to-image models has developed significantly over recent years. What was considered unimaginable only a few years ago today constitutes a new benchmark for researchers. New breakthroughs are being published every couple of months. Following these, possible business use cases are emerging, which attracts investment from the greatest players in AI research. However, the trend towards closed-source models is continuing, and the text-to-image field is probably one of the most obvious ones where it can be noticed. We might need to get used to the fact that the greatest capabilities will soon be monopolized by a few companies. At the same time, the general public is becoming aware of the field itself and of the disruption potential it brings. Crucial questions are already emerging: What constitutes art? What does the concept of being an author mean?
The result of a generative model is in a sense a combination, or variation, of the abstractions it has seen in the past. But the same holds for a human author. Therefore, is a discussion about prejudices and biases needed? Answers to all of these questions will require refinement through an extensive discussion. The last section of this chapter will try to highlight the most important factors that will need to be considered.

However, the primary intention of this chapter is to present the reader with a perspective on how the field developed chronologically. Starting with the introduction of GANs, through the first cross-domain models, and ending with state-of-the-art achievements (as of September 2022), it will also try to grasp the most important concepts without being afraid of making technical deep dives. The author is aware that the rapid development pace makes it nearly impossible for this section to stay up-to-date, so it might very soon not fully cover the field. However, it must be stressed that the cutting-edge capabilities of the recent models tend to come from scale and software engineering tricks. Therefore, focusing on the core concepts should hopefully give this chapter a universal character, at least for some time. This design choice also explains why many important works did not make it into this publication. Just to name a few of them as honorable mentions: GAWWN (Reed et al., 2016a), MirrorGAN (Qiao et al., 2019), or most recent ones: LAFITE (Zhou et al., 2021), Make-a-Scene (Gafni et al., 2022), or CogView (Ding et al., 2021).
In one way or another, all of them pushed the research frontier one step further. Therefore, it needs to be clearly stated: the final selection of this chapter's content is a purely subjective decision of the author.

3.2.1 Seeking objectivity

Before diving into particular models, we introduce objective evaluation procedures that help assess the performance of consecutive works in comparison to their predecessors. Unfortunately, objectivity in comparing generative models is very hard to capture, since there is no straightforward way to draw deterministic conclusions about a model's performance (Theis et al., 2015). However, multiple quantitative and qualitative techniques have been developed to make up for it. Unfortunately, there is no general consensus as to which measures should be used. An extensive comparison has been performed by Borji (2018). A few of the most widely used ones in current research are presented below.

Inception Score (IS)

Introduced by Salimans et al. (2016), the Inception Score (IS) uses the Inception Net (Szegedy et al., 2015) trained on ImageNet data to classify the fake images generated by the assessed model. Then, it measures the average KL divergence between the marginal label distribution p(y) and the label distribution conditioned on the generated samples p(y|x):

IS = exp( E_x [ KL( p(y|x) || p(y) ) ] )

p(y) is desired to have high diversity (entropy); in other words, images from the generative model should represent a wide variety of classes. On the other hand, p(y|x) is desired to have low diversity, meaning that each image should represent a meaningful concept: if a range of cat images is being generated, they all should be confidently classified by Inception Net as cats. The intention behind IS is that a generative model with a higher distance (KL divergence in this case) between these distributions should have a better score. IS is considered a metric that correlates well with human judgment, hence its popularity.
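To make the definition concrete, below is a minimal numpy sketch of the computation. It assumes that preds is an (N, 1000) array of Inception Net softmax outputs p(y|x) for N generated images (obtaining those predictions is omitted here), and it skips the common practice of averaging the score over several splits of the sample.

import numpy as np

def inception_score(preds, eps=1e-12):
    """preds: (N, K) array of Inception softmax probabilities p(y|x) for N generated images."""
    p_y = preds.mean(axis=0, keepdims=True)                                   # marginal label distribution p(y)
    kl = (preds * (np.log(preds + eps) - np.log(p_y + eps))).sum(axis=1)      # KL(p(y|x) || p(y)) per image
    return float(np.exp(kl.mean()))                                           # IS = exp(E_x[KL])

# toy usage with random "predictions"
rng = np.random.default_rng(0)
fake_preds = rng.dirichlet(np.ones(1000), size=512)
print(inception_score(fake_preds))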
Fréchet Inception Distance (FID)

A metric that is generally considered to improve upon the Inception Score is the Fréchet Inception Distance (FID). Heusel et al. (2017) argue that the main drawback of IS is that it does not consider the real data at all. Therefore, FID again uses Inception Net, however this time it embeds the images (both fake and real samples) into a feature space, stopping at a specific layer; in other words, some of the final layers of the network are discarded. The feature vectors are then assumed to follow a Gaussian distribution, and the Fréchet distance is calculated between the real and generated data distributions:

d^2((m, C), (m_w, C_w)) = ||m − m_w||_2^2 + Tr(C + C_w − 2 (C C_w)^{1/2})

where (m, C) and (m_w, C_w) represent the mean and covariance of the generated and real data Gaussians, respectively. Obviously, low FID values are desired. FID is considered to be consistent with human judgement and sensitive to image distortions, which are both desired properties. Figure 3.11 shows how FID increases (worsens) for different types of noise being added to images.

FIGURE 3.11: FID is evaluated for different noise types. From upper left to lower right: Gaussian noise, Gaussian blur, implanted black rectangles, swirled images, salt and pepper noise, CelebA dataset contaminated by ImageNet images. Figure from Heusel et al. (2017).
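The following numpy/scipy sketch mirrors this formula. It assumes that real_feats and fake_feats are (N, D) arrays of Inception features (for example the 2048-dimensional pooling layer) that have already been extracted; the feature extraction itself is not shown.

import numpy as np
from scipy.linalg import sqrtm

def frechet_inception_distance(real_feats, fake_feats):
    """real_feats, fake_feats: (N, D) arrays of Inception features for real and generated images."""
    m, C = fake_feats.mean(axis=0), np.cov(fake_feats, rowvar=False)
    m_w, C_w = real_feats.mean(axis=0), np.cov(real_feats, rowvar=False)
    covmean = sqrtm(C @ C_w)              # matrix square root of the covariance product
    if np.iscomplexobj(covmean):          # numerical noise can introduce tiny imaginary parts
        covmean = covmean.real
    diff = m - m_w
    return float(diff @ diff + np.trace(C + C_w - 2.0 * covmean))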
Precision / Recall

Precision and recall are among the most widely used metrics in many machine learning problem formulations. However, their classic definition cannot be applied to generative models due to the lack of objective labels. Sajjadi et al. (2018) came up with a novel definition of these metrics calculated directly from distributions, which was further improved by Kynkäänniemi et al. (2019). The argument behind the need for such an approach is that metrics such as IS or FID provide only a one-dimensional view of the model's performance, ignoring the trade-off between precision and recall. A decent FID result might very well mean high recall (large variation, i.e. a wide range of data represented by the model), high precision (realistic images), or anything in between. Let Pr denote the probability distribution of the real data, and Pg the distribution of the generated data. In short, recall measures to what extent Pr can be generated from Pg, while precision tries to grasp how many generated images fall within Pr.

FIGURE 3.12: Definition of precision and recall for distributions. Figure from Kynkäänniemi et al. (2019).

See Kynkäänniemi et al. (2019) for a more thorough explanation.
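As an illustration of the idea, here is a simplified numpy sketch of the k-nearest-neighbour manifold estimate used by Kynkäänniemi et al. (2019). It assumes real and fake are (N, D) arrays of image embeddings; the reference implementation works on VGG-16 features and batches the distance computations, so this is only meant to convey the principle.

import numpy as np

def pairwise_dist(a, b):
    # Euclidean distances between all rows of a and all rows of b
    return np.sqrt(((a[:, None, :] - b[None, :, :]) ** 2).sum(-1))

def knn_radii(feats, k=3):
    d = pairwise_dist(feats, feats)
    d.sort(axis=1)
    return d[:, k]          # distance to the k-th neighbour (column 0 is the point itself)

def coverage(query, support, radii):
    # fraction of query points falling inside at least one hypersphere around a support point
    d = pairwise_dist(query, support)
    return float((d <= radii[None, :]).any(axis=1).mean())

def precision_recall(real, fake, k=3):
    precision = coverage(fake, real, knn_radii(real, k))    # generated samples inside the real manifold
    recall = coverage(real, fake, knn_radii(fake, k))       # real samples inside the generated manifold
    return precision, recall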
CLIP score

CLIP is a model from OpenAI [CLIP2021] which is explained in detail in the chapter about text-supporting computer vision models. In principle, CLIP is capable of assessing the semantic similarity between a text caption and an image. Following this rationale, the CLIP score can be used as a metric and is defined as:

E[ s · ( f(image) · g(caption) ) ]

where the expectation is taken over the batch of generated images, f and g denote the CLIP image and caption encoders, and s is the CLIP logit scale (Nichol et al., 2021b).
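A minimal numpy sketch of this metric is given below. It assumes the CLIP embeddings for the generated images and their prompts have already been computed and L2-normalised (the encoders themselves are not shown), and that logit_scale plays the role of s, commonly around 100 in released CLIP checkpoints.

import numpy as np

def clip_score(image_embs, text_embs, logit_scale=100.0):
    """image_embs, text_embs: (N, D) L2-normalised CLIP embeddings of generated images and their prompts."""
    sims = (image_embs * text_embs).sum(axis=1)   # cosine similarity f(image) . g(caption) per pair
    return float(logit_scale * sims.mean())       # E[s (f(image) . g(caption))]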
Human evaluations

It is common for researchers to also report qualitative measures. Many potential applications of the models are focused on deceiving the human spectator, which motivates reporting metrics that are based on human evaluation. The general concept of these evaluations is to test for photorealism and for caption similarity (image-text alignment). Usually, a set of images is presented to a human, whose task is to assess their quality with respect to the two above-mentioned criteria.

3.2.2 Generative Adversarial Networks

The appearance of Generative Adversarial Networks (GANs) was a major milestone in the development of generative models. Introduced by Goodfellow et al. (2014c), the idea of GANs presented a novel architecture and training regime, which corresponds to a minimax two-player game between a Generator and a Discriminator (hence the word adversarial).
GANs can be considered an initial enabler for the field of text-to-image models, and for a long time GAN-like models were achieving state-of-the-art results, hence the presentation of their core concepts in this chapter.

3.2.2.1 Vanilla GAN for Image Generation

In a vanilla GAN, the Generator model (G) and the Discriminator model (D) are optimized together in a minimax game, where G aims at generating a sample so convincing that D will not be able to distinguish whether it comes from the real or the generated image distribution. On the other hand, D is being trained to discriminate between the two. Originally, a multilayer perceptron was proposed as the model architecture for both D and G, although in theory any differentiable function could be used.

More formally, let p_z denote the prior distribution defined on the input noise vector z. Then, the generator G(z) represents a function that maps this noisy random input to a generated image x. The discriminator D(x) outputs the probability that x comes from the real data rather than from the generator's distribution p_g. In this framework, D shall maximize the probability of guessing the correct label of both real and fake data, while G is trained to minimize log(1 − D(G(z))). Such a setup corresponds to the following minimax value function:

min_G max_D V(D, G) = E_{x∼p_data(x)} [ log D(x) ] + E_{z∼p_z(z)} [ log(1 − D(G(z))) ]

Figure 3.13 depicts this process in a visual way.

FIGURE 3.13: GAN framework as proposed in Goodfellow et al. (2014c).
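To connect the value function to an actual training procedure, below is a minimal PyTorch sketch with small MLPs for G and D operating on flattened images; the architectural choices (layer sizes, Adam, learning rates) are illustrative assumptions, not the original configuration.

import torch
from torch import nn

latent_dim, img_dim = 100, 28 * 28
G = nn.Sequential(nn.Linear(latent_dim, 256), nn.ReLU(), nn.Linear(256, img_dim), nn.Tanh())
D = nn.Sequential(nn.Linear(img_dim, 256), nn.LeakyReLU(0.2), nn.Linear(256, 1), nn.Sigmoid())
opt_g = torch.optim.Adam(G.parameters(), lr=2e-4)
opt_d = torch.optim.Adam(D.parameters(), lr=2e-4)
eps = 1e-8

def train_step(real_images):
    # real_images: (batch, img_dim) tensor scaled to [-1, 1]
    z = torch.randn(real_images.size(0), latent_dim)

    # Discriminator step: maximize log D(x) + log(1 - D(G(z)))
    d_loss = -(torch.log(D(real_images) + eps).mean()
               + torch.log(1 - D(G(z).detach()) + eps).mean())
    opt_d.zero_grad(); d_loss.backward(); opt_d.step()

    # Generator step: minimize log(1 - D(G(z))), exactly as in the value function above
    g_loss = torch.log(1 - D(G(z)) + eps).mean()
    opt_g.zero_grad(); g_loss.backward(); opt_g.step()
    return d_loss.item(), g_loss.item()

In practice, minimizing log(1 − D(G(z))) saturates early in training, which is why the original paper already recommends maximizing log D(G(z)) for the generator instead.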
Some of the generated samples that had already been achieved with this architecture in 2014 can be seen in Figure 3.14.

FIGURE 3.14: Samples from generators trained on different datasets: a) MNIST, b) TFD, c) CIFAR-10 (MLP used for G and D), d) CIFAR-10 (CNN used). Highlighted columns show the nearest real example of the neighbouring sample. Figure from Goodfellow et al. (2014c).

3.2.2.2 Conditioning on Text

So far, only image generation has been covered, completely ignoring the textual input. Reed et al. (2016c) introduced an interesting concept of conditioning a DC-GAN (a GAN with CNNs as Generator and Discriminator) on textual embeddings. A separate model is trained and used for encoding the text. The resulting embeddings are concatenated with the noise vector and fed into the Generator, and the Discriminator takes the embeddings as an input as well. The resulting model is referred to as GAN-INT-CLS. Both abbreviations (INT and CLS) stand for specific training choices, which are explained later in this chapter.
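A stripped-down PyTorch sketch of these two conditioning points follows. It replaces the convolutional networks of the actual model with dense layers and flattened images to keep the code short; in the original architecture the projected text embedding is replicated spatially and concatenated with convolutional feature maps, and the dimensions used here (1024-d text embedding, 128-d projection) are only illustrative.

import torch
from torch import nn

class CondGenerator(nn.Module):
    # The noise vector is concatenated with a compressed text embedding
    def __init__(self, latent_dim=100, text_dim=1024, proj_dim=128, img_dim=64 * 64 * 3):
        super().__init__()
        self.project = nn.Sequential(nn.Linear(text_dim, proj_dim), nn.LeakyReLU(0.2))
        self.net = nn.Sequential(nn.Linear(latent_dim + proj_dim, 512), nn.ReLU(),
                                 nn.Linear(512, img_dim), nn.Tanh())

    def forward(self, z, text_emb):
        return self.net(torch.cat([z, self.project(text_emb)], dim=1))

class CondDiscriminator(nn.Module):
    # The discriminator also receives the compressed text embedding
    def __init__(self, text_dim=1024, proj_dim=128, img_dim=64 * 64 * 3):
        super().__init__()
        self.project = nn.Sequential(nn.Linear(text_dim, proj_dim), nn.LeakyReLU(0.2))
        self.net = nn.Sequential(nn.Linear(img_dim + proj_dim, 512), nn.LeakyReLU(0.2),
                                 nn.Linear(512, 1), nn.Sigmoid())

    def forward(self, img, text_emb):
        return self.net(torch.cat([img.flatten(1), self.project(text_emb)], dim=1))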
The overview of the proposed architecture can be seen in Figure 3.15.

FIGURE 3.15: The proposed architecture of the convolutional GAN that is conditioned on text. The text encoding ϕ(t) is fed into both the Generator and the Discriminator. Before further convolutional processing, it is first projected to a lower dimensionality in fully-connected layers and concatenated with the image feature maps. Figure from Reed et al. (2016c).

Text embeddings

Since regular text embeddings are commonly trained in separation from the visual modality, simply by looking at the textual context, they are not well suited for capturing visual properties. This motivated Reed et al. (2016b) to come up with structured joint embeddings of images and text descriptions. GAN-INT-CLS implements it in the way described in Figure 3.16.
FIGURE 3.16: The structured joint embedding formulation. Figure from Reed et al. (2016c). The text classifier induced by the learned correspondence function f_t is trained by optimizing the following structured loss:

1/N * Σ_{n=1}^{N} [ Δ(y_n, f_v(v_n)) + Δ(y_n, f_t(t_n)) ]

where {(v_n, t_n, y_n) : n = 1, ..., N} is the training data set, Δ is the 0-1 loss, v_n are the images, t_n are the corresponding text descriptions, and y_n are the class labels. The classifiers f_v and f_t are parametrized as follows:

f_v(v) = arg max_y E_{t∼T(y)} [ φ(v)^T ϕ(t) ]
f_t(t) = arg max_y E_{v∼V(y)} [ φ(v)^T ϕ(t) ]

where φ is the image encoder (e.g. a deep convolutional neural network), ϕ is the text encoder (e.g. a character-level CNN or LSTM), T(y) is the set of text descriptions of class y, and likewise V(y) for images. The intuition here is that a text encoding should have a higher compatibility score with images of the corresponding class compared to any other class, and vice-versa.

GoogLeNet is used as the image encoder φ. For the text encoding ϕ(t), the authors use a character-level CNN combined with an RNN. Essentially, the objective of the training is to minimize the distance between the encoded image and text representations. The image encoder is then discarded, and only ϕ is used, as depicted in Figure 3.15.
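The compatibility function and the induced image classifier f_v can be sketched in a few lines of numpy. The class-conditional expectations are replaced by empirical averages over the available descriptions, and training φ and ϕ to minimize the structured loss is not shown; f_t is obtained symmetrically by swapping the roles of images and texts.

import numpy as np

def compatibility(img_feats, txt_feats):
    # F(v, t) = phi(v)^T varphi(t): pairwise compatibility scores, shape (N_img, N_txt)
    return img_feats @ txt_feats.T

def classify_images(img_feats, txt_feats, txt_labels, classes):
    # f_v(v): the class whose text descriptions are, on average, most compatible with the image
    scores = np.stack([compatibility(img_feats, txt_feats[txt_labels == y]).mean(axis=1)
                       for y in classes], axis=1)
    return np.asarray(classes)[scores.argmax(axis=1)]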
GAN-CLS

CLS stands for Conditional Latent Space, which essentially means that the GAN is conditioned on the embedded text. However, in order to fully grasp how exactly the model is conditioned on the input, we need to go beyond architectural choices. It is also crucial to present the specific training regime that was introduced for GAN-CLS and the motivation behind it. One way to train the system is to view text-image pairs as joint observations and train the discriminator to classify the entire pair as real or fake. However, in such a case the discriminator has no notion of whether the image matches the meaning of the text, because it does not distinguish between the two types of error that exist: the image being unrealistic, and the image being realistic but mismatched with the text. The proposed solution to this problem is to present the discriminator with three types of observations at a time, all of which are included in the loss function: {real image with right text}, {real image with wrong text}, and {fake image with right text}. The intention is that the discriminator should classify them as {true}, {false}, {false}, respectively.

GAN-INT

The motivation behind this concept comes from the fact that interpolating between text embeddings tends to create observation pairs that are still close to the real data manifold. Therefore, generating additional synthetic text embeddings and using them instead of real captions in the training process works as a form of data augmentation and helps regularize the training process. Figure 3.17 might be helpful for developing the intuition behind the interpolation process.
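A compact PyTorch sketch of both tricks is given below, assuming D is a text-conditioned discriminator like the one sketched earlier (taking an image batch and a caption-embedding batch and returning probabilities). Simply averaging the two mismatch terms is an assumption of this sketch rather than a prescribed weighting.

import torch

def gan_cls_d_loss(D, real_imgs, right_txt, wrong_txt, fake_imgs, eps=1e-8):
    # {real image, right text} -> true; {real image, wrong text} -> false; {fake image, right text} -> false
    s_r = D(real_imgs, right_txt)
    s_w = D(real_imgs, wrong_txt)
    s_f = D(fake_imgs.detach(), right_txt)
    return -(torch.log(s_r + eps)
             + 0.5 * (torch.log(1 - s_w + eps) + torch.log(1 - s_f + eps))).mean()

def gan_int_embedding(t1, t2, beta=0.5):
    # GAN-INT: a synthetic caption embedding interpolated between two real ones,
    # usable in place of a real caption when training the generator
    return beta * t1 + (1 - beta) * t2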
Results

The model achieves the best performance when both of the mentioned methods are in use (GAN-INT-CLS). The models successfully transfer style (pose of the objects) and background from the training data when trained on the CUB (birds) and Oxford-102 (flowers) datasets. They also show interesting zero-shot abilities, meaning they can generate observations from unseen test classes (Figure 3.18). When trained on MS-COCO, GAN-CLS proves its potential to generalize over many domains, although the results are not always coherent (Figure 3.19).

FIGURE 3.17: Interpolating between sentences. Figure from Reed et al. (2016c).

FIGURE 3.18: Zero-shot generated birds using GAN, GAN-CLS, GAN-INT, GAN-INT-CLS. Figure from Reed et al. (2016c).

FIGURE 3.19: Generated images using GAN-CLS on the MS-COCO validation set. Figure from Reed et al. (2016c).

3.2.2.3 Further GAN-like development

Generative Adversarial Networks were the leading approach for text-to-image models for most of the field's short history. In the years following the introduction of GAN-INT-CLS, new concepts kept emerging, trying to push the results further. Many of them had a GAN architecture as their core part.
In this section, a few such ideas are presented. The intention is to quickly skim through the most important ones; a curious reader should follow the corresponding papers.

StackGAN

Zhang et al. (2016a) introduced the StackGAN. The main contribution of the paper, which also found its place in other researchers' works, was the idea of stacking more than one generator-discriminator pair inside the architecture. The Stage-II (second pair) generator is supposed to improve the results from Stage-I, taking into account only the text embedding (the same as in Stage-I) and the image generated in Stage-I, without a random vector. The deliberate omission of the random vector results in the generator working directly on improving the results from Stage-I. The purpose is also to increase the resolution (here from 64x64 to 256x256). The authors obtained great results already with two stages; however, in principle the architecture allows for stacking more of them.

FIGURE 3.20: Example text descriptions with the corresponding Stage-I and Stage-II images generated by StackGAN.
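The interface of such a Stage-II generator can be sketched as follows. This is a heavily simplified PyTorch illustration whose layer choices are assumptions (the actual Stage-II network uses a downsampling encoder, conditioning augmentation and several residual blocks); the only point it makes is that the inputs are the Stage-I image and the text embedding, with no noise vector.

import torch
from torch import nn

class StageIIGenerator(nn.Module):
    # Refines a 64x64 Stage-I image into a 256x256 one, conditioned on the text embedding only
    def __init__(self, text_dim=1024, proj_dim=128):
        super().__init__()
        self.project = nn.Linear(text_dim, proj_dim)
        self.encode = nn.Conv2d(3 + proj_dim, 64, kernel_size=3, padding=1)
        self.refine = nn.Sequential(
            nn.ReLU(),
            nn.Upsample(scale_factor=4, mode="nearest"),   # 64x64 -> 256x256
            nn.Conv2d(64, 3, kernel_size=3, padding=1),
            nn.Tanh(),
        )

    def forward(self, stage1_img, text_emb):               # note: no noise vector z
        b, _, h, w = stage1_img.shape
        cond = self.project(text_emb).view(b, -1, 1, 1).expand(b, -1, h, w)
        return self.refine(self.encode(torch.cat([stage1_img, cond], dim=1)))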
AttnGAN

It is 2017, and many researchers believe attention is all they need (Vaswani et al., 2017e).
The attention mechanism was probably used in text-to-image generation for the first time by Xu et al. (2017). The authors combined the idea with what StackGAN proposed and used three stages (generators G0, G1 and G2). However, this time the first layers of a particular generator attend to word feature vectors. This mechanism not only helps control how particular areas of the image are improved by consecutive generators, but also allows for visualizing attention maps.

FIGURE 3.21: Images generated by G0, G1, G2. The two bottom rows show the 5 most attended words by G1 and G2, respectively. Figure from Xu et al. (2017).
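The core of that attention step can be sketched as below, assuming the word features and the image sub-region features have already been projected to a common dimension D (in the actual model an extra projection layer takes care of this, and the resulting context vectors are combined with the region features before further upsampling).

import torch

def word_attention(region_feats, word_feats):
    """region_feats: (B, N_regions, D) image sub-region features inside a generator stage;
    word_feats: (B, N_words, D) word feature vectors from the text encoder."""
    scores = torch.bmm(region_feats, word_feats.transpose(1, 2))   # (B, N_regions, N_words)
    attn = torch.softmax(scores, dim=-1)                           # each region attends over the words
    context = torch.bmm(attn, word_feats)                          # (B, N_regions, D) word context per region
    return context, attn                                           # attn can be visualised as attention maps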
FIGURE 3.21: Images generated by G0, G1 and G2. The two bottom rows show the 5 most attended words by G1 and G2, respectively. Figure from Xu et al. (2017).

DM-GAN

Another important milestone was DM-GAN (Dynamic Memory GAN) (Zhu et al., 2019). At that time, models primarily focused on generating an initial image and then refining it into a high-resolution one (as e.g. StackGAN does). However, such models depend heavily on the quality of the initial image. This problem was the main motivation for the authors to come up with a mechanism to prevent it. DM-GAN proposes a dynamic memory module with two main components. First, its memory writing gate helps select the most important information from the text based on the initial image. Second, a response gate merges the information from the image features with the memories. Both of these help refine the initial image much more effectively.

DF-GAN

Last but not least, DF-GAN (Deep Fusion GAN) (Tao et al., 2020) improves the results by proposing three concepts. The One-Stage Text-to-Image Backbone provides an architecture that abandons the idea of multiple stacked generators and uses a single one instead. It achieves that through a smart combination of several factors, i.a. a hinge loss and the use of residual blocks. Additionally, the Matching-Aware Gradient Penalty helps achieve high semantic consistency between text and image and regularizes the learning process. Finally, the One-Way Output helps the process converge more effectively.
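As a rough illustration of the loss side of such a one-stage setup, the sketch below shows a hinge-style discriminator loss and a matching-aware gradient penalty computed on the (real image, matching caption) pair. It is a simplified reading of the ideas named above, not DF-GAN's exact code, and the penalty scale and exponent are illustrative assumptions.

```python
import torch

def hinge_d_loss(d_real, d_fake, d_mismatch):
    """Hinge loss sketch: a real image with its matching text should score high;
    generated images and real images with mismatched captions should score low."""
    return (torch.relu(1.0 - d_real).mean()
            + 0.5 * torch.relu(1.0 + d_fake).mean()
            + 0.5 * torch.relu(1.0 + d_mismatch).mean())

def matching_aware_gradient_penalty(disc, real_img, text_emb, k=2.0, p=6.0):
    """Penalize the gradient norm of the discriminator output taken w.r.t. the
    (real image, matching text) pair; k and p are illustrative values."""
    real_img = real_img.clone().requires_grad_(True)
    text_emb = text_emb.clone().requires_grad_(True)
    out = disc(real_img, text_emb).sum()
    grads = torch.autograd.grad(out, [real_img, text_emb], create_graph=True)
    grad_norm = torch.cat([g.flatten(1) for g in grads], dim=1).norm(2, dim=1)
    return k * (grad_norm ** p).mean()

class DummyDisc(torch.nn.Module):   # stand-in discriminator for the example call
    def __init__(self):
        super().__init__()
        self.img = torch.nn.Linear(3 * 64 * 64, 1)
        self.txt = torch.nn.Linear(128, 1)
    def forward(self, img, txt):
        return self.img(img.flatten(1)) + self.txt(txt)

gp = matching_aware_gradient_penalty(DummyDisc(), torch.randn(4, 3, 64, 64), torch.randn(4, 128))
```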
3.2.3 Dall-E 1

OpenAI's Dall-E undoubtedly took the text-to-image field to another level. For the first time, a model showed great zero-shot capabilities, comparable to previous domain-specific models. To achieve that, an unprecedented scale of the dataset and training process was needed: 250 million text-image pairs were collected, which enabled training a 12-billion-parameter version of the model. Unfortunately, Dall-E is not publicly available and follows the recent trend of closed-source models. Or, to put it more precisely, it started this trend, and GLIDE, Dall-E 2, Imagen, Parti and others followed. Nevertheless, Dall-E's inner workings are described in Ramesh et al. (2021b) and this section will try to explain its most important parts. Before that, however, it is crucial to understand one of the fundamental concepts that has been around in the field of generative models for quite some time, namely Variational Autoencoders.

Variational Autoencoder (VAE)

The regular Autoencoder architecture aims at finding an identity function that is capable of finding a meaningful representation of the data in a lower-dimensional space and then reconstructing it. It is considered an unsupervised learning method for dimensionality reduction; however, it is trained in a supervised regime with the data itself being the label. The component performing the reduction is called the encoder, while the part responsible for the reconstruction is called the decoder. The idea behind the Variational Autoencoder (Kingma and Welling, 2013) is similar; however, instead of learning a mapping to a static low-dimensional vector, the model learns its distribution. This design equips the decoder with the desired generative capabilities, as sampling from the latent low-dimensional space results in varying data being generated. The architecture is depicted in Figure 3.22, where qφ(z|x) denotes the encoder under the assumption that z follows a multivariate Gaussian, with µ and σ being learned; the reconstruction process is modelled by the conditional probability pθ(x|z), given a sampled latent vector z.
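A minimal sketch of this idea, assuming flattened inputs and purely linear encoder/decoder layers, looks as follows: the encoder outputs the mean and log-variance of the latent distribution, and a sample is drawn with the reparameterization trick.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyVAE(nn.Module):
    """Minimal VAE sketch: the encoder parametrizes q(z|x) by mu and log-variance,
    z is sampled via reparameterization, and the decoder models p(x|z)."""
    def __init__(self, x_dim=784, z_dim=16):
        super().__init__()
        self.enc = nn.Linear(x_dim, 2 * z_dim)   # -> [mu, log_var]
        self.dec = nn.Linear(z_dim, x_dim)

    def forward(self, x):
        mu, log_var = self.enc(x).chunk(2, dim=-1)
        z = mu + torch.exp(0.5 * log_var) * torch.randn_like(mu)        # z = mu + sigma * eps
        x_hat = self.dec(z)
        recon = F.mse_loss(x_hat, x, reduction="sum")                   # reconstruction term
        kl = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())  # KL to N(0, I)
        return x_hat, recon + kl

x_hat, loss = TinyVAE()(torch.rand(8, 784))
```

After training, new data can be generated by sampling z ~ N(0, I) and passing it through the decoder alone.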
FIGURE 3.22: Variational (probabilistic) Autoencoder architecture. Figure from Weng (2018).

VQ-VAE / dVAE

The VQ-VAE (Vector Quantized VAE) (van den Oord et al., 2017) differs from the regular VAE in the way it approaches encoding the latent space. Instead of mapping data into a continuous distribution, the Vector Quantized version does it in a discrete way. This is motivated by the fact that many data modalities are more naturally represented in a discrete way (e.g. speech, human language, reasoning about objects in images). VQ-VAE achieves this by using a separate codebook of vectors. The architecture is depicted in Figure 3.23.

FIGURE 3.23: VQ-VAE architecture. Figure from van den Oord et al. (2017).
The idea is to map the output of the encoder to one of the K vectors of the codebook. This process is called quantization and essentially means finding the vector that is the nearest neighbour of the encoder's output (in the sense of Euclidean distance). From this moment on, the newly found codebook vector is used instead. The codebook itself is also subject to the learning process. One could argue that passing gradients through such a discrete system during training might be problematic. VQ-VAE overcomes this problem by simply copying the gradients from the decoder's input to the encoder's output. A great explanation of the training process and further mathematical details can be found in Weng (2018) and Snell (2021).
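The quantization step itself is compact enough to sketch directly; the snippet below performs the nearest-neighbour lookup and the straight-through gradient copy described above (the codebook size and dimensionality are arbitrary choices).

```python
import torch

def quantize(z_e, codebook):
    """VQ-VAE-style quantization sketch.
    z_e: (N, D) encoder outputs; codebook: (K, D) learnable embedding vectors."""
    dists = torch.cdist(z_e, codebook)        # (N, K) Euclidean distances to every code
    idx = dists.argmin(dim=1)                 # nearest-neighbour code index per vector
    z_q = codebook[idx]                       # quantized vectors
    # straight-through estimator: the forward pass uses z_q, the backward pass copies
    # the gradient from the decoder input directly to the encoder output z_e
    z_q = z_e + (z_q - z_e).detach()
    return z_q, idx

codebook = torch.randn(512, 64, requires_grad=True)   # K = 512 codes of dimension 64
z_q, idx = quantize(torch.randn(10, 64, requires_grad=True), codebook)
```

The additional codebook and commitment loss terms used to train the codebook are omitted here; see the references above for the full objective.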
Dall-E, however, uses what is called a dVAE. Essentially, it is a VQ-VAE with a couple of details changed. In short, the main difference is that instead of learning a deterministic mapping from the encoder's output to the codebook, it produces probabilities of the latent representation over all codebook vectors.

Dall-E system

Dall-E is composed of two stages. The above introduction of VQ-VAE was necessary to understand the first one: it trains a dVAE to compress 256×256 images into a 32×32 grid of tokens. This model plays a crucial role in the second stage, which is about learning the prior distribution of text-image pairs. First, the text is byte-pair encoded (Sennrich et al., 2015a) into a maximum of 256 tokens, with a vocabulary of size 16384. Next, the image representation encoded by the previously trained dVAE is unrolled (from a 32×32 grid to 1024 tokens) and concatenated to the text tokens. This sequence (of 256 + 1024 tokens) is used as the input for a huge transformer-like architecture, whose goal is to autoregressively model next-token prediction. At inference time, the text caption is again encoded into at most 256 tokens. The generation process then predicts all of the next 1024 image-related tokens, which are later decoded with the dVAE decoder trained in the first stage. Its output is the final image.
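The token bookkeeping of this two-stage setup can be sketched as follows. The `transformer` and `dvae_decoder` objects are assumed stand-ins (not real library components); only the sequence layout of 256 text tokens followed by 1024 image tokens follows the description above.

```python
import torch

TEXT_LEN, IMG_TOKENS = 256, 32 * 32        # 256 text tokens + 1024 image tokens

def build_training_sequence(text_tokens, image_grid):
    """Concatenate padded BPE text tokens with the unrolled 32x32 grid of dVAE codes."""
    pad = torch.zeros(TEXT_LEN - len(text_tokens), dtype=torch.long)
    return torch.cat([text_tokens, pad, image_grid.flatten()])     # length 1280

@torch.no_grad()
def generate(transformer, dvae_decoder, text_tokens):
    """Autoregressively sample the 1024 image tokens, then decode them to pixels."""
    pad = torch.zeros(TEXT_LEN - len(text_tokens), dtype=torch.long)
    seq = torch.cat([text_tokens, pad])
    for _ in range(IMG_TOKENS):
        logits = transformer(seq.unsqueeze(0))[0, -1]              # next-token logits
        nxt = torch.multinomial(logits.softmax(-1), 1)
        seq = torch.cat([seq, nxt])
    image_codes = seq[-IMG_TOKENS:].view(32, 32)
    return dvae_decoder(image_codes)                               # final image
```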
Results

The results achieved with the original Dall-E attracted so much attention mainly due to their diversity and the model's zero-shot capabilities. Dall-E was capable of producing better results than previous state-of-the-art models that were trained on data coming from the same domain as the evaluation data. One comparison can be seen in Figure 3.24. Outputs of some of the prior approaches described in this chapter, compared with Dall-E, can be seen in Figure 3.25.

Limitations

Although Dall-E made a huge step forward in text-to-image modelling, it still shows multiple flaws. First, the photorealism of the outputs is still relatively low. In other words, when prompted for images of realistic situations, it is rarely capable of deceiving human evaluators. Second, the model has evident problems with understanding relatively complex abstractions, such as text inside an image or relative object positions in a scene.

FIGURE 3.24: Human evaluation of Dall-E vs DF-GAN on text captions from the MS-COCO dataset. When asked for realism and caption similarity, evaluators preferred Dall-E's results over 90% of the time. Figure from Ramesh et al. (2021b).

FIGURE 3.25: Comparison of the results from Dall-E vs prior works on MS-COCO. Dall-E's outputs are chosen as the best out of 512 images, ranked by a contrastive model. Figure from Ramesh et al. (2021b).
3.2.4 GLIDE

Introduced by Nichol et al. (2021b), GLIDE started an era of huge-scale diffusion models. The concept of diffusion had already been used in Deep Learning for some time before. However, the authors of GLIDE took a step further and combined it with text-based guidance, which steers the generation process towards the text's meaning. This powerful method was proven to achieve outstanding results, which remain competitive with current state-of-the-art models at the time of writing.

Diffusion models

Before understanding the inner workings of GLIDE, it is important to introduce the core concept that drives it, namely diffusion. The idea of diffusion originates from physics. In short, it corresponds to the process of diffusing particles, for example of one fluid in another.
Normally it has a unidirectional character; in other words, it cannot be reversed. However, as Sohl-Dickstein et al. (2015) managed to show, and Ho et al. (2020a) later improved, if the data diffusion process is modelled as a Markov chain with Gaussian noise being added in consecutive steps, it is possible to learn how to reverse it. This reversed process is exactly how images are generated by the model from pure random noise.

Let us construct a Markov chain where the initial data point is denoted by x0. In t steps, Gaussian noise is added to the data. The distribution of the data at step t can be characterized in the following way:

$$q(x_t \mid x_{t-1}) := \mathcal{N}\big(x_t;\ \sqrt{\alpha_t}\, x_{t-1},\ (1-\alpha_t)I\big)$$

where (1 − αt) parametrizes the magnitude of the noise being added at each step. Now, if xt−1 is to be reconstructed from xt, a model needs to learn to predict estimates of the gradients from the previous steps. The probability distribution of the previous step can be estimated as follows:

$$p_\theta(x_{t-1} \mid x_t) = \mathcal{N}\big(x_{t-1};\ \mu_\theta(x_t),\ \Sigma_\theta(x_t)\big)$$

where the mean function µθ was proposed by Ho et al. (2020a). For a more detailed explanation of how this is later parametrized and trained, one can follow Weng (2021).
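A small sketch of the forward process helps make this concrete. It uses the standard closed-form property of this Gaussian chain, namely that x_t can be sampled directly from x_0 using the cumulative product of the α values; the noise schedule below is an arbitrary illustration.

```python
import torch

def forward_noise(x0, alphas, t):
    """Sample x_t directly from x_0 via the cumulative product of the alphas."""
    alpha_bar = torch.prod(alphas[: t + 1])
    noise = torch.randn_like(x0)
    x_t = torch.sqrt(alpha_bar) * x0 + torch.sqrt(1.0 - alpha_bar) * noise
    return x_t, noise

x0 = torch.randn(1, 3, 64, 64)                     # stand-in for a clean image
alphas = torch.linspace(0.9999, 0.98, steps=1000)  # illustrative noise schedule
x_t, eps = forward_noise(x0, alphas, t=500)

# Training then amounts to regressing the injected noise, e.g.
# loss = F.mse_loss(model(x_t, t), eps); the reverse (generation) chain plugs the
# predicted noise into the mean of p_theta(x_{t-1} | x_t) as parametrized by Ho et al. (2020a).
```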
GLIDE system

GLIDE can essentially be broken down into two parts. The first is a pretrained Transformer model, which is responsible for creating the text embeddings. The last token embedding is used as a class embedding (text representation) in later stages. Additionally, all tokens from the last embedding layer are used (attended to) by all attention layers in the diffusion model itself. This makes the model aware of the text's meaning while reconstructing the previous step of the Markov chain.

The second component of GLIDE is the diffusion model itself. A U-Net-like architecture with multiple attention blocks is used here. This part's sole goal is to model pθ(xt−1 | xt, y), where y corresponds to the last token embedding mentioned above. Or, to put it differently, to predict ϵθ(xt | y), since the problem can be reframed as estimating the amount of noise added at each step.

Additionally, to make the model even more aware of the text's meaning, guidance is used at inference time. In short, the idea is to control the direction of the diffusion process. The authors test two different approaches. First, they try guidance with a separate classifier, OpenAI's CLIP in this case. However, better results were generally achieved with classifier-free guidance. The idea is to produce two different noise predictions at each step: one conditioned on the text, the other one not. The difference between them is calculated and then, after significant scaling, added to the unconditional prediction. This way, the model speeds up the progression of the image towards the meaning of the text. This process can be written as:

$$\hat{\epsilon}_\theta(x_t \mid y) = \epsilon_\theta(x_t \mid \emptyset) + s \cdot \big(\epsilon_\theta(x_t \mid y) - \epsilon_\theta(x_t \mid \emptyset)\big)$$

where s denotes the parameter scaling the difference between the two predictions.
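Classifier-free guidance is easy to express in code; the sketch below assumes `model(x_t, t, cond)` is a noise-prediction network and `null_emb` is the embedding of the empty prompt.

```python
import torch

def guided_eps(model, x_t, t, text_emb, null_emb, s=3.0):
    """Classifier-free guidance: scale the gap between the conditional and the
    unconditional noise prediction by s and add it to the unconditional one."""
    eps_uncond = model(x_t, t, null_emb)   # prediction without text conditioning
    eps_cond = model(x_t, t, text_emb)     # prediction with text conditioning
    return eps_uncond + s * (eps_cond - eps_uncond)
```

With s = 0 the text is ignored, s = 1 recovers the plain conditional prediction, and larger values push each denoising step more strongly towards the caption.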
Results

GLIDE achieves significantly more photorealistic results than its predecessors. FID scores reported on the MS-COCO 256×256 dataset can be seen in Figure 3.26. It is worth noting that GLIDE was not trained on this dataset, hence its zero-shot capabilities are even more impressive.

FIGURE 3.26: Comparison of FID on MS-COCO 256×256. Figure from Nichol et al. (2021b).

Model | FID | Zero-shot FID
AttnGAN (Xu et al., 2017) | 35.49 | -
DM-GAN (Zhu et al., 2019) | 32.64 | -
DF-GAN (Tao et al., 2020) | 21.42 | -
DM-GAN + CL (Ye et al., 2021) | 20.79 | -
XMC-GAN (Zhang et al., 2021) | 9.33 | -
LAFITE (Zhou et al., 2021) | 8.12 | -
DALL-E (Ramesh et al., 2021) | - | ~28
LAFITE (Zhou et al., 2021) | - | 26.94
GLIDE | - | 12.24
GLIDE (Validation filtered) | - | 12.89

The results are also preferred by human evaluators in terms of photorealism and the similarity of the image to its caption.
A comparison to DALL-E 1 results can be seen in Figure 3.27.

FIGURE 3.27: Win probabilities of GLIDE vs DALL-E. Figure from Nichol et al. (2021b).

DALL-E temp. | Photorealism | Caption similarity
No reranking, temp. 1.0 | 91% | 83%
No reranking, temp. 0.85 | 84% | 80%
DALL-E reranked, temp. 1.0 | 89% | 71%
DALL-E reranked, temp. 0.85 | 87% | 69%
DALL-E reranked + GLIDE blurred, temp. 1.0 | 72% | 63%
DALL-E reranked + GLIDE blurred, temp. 0.85 | 66% | 61%

Finally, some cherry-picked images together with their corresponding captions can be seen in Figure 3.28.

FIGURE 3.28: Samples from GLIDE with classifier-free guidance and s = 3. Figure from Nichol et al. (2021b).
Limitations

GLIDE suffers from two problems. First, it fails when presented with a complex or unusual text prompt. A few examples can be seen in Figure 3.29. Second, the model is relatively slow at inference time (much slower than GANs). This is caused by the sequential character of the architecture, where consecutive steps of the Markov chain reconstruction cannot simply be parallelized.

FIGURE 3.29: Failures happen mostly for unusual prompts. Figure from Nichol et al. (2021b).

3.2.5 Dall-E 2 / unCLIP

The contribution that probably attracted the most attention in the field is known under the name Dall-E 2 (Ramesh et al., 2022a). For the first time, the wider public picked up interest in its potential applications. This might be due to the great PR from the authors, namely OpenAI. Dall-E 2, also known as just Dall-E or unCLIP, has been advertised as a successor of Dall-E 1, on whose results it significantly improved. In reality, the architecture and the results it achieved are much more similar to those of GLIDE.
Additionally, social media has been flooded with images generated by the model. This was possible thanks to OpenAI giving access to everybody who was interested and patient enough to get through a waiting list. However, the model itself again remains unpublished. Another factor that might have contributed to Dall-E 2's success were its inpainting and outpainting capabilities, although it is worth mentioning that these were already possible with GLIDE. In essence, unCLIP is a very smart combination of prior work from OpenAI that was re-engineered and applied in a novel way. Nevertheless, the model represents a significant leap forward, which is why it cannot be omitted in this chapter.

Dall-E 2 system

UnCLIP consists of two components: a prior and a decoder. Let x be the image and y its caption, and let z_i and z_t be the CLIP image and text embeddings of this (x, y) pair.
Then, the prior P(z_i | y) is responsible for producing CLIP image embeddings conditioned on the text caption. The decoder P(x | z_i, y) outputs an image conditioned on the CLIP image embedding and, again, the text caption itself. For the prior, the authors try two different approaches, namely autoregressive and diffusion models, with the latter yielding slightly better results. The diffusion prior is a Transformer taking as input a special sequence consisting of the encoded text prompt, the CLIP text embedding, an embedding for the diffusion step, and a noised CLIP image embedding. The decoder again consists of diffusion models. First, a GLIDE-like model takes the CLIP image embedding as additional conditioning, instead of relying only on the text as in its original version. Similarly to the original GLIDE, classifier-free guidance is applied, albeit with slight differences. Lastly, two diffusion upsampler models are trained to bring images first from 64×64 to 256×256, and then from 256×256 to 1024×1024 resolution. The authors found no benefit in conditioning these models on text captions. Finally, unCLIP can be summarized as a mixture of GLIDE and CLIP with a lot of engineering behind it.
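Putting the pieces together, the generation path can be sketched as below; all five callables (`prior`, `decoder`, the two upsamplers) are assumed stand-ins for the trained components described above, not real library objects.

```python
import torch

def unclip_generate(prior, decoder, upsample_256, upsample_1024, caption_tokens, clip_text_emb):
    """Sketch of the unCLIP pipeline: prior -> CLIP image embedding,
    GLIDE-like decoder -> 64x64 image, two diffusion upsamplers -> 1024x1024."""
    z_i = prior(caption_tokens, clip_text_emb)    # predicted CLIP image embedding
    img_64 = decoder(z_i, caption_tokens)         # 64x64 sample, conditioned on z_i and the text
    img_256 = upsample_256(img_64)                # 64 -> 256, no text conditioning
    return upsample_1024(img_256)                 # 256 -> 1024, no text conditioning
```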
Results

When compared to GLIDE, unCLIP shows it is capable of representing a wider diversity of the data, while achieving a similar level of photorealism and caption similarity. Comparison to previous works on the MS-COCO dataset shows that unCLIP achieves an unprecedented FID (Figure 3.30). A few output examples generated from MS-COCO captions can be found in Figure 3.31.

FIGURE 3.30: Comparison of FID on MS-COCO. The best results for unCLIP were reported with a guidance scale of 1.25. Figure from Ramesh et al. (2022a).

Model | FID | Zero-shot FID | Zero-shot FID (filt)
AttnGAN (Xu et al., 2017) | 35.49 | - | -
DM-GAN (Zhu et al., 2019) | 32.64 | - | -
DF-GAN (Tao et al., 2020) | 21.42 | - | -
DM-GAN + CL (Ye et al., 2021) | 20.79 | - | -
XMC-GAN (Zhang et al., 2021) | 9.33 | - | -
LAFITE (Zhou et al., 2021) | 8.12 | - | -
Make-A-Scene (Gafni et al., 2022) | 7.55 | - | -
DALL-E (Ramesh et al., 2021) | - | ~28 | -
LAFITE (Zhou et al., 2021) | - | 26.94 | -
GLIDE (Nichol et al., 2021) | - | 12.24 | 12.89
Make-A-Scene (Gafni et al., 2022) | - | 11.84 | -
unCLIP (AR prior) | - | 10.63 | 11.08
unCLIP (Diffusion prior) | - | 10.39 | 10.87

Limitations

UnCLIP suffers from very similar problems as its predecessor GLIDE. First, compositionality in the images tends to sometimes be confused by the model. Failure cases can be seen in Figure 3.32. Second, unCLIP struggles with generating coherent text inside an image (Figure 3.33). The authors hypothesize that using CLIP embeddings, although improving diversity, might be responsible for making these problems more evident than in GLIDE. Lastly, unCLIP often fails at delivering details in highly complex scenes (Figure 3.34). Again, according to the authors, this might be a result of the fact that the decoder produces only 64×64 images which are later upsampled.
3.2.6 Imagen & Parti

Only a few months after unCLIP was released by OpenAI, Google came into play for the first time with its new model called Imagen (Saharia et al., 2022b). Another one followed just two months later: Parti (Yu et al., 2022b). Both of these models pushed the boundaries even further, although they take entirely different approaches. Neither of them introduces a completely new way of looking at the problem of text-to-image generation; their advancements come from engineering and further scaling of existing solutions. However, it must be stressed that currently (September 2022) they deliver the most outstanding results.
FIGURE 3.31: Image samples on MS-COCO text prompts. Figure from Ramesh et al. (2022a).

FIGURE 3.32: 'a red cube on top of a blue cube'. Figure from Ramesh et al. (2022a).
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' FIGURE 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='33: ‘A sign that says deep learning.’ Figure from Ramesh et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' (2022a).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' come from engineering and further scaling existing solutions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' However, it must be stressed that currently (September 2022) they are delivering the most outstanding results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' Imagen is a diffusion model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' Its main contribution is that instead of using a text encoder trained on image captions, it actually uses a huge pretrained NLP model called T5-XXL (Raffel et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=', 2019b) that is taken off the shelf and frozen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' Authors argue that this helps the model understand language much more deeply, as it has seen more diverse and complex texts than just image captions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' On the other hand, Parti takes an autoregressive approach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' Similarly to the first version of Dall-E, it consists of two stages, namely the image tokenizer and sequence-to-sequence autoregressive part which is responsible for gener- ating image tokens from a set of text tokens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' In this case, ViT-VQGAN (Yu 8 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' (a) unCLIP (b) GLIDEDiep Deinp Deep DSNEELH Lerpt: Deep122 3 Multimodal architectures FIGURE 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content='34: ‘A high quality photo of Times Square.’ Figure from Ramesh et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' (2022a).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6tE4T4oBgHgl3EQfCAsz/content/2301.04856v1.pdf'} +page_content=', 2021) is used as a tokenizer and the autoregressive component is again Transformer-like.' 
Results

Both of the models improved the FID significantly compared to previous works. Figure 3.35 shows the comparison.

FIGURE 3.35: Comparison of FID on MS-COCO. Figure from Yu et al. (2022b).
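Since these comparisons are all expressed in terms of FID (Fréchet Inception Distance), it may be useful to recall how the metric is computed. The following numpy/scipy sketch is purely illustrative (it is not the evaluation code of any of the papers above); `feats_real` and `feats_gen` stand for Inception features of real and generated images:

```python
import numpy as np
from scipy.linalg import sqrtm

def frechet_inception_distance(feats_real, feats_gen):
    """Fréchet distance between Gaussians fitted to two feature sets.

    feats_real, feats_gen: arrays of shape (n_samples, n_features),
    e.g. Inception-v3 pool features of real and generated images.
    """
    mu_r, mu_g = feats_real.mean(axis=0), feats_gen.mean(axis=0)
    cov_r = np.cov(feats_real, rowvar=False)
    cov_g = np.cov(feats_gen, rowvar=False)

    diff = mu_r - mu_g
    covmean = sqrtm(cov_r @ cov_g)        # matrix square root of the covariance product
    if np.iscomplexobj(covmean):          # numerical noise can create tiny imaginary parts
        covmean = covmean.real

    return diff @ diff + np.trace(cov_r + cov_g - 2.0 * covmean)
```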
Samples from Parti can be seen in Figure 3.36. They are included here on purpose: this is the current state of the art as of the moment of writing!

FIGURE 3.36: Selected samples from Parti. Figure from Yu et al. (2022b).

Limitations

Yu et al. (2022b) mention an extensive list of problems with which Parti still struggles. At this point, all of them can be treated as a set of issues common to almost all available models. Among others, they include:

- feature blending (features of two different objects get mixed up)
- omission or duplication of details
- displaced positioning of objects
- counting
- negation in text prompts

and many, many more. These flaws pose a challenge for future research, and they are undoubtedly the ones that need to be addressed first to enable another leap forward in the field of text-to-image generation.
3.2.7 Discussion

Lastly, it is important to mention a couple of different topics, or trends, which are intrinsically linked with text-to-image generation. Together with the previous sections, they should give the reader a holistic view of where research currently stands (again, as of September 2022).

Open- vs closed-source

The first trend, which has emerged only recently, is that AI labs no longer open-source their state-of-the-art models and training data. This is in clear opposition to how the entire AI community had been behaving from the very beginning of the recent Deep Learning era. Apparently, the commercial opportunities that come along with owning the software are too big to be ignored. The trend is very disruptive: it is clear that the community is currently witnessing the maturation of AI business models. Needless to say, it is followed by all the greatest AI labs, to name just a few: OpenAI, DeepMind, Google Brain, Meta AI, and many others. As long as commercial achievements have an edge over academic research, it is highly doubtful that the trend will be reversed. However, it needs to be stressed that all of them still publish more or less detailed technical specifications of their work in the form of scientific papers, which is definitely a positive factor. We, as a community, can only hope this will not change in the future.

Open-Source Community

While the trend towards closed source is clearly visible across many Deep Learning areas, text-to-image research is actually well represented by an open-source community. The most important milestones of recent years have indeed come from OpenAI; however, new approaches can be seen across a wide community of researchers.
Many of these models are public, meaning that any user with minimal coding experience can play with them. Although we decided not to go into the details of particular works, it is important to name a few that became the most popular:

- VQGAN-CLIP (Crowson et al., 2022)
- Midjourney (Midjourney, 2022)
- Latent Diffusion (Rombach et al., 2021)
- Stable Diffusion (Rombach et al., 2022)

Potential applications

Image generation that can be done in a controllable manner undoubtedly has huge potential for commercialization. Although the field is currently still very immature, hypotheses about which industries might be disrupted are emerging. Essentially, every branch that has to do with generating visual art, be it static images or videos, should observe the trend closely. Graphic design, movie making, stock photos: just to name a few that might be interested. Currently, experimental use cases in the areas of texture synthesis, product design, or building virtual reality worlds can already be observed. AI, even if still incapable of generating the final product, can help automate a significant part of the production chain, which essentially means time and money savings. The inpainting and outpainting capabilities of recent models play a significant role in this trend. Although it is still very hard to judge which direction this will take in the future, it will definitely be a very interesting and disruptive change. Who wouldn’t like to see movies soon being generated directly from a book’s text, pixel value by pixel value?
Ethics / Conclusion

Automated image generation poses an array of serious ethical questions. Fortunately, many of them are already well recognized by the community. For example, OpenAI elaborates extensively on the risks and limitations of their Dall-E 2 in a blog post by Mishkin et al. (2022). A few of the most important topics are presented here.

The first and very significant risk is the potential misuse of the models. Fake image generation can easily be used for harassment and disinformation. Especially combined with inpainting, which is capable of erasing or adding objects to real scenes, it poses a non-trivial challenge for researchers regarding how to responsibly share their work.

Another important area touches on the biases and stereotypes which are intrinsically built into the technology. Obviously, a model combines concepts from the data it has seen. However, if this area is to be commercialized, it needs to ensure broader diversity. An interesting example of Dall-E 2 samples can be seen in Figure 3.37.

In order to fully enable AI generation, the problem of copyright needs to be solved in the first place. It is definitely not clear who the author of generated images is. Is it the person who came up with a text prompt and ran the model?
Is it a model engineer? The author of the model’s architecture? The owner of the data it has been trained on? Or maybe the model itself? Another question is what really constitutes a creative contribution and should eventually result in copyright being granted. These and many other questions definitely require extensive debate and, hopefully, legal solutions to follow.

FIGURE 3.37: Biased samples from Dall-E 2 (prompt: ‘nurse’; date: April 6, 2022). Figure from Mishkin et al. (2022).

3.3 Images supporting Language Models

Author: Giacomo Loss

Supervisor: Matthias Aßenmacher

3.3.1 Words In (Non-Symbolic) Contexts

Imagine you were alone in a foreign country, you could not speak the language, and the only resource you had was a dictionary in that foreign language. You see a word written on a sign but you cannot understand its meaning. What could you do? One idea would be to open the dictionary and look the word up. The problem is that the word is defined by using other words in the foreign language.
As a second step you would thus look these new words up and continue like that in further steps to the “infinity and beyond” (cit. Buzz Lightyear). But even after looking up every single word in the dictionary, you would still not be able to understand the meaning of the word written on the sign. If, on that sign, something else was depicted next to the unknown word, for example an image of a fork and a knife, you might speculate that the word indicates something which has to do with food, like a restaurant - and this without explicitly knowing the meaning of the word. This example is inspired by the work of Stevan Harnad, who formulated in the early 1990s the so-called Symbol Grounding Problem (Harnad (1990)). It asserts that it is not possible to understand the meaning (semantics) of a word by just looking at other words, because words are essentially meaningless symbols. It is possible to understand the meaning only if the word is put in a context, a perceptual space other than that of written language: the word must be grounded in non-symbolic representations, like images, for example. Over the past 10 years there has been a whopping development of distributional semantic models (DSMs, henceforth), especially after the Word2vec (Mikolov et al. (2013b)) revolution. This family of models assumes that the meaning of words and sentences can be inferred from the “distribution” of those words and sentences within a text corpus (the Distributional Hypothesis formulated by Harris et al. (1954)).
But the Symbol Grounding Problem mentioned earlier suggests that DSMs do not resemble the way words are learned by humans, which is in multimodal perceptual contexts. For these reasons, models have been developed with the goal of integrating further modalities (like visual ones) into pure language models, assuming that grounding words and sentences in other perceptual contexts should lead to a better understanding of their semantics and, as a result, to better performance in pure language tasks.

The focus of this subchapter is on models which empower pure language models with visual modalities in the form of images: their goal is to obtain better semantic representations (in the form of embedding vectors) of words. First, a quick recap of the main pure language models will be provided. After that, the historical evolution of the integration of images as visual modalities into pure language models will be discussed: from the simple concatenation of textual and visual modalities, to the projection of visual elements into a common grounded space and, more recently, the use of Transformers (see Figure 3.38). Finally, a comprehensive evaluation of the different models against benchmarks will be carried out. Again, the focus is on how to employ visual elements to obtain embeddings able to capture the semantics of words. More concrete applications, such as those in the field of machine translation, are out of scope and will only be marginally addressed at the end of the subchapter.

FIGURE 3.38: Historical evolution of models which integrate visual information into pure language models (roughly: sequential embeddings from 2014, grounded embeddings from 2016, Transformer-based approaches and Vokenization from 2019 onwards).
3.3.2 Word-Embeddings: Survival-Kit

In other parts of this book, the most important NLP models and the latest developments in the field are extensively described. In this section, some information will be provided which might be helpful to understand some of the aspects discussed in this subchapter. As may have been inferred from the introduction, the starting point is always a pure language model, namely a model which employs only textual inputs in order to generate word embeddings, which are representations of words in the form of numerical vectors. The most widely used pure language models in the papers presented in this subchapter are the following three:
- Skipgram (Word2vec, Mikolov et al. (2013b)): given a target word, the probability of the neighboring (surrounding) words in a pre-defined window has to be maximized. Training takes place either through a hierarchical softmax or through negative sampling, which involves maximizing the probability of words which are real neighbors and minimizing that of words which are not real neighbors (the “negative samples”).
- GloVe (Pennington et al. (2014)): based on word co-occurrence across the entire corpus, with the goal of minimizing the difference between the dot product of the embedding vectors of two words and the logarithm of their number of co-occurrences.
- BERT (Devlin et al. (2018c)): two pre-training tasks are used to obtain word embeddings:
  - Masked Language Modelling (MLM): given a sentence with [MASK]ed tokens, the goal is to predict these masked tokens.
  - Next Sentence Prediction (NSP): given two sentences A and B, the goal is to predict whether B follows from A.

Two additional remarks conclude this section. First, Skipgram and GloVe generate embeddings which are “context-free”: they do not take into account the context in which words occur. On the contrary, BERT is designed to represent words given the context (sentence) in which they occur: we can thus have different embeddings for the same word, depending on the context. Second, the inputs of these models are tokens: with the help of a tokenizer, which can differ between models, the text is split into “chunks”, called tokens (and they are not necessarily single words).
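To make the Skipgram objective a bit more concrete, the following is a minimal numpy sketch of the negative-sampling loss for a single (target, context) pair. It is purely illustrative: the embedding matrices and sampled indices are hypothetical, and this is not the original word2vec implementation.

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def skipgram_negative_sampling_loss(W_in, W_out, target_id, context_id, negative_ids):
    """Negative-sampling loss for one (target, context) pair.

    W_in:  (vocab_size, dim) "input" (target) embeddings
    W_out: (vocab_size, dim) "output" (context) embeddings
    negative_ids: indices of randomly drawn words that are NOT real neighbors
    """
    v_target = W_in[target_id]
    # Maximize the probability of the word that really occurs in the window ...
    loss = -np.log(sigmoid(W_out[context_id] @ v_target))
    # ... and minimize it for the negative samples.
    for neg_id in negative_ids:
        loss -= np.log(sigmoid(-W_out[neg_id] @ v_target))
    return loss

# Toy usage with random embeddings:
rng = np.random.default_rng(0)
W_in, W_out = rng.normal(size=(1000, 100)), rng.normal(size=(1000, 100))
print(skipgram_negative_sampling_loss(W_in, W_out, target_id=3, context_id=17,
                                      negative_ids=[5, 42, 99]))
```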
3.3.3 The Beginning: Sequential Multimodal Embeddings

Supposing we had linguistic and visual feature representations related to a particular word, how could we fuse them? One intuitive idea would be to concatenate the textual and visual modalities. Let $V_{text}$ be the textual (vectorial) representation of a word and let $V_{img}$ be its visual (vectorial) representation; a fused representation $F$ of a certain word $w$ might take the following simplified form:

$$F = \gamma V_{text} \oplus (1 - \gamma) V_{img}$$

where $\oplus$ denotes concatenation and $\gamma$ is a tuning parameter which controls the relative contribution of both modalities to the final fused representation.

Bruni et al. (2014) propose a model where the meaning of a target word is represented in the form of a semantic vector and all vectors are collected in a text-based semantic matrix; textual embeddings are computed based on (transformed) co-occurrence counts of words in a pre-defined window. The starting point for obtaining an image-based representation of a certain target word is a dataset of labeled images. For each image associated with the target word (which means that the target word is to be found in the image’s caption), low-level features called “local descriptors”, which incorporate geometric information of specific areas of a certain picture, are extracted, and these descriptors are then assigned to clusters (bags) of “visual words”¹. Afterwards, for each target word, the visual word occurrences are summed up to obtain the occurrence counts related to the target word. These image-based semantic vectors are then transformed and collected in an image-based semantic matrix. The two matrices are then concatenated and projected into a common latent multimodal space with a singular value decomposition. Through this process a textual mixed matrix and a visual mixed matrix are extracted and then combined according to different fusion strategies to build the multimodal embeddings. In this first, relatively cumbersome (historically motivated) example, the vector representation of an image is obtained with non-trivial feature engineering.

¹ See for example Bosch et al. (2007) for more details on this technique, called “bag-of-visual-words”.
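Returning to the general fusion formula above, a minimal numpy sketch of this weighted concatenation may help; the L2 normalization and the value of gamma are illustrative choices, not part of any specific paper.

```python
import numpy as np

def fuse_by_concatenation(v_text, v_img, gamma=0.5):
    """Weighted concatenation F = gamma * V_text  (+)  (1 - gamma) * V_img."""
    # Normalize both modalities so that neither dominates purely by scale
    # (an illustrative choice).
    v_text = v_text / np.linalg.norm(v_text)
    v_img = v_img / np.linalg.norm(v_img)
    return np.concatenate([gamma * v_text, (1.0 - gamma) * v_img])

# Example: a 300-dim textual embedding fused with a 4096-dim CNN feature vector.
fused = fuse_by_concatenation(np.random.rand(300), np.random.rand(4096), gamma=0.7)
print(fused.shape)  # (4396,)
```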
In recent years, the use of neural networks has made an “automatic feature selection” possible. This is what, for example, Kiela and Bottou (2014) propose, extracting visual features from the first seven layers of a convolutional neural network (proposed by Krizhevsky et al. (2012b)) trained on 1.6 million images from the ImageNet database (Deng et al. (2009)), which produces scores for 1,512 object categories. The linguistic part of the model relies on the Skipgram model by Mikolov et al. (2013b) and consists of 100-dimensional vector representations. The multimodal representation is again obtained by concatenation of both modalities.

FIGURE 3.39: From Kiela and Bottou (2014). Textual and visual feature vectors are concatenated.

Another notable example of concatenation/sequential combination of textual and visual modalities is the work of Silberer and Lapata (2014): textual and visual modalities are represented by separate vectors of textual and visual attributes. During training, these textual and visual input vectors are separately fed to denoising (unimodal) autoencoders, the training objective of which is the reconstruction of a certain corrupted input, e.g. through masking noise, from a latent representation. Their outputs are then jointly fed to a bimodal autoencoder to be mapped to a multimodal space, on which a softmax layer (classification layer) is added, which allows the architecture to be fine-tuned for different tasks.
3.3.4 The Grounded Space

The aforementioned models implicitly assume a one-to-one correspondence between text and images: a visual representation is extracted only for words which are associated with a concrete image. This is a limitation, for two partially overlapping reasons. On the one hand, how can we depict words for which no image is available in our training set? Is it possible to imagine visual representations purely from linguistic ones? On the other hand, could we hypothetically find a visual representation for each word? This might be true for concrete words, but when it comes to abstract ones it is not always possible to find suitable visual representations or, said in other terms, many words are not visually grounded. For these reasons, researchers have addressed the question: could we map textual and visual elements to a grounded space and design models able to generalize images and words beyond those in the training set? Well, the answer is yes!

Lazaridou et al. (2015) propose a multimodal Skip-gram architecture where the objective function of a Skip-gram is “augmented” with an additional visual objective:

$$\frac{1}{T}\sum_{t=1}^{T}\left(\mathcal{L}_{ling}(w_t) + \mathcal{L}_{vision}(w_t)\right)$$
where $\mathcal{L}_{ling}$ is the Skip-gram loss function and $\mathcal{L}_{vision}$ is the additional visual loss for the target word $w_t$. In particular, $\mathcal{L}_{vision}$ has the form of a hinge loss, the goal of which is to make the (vectorial) linguistic representation of a certain word more similar to its visual representation:

$$\mathcal{L}_{vision}(w_t) = -\sum_{w' \sim P_n(w)} \max\left(0,\; \gamma - \cos(z_{w_t}, v_{w_t}) + \cos(z_{w_t}, v_{w'})\right)$$

where $v_{w'}$ is the visual representation of a randomly chosen word $w'$ (drawn from a probability distribution $P_n(w)$) used as a negative sample, $v_{w_t}$ is the visual vector corresponding to the target word, and $z_{w_t}$ is the target multimodal word representation which has to be learned by the model. It is nothing more than a linear transformation of a word representation $u_{w_t}$:

$$z_{w_t} = M^{u \rightarrow v} u_{w_t}$$

where $M^{u \rightarrow v}$ is a cross-modal mapping matrix from linguistic inputs to a visual representation. It is important to remark that during training, for words which do not have associated images, $\mathcal{L}_{vision}$ is set to zero. Once this cross-modal mapping matrix has been estimated, it becomes possible to find a visual representation for new words which do not have a related image in the training set: the model allows us to imagine new words. This is what is meant by a grounded space: a perceptual (visual, in this case) space where a word is grounded, put in context.
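As a minimal numpy illustration of this visual term (not the authors' implementation; the margin and the negative-sampling setup are hypothetical choices), the following returns the hinge penalty to be minimized, i.e. the negative of the $\mathcal{L}_{vision}$ written above:

```python
import numpy as np

def cosine(a, b):
    return a @ b / (np.linalg.norm(a) * np.linalg.norm(b))

def vision_hinge_penalty(u_t, v_t, V_neg, M, margin=0.5):
    """Max-margin visual penalty for one target word.

    u_t:   linguistic embedding of the target word
    v_t:   visual vector associated with the target word
    V_neg: visual vectors of randomly drawn negative-sample words
    M:     cross-modal mapping matrix (linguistic space -> visual space)
    """
    z_t = M @ u_t                      # multimodal representation of the target word
    penalty = 0.0
    for v_neg in V_neg:
        # Push z_t towards its own image vector and away from the negatives.
        penalty += max(0.0, margin - cosine(z_t, v_t) + cosine(z_t, v_neg))
    return penalty
```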
FIGURE 3.40: From Lazaridou et al. (2015). The linguistic embedding of the word ‘cat’ is mapped to a visual space, such that the similarity of vector representations of words and associated images is maximized.

Similar instances of a cross-modal mapping can be found, for example, in Kottur et al. (2016) (a multimodal extension of the CBOW specification of word2vec) and in Collell et al. (2017), where visual features are obtained from the forward pass of a CNN pre-trained on ImageNet (Deng et al. (2009)) and a mapping function from the textual space to the visual space is obtained as a result of the training process. Also in this case it is possible to generate a visual representation from the embedding of a certain word, not necessarily present in the training set. In particular, they propose two specifications of the mapping function: a simple linear mapping and a neural network with a single hidden layer.
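As a rough sketch of such a post-hoc cross-modal mapping, one can fit a linear map from word embeddings to CNN features by regularized least squares and then predict, i.e. "imagine", visual vectors for words without images. This only illustrates the idea; it is not the training procedure used by Collell et al. (2017):

```python
import numpy as np

def fit_linear_mapping(U, V, reg=1e-3):
    """Fit M such that U @ M is close to V (closed-form ridge regression).

    U: (n_words, d_text)  embeddings of words that do have associated images
    V: (n_words, d_img)   corresponding visual feature vectors (e.g. CNN features)
    """
    d = U.shape[1]
    return np.linalg.solve(U.T @ U + reg * np.eye(d), U.T @ V)

# "Imagine" a visual representation for a word without an image in the training set:
rng = np.random.default_rng(0)
U, V = rng.normal(size=(500, 300)), rng.normal(size=(500, 4096))
M = fit_linear_mapping(U, V)
u_new = rng.normal(size=300)   # embedding of an unseen word
v_imagined = u_new @ M         # its predicted ("imagined") visual vector
print(v_imagined.shape)        # (4096,)
```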
Last but not least, Hill and Korhonen (2014) recognize that concrete nouns are more likely to have a visual representation. For this reason, they map a set of concrete words (CSLB, Devereux et al. (2014)) to “bags of perceptual/visual features”, and every time one of these words is encountered during training, the Skip-gram model they are using stops training on that sentence and instead continues the training on a newly created “pseudo-sentence”, which takes the aforementioned bag of perceptual features into consideration. This list is unfortunately not exhaustive and there are other models with similar ideas, for example Ailem et al. (2018) or Kiros et al. (2018).

The aforementioned papers and related models focus on modeling the semantics of words. Nonetheless, there are models designed to address tasks at sentence level, such as sentiment analysis or sentence entailment. Kiela et al. (2017) employ a bidirectional Long Short-Term Memory (LSTM, Hochreiter and Schmidhuber (1997)) architecture to model sentence representations, in order to gain information from the text in both directions. The goal is again to encode a sentence and ground it in an image. Textual embeddings are obtained with GloVe (Pennington et al. (2014)) and are then projected onto a grounded space with a linear mapping. This grounded word vector serves as input for the bidirectional LSTM, which is trained together with the linear mapping. Their model is versatile and, depending on the loss function specification, it can not only propose alternative captions for an image (which is a way to frame sentence equivalence tasks) but also predict captions from images, or perform both tasks at the same time.
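The following PyTorch sketch illustrates the general shape of such a grounded sentence encoder. All dimensions, the mean pooling, and the particular ranking loss are hypothetical illustrative choices and do not reproduce the exact setup of Kiela et al. (2017); swapping the loss (e.g. for a caption-prediction objective) is what switches the task.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class GroundedSentenceEncoder(nn.Module):
    """Sketch: pre-trained word vectors are linearly projected ("grounded"),
    fed to a bidirectional LSTM, and the pooled sentence vector is mapped
    into the image-feature space."""

    def __init__(self, d_word=300, d_grounded=256, d_hidden=512, d_img=2048):
        super().__init__()
        self.ground = nn.Linear(d_word, d_grounded)      # linear grounding map
        self.bilstm = nn.LSTM(d_grounded, d_hidden,
                              batch_first=True, bidirectional=True)
        self.to_img = nn.Linear(2 * d_hidden, d_img)     # project into image space

    def forward(self, word_vectors):                     # (batch, seq_len, d_word)
        outputs, _ = self.bilstm(self.ground(word_vectors))
        sentence = outputs.mean(dim=1)                   # simple mean pooling over time
        return self.to_img(sentence)

def ranking_loss(sent_vecs, img_vecs, margin=0.2):
    """Each sentence should be closer to its own image than to the other
    images in the batch (one possible loss specification among several)."""
    s = F.normalize(sent_vecs, dim=-1)
    v = F.normalize(img_vecs, dim=-1)
    sims = s @ v.t()                                     # (batch, batch) cosine similarities
    pos = sims.diag().unsqueeze(1)                       # similarity to the matching image
    hinge = torch.clamp(margin - pos + sims, min=0.0)
    hinge = hinge.masked_fill(torch.eye(sims.size(0), dtype=torch.bool), 0.0)
    return hinge.mean()
```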
This last point highlights an important characteristic of many of the models discussed in this subchapter: even though the focus is on empowering pure language models with the addition of visual elements, some of the models discussed here can be used for purposes other than pure language tasks. The control over which task is performed is usually exercised either by specifying different loss functions (as in the last model described) or by setting certain hyperparameters appropriately (as in the previously described model by Silberer and Lapata (2014)).

3.3.5 The Transformers Era

A turning point for the field of NLP was Vaswani et al. (2017b)'s paper "Attention is all you need", where the authors proposed, for two machine translation tasks, a novel architecture, the Transformer (not to be confused with the giant robots from Michael Bay's blockbuster movies!), which leverages only the attention mechanism. Even though an exhaustive description of the Transformer architecture is beyond the scope of this subchapter, it is worth mentioning why Transformers became so popular over the past four years in the field of NLP (among others), in comparison to Recurrent Neural Networks (RNNs) and Long Short-Term Memory networks (LSTMs). The three main properties of Transformers are the following:

- Self-Attention
- Parallel input processing
- Positional embeddings [2]

[2] It may be argued that this point is a necessity to be able to work on sequences rather than a strength.

When feeding, for example, a textual sentence to an RNN, the network deals with one word after the other in a sequential fashion, and one of the known issues is that information contained in earlier parts of the sequence tends to "fade away" as the sentence is analyzed further: newer inputs carry a larger influence on the outputs at a given step. LSTMs try to mitigate this problem by introducing a component called a "gate", which regulates the information flow, namely which information from past inputs needs to be "remembered" by the model.
The goal is to capture long-term dependencies among different parts of the sentence fed into the model. On the contrary, thanks to the Self-Attention mechanism, at each step Transformers can access previous steps, thus limiting the loss of information to a minimum. Moreover, inputs are processed not sequentially but all at the same time, which allows dependencies to be captured by looking at the sentence as a whole. This can make a fundamental difference in many downstream applications: in German, for example, in dependent clauses ("Nebensätze") the verb comes at the end of the clause, but it determines the case of the nouns that come before it. A Transformer could thus potentially capture the dependencies between the verb at the end of the sentence and the words at the beginning. Lastly, Transformers encode for every input information about its position within the sentence, since the importance and meaning of a certain word often varies depending on where it occurs. These were the Transformers, in a nutshell.

But Transformers did not only bring a change of paradigm in terms of architectures. First, while for the models of the pre-Transformers era described before the focus was on the ability of word embeddings to capture similarity among words, the focus has now shifted more towards downstream tasks (more on this later in the evaluation section), encompassing not only purely linguistic ones but also tasks with visual components, such as, for example, visual question answering. It is now more difficult (but not impossible) to draw a line between models where "images support pure language models" (the object of this subchapter) and models which could actually be categorized as "vision and language" models but can also be employed to solve pure linguistic tasks.
This issue brings us to another peculiarity of many Transformer-based models, namely their "universal vocation": without loss of generality, we could say that the idea is now to design powerful (multimodal) pre-training tasks (mostly self-supervised) capable of generating task-agnostic representations, whose encoded knowledge can be efficaciously transferred to diverse downstream tasks, limiting the amount of labeled data necessary to fine-tune the models (so-called few-shot learning).

Let's briefly discuss two examples, Flava (Singh et al. (2022)) and UniT (Hu and Singh (2021a)). Flava has two separate encoders for images and text and a multimodal encoder, all based on the Vision Transformer (Dosovitskiy et al. (2020a)). Unimodal pre-training consists of masked image modeling (where a set of masked image patches is to be reconstructed from the other, unmasked image patches) and masked language modeling. Multimodal pre-training tasks consist instead of a global contrastive loss (maximization of the cosine similarity between paired images and texts), masked multimodal modeling (where image patches and text tokens are masked) and an image-text matching task. The model is pre-trained jointly on unimodal and multimodal datasets and then evaluated (fine-tuned) on 22 vision tasks, 8 pure language tasks and 5 vision-and-language tasks. UniT has an image encoder and a text encoder, a multimodal domain-agnostic decoder and task-specific heads. There is no pre-training on multimodal data and the model is trained end-to-end on 7 tasks (vision, language, and vision-and-language) and 8 datasets, with the idea that solving different tasks across domains jointly should prevent general knowledge from being lost due to fine-tuning on particular downstream tasks.
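The global contrastive loss mentioned for Flava is conceptually close to a CLIP-style symmetric image-text contrastive objective. The sketch below illustrates that general idea under this assumption; batch size, feature dimension and temperature are illustrative and this is not Flava's actual implementation.

```python
import torch
import torch.nn.functional as F

def global_contrastive_loss(image_feats, text_feats, temperature=0.07):
    """Symmetric InfoNCE-style loss over a batch of paired image/text features.
    Row i of both tensors is assumed to describe the same example."""
    img = F.normalize(image_feats, dim=-1)           # unit-norm embeddings
    txt = F.normalize(text_feats, dim=-1)
    logits = img @ txt.t() / temperature             # (batch, batch) cosine similarities
    targets = torch.arange(logits.size(0))           # matching pairs lie on the diagonal
    loss_i2t = F.cross_entropy(logits, targets)      # image -> text direction
    loss_t2i = F.cross_entropy(logits.t(), targets)  # text -> image direction
    return (loss_i2t + loss_t2i) / 2

# usage with random features standing in for the image and text encoder outputs
loss = global_contrastive_loss(torch.randn(8, 512), torch.randn(8, 512))
```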
These two examples clearly show what is meant by the "universal vocation" of many modern Transformer-based models. But there are still models specifically designed to solve pure language tasks, and in the following pages two of them will be described.

3.3.5.1 Vokenization

It is often difficult for a child to describe the meaning of a certain word. A child might not be able to describe what a lion is, but if given pictures of different animals he might very well be able to point at the picture of a lion. Visual pointing could thus act as a form of supervision for natural language. Is it possible to build into a pure language model a form of visual supervision which mimics the visual pointing often adopted by children? This is exactly the problem that Tan and Bansal (2020) try to address: how to associate to each textual representation (token) a visual representation (a voken). Let's suppose we had a dataset of word(token)-image pairs. We could then integrate the following Voken-Classification task into the pre-training framework of pure language models:

$$\mathcal{L}_{\mathrm{VOKEN\text{-}CLS}}(s) = -\sum_{i=1}^{l} \log p_i\big(v(w_i; s) \mid s\big)$$

$$h_1, h_2, \dots, h_l = \mathrm{languagemodel}(w_1, w_2, \dots, w_l)$$

$$p_i(v \mid s) = \mathrm{softmax}_v\{W h_i + b\}$$

where {hi} is the feature representation of each token in a sentence s = {wi}, extracted from a language model (such as BERT), and the vokens originate from a finite set of images X. Each hi is then transformed into a probability distribution through a softmax layer, with the Voken-Classification loss defined as the negative log-likelihood of all related vokens.
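A minimal sketch of this objective is given below: each token's hidden state is projected onto the finite voken vocabulary with a linear layer (the softmax_v{W h_i + b} term above) and trained with the negative log-likelihood of the assigned voken. Sizes and names are illustrative assumptions, not those of the original implementation.

```python
import torch
import torch.nn as nn

hidden_dim, num_vokens = 768, 50000                 # illustrative sizes
voken_head = nn.Linear(hidden_dim, num_vokens)      # computes W h_i + b

def voken_classification_loss(hidden_states, voken_ids):
    """hidden_states: (batch, seq_len, hidden_dim), e.g. from BERT.
    voken_ids: (batch, seq_len), index of the voken assigned to each token."""
    logits = voken_head(hidden_states)               # (batch, seq_len, num_vokens)
    return nn.functional.cross_entropy(              # negative log-likelihood over vokens
        logits.reshape(-1, num_vokens), voken_ids.reshape(-1))

# usage with random tensors standing in for language-model outputs and retrieved vokens
h = torch.randn(2, 16, hidden_dim)
v = torch.randint(0, num_vokens, (2, 16))
loss = voken_classification_loss(h, v)
```

During pre-training this term would be used alongside the usual masked language modeling objective.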
The model architecture would then be:

FIGURE 3.41: From Tan and Bansal (2020). The language model is visually supervised with token-related images, called vokens.

Everything sounds fantastic! There is only one small pitfall: a set X of images for all tokens does not exist! Could we find a proxy for such a set? One might consider image-captioning datasets such as MS COCO (Lin et al. (2014b)). But this suboptimal solution is also problematic. The Grounding Ratio is defined as the proportion of tokens in a dataset which are related to a specific visual representation (i.e. the tokens are visually grounded), such as "dog", "table" and the like. In figure 3.42 it is striking that only around one third of the tokens contained in pure language corpora such as Wiki103, English Wikipedia and CNN/DM are visually grounded in image-captioning datasets. [3]
It is not possible to rely (only) on image-captioning datasets to build the Voken-Classification task. But the fact that a word/token does not have a visual representation in one of these datasets does not mean that it cannot be visually represented. Would it be possible to associate images to words/tokens that are not directly visually grounded? Well, the answer is yes!

[3] From an operative point of view, the authors consider a token type "visually grounded" if it has more than 100 occurrences in MS COCO.

FIGURE 3.42: From Tan and Bansal (2020). Statistics of image-captioning datasets and other natural language corpora. VG, CC, Eng Wiki, and CNN/DM denote Visual Genome, Conceptual Captions, English Wikipedia, and CNN/Daily Mail, respectively. JSD represents the Jensen-Shannon divergence to the English Wikipedia corpus.

Dataset    # Tokens   # Sents   Vocab. Size   Tokens/Sent.   1-Gram JSD   2-Gram JSD   Grounding Ratio
MS COCO    7.0M       0.6M      9K            11.8           0.15         0.27         54.8%
VG         29.2M      5.3M      13K           5.5            0.16         0.28         57.6%
CC         29.9M      2.8M      17K           10.7           0.09         0.20         41.7%
Wiki103    111M       4.2M      29K           26.5           0.01         0.05         26.6%
Eng Wiki   2889M      120M      29K           24.1           0.00         0.00         27.7%
CNN/DM     294M       10.9M     28K           26.9           0.04         0.10         28.3%

FIGURE 3.43: From Tan and Bansal (2020). The vokenization process. A contextualized image (visual token, voken) is retrieved for every token in a sentence, and with this visual token, visual supervision is performed.

Vokenization is the process of assigning every token wi contained in a sentence s to a visual representation (called a voken) originating not from a generative model but rather from a finite set of images X = {x1, ..., xn}.
The voken v(wi; s) is the image from X which maximizes the following Relevance Score Function:

$$v(w_i; s) = \arg\max_{x \in X} r_{\theta^*}(w_i, x, s)$$

This function takes into account not only the token wi itself but also the context (the sentence), and it is parametrized by θ, with θ* being the optimal value (which has to be estimated).

3.3.5.1.1 The Relevance Score Function: Model, Training, Inference

The Relevance Score Function is defined as the inner product of the language feature representation fθ(wi, s) and the visual feature representation gθ(x):

$$r_\theta(w_i, x, s) = f_\theta(w_i, s)^T g_\theta(x)$$
Supposing h1, ..., hl and e are the embeddings originating from the pre-trained language and visual encoders respectively (in the paper the authors use BERT and ResNeXt), the language and visual representations are obtained by first applying multi-layer perceptrons w_mlpθ and x_mlpθ to down-project the embeddings from the pre-trained models into a common vector space, and by then normalizing them (with the L2 norm):

$$f_\theta(w_i; s) = \frac{\mathrm{w\_mlp}_\theta(h_i)}{\|\mathrm{w\_mlp}_\theta(h_i)\|} \qquad g_\theta(x) = \frac{\mathrm{x\_mlp}_\theta(e)}{\|\mathrm{x\_mlp}_\theta(e)\|}$$

With respect to the training of the model, image-captioning datasets, which are collections of sentence-image pairs, are employed to estimate the optimal value of the parameter θ. Operationally, for every sentence sk associated with image xk in the image-captioning dataset, each token wi in sk is associated with xk, and the hinge loss is used to estimate the optimal value θ*:

$$\mathcal{L}_\theta(s, x, x') = \sum_{i=1}^{l} \max\big(0,\; M - r_\theta(w_i, x, s) + r_\theta(w_i, x', s)\big)$$

The goal is to maximize the Relevance Score Function for aligned token-image pairs (wi, x; s) and to keep the score for unaligned pairs (wi, x'; s) lower by at least a margin M, with x' being a randomly sampled image from the image-captioning dataset not associated with sentence s. Once we have the language feature representation fθ(wi, s) for each token in our language corpus and the optimal estimate of θ, how is it possible to find the image x, encoded by the visual feature representation gθ(x), which maximizes the Relevance Score Function? As said earlier, the function is expressed as the inner product of the textual and visual representations, and since the feature vectors have Euclidean norm equal to 1, the inner product maximization problem is equivalent to a nearest neighbor search problem. It is sufficient to find the vector gθ(x) which is the nearest neighbor of fθ(wi, s). [4] With this process, it is thus possible to assign a visual representation, a voken, to any word/token in a language corpus, pooling from a finite set of images.

[4] The proof is straightforward. Let X ∈ R^l with Euclidean norm equal to 1, i.e. ||X||_2 = 1. In the nearest neighbor search we need to find the vector Y ∈ R^l, also with norm equal to 1, which has minimal Euclidean distance to X. This is the quantity to be minimized:

$$d(X, Y)^2 = \sum_{i=1}^{l} (x_i - y_i)^2 = \sum_{i=1}^{l} x_i^2 + \sum_{i=1}^{l} y_i^2 - 2\sum_{i=1}^{l} x_i y_i = \|X\|_2^2 + \|Y\|_2^2 - 2X^T Y = 2(1 - X^T Y)$$

Through these simple algebraic manipulations, it is possible to see that minimizing the Euclidean distance between X and Y is equivalent to maximizing X^T Y, which is the inner product. This proves the equivalence between inner product maximization and nearest neighbor search.
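Putting the pieces together, a simplified sketch of the vokenization machinery described above might look as follows: two small MLPs project the pre-trained language and visual features onto a common, L2-normalized space, the hinge loss with margin M is used for training, and retrieving a voken reduces to a maximum-inner-product (nearest neighbor) search over the finite image set. Dimensions, architectures and names are illustrative assumptions rather than the authors' exact code.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class RelevanceScorer(nn.Module):
    """Projects contextual token features and image features to a shared,
    L2-normalized space; the relevance score is their inner product."""

    def __init__(self, lang_dim=768, vis_dim=2048, joint_dim=256):
        super().__init__()
        self.w_mlp = nn.Sequential(nn.Linear(lang_dim, joint_dim), nn.ReLU(),
                                   nn.Linear(joint_dim, joint_dim))
        self.x_mlp = nn.Sequential(nn.Linear(vis_dim, joint_dim), nn.ReLU(),
                                   nn.Linear(joint_dim, joint_dim))

    def lang(self, h):                        # h: token features, e.g. from BERT
        return F.normalize(self.w_mlp(h), dim=-1)

    def vis(self, e):                         # e: image features, e.g. from ResNeXt
        return F.normalize(self.x_mlp(e), dim=-1)

def hinge_loss(f_tokens, g_pos, g_neg, margin=0.5):
    """f_tokens: (seq_len, joint_dim); g_pos / g_neg: aligned / randomly sampled image."""
    r_pos = f_tokens @ g_pos                  # relevance scores for the aligned image
    r_neg = f_tokens @ g_neg                  # scores for the unaligned image
    return torch.clamp(margin - r_pos + r_neg, min=0).sum()

def retrieve_vokens(f_tokens, image_bank):
    """image_bank: (num_images, joint_dim), already normalized. With unit-norm vectors,
    maximizing the inner product is exactly a nearest neighbor search."""
    return (f_tokens @ image_bank.t()).argmax(dim=-1)   # one voken index per token

scorer = RelevanceScorer()
f = scorer.lang(torch.randn(12, 768))         # 12 tokens of one sentence
bank = scorer.vis(torch.randn(1000, 2048))    # a finite image set X
loss = hinge_loss(f, bank[0], bank[999])      # image 0 aligned, image 999 sampled at random
vokens = retrieve_vokens(f, bank)             # (12,) voken ids
```

In practice, a large image set would call for an approximate nearest neighbor index rather than the brute-force matrix product shown here.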
The problem of the low Grounding Ratio outlined above is solved, and the Voken-Classification task can be integrated into the pre-training framework of any pure language model. Moreover, the authors propose a method called Revokenization, which allows vokens generated with a particular tokenizer to be transferred to frameworks which employ other tokenizers.

3.3.5.2 One Step Further: The Power Of Imagination

Wikipedia defines imagination as "the production or simulation of novel objects, sensations, and ideas in the mind without any immediate input of the senses". Indeed, humans do not only associate words with real images, but also leverage the ability to imagine words and concepts: imagination can help the human brain solve problems with limited supervision or few sample points by empowering its generalization capabilities. Until now we have discussed language models supported by visual information in the form of real images (e.g. those retrieved from image-captioning datasets). But with the recent advancements in the field of generative models for images, it is certainly worth investigating whether these generative models can help pure language models to produce better representations of words. In particular, the framework proposed by Lu et al.
(2022), iACE (Imagination-Augmented Cross-Modal Encoder), will now be discussed. The idea is simply to use a generative model to obtain a visual representation of a textual input and then to use these imagined representations as "imagination supervision" for pure language models. This framework has two main components:

- the imagination generator G: given an input text x, VQGAN (Esser et al. (2021)) is used to render an "imagination" i of x, and CLIP (Radford et al. (2021a)) is used to assess how well the generated image i is aligned with the input text x. This generative framework is known as VQGAN+CLIP.
- the cross-modal encoder Ec: the input text and the rendered imagination are first encoded with a language and a visual encoder respectively, and then
CLIP is employed as the cross-modal encoder, with inputs being text-imagination pairs.

FIGURE 3.44: From Lu et al. (2022). The generator G visualizes imaginations close to the encoded texts by minimizing L_GAN. The cross-modal encoder Ec learns imagination-augmented language representations. The two-step learning procedure consists of: 1) pre-training a Transformer with visual supervision from a large-scale language corpus and image set, 2) fine-tuning the visually supervised pre-trained Transformer and the imagination-augmented cross-modal encoder on downstream tasks.

The learning procedure is composed of two main steps (depicted in figure 3.44). The first step consists in the pre-training of a visually supervised Transformer. In particular, the Voken-Classification task described before is employed, alongside a masked language modeling task. This is the baseline model, where no information from the "imagination" procedure comes into play yet. The second step is the imagination-augmented fine-tuning on two downstream datasets D (GLUE, Wang et al. (2018), and SWAG, Zellers et al. (2018)).
On the one hand, the visually supervised Transformer (the baseline) relies only on the textual input during the fine-tuning phase, and the following loss function is employed:

$$\mathcal{L}_{Lang} = -\sum_{j=1}^{|D|} \sum_{k=1}^{K} y_k \log p_k\big(d_j(t) \mid D\big)$$

On the other hand, the iACE is trained to minimize the following cross-entropy loss:

$$\mathcal{L}_{Imagine} = -\sum_{j=1}^{|D|} \sum_{k=1}^{K} y_k \log p_k\big(d_j(t, v) \mid D\big)$$

with t and v being the textual and imagined feature representations respectively, j indexing the j-th data sample of dataset D, K the number of classes and pk the conditional distribution of dj. Training takes place jointly, and both losses, the imagination-augmented one L_Imagine and the pure language loss L_Lang, are linearly combined, with λ being a balance factor:

$$\mathcal{L} = \lambda \mathcal{L}_{Imagine} + (1 - \lambda)\,\mathcal{L}_{Lang}$$

To sum up, this model-agnostic framework uses generated images for visual supervision and can be integrated on top of pure language models (such as BERT) or visually supervised models (such as the voken model described above, which uses real images, the vokens, for visual supervision).
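As a rough sketch of how these two objectives are combined during fine-tuning, consider the snippet below; the classifier heads and shapes are illustrative and the actual iACE training loop is more involved.

```python
import torch
import torch.nn.functional as F

def iace_loss(logits_text, logits_imagined, labels, lam=0.5):
    """Linear combination L = lam * L_Imagine + (1 - lam) * L_Lang of the
    text-only loss and the imagination-augmented loss."""
    loss_lang = F.cross_entropy(logits_text, labels)         # L_Lang: text-only head
    loss_imagine = F.cross_entropy(logits_imagined, labels)  # L_Imagine: (text, imagination) head
    return lam * loss_imagine + (1 - lam) * loss_lang

# usage with random logits for a hypothetical 3-class downstream task
labels = torch.randint(0, 3, (8,))
loss = iace_loss(torch.randn(8, 3), torch.randn(8, 3), labels, lam=0.5)
```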
3.3.6 Was It Worth It?

In this subchapter we investigated how visual inputs can support pure language models in capturing the semantics of words. We started with the simple concatenation of linguistic and visual features and ended up with Transformer-based models, which are able to shape different word embeddings for the same word by also taking the context (the sentence) into account. But now the question arises: with the addition of visual information, do we obtain word embeddings that are better than those from pure language models? In other words, is everything we have discussed so far worth the effort? Well, as is often the case in scientific research, the answer is: "it depends!" An individual evaluation of each single model might not be ideal, because each model has its peculiarities and it is impractical to make a direct comparison among them. It is more useful to capture and discuss the themes which are common to many models, in order to understand their strengths and weaknesses. This is how we will proceed, and we will also differentiate between evaluation before Transformers and evaluation after Transformers.

3.3.6.1 Evaluation In The Pre-Transformers Era

Before the advent of Transformers, the evaluation focus was on the degree of alignment between learned semantic representations (word embeddings) and the representations of human speakers, in the form of the correlation between model-based and human-based word-similarity judgments. Three main types of similarity are usually considered:

- Semantic similarity, e.g. "pasta is similar to rice"
- Semantic relatedness, e.g. "bear is related to mountain"
- Visual similarity, e.g. "cucumbers look like zucchinis"
The evaluation pipeline could be summarized as follows:

FIGURE 3.45: Pipeline for the intrinsic evaluation of semantic representations. In a first step, the cosine similarity between two word embeddings w1 and w2 is used as the similarity measure, and in a second step the correlation with human speakers' assessments is computed to gauge the quality of the embeddings. The higher the correlation, the better the embeddings.

Word embeddings are vectors, and to measure the degree of similarity between two vectors, the cosine similarity is often used in the literature. In an ideal setting, we would have word embeddings with the following characteristics: if two words are semantically similar, the two embedding vectors should be similar and their cosine similarity should go towards 1. If the two words are unrelated, the embedding vectors should be orthogonal to each other and, as a consequence, the cosine similarity should go towards zero. Lastly, if two words are negatively related, the two embedding vectors should point in opposite directions and the cosine similarity should go towards -1. Once these similarity measures between word pairs are computed, several benchmarks can be employed to measure the quality of the embeddings, such as MEN (Bruni et al. (2014)), WordSim353 (Agirre et al. (2009)) and SimLex999 (Hill et al. (2015)). These datasets can be described as collections of word pairs with associated similarity ratings by human speakers. Operationally, this means that real people were asked whether a pair of words was related or not, and to which degree, on a scale from -1 (negatively related) to +1 (semantically equivalent). The higher the correlation between the cosine similarities and the human similarity judgments, the higher the quality of the word embeddings.
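A minimal sketch of this intrinsic evaluation pipeline is shown below, with hypothetical embeddings and human ratings standing in for a real benchmark such as MEN or SimLex999 (which typically report a rank correlation such as Spearman's):

```python
import numpy as np
from scipy.stats import spearmanr

def cosine(u, v):
    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))

# toy word vectors and hypothetical human similarity ratings for three word pairs
embeddings = {w: np.random.randn(300)
              for w in ["pasta", "rice", "bear", "mountain", "love", "peace"]}
word_pairs = [("pasta", "rice"), ("bear", "mountain"), ("love", "peace")]
human_ratings = [0.8, 0.5, 0.6]                 # made-up judgments on a [-1, 1] scale

model_scores = [cosine(embeddings[a], embeddings[b]) for a, b in word_pairs]
correlation, _ = spearmanr(model_scores, human_ratings)   # higher = better embeddings
print(round(float(correlation), 3))
```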
Having made this methodological premise, let's discuss the performance of these pre-Transformer models! Since the goal of these models is to enhance pure language models with the addition of visual inputs, the baseline in the evaluation is always one (or more) pure language model(s). Well, do visually grounded embeddings outperform non-grounded ones? What emerges from virtually all papers is that visual grounding can actually help to obtain a better semantic representation of concrete concepts, such as "cat", "table" or "bicycle", whereas it does not help much with the representation of abstract concepts such as "love" and "peace".

FIGURE 3.46: From Hill and Korhonen (2014). Each bar represents a different model setting and the dashed line indicates the pure linguistic benchmark model.
In figure 3.46 we can see that pure language models still perform better than models with visual inputs when it comes to the representation of abstract nouns. Another example is Kiela et al. (2017): they found that their models perform better when tested on datasets with a higher degree of concreteness, and the same conclusion is reached by Collell et al. (2017), who state that visual information can empower the representations of concepts that are, to a certain extent, visual. To sum up, the effective semantic representation of abstract concepts constitutes the main limitation common to many of the models discussed in this section.

3.3.6.2 Evaluation In The Post-Transformers Era

A limitation of the intrinsic evaluation metrics is their high degree of subjectivity: the similarity between two concepts depends in many instances on the experience, cultural background and preferences of the human observers. This is why the evaluation focus has now shifted to a more extrinsic dimension: how well do the models perform on downstream tasks? The problem of the "lack of objectivity" is thus solved, because on downstream tasks there is no room for opinions. The datasets used to train the models are also different, and the most widely used are:

- GLUE (Wang et al. (2018)): 9 tasks, including single-sentence tasks (e.g. sentiment analysis), similarity tasks (e.g. paraphrasing) and inference tasks (e.g. textual entailment)
- SQuAD (Rajpurkar et al. (2016)): question/answer pairs
- SWAG (Zellers et al.
(2018)): multiple-choice questions about grounded situations

As previously discussed, many Transformer-based models have a universal vocation: they are built to solve a heterogeneous range of tasks from the language and vision domains. If we thus consider only the performance on pure language tasks, the following two tables from Tan and Bansal (2020) are insightful. It is straightforward: unlike in the pre-Transformers era, where grounded word embeddings could improve performance over the baselines, Transformer-based universal models do not outperform pure language models such as BERT or RoBERTa. Nonetheless, the addition of visual supervision (the Voken-Classification task) to the pre-training framework can boost performance above the level of pure language models.
[Table from Tan and Bansal (2020): GLUE results (SST-2, QNLI, QQP, MNLI) and weight difference to BERT for ViLBERT, VL-BERT, VisualBERT, Oscar and LXMERT, compared with BERT-base baselines.]

FIGURE 3.47: From Tan and Bansal (2020). Statistics of image-captioning datasets and other natural language corpora. VG, CC, Eng Wiki, and CNN/DM denote Visual Genome, Conceptual Captions, English Wikipedia, and CNN/Daily Mail, respectively. JSD represents the Jensen–Shannon divergence to the English Wikipedia corpus.

FIGURE 3.48: From Tan and Bansal (2020). Fine-tuning results of different pre-trained models with and without the voken-classification task (denoted as "Voken-cls"). (Rows: BERT and RoBERTa at 6L/512H and 12L/768H, each with and without Voken-cls; columns: SST-2, QNLI, QQP, MNLI, SQuAD v1.1, SQuAD v2.0, SWAG and average.)

Pezzelle et al. (2021) analyzed the intrinsic quality of the embeddings of some vision and language ("universal") models. From this intrinsic evaluation perspective (which was popular in the pre-Transformer era), vision and language models do not generally outperform domain-specific models such as BERT, and also in this case the only real competitor of pure language models is a model with visual supervision (again, Vokenization).
FIGURE 3.49: From Pezzelle et al. (2021). Spearman's rank correlation between the similarities computed with the representations of all tested models and human similarity judgments in the five evaluation benchmarks (RG65, WS353, SL999, MEN, SVERB; models include GloVe, BERT variants, LXMERT, UNITER, ViLBERT, VisualBERT and Vokenization).

FIGURE 3.50: From Pezzelle et al. (2021). Correlation between model and human similarity ratings on WordSim353, SimLex999 and MEN. Each bar plot reports results on both the whole benchmark and its most concrete subset.

The bar plots depict the correlation between human- and model-based similarity ratings, differentiating between the most concrete concepts contained in a certain dataset (see Brysbaert et al. (2014) for how the concreteness of a word can be estimated) and the whole dataset (thus including more abstract concepts). The results confirm the trend: multimodal models are more effective than pure language models at representing concrete words, but in many instances they still lag behind when it comes to more abstract concepts.
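The evaluation protocol behind these figures can be summarized in a few lines: compute a similarity score between the embeddings of each word pair and correlate it with the human rating. The sketch below is our illustration only; random vectors and made-up ratings stand in for real embeddings and benchmark scores:

```python
# Minimal sketch of intrinsic evaluation via Spearman's rank correlation
# between model-based similarities and human judgments (placeholder data).
import numpy as np
from scipy.stats import spearmanr

rng = np.random.default_rng(0)
word_pairs = [("car", "automobile"), ("cup", "idea"), ("dog", "cat")]
human_ratings = [9.5, 1.2, 7.8]                        # placeholder SimLex-style scores

embeddings = {w: rng.normal(size=300) for pair in word_pairs for w in pair}

def cosine(u, v):
    return float(u @ v / (np.linalg.norm(u) * np.linalg.norm(v)))

model_similarities = [cosine(embeddings[a], embeddings[b]) for a, b in word_pairs]
rho, _ = spearmanr(model_similarities, human_ratings)  # rank correlation with humans
print(f"Spearman rho: {rho:.3f}")
```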
Last but not least, a few words need to be spent on a topic which has been steadily gaining relevance: few-shot learning. To train and test models, a large pool of paired images and texts is often needed, and the creation of many of the datasets used for fine-tuning required a huge data-collection effort performed by human annotators. This implies that the creation of such data pools can be very costly. For this reason, there is a growing interest in creating models able to cope with low-resource settings. This boils down to the question: can a model perform well on downstream tasks even with just a limited number of training examples? The goal is, once again, to mimic how humans learn: a person does not need to see one thousand pictures of a table to be able to recognize one.

FIGURE 3.51: From Lu et al. (2022). Model-agnostic improvement in the few-shot setting on the GLUE benchmark. (Rows: VOKEN and iACE with BERT-base and RoBERTa-base backbones; columns: SST-2, QNLI, QQP and MNLI under extreme few-shot (0.1%, 0.3%, 0.5%) and normal few-shot (1%, 3%, 5%) budgets.)
This table from Lu et al. (2022), where models are trained using only up to 5% of the training set, shows for example that a model supervised with "imagination" (a generated visual representation of a given textual input) can outperform models with only simple visual supervision (the voken model). This is just one example, but the ability to perform well in few-shot settings has become the touchstone of the evaluation of modern multimodal models.
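In practice, such few-shot budgets amount to fine-tuning on a small random slice of the training data. The snippet below is a hypothetical illustration of a 5% budget using the Hugging Face `datasets` API, not the actual setup of Lu et al. (2022); only the 5% figure is taken from the text:

```python
# Hypothetical illustration of a "normal few-shot" budget: keep 5% of a GLUE
# training split for fine-tuning.
from datasets import load_dataset

sst2_train = load_dataset("glue", "sst2")["train"]
budget = int(0.05 * len(sst2_train))                        # 5% of the examples
few_shot_train = sst2_train.shuffle(seed=42).select(range(budget))

print(f"{len(sst2_train)} -> {len(few_shot_train)} training examples")
```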
3.3.7 The End Of This Story

We started this story with the Symbol Grounding Problem, which affirms that to grasp the meaning of a word, the word has to be put in a context other than the purely linguistic one. We thus investigated some of the architectures proposed to ground words in a visual space in the form of static images. The goal (and hope) is to better capture the semantics of words, in the form of better word embeddings, to be employed in heterogeneous tasks, from semantic similarity to downstream tasks such as sentiment analysis. From this brief analysis it emerges that grounding words in images can actually improve the representation of concrete concepts, whereas visual grounding does not seem to add value to pure language models when it comes to abstract concepts. Nonetheless, forms of visual supervision like the voken-classification task, or the employment of generative models which allow a model to "imagine" words, as in the iACE framework, might be the right way to bridge this gap.

Transformers have been a revolution in the field of NLP, and with their advent the trend has become to build models with pre-training tasks capable of generating powerful task-agnostic word representations. The knowledge gained with these tasks can then be transferred to downstream tasks with the goal of limiting the amount of labeled data necessary to fine-tune models. Labeling data is indeed costly: this is why the ability of a model to generalize well when exposed to just a few training examples has been steadily gaining importance as an evaluation metric. This is the so-called few-shot learning.

Moreover, Transformer-based models have a "universal vocation": they tend to be multimodal and multi-task, encompassing vision, language, and vision-and-language tasks. This idea might be appealing because humans learn by being exposed to a multitude of different inputs and tasks. But as we have seen, pure language models such as BERT still tend to outperform multimodal multi-task models. There is definitely room for improvement. One might wonder whether the grounding of words in images is the right way to seek a better representation of words. Humans learn using all five senses, and the answer might be to incorporate more heterogeneous perceptual information into the models: not only static images but also videos, speech and the like. The debate is still open: the story goes on.
Last but not least, a mention needs to be made of concrete applications of these image-empowered word embeddings. The use of images to support linguistic models has been experimented with in several fields, from Dialogue Response Generation (e.g. Sun et al. (2021)) to Machine Translation, where for example Ive et al. (2019) found images to improve the quality of translation when the textual context is generic and/or ambiguous. The number of potential applications of the models described in this subchapter is growing steadily in the scientific community. But this is yet another story.

3.3.8 Appendix: Selected Models - Summary

A table (available here) contains a summary of selected language models augmented with visual components. For each model, the following information is reported (a schematic illustration follows the list):

- Pure language model and pre-training data
- Visual features and pre-training data
- Fusion strategy of the two modalities
- Benchmarks/baselines for evaluation
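These four fields can be thought of as a small record type. The sketch below is a purely hypothetical illustration of that schema; the values are placeholders, not entries copied from the actual table:

```python
# Hypothetical record mirroring the four summary fields listed above.
from dataclasses import dataclass, field

@dataclass
class ModelSummary:
    language_model: str        # pure language model and its pre-training data
    visual_features: str       # visual encoder and its pre-training data
    fusion_strategy: str       # how the two modalities are combined
    benchmarks: list = field(default_factory=list)  # benchmarks/baselines for evaluation

example = ModelSummary(
    language_model="BERT-base, pre-trained on BooksCorpus and English Wikipedia",
    visual_features="region features from an object detector (placeholder)",
    fusion_strategy="cross-modal attention (placeholder)",
    benchmarks=["GLUE", "SQuAD"],
)
print(example)
```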
3.4.1 Introduction

“The biggest lesson that can be read from 70 years of AI research is that general methods that leverage computation are ultimately the most effective, and by a large margin. [...] Most AI research has been conducted as if the computation available to the agent were constant (in which case leveraging human knowledge would be one of the only ways to improve performance) but, over a slightly longer time than a typical research project, massively more computation inevitably becomes available. Seeking an improvement that makes a difference in the shorter term, researchers seek to leverage their human knowledge of the domain, but the only thing that matters in the long run is the leveraging of computation. [...] One thing that should be learned from the bitter lesson is the great power of general purpose methods, of methods that continue to scale with increased computation even as the available computation becomes very great.”

— Sutton (2019)

This insight seems to directly inspire most model choices presented in this chapter. Each network can be seen as an attempt of its creators to employ their vast available resources on a large scale, with a particular focus on dataset sizes. This mostly becomes feasible through the adaptation of recent findings in natural language processing (NLP; see chapter 2.1) to computer vision (CV). On the one hand, architectural concepts firstly popularized in NLP are translated to CV
(e.g., self-supervised learning or the Vision Transformer; Dosovitskiy et al., 2020b) (see chapter 2.2). On the other hand, these powerful new NLP models, mostly Transformers (Vaswani et al., 2017b), support bigger models from the inside as text encoding building blocks; hence the name of this chapter. Throughout this chapter, we will introduce the recent relevant CV models CLIP (Radford et al., 2021a), ALIGN (Jia et al., 2021b) and Florence (Yuan et al., 2021) and discuss their underlying core concepts. Their strong performances confirm the potential, hinted at by the impressive GPT-3 (Brown et al., 2020), of improving CV and increasing scale with the help of NLP.

3.4.2 Concepts

3.4.2.1 Web-scale data

A core problem that troubles researchers is the lack of robustness of previous state-of-the-art CV models to distribution shifts.
That is, a model with good performance on its original dataset fails to generalize (transfer its knowledge) to new, more or less similar datasets. E.g., Radford et al. (2021a) report that a ResNet101 which they trained on ImageNet to an accuracy of 76.2% maintains only an accuracy of 32.6% on ObjectNet. This suggests that the model perhaps did not learn high-quality latent representations, but instead overfit to the dataset-specific data-generating distribution. A common way to tackle this would be to try out various changes to the architecture and the training algorithm of the network. But this kind of adaptation, inscribing expert knowledge into the model, seems to repeat the mistake pointed out by Sutton (2019); “micromanaging” a model is likely to thwart future scaling. The researchers of CLIP, ALIGN and Florence follow a different approach, based on scale. They try to increase the sample size as much as possible and work with tremendous numbers of training observations:

- 400 million (CLIP; Radford et al., 2021a)
- 900 million (Florence; Yuan et al., 2021)
- 1.8 billion (ALIGN; Jia et al., 2021b)

These large-scale datasets are generated using the vast amount of image-text pairs produced by and readily available on the internet. Thus, error-prone, cost- and labor-intensive (difficult to scale) manual labeling is avoided. Unfortunately, the models trained on web data also become vulnerable to its downsides. Because of its extremely noisy nature, some form of pre-processing is still needed, e.g., filtering for English language, excluding graphic content and, optionally, removing images with non-informative alt-texts. This makes some degree of dataset curation, and therefore arbitrary choices, necessary. Likewise, the social biases inherent to the internet are reproduced. Furthermore, while this approach improves data efficiency to some degree (see next subsection 3.4.2.2), the poor data efficiency of deep learning in this area is not substantially enhanced, but mainly just compensated for with a super-scalable source of supervision (Radford et al., 2021a).
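To make the kind of light-touch filtering mentioned above more concrete, the following is a toy Python sketch; the language tag, the minimum length and the stop list of non-informative alt-texts are purely illustrative assumptions, not the actual pipelines used for CLIP, ALIGN or Florence.

```python
# Toy sketch of web-scale alt-text filtering; all heuristics are illustrative assumptions.
NON_INFORMATIVE = {"image", "img", "photo", "picture", "thumbnail", "alt_img", "1080p"}

def keep_pair(alt_text: str, lang: str) -> bool:
    words = alt_text.lower().split()
    if lang != "en":                                  # keep English captions only
        return False
    if len(words) < 3:                                # drop very short alt-texts
        return False
    if all(w in NON_INFORMATIVE for w in words):      # drop boilerplate alt-texts
        return False
    return True

raw_pairs = [
    ("img_001.jpg", "thumbnail", "en"),
    ("img_002.jpg", "a dog playing in the snow", "en"),
    ("img_003.jpg", "ein Hund im Schnee", "de"),
]
dataset = [(img, txt) for img, txt, lang in raw_pairs if keep_pair(txt, lang)]
print(dataset)  # only the informative English pair survives
```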
3.4.2.2 Contrastive objective

This source of supervision is the information contained in the co-occurrence of the image with its alt-text. It is accessed through natural language supervision. The architectures jointly train two sub-networks for image and text encoding, respectively. During this, the vector encodings are aligned in the latent representation space through minimizing a variant of the contrastive loss function (3.10) (Tian et al., 2020). One half of the loss for the first image-text pair is

$$
\ell_1^{V_{img},V_{txt}} = -\,\mathbb{E}_{\{v_{img}^1, v_{txt}^1, \ldots, v_{txt}^N\}}\left[\log \frac{h_\theta(\{v_{img}^1, v_{txt}^1\})}{h_\theta(\{v_{img}^1, v_{txt}^1\}) + \sum_{k=2}^{N} h_\theta(\{v_{img}^1, v_{txt}^k\})}\right], \qquad (3.10)
$$

where $v_{img}^1$ and $v_{txt}^1$ are vector encodings (latent representations) of image 1 and text 1 and $h_\theta(\cdot)$ is a similarity measure. In order to guarantee symmetry, the total loss is formed by the sum of $\ell_1^{V_{img},V_{txt}}$ and $\ell_1^{V_{txt},V_{img}}$, where the pairwise similarities of one text and every image are calculated instead of the other way around. Figure 3.52 visualizes this. Initially, all images and texts in the training data are encoded by the responsible sub-network. Using the resulting encodings, a similarity matrix with elements $h_\theta(\{v_{img}^i, v_{txt}^j\})$ can be calculated.
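As a minimal illustration of this computation, the following NumPy sketch builds such a similarity matrix and evaluates the symmetric loss for a toy batch; the choice of $h_\theta$ as an exponentiated dot product and the random embeddings are assumptions for the example, not the setup of Tian et al. (2020).

```python
import numpy as np

def similarity_matrix(img_emb, txt_emb):
    # h_theta chosen here as an exponentiated dot product between every image i and text j
    return np.exp(img_emb @ txt_emb.T)                 # shape (N, N)

def symmetric_contrastive_loss(img_emb, txt_emb):
    sim = similarity_matrix(img_emb, txt_emb)          # sim[i, j] = h_theta({v_img^i, v_txt^j})
    pos = np.diag(sim)                                 # similarities of the true pairs
    loss_img_txt = -np.log(pos / sim.sum(axis=1))      # each image contrasted with all texts, cf. (3.10)
    loss_txt_img = -np.log(pos / sim.sum(axis=0))      # each text contrasted with all images
    return (loss_img_txt + loss_txt_img).mean()

# Toy batch: N = 4 image-text pairs with 8-dimensional encodings.
rng = np.random.default_rng(0)
print(symmetric_contrastive_loss(rng.normal(size=(4, 8)), rng.normal(size=(4, 8))))
```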
Loosely speaking, the contrastive objective is to maximize the elements on the diagonal of this matrix and to minimize the others.

FIGURE 3.52: Visualization of a contrastive objective (Radford et al., 2021a). After encoding the data, a similarity matrix for the images and texts is computed. The aim is that the N true image-text pairs score high in terms of similarity, while the $N^2 - N$ other possible combinations score low.

Contrastive learning can be contrasted with classical predictive learning. Figure 3.53 gives an interesting insight into the choice of the space where goodness of fit is measured. The exemplary task is to color an image given its B/W version. Approach (a) first encodes the B/W image and then decodes the interim latent representation to fitting colors. The goodness of this fit is measured in the output space, meaning the estimated colors are compared to the true colors. Conversely, approach (b) measures the loss in the representation space.⁶ A reason for the good performance of contrastive learning could be that, while common prediction losses (e.g.,
the L2 loss) penalize each prediction output dimension independently, approach (b) implies measurement in the intertwined representation space (Tian et al., 2020).

FIGURE 3.53: Predictive vs. contrastive learning: Predictive losses are measured in the output space while contrastive losses are measured in the representation space, indicated by red dotted boxes (Tian et al., 2020).

But in the end, rather than theoretical considerations, the driving factor for using this objective is data efficiency. As can be seen in figure 3.54, Radford et al. (2021a) start their search for an adequate pre-trained model (more on this in subsection 3.4.2.3) by experimenting with a Transformer-based language model predicting the exact captions of an image. It turns out that this approach trains three times slower, in terms of data efficiency, compared to a simpler baseline of predicting a bag-of-words text encoding. Additionally, switching to the contrastive objective of CLIP improves data efficiency by a factor of four. Nonetheless, the switch to contrastive learning leads to some limitations. Its rigidity demands certain extra steps and forfeits the high flexibility of generative models.
In particular, this means that contrastive models similar to CLIP are limited to choosing from available options and cannot freely generate texts or images. To extend the capabilities of these models, additional network building blocks are necessary.

⁶ Note that contrastive learning easily works with combinations of modalities other than text and image; here B/W and colors.

FIGURE 3.54: Data efficiency of the contrastive objective. Development of zero-shot accuracy (see next subsection 3.4.2.3) on ImageNet with an increasing number of instances of training data processed by the models. The contrastive objective reaches similar accuracy scores as the generative approach with only a seventh of the amount of data (Radford et al., 2021a).

3.4.2.3 Foundation models and zero-shooting

The first models which are considered foundation models today began to appear in NLP. The term, later coined by Bommasani et al.
(2021), refers to models that are noteworthy due to their large scale and their ability to adapt to a wide variety of downstream tasks. An early example is BERT (Devlin et al., 2018b). Often, foundation models have an unfinished touch to them and the true scope of their capabilities cannot be sketched out clearly. This generally is the case because the desired abilities of neural networks are not designed for explicitly, but rather emerge during their implementation and usage on downstream tasks. Bommasani et al. (2021) cite GPT-3’s ability to perform certain types of new tasks solely by confronting it with the right natural language prompt. E.g., it is possible to get GPT-3 to summarize a paragraph by appending “TL;DR” (too long, didn’t read) to the prompt, which is a common pattern on the internet to signal a following summary. This is referred to as “in-context learning” (Brown et al., 2020). It is apparent that one can make up plenty of unexpected ways to employ these models, and it remains unknown whether there is a further way no one has thought of yet. This means possibly saving computational and data collection costs down the line, which, unfortunately, also holds for malicious use cases, e.g., surveillance.
Foundation models build on the concept of transfer learning, i.e., pre-training a model on a feasible source task and applying it to the desired downstream task. In the context of this chapter this means pre-training on web-scale data (see subsection 3.4.2.1) and evaluating performance on various common classification datasets. E.g., Radford et al. (2021a) name the SVHN dataset as a proxy for the task “street number transcription” with the caveat “on the distribution of Google Street View photos”, but they remark that a lot of datasets have no obvious, specific task associated with them, e.g., CIFAR-10. They use these kinds of datasets for measuring the “robustness to distribution shift and domain generalization” of their model, which still is a topic of great interest, as mentioned in subsection 3.4.2.1. When there is no further fine-tuning on the downstream task, i.e.,
no resuming of training on the new target dataset, this is referred to as zero-shooting. Zero-shooting has the clear advantage of evaluating performance in a more unbiased way, as processes like overfitting to the data-generating distribution will not distort results. Figure 3.55 shows how contrastive models perform zero-shot transfer. In the case of image classification, all available classes are encoded by the language model. Afterwards, the CV sub-network computes the encoding of the image to be classified and all pairwise similarity scores are returned. The pair with the best score can be retrieved as the decision. Image retrieval works the other way around: after an initial encoding of all images, the ones most similar to the encoded natural language text prompt in the representation space can be returned.

FIGURE 3.55: Visualization of zero-shooting (Radford et al., 2021a).
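For illustration, here is a small sketch of this zero-shot classification procedure using the publicly released CLIP Python package (github.com/openai/CLIP); the checkpoint name, the example image path and the label set are arbitrary choices for the example, not prescriptions from the text.

```python
import torch
import clip                      # pip install git+https://github.com/openai/CLIP.git
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)      # a smaller public checkpoint

labels = ["plane", "car", "dog", "bird"]
prompts = clip.tokenize([f"a photo of a {label}." for label in labels]).to(device)
image = preprocess(Image.open("example.jpg")).unsqueeze(0).to(device)  # any test image

with torch.no_grad():
    image_emb = model.encode_image(image)
    text_emb = model.encode_text(prompts)

# Cosine similarity = dot product of L2-normalized encodings.
image_emb = image_emb / image_emb.norm(dim=-1, keepdim=True)
text_emb = text_emb / text_emb.norm(dim=-1, keepdim=True)
similarity = (image_emb @ text_emb.T).squeeze(0)

print(labels[similarity.argmax().item()])    # label whose prompt encoding is most similar
```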
3.4.3 Architectures

3.4.3.1 CLIP

The first of the large-scale contrastive CV models that were published is CLIP, short for Contrastive Language-Image Pre-training (Radford et al., 2021a). The components of its name are explained in the previous subsections 3.4.2.2, 3.4.2.1 and 3.4.2.3 and are the crucial concepts of ALIGN and Florence as well. CLIP is a product of OpenAI, but its code is freely available and the different versions can be accessed as Python modules. The dataset used for training is not released, though. A lot of preliminary work stems from Zhang et al. (2020b), who introduced contrastive representation learning using image-text pairs. Their implementation of the contrastive loss function (3.10) follows

$$
\ell_1^{V_{img},V_{txt}} = -\log \frac{\exp(\langle v_{img}^1, v_{txt}^1\rangle/\tau)}{\sum_{k=1}^{N}\exp(\langle v_{img}^1, v_{txt}^k\rangle/\tau)}, \qquad (3.11)
$$
where $\langle v_{img}^1, v_{txt}^1\rangle$ represents the cosine similarity, i.e., $v_{img}^{1\top} v_{txt}^1 / (\lVert v_{img}^1\rVert \lVert v_{txt}^1\rVert)$, and $\tau \in \mathbb{R}^+$ is a temperature parameter, which is directly learned during training (Zhang et al., 2020b). CLIP adopts this. $\ell_1^{V_{txt},V_{img}}$, the counterpart to $\ell_1^{V_{img},V_{txt}}$ for the total loss, is function (3.11) with switched arguments. This can be viewed as a symmetric cross-entropy loss over the cosine similarities of the embeddings (Radford et al., 2021a).
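A compact PyTorch sketch of this symmetric formulation is given below; the batch, the embedding dimension and the log-scale parameterization of τ are illustrative assumptions rather than CLIP's actual implementation.

```python
import torch
import torch.nn.functional as F

def symmetric_clip_loss(image_emb, text_emb, log_tau):
    # Cosine similarities, scaled by the learned temperature tau, cf. (3.11).
    image_emb = F.normalize(image_emb, dim=-1)
    text_emb = F.normalize(text_emb, dim=-1)
    logits = image_emb @ text_emb.T / log_tau.exp()
    targets = torch.arange(logits.size(0))         # true pairs sit on the diagonal
    loss_img = F.cross_entropy(logits, targets)    # every image against all texts
    loss_txt = F.cross_entropy(logits.T, targets)  # every text against all images
    return (loss_img + loss_txt) / 2

# Toy usage: N = 8 pairs with 512-dimensional encodings; tau kept positive via the log scale.
img = torch.randn(8, 512)
txt = torch.randn(8, 512)
log_tau = torch.tensor(0.0, requires_grad=True)
print(symmetric_clip_loss(img, txt, log_tau))
```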
Architecture

The text encoder for CLIP (see figure 3.53) is a modified Transformer (Vaswani et al., 2017b), which was also used for GPT-2 (Radford et al., 2019b). For the image encoder multiple sub-networks are evaluated:

- ResNets: ResNet-50, ResNet-101
- ResNets which follow EfficientNet-style model scaling: RN50x4, RN50x16, RN50x64
- Vision Transformers: ViT-B/32, ViT-B/16, ViT-L/14

The best-performing sub-network was the ViT-L/14. In turn, they trained it for an additional epoch with higher-resolution images (336px), denoting this version ViT-L/14@336px. If not indicated otherwise, the performances of this version of CLIP are displayed. The EfficientNet-style ResNets use x4, x16 and x64 of the compute of a ResNet-50, and the largest model (the RN50x64) trained for 18 days on 592 V100 GPUs, while the ViT-L/14 only took 12 days on 256 GPUs. The high parallelization capabilities of Transformers seem to pay off.

When explaining zero-shooting initially (see subsection 3.4.2.3), a text processing step was skipped. As can be seen in figure 3.55, there is an additional operation before the labels are fed into the text encoder. In order to help the model understand the context of the words, the class labels are embedded in a sentence, e.g., “A photo of a {label}.”. This increases the model’s zero-shot accuracy on ImageNet by 1.3 percentage points (pp). When ensembling 80 different context prompts⁷, Radford et al. (2021a) improve ImageNet accuracy by an additional 3.5pp, which adds up to a total of nearly 5pp. The average performance gain across 36 datasets is reported to be 5pp. It is similarly possible to directly communicate visual concepts like “picture”, “macro”, “drawing” or even “dog” to the model.

⁷ Prompts like: “A photo of a big {label}.”, “A photo of a small {label}.” (Radford et al., 2021a)
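Continuing the earlier zero-shot sketch, prompt ensembling might look as follows; the small template list stands in for the 80 prompts used by Radford et al. (2021a) and is, like the label set, an assumption for the example.

```python
import torch
import clip

device = "cpu"
model, _ = clip.load("ViT-B/32", device=device)

templates = [                       # a tiny illustrative subset of context prompts
    "a photo of a {}.",
    "a photo of a big {}.",
    "a photo of a small {}.",
    "a drawing of a {}.",
]

def ensembled_class_embedding(label):
    tokens = clip.tokenize([t.format(label) for t in templates]).to(device)
    with torch.no_grad():
        emb = model.encode_text(tokens)
    emb = emb / emb.norm(dim=-1, keepdim=True)   # normalize each prompt encoding
    mean_emb = emb.mean(dim=0)                   # average over the prompt variants
    return mean_emb / mean_emb.norm()            # re-normalize the class embedding

zero_shot_weights = torch.stack([ensembled_class_embedding(c) for c in ["plane", "car", "dog", "bird"]])
# zero_shot_weights now replaces the single-prompt text encodings from the earlier sketch.
```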
Robustness

Figure 3.56 illustrates the performance of CLIP and a ResNet101 whose training on ImageNet was stopped at the point it reached the same accuracy as zero-shot CLIP. It can be deduced that the methods studied in the paper of Radford et al. (2021a) constitute an important step towards closing the robustness gap mentioned earlier (see subsection 3.4.2.1). While the performance of the ResNet101 deteriorates on datasets generated from data distributions increasingly different from ImageNet, CLIP remains fairly accurate. Note that these findings have to be taken with a grain of salt. Because OpenAI does not grant public access to their training data, independent parties cannot investigate these claims on their own. E.g., one has to rely on the conclusions of their overlap analysis to rule out that CLIP has seen biasing amounts of the test data during training.

FIGURE 3.56: Robustness of zero-shot CLIP to distribution shifts (Radford et al., 2021a).
The numbers underlying figure 3.56 are:

Dataset            ImageNet ResNet101   Zero-Shot CLIP   Δ Score
ImageNet                 76.2                76.2            0%
ImageNetV2               64.3                70.1         +5.8%
ImageNet-R               37.7                88.9        +51.2%
ObjectNet                32.6                72.3        +39.7%
ImageNet Sketch          25.2                60.2        +35.0%
ImageNet-A                2.7                77.1        +74.4%

CLIP as a building block

Shen et al. (2021) study how the performance of Vision-and-Language (V&L) models improves when the visual encoder is switched to CLIP’s strong image encoder. They discover that in this field of CV the ViT-B scores significantly worse than the ResNets. E.g.,
tests on image captioning reveal that the V&L model using ViT-B often performs only half as well as the version using the RN50x4 (the largest network used in this study). This is possibly due to the pooling strategies of ViT-B, which result in a lack of visual localization abilities. Shen et al. (2021) test their hypothesis and generate, e.g., figure 3.57, which depicts Grad-CAM visualizations for a V&L model with a ViT-B backbone and a ResNet-50 backbone, given the question “What color is the woman’s shirt on the left?”. The red area indicates relevant pixels and appears much more focused for CLIP-Res50 than for CLIP-ViT-B.

FIGURE 3.57: Grad-CAM visualizations for the prompt “What color is the woman’s shirt on the left?”: (a) original image, (b) CLIP-ViT-B, (c) CLIP-Res50.

3.4.3.2 ALIGN

The approach of Jia et al. (2021b) is largely similar to CLIP. They reiterate the necessity of large-scale vision datasets, but assert that even CLIP’s data collection process still involves a non-trivial amount of data curation.
3.4.3.2 ALIGN

The approach of Jia et al. (2021b) is largely similar to CLIP. They reiterate the necessity of large-scale vision datasets, but assert that even CLIP's data collection process involves a non-trivial amount of data curation. They propose that the additional observations gained by minimizing the amount of filtering make up for the increased noise. Following this rationale, they create a training dataset with 1.8 billion image-text pairs. The corresponding model is named ALIGN, short for “A Large-scale ImaGe and Noisy-text embedding”, whose acronym hints at the contrastive loss, which aligns vector encodings in the representation space (see subsection 3.4.2.2).

Architecture

ALIGN follows the dual-encoder architecture employed by Zhang et al. (2020b) and Radford et al. (2021a), but uses a part of BERT-Large as the text encoder and EfficientNet-L2 as the image encoder, which are jointly trained from scratch. The model has around 800 million parameters (Alford, 2021). Subsection 3.4.4 goes into more detail about the performance of ALIGN and compares all three models discussed in this subsection.

Connecting image and text representations

The contrastive loss function aligns the latent representations of the different modalities. In other words, the explicit objective is that similar vector encodings imply similar inputs.
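A minimal sketch of this symmetric contrastive objective for a dual encoder follows; the batch layout and the temperature value are illustrative assumptions, not the exact training code of CLIP or ALIGN.

    # Sketch of the symmetric contrastive objective used by dual encoders.
    import torch
    import torch.nn.functional as F

    def contrastive_loss(image_emb, text_emb, temperature=0.07):
        # L2-normalize so that dot products are cosine similarities
        image_emb = F.normalize(image_emb, dim=-1)
        text_emb = F.normalize(text_emb, dim=-1)
        logits = image_emb @ text_emb.T / temperature   # (N, N) similarity matrix
        targets = torch.arange(logits.size(0))          # matching pairs lie on the diagonal
        loss_i2t = F.cross_entropy(logits, targets)     # image -> text direction
        loss_t2i = F.cross_entropy(logits.T, targets)   # text -> image direction
        return (loss_i2t + loss_t2i) / 2

    # toy batch of 8 aligned image/text embeddings with dimension 512
    loss = contrastive_loss(torch.randn(8, 512), torch.randn(8, 512))

Minimizing this loss pulls matching image and text embeddings together while pushing all other pairs in the batch apart.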
This means arithmetic operations like the ones mentioned in chapter 2.1 are meaningful not only on encodings belonging to the same modality, but also across modalities. For example, one can add up the image encoding of a picture of the Eiffel Tower and the text encoding of the word “snow” and retrieve pictures with high cosine similarity to the sum; see figure 3.58 for an illustration.

FIGURE 3.58: Multimodal image retrieval via arithmetic operations on word and image embeddings.
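A small sketch of this kind of embedding arithmetic follows, assuming the open-source CLIP package and a precomputed gallery of image embeddings; the file name and the random gallery are placeholders.

    # Sketch of multimodal embedding arithmetic as in figure 3.58: add a text
    # embedding to an image embedding and retrieve nearest images by cosine similarity.
    import torch
    import torch.nn.functional as F
    import clip
    from PIL import Image

    device = "cpu"
    model, preprocess = clip.load("ViT-B/32", device=device)

    with torch.no_grad():
        query_img = preprocess(Image.open("eiffel_tower.jpg")).unsqueeze(0).to(device)
        img_emb = F.normalize(model.encode_image(query_img), dim=-1)
        txt_emb = F.normalize(model.encode_text(clip.tokenize(["snow"]).to(device)), dim=-1)

    query = F.normalize(img_emb + txt_emb, dim=-1)     # "Eiffel Tower" + "snow"

    gallery = F.normalize(torch.randn(1000, query.shape[-1]), dim=-1)  # stand-in image index
    scores = (gallery @ query.T).squeeze(1)            # cosine similarities
    top5 = scores.topk(5).indices                      # indices of best-matching images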
3.4.3.3 Florence

While in principle the approach of Yuan et al. (2021) does not differ greatly from the others, the focus of this paper is more on creating a true foundation model. To achieve this, they propose a map of possible vision applications which they try to cover by extending the core model with modules. As figure 3.59 depicts, they want to advance along the dimensions of fine-grained object detection, dynamic action recognition and true multimodal tasks. Owing to these ambitions, they name their model Florence after “the birthplace of Renaissance” (Yuan et al., 2021).

FIGURE 3.59: Florence's approach to foundation models: a general-purpose vision system for all tasks.

Architecture

As the two encoders for the pre-trained core they use a hierarchical Vision Transformer (CoSwin Transformer) for images and a Transformer similar to CLIP's for text. The 893 million parameters are likewise jointly trained from scratch, on 900 million image-text pairs. The alignment happens in the so-called image-label-description space, which is encoded through a special version of the contrastive loss function that regards all image-text pairs with the same label as positive instances. Figure 3.60 depicts their version of figure 3.52, where one can see schematically how they flexibly add modules to the pre-trained core in order to adapt to various downstream tasks.

FIGURE 3.60: Modular architecture of Florence.
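A simplified sketch of the label-aware contrastive objective described above follows. It treats every image-text pair that shares a label as a positive; this is a sketch in the spirit of Florence's unified image-text-label loss, not the implementation of Yuan et al. (2021), and the temperature and shapes are illustrative.

    # Simplified label-aware contrastive loss: pairs with the same label are positives.
    import torch
    import torch.nn.functional as F

    def label_aware_contrastive(image_emb, text_emb, labels, temperature=0.07):
        image_emb = F.normalize(image_emb, dim=-1)
        text_emb = F.normalize(text_emb, dim=-1)
        logits = image_emb @ text_emb.T / temperature            # (N, N)
        positives = labels.unsqueeze(0) == labels.unsqueeze(1)   # same label -> positive pair
        log_prob_i2t = F.log_softmax(logits, dim=-1)
        log_prob_t2i = F.log_softmax(logits.T, dim=-1)
        # average log-likelihood over all positives of each instance
        loss_i2t = -(log_prob_i2t * positives).sum(-1) / positives.sum(-1)
        loss_t2i = -(log_prob_t2i * positives).sum(-1) / positives.sum(-1)
        return (loss_i2t.mean() + loss_t2i.mean()) / 2

    loss = label_aware_contrastive(torch.randn(8, 512), torch.randn(8, 512),
                                   labels=torch.randint(0, 4, (8,)))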
3.4.4 Performance comparison

Throughout the papers of Radford et al. (2021a), Jia et al. (2021b) and Yuan et al. (2021), we were able to collect three tables of reported performance measures to compare these approaches.
Table 3.61 summarizes the zero-shot accuracies on four different ImageNet variants. Unfortunately, Yuan et al. (2021) only stated their performance on the original ImageNet, where they beat CLIP and ALIGN by a margin of 7.3 percentage points. The results on the other three ImageNet variants are mixed, and there is no clear winner between CLIP and ALIGN.

FIGURE 3.61: Top-1 accuracy of zero-shot transfer of the models to image classification on ImageNet and its variants.

           ImageNet   ImageNet-R   ImageNet-A   ImageNet-V2
CLIP       76.2       88.9         77.2         70.1
ALIGN      76.4       92.2         75.8         70.1
Florence   83.7       -            -            -

Table 3.62 concerns zero-shot image retrieval on the Flickr30K and MSCOCO datasets (see chapter 2.3). Even though there are no major score differences, there is a clear ranking, with CLIP in third, ALIGN in second and Florence in first place.

FIGURE 3.62: Zero-shot image and text retrieval (Yuan et al., 2021).

           Flickr30K (1K test set)               MSCOCO (5K test set)
           Image→Text      Text→Image           Image→Text      Text→Image
           R@1     R@5     R@1     R@5          R@1     R@5     R@1     R@5
CLIP       88.0    98.7    68.7    90.6         58.4    81.5    37.8    62.4
ALIGN      88.6    98.7    75.7    93.8         58.6    83.0    45.6    69.8
Florence   90.9    99.1    76.7    93.6         64.7    85.9    47.2    71.4

The most comprehensive comparison is shown in table 3.63. It depicts the accuracy of zero-shot CLIP and Florence on various datasets, as well as the scores of all three models fine-tuned on the respective datasets.
Florence beats CLIP in nearly all evaluations, in the zero-shot setting as well as after fine-tuning. Jia et al. (2021b) only report on four of these twelve datasets, where they win half of the time. Summing up, ALIGN achieves its goal of replicating CLIP's impressive performance while dramatically reducing the required data curation effort, and Florence has the overall top performance. This could be attributed to its custom loss, to Yuan et al. (2021) striking the best balance between sample size and data curation, or to Florence having the best sub-networks; or to a combination of all three. Once again, note that none of the training datasets were made publicly available, so it cannot be guaranteed that all benchmarks were evaluated on unseen data.
FIGURE 3.63: Top-1 accuracy of CLIP, Florence and ALIGN on various datasets.

                       Food101  CIFAR10  CIFAR100  SUN397  Cars   Aircraft  VOC2007  DTD    Pets   Caltech101  Flowers102  ImageNet
CLIP                   93.8     95.7     77.5      68.4    78.8   37.2      84.3     55.7   93.5   92.8        78.3        76.2
Florence               95.1     94.6     77.6      77.0    93.2   55.5      85.5     66.4   95.9   94.7        86.2        83.7
CLIP (fine-tuned)      95.9     97.9     87.4      82.2    91.5   71.6      89.9     83.0   95.1   96.0        99.2        85.4
ALIGN (fine-tuned)     95.9     -        -         -       96.1   -         -        -      96.2   -           -           88.6
Florence (fine-tuned)  96.2     97.6     87.1      84.2    95.7   83.9      90.5     86.0   96.4   96.6        99.7        90.1

3.4.5 Resources

One can access the pre-trained CLIP models on GitHub, and they have even found their way into simple command line tools already. For example, there is a CLI named rclip, which can be used for personal image retrieval and wraps the ViT-B/32 CLIP architecture. On a mid-range laptop, we were able to find seemingly good matches for search terms we tried out inside a folder containing about 100 different pictures. After an initial caching, one request took about ten seconds. Furthermore, CLIP continues to be used inside new models, e.g., DALL·E 2, where it provides the image embedding (Ramesh et al., 2022b). Also, there is a crowd-sourcing effort to replicate CLIP's training dataset called LAION-400M (Schuhmann, 2022). To validate the image-text pairs collected for it, their cosine similarity is computed using CLIP, and instances with a value that is too low are discarded. To our knowledge, no resources were open-sourced as part of the other two papers, ALIGN and Florence.
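As a rough illustration of this filtering step, the sketch below discards scraped pairs whose CLIP cosine similarity falls below a threshold. The 0.3 cutoff and the helper name are assumptions for illustration, not the exact LAION-400M pipeline.

    # Sketch of CLIP-based filtering of scraped image-text pairs.
    import torch
    import torch.nn.functional as F
    import clip
    from PIL import Image

    device = "cpu"
    model, preprocess = clip.load("ViT-B/32", device=device)

    def keep_pair(image_path, caption, threshold=0.3):
        image = preprocess(Image.open(image_path)).unsqueeze(0).to(device)
        text = clip.tokenize([caption], truncate=True).to(device)
        with torch.no_grad():
            img_emb = F.normalize(model.encode_image(image), dim=-1)
            txt_emb = F.normalize(model.encode_text(text), dim=-1)
        cosine = (img_emb * txt_emb).sum().item()
        return cosine >= threshold

    # usage: keep_pair("scraped.jpg", "a dog playing in the snow")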
3.5 Models for both modalities

Author: Steffen Jauch-Walser

Supervisor: Daniel Schalk

Data is naturally at the heart of every data science problem. While many advances have been made in machine learning in recent years, many promising research areas remain, as do a multitude of problems associated with them. One such promising area is that of multimodal machine learning models. Combining different types of input data is a key step towards making models more sophisticated. When thinking about teaching robots specific tasks, detecting hateful memes or detecting deep fakes, it is apparent that success might only be achieved through the combination of multiple modalities. Context is key. However, learning context requires increasingly complex models.
While early machine learning models built their success upon the possibility of analyzing the big pool of available, often unstructured data, modern machine learning models are so demanding that there is often not enough data or training time available. Obtaining data is a major issue for multimodal machine learning. Since labelling data in vast amounts is prohibitively expensive, larger models have to come up with specific strategies to move forward, such as self-supervised training or automatically scraped web datasets. Nevertheless, when models become so large that billions of parameters have to be learned, even scraping the whole web starts to show its limits. Another natural issue is the transformation of different types of data into usable model inputs. There is no shortage of different single-modality machine learning models.
On the contrary, when every new hyperparameter configuration might be seen as a new model, it becomes hard to keep track. More importantly, it is often not clear how a model from one area transfers to another. Did we learn some modality-specific bias or a general principle? Consolidating different models into a unifying framework is a key prospect of multimodal machine learning. While the grand dream of a single unifying model might be out of reach, consolidating different areas is well in sight. In the following, we will have a look at the challenges and prospects of multimodal machine learning against the background of visual language models, i.e., models which can deal with both language and images as input data. Specifically, we will have a closer look at three different models: Data2vec, VilBert and Flamingo. Data2vec is an unsupervised model that can handle different modalities, but not their interaction, using a single unifying training framework. VilBert is an early visual language model that can handle interactions between images and text through its innovative concept of cross-attention. Flamingo is a recent few-shot visual language model that features expressive text capabilities through the use of a large language model. With 80B parameters, it particularly highlights how to leverage the communication between frozen models when further scaling up the model size. An overview of the popularity of current research fields in visual language modelling is provided in figure 3.64.
A detailed list of trends for each of these fields can be found in Uppal et al. (2022). Most research is done in the areas of visual question answering (VQA) and visual captioning (VC), but also, for example, in visual commonsense reasoning (VCR), vision-language navigation (VLN) and multimodal affective computing (MAC). MAC uses images and text to infer sentiment, for example through facial expressions. VCR, as an extension of VQA, is particularly interesting in the realm of making models more interpretable. After all, we would like to know why machine learning models do what they do. Finally, VLN has many promising practical applications in the field of robotics, particularly the interaction of humans and robots.

FIGURE 3.64: VisLang paper trends over the previous two years (Uppal et al., 2022): VC 31%, VQA 25%, VCR 10%, VLN 10%, MAC 8%, VG 7%, MMT 6%, VR 4%.

3.5.1 Data2vec

With data2vec (Baevski et al., 2022), data scientists at Meta, formerly Facebook, developed an architecture that addresses some of the mentioned issues while highlighting the importance of sophisticated training schemes. Their algorithmic structure is able to work with either text, image or speech data. On top of that, the model is self-supervised, based on a teacher-student relationship that reduces the need for human labelling.
It is not a universal model in the sense that it works with any input, nor is it even a general model in the sense that the algorithm is exactly the same for each modality. However, the overall model structure remains the same for text, speech and image input data, while only the specific encoding, normalization and masking strategies are modality-specific. In that regard, it is a step towards a more general way of dealing with different modalities, and it is very effective at doing so given the benchmark results on typical datasets. Particularly noteworthy is also the way the authors implement self-supervised learning: data2vec predicts contextualized and continuous representations rather than the typically used discrete tokens such as sub-words. Working with latent representations of the input space has two advantages: not only is the number of prediction targets not limited a priori, but the targets are also richer in information.

Figure 3.65 depicts the general model architecture. The two main components are a teacher and a student model, which only differ in one aspect: the weights of the teacher model are an exponentially decaying average of the student's weights. The purpose of the teacher model is to create training targets for the student model. In a first step, a modality is chosen and inputs are encoded according to the specific encoding scheme for that modality.
FIGURE 3.65: Data2vec architecture (Baevski et al., 2022): the teacher model creates contextualized latent targets from its top K layers (blue) as the prediction task for the student model.

A masked version is then given to the student model, but notably, the teacher model has access to an unmasked, complete view of the input data. Hence, the resulting training targets are fully contextualized through a self-attention mechanism over the whole input. The training targets are based on the top K layers of the teacher model, depicted in blue in figure 3.65. More specifically, denoting by $y_t$ the training target at time $t$ and by $a_t^l$ the output of the $l$-th block, the targets are

$$y_t = \frac{1}{K} \sum_{l=L-K+1}^{L} \hat{a}_t^l,$$

i.e. the average of the outputs of the top K layers of the teacher network after a normalization has been applied. Normalization helps to stabilize the training process and prevents model collapse, which can be an issue for models that learn their own representations. From the authors' point of view, working with a latent representation of the learner itself as the training target is a simplification of many commonly used modality-specific designs, despite the caveat that the paper still uses modality-specific encoding strategies. Compared to other models, there is no cross-modality training. The specific loss function used to regress the targets is a smooth L1 loss:

$$L(y_t, f_t(x)) = \begin{cases} \frac{1}{2}\,\frac{(y_t - f_t(x))^2}{\beta} & \text{if } |y_t - f_t(x)| \leq \beta \\ |y_t - f_t(x)| - \frac{\beta}{2} & \text{otherwise.} \end{cases}$$

Using a smooth L1 loss has the advantage of being continuous yet sensitive to outliers, but the $\beta$ parameter needs tuning.
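The following is a schematic sketch of one data2vec training step, under the assumption that both models expose their per-block outputs; it is not the fairseq implementation, and the target normalization and masking details are simplified.

    # Schematic data2vec training step: EMA teacher, top-K layer average target,
    # smooth L1 regression on the masked positions.
    import torch
    import torch.nn.functional as F

    def ema_update(teacher, student, tau=0.999):
        # teacher weights track the student as an exponential moving average;
        # the teacher starts as a deep copy of the student and gets no gradients
        with torch.no_grad():
            for p_t, p_s in zip(teacher.parameters(), student.parameters()):
                p_t.mul_(tau).add_(p_s, alpha=1.0 - tau)

    def training_step(student, teacher, x, x_masked, mask, K=8, beta=1.0):
        # assumption: the models return a list of per-block outputs, each (B, T, D)
        with torch.no_grad():
            layers = teacher(x)                    # teacher sees the unmasked input
            target = torch.stack([F.instance_norm(h.transpose(1, 2)).transpose(1, 2)
                                  for h in layers[-K:]]).mean(dim=0)
        prediction = student(x_masked)[-1]         # student sees the masked input
        return F.smooth_l1_loss(prediction[mask], target[mask], beta=beta)

    # after each optimizer step: ema_update(teacher, student)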
As far as the general model architecture is concerned, the underlying architecture is a standard Transformer (Vaswani et al., 2017b).

How does the modality-specific input handling work? In many ways, the authors combine strategies developed in multiple previous works and add a unifying framework on top. For images, the typical Vision Transformer (ViT) strategy (figure 3.66) of transforming images with a size of 224x224 pixels into 16x16 pixel patches is employed. Every patch is then linearly transformed into a sequence of 196 flattened representations, including a learnable positional encoding, that serve as input to the vision transformer. A classification token is used to produce the final categorization. The contextualization is produced in the multi-head attention blocks, as explained in earlier chapters. In short, multi-head attention first projects the keys, queries and values with learned linear projections, which are then evaluated in parallel to create more expressive attention maps. Attention itself is calculated as scaled dot-product attention, using a softmax over the scaled product of keys, queries and values (Vaswani et al., 2017b). As far as the vision transformer itself is concerned, data2vec tests two different model sizes: a base model with 12 and a large model with 24 transformer blocks.
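To make this input pipeline concrete, the sketch below builds the 196 patch embeddings plus class token for one image and applies plain single-head scaled dot-product self-attention; the embedding dimension follows the ViT-Base convention and is an assumption here.

    # ViT-style input pipeline: 224x224 image -> 196 patch embeddings + [class] token,
    # followed by single-head scaled dot-product self-attention.
    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    embed_dim = 768
    patch_embed = nn.Conv2d(3, embed_dim, kernel_size=16, stride=16)  # linear projection of patches
    cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
    pos_embed = nn.Parameter(torch.zeros(1, 197, embed_dim))          # 196 patches + [class]

    image = torch.randn(1, 3, 224, 224)
    patches = patch_embed(image).flatten(2).transpose(1, 2)           # (1, 196, 768)
    tokens = torch.cat([cls_token, patches], dim=1) + pos_embed       # (1, 197, 768)

    def scaled_dot_product_attention(q, k, v):
        scores = q @ k.transpose(-2, -1) / (q.size(-1) ** 0.5)
        return F.softmax(scores, dim=-1) @ v

    out = scaled_dot_product_attention(tokens, tokens, tokens)        # contextualized tokens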
The masking strategy for images follows BEiT, the Bert-style pre-training approach for image transformers proposed by Bao et al. (2021). In particular, multiple adjacent blocks of patches are masked with a random aspect ratio, with a minimum size of 16 patches per masked block. In total, 60% of the patches are masked in the data2vec algorithm, which is an increase over the original 40% used by BEiT; the authors note that they found the increased masking ratio to improve accuracy. The augmentation strategies are similar as well: resized crops, horizontal flipping and colour jittering are used. Naturally, the student and the teacher model are given the same augmented image. Finally, for image data, the model is evaluated on a classification task. Hence, the authors use mean-pooling over all patches in the last transformer block and feed that into a softmax-normalized projection that conducts the classification, which is again based on the BEiT model.

FIGURE 3.66: Dosovitskiy et al. (2021): the Vision Transformer (ViT): flattened image patches are linearly projected, combined with position embeddings and an extra learnable [class] embedding, and processed by a transformer encoder (multi-head attention, normalization and MLP blocks) topped by an MLP head for classification.

The natural language processing model is implemented with the PyTorch toolkit fairseq and is based on the RoBERTa architecture (Liu et al., 2019b), which redesigned the standard Bert training procedure to make it more robust and effective. In particular, it uses larger values for hyperparameters such as the learning rate and the batch size.
It also removes the next-sentence prediction task to improve the masked language modelling performance. For tokenization, the authors follow Sennrich et al. (2015b) and encode sub-words as 50k byte-pairs, with a separate embedding vector learned for each type. For the masking, the Bert masking scheme is used: 15% of the embedded tokens are selected, of which 80% are replaced by a learned mask token, 10% are left unchanged and the remaining 10% are replaced with random tokens from the vocabulary. Another strategy that the authors consider is the wav2vec masking strategy, which masks spans of four consecutive tokens with a probability of 0.35 while only using learned mask tokens (Baevski et al., 2020); as it turns out, the latter strategy further improves the results. The natural language processing model is evaluated on the General Language Understanding Evaluation (GLUE) benchmark (Wang et al., 2018), which includes, for example, natural language inference, sentence similarity and sentiment analysis tasks. The speech modality is also implemented in fairseq. The feature encoder for speech is based on the wav2vec framework and uses 16 kHz inputs. It is built from seven temporal convolutions intertwined with normalization layers and a GELU activation function, such that the encoder outputs representations at a rate of roughly 50 Hz.
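A sketch of such a speech feature encoder is given below: seven strided temporal convolutions, each followed by a normalization layer and a GELU, downsample 16 kHz audio to roughly 50 representations per second. The text only states the number of convolutions; the specific kernel sizes, strides, channel width and normalization type used here follow the common wav2vec 2.0 configuration and are assumptions for illustration.

```python
import torch
import torch.nn as nn

# Seven temporal convolutions as (channels, kernel, stride); total stride 320,
# so 16 kHz input becomes roughly 50 frames per second (assumed configuration).
CONV_LAYERS = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2

class SpeechFeatureEncoder(nn.Module):
    def __init__(self):
        super().__init__()
        blocks, in_ch = [], 1
        for out_ch, kernel, stride in CONV_LAYERS:
            blocks += [nn.Conv1d(in_ch, out_ch, kernel, stride=stride),
                       nn.GroupNorm(1, out_ch),   # normalization layer (assumed type)
                       nn.GELU()]                  # GELU activation
            in_ch = out_ch
        self.encoder = nn.Sequential(*blocks)

    def forward(self, waveform):                   # (batch, 1, samples) at 16 kHz
        return self.encoder(waveform)

one_second = torch.randn(1, 1, 16000)
print(SpeechFeatureEncoder()(one_second).shape)    # torch.Size([1, 512, 49]), i.e. ~50 Hz
```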
As far as the results are concerned, data2vec achieved state-of-the-art performance in vision and language tasks among similar self-supervised models.

FIGURE 3.67: Baevski et al. (2022): data2vec performance (vision), reported as ImageNet-1K top-1 accuracy:

  Method                           ViT-B   ViT-L
  Multiple models
    BEiT (Bao et al., 2021)        83.2    85.2
    PeCo (Dong et al., 2022)       84.5    86.5
  Single models
    MoCo v3 (Chen et al., 2021b)   83.2    84.1
    DINO (Caron et al., 2021)      82.8    -
    MAE (He et al., 2021)          83.6    85.9
    SimMIM (Xie et al., 2021)      83.8    -
    iBOT (Zhou et al., 2021)       83.8    -
    MaskFeat (Wei et al., 2021)    84.0    85.7
    data2vec                       84.2    86.6

Figure 3.67 shows the model's performance in computer vision. Pre-trained and fine-tuned solely on the data of the well-known ImageNet-1K dataset, data2vec was evaluated using top-1 accuracy, the standard notion of accuracy, on the task of predicting single labels for images. The base model ViT-B comprises 86M parameters and ViT-L 307M parameters. The results show that predicting contextualized latent representations in a masked prediction setup can work well as model training compared to classical local methods such as predicting visual tokens. MoCo v3 (Chen et al., 2021) is a self-supervised model trained
on a contrastive loss. The most similar model is DINO (Caron et al., 2021), which also uses a self-distillation setup to predict teacher outputs using a cross-entropy loss. However, its prediction target was the final layer rather than averaged layers, and it uses different images for the teacher and the student network. The well-performing MAE model (He et al., 2022) is a masked autoencoder which is trained to reconstruct masked pixels using an asymmetric encoder-decoder architecture. In contrast, MaskFeat (Wei et al., 2022) uses masked feature prediction.
Notably, data2vec outperforms all of them although it was trained for the same number of epochs or fewer; in particular, MAE and MaskFeat use 1600 epochs rather than the 800 used by data2vec.

FIGURE 3.68: Baevski et al. (2022): data2vec results (language). GLUE results on the development set for single-task fine-tuning of individual models. For MNLI, accuracy is reported on both the matched and mismatched dev sets; for MRPC and QQP, the unweighted average of accuracy and F1; for STS-B, the unweighted average of Pearson and Spearman correlation; for CoLA, the Matthews correlation; and for all other tasks, accuracy. BERT Base results are from Wu et al. (2020), and the baseline is RoBERTa re-trained in a setup similar to BERT. Results with wav2vec 2.0 style masking of spans of four BPE tokens, with no unmasked tokens or random targets, are also reported.

                                MNLI        QNLI   RTE    MRPC   QQP    STS-B  CoLA   SST    Avg.
  BERT (Devlin et al., 2019)    84.0/84.4   89.0   61.0   86.3   89.1   89.5   57.3   93.0   80.7
  Baseline (Liu et al., 2019)   84.1/83.9   90.4   69.3   89.0   89.3   88.9   56.8   92.3   82.5
  data2vec                      83.2/83.0   90.9   67.0   90.2   89.1   87.2   62.2   91.8   82.7
  + wav2vec 2.0 masking         82.8/83.4   91.1   69.9   90.0   89.0   87.7   60.3   92.4   82.9

[Alongside these results, the source reproduces an ablation over the number of averaged teacher layers K with panels (a) Speech, (b) NLP and (c) Vision: predicting targets which are the average of multiple layers is more robust than predicting only the topmost layer (K = 1) for most modalities; the effect is very pronounced for speech and NLP, while for vision there is still a slight advantage in predicting more than a single layer.]

Figure 3.68 shows the performance in the language domain. For the language domain, the model is evaluated on the GLUE benchmark (Wang et al., 2018). The model is pre-trained and fine-tuned separately on the labelled data of each task. Accuracy is reported as the average across five fine-tuning cycles. While data2vec achieves a higher average performance than the baseline model, there are tasks where the baseline model prevails. A large portion of the performance difference seems to be driven by the CoLA task. The Corpus of Linguistic Acceptability (CoLA) consists of 10657 sentences from 23 linguistics publications, and the task is to judge whether they are grammatically correct. Hence, it is distinctly different from the other tasks. The Stanford Sentiment Treebank (SST) analyzes sentiment in language through movie reviews. The Multi-Genre Natural Language Inference (MultiNLI) corpus contains sentence pairs and focusses on textual entailment across genres. Similar tasks are used in the Recognizing Textual Entailment (RTE) dataset, which focuses on text from news and Wikipedia. The QNLI (Question-answering NLI) dataset is a natural language inference dataset that contains answers from Wikipedia to corresponding questions posed by an annotator; the task for the model is to find out whether the sentence contains the answer to the question. QQP stands for Quora Question Pairs, which analyzes paraphrases. Finally, the Microsoft Research Paraphrase Corpus (MRPC) also consists of sentence pairs from newswires which may or may not be paraphrases of each other.
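The per-task metrics listed in the table notes can be computed with standard libraries. The snippet below is a small illustration of how the reported quantities (Matthews correlation for CoLA, the accuracy/F1 average for MRPC and QQP, the Pearson/Spearman average for STS-B, plain accuracy otherwise) might be derived from predictions; it is not part of the data2vec codebase, and the task names are only used as illustrative keys.

```python
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import accuracy_score, f1_score, matthews_corrcoef

def glue_metric(task, y_true, y_pred):
    """Return the headline GLUE metric for a given task."""
    if task == "cola":                        # Matthews correlation
        return matthews_corrcoef(y_true, y_pred)
    if task in ("mrpc", "qqp"):               # unweighted average of accuracy and F1
        return (accuracy_score(y_true, y_pred) + f1_score(y_true, y_pred)) / 2
    if task == "sts-b":                       # unweighted average of Pearson and Spearman
        return (pearsonr(y_true, y_pred)[0] + spearmanr(y_true, y_pred)[0]) / 2
    return accuracy_score(y_true, y_pred)     # accuracy for all other tasks
```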
As a suitable baseline model, the authors retrain RoBERTa in the respective setup. On top of the heterogeneous performance across language tasks, the evaluation also clearly shows that averaging over multiple layers to create prediction targets improves performance across all three domains. The effect seems to be most pronounced for NLP tasks, whereas computer vision does not benefit from averaging more than three layers; in the speech domain, six layers seem to be enough to reach peak performance. In any case, the performance loss from simply averaging the maximum number of layers, rather than fine-tuning K, seems small enough to be acceptable.

To sum up, data2vec is a self-supervised model that can work with either text, speech or image data, but not across modalities. It aims at unifying the learning framework through a teacher-student setup that allows for contextualized latent target prediction. The teacher model is based on a complete view of the input data, which introduces contextualization, while the student model only sees a masked version of the input. Compared to previous work, the authors average the top K layers rather than only the final layer of the model, which has a notable effect, as shown in Figure 3.68. As there are different layers within the transformer network, the authors also investigate which layer output works best for prediction; they conclude that the output of the feed-forward layer works best. Since the model is built on a transformer architecture, self-attention is the main driver that creates the contextualized targets in the teacher model and hence the performance.
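In this teacher-student setup the teacher is not trained by gradient descent; as indicated in the architecture figure ("teacher tracks student parameters"), its weights follow the student as an exponential moving average. The sketch below shows such an update; the decay value is an assumption, and the code is illustrative rather than the fairseq implementation.

```python
import torch

@torch.no_grad()
def update_teacher(teacher, student, tau=0.999):
    """EMA update: teacher parameters slowly track the student parameters."""
    for p_t, p_s in zip(teacher.parameters(), student.parameters()):
        p_t.mul_(tau).add_(p_s, alpha=1.0 - tau)
```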
The authors also show that contextualization through the teacher model works best with a complete view of the input rather than a partial view. Beyond not being able to work across modalities, one drawback is that the model's structure still uses modality-specific encoding and masking schemes. In that regard, the Perceiver architecture (Jaegle et al., 2021a), used for example in the Flamingo model, is a complementary approach worth exploring. An earlier model that works across modalities is VilBert.

3.5.2 Vision-and-Language Bert (VilBert)

As seen in the previous section, data2vec can handle text, image or speech as input data, but it cannot do so at the same time; its focus is on unifying the training approach rather than working across modalities. When we think about multimodal models, however, we usually think of working with different modalities at the same time. VilBert (Lu et al., 2019b) is a natural extension of the iconic Bert architecture (Devlin et al., 2018c) to vision-and-language modelling. An immediate question is whether vision and language inputs should be handled together in a single stream or in parallel.
As we will see, it turns out that encoding the inputs in parallel streams increases performance. At the heart of the architecture is a co-attention mechanism which enables information exchange between the two modalities.

FIGURE 3.69: Lu et al. (2019b): VilBert's dual-stream architecture: dashed transformer modules can be repeated, and co-attention modules allow sparse interaction between the modalities.

Figure 3.69 shows the employed parallel-stream architecture. Each modality is handled separately and fed into two Bert-style transformer models. This allows both modalities to be processed according to their respective needs, while co-attention layers allow for communication between the streams. For the language stream, the encoding uses the vocabulary plus a special classification token (cls), a sentence separation token (sep) and a masking token (mask). For the vision stream, image region features are extracted via a Faster R-CNN model (Ren et al., 2015) which was pre-trained on the Visual Genome dataset (Krishna et al., 2016). Since image regions lack a natural ordering, their spatial location has to be encoded as well. VilBert achieves that through a five-dimensional vector that encapsulates the image region coordinates and the fraction of the image area it covers. Through projection, the dimensions of the positional encoding and the visual features are matched and then summed.
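The five-dimensional spatial encoding mentioned above can be sketched as follows: the bounding-box coordinates are normalized by the image width and height, the covered area fraction is appended, and a learned projection maps the result to the visual feature dimension so that the two can be summed. The exact layout of the five entries and the feature dimension are assumptions for illustration, not the original implementation.

```python
import torch
import torch.nn as nn

class RegionPositionEncoding(nn.Module):
    def __init__(self, visual_dim=2048):
        super().__init__()
        self.proj = nn.Linear(5, visual_dim)   # project to the visual feature dimension

    def forward(self, boxes, image_w, image_h, region_feats):
        # boxes: (num_regions, 4) as (x1, y1, x2, y2) in pixels
        x1, y1, x2, y2 = boxes.unbind(-1)
        area = ((x2 - x1) * (y2 - y1)) / (image_w * image_h)       # fraction of image covered
        spatial = torch.stack([x1 / image_w, y1 / image_h,
                               x2 / image_w, y2 / image_h, area], dim=-1)
        return region_feats + self.proj(spatial)                    # summed with the region features
```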
The image token (img) marks the beginning of such an image region sequence while representing the whole image. Through the dual-stream architecture, the complexity of the model can be adjusted separately for each modality. An alternative approach would be to discretize the visual space via clustering and then use the resulting tokens in the same way as text tokens. The drawbacks of that approach are the potential loss of detail at the discretization stage and the loss of flexibility across modalities as a result of identical processing. Finally, a single-stream architecture can interfere with the pre-training of the language model: it would have to be fine-tuned on the newly created visual tokens, and as those might be very different from the text tokens, there is potential for the pre-trained language model to become 'damaged' in the process and lose capabilities, an idea that is also central to the Flamingo model presented later on.

FIGURE 3.70: Lu et al. (2019b): Cross-attention in VilBert: compared with a standard encoder transformer block, the co-attention transformer layer exchanges the key-value pairs between the visual and linguistic streams in multi-headed attention, so that vision-attended language features are incorporated into the visual representations and vice versa.
The key innovation in the VilBert paper (Lu et al., 2019b) is the use of these co-attention layers; figure 3.70 depicts the basic architecture. The co-attention module computes query, key and value matrices in the standard transformer attention fashion, but then feeds the keys and values from each modality into the other modality's multi-head attention block. As a result, the visual attention will be conditioned on text, whereas the language attention will be image-conditioned. This communication between the streams only occurs at specific sections of the model, denoted by Co-TRM in figure 3.69. Notably, the language stream features a lot more preprocessing before the first co-attention layer than the image stream.
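The key/value exchange described above can be sketched with standard multi-head attention modules: each stream attends with its own queries over the other stream's keys and values. This is a simplified sketch of the idea, not the original VilBert code; hidden sizes and head counts are assumptions.

```python
import torch
import torch.nn as nn

class CoAttentionBlock(nn.Module):
    """One co-attention exchange: vision queries attend to language, and vice versa."""
    def __init__(self, dim=768, heads=8):
        super().__init__()
        self.vis_attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.lang_attn = nn.MultiheadAttention(dim, heads, batch_first=True)

    def forward(self, vis, lang):
        # Visual stream: queries from vision, keys/values from language.
        vis_out, _ = self.vis_attn(query=vis, key=lang, value=lang)
        # Language stream: queries from language, keys/values from vision.
        lang_out, _ = self.lang_attn(query=lang, key=vis, value=vis)
        return vis + vis_out, lang + lang_out    # residual connections

# Example: 36 image regions and 20 text tokens with hidden size 768.
block = CoAttentionBlock()
v, l = block(torch.randn(1, 36, 768), torch.randn(1, 20, 768))
```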
An interesting question to ask is what is actually learned in those co-attention layers and how it corresponds to human attention maps. Sikarwar and Kreiman (2022) analyze the efficacy of co-attention layers for VQA tasks in a VilBert network. Specifically, they compute the question-conditioned image attention scores and compare them to human attention maps created in experiments. In those experiments, humans are tasked with unblurring specific image regions to answer the same questions one would expect the machine learning model to answer. Such human attention maps are collected in the VQA-HAT dataset (Das et al., 2017), and rank correlation is used to compare the attention maps. Sikarwar and Kreiman (2022) find that in a six-layer network, rank correlation plateaus at layer 4 and increases with the number of image regions proposed while encoding the images. Perhaps more surprisingly, they find a minimal influence of semantics on the generation of the attention maps. Randomly shuffling the words in a sentence when testing the model barely changes the attention output, which suggests that keywords rather than sentence structure drive the attention output. Note, however, that while the attention maps remained similar, the model's actual performance on answering the questions dropped notably, by approximately 15%, such that it seems clear
that coherent sentences are important for the overall VQA task, but not for the attention creation process. What are the keywords that drive cross-attention in VilBert? The evidence provided by the authors clearly shows that nouns are the most influential part-of-speech when considering attention maps. On top of that, prepositions can sometimes help identify spatial relations. There is also some support for the hypothesis that removing Wh-words such as "who" and "where" can improve fine-grained attention maps in the final layer, which might be worth exploring further as a preprocessing step for deeper networks. Another approach would be to improve the way attention maps are generated by finding ways to include more of the available sentence information. Most notably, however, using object-based region proposals to process images can lead to bottlenecks that prevent the model from learning sufficiently fine-grained attention maps, as shown in figure 3.71.
Overall, humans are naturally good at VQA tasks. Hence, it is not surprising that attention maps which correlate well with human attention maps also improve model performance.

FIGURE 3.71: Sikarwar and Kreiman (2022): (left to right) picture, human attention, 36 regions, 72 regions, 108 regions. Similarity between human and model attention is measured using rank correlation.

Figure 3.71 shows that the number of region proposals fed into the model after processing an image affects its ability to produce adequate attention maps. In this particular case, the question "How many fingers is the girl in the black shirt holding up?" was correctly answered by humans as well as by a VilBert model using 72 or 108 region proposals, but answered incorrectly when using only 36 region proposals. Note, however, that in either case the machine learning model attended to the face of the wrong girl. The model using 72 regions also identified the wrong hand despite answering the question correctly. While the 108-region model identifies the correct hand holding up the fingers, it does not seem to prioritize it over the other identified hands in the picture. Hence, the attention maps are sufficiently different from the human attention map, which highlights the need to look closely not only at how models are performing, but also at how their performance has been achieved.
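The similarity measure referred to here, rank correlation between a model attention map and a human attention map, can be computed by flattening both maps and applying Spearman's rank correlation. The snippet below is a minimal illustration of that comparison, not the authors' evaluation code; the map resolution is an assumption.

```python
import numpy as np
from scipy.stats import spearmanr

def attention_rank_correlation(model_map, human_map):
    """Spearman rank correlation between two attention maps of equal shape."""
    rho, _ = spearmanr(model_map.ravel(), human_map.ravel())
    return rho

# Example with random 14x14 maps.
print(attention_rank_correlation(np.random.rand(14, 14), np.random.rand(14, 14)))
```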
As far as the model training is concerned, VilBert is pre-trained and then fine-tuned. The pre-training tasks comprise masked multi-modal modelling and multi-modal alignment prediction, performed on the Conceptual Captions dataset. That dataset contains about 3.1 million usable aligned image-caption pairs, which have been automatically scraped from web images. For the alignment task, the authors create misaligned pairs by randomly mismatching captions and images. For the masking task, 15% of both the visual and the language tokens are masked. The task is to reconstruct the masked tokens from the remaining input in the classical Bert fashion. While the text masks are directly regressed as in Bert, the model predicts distributions over semantic classes for the masked image regions. This is achieved by minimizing the KL divergence, a measure for the similarity of distributions, between the output distribution of the pre-trained model used for feature extraction and the VilBert predictions.
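The masked-region objective just described amounts to matching two class distributions. The sketch below shows one way to write such a KL term in PyTorch, with the feature extractor's class distribution as the target; the shapes, names and the exact direction of the divergence are assumptions for illustration rather than the original implementation.

```python
import torch
import torch.nn.functional as F

def masked_region_loss(vilbert_logits, detector_probs):
    """KL divergence between the detector's class distribution (target) and
    VilBert's predicted distribution for each masked image region.

    vilbert_logits: (num_masked_regions, num_classes) raw scores.
    detector_probs: (num_masked_regions, num_classes) probabilities from the
                    pre-trained Faster R-CNN feature extractor.
    """
    log_pred = F.log_softmax(vilbert_logits, dim=-1)
    return F.kl_div(log_pred, detector_probs, reduction="batchmean")
```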
The performance results are depicted in Figure 3.72.

FIGURE 3.72: Lu et al. (2019b): VilBert Performance

Table 1 (from Lu et al., 2019b): Transfer task results for ViLBERT compared with existing state-of-the-art models and architectural ablations. † indicates models without pre-training on Conceptual Captions. For VCR and VQA, which have private test sets, test results are reported (in parentheses) only for the full model. IR = image retrieval, ZS-IR = zero-shot image retrieval.

Method         | VQA test-dev (test-std) | VCR Q→A      | VCR QA→R     | VCR Q→AR     | RefCOCO+ val/testA/testB | IR R1/R5/R10          | ZS-IR R1/R5/R10
DFAF           | 70.22 (70.34)           | -            | -            | -            | -                        | -                     | -
R2C            | -                       | 63.8 (65.1)  | 67.2 (67.3)  | 43.1 (44.0)  | -                        | -                     | -
MAttNet        | -                       | -            | -            | -            | 65.33 / 71.62 / 56.02    | -                     | -
SCAN           | -                       | -            | -            | -            | -                        | 48.60 / 77.70 / 85.20 | -
Single-Stream† | 65.90                   | 68.15        | 68.89        | 47.27        | 65.64 / 72.02 / 56.04    | -                     | -
Single-Stream  | 68.85                   | 71.09        | 73.93        | 52.73        | 69.21 / 75.32 / 61.02    | -                     | -
ViLBERT†       | 68.93                   | 69.26        | 71.01        | 49.48        | 68.61 / 75.97 / 58.44    | 45.50 / 76.78 / 85.02 | 0.00 / 0.00 / 0.00
ViLBERT        | 70.55 (70.92)           | 72.42 (73.3) | 74.47 (74.0) | 54.04 (54.8) | 72.34 / 78.52 / 62.61    | 58.20 / 84.90 / 91.52 | 31.86 / 61.12 / 72.80

As mentioned before, the dual-stream architecture outperforms the single-stream architecture. Furthermore, pre-training considerably boosts performance, as does fine-tuning. Interestingly, the authors also study the effect of the dataset size and of the architecture depth. Performance increases monotonically with dataset size, suggesting that it can be improved further with more data. The results on the optimal layer depth are task dependent. VQA and image retrieval reach peak performance at 6 layers, where a layer denotes a repeatable block as depicted in Figure 3.69. Zero-shot image retrieval benefits from even greater depth. However, the VCR and RefCOCO+ tasks seemingly benefit from shallower models.

The VQA task is based on the VQA 2.0 dataset. Each question must be matched to one of ten answers. Hence, the VQA task is not open-ended here, but treated as a classification task. To achieve that, the model is amended with two MLP layers which take the element-wise product of the model-generated img and cls tokens as input.
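The VQA head described above can be sketched as a small classification head: the element-wise product of the pooled image (img) and text (cls) representations is passed through a two-layer MLP that scores a fixed answer vocabulary. The hidden sizes and the answer-vocabulary size below are illustrative assumptions, not the original hyperparameters.

```python
import torch
import torch.nn as nn

class VQAHead(nn.Module):
    """Sketch of a ViLBERT-style VQA classification head.

    The element-wise product of the pooled image representation (h_img)
    and the pooled text representation (h_cls) is scored against a fixed
    answer vocabulary by a two-layer MLP. All sizes are illustrative.
    """
    def __init__(self, hidden_dim: int, num_answers: int):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(hidden_dim, 2 * hidden_dim),
            nn.ReLU(),
            nn.Linear(2 * hidden_dim, num_answers),
        )

    def forward(self, h_img: torch.Tensor, h_cls: torch.Tensor) -> torch.Tensor:
        # (batch, hidden_dim) * (batch, hidden_dim) -> (batch, num_answers)
        return self.mlp(h_img * h_cls)

# toy usage with made-up sizes
head = VQAHead(hidden_dim=1024, num_answers=3000)
scores = head(torch.randn(4, 1024), torch.randn(4, 1024))  # shape (4, 3000)
```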
The VCR task is also posed as a multiple-choice problem, with images taken from movie scenes. To fine-tune for this task, the question is concatenated with each of the four candidate answers, yielding four different text inputs that are given to the model together with the image. Four scores are generated accordingly and the final answer is selected through a softmax. The RefCOCO+ task is a grounding task: an image region has to be selected according to a natural language reference. Caption-based image retrieval requires the model to find the image that corresponds to a given caption.
The dataset used is the Flickr30k dataset, which contains 30,000 pictures with five captions each; these captions are of higher quality than automatically generated captions scraped from web data.

3.5.3 Flamingo

The VilBert model showed one way of actually combining visual and language inputs. In contrast, data2vec showed how to design a self-supervised model and how influential the actual training process as well as contextualization can be. A natural question to ask is therefore whether we can build a truly multimodal architecture like VilBert that is self-supervised like data2vec, or at least needs little task-specific training, and how to optimize its training procedure. In particular, both VilBert and data2vec were tested on multiple tasks, but each task needs slight re-adjustments of the model as well as additional fine-tuning. Ideally, a multimodal architecture would not only be efficient in its initial training, but also easily adaptable to different tasks. Finding ways to work not only with different input modalities, but also with different tasks, is crucial towards building a more general AI. A promising approach in that direction is few-shot learning. The following section presents Flamingo (Alayrac et al., 2022), a few-shot multimodal architecture developed by DeepMind, which comprises key innovations such as handling arbitrarily interleaved visual and language sequences as inputs, as well as ways to effectively combine pre-trained vision-only and language-only models. As such, it is a visually conditioned autoregressive text generation model.
Figure 3.73 demonstrates Flamingo's capabilities. It can function as a chat bot, describe pictures, and work with image sequences (videos), and in doing so it needs only a few prompts.

FIGURE 3.73: Alayrac et al. (2022): Flamingo Prompt-Output-Examples

At the heart of the model is a large language model, Chinchilla (Hoffmann et al., 2022), with 70B parameters. Large language models such as GPT-3 (Brown et al., 2020) can, as their name suggests, be trained on a large amount of text data, which gives them impressive text-generation capabilities. However, multimodal generative modelling presents some specific challenges not present in language-only modelling. First of all, training large language models is expensive. Hence, it is paramount to work with a pre-trained version; yet trying to additionally teach a large language model to work with visual inputs has the potential to deteriorate or destabilize the pre-trained model. Second, large language models can suffer from memory constraints that are potentially severely aggravated by simply adding high-dimensional visual data to an input sequence. Third, good generalist capabilities typically require a huge amount of heterogeneous training data, and there might not exist enough labelled image-caption-pair data to train a capable few-shot learning model in the vision-and-language domain.
To train Flamingo, the authors address these challenges foremost by generating their own web-scraped multimodal dataset, similar to existing ones in the language-only domain. Furthermore, they use a Perceiver architecture (Jaegle et al., 2021a) that resamples the visual inputs into a fixed number of visual tokens. Finally, the self-attention layers of the language model are kept frozen during training, while newly added cross-attention layers are interleaved between them. A gating mechanism ensures that those new cross-attention layers do not interfere at model initialization, thereby improving stability and final performance.

Figure 3.74 shows the fundamental architecture of Flamingo. A pre-trained vision model as well as a pre-trained language model are frozen; together they build the cornerstones of the model. The vision model is pre-trained using a contrastive text-image approach. Its role is to extract features such as colour, shape, nature and position of objects - typical semantic spatial features that one would use in querying. The language model is an existing pre-trained language model. On top of those frozen parts, the authors add a Perceiver Resampler and gated cross-attention layers as learnable components. The Perceiver Resampler turns the variable-sized outputs of the vision model into a fixed set of visual tokens. Those visual tokens then serve as keys and values for the gated cross-attention layers which are interleaved into the frozen language model.
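The gating mechanism mentioned above can be sketched as a tanh-gated cross-attention block whose gates are initialised at zero, so that the frozen language model is left undisturbed at initialization and the visual signal is blended in gradually during training. This is a simplified sketch with assumed dimensions, head count and layer placement, not the original Flamingo implementation.

```python
import torch
import torch.nn as nn

class GatedCrossAttentionBlock(nn.Module):
    """Sketch of a Flamingo-style gated cross-attention layer.

    Language hidden states attend to the visual tokens produced by the
    Perceiver Resampler. The tanh gates start at zero, so the block is
    initially an identity mapping on the text stream.
    """
    def __init__(self, dim: int = 512, n_heads: int = 8):
        super().__init__()
        self.attn = nn.MultiheadAttention(dim, n_heads, batch_first=True)
        self.ff = nn.Sequential(nn.Linear(dim, 4 * dim), nn.GELU(),
                                nn.Linear(4 * dim, dim))
        self.norm_attn = nn.LayerNorm(dim)
        self.norm_ff = nn.LayerNorm(dim)
        # gates start at zero -> tanh(0) = 0 -> no contribution at initialization
        self.attn_gate = nn.Parameter(torch.zeros(1))
        self.ff_gate = nn.Parameter(torch.zeros(1))

    def forward(self, text: torch.Tensor, visual: torch.Tensor) -> torch.Tensor:
        # text:   (batch, text_len, dim) hidden states of the frozen language model
        # visual: (batch, n_visual_tokens, dim) output of the Perceiver Resampler
        attn_out, _ = self.attn(self.norm_attn(text), visual, visual)
        text = text + torch.tanh(self.attn_gate) * attn_out
        text = text + torch.tanh(self.ff_gate) * self.ff(self.norm_ff(text))
        return text

# toy usage with made-up shapes
block = GatedCrossAttentionBlock()
out = block(torch.randn(2, 16, 512), torch.randn(2, 64, 512))  # (2, 16, 512)
```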
FIGURE 3.74: Alayrac et al. (2022): Flamingo Model Structure

As a result, Flamingo can model the likelihood of some text $y$ interleaved with a sequence of images or videos $x$ as

p(y \mid x) = \prod_{l=1}^{L} p(y_l \mid y_{<l}, x_{\leq l})