diff --git "a/G9E0T4oBgHgl3EQfhgFS/content/tmp_files/load_file.txt" "b/G9E0T4oBgHgl3EQfhgFS/content/tmp_files/load_file.txt" new file mode 100644--- /dev/null +++ "b/G9E0T4oBgHgl3EQfhgFS/content/tmp_files/load_file.txt" @@ -0,0 +1,891 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf,len=890 +page_content='Myths and Legends in High-Performance Computing arXiv preprints ©The Author(s) 2023 Reprints and permission: sagepub.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='co.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='uk/journalsPermissions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='nav DOI: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1177/ToBeAssigned www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='sagepub.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='com/ SAGE Satoshi Matsuoka1, Jens Domke1, Mohamed Wahib1, Aleksandr Drozd1, Torsten Hoefler2 Abstract In this humorous and thought provoking article, we discuss certain myths and legends that are folklore among members of the high-performance computing community.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We collected those myths from conversations at conferences and meetings, product advertisements, papers, and other communications such as tweets, blogs, and news articles within (and beyond) our community.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We believe they represent the zeitgeist of the current era of massive change, driven by the end of many scaling laws such as Dennard scaling and Moore’s law.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' While some laws end, new directions open up, such as algorithmic scaling or novel architecture research.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' However, these myths are rarely based on scientific facts but often on some evidence or argumentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In fact, we believe that this is the very reason for the existence of many myths and why they cannot be answered clearly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' While it feels like there should be clear answers for each, some may remain endless philosophical debates such as the question whether Beethoven was better than Mozart.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We would like to see our collection of myths as a discussion of possible new directions for research and industry investment.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Keywords Quantum;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' zettascale;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' deep learning;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' clouds;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' HPC myths This manuscript is intended for the “CCDSC Special Issue”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Introduction Any human society has their myths and legends—this also applies to the high-performance computing (HPC) community.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' HPC drives the largest and most powerful computers and latest computing and acceleration technologies forward.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' One may think that it’s scientific reasoning all the way down in such an advanced field.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Yet, we find many persistent myths revolving around trends of the moment.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Since it’s late 2022, we started our analysis by asking the all-knowing intelligence ChatGPT “Create myths or legends in high performance computing”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In a HAL 9000 manner, it refused to make up something for us: “I’m sorry [Dave], but as an AI language model, I am not programmed to generate or share myths or legends.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' My primary function is to assist users with information and general knowledge, and I do not have the ability to create or share fictional content.”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' So, even the smartest of internet parrots (Bender et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2021) that was itself created with massive high-performace computation running on a large accelerator system still has a long way to go.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Thus, we fall back to reasoning among the authors of this work.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We discuss 12 of today’s HPC myths, a number customary in our community, similar to a panel statement where we debate supporting and contradicting facts with a healthy exaggeration in one of those directions.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We attempt to neither judge nor prove folklore right or wrong but instead try to stipulate an intensive discussion in the community that drives our future thinking.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Myth 1: Quantum Computing Will Take Over HPC!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Numerous articles are hyping the quantum computing revolution affecting nearly all aspects of life ranging from quantum artificial intelligence to even quantum gaming.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The whole IT industry is following the quantum trend and conceives quickly growing expectations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The actual development of quantum technologies, algorithms, and use- cases is on a very different time-scale.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Most practitioners would not expect quantum computers to outperform classical computers within the next decade.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Yet, we have constantly been surprised by advances in device scaling as well as, more recently, artificial intelligence.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Thus, the fear of missing out on getting rich is driving the industry to heavily invest into quantum technologies pushing the technology forward.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' With all this investment, it seems reasonable to expect that quantum computation, which promises to deliver exponential speedups, will replace high-performance computation as we know it today with its meager linear speedup through parallelism.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Yet, the nature of quantum computation poses some severe limitations: First, reading unstructured data into a quantum state seems very challenging.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Reasonable future quantum computer designs can read in the order of Gigabit/s while modern single-chip processors are already achieving 1RIKEN Center for Computational Science, Japan 2Eidgen¨ossische Technische Hochschule Z¨urich, Switzerland Corresponding author: Torsten Hoefler, ETH Z¨urich, Inst.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' f.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Hochleistungsrechnersyst.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', Universit¨atstrasse 6, 8092 Z¨urich, Switzerland Prepared using sagej.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='cls [Version: 2017/01/17 v1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='20] arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='02432v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='DC] 6 Jan 2023 2 arXiv preprints Terabit/s—many orders of magnitude more (Hoefler et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2023).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Furthermore, once a quantum state is constructed, it can often be “used” only once because measurements destroy superposition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' A second limitation stems from the lack of algorithms with high speedups.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Most algorithms achieve quadratic speedups for a wide range of use-cases using amplitude amplification at their core.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' While this technique is extremely versatile and can search any unstructured quantum state (cf.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Grover’s algorithm), its limited speedup is unlikely to make it practical for quantum computers that may be constructed in the next decades (Hoefler et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2023).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Thus, it seems unlikely that quantum computation is going to replace a significant fraction of traditional HPC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' It is more likely that it will start as quantum acceleration with a small set of use-cases that may grow in the future.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' To determine which use-cases can realistically benefit from quantum acceleration, resource estimation techniques (Beverland et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2022) become crucial.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' But unlikely does not mean impossible— we believe that now is the right time to begin a discussion about the role of quantum computation in HPC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Furthermore, it is crucial to guide the resources we invest into the right directions.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We close with these questions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' x When will quantum computing be commercially profitable?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' y What will be the first useful algorithm?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' z What will be the next break-through area enabled by a new quantum algorithm?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Myth 2: Everything Will Be Deep Learning!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Simultaneously with the quantum hype, we are in the midst of the deep learning revolution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Indeed, in recent years there has been a plethora of papers replacing traditional simulation methods, or whole computational kernels with data-driven models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Most of those employ deep neural network architectures.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Impressive results fire up expectations equally high to the quantum world.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Data-driven weather and climate predictions apparently beat the best models (Pathak et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2022;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Bi et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2022) and output data can be compressed by three orders of magnitude (Huang and Hoefler 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Similar successes are touted in literally any application area.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' There is no doubt that deep learning models can learn to approximate complex functions used in scientific simulations in a specific input domain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The issue is, as always, the trade- offs: between speed on one hand, and accuracy on the other— and we have to be very careful with these comparisons.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In fact, any result can be skewed into any of the extremes (Hoefler 2022).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Sometimes even very simple models (and they have to be simple to be compute-performance competitive) such as multi- layer perceptrons (MLPs) can work well enough in place of exact mathematical expression, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', Rasp et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' (2018);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Brenowitz and Bretherton (2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' One wonders sometimes whether the latter could have been simplified in the first place.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' A possible explanation is that neural nets, rather than learning to approximate a given function in some abstract sense, learn to decompose the input space into polyhedra with corresponding simple mappings (Aytekin 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In other words, neural nets can exploit the fact that typical input values in many tasks are concentrated in particular ranges, which, in turn, raises concerns about accuracy guarantees for out-of- distribution inputs, and a possibility of some sort of hybrid / fall-back mechanism.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' An independent question is whether the architectures used for machine learning tasks, like classification, are a good match to serve as surrogate models in the first place?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' A new line of research is addressing this by using neural architecture search for such models (Kasim et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In an extreme case, the objective is to find a purely symbolic (and thus hopefully more robust to out-of-distribution inputs) formulation for cases where an exact mathematical expression for the problem is not a-priori known (Liu and Tegmark 2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Uncertainty quantification and explainability are also two main aspects of high importance in the scientific domain where DL is lacking (due to its black-box optimization nature).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Overall the jury is still out as to which extent surrogate models can replace first-principles simulations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' However, one thing is clear: there is a whole spectrum of simulation tasks (Lavin et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2021)—ranging from ones where exact mathematical expressions are not available in the first place (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', contribution of specific vegetation to weather dynamics) and learning it from data could not only be more efficient but also more accurate;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' to those where utmost accuracy and precision guarantees are required and can only be provided by specialized error-controlling numerical methods.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We close with these questions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' x Will ML models replace or just augment traditional simulations?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' y Where will ML models fail to deliver?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' z How can we classify (pieces of) an application as ML- acceleratable or not?' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Myth 3: Extreme Specialization as Seen in Smartphones Will Push Supercomputers Beyond Moore’s Law!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' AI, like Stable Diffusion, is now in the palm of everyone’s hand.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' These modern smartphones typically are driven by a System on Chip (SoC) that consists of a plethora of special function units (SFUs) and/or special purpose processors that accelerate various aspects of smartphone workloads.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The main purpose of such a composition is to achieve low power for longer battery life while maintaining acceptable performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The success of GPUs, growing demands for lower power and highest performance, and the end of Moore’s law created a myth that future supercomputer architectures will be just like smartphones in that there will be multitudes of hardware customization per each facet of the entire workload.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' However, such a claim misses the point in the analogy, and entirely ignores multiple drawbacks of such an approach as described below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In fact, the only successful “accelerator” in the recent history of HPC is a GPU.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The primary reason for its success is high memory bandwidth, a feature known since the vector supercomputer days, which is now adopted by mainstream CPUs such as Fujitsu A64FX and Intel Sapphire Rapids.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The reason for the acceleration is primarily that the majority of the HPC workloads are memory bandwidth bound (Domke et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Thus, modern Prepared using sagej.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='cls Matsuoka, Domke, Wahib, Drozd, Hoefler 3 reincarnations of vector processors, such as vector units and fast memory with HBM/GDDR variants, have been sufficient to accelerate such workloads beyond CPUs with slower DDR memory (Matsuoka 2008).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' So, to claim that multitudes of special accelerators will constitute a supercomputers is stretching the success of GPUs somewhat unfoundedly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In fact, there are mainly three reasons why the plethora of customized accelerated hardware approach would fail.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The first is the most important, in that acceleration via SoC integration of various SFU is largely to enable strong scaling at a compute node level, and will be subject to the limitations of the Amdahl’s law, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', reducing the time to solution, the potential speedup is bound by the ratio of accelerated and non-accelerable fractions of the algorithm, which quickly limits the speedup.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Modern supercomputing is rather driven by weak scaling as explained by Gustafson (1988), where the speedup is based on how well the parallelizable or accelerable fraction can be scaled on many nodes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' This is often achieved by linearly increasing the overall workload and maintaining a constant amount of work per node, so the time to solution remains constant but performance gain is proportional to the number of nodes in an ideal case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' This was exactly how massive performance gain was obtained, despite skepticisms from the then experts, towards massively parallel computing, culminating in the first awarding of the Gordon Bell prize in 1987 (Bell et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2017).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Combination of strong and weak scaling has been instrumental in utilizing massive parallelism and performance speedup in modern supercomputers such as Frontier and Fugaku, but the contribution of the latter has been greater in absolute speedup terms*.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Now, weak scaling to large number of nodes require that the workload can be subdivided to achieve extremely good load balancing, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', (amount of work) / (processing capability) is uniform among all nodes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' For homogeneous systems, if the workload domain is easily compostable, then simple uniform partitioning will suffice, and multitudes of studies have been conducted to achieve proper domain decomposition for more complex algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Such load balancing work can be readily be applied even for nodes that are composed of heterogeneous elements, provided that (a) the architecture of the nodes are largely uniform (homogeneous) across the entire machine, and (b) during execution, the codes will be running simultaneous on one of the processors within the node, all at the same time within the machine.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Practically all successful ‘accelerated’ supercomputers and their applications, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', GPU machines such as Frontier, follow this pattern.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' However, once the nodes would be composed of plethora of customized hardware, and expected to be utilized in a more random, heterogeneous fashion as in a smartphone, load balancing becomes extremely difficult, and thus weak scaling speedup will flatten quickly, especially in a large parallel system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' There have been efforts to alleviate this by creating a task graph of the workload and conduct dynamic load balancing, but have not really achieved success for very large systems, let alone for numerous heterogeneous accelerators.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' This is why, even for GPU-based machines, not only the node architectures are homogeneous, but also, in any given workload only GPUs or CPUs are used dominantly, but not typically both.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Contrastingly, that large parallel program decomposed into a smaller task/dataflow graph and executed on-demand basis heterogeneously on a plethora of accelerators is only largely beneficial for small programs on a small machine, but not for HPC where parallelism will continue to increase to exploit weak scaling. The second reason is the increasing difficulty of dark silicon being available in the system to be utilized for heterogeneously specialized hardware, for cost reasons.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In the past, dark silicon was projected to be abundant with reduced lithography, thus justifying the “plethora of accelerators” view, as they were available for very low cost.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' However, with the slowing down of Moore’s law, coupled with high cost of manufacturing due to more advanced fab technologies such as EUV, transistor cost over time is flattening, or may even increase.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Thus, the chip cost will become largely proportional to the number of transistors irrespective of the lithography, so every transistor has to contribute to the overall performance improvements in a major fashion, turning dark silicon into expensive unused silicon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' For smartphones, the major cost of the phone is not the SoC but rather in the peripherals such as screen, camera, flash memory, etc.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', and the battery life is premium in the cost metric so extra cost incurred by dark silicon may be tolerable.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' For supercomputers, however, the major cost of the machine is the processors themselves, dominating over 50% of the overall CapEx.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' So unless the acceleration could benefit some major proportion of the workload, dark silicon would become an intolerable waste.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' That is why, over generations, accelerators such as GPUs tend to become more general purpose to cover an increasing proportion of the workload, ultimately becoming general purpose as the CPUs (or, GPGPUs).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The third reason is software and productivity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Unless the accelerator usage is extremely easy, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', hidden under a set of very simple APIs, expecting the programmers to adopt an arcane programming model is not viable.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In fact, this is more serious for HPCs where the market for applications is much smaller than major commodity ecosystems such as smartphones, with a less performance-conscious but extremely large market.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Thus, for example, a large consumer- oriented IT company such as Apple can afford to replace a part of its API for a phone with hardware because it will sell more than 100 million iPhones, but not for supercomputers that have a much narrower market and thus do not warrant such investment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We close with these questions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' x Will extreme heterogeneity happen?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' y Are supercom- puter workloads worth extreme specialization?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' z When will we have production supercomputers with more than one accelerator type?' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' ∗If one considers power efficiency for system scaling, massive weak scaling would not have been possible without dramatic increase in power/performance of compute nodes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' However, such improvements usually allow increase in the number of nodes and/or processor units, thus helping to push weak scaling;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' as such, in terms of algorithmic scalability, weak scaling is still the dominating factor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Prepared using sagej.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='cls 4 arXiv preprints Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Classification of Compute Kernels and Supercomputing Architecture Myth 4: Everything Will Run on Some Accelerator!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Related to our previous myth, even if one accepts that there will not be a plethora of accelerators, there could be a few such as GPUs or FPGAs, where the dominant portion of the workload will run.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Indeed, for GPU-based machines that would be an assumption, lest the extra investment will not make sense.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' However, one could question, would some superchip such as GPUs largely replace the CPUs, the latter be degraded to second class citizens?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' It is not trivial as it may seem, as such statements are rather dogmatic and not based on candid analysis of the workloads.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' By proper analysis of the workloads, we may find that CPUs may continue to play a dominant role, with accelerator being an important but less dominant sidekick.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' From the hardware perspective, workloads can be largely divided into three classes, (C) compute bound, (B) memory bandwidth bound, and (L) memory latency bound.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Any application will be composed of multiple compute kernels, each one being able to be largely classified into one of the three in Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Over time, supercomputer architectures have evolved in an attempt to cover all three in effective ways.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Up until the 90s, special-purpose vector machines such as Cray and NEC SX accelerated largely (B), and (C) to some extent.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' This was largely due to the dominant workload that was CFD which was largely (B).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Then in the 90s the microprocessor evolution for HPC happened, utilizing the commodity one-chip CPUs which had become very powerful due to high end applications such as engineering and multimedia needs, starting with workstation/server RISC then later x86 processors in massively parallel fashion, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', DoE ASCI Red.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Individual processors were mediocre in performance but attained performance via massive parallelism, exercising weak-scaling, cf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Myth 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Then in the late 2000s, although achieving Petascale performance was pioneered with the DoE Roadrunner and Jaguar machines, there was an ambition to achieve exascale by the late 2010s, achieving 1000x scaling in performance in 10 years.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The roadblock was power/performance using conventional CPUs.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' However by the late 2000s the GPUs were evolving from their graphics-specific purpose to become general purpose compute processors, as they were architectural descendents of classical vector processors Matsuoka (2008).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Different from classical vectors were that the floating point performance had been significantly enhanced, motivated by graphical workloads, and when generalized, the GPUs were now covering (C) and (B), while (L) was left for CPUs as GPU vector pipeline had very long latency.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' CPUs that facilitated SIMD vector units with high bandwidth memory such as the Intel Xeon Phi and Fujitsu A64FX brought in classical vector properties back into the CPUs, so in a sense homogeneous system composed of such chips were not direct reincarnations of simple commodity CPU based massively parallel machines, but rather, can be more regarded as converging the GPU and CPU properties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Circa 2022, the top machines are either homogeneously configured heterogeneous CPU-GPU nodes, or ‘converged’ nodes such as RIKEN Fugaku or forthcoming machines with Intel Sapphire Rapids CPUs with HBM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' However, this is not the only possible combination, and other configurations have not been properly explored.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' For example, one could conceive of a machine with the latter configuration, with purpose built matrix-based accelerators for compute intensive kernels as a separate chip (or chiplet).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In such a machine, the CPU would cover workloads (B) and (L), while the matrix accelerator will cover (C).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The benefit of such a machine would be ease of programming of (B) workloads which often involve complex memory access patterns, and thus porting to GPU codes has proven to be challenging.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' For further acceleration of (L) workloads, there is a limit to acceleration, such as molecular dynamics that require strong scaling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The best strategy seen for such workloads is fully customized data pipelines such as Anton (Shaw et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2008) with hardware design time synthesis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' One could almost mimic such customization with cost but make it programmable by FPGAs or CGRAs.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Such dataflow customization could also be useful for compute bound workloads such as DL Transformers, if small matrix engines as special function units can be conjoined in a larger macro dataflow as seen in modern FPGAs and CGRA chips.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' As such, in such a machine, (B) will be covered by CPUs, while (C) and (L) will be covered by a ‘strong scaling accelerator’.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' As we observe here, we find that we have not even covered the possible configurations of divergence/convergence of Prepared using sagej.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='cls Matsuoka, Domke, Wahib, Drozd, Hoefler 5 processing units, as the only mainstream ‘accelerated’ machines are GPUs with the second property, while other design spaces have not been properly explored.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We close with these questions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' x Will CPUs become pure “servants” to the accelerators?' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' y Are accelerators actually more than just better balanced processors?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' z Will reconfigurable accelerators see a renaissance?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Myth 5: Reconfigurable Hardware Will Give You 100X Speedup!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In a “fool me once.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='..” fashion, one accelerator in particular has taken the HPC community by storm with lofty promises of 100x speedup (Lee et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2010) ever since the first ported matrix-multiplication by Larsen and McAllister (2001).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Fueled by NVIDIA’s gross margin of over 50% (Macrotrends LLC 2022), and supported by billions of dollars from US DOE for ECP and similar programs in other parts of the world, the HPC community eventually migrated to a well designed and broadly adopted GPU/CUDA ecosystem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Consequently, 164 systems of the TOP500 list utilize accelerators from NVIDIA.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Nearly two decades later, Fugaku has shown that it only took long vectors and high-bandwidth memory to match GPU performance and energy-efficiency for many workloads.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' One positive aspect is that that much code has been “modernized”, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', rewritten in CUDA or languages and frameworks promising portability to utilize new devices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' But the open question is how portable are these modernized codes really?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Can they run seamlessly on all new devices?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The global FPGA market was recently valued at about one-third of the global GPU market (Allied Market Research 2020, 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Major chip vendors buying the leading FPGA hardware vendors, AMD acquired Xilinx and Intel bought Altera, respectively, indicate an interest for FPGA integration into future mainstream products.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' However, so far this has not materialized.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Whether FPGA can replace or complement the mainstream GPUs in the HPC and data center market hinges on the questions regarding the cost-to-performance ratio, an existing software ecosystem, and most importantly the productivity of programmers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Unfortunately, we see hurdles in all these areas, which the community and industry might be able to solve with enough time and money.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Without offering at least a factor of 10x performance gain at moderate porting costs, “FPGAs are not a factor in our current planning, because of their unprogrammability” (Sorensen et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The question whether reconfigurable logic can replace or augment GPUs as accelerators is interesting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' FPGAs will certainly have a harder time due to their high flexibility that comes at a cost.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Units built from reconfigurable logic are 10–20x less energy and performance efficient in silicon area.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' This issue can be addressed by hardening certain blocks, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', floating point units, as some FPGA companies do.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' However, even then, the whole control path would be much less efficient and it is unclear whether program-driven execution is that much less efficient compared to reconfigurable dataflow.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' A new line of reconfigurable accelerators as materialized in Xilinx’ adaptive compute acceleration platform are similar to coarse-grained reconfigurable arrays (CGRAs) and offer more programmable blocks with a configurable dataflow interconnect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' But if now 90% of the chip are hardened units, are those devices just GPUs with a less mature ecosystem?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We close with these questions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' x Will the HPC community embrace FPGAs as alternatives to GPUs in large-scale production systems?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' y Can the community afford a “Fool me twice.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='..” moment?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' z Will CGRA-style reconfigurable dataflow accelerators take the place of FPGAs to compete?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Myth 6: We Will Soon Run at Zettascale!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Maybe FPGAs are the way to zettascale.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' With Aurora still under construction, Intel ignited the debate about zettascale in late 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' While the HPC community initially smirked at their plans, Intel continued pushing the zettascale agenda, culminating in the latest claims to achieve 1 zettaflop/s by the end of the decade (Cutress 2022a).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' This proposition needs to be addressed, and we try to put their claims into perspective and predict a realistic timeline.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Obviously, we cannot rule out that Intel has a secret, revolutionary technology which they plan to commercialize in due time, however let us not speculate now and instead build on publicly available data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' But first we have to distinguish the terms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We assume in the following, that (1) “zettaflop system” refers to any computer capable of achieving over 1021 double- precision floating-point operations (“FP64”) per second on the Linpack benchmark;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' (2) “zettaop system” refers to any computer theoretically capable of performing 1021 operations† per second, and (3) “zettascale system” denotes any computer executing a scientific application with a sustained performance of over 1 zettaflop/s in fp64.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Before we extrapolate, we look at historical trends by Strohmaier et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The HPC community achieved 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='068 teraflop/s with Sandia/IBM’s ASCI Red in summer 1997, 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='026 petaflop/s with Los Alamos/IBM’s Roadrunner in summer 2008, and achieved (unofficially) 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='05 exaflop/s in spring of 2021 with China’s OceanLight system and 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1 exaflop/s with OakRidge/HPE’s Frontier in summer 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Not only do 11 and 13 years lie in between these achievements, respectively, but also multiple megawatt.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' ASCI Red consumed “only” 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='850 MW, Roadrunner increased that to 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='35 MW, and OceanLight and Frontier now consume 35 MW and 21.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1 MW, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' This and Figure 2 show that the energy efficiency of modern chips cannot keep up with the demand for increasing compute.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Back to Intel claiming to manage 2x performance improvements year-over-year which would yield zettaflop/s by 2032—but at a power requirement of the entire system of 50–100 MW (Cutress 2022b).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Hence, this 1,000x in performance comes at the cost of 3–5x in power;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' and reformulated: the energy efficiency to perform fp64 operations needs to increase by 200–350x, from ≈50 to over 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='000 Gflop/s Watt .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Even under idealized conditions and using Frontier’s Rpeak as baseline, this goal requires a †An exact and consistent definition of “operation” in this context is still debated in the HPC community.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Prepared using sagej.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='cls 6 arXiv preprints Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Historical fp64 power efficiency [in Gflop/s Watt ] extrapolated until 2038 to put Intel’s zettaflop/s claims into perspective.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 125x improvement in 10 years, and all of that while other big players slowly acknowledge the end of practical silicon scaling laws (White 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' If we believe the IEEE IRDS™ (2021) roadmap, we might gain 5x in power density (optimistically rounded from 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='27x) by 2034 at 7 ˚A compared to 5 nm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' This leaves 25x, which we could split into 5x from increased transistor count per chip and 5x from increased node count per system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Can we cool the former, yes (Wu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2021), and can we interconnect the latter?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Sure, but doing so, at 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='5 GW, comes down to the will to invest more than anything else, and without revolutions in memory and interconnect technologies, we might see Linpack transition into memory- or I/O-bound territory, nullifying any computational advances.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' On the other hand, a zettaop/s system at 100 MW in 2032 is far more likely, since low-precision units (such as tensor cores) can boost the op/s Watt metric, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', currently fp16 tensor cores demonstrate an 8x advantage over fp64 vector units.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Lowering the precision further from fp16 to 3-bit operands could allow for another 5x improvement (Frantar et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2022), but only if the industry (and HPC community) sees the need for adding these low-precision units, as we discuss in Myth 11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Considering the above, our more realistic, yet optimistic, timeline for zetta is zettaop/s in 2032 at 50 MW, zettaflop/s in 2037 at 200 MW, and zettascale by 2038.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Can Intel or anybody else pull it off before then?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Only time will tell.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We close with these questions.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' x Will we reach zettaflop/s performance or will fp64 lose relevance before?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' y Will we continue to build more power-hungry supercomputers as we did in the past?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' z Which one will happen first: zettascale, practical quantum advantage, or all internal combustion-based engines cease to be produced?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Myth 7: Next-Generation Systems Need More Memory per Core!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Before, on the road to peta- and exascale, application scientists continuously raised alarms that the memory per core is decreasing with each new computer generation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' This was mainly due to the quick growth in the number of cores while the performance per core was stagnating.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Yet, many workloads can keep those cores utilized with a relatively small working set while staging larger amounts of data remotely and/or recomputing parts.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Much of this large memory requirement seemingly turns out to be legacy and somewhat wasteful design from times where memory space was abundant compared to other resources.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Simplistic arguments along the lines of “we need more of X” seem to have a solid tradition in our community.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' For example, the HPC community spent the first decades to hunt more floating point computations per second.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Recently, a demand for larger and faster memory replaced this main goal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The community nearly made a complete 360-degree turn, with Haus (2021) saying “computation is free” and Ivanov et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' (2021) showing “data movement is all you need”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Some even argue that this turn was taken too late due to the fixation on flop/s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' While this was all true at the time, the general discussion should really be about the intricate relation between the application requirements and the system capabilities in terms of balance, i.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', ratio between the different resources such as memory size/bandwidth and compute (Czechowski et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2011).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' These ratios usually shift with chip technology and architectural choices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' For example, Moore’s law drove the costs for compute on chip down over decades but off-chip communication was limited by Rent’s rule.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' This led to the recent data movement crisis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Newly emerging optical off- chip connectivity, see Myth 8, as well as 3D integrated memory (Domke et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2022) shifts the balance again and may alleviate many of these aspects, at least at the scale of a single chip.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' It seems key to understand the malleability of application, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='e.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', which resources can be traded for which other resources (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', memory capacity for computation bandwidth using recomputation or caching as techniques).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Prepared using sagej.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='cls Matsuoka, Domke, Wahib, Drozd, Hoefler 7 Here, specifically I/O complexity analysis is a tool to deeply understand this trade-off.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Once all trade-offs are understood, requirements models (Calotoiu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2018) could be used to fix trade-offs into designs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' These models could then inform architectural choices as well as hardware developments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' One area to highlight in this context is embedded design where such trade-offs have long been used to build real systems due to resource scarcity (e.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', battery).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' While those designs were initially limited to very narrow application domains (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', radio signal, audio, or video processing), embedded devices have recently been expanded towards more diverse workloads (“apps”).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We believe that HPC can learn from this field by defining clear system design methodologies based on a solid combination of empirical and analytical modeling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' More particularly, systems design in HPC can benefit from the embedded systems doctrine of accounting for over-engineering just as one accounts for under-engineering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We close with these questions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' x When will the current “data movement” focus end?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' y What will be the next bottleneck resource?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' z Will our community be able to adopt a performance modeling discipline to discuss bottlenecks scientifically?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Myth 8: Everything Will Be Disaggregated!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' To stop the waste of memory resources, the academic com- munity is advancing on the Silicon Photonics front (Gonzalez et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2022) and industry is pursuing scale-out technologies (Li et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2022), such as Compute Express LinkTM (CXL), a cache-coherent interconnect for data centers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' But a few players seem to push the idea over the edge with their plans to disaggregate everything (NTT R&D 2020;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Shan et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' As Gonzalez et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' (2022) stated: “An optical interconnect is more appealing than an electrical interconnect for memory disaggregation due to three properties: its (1) high bandwidth density significantly reduces the number of IO lanes, (2) power consumption and crosstalk do not increase with distance, and (3) propagation loss is low.” However, several barriers remain before we can fully replace copper- based interconnects in our supercomputers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Generally, we see two remaining challenges for a broad adoption of Silicon Photonics and all-optical interconnects: low-cost manufacturing and optical switching.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The former is obvious, because after all, the data center and HPC community relies on inexpensive components to optimize the overall system performance-to-cost ratio.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The latter challenge is less obvious for the uninitiated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Current electrically switched networks can operate in “packet switching” mode to effectively lower the observable latency and utilize the available link bandwidth.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The alternative to this mode is “circuit-switching” and it was abandoned by the HPC community long ago in favor of packet-switching.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' However, without (cost-)effective means to buffer light, process photon headers in-flight, or reverting to electric switches with expensive optical-electrical-optical conversions, we would have to resort to circuit-switching (Bergman et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2022) with all the inherent deficiencies: complex traffic steering calculations, switching delays, latency increase due to lack of available paths, under-utilization of links, just to name some.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' For HPC, an extensive or extreme disaggregation yields another challenge, specifically the speed of light.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Photons travel at a maximum speed of 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='3 ns/m in hollow fibers (or slower in other transport media).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' This is equivalent to a level-2 cache access of a modern CPU, but does not yet include the disaggregation overhead, such as from the CXL protocol itself, switching, or optical-electrical conversions at the endpoints.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' At 3–4 m distance, the photon travel time alone exceeds the first-word access latency of modern DDR memory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Therefore, if main memory would be disaggregated beyond rack boundaries, it will become noticeable for memory- latency sensitive applications, cf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Myth 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The more sensible solution, in line with Myth 7, for future HPC systems are smaller node-local memory configurations (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', HBM3) paired with rack-local, CXL-based memory pools if the capacity- and performance-to-cost ratios of the memory pool plus required interconnect can outperform node-local SSD solutions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We close with these questions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' x Will CXL be deployed widely in HPC?' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' y Will large- scale supercomputers be disaggregated beyond rack- scale?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' z Should we disaggregate main memory?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Myth 9: Applications Continue to Improve, Even on Stagnating Hardware!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Modernizing hardware, with Silicon Photonics, Tensor Cores, or simply shrinking transistors, has too long been the primary method of accelerating legacy software.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' More than half of this improvement was based on Moore’s law and its observation that transistors will continue to become smaller every few years (originally 18 months).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The remaining hardware improvements came from architectural innovations, such as deeper cache hierarchies, the migration to more specialized architectures (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', GPUs), or the utilization of larger and wider vector-units (SIMD), as well as scaling the HPC systems up by giving them more processors and cores.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Unfortunately, we are no longer seeing the consistent technology scaling that Moore observed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Consequently, in the so-called Post-Moore era, the “performance road” forks three-ways, yielding the following options: (1) architectural innovations will attempt to close the performance gap, and an explosion of diverging architectures tailored for specific science domains will emerge, or (2) alternative materials and technologies (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', non-CMOS technologies) that allow the spirit of Moore’s law to continue for a foreseeable future, or (3) we abandon the von-Neumann paradigm together and move to a neuromorphic or quantum-like computer (which, in time, might or might not become practical as discussed in Myth 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' One major aspect that reflects the uncertainty about the future is the initiatives of unprecedented scale: CHIPS act in the US and similar initiatives in other countries in the order of 100s Billion USD, quantum computing initiatives in the order of 10s Billion USD, etc.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' But one point that is often overlooked is that algorithmic improvements in HPC (dubbed as “Algorithmic Moore’s Law” by Keyes (2022)) have over time provided exponential improvement in key areas of HPC, see Figure 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Similar reports attribute a significant portion of the performance Prepared using sagej.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='cls ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='8 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='arXiv preprints ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='higher ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='order AMR ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='10000 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='100000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1000000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='10000000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='100000000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1980 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1990 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='2000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='2010 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='2020 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='Effective Sustained Speedup ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content="Algorithmic Moore's Law Examples " metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='101 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='102 ' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='103 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='104 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='105 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='106 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='107 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='108 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='Sustained Speed in Gflop/s ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='Combustion Simulation ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='(Complex Kinetics) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='Combustion Simulation ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='(CFD) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='COSMO Climate Model ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='Fusion Energy Simulation ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='(Global MHD) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='Moore’s Law ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='Fusion Energy Simulation ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='(Micro-turbulence) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='improved ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='linear solver ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='ARK integrator ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='complex chem ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='AMR ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='semi-implicit ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='high-order ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='elements ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='gyro- ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='kinetics ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='delta-f,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' magnetic coordinates improved electron models low Mach auto-code high order improved explicit/implicit solvers Figure 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Examples of “Algorithmic Moore’s Law” for different areas in HPC;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Fusion energy and combustion simulations data by Keyes (2022) and climate simulation data by Schulthess (2016) improvement in many legacy codes to be from numerical solvers, algorithms, low-precision numerics, system software, etc Schulthess (2016).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' However, we have to be cautious that— just as hardware improvements have physics and engineering limits—the “Algorithmic Moore’s Law” also has its own limits: numerical stability, hitting asymptotic limits, etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' That being said, those limits might not be as clear and quantifiable as the limits on hardware.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' That is since even if one numerical method hits its limit, domain experts can often reduce/pre- condition their problem to another numerical method that is more efficient.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We close with these questions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' x As the performance improvements from hardware technologies drop, should the HPC community dramat- ically increase the investment in software?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' y Will the “Algorithmic Moore’s Law” end soon as well?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' z To what extent is the HPC community willing to refactor/rewrite legacy codebases when/if hardware stagnates?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Myth 10: Fortran Is Dead, Long Live the DSL!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Applications might have limits, but what about languages.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' How often have we heard “Fortran is dead, long live X”?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Slogans like this have been resonating in the community for nearly 40 years (Post 1982).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' X has been everything from C to C++, and more recently Python or Domain-Specific Languages (DSLs).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Yet, Fortran remains in wide use in important communities such as weather and climate even for newly written codes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Other languages, such as COBOL were indeed replaced with more modern alternatives such as Java.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Why is this?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Are some parts of our community just stubborn to follow the youngsters?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Or are old languages not necessarily bad for the task?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Indeed, Fortran is a very well designed language for its purpose of expressing mathematical programs at highest performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' It seems hard to replace it with C or other languages and outperform it or even achieve the same baseline.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' This may be due to the highly optimized Fortran compilers or the limited language features (e.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', no pointer aliasing) that enable more powerful optimizations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Fortran and other general-purpose languages remain competitive with many DSLs on CPUs (Ben-Nun et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2022) and are recently also adopted to GPUs, albeit often less elegant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' General-purpose portability approaches such as SYCL (Keryell et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2015), also powering Intel’s oneAPI, or OpenMP provide flexibility as well as some portability across devices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' High-productivity general-purpose languages are hard to accelerate in practice.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' For example, Python’s flexibility (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', monkey patching and flexible typing) disables many static optimizations.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' However, when restricting the syntax to high-performance Python (much of NumPy), then optimizations become simpler (Ziogas et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Any language becomes more complex over time—Fortran 66 evolved into the complex Fortran 2018 language standard.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Similar trends affect DSLs that are widening their scope over time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Do we require this generality?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' If yes, then DSLs are doomed to fail or they morph into general-purpose languages.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Another argument is that the lower levels usually remain C/C++ and programmers interested in highest performance are often happy to dig into the lower levels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Then the question remains—where should the portability layer be located?' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' At a (virtualized) Instruction Set Architecture (ISA) as in LLVM’s IR (Lattner and Adve 2004), some lower-level language such as C/C++ as in SYCL/oneAPI, or even dataflow graph representations as in DaCe (Ben-Nun et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2019)?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We close with these questions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' x When will programmers stop using Fortran for new applications?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' y Will we ever have more application codes written in DSLs than general-purpose languages?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' z What will be the next big DSL?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Myth 11: HPC Will Pivot to Low or Mixed Precision!' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' A high-performance language is nothing without proper data types, but high-precision operations such as fp64 come at a significant cost in terms of silicon area, energy and speed, according to Myth 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Lowering this precision can save costs Prepared using sagej.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='cls Matsuoka, Domke, Wahib, Drozd, Hoefler 9 but may reduce accuracy of the results and, in the worst case, break the application (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', convergence).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' But there is more to this trade-off: what if a more clever implementation could maintain convergence properties of high precision numerics, while enjoying computational efficiency of low precision?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' One common trick is using mixed precision on the algorithmic level, for example, using low precision for individual particles and only using high precision for aggregated values (Kutzner et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2019).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Some processors offer mixed precision tricks at the hardware level in the form of instructions with low precision inputs but higher precision accumulations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' There is however more to reduced precision than using fewer bits—the question is how to optimally distribute bits between mantissa and exponent (Tesla, Inc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 2021), or even if to use an entirely different (not IEEE-754) way to represent numbers (Gustafson and Yonemoto 2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The story of reduced precision in AI hardware is quite telling: In early days of the field, predominantly the IEEE fp32 format was used, but knowing that in deep neural nets the weights and activations are typically distributed on a small range of values, researchers began to explore the fp16 format.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Soon the Pascal generation of GPUs with fp16 performance—at a factor of two compared to fp32 was released—and the magic did not happen by itself.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Exploding and vanishing gradients, outlier weights, etc.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', made training large deep neural nets require extra effort to stabilize (incurring corresponding overhead) or just did not converge at all.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The next generation of devices came with bfloat16 format—same 16 bits, but more bits allocated to range, less for precision.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' It worked better, but still once in a while a model would collapse.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Finally, the recent generation of GPUs came with a 19-bit numeric format, misleadingly called TensorFloat-32.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' So far it seems to be at the sweet spot for artificial intelligence workloads—allowing for noticeably faster arithmetics than fp32, while maintaining enough numeric stability for the models to reliably converge without extra programming effort.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Now that mixed precision is a de-facto standard in the AI domain, more hardware support is being implemented.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' So far there is no general clarity on the limits—how few bits can we get away with in different HPC areas.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The following factors in particular are important to consider as we move forward.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' A fully transparent solution for the problem is to simulate higher precision using low precision operations, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', as shown by Ootomo and Yokota (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Our Myth 4’s memory-bound problems in particular are good candidates for exploiting “simulated” high precision, since the overhead can be masked by data transfers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' It is not clear however if this incurred overhead is an acceptable price that HPC agrees to pay for remaining in higher precision.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' A less transparent method is to approach the problem as precision auto-tuning task by adapting the precision to a minimum while bounding the error, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=', as demonstrated by Menon et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' (2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' One main limitation of that method is the reliance on automatic differentiation (AD) to track error propagation, which is not practical for large codebases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Finally, the least transparent approach requires domain experts in HPC to study the numerical stability of solvers to identify, on a case-by-case basis, the susceptibility of solvers to lower/mixed precision.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' While this approach is viable for solvers that are wrapped in libraries to be consumed by HPC domain experts, it is unclear whether domain experts writing their own solvers (common in HPC) would be willing to take on this burden.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We close with these questions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' x Is the HPC community ready (or already late?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=') to react to the new low precision formats driven by deep learning?' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' y Will HPC navigate itself into a high-precision niche?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' z When, if ever, will the industry drop fp64 support?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Myth 12: All HPC Will Be Subsumed by the Clouds!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The rapidly advancing AI and new precision options has reignited the cloud discussion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The question whether clouds will subsume supercomputing has been ongoing for more than a decade, since the late 2000s Deelman et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' (2008), but remains inconclusive.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Today’s cloud offerings offer a wide spectrum for HPC customers, ranging from low-cost standard virtual machines to specialized top-gear HPC equipment in the cloud.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' It is not surprising that cloud providers offer exactly the same performance as on-prem supercomputing centers in practice De Sensi et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' (2022).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' After all, they simply buy the same hardware!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Thus, this discussion is more of a fiscal argument with an interesting economy-of-scale twist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' There are actually bi-directional aspects to the cloud-vs- supercomputer argument.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' One is the so-called “cloudification of supercomputers”, and the latter being “supercomputifica- tion of clouds”, but they often get mixed-up leading to the confusions in the discussions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We must look at both aspects, and it is in fact the latter where such subsumption may happen or not.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The former, “cloudification of supercomputers”, is an unmistakable trend, in that various software stack features and APIs are added so that supercomputers effectively become high end compute resources in the same manner as commercial clouds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Indeed, many major supercomputers are already facilitating cloud features, so that they are effectively clouds themselves, and interoperable with commercial clouds.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' However, this assumes that there is already a supercomputing resource facilitated for themselves, and does not directly affect the subsumption argument.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The latter, or “supercomputification of clouds”, is where subsumption may happen, in that clouds nowadays can support features as well as performances of dedicated supercomputers directly, such that they are directly amenable as their replacement.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Certainly, there are now multiple cloud services that facilitate virtual compute clusters in the cloud.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' However, although Intersect 360 reports that HPC-in-the-cloud CAGR has been dramatic, over 80% in 2021 Intersect360 Research (2022), it also reports the overall high growth in the HPC market, especially in the high end, and also projects that, the growth in the cloud HPC market will flatten over time to be consistent with the overall HPC industry growth.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Continued investments by all major global regions in exascale machines and beyond, coupled with companies facilitating their own top-ranked machines, will likely continue to fuel the on-prem infrastructure growth.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In fact, for enterprise IT infrastructures, there has always been a swing between on-prem and public clouds, largely Prepared using sagej.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='cls 10 arXiv preprints driven by economics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' While standing up comprehensive internal IT has become less attractive with multitudes of cloud services readily available in the cloud, so the CAPX for clouds would be cheaper, especially for small enterprises and startups, for large enterprises there is a tendency to move back to on-prem infrastructures, as the OPEX of clouds could be expensive.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The same could be the case of HPC increasingly as the whole field would pose continuous uprisings in economic viability for industry and societal benefits, thus being driven by economic metrics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' However, the variant of the subsumption scenario is that, although on-prem supercomputers continue to exist, processors and other hardware developments will be largely driven by enterprise HPC needs, currently dominated by AI / deep learning workloads.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The R&D expenditures of hyperscalers in IT now outclass the government investments, and increasingly the hyperscalers are investing in high end computing.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' If the commercial cloud hyperscalers can work out the scale of economy in their own hardware manufacturing to the extent that,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' it could build and operate large scale HPC infrastructures cheaper than on-prem supercomputers of any size,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' then the swing could totally happen towards full subsumption— although somewhat unlikely,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' this could compromise the ability to cover some of the traditional HPC workloads that do not meet main industrial needs,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' such as the requirement for dense 64 bit linear algebra capabilities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' We close with these questions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' x What could be a defining development to decide between cloud and on-prem HPC?' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' y When will more than half of the HPC cycles be spent in the cloud?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' z Will on-prem systems be a niche or remain with a significant fraction of HPC cycles spent?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Conclusions Many myths shape the discussions in the HPC community today—in this work, we debate some of those and hope to stir up arguments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' While we present them in an exaggerated and humorous way, many of those myths form the core of thinking in our community.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Some may be more divisive than others but it seems that many are hard to answer definitively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Maybe some points will settle in the future while others will not.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Yet, their sheer importance mandates a serious treatment in order to help guide future directions for academic research but also industry and government investment.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' References Allied Market Research (2020) Graphic Processing Unit (GPU) Market by Type (Dedicated, Integrated, and Hybrid), Device (Computer, Tablet, Smartphone, Gaming Console, Television, and Others), Industry Vertical (Electronics, IT & Telecommu- nication, Defense & Intelligence, Media & Entertainment, and Others): Global Opportunity Analysis and Industry Forecast, 2020-2027.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='alliedmarketresearch.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' com/graphic-processing-unit-market.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Allied Market Research (2022) Field Programmable Gate Array Market by Technology (EEPROM, Antifuse, SRAM, Flash, and Others (EPROM and PROM)), Application (Data Processing, Consumer Electronics, Industrial, Mil- itary & Aerospace, Automotive, Telecom, and Others), and Type (High-end, Mid-end, and Low-end FPGA): Global Opportunity Analysis and Industry Forecast, 2021- 2030.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='alliedmarketresearch.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='com/ field-programmable-gate-array-market.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Aytekin C (2022) Neural Networks are Decision Trees.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https: //arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='org/abs/2210.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='05189.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Bell G, Bailey DH, Dongarra J, Karp AH and Walsh K (2017) A look back on 30 years of the Gordon Bell Prize.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' The International Journal of High Performance Computing Applications 31(6): 469–484.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1177/1094342017738610.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Ben-Nun T, de Fine Licht J, Ziogas AN, Schneider T and Hoefler T (2019) Stateful Dataflow Multigraphs: A Data-Centric Model for Performance Portability on Heterogeneous Architectures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In: Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis, SC ’19.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' New York, NY, USA: Association for Computing Machinery.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' ISBN 978-1-4503-6229-0, p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1145/ 3295500.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='3356173.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Ben-Nun T, Groner L, Deconinck F, Wicky T, Davis E, Dahm J, Elbert OD, George R, McGibbon J, Tr¨umper L, Wu E, Fuhrer O, Schulthess T and Hoefler T (2022) Productive Performance Engineering for Weather and Climate Modeling with Python.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In: Proceedings of the International Conference on High Performance Computing, Networking, Storage and Analysis, SC ’22.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' IEEE Press.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' ISBN 9784665454445, p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 14.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='5555/3571885.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='3571982.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Bender EM, Gebru T, McMillan-Major A and Shmitchell S (2021) On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In: Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency, FAccT ’21.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' New York, NY, USA: Association for Computing Machinery.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' ISBN 978-1-4503-8309-7, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 610–623.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1145/3442188.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 3445922.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Bergman K, Beausoleil R and Milojicic D (2022) Silicon Photonics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Computer 55(04): 78–81.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1109/MC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='3148491.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Beverland ME, Murali P, Troyer M, Svore KM, Hoefler T, Kliuchnikov V, Low GH, Soeken M, Sundaram A and Vaschillo A (2022) Assessing requirements to scale to practical quantum advantage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='48550/ arXiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='2211.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='07629.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Bi K, Xie L, Zhang H, Chen X, Gu X and Tian Q (2022) Pangu- Weather: A 3D High-Resolution Model for Fast and Accurate Global Weather Forecast.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https://arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='org/abs/ 2211.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='02556.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Brenowitz ND and Bretherton CS (2018) Prognostic Validation of a Neural Network Unified Physics Parameterization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Geophysical Research Letters 45(12): 6289–6298.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 1029/2018GL078510.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Calotoiu A, Graf A, Hoefler T, Lorenz D, Rinke S and Wolf F (2018) Lightweight Requirements Engineering for Exascale Co-design.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In: IEEE International Conference on Cluster Computing, CLUSTER 2018, Belfast, UK, September 10-13, 2018.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' IEEE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' ISBN 978-1-5386-8319-4, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 201–211.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1109/CLUSTER.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='00038.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Cutress I (2022a) Interview with Intel’s Raja Koduri: Zettascale or ZettaFLOP?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Metaverse what?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https: //www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='anandtech.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='com/show/17298/interview- with-intels-raja-koduri-zettascale-or- Prepared using sagej.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='cls Matsuoka, Domke, Wahib, Drozd, Hoefler 11 zettaflop-metaverse-what.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Cutress I (2022b) The key to Intel’s ZF (ZettaFLOP, 1ZF FP64 system), is the power.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Trying to keep within the 40-100 MW per Supercomputer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https://twitter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='com/IanCutress/ status/1599452910743478273.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Czechowski K, Battaglino C, McClanahan C, Chandramowlishwaran A and Vuduc R (2011) Balance Principles for Algorithm- Architecture Co-Design.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In: Proceedings of the 3rd USENIX Conference on Hot Topic in Parallelism, HotPar’11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' USA: USENIX Association, p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 9.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' De Sensi D, De Matteis T, Taranov K, Di Girolamo S, Rahn T and Hoefler T (2022) Noise in the Clouds: Influence of Network Performance Variability on Application Scalability.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Proc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' ACM Meas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Anal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Comput.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Syst.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 6(3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1145/3570609.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Deelman E, Singh G, Livny M, Berriman B and Good J (2008) The cost of doing science on the cloud: The Montage example.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In: SC ’08: Proceedings of the 2008 ACM/IEEE Conference on Supercomputing.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 1–12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1109/SC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='2008.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='5217932.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Domke J, Vatai E, Drozd A, Peng C, Oyama Y, Zhang L, Salaria S, Mukunoki D, Podobas A, Wahib M and Matsuoka S (2021) Matrix Engines for High Performance Computing: A Paragon of Performance or Grasping at Straws?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In: 2021 IEEE International Parallel and Distributed Processing Symposium, IPDPS 2021, Portland, Oregon, USA, May 17-21, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Portland, Oregon, USA: IEEE Press, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 1056–1065.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI: 10.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1109/IPDPS49936.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='00114.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Domke J, Vatai E, Gerofi B, Kodama Y, Wahib M, Podobas A, Mittal S, Peric`as M, Zhang L, Chen P, Drozd A and Matsuoka S (2022) At the Locus of Performance: A Case Study in Enhancing CPUs with Copious 3D-Stacked Cache.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https: //arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='org/abs/2204.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='02235.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Frantar E, Ashkboos S, Hoefler T and Alistarh D (2022) GPTQ: Accurate Post-Training Quantization for Generative Pre- trained Transformers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https://arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='org/abs/2210.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 17323.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Gonzalez J, G Palma M, Hattink M, Rubio-Noriega R, Orosa L, Mutlu O, Bergman K and Azevedo R (2022) Optically Connected Memory for Disaggregated Data Centers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Journal of Parallel and Distributed Computing 163(C): 300–312.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1016/j.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='jpdc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='01.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Gustafson JL (1988) Reevaluating Amdahl’s Law.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Communications of the ACM 31(5): 532–533.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1145/42411.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='42415.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Gustafson JL and Yonemoto IT (2017) Beating Floating Point at its Own Game: Posit Arithmetic.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Supercomputing Frontiers and Innovations 4(2): 71–86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='14529/jsfi170206.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Haus UU (2021) The Brave New World of Exascale Computing: Computation Is Free, Data Movement Is Not.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https://minoa-itn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='fau.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='de/wp-content/ uploads/2021/03/TRR154-MINOA-20210303.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='pdf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Hoefler T (2022) Benchmarking Data Science: 12 Ways to Lie With Statistics and Performance on Parallel Computers.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Computer 55(8): 49–56.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1109/MC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='3152681.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Hoefler T, Haener T and Troyer M (2023) Disentangling Hype from Practicality: On Realistically Achieving Quantum Advantage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Communications of the ACM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Huang L and Hoefler T (2022) Compressing multidimensional weather and climate data into neural networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https:// arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='org/abs/2210.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='12538.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' IEEE IRDS™ (2021) International Roadmap for Devices and Systems (IRDS™) 2021 Edition – Executive Summary.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' IEEE IRDS™ Roadmap, IEEE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https://irds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='ieee.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='org/ images/files/pdf/2021/2021IRDS_ES.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='pdf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Intersect360 Research (2022) Worldwide HPC and AI Training Market, 2021 Actuals, 2022-26 Forecast.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='intersect360.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='com/wp-content/ uploads/Webinar-Intersect360-WW-HPC-AI- Unified-2021-market-size-and-2022-26- forecast.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='pdf.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Ivanov A, Dryden N, Ben-Nun T, Li S and Hoefler T (2021) Data Movement Is All You Need: A Case Study on Optimizing Transformers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In: Proceedings of Machine Learning and Systems 3 (MLSys 2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Kasim MF, Watson-Parris D, Deaconu L, Oliver S, Hatfield P, Froula DH, Gregori G, Jarvis M, Khatiwala S, Korenaga J, Topp- Mugglestone J, Viezzer E and Vinko SM (2021) Building high accuracy emulators for scientific simulations with deep neural architecture search.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Machine Learning: Science and Technology 3(1): 015013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1088/2632-2153/ac3ffa.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Keryell R, Reyes R and Howes L (2015) Khronos SYCL for OpenCL: A Tutorial.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In: Proceedings of the 3rd International Workshop on OpenCL, IWOCL ’15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' New York, NY, USA: Association for Computing Machinery.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' ISBN 978-1-4503-3484-6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1145/2791321.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='2791345.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Keyes D (2022) Efficient Computation through Tuned Approx- imation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https://siag-sc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='org/media/files/DK- slides.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='pdf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Kutzner C, P´all S, Fechner M, Esztermann A, de Groot BL and Grubm¨uller H (2019) More bang for your buck: Improved use of GPU nodes for GROMACS 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Journal of Computational Chemistry 40(27): 2418–2431.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1002/jcc.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='26011.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Larsen ES and McAllister D (2001) Fast Matrix Multiplies Using Graphics Hardware.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In: Proceedings of the 2001 ACM/IEEE Conference on Supercomputing, SC ’01.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' New York, NY, USA: Association for Computing Machinery.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' ISBN 1-58113-293-X, p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 55.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1145/582034.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='582089.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Lattner C and Adve V (2004) LLVM: A compilation framework for lifelong program analysis & transformation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In: International Symposium on Code Generation and Optimization, 2004.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' CGO 2004.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 75–86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1109/CGO.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='2004.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1281665.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Lavin A, Krakauer D, Zenil H, Gottschlich J, Mattson T, Brehmer J, Anandkumar A, Choudry S, Rocki K, Baydin AG, Prunkl C, Paige B, Isayev O, Peterson E, McMahon PL, Macke J, Cranmer K, Zhang J, Wainwright H, Hanuka A, Veloso M, Assefa S, Zheng S and Pfeffer A (2021) Simulation Intelligence: Towards a New Generation of Scientific Methods.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https: //arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='org/abs/2112.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='03235.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Lee VW, Kim C, Chhugani J, Deisher M, Kim D, Nguyen AD, Satish N, Smelyanskiy M, Chennupaty S, Hammarlund P, Singhal R and Dubey P (2010) Debunking the 100X GPU vs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' CPU Myth: An Evaluation of Throughput Computing on CPU and GPU.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In: Proceedings of the 37th Annual International Symposium on Computer Architecture, ISCA ’10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' New York, NY, USA: Association for Computing Machinery.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' ISBN 978-1-4503-0053- 7, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 451–460.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1145/1815961.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1816021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Li H, Berger DS, Novakovic S, Hsu L, Ernst D, Zardoshti P, Shah M, Rajadnya S, Lee S, Agarwal I, Hill MD, Fontoura M and Bianchini R (2022) Pond: CXL-Based Memory Pooling Systems for Cloud Platforms.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https://arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='org/abs/ 2203.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='00241.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Prepared using sagej.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='cls 12 arXiv preprints Liu Z and Tegmark M (2021) Machine Learning Conservation Laws from Trajectories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Physical Review Letters 126(18): 180604.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1103/PhysRevLett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='126.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='180604.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Macrotrends LLC (2022) NVIDIA Profit Margin 2010- 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https://www.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='macrotrends.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='net/stocks/ charts/NVDA/nvidia/profit-margins.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Matsuoka S (2008) The Rise of the Commodity Vectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In: Palma JMLM, Amestoy PR, Dayd´e M, Mattoso M and Lopes JC (eds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=') High Performance Computing for Computational Science VECPAR 2008.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Berlin, Heidelberg: Springer Berlin Heidelberg.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' ISBN 978-3-540-92859-1, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 53–62.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1007/978-3-540- 92859-1 7.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Menon H, Lam MO, Osei-Kuffuor D, Schordan M, Lloyd S, Mohror K and Hittinger J (2018) ADAPT: Algorithmic Differentiation Applied to Floating-Point Precision Tuning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In: SC18: International Conference for High Performance Computing, Networking, Storage and Analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 614–626.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1109/SC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='00051.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' NTT R&D (2020) What is the All-Photonics Network?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https: //www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='rd.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='ntt/e/iown/0002.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='html.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Ootomo H and Yokota R (2022) Recovering Single Precision Accuracy from Tensor Cores While Surpassing the FP32 Theoretical Peak Performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' International Journal of High Performance Computing Applications 36(4): 475–491.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1177/10943420221090256.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Pathak J, Subramanian S, Harrington P, Raja S, Chattopadhyay A, Mardani M, Kurth T, Hall D, Li Z, Azizzadenesheli K, Hassanzadeh P, Kashinath K and Anandkumar A (2022) FourCastNet: A Global Data-driven High-resolution Weather Model using Adaptive Fourier Neural Operators.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https: //arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='org/abs/2202.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='11214.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Post E (1982) Real Programmers Don’t Use PASCAL.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' http:// www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='ee.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='ryerson.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='ca/˜elf/hack/realmen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='html.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Rasp S, Pritchard MS and Gentine P (2018) Deep learning to represent subgrid processes in climate models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Proceedings of the National Academy of Sciences - PNAS 115(39): 9684– 9689.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1073/pnas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1810286115.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Schulthess TC (2016) Exascale computing: Endgame or new beginning for climate modelling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https://www.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='ecmwf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' int/sites/default/files/elibrary/2016/ 16804-exascale-computing-endgame-or-new- beginning-climate-modelling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='pdf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Shan Y, Lin W, Guo Z and Zhang Y (2022) Towards a Fully Disaggregated and Programmable Data Center.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In: Proceedings of the 13th ACM SIGOPS Asia-Pacific Workshop on Systems, APSys ’22.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' New York, NY, USA: Association for Computing Machinery.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' ISBN 978-1-4503-9441-3, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 18–28.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1145/3546591.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='3547527.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Shaw DE, Deneroff MM, Dror RO, Kuskin JS, Larson RH, Salmon JK, Young C, Batson B, Bowers KJ, Chao JC, Eastwood MP, Gagliardo J, Grossman JP, Ho CR, Ierardi DJ, Kolossv´ary I, Klepeis JL, Layman T, McLeavey C, Moraes MA, Mueller R, Priest EC, Shan Y, Spengler J, Theobald M, Towles B and Wang SC (2008) Anton, a Special-Purpose Machine for Molecular Dynamics Simulation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Commun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' ACM 51(7): 91–97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1145/1364782.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1364802.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Sorensen B, Norton A, Joseph E and Conway S (2019) Special Report for NASA: Exploring Options for a Bespoke Supercomputer Targeted for Weather and Climate Workloads.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Technical Report HR4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='0046.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='10.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='01.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='2019, Hyperion Research, LLC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https://hec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='nasa.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='gov/news/reports/ NASA-Hyperion-Research-Bespoke-Weather- HPC-Full-Report.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='pdf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Strohmaier E, Dongarra J, Simon H and Meuer M (2022) TOP500.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' http://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='top500.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='org/.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Tesla, Inc (2021) Whitepaper: Tesla Dojo Technology – A Guide to Tesla’s Configurable Floating Point Formats & Arith- metic.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https://tesla-cdn.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='thron.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='com/static/ MXMU3S_tesla-dojo-technology_1WDVZN.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='pdf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' White MJ (2022) Nvidia says falling GPU prices are ’a story of the past’.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='digitaltrends.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='com/ computing/nvidia-says-falling-gpu-prices- are-over/.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Wu CJ, Hsiao ST, Wang JY, Lin WH, Chang CW, Shao TL, Tung CH and Yu DCH (2021) Ultra High Power Cooling Solution for 3D-ICs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In: 2021 Symposium on VLSI Technology.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 1–2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Ziogas AN, Schneider T, Ben-Nun T, Calotoiu A, De Matteis T, de Fine Licht J, Lavarini L and Hoefler T (2021) Productivity, Portability, Performance: Data-Centric Python.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' In: Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis, SC ’21.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' New York, NY, USA: Association for Computing Machinery.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' ISBN 978-1-4503-8442-1, p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' 13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' DOI:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='1145/ 3458817.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='3476176.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content=' Prepared using sagej.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'} +page_content='cls' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/G9E0T4oBgHgl3EQfhgFS/content/2301.02432v1.pdf'}