diff --git "a/EdAzT4oBgHgl3EQfG_ss/content/tmp_files/load_file.txt" "b/EdAzT4oBgHgl3EQfG_ss/content/tmp_files/load_file.txt" new file mode 100644--- /dev/null +++ "b/EdAzT4oBgHgl3EQfG_ss/content/tmp_files/load_file.txt" @@ -0,0 +1,1031 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf,len=1030 +page_content='High-Quality Supersampling via Mask-reinforced Deep Learning for Real-time Rendering Hongliang Yuan1, Boyu Zhang1,2, Mingyan Zhu1,3, Ligang Liu4, Jue Wang1 1Tencent AI Lab, 2Southeast University, 3Tsinghua University, 4University of Science and Technology of China 11488336@qq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='com, byz@seu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='cn, zmy20@mails.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='tsinghua.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='cn, lgliu@ustc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='cn, maxjwang@tencent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='com (a) 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='25-spp input (b) NSRR (c) RAE (d) Ours (e) Ground truth Figure 1: Left to right: (a) noisy image generated using hybrid path-tracer at 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='25 sample per pixel;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content=' (b) Neural supersampling network [Xiao et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content=' 2020] (10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='3ms at 1024 × 2048, SSIM: 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='7737);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content=' (c) RAE [Chaitanya et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content=' 2017] (6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='5ms, SSIM: 0.' 
ABSTRACT

To generate high-quality rendered images for real-time applications, it is common to trace only a few samples per pixel (spp) at a lower resolution and then supersample to the high resolution. Based on the observation that the rendered pixels at a low resolution are typically highly aliased, we present a novel method for neural supersampling based on ray tracing 1/4-spp samples at the high resolution. Our key insight is that the ray-traced samples at the target resolution are accurate and reliable, which turns supersampling into an interpolation problem. We present a mask-reinforced neural network to reconstruct and interpolate high-quality image sequences. First, a novel temporal accumulation network is introduced to compute the correlation between current and previous features, which significantly improves their temporal stability. Then a reconstruction network based on a multi-scale U-Net with skip connections is adopted to reconstruct and generate the desired high-resolution image.
Experimental results and comparisons show that our proposed method can generate higher-quality supersampling results than current state-of-the-art methods, without increasing the total number of ray-tracing samples.

KEYWORDS

Monte Carlo denoising, neural networks, path tracing

1 INTRODUCTION

Rendering noise-free Monte Carlo (MC) ray-traced images at real-time frame rates is still challenging. Despite the wide use of modern RTX GPU accelerators, only a few rays per pixel can be traced at the target resolution in real-time applications, resulting in severe noise in the renderings. The most efficient strategy is to denoise and reconstruct the rendering results in image space, usually as a post-process pass of a physically based renderer. Until recently, most MC denoisers were based on convolutional neural networks (CNNs).
Chaitanya et al. [Chaitanya et al. 2017] proposed a recurrent model for interactive applications that targets images rendered with low sample counts (1~4 spp). In addition, the NVIDIA OptiX ray-tracing engine introduced an AI-accelerated denoiser based on this work. We also developed a hybrid ray tracer based on Vulkan, which we use to export training datasets. The source code of our ray tracer and the paper will be available soon. Işık et al. [Işık et al. 2021] adopt dilated spatial kernels to filter the noisy image, guided by pairwise affinity over the features, and target the low-sample-count regime (2~8 spp). Meng et al. [Meng et al. 2020] also denoise 1-spp noisy input images with a neural bilateral grid at real-time frame rates. Hasselgren et al. [Hasselgren et al. 2020] proposed a neural temporal adaptive sampling method for denoising image sequences rendered at 4 spp. Fan et al. [Fan et al. 2021] extend the kernel-prediction method to remove noise at low spp (more than one) under a strict time budget.
All of these state-of-the-art denoising and reconstruction methods aim at removing noise from images rendered with at least 1 spp. In this paper, we propose a novel approach that reconstructs sub-1-spp renderings at real-time frame rates. Following traditional temporal anti-aliasing (TAA) [Karis 2014], our method uses renderer-generated motion vectors to warp previous frames and accumulates sparse samples from the previous frame, weighted by a temporal accumulation factor computed from the correlation between the current and previous frames, effectively increasing the number of samples per pixel. The module can also detect ghosting artifacts in disocclusion regions and remove mismatched pixels in regions with inconsistent shading. Işık et al. [Işık et al. 2021] also compute a per-pixel temporal accumulation factor with a neural network, but they concatenate the features of the current and previous frames and feed them into the network together. Compared to this method, our method produces more temporally stable and higher-quality results. After accumulating the sparse samples, we use a residual block [He et al. 2015] to fuse the accumulated features. Then we implement a multi-scale U-Net [Ronneberger et al. 2015a] with skip connections as the reconstruction subnetwork. The multi-scale prediction network is similar to the method suggested by Vogels et al. [Vogels et al. 2018], which uses kernel prediction.
We directly predict the denoised image for the current frame plus two additional channels used as blending factors. We also predict a 2× downscaled image from the last-but-one layer. We composite the final denoised image from the current denoised image, the warped previous image, and the 2× downscaled image. Comprehensive experimental results show that our approach reconstructs 0.25-spp images well at real-time frame rates. To summarize, our contributions are the following:

• We introduce a temporally stable neural network that reconstructs image sequences rendered at 0.25 spp at real-time frame rates. To the best of our knowledge, we are the first to use 0.25-spp images as input to a neural network.
• A novel temporal accumulation network that computes the correlation between current and previous features to significantly improve the temporal stability of Monte Carlo denoising.
• Extensive experiments demonstrating that our method outperforms state-of-the-art methods both quantitatively and qualitatively.

2 RELATED WORK

Traditional best-performing MC denoisers were mainly based on local neighborhood regression models [Zwicker et al. 2015]. With the advent of powerful modern GPUs, many researchers have built their MC denoisers on CNNs. In this section, we mainly discuss CNN-based real-time denoising techniques, which are most related to our approach.
For a comprehensive study of deep learning-based MC denoising and reconstruction techniques, please refer to the recent survey by Huo and Yoon [Huo and Yoon 2021].

2.1 Image-space Methods

Traditional MC denoisers are based on zero-order regression [Delbracio et al. 2014; Kalantari et al. 2015; Li et al. 2012; Moon et al. 2013; Rousselle et al. 2012, 2013], first-order regression [Bauszat et al. 2011; Bitterli et al. 2016; Moon et al. 2014], and even higher-order regression models [Moon et al. 2016]. The filtering-based methods use auxiliary feature buffers to guide the construction of image-space filters. Most of the above methods target offline rendering.
To increase the effective sample count, real-time denoisers leverage temporal accumulation between frames to amortize supersampling over time [Yang et al. 2009], i.e., temporal anti-aliasing (TAA). The previous frame is reprojected according to the motion vectors and blended with the current frame using a temporal accumulation factor α. The factor α can be constant [Mara et al. 2017; Meng et al. 2020; Schied et al. 2017] or vary per frame and per pixel [Schied et al. 2018]. A fixed temporal accumulation factor inevitably leads to ghosting and temporal lag. By setting the parameter adaptively, the temporal filter can respond quickly to sudden changes between frames. Yang et al. [Yang et al. 2020] survey recent TAA techniques and provide an in-depth analysis of the image-quality trade-offs of these heuristics.
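As a concrete illustration of this reprojection-and-blend step, the sketch below shows a constant-α TAA accumulation in PyTorch. The warp helper, the motion-vector convention (screen-space pixel offsets pointing back in time), and the value α = 0.9 are illustrative assumptions, not taken from any of the cited systems.

```python
import torch
import torch.nn.functional as F

def warp(prev: torch.Tensor, motion: torch.Tensor) -> torch.Tensor:
    """Reproject the previous frame with per-pixel motion vectors.

    prev:   (1, C, H, W) previous frame.
    motion: (1, 2, H, W) motion vectors in pixels (assumed (x, y) order).
    """
    _, _, h, w = prev.shape
    ys, xs = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")
    grid = torch.stack((xs, ys), dim=-1).float()        # (H, W, 2), x first
    grid = grid - motion[0].permute(1, 2, 0)            # follow motion back in time
    grid[..., 0] = 2.0 * grid[..., 0] / (w - 1) - 1.0   # normalize to [-1, 1]
    grid[..., 1] = 2.0 * grid[..., 1] / (h - 1) - 1.0
    return F.grid_sample(prev, grid.unsqueeze(0), align_corners=True)

def taa_accumulate(curr, history, motion, alpha=0.9):
    """history' = alpha * warp(history) + (1 - alpha) * current frame."""
    return alpha * warp(history, motion) + (1.0 - alpha) * curr
```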
Koskela et al. [Koskela et al. 2019] propose blockwise regression for real-time path-tracing reconstruction and also perform accumulation to improve temporal stability.

2.2 CNN-based Monte Carlo Denoising

Recent deep learning denoisers [Bako et al. 2017; Vogels et al. 2018] use deep CNNs to estimate the local per-pixel filtering kernels used to compute each denoised pixel from its neighbors. Dahlberg et al. [Dahlberg et al. 2019] implemented the approach of [Vogels et al. 2018] as a practical production tool used on an animated feature film. The layer-based denoiser [Munkberg and Hasselgren 2020] designs hierarchical kernel prediction for multi-resolution denoising and reconstruction. Owing to the high computational cost of predicting large filtering kernels, these methods mostly target offline rendering. There are also other methods [Gharbi et al. 2019; Kuznetsov et al. 2018; Xu et al. 2019; Yu et al. 2021] that target denoising of renderings at more than 4 spp.
To reduce the overhead of kernel-prediction methods, Fan et al. [Fan et al. 2021] predict an encoding of the kernel map, followed by a high-efficiency decoder that constructs the complete kernel map. Chaitanya et al. [Chaitanya et al. 2017] proposed a recurrent connection based on U-Net [Ronneberger et al. 2015b] to improve temporal stability for sequences of sparsely sampled input images. Hasselgren et al. [Hasselgren et al. 2020] proposed a neural spatio-temporal joint optimization of adaptive sampling and denoising with a recurrent feedback loop. Hofmann et al. [Hofmann et al. 2021] also utilized the neural temporal adaptive sampling architecture to denoise renderings with participating media. Xiao et al. [Xiao et al. 2020] presented a neural supersampling method for TAA, similar to deep-learned supersampling (DLSS) [Edelsten et al. 2019].
Meng et al. [Meng et al. 2020] denoised 1-spp noisy input images with a neural bilateral grid at real-time frame rates. Işık et al. [Işık et al. 2021] adopted dilated spatial kernels to filter the noisy image, guided by pairwise affinity over the features. Compared with these real-time denoising frameworks targeting renderings with at least 1 spp, our method is designed to work with 0.25 spp.

3 SPARSE SAMPLING DENOISING

3.1 Problem Statement

Our goal is to reconstruct temporally stable video from 0.25-spp hybrid path-traced image sequences at real-time frame rates, and we achieve this with a supervised deep learning method. We use our hybrid path-traced renderer to generate a dataset D = {(c_1, f_1, r_1), ..., (c_N, f_N, r_N)}, where c is a noisy image rendered by sparse sampling, f is the set of auxiliary features (e.g., albedo, normal, depth, metallic, roughness, shadow, and transparency) obtained during rendering, and r is a reference image rendered with high spp. We train a deep neural function Φ with parameters θ to reconstruct the noise-free image. The loss function ℓ measures the difference between the denoised image and its reference image. We then minimize the loss with a gradient descent algorithm across the dataset D of N samples to obtain the optimal parameters:

    \hat{\theta} = \arg\min_{\theta} \sum_{i=1}^{N} \ell(\Phi(c_i, f_i), r_i)    (1)

The loss function combines four terms: spatial, temporal, relative edge, and albedo losses; see Section 3.4 for details.
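Equation (1) is a standard empirical-risk objective. The following sketch shows how it might be minimized with stochastic gradient descent; the model phi, the data loader, the optimizer choice, and the hyperparameters are illustrative assumptions rather than the authors' actual training code.

```python
import torch

def train(phi, loader, criterion, epochs=100, lr=1e-4):
    """Minimize Eq. (1): sum_i l(Phi(c_i, f_i), r_i) over dataset D."""
    opt = torch.optim.Adam(phi.parameters(), lr=lr)
    for _ in range(epochs):
        for noisy, feats, ref in loader:        # (c_i, f_i, r_i) triples
            denoised = phi(torch.cat([noisy, feats], dim=1))
            loss = criterion(denoised, ref)     # l, e.g. SMAPE (Sec. 3.4)
            opt.zero_grad()
            loss.backward()
            opt.step()
    return phi
```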
3.2 Sparse Sampling

We developed a hybrid ray tracer to generate our dataset. To accelerate ray tracing, we leverage a rasterization pipeline to obtain the first hit position from the camera and store its associated shading attributes, including albedo, normal, depth, motion vector, metallic, and roughness. After this rasterization pass, we trace a shadow ray to record the soft shadow attribute. If there are transparent materials in the scene, we also store the transparency attribute at the first hit position. We divide the full-resolution image into non-overlapping blocks of spatial size 2 × 2. At each frame we use the MC method to solve the rendering equation [Kajiya 1986] for one pixel in each block, while the other pixels remain zero; see Figure 2. If the camera is static, the radiance of all pixels is computed once every four frames. For image sequences, we use two-layer CNNs to accumulate history frames; see Section 3.3.1.

Figure 2: Sampling pattern. In frames t and t+1, we compute radiance for the top-left and top-right pixels, respectively. In frames t+2 and t+3, the bottom-left and bottom-right pixels are estimated, respectively.
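The interleaved pattern of Figure 2 amounts to selecting one pixel per 2 × 2 block per frame, cycling through the four positions. A minimal sketch (the function name and mask representation are ours, not the paper's):

```python
import numpy as np

# Per-frame offset inside each 2x2 block (Figure 2):
# frame t -> top-left, t+1 -> top-right, t+2 -> bottom-left, t+3 -> bottom-right.
OFFSETS = [(0, 0), (0, 1), (1, 0), (1, 1)]

def sample_mask(frame_idx: int, height: int, width: int) -> np.ndarray:
    """Boolean mask of the pixels ray-traced at this frame (0.25 spp)."""
    dy, dx = OFFSETS[frame_idx % 4]
    mask = np.zeros((height, width), dtype=bool)
    mask[dy::2, dx::2] = True   # one pixel per 2x2 block; the rest stay zero
    return mask
```

Under a static camera, the union of the masks for four consecutive frames covers every pixel, matching the statement that all radiance values are refreshed once every four frames.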
The input to our network is an 18-channel feature set, comprising 3-channel buffers (noisy image, albedo, normal, shadow, and transparency) and 1-channel buffers (depth, metallic, and roughness). Following prior work [Chaitanya et al. 2017], we demodulate the noisy RGB image by the albedo of the directly visible material, and the untextured irradiance x is transformed to log space, ln(1 + x). Unlike the prior method [Chaitanya et al. 2017], after the untextured irradiance has been reconstructed, we re-modulate it by the accumulated albedo predicted by our temporal accumulator network, which is our key module for producing temporally stable results.

3.3 Network Pipeline

In this section, we describe our method in detail, following Figure 3.

3.3.1 Temporal Accumulator. The temporal accumulator module contains two neural networks, each with two CNN layers. One network takes the normal and depth of the current frame as input and outputs a reference embedding. The other network computes embeddings for the current frame and the warped previous frame. These two embeddings are multiplied pixel-wise with the reference embedding, and a softmax(·) over the two scores yields the blending factors α and β (α + β = 1) for the previous and current features, respectively. We accumulate only the noisy image, shadow, and albedo (see Figure 4).
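A sketch of this module is given below. The 3 × 3 convolutions and 32-channel embedding width are our guesses; only the overall structure — two 2-layer CNNs, pixel-wise correlation against the reference embedding, and a softmax yielding α + β = 1 — follows the description above.

```python
import torch
import torch.nn as nn

class TemporalAccumulator(nn.Module):
    """Sec. 3.3.1 sketch: per-pixel blend factors from embedding correlation."""

    def __init__(self, feat_ch: int, emb_ch: int = 32):
        super().__init__()
        # Reference embedding from the current normal (3 ch) + depth (1 ch).
        self.ref_net = nn.Sequential(
            nn.Conv2d(4, emb_ch, 3, padding=1), nn.ReLU(),
            nn.Conv2d(emb_ch, emb_ch, 3, padding=1))
        # Shared embedding for the current and warped-previous features.
        self.emb_net = nn.Sequential(
            nn.Conv2d(feat_ch, emb_ch, 3, padding=1), nn.ReLU(),
            nn.Conv2d(emb_ch, emb_ch, 3, padding=1))

    def forward(self, curr, warped_prev, normal_depth):
        ref = self.ref_net(normal_depth)
        # Pixel-wise correlation of each embedding with the reference one.
        s_prev = (self.emb_net(warped_prev) * ref).sum(dim=1, keepdim=True)
        s_curr = (self.emb_net(curr) * ref).sum(dim=1, keepdim=True)
        alpha, beta = torch.softmax(
            torch.cat([s_prev, s_curr], dim=1), dim=1).chunk(2, dim=1)
        # Eq. (2): accumulated = alpha * W(f_prev) + beta * f_curr.
        return alpha * warped_prev + beta * curr
```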
Taking the shadow as an example, we use the following equation to accumulate it over frames:

    f^s_t = \alpha\, W(f^s_{t-1}) + \beta\, f^s    (2)

where f^s_t is the shadow accumulated up to frame t, f^s is the shadow buffer of frame t, and W(·) is a warping operator that reprojects the previous frame to the current one using the motion vectors. For the first frame, we set f^s_{t-1} = f^s.

3.3.2 Feature Fusion. After accumulating the image, shadow, and albedo, we concatenate the accumulated features with the normal, depth, transparency, metallic, and roughness buffers and feed them into a feature fusion network. Since our input image is sparse, this network fuses the features and spreads the signal across the spatial domain.

3.3.3 Reconstruction Network. Finally, the fused features and the warped denoised image of the previous frame are concatenated and fed into a reconstruction network, which outputs the high-quality image for the current frame. The reconstruction network details are given in Figure 3. Our network directly predicts the denoised fine image d^f for the current frame and two additional channels as blending factors, i.e., α^s and α^t. We also directly predict a 2× downscaled coarse image d^c from the last-but-one layer. We use the scale composition suggested by Vogels et al. [Vogels et al. 2018] to combine the fine and coarse images:

    O_p = d^f_p - \alpha^s_p\, [U D\, d^f]_p + \alpha^s_p\, [U d^c]_p    (3)

where D and U are 2 × 2-downsampling and nearest-neighbor upsampling operators. The filtered history O_{t-1} is linearly blended with the result O of the scale composition using α^t:

    O_t = \alpha^t\, O + (1 - \alpha^t)\, O_{t-1}    (4)
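Equations (3) and (4) translate directly into a few tensor operations. In the sketch below we stand in a 2 × 2 average pool for the downsampling operator D (the text does not specify the filter) and nearest-neighbor interpolation for U:

```python
import torch.nn.functional as F

def scale_compose(d_fine, d_coarse, alpha_s, alpha_t, prev_out):
    """Eqs. (3)-(4): replace low frequencies of the fine image with the
    coarse prediction where alpha_s is high, then blend with the history."""
    up = lambda x: F.interpolate(x, scale_factor=2, mode="nearest")   # U
    down = F.avg_pool2d(d_fine, kernel_size=2)                        # D
    out = d_fine - alpha_s * up(down) + alpha_s * up(d_coarse)        # Eq. (3)
    return alpha_t * out + (1.0 - alpha_t) * prev_out                 # Eq. (4)
```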
3.4 Losses

We use the symmetric mean absolute percentage error (SMAPE):

    \ell(r, d) = \frac{1}{3N} \sum_{p=1}^{N} \sum_{c=1}^{3} \frac{|d_{p,c} - r_{p,c}|}{|d_{p,c}| + |r_{p,c}| + \varepsilon}    (5)

Here, N is the number of pixels in the image and ε = 10^{-2}; d and r are the denoised frame and the corresponding reference frame. Our loss combines two parts. The first part is computed on a sequence of 5 images and includes the spatial loss ℓ_s = ℓ(r, d); the temporal loss ℓ_t = ℓ(Δr, Δd), where Δ is the temporal gradient computed between two consecutive frames; and the relative edge loss ℓ_e = L1(∇d / (r + ε), ∇r / (r + ε)), where the gradient ∇ is computed using the High Frequency Error Norm (HFEN), an image-comparison metric from medical imaging [Ravishankar and Bresler 2011]. As suggested by Chaitanya et al. [Chaitanya et al. 2017], we assign higher weights to these three losses (ℓ_s, ℓ_t, and ℓ_e) for frames later in the sequence to amplify temporal gradients. For our training sequence of 5 images, we use the weights (0.05, 0.25, 0.5, 0.75, 1).
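Equation (5) is a few lines in PyTorch; a minimal sketch:

```python
import torch

def smape(denoised: torch.Tensor, reference: torch.Tensor,
          eps: float = 1e-2) -> torch.Tensor:
    """Eq. (5): symmetric mean absolute percentage error.

    Averaging over all pixels and the 3 color channels supplies the
    1/(3N) normalization."""
    num = (denoised - reference).abs()
    den = denoised.abs() + reference.abs() + eps
    return (num / den).mean()
```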
The second part consists of the warped temporal loss ℓ_wt = ℓ(ω_r, ω_d), where ω_r = r_4 − W(r_3) and W(·) is the warping operator that reprojects the previous frame to the current one, and the albedo loss ℓ_a = ℓ(a_acc, a_r), where a_acc is the accumulated albedo computed by our feature accumulator network. We compute the albedo loss only on the last frame and the warped temporal loss only on the last two frames. We use a weighted combination of these losses as the final training loss:

    \ell = 0.7\,\ell_s + 0.1\,\ell_t + 0.2\,\ell_e + 0.4\,\ell_{wt} + 5.0\,\ell_a    (6)
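Putting Section 3.4 together, the sketch below assembles Eq. (6) for a 5-frame training sequence, reusing the smape() sketch above. The grad() helper standing in for the HFEN gradient, the warp() reprojection, and exactly which frames receive each weight are our assumptions.

```python
def total_loss(d, r, warp, grad, a_acc, a_ref, eps=1e-2):
    """Eq. (6) over 5-frame lists d (denoised) and r (reference)."""
    w = [0.05, 0.25, 0.5, 0.75, 1.0]                 # later frames weigh more
    l_s = sum(wi * smape(di, ri) for wi, di, ri in zip(w, d, r))
    l_t = sum(wi * smape(di - dj, ri - rj)           # temporal gradients
              for wi, di, dj, ri, rj
              in zip(w[1:], d[1:], d[:-1], r[1:], r[:-1]))
    l_e = sum(wi * (grad(di) / (ri + eps) - grad(ri) / (ri + eps)).abs().mean()
              for wi, di, ri in zip(w, d, r))        # relative edge loss
    # Warped temporal loss on the last two frames: omega = frame - W(prev).
    l_wt = smape(d[4] - warp(d[3]), r[4] - warp(r[3]))
    l_a = smape(a_acc, a_ref)                        # albedo loss, last frame
    return 0.7 * l_s + 0.1 * l_t + 0.2 * l_e + 0.4 * l_wt + 5.0 * l_a
```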
4 DATASET AND TRAINING PROCEDURE

Since our method is designed for AAA games and virtual character rendering, we train a separate network for each 3D scene, as in [Xiao et al. 2020]. Because the input images are generated at 0.25 spp, training a robust denoiser requires a large number of images. We train our method on 6 scenes (see Figure 5). BistroInterior and BistroExterior [Lumberyard 2017] have more than one million triangles and feature transparency, diffuse and specular materials, and soft shadows. Sponza, Diningroom, Angel, and Warmroom are
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Reconstructed ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Image ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu Reference ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Warp ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='43 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='C ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='43 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='43 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='32 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Downsample ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='48 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='48 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Downscale ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='64 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='64 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Downscale ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='64 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='64 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Downscale ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Downscale ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Downscale ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='96 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='96 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Downscale ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='96 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='96 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='128 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='128 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='128 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='128 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Upsample ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='96 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Upsample ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='96 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Upsample ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Upsample ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='96 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='64 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Upsample ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='96 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='64 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Upsample ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='64 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='48 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Upsample ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='48 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='32 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Upsample ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='48 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='32 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Upsample ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='64 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Softmax ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='64 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='32 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Downsample ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Downsample ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv+ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Conv ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Previous out ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Warp ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Coarse ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Composition ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='ReLu ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Upsample ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='2x ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='2x ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Upsample ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='2x ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Downscale ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='2x ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='2x ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Downscale ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='2x ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Normal&Depth ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Normal&Depth ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='f tf t ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='f ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='1 tf ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='1 t ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Previous Features ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Previous Features ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='O ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='1 t ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='O ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='1 t ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Ot ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Ot ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content='Figure 3: Network pipeline of our sparse sampling reconstruction (SSR) method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content=' The pipeline includes feature accumulator, feature fusion, and reconstruction networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/EdAzT4oBgHgl3EQfG_ss/content/2301.01036v1.pdf'} +page_content=' The numbers under each network layer represent the output channels at cor- responding layers.' 
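The accumulation step at the heart of Figure 3 can be summarized by the sketch below, assuming a learned per-pixel blend: a small convolutional network predicts a blend factor from the concatenated current and warped previous features. This is a simplification of the full network, which additionally correlates (⊙) the normal-and-depth features of both frames, and the layer widths here are illustrative.

import torch
import torch.nn as nn

class FeatureAccumulator(nn.Module):
    def __init__(self, feat_ch):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(2 * feat_ch, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, 32, 3, padding=1), nn.ReLU(),
            nn.Conv2d(32, 1, 3, padding=1), nn.Sigmoid(),  # per-pixel blend factor
        )

    def forward(self, cur, prev, warp):
        prev_warped = warp(prev)  # reproject history with motion vectors
        alpha = self.net(torch.cat([cur, prev_warped], dim=1))
        # Blend the warped history with the current features.
        return alpha * prev_warped + (1.0 - alpha) * cur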
Figure 4: (a) warped albedo; (b) current albedo; (c) accumulated albedo. The history albedo (a) is first warped and then blended with the current-frame albedo (b). Our temporal accumulator not only fills missing pixels but also smooths artifacts at edges.

Each scene in the training set contains 100 to 1000 frames at a resolution of 1024 × 2048, depending on its complexity. We also rendered a validation set of 10 frames and a test set of 50 frames for each scene. For each frame, we rendered the reference image at 32768 spp, which serves as the target of our denoiser.

Figure 5: An overview of the reference images in our generated dataset: (a) BistroInterior; (b) BistroExterior; (c) Sponza; (d) Diningroom; (e) Angel; (f) Warmroom.

When training the denoiser, we randomly select 5 consecutive frames from consecutive clips of each scene. The inputs of each frame, including the noisy image and the auxiliary features, are randomly cropped to 256 × 256 to make full use of the GPU. We optimize our denoiser network with the ADAM optimizer [Kingma and Ba 2015], setting the initial learning rate to 1 × 10^-4 and halving it at one-third and two-thirds of the total number of iterations. The batch size is 7 and we train for 200 epochs per scene.
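This schedule maps directly onto standard PyTorch utilities, as in the sketch below; `model`, `compute_loss`, `next_batch`, and `num_iters` are placeholders.

import torch

optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer,
    milestones=[num_iters // 3, 2 * num_iters // 3],
    gamma=0.5,  # halve the learning rate at 1/3 and 2/3 of training
)
for it in range(num_iters):
    optimizer.zero_grad()
    loss = compute_loss(next_batch())  # Eq. (6) on a 5-frame 256x256 crop
    loss.backward()
    optimizer.step()
    scheduler.step()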
Our denoiser is implemented in PyTorch [Paszke et al. 2019], and all the models we present were trained and tested in parallel on four NVIDIA Tesla A100 GPUs. Each network takes around 9 hours to train.

5 RESULTS
In this section, we evaluate the performance of our method. We describe the implementation of the compared baselines and the metrics in Section 5.1, analyze the algorithm with various ablation experiments in Section 5.2, and discuss its limitations and future work in Section 5.3.

5.1 Baseline and metrics
We compare our method with several state-of-the-art denoising and reconstruction works, including the real-time methods RAE [Chaitanya et al. 2017] and ANF [Işık et al. 2021], the offline method MCD [Yu et al. 2021], and the super-resolution model NSRR [Xiao et al. 2020]. Although NSRR targets the super-resolution task, it can also reconstruct images from zero-padded inputs, which makes it a good fit for the sparse sampling task, so we choose it as one of our competitors; we removed its zero-upsampling module so that it can be applied to our dataset. We follow all these papers and re-implement them in PyTorch.
We train all the methods on the same datasets as our method, with the same training procedure. To evaluate quality, we use three metrics: peak signal-to-noise ratio (PSNR), structural similarity index (SSIM) [Wang et al. 2004], and root mean squared error (RMSE). Higher is better for PSNR and SSIM, while lower is better for RMSE. Quantitative comparison results are shown in Table 1; average results are reported over the 50 test videos of the six scenes. We only show SSIM results due to the space limit; please refer to our supplemental material for more comparison results. As shown in Table 1, our method achieves the best performance on all six scenes. At inference time, all methods are applied to a single frame at a time; Table 2 shows the inference time at 1024 × 2048 resolution. We tested all the models on an NVIDIA Tesla A100 GPU. None of the network models is optimized with NVIDIA TensorRT at 16-bit precision, so the inference times still have room for improvement.
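For reference, the metrics can be computed as follows, assuming float images in [0, 1] and scikit-image for SSIM:

import numpy as np
from skimage.metrics import structural_similarity

def rmse(a, b):
    return float(np.sqrt(np.mean((a - b) ** 2)))

def psnr(a, b, data_range=1.0):
    mse = np.mean((a - b) ** 2)
    return float(10.0 * np.log10(data_range ** 2 / mse))

def ssim(a, b):
    # channel_axis=-1 selects the color axis (scikit-image >= 0.19).
    return structural_similarity(a, b, channel_axis=-1, data_range=1.0)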
Scene            MCD      ANF      NSRR     RAE      SSR
BistroInterior   0.7650   0.7583   0.7405   0.7751   0.8921
BistroExterior   0.8071   0.7201   0.8538   0.8006   0.8962
Sponza           0.8119   0.8219   0.8113   0.8898   0.9410
Diningroom       0.8637   0.7226   0.8843   0.9007   0.9375
Warmroom         0.8021   0.8774   0.9740   0.9675   0.9758
Angel            0.8601   0.8813   0.9804   0.9161   0.9763

Table 1: Quantitative comparison (SSIM) on six scenes. We choose four baseline methods to compare with our SSR method.
Method      MCD    ANF   NSRR   RAE   SSR
Time (ms)   13.5   32    33.5   6.5   7.8

Table 2: Comparison of inference times at 1024 × 2048 resolution.

In Figure 6, we compare the reconstructed images visually. Our method outperforms all other methods on all scenes by a large margin. Previous state-of-the-art methods are not good at denoising renderings at 0.25 spp: MCD originally targets offline rendering, and its transformer requires large memory for training and inference, while RAE, NSRR, and ANF feed the previous and current features into the network directly. The difference between our approach and the previous ones is that we compute, for each pixel, the correlation between the normal and depth features of the current and previous frames. As the supplementary material and videos show, our method produces significantly more temporally stable video results than existing methods.

5.2 Analysis
5.2.1 Rendering Efficiency. We measured the rendering time of each stage on an NVIDIA RTX 3060 GPU at 1024 × 2048 resolution; see Table 3. With our sparse sampling, the total rendering time of the BistroInterior scene is 8.75 ms; without it, the total is 18.19 ms. This speeds up the ray-tracing pass by almost 3× (5.35 ms vs. 14.79 ms) and roughly halves the total frame time, and applying our SSR model then produces high-fidelity results.

Rasterization   Transparent and Shadow   W-SS      Wo-SS
1.08 ms         2.32 ms                  5.35 ms   14.79 ms

Table 3: Rendering time of the BistroInterior scene. W-SS means rendering with our sparse sampling; Wo-SS means without sparse sampling.

5.2.2 Quality Gain with Shadow and Transparency. Our training images are produced by a Monte Carlo path tracer at 0.25 spp on average. Because of the sparse sampling and light occlusion, more than three-fourths of the pixels remain zero, so we need more features to train our model.
We add the direct noisy shadow as an input to our model. Our feature accumulator accumulates the noisy shadow between the current frame and the history shadow buffer. The accumulated shadow helps our model detect continuous shadow edges and improves temporal stability; the synthesized videos of the test sequences show that, without the shadow feature, the shadow edges in Figure 7 jitter across frames. If noisy features are fed into a regression-based method [Rousselle et al. 2013], the quality of the denoised image decreases; such methods need an additional filter to prefilter the noisy features, whereas CNN-based methods can accept several noisy buffers besides the noisy image. We also add the transparency feature to our model for training, but we do not accumulate it before feeding it into the feature fusion module, because the transparency feature contains less noise than the shadow; see Figure 8. If a scene has no transparent objects, such as BistroExterior, we feed a zero-filled transparency feature instead. Our model without the shadow and transparency features only reaches 27.98 dB on the BistroInterior test set; with them, it not only reaches a higher PSNR (28.78 dB) but also generates higher-quality images.
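In practice, these extra buffers simply enter the network as additional input channels. The sketch below shows one plausible packing; the channel counts are illustrative, and scenes without transparent objects supply a zero-filled buffer as described above.

import torch

def build_network_input(radiance, normal, depth, shadow, transparency=None):
    # radiance/normal/transparency: Bx3xHxW; depth/shadow: Bx1xHxW.
    if transparency is None:
        # Scenes without transparent objects (e.g. BistroExterior)
        # feed a zero-filled transparency buffer.
        b, _, h, w = radiance.shape
        transparency = radiance.new_zeros(b, 3, h, w)
    return torch.cat([radiance, normal, depth, shadow, transparency], dim=1)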
5.2.3 Quality Gain with Feature Accumulator. We demodulate the image by the albedo at the primary hit position. After our network reconstructs the untextured illumination, we re-modulate it by the albedo to restore the texture detail in the final rendering. If the albedo of the corresponding frame contains an artifact, the artifact transfers to the final rendering. Chaitanya et al. [2017] apply TAA as a supplemental post-process pass to fix such artifacts. We instead re-modulate by the accumulated albedo generated by the temporal accumulator module, achieving an effect similar to multisample antialiasing (MSAA) [Akeley 1993]. In summary, Figures 4, 7, and 9 show that our feature accumulator plays a key role in reconstructing sparse-sampling renderings at less than 1 spp.
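The demodulation and re-modulation around the network can be written as the following sketch; EPS guards the division, the function names are illustrative, and re-modulating by the accumulated rather than the single-frame albedo is what keeps per-frame albedo artifacts out of the output.

EPS = 1e-4

def reconstruct_frame(noisy_rgb, albedo_cur, albedo_acc, net):
    untextured = noisy_rgb / (albedo_cur + EPS)  # demodulate at the primary hit
    illumination = net(untextured)               # reconstruct untextured illumination
    return illumination * (albedo_acc + EPS)     # re-modulate with accumulated albedo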
5.2.4 Network Modules. In Table 4, we report ablation experiments analyzing the quality improvements contributed by the temporal accumulator (Section 3.3.1) and feature fusion (Section 3.3.2) modules. Average results are reported over the 50 test image sequences of the BistroInterior scene. Without both the temporal accumulator and feature fusion, the PSNR decreases by about 0.58 dB, and temporal stability degrades dramatically in the video results. See our supplemental materials for more detailed information.

Feature Accumulator   Feature Fusion   SSIM     PSNR (dB)
✗                     ✗                0.8600   28.20
✗                     ✓                0.8617   28.15
✓                     ✗                0.8866   28.56
✓                     ✓                0.8911   28.78

Table 4: Ablation experiment for the feature accumulator and feature fusion modules. The network is trained with each (and both) of these subnetworks removed, and results on the BistroInterior scene are reported.

5.3 Limitations and Future Work
While our method provides a significant improvement for neural sparse-sampling reconstruction, the inference time still has room for improvement; we will adopt TensorRT for acceleration and deploy our model on our game engine and virtual-character rendering platform in the future. In addition, small objects still jitter slightly in the temporal domain. Modern game engines all have a TAA pass, and applying TAA as post-processing yields more temporally stable results.
We also tried adding a Swin Transformer layer [Liu et al. 2021] as the first layer of our reconstruction network. It improves the quantitative results by about 0.23 dB, but increases the inference time by 1.1 ms at 1024 × 2048 resolution on an NVIDIA Tesla A100.

6 CONCLUSION
We have presented the first CNN-based method for reconstructing Monte Carlo renderings at 0.25 spp, and experiments show that our method reconstructs high-quality results compared with current state-of-the-art methods.

Figure 6: Visual results on the BistroInterior, BistroExterior, Sponza, Diningroom, Warmroom, and Angel scenes: (a) ours; (b) input; (c) MCD; (d) ANF; (e) NSRR; (f) RAE; (g) ours; (h) reference.

Figure 7: (a) is generated by a model trained without the shadow feature; our result (b) is trained with the shadow feature; (c) and (d) are the noisy shadow feature and the ground truth, respectively.

Figure 8: (a) is generated by our model trained without the transparency feature; our result (b) is trained with the transparency feature; (c) and (d) are the transparency feature and the ground truth, respectively.

Figure 9: (a) The albedo artifact is transferred to the final result (SSIM 0.8626). (b) Re-modulating by the accumulated albedo leads to a high-quality image (SSIM 0.8972). (c) Ground truth.
We propose an efficient feature accumulator network to compute a per-pixel blending factor between the current and previous frames. The accumulated features are then fused and fed into a multi-scale U-Net to reconstruct the final results. We evaluated our method by comparing its performance to previous works, demonstrating better results across all test scenes.

7 ACKNOWLEDGMENTS
We thank the Open Research Content Archive (ORCA) of NVIDIA for providing the BistroInterior and BistroExterior scenes for training and testing. We also thank all the students who participated in the development of our hybrid renderer.

REFERENCES
Kurt Akeley. 1993. Reality Engine Graphics. In Proceedings of the 20th Annual Conference on Computer Graphics and Interactive Techniques (Anaheim, CA) (SIGGRAPH '93). Association for Computing Machinery, New York, NY, USA, 109–116. https://doi.org/10.1145/166117.166131
Steve Bako, Thijs Vogels, Brian McWilliams, Mark Meyer, Jan Novák, Alex Harvill, Pradeep Sen, Tony DeRose, and Fabrice Rousselle. 2017.
Steve Bako, Thijs Vogels, Brian McWilliams, Mark Meyer, Jan Novák, Alex Harvill, Pradeep Sen, Tony DeRose, and Fabrice Rousselle. 2017. Kernel-Predicting Convolutional Networks for Denoising Monte Carlo Renderings. ACM Trans. Graph. 36, 4, Article 97 (July 2017), 14 pages. https://doi.org/10.1145/3072959.3073708
Pablo Bauszat, Martin Eisemann, and Marcus Magnor. 2011. Guided Image Filtering for Interactive High-Quality Global Illumination. In Proceedings of the Twenty-Second Eurographics Conference on Rendering (Prague, Czech Republic) (EGSR '11). Eurographics Association, Goslar, DEU, 1361–1368. https://doi.org/10.1111/j.1467-8659.2011.01996.x
Benedikt Bitterli, Fabrice Rousselle, Bochang Moon, José A. Iglesias-Guitián, David Adler, Kenny Mitchell, Wojciech Jarosz, and Jan Novák. 2016. Nonlinearly Weighted First-Order Regression for Denoising Monte Carlo Renderings. Comput. Graph. Forum 35, 4 (July 2016), 107–117.
Chakravarty Chaitanya, Anton Kaplanyan, Christoph Schied, Marco Salvi, Aaron Lefohn, Derek Nowrouzezahrai, and Timo Aila. 2017. Interactive Reconstruction of Monte Carlo Image Sequences Using a Recurrent Denoising Autoencoder. ACM Trans. Graph. 36, 4 (July 2017), 1–12. https://doi.org/10.1145/3072959.3073601
Henrik Dahlberg, David Adler, and Jeremy Newlin. 2019. Machine-Learning Denoising in Feature Film Production. In ACM SIGGRAPH 2019 Talks (Los Angeles, California) (SIGGRAPH '19). Association for Computing Machinery, New York, NY, USA, Article 21, 2 pages. https://doi.org/10.1145/3306307.3328150
Mauricio Delbracio, Pablo Musé, Julien Chauvier, Nicholas Phelps, and Jean-Michel Morel. 2014. Boosting Monte Carlo Rendering by Ray Histogram Fusion. ACM Trans. Graph. 33 (Feb. 2014). https://doi.org/10.1145/2532708
Andrew Edelsten, Paula Jukarainen, and Anjul Patney. 2019. Truly Next-Gen: Adding Deep Learning to Games and Graphics. In NVIDIA Sponsored Sessions, Game Developers Conference.
Hangming Fan, Rui Wang, Yuchi Huo, and Hujun Bao. 2021. Real-time Monte Carlo Denoising with Weight Sharing Kernel Prediction Network. Computer Graphics Forum 40, 4 (2021), 15–27. https://doi.org/10.1111/cgf.14338
Michaël Gharbi, Tzu-Mao Li, Miika Aittala, Jaakko Lehtinen, and Frédo Durand. 2019. Sample-Based Monte Carlo Denoising Using a Kernel-Splatting Network. ACM Trans. Graph. 38, 4, Article 125 (July 2019), 12 pages. https://doi.org/10.1145/3306346.3322954
Jon Hasselgren, Jacob Munkberg, Marco Salvi, Anjul Patney, and Aaron Lefohn. 2020. Neural Temporal Adaptive Sampling and Denoising. In Computer Graphics Forum, Vol. 39. Wiley Online Library, 147–155.
Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2015. Deep Residual Learning for Image Recognition. CoRR abs/1512.03385 (2015). arXiv:1512.03385 http://arxiv.org/abs/1512.03385
Nikolai Hofmann, Jon Hasselgren, Petrik Clarberg, and Jacob Munkberg. 2021. Interactive Path Tracing and Reconstruction of Sparse Volumes. Proc. ACM Comput. Graph. Interact. Tech. 4, 1, Article 5 (Apr. 2021), 19 pages. https://doi.org/10.1145/3451256
Yuchi Huo and Sung-eui Yoon. 2021. A Survey on Deep Learning-Based Monte Carlo Denoising. Computational Visual Media 7 (March 2021). https://doi.org/10.1007/s41095-021-0209-9
Mustafa Işık, Krishna Mullia, Matthew Fisher, Jonathan Eisenmann, and Michaël Gharbi. 2021. Interactive Monte Carlo Denoising Using Affinity of Neural Features. ACM Trans. Graph. 40, 4, Article 37 (July 2021), 13 pages. https://doi.org/10.1145/3450626.3459793
James T. Kajiya. 1986. The Rendering Equation. In Proceedings of the 13th Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH '86). Association for Computing Machinery, New York, NY, USA, 143–150. https://doi.org/10.1145/15922.15902
Nima Khademi Kalantari, Steve Bako, and Pradeep Sen. 2015. A Machine Learning Approach for Filtering Monte Carlo Noise. ACM Trans. Graph. 34, 4, Article 122 (July 2015), 12 pages. https://doi.org/10.1145/2766977
Brian Karis. 2014. High-Quality Temporal Supersampling. In ACM SIGGRAPH 2014 Courses: Advances in Real-Time Rendering in Games, Part I (SIGGRAPH '14). https://doi.org/10.1145/2614028.2615455
Diederik P. Kingma and Jimmy Ba. 2015. Adam: A Method for Stochastic Optimization. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, Yoshua Bengio and Yann LeCun (Eds.). http://arxiv.org/abs/1412.6980
Matias Koskela, Kalle Immonen, Markku Mäkitalo, Alessandro Foi, Timo Viitanen, Pekka Jääskeläinen, Heikki Kultala, and Jarmo Takala. 2019. Blockwise Multi-Order Feature Regression for Real-Time Path-Tracing Reconstruction. ACM Trans. Graph. 38, 5, Article 138 (June 2019), 14 pages. https://doi.org/10.1145/3269978
Alexandr Kuznetsov, Nima Khademi Kalantari, and Ravi Ramamoorthi. 2018. Deep Adaptive Sampling for Low Sample Count Rendering. Computer Graphics Forum 37 (July 2018), 35–44. https://doi.org/10.1111/cgf.13473
Tzu-Mao Li, Yu-Ting Wu, and Yung-Yu Chuang. 2012. SURE-Based Optimization for Adaptive Sampling and Reconstruction. ACM Trans. Graph. 31, 6, Article 194 (Nov. 2012), 9 pages. https://doi.org/10.1145/2366145.2366213
Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. 2021. Swin Transformer: Hierarchical Vision Transformer using Shifted Windows. CoRR abs/2103.14030 (2021). arXiv:2103.14030 https://arxiv.org/abs/2103.14030
Amazon Lumberyard. 2017. Amazon Lumberyard Bistro, Open Research Content Archive (ORCA). http://developer.nvidia.com/orca/amazon-lumberyard-bistro
Michael Mara, Morgan McGuire, Benedikt Bitterli, and Wojciech Jarosz. 2017. An Efficient Denoising Algorithm for Global Illumination. In Proceedings of High Performance Graphics (Los Angeles, California) (HPG '17). Association for Computing Machinery, New York, NY, USA, Article 3, 7 pages. https://doi.org/10.1145/3105762.3105774
Xiaoxu Meng, Quan Zheng, Amitabh Varshney, Gurprit Singh, and Matthias Zwicker. 2020. Real-time Monte Carlo Denoising with the Neural Bilateral Grid. In Eurographics Symposium on Rendering - DL-only Track, Carsten Dachsbacher and Matt Pharr (Eds.). The Eurographics Association. https://doi.org/10.2312/sr.20201133
Bochang Moon, Nathan Carr, and Sung-Eui Yoon. 2014. Adaptive Rendering Based on Weighted Local Regression. ACM Trans. Graph. 33, 5, Article 170 (Sep. 2014), 14 pages. https://doi.org/10.1145/2641762
Bochang Moon, Jong Yun Jun, JongHyeob Lee, Kunho Kim, Toshiya Hachisuka, and Sung-Eui Yoon. 2013. Robust Image Denoising Using a Virtual Flash Image for Monte Carlo Ray Tracing. Computer Graphics Forum 32, 1 (2013), 139–151. https://doi.org/10.1111/cgf.12004
Bochang Moon, Steven McDonagh, Kenny Mitchell, and Markus Gross. 2016. Adaptive Polynomial Rendering. ACM Trans. Graph. 35, 4, Article 40 (July 2016), 10 pages. https://doi.org/10.1145/2897824.2925936
Jacob Munkberg and Jon Hasselgren. 2020. Neural Denoising with Layer Embeddings. Computer Graphics Forum 39, 4 (2020), 1–12. https://doi.org/10.1111/cgf.14049
Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Köpf, Edward Z. Yang, Zach DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 2019. PyTorch: An Imperative Style, High-Performance Deep Learning Library. CoRR abs/1912.01703 (2019). arXiv:1912.01703 http://arxiv.org/abs/1912.01703
Saiprasad Ravishankar and Yoram Bresler. 2011. MR Image Reconstruction From Highly Undersampled k-Space Data by Dictionary Learning. IEEE Transactions on Medical Imaging 30, 5 (2011), 1028–1041. https://doi.org/10.1109/TMI.2010.2090538
Olaf Ronneberger, Philipp Fischer, and Thomas Brox. 2015. U-Net: Convolutional Networks for Biomedical Image Segmentation. CoRR abs/1505.04597 (2015). arXiv:1505.04597 http://arxiv.org/abs/1505.04597
Fabrice Rousselle, Claude Knaus, and Matthias Zwicker. 2012. Adaptive Rendering with Non-Local Means Filtering. ACM Trans. Graph. 31, 6, Article 195 (Nov. 2012), 11 pages. https://doi.org/10.1145/2366145.2366214
Fabrice Rousselle, Marco Manzi, and Matthias Zwicker. 2013. Robust Denoising using Feature and Color Information. Computer Graphics Forum 32, 7 (2013), 121–130. https://doi.org/10.1111/cgf.12219
Christoph Schied, Anton Kaplanyan, Chris Wyman, Anjul Patney, Chakravarty R. Alla Chaitanya, John Burgess, Shiqiu Liu, Carsten Dachsbacher, Aaron Lefohn, and Marco Salvi. 2017. Spatiotemporal Variance-Guided Filtering: Real-Time Reconstruction for Path-Traced Global Illumination. In Proceedings of High Performance Graphics (Los Angeles, California) (HPG '17). Association for Computing Machinery, New York, NY, USA, Article 2, 12 pages. https://doi.org/10.1145/3105762.3105770
Christoph Schied, Christoph Peters, and Carsten Dachsbacher. 2018. Gradient Estimation for Real-Time Adaptive Temporal Filtering. Proc. ACM Comput. Graph. Interact. Tech. 1, 2, Article 24 (Aug. 2018), 16 pages. https://doi.org/10.1145/3233301
Thijs Vogels, Fabrice Rousselle, Brian McWilliams, Gerhard Röthlin, Alex Harvill, David Adler, Mark Meyer, and Jan Novák. 2018. Denoising with Kernel Prediction and Asymmetric Loss Functions. ACM Trans. Graph. 37, 4, Article 124 (July 2018), 15 pages. https://doi.org/10.1145/3197517.3201388
Zhou Wang, A. C. Bovik, H. R. Sheikh, and E. P. Simoncelli. 2004. Image Quality Assessment: From Error Visibility to Structural Similarity. IEEE Transactions on Image Processing 13, 4 (2004), 600–612. https://doi.org/10.1109/TIP.2003.819861
Lei Xiao, Salah Nouri, Matt Chapman, Alexander Fix, Douglas Lanman, and Anton Kaplanyan. 2020. Neural Supersampling for Real-Time Rendering. ACM Trans. Graph. 39, 4, Article 142 (July 2020), 12 pages. https://doi.org/10.1145/3386569.3392376
Bing Xu, Junfei Zhang, Rui Wang, Kun Xu, Yong-Liang Yang, Chuan Li, and Rui Tang. 2019. Adversarial Monte Carlo Denoising with Conditioned Auxiliary Feature Modulation. ACM Trans. Graph. 38, 6, Article 224 (Nov. 2019), 12 pages. https://doi.org/10.1145/3355089.3356547
Lei Yang, Shiqiu Liu, and Marco Salvi. 2020. A Survey of Temporal Antialiasing Techniques. Computer Graphics Forum 39, 2 (2020), 607–621. https://doi.org/10.1111/cgf.14018
Lei Yang, Diego Nehab, Pedro V. Sander, Pitchaya Sitthi-amorn, Jason Lawrence, and Hugues Hoppe. 2009. Amortized Supersampling. ACM Trans. Graph. 28, 5 (Dec. 2009), 1–12. https://doi.org/10.1145/1618452.1618481
Jiaqi Yu, Yongwei Nie, Chengjiang Long, Wenju Xu, Qing Zhang, and Guiqing Li. 2021. Monte Carlo Denoising via Auxiliary Feature Guided Self-Attention. ACM Trans. Graph. 40, 6, Article 273 (Dec. 2021), 13 pages. https://doi.org/10.1145/3478513.3480565
M. Zwicker, W. Jarosz, J. Lehtinen, B. Moon, R. Ramamoorthi, F. Rousselle, P. Sen, C. Soler, and S.-E. Yoon. 2015. Recent Advances in Adaptive Sampling and Reconstruction for Monte Carlo Rendering. Comput. Graph. Forum 34, 2 (May 2015), 667–681.