hansen97 committed
Commit 0e07d71 · 0 Parent(s)

Initial clean commit
.gitattributes ADDED
@@ -0,0 +1,41 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.ARW filter=lfs diff=lfs merge=lfs -text
+ *.CR2 filter=lfs diff=lfs merge=lfs -text
+ *.dng filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.dll filter=lfs diff=lfs merge=lfs -text
+ *.so filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,19 @@
+ __pycache__
+ */__pycache__
+ # images/*
+ *.pyc
+ *.log
+ *.info
+ *.jpg
+ *.png
+ *.h5
+ *.pkl
+ # *.dng
+ *.ARW
+ *.mat
+ *.mp4
+ *.raw
+ */private
+ private/
+ results/
+ # *.sh
.gradio/certificate.pem ADDED
@@ -0,0 +1,31 @@
+ -----BEGIN CERTIFICATE-----
+ MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+ TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+ cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+ WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+ MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+ h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+ 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+ A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+ T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+ B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+ B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+ KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+ OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+ jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+ qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+ rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+ HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+ hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+ 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+ NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+ TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+ jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+ oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+ 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+ emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+ -----END CERTIFICATE-----
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 fenghansen
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: YOND
+ emoji: 🏢
+ colorFrom: indigo
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 5.37.0
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ short_description: You Only Need a Denoiser (https://arxiv.org/abs/2506.03645)
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,276 @@
+ import sys
+ import os
+ import glob
+ import time
+ from datetime import datetime
+ import gradio as gr
+ from app_function import YOND_Backend
+
+ yond = YOND_Backend()
+
+ # ------------------- Session management & resource release -------------------
+ # Last-active time per session (key: session ID, value: last-active time)
+ active_sessions = {}
+ # Timeout in seconds; set to 2 minutes (120 s) here, adjust as needed
+ INACTIVE_TIMEOUT = 120
+
+ def create_session():
+     """Create a new session and log it"""
+     other_sess = [sess for sess in active_sessions]
+     if len(other_sess) > 0:
+         gr.Warning(f"Detected {len(other_sess)} other active user(s); trying to release timed-out resources")
+         for sess in other_sess:
+             check_inactive(sess)
+     session_id = str(time.time_ns())
+     active_sessions[session_id] = datetime.now()
+     print(f"New session created: {session_id}")
+     return session_id
+
+ def update_heartbeat(session_id):
+     """Refresh the session's last-active time"""
+     if session_id and session_id in active_sessions:
+         active_sessions[session_id] = datetime.now()
+         print(f"Heartbeat updated for session {session_id}")
+         return "active"
+     return "invalid"
+
+ def check_inactive(session_id):
+     """Check whether the session has timed out; release resources if so"""
+     if not session_id or session_id not in active_sessions:
+         return "invalid"
+
+     last_active = active_sessions[session_id]
+     if (datetime.now() - last_active).total_seconds() > INACTIVE_TIMEOUT:
+         print(f"Session {session_id} timed out; releasing resources")
+         gr.Warning(f"Session {session_id} timed out; releasing resources")
+         try:
+             yond.unload_model()  # release model resources
+             yond.clear_cache()   # clear cached images
+         except Exception as e:
+             print(f"Error while releasing resources: {e}")
+             gr.Error(f"Error while releasing resources: {e}")
+         finally:
+             if session_id in active_sessions:
+                 del active_sessions[session_id]
+         return f"session {session_id} released"
+     return "active"
+
+ def close_session(session_id):
+     """Close the session and release resources"""
+     if session_id and session_id in active_sessions:
+         print(f"Session {session_id} closed; releasing resources")
+         gr.Warning(f"Session {session_id} closed; releasing resources")
+         try:
+             yond.unload_model()
+             yond.clear_cache()
+         except Exception as e:
+             print(f"Error while closing session: {e}")
+             gr.Error(f"Error while closing session: {e}")
+         finally:
+             if session_id in active_sessions:
+                 del active_sessions[session_id]
+         return "session closed"
+     return "invalid session"
+ # --------------------------------------------------------------
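+ # How the pieces above fit together: a gr.Timer (defined further below) calls
+ # update_heartbeat every 30 s to refresh the session's last-active timestamp, a second
+ # gr.Timer calls check_inactive every 60 s, and any session idle for longer than
+ # INACTIVE_TIMEOUT (120 s) gets the shared model unloaded and its cache cleared.
+ # Note that a single global YOND_Backend instance is shared, so releasing resources
+ # for one session affects every connected user.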
+
+ with gr.Blocks(title="YOND WebUI", css="""
+     #left_panel {
+         width: 400px !important;
+         min-width: 400px !important;
+         max-width: 400px !important;
+     }
+     .gradio-container {max-width: 1800px !important}
+     .log-panel {height: 200px !important; overflow-y: auto}
+ """) as app:
+
+     # ------------------- Session ID and heartbeat components -------------------
+     # Generate a unique session ID
+     session_id = gr.State(value=create_session)
+     # Hidden components used for heartbeat communication
+     heartbeat_signal = gr.Textbox(visible=False)
+     session_status = gr.Textbox(visible=False)
+     # --------------------------------------------------------------
+
+     gr.Markdown("""
+     # 🌌 YOND ([You Only Need a Denoiser](https://arxiv.org/abs/2506.03645)) | Practical Blind Raw Image Denoising
+     ### YOND WebUI Simple Tutorial (see the [YOND WebUI Introduction](https://vmcl-isp.site/t/topic/201) for a complete usage guide):
+     1. **[Load Config]** → 2. **[Upload Raw Image]** (or **[Load Example]**) → 3. **[Load Image]** (modify metadata and **[Update Image]** if needed) → 4. **[Noise Estimation]** → 5. **[Denoising]** → ...(Optional Operations)... → **[Release GPU]**
+     """)
+
+     with gr.Row():
+         with gr.Row():
+             yaml_files = glob.glob("runfiles/*/*.yml")
+             config_selector = gr.Dropdown(
+                 label="Preset config",
+                 choices=yaml_files,
+                 value="runfiles/Gaussian/gru32n_ft.yml",
+                 scale=2,
+                 container=False
+             )
+             load_config_btn = gr.Button("Load Config", variant="primary", scale=1)
+             ckpt_files = glob.glob("images/*.*")
+             example_selector = gr.Dropdown(
+                 label="Preset image",
+                 choices=ckpt_files,
+                 value="images/LRID_outdoor_x5_004_iso6400.dng",
+                 scale=2,
+                 container=False
+             )
+             load_example_btn = gr.Button("Load Example", variant="primary", scale=1)
+             unload_btn = gr.Button("Release GPU", variant="secondary", scale=1)
+
+     with gr.Row():
+         # Left control panel
+         with gr.Column(scale=1, elem_id="left_panel"):
+             raw_upload = gr.File(label="Uploaded Raw Image", file_types=[".npy", ".NPY", ".ARW", ".DNG", ".NEF", ".CR2", ".RAW", ".MAT", ".arw", ".dng", ".nef", ".cr2", ".raw", ".mat"], type="filepath")
+             with gr.Accordion("Raw Metadata", open=True):
+                 with gr.Row():
+                     h = gr.Number(label="Height", value=2160, precision=0, scale=1)
+                     w = gr.Number(label="Width", value=3840, precision=0, scale=1)
+                     bl = gr.Number(label="Black Level", value=64.0, precision=1, scale=1)
+                     wp = gr.Number(label="White Point", value=1023.0, precision=1, scale=1)
+                     ratio = gr.Number(label="DGain (x Ratio)", value=1.0, precision=1, scale=1)
+                     ispgain = gr.Number(label="ISPGain", value=1.0, precision=1, scale=1)
+                 with gr.Row():
+                     image_btn = gr.Button("Load Image", variant="primary")
+                     image_update_btn = gr.Button("Update Image", variant="secondary")
+
+             with gr.Accordion("Noise Estimation & Denoising", open=True):
+                 with gr.Row():
+                     use_ransac = gr.Checkbox(label="RANSAC", value=False, scale=1)
+                     double_est = gr.Checkbox(label="Refined Estimation", value=False, scale=1)
+                 gain = gr.Slider(value=0, step=0.1, label="System Gain (K)")
+                 sigma = gr.Slider(value=0, step=0.1, label="Read Noise Level (σ)")
+
+                 est_btn = gr.Button("Noise Estimation", variant="primary")
+
+                 use_ddim = gr.Checkbox(label="DDIM Mode", value=False, scale=1)
+                 with gr.Row():
+                     patch_size = gr.Number(label="Patch Size", value=1024, precision=0)
+                     sigsnr = gr.Number(label="SigSNR", precision=2, value=1.03)
+
+                 enh_btn = gr.Button("Denoising", variant="primary")
+
+         # Right display area
+         with gr.Column(scale=2):
+             with gr.Tabs():
+                 with gr.Tab("Input Image", id="input_tab"):
+                     input_img = gr.Image(label="Noisy Image", type="pil")
+                 with gr.Tab("Output Image", id="output_tab"):
+                     output_img = gr.Image(label="Denoised Image", type="pil")
+                 with gr.Tab("Threshold Mask", id="analysis_tab"):
+                     mask_img = gr.Image(label="mask", type="pil")
+
+             with gr.Accordion("Download Manager", open=True):
+                 with gr.Row():
+                     with gr.Column(scale=1):
+                         save_npy_btn = gr.Button("Save as NPY Files", variant="primary")
+                         npy_file = gr.File(label="Denoised NPY Download", visible=True)
+                     with gr.Column(scale=1):
+                         save_png_btn = gr.Button("Save as PNG Files", variant="primary")
+                         png_file = gr.File(label="Denoised PNG Download", visible=True)
+
+     # Load config
+     load_config_btn.click(
+         fn=yond.load_config,
+         inputs=[config_selector],
+         # outputs=[model_selector],
+     )
+
+     # Load the preset example image
+     load_example_btn.click(
+         fn=yond.process_image,
+         inputs=[example_selector, h, w, bl, wp, ratio, ispgain],
+         outputs=[input_img, h, w, bl, wp]
+     )
+
+     # Slider bindings
+     def update_sliders(gain_val, sigma_val):
+         """Dynamically adjust the slider ranges"""
+         gain_min = round(0.1 * int(gain_val), 2)
+         gain_max = round(int(gain_val) * 2.0, 2)
+         sigma_min = 0
+         sigma_max = max(2.0 * int(gain_val), int(sigma_val) * 2.0)
+         return [
+             gr.update(minimum=gain_min, maximum=gain_max),
+             gr.update(minimum=sigma_min, maximum=sigma_max)
+         ]
199
+
200
+ # 加载图片
201
+ image_btn.click(
202
+ fn=yond.process_image,
203
+ inputs=[raw_upload, h, w, bl, wp, ratio, ispgain],
204
+ outputs=[input_img, h, w, bl, wp]
205
+ )
206
+ # 更新图片
207
+ image_update_btn.click(
208
+ fn=yond.update_image,
209
+ inputs=[bl, wp, ratio, ispgain],
210
+ outputs=[input_img]
211
+ )
212
+ # 估计噪声
213
+ est_btn.click(
214
+ fn=yond.estimate_noise,
215
+ inputs=[double_est, use_ransac, patch_size],
216
+ outputs=[mask_img, gain, sigma]
217
+ ).then(
218
+ fn=update_sliders,
219
+ inputs=[gain, sigma],
220
+ outputs=[gain, sigma]
221
+ )
222
+ # 计算增强
223
+ enh_btn.click(
224
+ fn=yond.enhance_image,
225
+ inputs=[gain, sigma, sigsnr, use_ddim, patch_size],
226
+ outputs=[output_img]
227
+ )
228
+
229
+ save_npy_btn.click(
230
+ fn=yond.save_result_npy,
231
+ outputs=[npy_file]
232
+ )
233
+
234
+ save_png_btn.click(
235
+ fn=yond.save_result_png,
236
+ outputs=[png_file]
237
+ )
238
+
239
+
240
+ # ------------------- Gradio 5.38.0 定时器实现 -------------------
241
+ # 创建定时器(使用value参数设置间隔秒数)
242
+ heartbeat_timer = gr.Timer(
243
+ value=30, # 每30秒触发一次心跳
244
+ active=True # 初始激活状态
245
+ )
246
+ check_timer = gr.Timer(
247
+ value=60, # 每60秒检查一次超时
248
+ active=True
249
+ )
250
+
251
+ # 绑定定时器事件(使用tick方法)
252
+ heartbeat_timer.tick(
253
+ fn=update_heartbeat,
254
+ inputs=[session_id],
255
+ outputs=session_status
256
+ )
257
+
258
+ check_timer.tick(
259
+ fn=check_inactive,
260
+ inputs=[session_id],
261
+ outputs=session_status
262
+ )
263
+
264
+ unload_btn.click(
265
+ fn=close_session,
266
+ inputs=[session_id],
267
+ outputs=session_status
268
+ )
269
+
270
+ # 启动应用时设置关闭回调
271
+ if __name__ == "__main__":
272
+ app.launch(
273
+ server_name="0.0.0.0",
274
+ # server_port=7860,
275
+ # share=True,
276
+ )
app_function.py ADDED
@@ -0,0 +1,493 @@
+ # os and gc are used below; imported explicitly in case the wildcard imports do not provide them
+ import os
+ import gc
+ import numpy as np
+ import rawpy
+ from PIL import Image
+ import torch
+ import yaml
+ import gradio as gr
+ import tempfile
+
+ # os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+ import time
+ from torch.optim import Adam, lr_scheduler
+ from data_process import *
+ from utils import *
+ from archs import *
+ import sys
+ # Add the dist directory to the Python search path
+ sys.path.append("./dist")
+ # from dist.isp_algos import *
+ from isp_algos import VST, inverse_VST, ddim, BiasLUT, SimpleNLF
+ from bm3d import bm3d
+
+ class YOND_Backend:
+     def __init__(self):
+         self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+         # self.device = torch.device('cpu')  # force CPU to avoid CUDA-related issues
+
+         # Initialize processing parameters
+         self.p = {
+             "ratio": 1.0,
+             "ispgain": 1.0,
+             "h": 2160,
+             "w": 3840,
+             "bl": 64.0,
+             "wp": 1023.0,
+             "gain": 0.0,
+             "sigma": 0.0,
+             "wb": [2.0, 1.0, 2.0],
+             "ccm": np.eye(3),
+             "scale": 959.0,  # 1023 - 64
+             "ransac": False,
+             "ddim_mode": False,
+         }
+
+         # State variables
+         self.raw_data = None
+         self.denoised_data = None
+         self.denoised_npy = None
+         self.denoised_rgb = None
+         self.mask_data = None
+
+         self.yond = None
+         self.bias_lut = None
+
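+     # Field reference for self.p (inferred from the defaults above and the WebUI labels):
+     # bl/wp are the raw black level and white point, scale = wp - bl, ratio is the digital
+     # gain applied to the normalized raw, ispgain scales the preview/ISP output, and
+     # gain/sigma are the noise parameters (system gain K and read noise level σ) that
+     # estimate_noise fills in below.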
+     # New: release model resources
+     def unload_model(self):
+         if self.yond is not None:
+             del self.yond
+             self.yond = None
+             self.bias_lut = None
+             torch.cuda.empty_cache()  # free cached GPU memory
+         print("Model has been unloaded, please reload the config")
+         gr.Success("Model has been unloaded, please reload the config")
+
+     # New: clear cached data
+     def clear_cache(self):
+         # Clear temporary caches and intermediate variables from processing
+         self.raw_data = None
+         self.denoised_data = None
+         self.denoised_npy = None
+         self.denoised_rgb = None
+         self.mask_data = None
+         gc.collect()
+         print("Images have been cleared, please reload images")
+         gr.Success("Images have been cleared, please reload images")
+
+     def update_param(self, param, value):
+         """Update a processing parameter"""
+         try:
+
+             if param in ['h', 'w']:
+                 self.p[param] = int(value)
+             else:
+                 self.p[param] = float(value)
+
+             # Automatically update derived parameters
+             if param in ['wp', 'bl']:
+                 self.p['scale'] = self.p['wp'] - self.p['bl']
+
+         except (ValueError, TypeError) as e:
+             gr.Error(f"Parameter update failed: {str(e)}")
+             raise ValueError(f"Invalid parameter value: {value}") from e
+
+     def load_config(self, config_path):
+         """Load a config file"""
+         try:
+             self.yond = YOND_anytest(config_path, self.device)
+             gr.Success(f"Config loaded: {config_path}", duration=2)
+             gr.Success(f"Current device: {self.device}", duration=2)
+         except Exception as e:
+             gr.Error(f"Config loading failed: {str(e)}")
+             raise RuntimeError(f"Config loading failed: {str(e)}")
+         args = self.yond.args
+         if 'pipeline' in args:
+             self.p.update(args['pipeline'])
+         else:
+             self.p.update({'epoch':10, 'sigma_t':0.8, 'eta_t':0.85})
+         model_path = f"{self.yond.fast_ckpt}/{self.yond.yond_name}_last_model.pth"
+         self.load_model(model_path)
+         # return model_path
+
+
+     def load_model(self, model_path):
+         """Load pretrained model weights"""
+         try:
+             # Load the model weights
+             self.yond.load_model(model_path)
+             self.bias_lut = BiasLUT(lut_path='checkpoints/bias_lut_2d.npy')
+             if self.bias_lut is None:
+                 gr.Error(f"BiasLUT loading failed: {os.path.exists('checkpoints/bias_lut_2d.npy')}")
+             gr.Success(f"Model loaded: {model_path}", duration=2)
+
+         except Exception as e:
+             gr.Error(f"Model loading failed: {str(e)}")
+             raise RuntimeError(f"Model loading failed: {str(e)}") from e
+
+     def process_image(self, file_path, h, w, bl, wp, ratio, ispgain):
+         """Process a raw image file"""
+         try:
+             gr.Warning("Visualizing image...")
+             # Update processing parameters
+             self.update_param('h', h)
+             self.update_param('w', w)
+             self.update_param('bl', bl)
+             self.update_param('wp', wp)
+             self.update_param('ratio', ratio)
+             self.update_param('ispgain', ispgain)
+
+             # Re-initialize state
+             self.raw_data = None
+             self.denoised_data = None
+             self.mask_data = None
+
+             if file_path.lower().endswith(('.arw','.dng','.nef','.cr2')):
+                 with rawpy.imread(str(file_path)) as raw:
+                     self.raw_data = raw.raw_image_visible.astype(np.uint16)
+                     wb, ccm = self._extract_color_params(raw)
+                     h, w = self.raw_data.shape
+                     bl, wp = raw.black_level_per_channel[0], raw.white_level
+                     scale = wp - bl
+                     self.p.update({'wb':wb,'ccm':ccm,'h':h,'w':w,'bl':bl,'wp':wp,'scale':scale})
+             elif file_path.lower().endswith(('.raw', '.npy')):
+                 try:
+                     self.raw_data = np.fromfile(file_path, dtype=np.uint16)
+                     self.raw_data = self.raw_data.reshape(
+                         self.p['h'], self.p['w']
+                     )
+                 except Exception as e:
+                     gr.Warning(f"Reading with default parameters failed: {e}, trying magic tricks")
+                     info = rawread(file_path)
+                     self.raw_data = info['raw']
+                     self.p.update({
+                         'h': info['h'], 'w': info['w'],
+                         'bl': info['bl'], 'wp': info['wp'],
+                         'scale': info['wp'] - info['bl']
+                     })
+                     gr.Success('Parameters updated via magic tricks...', duration=2)
+             # MATLAB format handling
+             elif file_path.lower().endswith('.mat'):
+                 with h5py.File(file_path, 'r') as f:
+                     self.raw_data = np.array(f['x']).astype(np.float32) * 959 + 64
+                 # Try to read the metadata
+                 meta_path = file_path.replace('NOISY', 'METADATA')
+                 if os.path.exists(meta_path):
+                     self.meta = read_metadata(scipy.io.loadmat(meta_path))
+                 self.p.update({
+                     'h': self.raw_data.shape[0], 'w': self.raw_data.shape[1],
+                     'bl': 64, 'wp': 1023, 'scale': 959
+                 })
+             else:
+                 gr.Error("Unsupported format")
+                 raise ValueError("Unsupported format")
+
+             # Generate the preview
+             self.raw_data = self.raw_data.astype(np.float32)
+             if self.p.get('clip', False): self.raw_data = self.raw_data.clip(self.p['bl'], self.p['wp'])
+             preview = self._generate_preview()
+             return preview, self.p['h'], self.p['w'], self.p['bl'], self.p['wp']
+
+         except Exception as e:
+             gr.Error(f"Image visualization failed: {str(e)}")
+             raise RuntimeError(f"Image processing failed: {str(e)}") from e
+
+     def update_image(self, bl, wp, ratio, ispgain):
+         """Re-render the image with updated parameters"""
+         try:
+             log("Updating image parameters...")
+             gr.Success("Updating image parameters...", duration=2)
+             # Update processing parameters
+             if ispgain != self.p['ispgain'] and (bl != self.p['bl'] and wp != self.p['wp'] and ratio != self.p['ratio']):
+                 update_image_flag = True
+             self.update_param('bl', bl)
+             self.update_param('wp', wp)
+             self.update_param('ratio', ratio)
+             self.update_param('ispgain', ispgain)
+
+             # Re-initialize state
+             self.denoised_data = None
+             self.mask_data = None
+
+             if self.raw_data is not None:
+                 gr.Success("Visualizing image...", duration=2)
+                 preview = self._generate_preview()
+                 return preview
+             else:
+                 gr.Error("Please load an image first")
+                 raise RuntimeError("Please load an image first")
+         except Exception as e:
+             gr.Error(f"Image update failed: {str(e)}")
+             raise RuntimeError(f"Image update failed: {str(e)}") from e
+
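+     # Normalization convention used by the methods below: raw values are mapped to
+     # [0, 1] via (raw - bl) / scale with scale = wp - bl, then multiplied by the digital
+     # gain `ratio`; estimated noise parameters are rescaled by `scale` when reported.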
+     def estimate_noise(self, double_est, ransac, patch_size):
+         """Run noise estimation"""
+         if not self.yond:
+             gr.Error("Please load a model first")
+             raise RuntimeError("Please load a model first")
+         try:
+             gr.Warning("Estimating noise...")
+             log('Starting noise estimation')
+             self.p['ransac'] = ransac
+             # Preprocess the data
+             processed = (self.raw_data - self.p['bl']) / self.p['scale']
+             lr_raw = bayer2rggb(processed) * self.p['ratio']
+
+             # Coarse estimation
+             reg, self.mask_data = SimpleNLF(
+                 rggb2bayer(lr_raw),
+                 k=19,
+                 eps=1e-3,
+                 setting={'mode': 'self', 'thr_mode':'score2', 'ransac': self.p['ransac']}
+             )
+             self.p['gain'] = reg[0] * self.p['scale']
+             self.p['sigma'] = np.sqrt(max(reg[1], 0)) * self.p['scale']
+
+             if double_est:
+                 log(" Using refined estimation")
+                 lr_raw_np = lr_raw * self.p['scale']
+                 ######## EM-VST: correct the expectation bias of the VST noise map ########
+                 bias_base = np.maximum(lr_raw_np, 0)
+                 bias = self.bias_lut.get_lut(bias_base, K=self.p['gain'], sigGs=self.p['sigma'])
+                 raw_vst = VST(lr_raw_np, self.p['sigma'], gain=self.p['gain'])
+                 raw_vst = raw_vst - bias
+
+                 ################# VST transform #################
+                 lower = VST(0, self.p['sigma'], gain=self.p['gain'])
+                 upper = VST(self.p['scale'], self.p['sigma'], gain=self.p['gain'])
+                 nsr = 1 / (upper - lower)
+                 raw_vst = (raw_vst - lower) / (upper - lower)
+
+                 ################# Prepare for denoising #################
+                 raw_vst = torch.from_numpy(raw_vst).float().to(self.device).permute(2,0,1)[None,]
+                 if 'guided' in self.yond.arch:
+                     sigma_corr = 1.03
+                     t = torch.tensor(nsr*sigma_corr, dtype=raw_vst.dtype, device=self.device)
+
+                 # Denoise & pad
+                 target_size = patch_size  # target patch size, adjust as needed
+                 overlap_ratio = 1/8       # overlap ratio, adjust as needed
+
+                 # Use the improved big_image_split function
+                 raw_inp, metadata = big_image_split(raw_vst, target_size, overlap_ratio)
+
+                 raw_dn = torch.zeros_like(raw_inp[:,:4])
+                 with torch.no_grad():
+                     for i in range(raw_inp.shape[0]):  # process every patch
+                         input_tensor = raw_inp[i][None,].clip(None, 2).to(self.device)
+                         raw_dn[i] = self.yond.net(input_tensor, t).clamp(0,None)
+
+                 # Use the improved big_image_merge function
+                 raw_dn = big_image_merge(raw_dn, metadata, blend_mode='avg')
+
+                 ################# Inverse VST transform #################
+                 raw_vst = raw_dn[0].permute(1,2,0).detach().cpu().numpy()
+                 raw_vst = raw_vst * (upper - lower) + lower
+                 raw_vst = inverse_VST(raw_vst, self.p['sigma'], gain=self.p['gain']) / self.p['scale']
+
+                 reg, self.mask_data = SimpleNLF(rggb2bayer(lr_raw), rggb2bayer(raw_vst), k=13,
+                                                 setting={'mode':'collab', 'thr_mode':'score3', 'ransac': self.p['ransac']})
+                 self.p['gain'] = reg[0] * self.p['scale']
+                 self.p['sigma'] = np.sqrt(max(reg[1], 0)) * self.p['scale']
+
+             # Generate the visualization
+             mask_img = self._visualize_mask()
+             log(f"Noise estimation done: gain={self.p['gain']:.2f}, sigma={self.p['sigma']:.2f}")
+             gr.Success(f"Noise estimation done: gain={self.p['gain']:.2f}, sigma={self.p['sigma']:.2f}", duration=2)
+             return mask_img, float(f"{self.p['gain']:.2f}"), float(f"{self.p['sigma']:.2f}")
+         except Exception as e:
+             gr.Error(f"Noise estimation failed: {str(e)}")
+             raise RuntimeError(f"Noise estimation failed: {str(e)}") from e
+
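+     # Note on the VST steps used above and in enhance_image below: VST/inverse_VST are
+     # imported from isp_algos; given the (sigma, gain) signature this is presumably a
+     # generalized-Anscombe-style transform that approximately Gaussianizes the
+     # Poisson-Gaussian raw noise, with BiasLUT providing the EM-VST correction for the
+     # expectation bias of the transformed noisy image. (Assumption: the exact form lives
+     # in dist/isp_algos, which is not part of this commit.)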
+     def enhance_image(self, gain, sigma, sigsnr, ddim_mode, patch_size):
+         """Run image enhancement (denoising)"""
+         if not self.yond:
+             log('Please load a model first')
+             raise RuntimeError("Please load a model first")
+
+         try:
+             gr.Warning("Enhancing image...")
+             log('Enhancing image...')
+             # Update processing parameters
+             self.p['ddim_mode'] = ddim_mode
+             self.update_param('gain', gain)
+             self.update_param('sigma', sigma)
+             self.update_param('sigsnr', sigsnr)
+
+             # Preprocess the data
+             processed = ((self.raw_data - self.p['bl']) / self.p['scale'])
+             lr_raw = bayer2rggb(processed) * self.p['ratio']
+             lr_raw_np = lr_raw * self.p['scale']
+
+             bias_base = np.maximum(lr_raw_np, 0)
+             bias = self.bias_lut.get_lut(bias_base, K=self.p['gain'], sigGs=self.p['sigma'])
+             raw_vst = VST(lr_raw_np, self.p['sigma'], gain=self.p['gain'])
+             raw_vst = raw_vst - bias
+
+             ################# VST transform #################
+             lower = VST(0, self.p['sigma'], gain=self.p['gain'])
+             upper = VST(self.p['scale'], self.p['sigma'], gain=self.p['gain'])
+             nsr = 1 / (upper - lower)
+             raw_vst = (raw_vst - lower) / (upper - lower)
+
+             ################# Prepare for denoising #################
+             raw_vst = torch.from_numpy(raw_vst).float().to(self.device).permute(2,0,1)[None,]
+             if 'guided' in self.yond.arch:
+                 t = torch.tensor(nsr*self.p['sigsnr'], dtype=raw_vst.dtype, device=self.device)
+
+             # Denoise & pad
+             target_size = patch_size  # target patch size, adjust as needed (GPU: 1024)
+             overlap_ratio = 1/8       # overlap ratio, adjust as needed
+
+             # Use the improved big_image_split function
+             raw_inp, metadata = big_image_split(raw_vst, target_size, overlap_ratio)
+
+             raw_dn = torch.zeros_like(raw_inp[:,:4])
+             with torch.no_grad():
+                 if self.p['ddim_mode']:
+                     for i in range(raw_inp.shape[0]):  # process every patch
+                         print(f'Patch: {i+1}/{len(raw_dn)}')
+                         raw_dn[i] = ddim(raw_inp[i][None,].clip(None, 2), self.yond.net, t, epoch=self.p['epoch'],
+                                          sigma_t=self.p['sigma_t'], eta=self.p['eta_t'], sigma_corr=1.00)
+                 else:
+                     for i in range(raw_inp.shape[0]):  # process every patch
+                         input_tensor = raw_inp[i][None,].clip(None, 2)
+                         raw_dn[i] = self.yond.net(input_tensor, t).clamp(0,None)
+
+             # Use the improved big_image_merge function
+             raw_dn = big_image_merge(raw_dn, metadata, blend_mode='avg')
+
+             ################# Inverse VST transform #################
+             raw_vst = raw_dn[0].permute(1,2,0).detach().cpu().numpy()
+             raw_vst = raw_vst * (upper - lower) + lower
+             self.denoised_data = inverse_VST(raw_vst, self.p['sigma'], gain=self.p['gain']) / self.p['scale']
+
+             self.denoised_npy = rggb2bayer(self.denoised_data)
+             # Generate the result
+             result = self._generate_result()
+             log("Image enhancement done, please check the result")
+             gr.Success("Image enhancement done, please check the result")
+             return result
+         except Exception as e:
+             gr.Error(f"Image enhancement failed: {str(e)}")
+             raise RuntimeError(f"Enhancement failed: {str(e)}") from e
+
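+     # DDIM mode above iterates the denoiser self.p['epoch'] times with the schedule
+     # parameters sigma_t/eta_t taken from the config, instead of the single forward pass
+     # of the default branch. (The ddim() implementation itself ships in dist/isp_algos
+     # and is not shown in this commit.)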
+     # Private utility methods ------------------------------------------------
+     def _extract_color_params(self, raw):
+         """Extract color parameters from a RAW file"""
+         wb = np.array(raw.camera_whitebalance) / raw.camera_whitebalance[1]
+         ccm = raw.color_matrix[:3, :3].astype(np.float32)
+         return wb, ccm if ccm[0,0] != 0 else np.eye(3)
+
+     def _generate_preview(self):
+         """Generate a preview image"""
+         processed = (self.raw_data - self.p['bl']) / self.p['scale']
+         rgb = FastISP(bayer2rggb(processed)*self.p['ratio']*self.p['ispgain'],
+                       self.p['wb'], self.p['ccm'])
+         rgb = (rgb.clip(0, 1) * 255).astype(np.uint8)
+         preview_img = Image.fromarray(rgb)
+         return preview_img
+
+     def _visualize_mask(self):
+         """Visualize the noise mask"""
+         from matplotlib import pyplot as plt
+         # Check that the mask is single-channel
+         if self.mask_data.ndim != 2:
+             gr.Error("Input mask must be a 2D array")
+             raise ValueError("Input mask must be a 2D array")
+
+         # Build a lookup table from the viridis colormap
+         cmap = plt.cm.viridis
+         x = np.linspace(0, 1, 256)
+         lut = (cmap(x)[:, :3] * 255).astype(np.uint8)
+
+         # Scale mask values to 0-255 and convert to integer indices
+         mask_indices = (np.clip(self.mask_data, 0, 1) * 255).astype(np.uint8)
+
+         # Vectorized mapping via advanced indexing
+         rgb_img = lut[mask_indices]
+
+         # Resize and convert to a PIL image
+         rgb_img = cv2.resize(rgb_img, (self.p['w'], self.p['h']), interpolation=cv2.INTER_LINEAR)
+         mask_img = Image.fromarray(rgb_img)
+         return mask_img
+
+     def _generate_result(self):
+         """Generate the final RGB result"""
+         rgb = FastISP(self.denoised_data*self.p['ispgain'],
+                       self.p['wb'],
+                       self.p['ccm'])
+         self.denoised_rgb = Image.fromarray((rgb.clip(0, 1) * 255).astype(np.uint8))
+         return self.denoised_rgb
+
+     def save_result_npy(self):
+         """Save the result to an NPY file"""
+         if self.denoised_npy is None:
+             gr.Error("Please run image enhancement first")
+             raise RuntimeError("Please run image enhancement first")
+         with tempfile.NamedTemporaryFile(suffix=".npy", delete=False) as tmp_file:
+             tmp_file_path = tmp_file.name
+         np.save(tmp_file_path, self.denoised_npy.astype(np.float32))
+         return tmp_file_path
+
+     def save_result_png(self):
+         """Save the result to a PNG file"""
+         if self.denoised_npy is None:
+             gr.Error("Please run image enhancement first")
+             raise RuntimeError("Please run image enhancement first")
+         with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_file_png:
+             tmp_file_path_png = tmp_file_png.name
+         cv2.imwrite(tmp_file_path_png, np.array(self.denoised_rgb)[:,:,::-1])
+         return tmp_file_path_png
+
+ class YONDParser():
+     def __init__(self, yaml_path="runfiles/Gaussian/default_gru32n.yml"):
+         self.runfile = yaml_path
+         self.mode = 'eval'
+         self.debug = False
+         self.nofig = False
+         self.nohost = False
+         self.gpu = 0
+
+ class YOND_anytest():
+     def __init__(self, yaml_path, device):
+         # Initialization
+         self.device = device
+         self.parser = YONDParser(yaml_path)
+         self.initialization()
+
+     def initialization(self):
+         with open(self.parser.runfile, 'r', encoding="utf-8") as f:
+             self.args = yaml.load(f.read(), Loader=yaml.FullLoader)
+         self.mode = self.args['mode'] if self.parser.mode is None else self.parser.mode
+         if self.parser.debug is True:
+             self.args['num_workers'] = 0
+             warnings.warn('You are using debug mode, only main worker(cpu) is used!!!')
+         if 'clip' not in self.args['dst']:
+             self.args['dst']['clip'] = False
+         self.save_plot = False if self.parser.nofig else True
+         self.args['dst']['mode'] = self.mode
+         self.hostname, self.hostpath, self.multi_gpu = get_host_with_dir()
+         self.yond_dir = self.args['checkpoint']
+         if not self.parser.nohost:
+             for key in self.args:
+                 if 'dst' in key:
+                     self.args[key]['root_dir'] = f"{self.hostpath}/{self.args[key]['root_dir']}"
+         self.dst = self.args['dst']
+         self.arch = self.args['arch']
+         self.pipe = self.args['pipeline']
+         if self.pipe['bias_corr'] == 'none':
+             self.pipe['bias_corr'] = None
+
+         self.yond_name = self.args['model_name']
+         self.method_name = self.args['method_name']
+         self.fast_ckpt = self.args['fast_ckpt']
+         self.sample_dir = os.path.join(self.args['result_dir'], f"{self.method_name}")
+         os.makedirs(self.sample_dir, exist_ok=True)
+         os.makedirs('./logs', exist_ok=True)
+         # os.makedirs('./metrics', exist_ok=True)
+
+     def load_model(self, model_path):
+         # Load the model
+         self.net = globals()[self.arch['name']](self.arch)
+         model = torch.load(model_path, map_location='cpu')
+         self.net = load_weights(self.net, model, by_name=False)
+         self.net = self.net.to(self.device)
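+
+ # Typical call order for this backend, mirroring the WebUI tutorial in app.py:
+ # load_config() -> process_image() (or update_image()) -> estimate_noise() ->
+ # enhance_image() -> save_result_npy() / save_result_png().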
archs/Restormer.py ADDED
@@ -0,0 +1,397 @@
+ ## Restormer: Efficient Transformer for High-Resolution Image Restoration
+ ## Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, and Ming-Hsuan Yang
+ ## https://arxiv.org/abs/2111.09881
+
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from pdb import set_trace as stx
+ import numbers
+
+ from einops import rearrange
+
+
+
+ ##########################################################################
+ ## Layer Norm
+
+ def to_3d(x):
+     return rearrange(x, 'b c h w -> b (h w) c')
+
+ def to_4d(x,h,w):
+     return rearrange(x, 'b (h w) c -> b c h w',h=h,w=w)
+
+ class BiasFree_LayerNorm(nn.Module):
+     def __init__(self, normalized_shape):
+         super(BiasFree_LayerNorm, self).__init__()
+         if isinstance(normalized_shape, numbers.Integral):
+             normalized_shape = (normalized_shape,)
+         normalized_shape = torch.Size(normalized_shape)
+
+         assert len(normalized_shape) == 1
+
+         self.weight = nn.Parameter(torch.ones(normalized_shape))
+         self.normalized_shape = normalized_shape
+
+     def forward(self, x):
+         sigma = x.var(-1, keepdim=True, unbiased=False)
+         return x / torch.sqrt(sigma+1e-5) * self.weight
+
+ class WithBias_LayerNorm(nn.Module):
+     def __init__(self, normalized_shape):
+         super(WithBias_LayerNorm, self).__init__()
+         if isinstance(normalized_shape, numbers.Integral):
+             normalized_shape = (normalized_shape,)
+         normalized_shape = torch.Size(normalized_shape)
+
+         assert len(normalized_shape) == 1
+
+         self.weight = nn.Parameter(torch.ones(normalized_shape))
+         self.bias = nn.Parameter(torch.zeros(normalized_shape))
+         self.normalized_shape = normalized_shape
+
+     def forward(self, x):
+         mu = x.mean(-1, keepdim=True)
+         sigma = x.var(-1, keepdim=True, unbiased=False)
+         return (x - mu) / torch.sqrt(sigma+1e-5) * self.weight + self.bias
+
+
+ class LayerNorm(nn.Module):
+     def __init__(self, nf, LayerNorm_type):
+         super(LayerNorm, self).__init__()
+         if LayerNorm_type =='BiasFree':
+             self.body = BiasFree_LayerNorm(nf)
+         else:
+             self.body = WithBias_LayerNorm(nf)
+
+     def forward(self, x):
+         h, w = x.shape[-2:]
+         return to_4d(self.body(to_3d(x)), h, w)
+
+
+
+ ##########################################################################
+ ## Gated-Dconv Feed-Forward Network (GDFN)
+ class FeedForward(nn.Module):
+     def __init__(self, nf, ffn_expansion_factor, bias):
+         super(FeedForward, self).__init__()
+
+         hidden_features = int(nf*ffn_expansion_factor)
+
+         self.project_in = nn.Conv2d(nf, hidden_features*2, kernel_size=1, bias=bias)
+
+         self.dwconv = nn.Conv2d(hidden_features*2, hidden_features*2, kernel_size=3, stride=1, padding=1, groups=hidden_features*2, bias=bias)
+
+         self.project_out = nn.Conv2d(hidden_features, nf, kernel_size=1, bias=bias)
+
+     def forward(self, x):
+         x = self.project_in(x)
+         x1, x2 = self.dwconv(x).chunk(2, dim=1)
+         x = F.gelu(x1) * x2
+         x = self.project_out(x)
+         return x
+
+
+
+ ##########################################################################
+ ## Multi-DConv Head Transposed Self-Attention (MDTA)
+ class Attention(nn.Module):
+     def __init__(self, nf, num_heads, bias):
+         super(Attention, self).__init__()
+         self.num_heads = num_heads
+         self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
+
+         self.qkv = nn.Conv2d(nf, nf*3, kernel_size=1, bias=bias)
+         self.qkv_dwconv = nn.Conv2d(nf*3, nf*3, kernel_size=3, stride=1, padding=1, groups=nf*3, bias=bias)
+         self.project_out = nn.Conv2d(nf, nf, kernel_size=1, bias=bias)
+
+         self.emb_layers = nn.Sequential(
+             nn.Conv2d(1, nf, 1, 1, 0, bias=True),
+             nn.SiLU(inplace=True),
+             nn.Conv2d(nf, nf, 1, 1, 0, bias=True),
+             nn.SiLU(inplace=True),
+             nn.Conv2d(nf, nf*3*2, 1, 1, 0, bias=True)
+         )
+
+
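+     # emb_layers maps the noise-level signal t to per-channel (scale, shift) pairs;
+     # the forward pass below applies them as qkv * (1 + scale) + shift, a FiLM-style
+     # modulation that conditions the attention on the noise level.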
+     def forward(self, x, t):
+         """
+         x: [B, C, H, W]
+         t: time signal of shape [B, 1, 1, 1] or [B, 1, H, W]
+         """
+         b,c,h,w = x.shape
+
+         qkv = self.qkv_dwconv(self.qkv(x))
+         if len(t.shape) > 0 and t.shape[-1] != 1:
+             t = F.interpolate(t, size=x.shape[2:], mode='bilinear', align_corners=False)
+         scale, shift = self.emb_layers(t).chunk(2, dim=1)
+         qkv = qkv * (1+scale) + shift
+         q,k,v = qkv.chunk(3, dim=1)
+
+         q = rearrange(q, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
+         k = rearrange(k, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
+         v = rearrange(v, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
+
+         q = torch.nn.functional.normalize(q, dim=-1)
+         k = torch.nn.functional.normalize(k, dim=-1)
+
+         attn = (q @ k.transpose(-2, -1)) * self.temperature
+         attn = attn.softmax(dim=-1)
+
+         out = (attn @ v)
+
+         out = rearrange(out, 'b head c (h w) -> b (head c) h w', head=self.num_heads, h=h, w=w)
+
+         out = self.project_out(out)
+         return out
+
+
+
+ ##########################################################################
+ class TransformerBlock(nn.Module):
+     def __init__(self, nf, num_heads, ffn_expansion_factor, bias, LayerNorm_type):
+         super(TransformerBlock, self).__init__()
+
+         self.norm1 = LayerNorm(nf, LayerNorm_type)
+         self.attn = Attention(nf, num_heads, bias)
+         self.norm2 = LayerNorm(nf, LayerNorm_type)
+         self.ffn = FeedForward(nf, ffn_expansion_factor, bias)
+
+     def forward(self, x, t):
+         x = x + self.attn(self.norm1(x), t)
+         x = x + self.ffn(self.norm2(x))
+
+         return x
+
+
+
+ ##########################################################################
+ ## Overlapped image patch embedding with 3x3 Conv
+ class OverlapPatchEmbed(nn.Module):
+     def __init__(self, in_c=3, embed_dim=48, bias=False):
+         super(OverlapPatchEmbed, self).__init__()
+
+         self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=3, stride=1, padding=1, bias=bias)
+
+     def forward(self, x):
+         x = self.proj(x)
+
+         return x
+
+
+
+ ##########################################################################
+ ## Resizing modules
+ class Downsample(nn.Module):
+     def __init__(self, n_feat):
+         super(Downsample, self).__init__()
+
+         self.body = nn.Sequential(nn.Conv2d(n_feat, n_feat//2, kernel_size=3, stride=1, padding=1, bias=False),
+                                   nn.PixelUnshuffle(2))
+
+     def forward(self, x):
+         return self.body(x)
+
+ class Upsample(nn.Module):
+     def __init__(self, n_feat):
+         super(Upsample, self).__init__()
+
+         self.body = nn.Sequential(nn.Conv2d(n_feat, n_feat*2, kernel_size=3, stride=1, padding=1, bias=False),
+                                   nn.PixelShuffle(2))
+
+     def forward(self, x):
+         return self.body(x)
+
+ ##########################################################################
+ ##---------- Restormer -----------------------
+ class Restormer(nn.Module):
+     def __init__(self, args={}):
+         super().__init__()
+         self.args = args
+
+         # Set default parameters
+         self.in_nc = in_nc = self.args.get('in_nc', 3)
+         self.out_nc = out_nc = self.args.get('out_nc', 3)
+         self.nf = nf = self.args.get('nf', 48)
+         self.nb = nb = self.args.get('nb', [4, 6, 6, 8])
+         self.num_refinement_blocks = num_refinement_blocks = self.args.get('num_refinement_blocks', 4)
+         self.heads = heads = self.args.get('heads', [1, 2, 4, 8])
+         self.ffn_expansion_factor = ffn_expansion_factor = self.args.get('ffn_expansion_factor', 2.66)
+         self.bias = bias = self.args.get('bias', False)
+         self.LayerNorm_type = LayerNorm_type = self.args.get('LayerNorm_type', 'WithBias')
+         self.nframes = nframes = self.args.get('nframes', 1)
+         self.res = self.args.get('res', True)
+         self.rectified = self.args.get('rectified', False)
+         self.vpred = self.args.get('vpred', False)
+
+         self.patch_embed = OverlapPatchEmbed(in_nc*nframes, nf)
+
+         # Encoder blocks
+         self.encoder_level1 = nn.Sequential(*[
+             TransformerBlock(
+                 nf=nf,
+                 num_heads=heads[0],
+                 ffn_expansion_factor=ffn_expansion_factor,
+                 bias=bias,
+                 LayerNorm_type=LayerNorm_type
+             ) for i in range(nb[0])
+         ])
+
+         # Downsample Level 1 → Level 2
+         self.down1_2 = Downsample(nf)
+         self.encoder_level2 = nn.Sequential(*[
+             TransformerBlock(
+                 nf=int(nf * 2**1),
+                 num_heads=heads[1],
+                 ffn_expansion_factor=ffn_expansion_factor,
+                 bias=bias,
+                 LayerNorm_type=LayerNorm_type
+             ) for i in range(nb[1])
+         ])
+
+         # Downsample Level 2 → Level 3
+         self.down2_3 = Downsample(int(nf * 2**1))
+         self.encoder_level3 = nn.Sequential(*[
+             TransformerBlock(
+                 nf=int(nf * 2**2),
+                 num_heads=heads[2],
+                 ffn_expansion_factor=ffn_expansion_factor,
+                 bias=bias,
+                 LayerNorm_type=LayerNorm_type
+             ) for i in range(nb[2])
+         ])
+
+         # Downsample Level 3 → Level 4
+         self.down3_4 = Downsample(int(nf * 2**2))
+         self.latent = nn.Sequential(*[
+             TransformerBlock(
+                 nf=int(nf * 2**3),
+                 num_heads=heads[3],
+                 ffn_expansion_factor=ffn_expansion_factor,
+                 bias=bias,
+                 LayerNorm_type=LayerNorm_type
+             ) for i in range(nb[3])
+         ])
+
+         # Decoder blocks
+         # Upsample Level 4 → Level 3
+         self.up4_3 = Upsample(int(nf * 2**3))
+         self.reduce_chan_level3 = nn.Conv2d(
+             int(nf * 2**3),
+             int(nf * 2**2),
+             kernel_size=1,
+             bias=bias
+         )
+         self.decoder_level3 = nn.Sequential(*[
+             TransformerBlock(
+                 nf=int(nf * 2**2),
+                 num_heads=heads[2],
+                 ffn_expansion_factor=ffn_expansion_factor,
+                 bias=bias,
+                 LayerNorm_type=LayerNorm_type
+             ) for i in range(nb[2])
+         ])
+
+         # Upsample Level 3 → Level 2
+         self.up3_2 = Upsample(int(nf * 2**2))
+         self.reduce_chan_level2 = nn.Conv2d(
+             int(nf * 2**2),
+             int(nf * 2**1),
+             kernel_size=1,
+             bias=bias
+         )
+         self.decoder_level2 = nn.Sequential(*[
+             TransformerBlock(
+                 nf=int(nf * 2**1),
+                 num_heads=heads[1],
+                 ffn_expansion_factor=ffn_expansion_factor,
+                 bias=bias,
+                 LayerNorm_type=LayerNorm_type
+             ) for i in range(nb[1])
+         ])
+
+         # Upsample Level 2 → Level 1
+         self.up2_1 = Upsample(int(nf * 2**1))
+         self.decoder_level1 = nn.Sequential(*[
+             TransformerBlock(
+                 nf=int(nf * 2**1),
+                 num_heads=heads[0],
+                 ffn_expansion_factor=ffn_expansion_factor,
+                 bias=bias,
+                 LayerNorm_type=LayerNorm_type
+             ) for i in range(nb[0])
+         ])
+
+         # Refinement and output
+         self.refinement = nn.Sequential(*[
+             TransformerBlock(
+                 nf=int(nf * 2**1),
+                 num_heads=heads[0],
+                 ffn_expansion_factor=ffn_expansion_factor,
+                 bias=bias,
+                 LayerNorm_type=LayerNorm_type
+             ) for i in range(num_refinement_blocks)
+         ])
+
+         self.output = nn.Conv2d(
+             int(nf * 2**1),
+             out_nc,
+             kernel_size=3,
+             stride=1,
+             padding=1,
+             bias=bias
+         )
+
+     def layer_by_layer(self, modules, x, t):
+         # Manually route t to the modules that need it
+         for layer in modules:
+             if isinstance(layer, TransformerBlock):
+                 x = layer(x, t)  # pass t
+             else:
+                 x = layer(x)
+         return x
+
+     def forward(self, inp_img, t):
+         if self.rectified:
+             t = t / (1 + t)  # sigma -> t
+             inp_img = inp_img * (1-t)
+
+         # Encoder
+         inp_enc_level1 = self.patch_embed(inp_img)
+         out_enc_level1 = self.layer_by_layer(self.encoder_level1, inp_enc_level1, t)
+
+         inp_enc_level2 = self.down1_2(out_enc_level1)
+         out_enc_level2 = self.layer_by_layer(self.encoder_level2, inp_enc_level2, t)
+
+         inp_enc_level3 = self.down2_3(out_enc_level2)
+         out_enc_level3 = self.layer_by_layer(self.encoder_level3, inp_enc_level3, t)
+
+         inp_enc_level4 = self.down3_4(out_enc_level3)
+         latent = self.layer_by_layer(self.latent, inp_enc_level4, t)
+
+         # Decoder
+         inp_dec_level3 = self.up4_3(latent)
+         inp_dec_level3 = torch.cat([inp_dec_level3, out_enc_level3], 1)
+         inp_dec_level3 = self.reduce_chan_level3(inp_dec_level3)
+         out_dec_level3 = self.layer_by_layer(self.decoder_level3, inp_dec_level3, t)
+
+         inp_dec_level2 = self.up3_2(out_dec_level3)
+         inp_dec_level2 = torch.cat([inp_dec_level2, out_enc_level2], 1)
+         inp_dec_level2 = self.reduce_chan_level2(inp_dec_level2)
+         out_dec_level2 = self.layer_by_layer(self.decoder_level2, inp_dec_level2, t)
+
+         inp_dec_level1 = self.up2_1(out_dec_level2)
+         inp_dec_level1 = torch.cat([inp_dec_level1, out_enc_level1], 1)
+         out_dec_level1 = self.layer_by_layer(self.decoder_level1, inp_dec_level1, t)
+
+         # Refinement
+         out_dec_level1 = self.layer_by_layer(self.refinement, out_dec_level1, t)
+
+         out = self.output(out_dec_level1)
+
+         if not self.vpred:
+             if self.res: out += inp_img
+             if self.rectified: out /= (1-t)
+
+         return out
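+
+ # Usage sketch (illustrative, not part of the original file): the network is built from
+ # an args dict, e.g. net = Restormer({'in_nc': 4, 'out_nc': 4, 'nf': 48}), and called as
+ # out = net(x, t) with x: [B, in_nc, H, W] and a noise-level tensor t broadcastable
+ # against the feature maps, as done by YOND_Backend in app_function.py.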
archs/SCUNet.py ADDED
@@ -0,0 +1,307 @@
+ # -*- coding: utf-8 -*-
+ import math
+ import torch
+ import torch.nn as nn
+ import numpy as np
+ # from thop import profile
+ from einops import rearrange
+ from einops.layers.torch import Rearrange, Reduce
+ from timm.models.layers import trunc_normal_, DropPath
+ import torch.nn.functional as F
+
+ class WMSA(nn.Module):
+     """ Self-attention module in Swin Transformer
+     """
+
+     def __init__(self, input_dim, output_dim, head_dim, window_size, type):
+         super(WMSA, self).__init__()
+         self.input_dim = input_dim
+         self.output_dim = output_dim
+         self.head_dim = head_dim
+         self.scale = self.head_dim ** -0.5
+         self.n_heads = input_dim//head_dim
+         self.window_size = window_size
+         self.type=type
+         self.embedding_layer = nn.Linear(self.input_dim, 3*self.input_dim, bias=True)
+
+         # TODO recover
+         # self.relative_position_params = nn.Parameter(torch.zeros(self.n_heads, 2 * window_size - 1, 2 * window_size -1))
+         self.relative_position_params = nn.Parameter(torch.zeros((2 * window_size - 1)*(2 * window_size -1), self.n_heads))
+
+         self.linear = nn.Linear(self.input_dim, self.output_dim)
+
+         trunc_normal_(self.relative_position_params, std=.02)
+         self.relative_position_params = torch.nn.Parameter(self.relative_position_params.view(2*window_size-1, 2*window_size-1, self.n_heads).transpose(1,2).transpose(0,1))
+
+     def generate_mask(self, h, w, p, shift):
+         """ generating the mask of SW-MSA
+         Args:
+             shift: shift parameters in CyclicShift.
+         Returns:
+             attn_mask: should be (1 1 w p p),
+         """
+         # supporting square.
+         attn_mask = torch.zeros(h, w, p, p, p, p, dtype=torch.bool, device=self.relative_position_params.device)
+         if self.type == 'W':
+             return attn_mask
+
+         s = p - shift
+         attn_mask[-1, :, :s, :, s:, :] = True
+         attn_mask[-1, :, s:, :, :s, :] = True
+         attn_mask[:, -1, :, :s, :, s:] = True
+         attn_mask[:, -1, :, s:, :, :s] = True
+         attn_mask = rearrange(attn_mask, 'w1 w2 p1 p2 p3 p4 -> 1 1 (w1 w2) (p1 p2) (p3 p4)')
+         return attn_mask
+
+     def forward(self, x):
+         """ Forward pass of Window Multi-head Self-attention module.
+         Args:
+             x: input tensor with shape of [b h w c];
+             attn_mask: attention mask, fill -inf where the value is True;
+         Returns:
+             output: tensor shape [b h w c]
+         """
+         if self.type!='W': x = torch.roll(x, shifts=(-(self.window_size//2), -(self.window_size//2)), dims=(1,2))
+         x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size)
+         h_windows = x.size(1)
+         w_windows = x.size(2)
+         # square validation
+         # assert h_windows == w_windows
+
+         x = rearrange(x, 'b w1 w2 p1 p2 c -> b (w1 w2) (p1 p2) c', p1=self.window_size, p2=self.window_size)
+         qkv = self.embedding_layer(x)
+         q, k, v = rearrange(qkv, 'b nw np (threeh c) -> threeh b nw np c', c=self.head_dim).chunk(3, dim=0)
+         sim = torch.einsum('hbwpc,hbwqc->hbwpq', q, k) * self.scale
+         # Adding learnable relative embedding
+         sim = sim + rearrange(self.relative_embedding(), 'h p q -> h 1 1 p q')
+         # Using Attn Mask to distinguish different subwindows.
+         if self.type != 'W':
+             attn_mask = self.generate_mask(h_windows, w_windows, self.window_size, shift=self.window_size//2)
+             sim = sim.masked_fill_(attn_mask, float("-inf"))
+
+         probs = nn.functional.softmax(sim, dim=-1)
+         output = torch.einsum('hbwij,hbwjc->hbwic', probs, v)
+         output = rearrange(output, 'h b w p c -> b w p (h c)')
+         output = self.linear(output)
+         output = rearrange(output, 'b (w1 w2) (p1 p2) c -> b (w1 p1) (w2 p2) c', w1=h_windows, p1=self.window_size)
+
+         if self.type!='W': output = torch.roll(output, shifts=(self.window_size//2, self.window_size//2), dims=(1,2))
+         return output
+
+     def relative_embedding(self):
+         cord = torch.tensor(np.array([[i, j] for i in range(self.window_size) for j in range(self.window_size)]))
+         relation = cord[:, None, :] - cord[None, :, :] + self.window_size -1
+         # negative is allowed
+         return self.relative_position_params[:, relation[:,:,0].long(), relation[:,:,1].long()]
+
+
99
+ def __init__(self, input_dim, output_dim, head_dim, window_size, drop_path, type='W', input_resolution=None):
100
+ """ SwinTransformer Block
101
+ """
102
+ super(Block, self).__init__()
103
+ self.input_dim = input_dim
104
+ self.output_dim = output_dim
105
+ assert type in ['W', 'SW']
106
+ self.type = type
107
+ if input_resolution <= window_size:
108
+ self.type = 'W'
109
+
110
+ # print("Block Initial Type: {}, drop_path_rate:{:.6f}".format(self.type, drop_path))
111
+ self.ln1 = nn.LayerNorm(input_dim)
112
+ self.msa = WMSA(input_dim, input_dim, head_dim, window_size, self.type)
113
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
114
+ self.ln2 = nn.LayerNorm(input_dim)
115
+ self.mlp = nn.Sequential(
116
+ nn.Linear(input_dim, 4 * input_dim),
117
+ nn.GELU(),
118
+ nn.Linear(4 * input_dim, output_dim),
119
+ )
120
+
121
+ def forward(self, x):
122
+ x = x + self.drop_path(self.msa(self.ln1(x)))
123
+ x = x + self.drop_path(self.mlp(self.ln2(x)))
124
+ return x
125
+
126
+
127
+ class ConvTransBlock(nn.Module):
128
+ def __init__(self, conv_dim, trans_dim, head_dim, window_size, drop_path, type='W', input_resolution=None):
129
+ """ SwinTransformer and Conv Block
130
+ """
131
+ super(ConvTransBlock, self).__init__()
132
+ self.conv_dim = conv_dim
133
+ self.trans_dim = trans_dim
134
+ self.head_dim = head_dim
135
+ self.window_size = window_size
136
+ self.drop_path = drop_path
137
+ self.type = type
138
+ self.input_resolution = input_resolution
139
+
140
+ assert self.type in ['W', 'SW']
141
+ if self.input_resolution <= self.window_size:
142
+ self.type = 'W'
143
+
144
+ self.trans_block = Block(self.trans_dim, self.trans_dim, self.head_dim, self.window_size, self.drop_path, self.type, self.input_resolution)
145
+ self.conv1_1 = nn.Conv2d(self.conv_dim+self.trans_dim, self.conv_dim+self.trans_dim, 1, 1, 0, bias=True)
146
+ self.conv1_2 = nn.Conv2d(self.conv_dim+self.trans_dim, self.conv_dim+self.trans_dim, 1, 1, 0, bias=True)
147
+
148
+ self.conv_block = nn.Sequential(
149
+ nn.Conv2d(self.conv_dim, self.conv_dim, 3, 1, 1, bias=False),
150
+ nn.ReLU(True),
151
+ nn.Conv2d(self.conv_dim, self.conv_dim, 3, 1, 1, bias=False)
152
+ )
153
+
154
+ self.gamma = nn.Sequential(
155
+ nn.Conv2d(1, self.conv_dim+self.trans_dim, 1, 1, 0, bias=True),
156
+ nn.SiLU(inplace=True),
157
+ nn.Conv2d(self.conv_dim+self.trans_dim, self.conv_dim+self.trans_dim, 1, 1, 0, bias=True),
158
+ )
159
+ self.beta = nn.Sequential(
160
+ nn.SiLU(),
161
+ nn.Conv2d(self.conv_dim+self.trans_dim, self.conv_dim+self.trans_dim, 1, 1, 0, bias=True),
162
+ )
163
+
164
+ def forward(self, x, t):
165
+ if len(t.shape) > 0:
166
+ t = F.interpolate(t, size=x.shape[2:], mode='bilinear', align_corners=False)
167
+ k = self.gamma(t)
168
+ conv_trans_x = (1 + k) * self.conv1_1(x) + self.beta(k) # f_out = (1+α) f_in + β
169
+ conv_x, trans_x = torch.split(conv_trans_x, (self.conv_dim, self.trans_dim), dim=1)
170
+ conv_x = self.conv_block(conv_x) + conv_x
171
+ trans_x = Rearrange('b c h w -> b h w c')(trans_x)
172
+ trans_x = self.trans_block(trans_x)
173
+ trans_x = Rearrange('b h w c -> b c h w')(trans_x)
174
+ res = self.conv1_2(torch.cat((conv_x, trans_x), dim=1))
175
+ x = x + res
176
+
177
+ return x
178
+
179
+
180
+ class SCUNet(nn.Module):
181
+
182
+ def __init__(self, args={}):
183
+ super().__init__()
184
+ self.args = args
185
+ self.res = self.args['res'] if 'res' in args else False
186
+ self.rectified = self.args['rectified'] if 'rectified' in args else False
187
+ in_nc = self.args['in_nc'] if 'in_nc' in args else 4
188
+ out_nc = self.args['out_nc'] if 'out_nc' in args else 4
189
+ config = self.args['config'] if 'config' in args else [2,2,2,2,2,2,2]
190
+ dim = self.args['nf'] if 'nf' in args else 64
191
+ drop_path_rate = self.args['drop_path_rate'] if 'drop_path_rate' in args else 0.0
192
+ input_resolution = self.args['input_resolution'] if 'input_resolution' in args else 256
193
+ self.head_dim = 32
194
+ self.window_size = 8
195
+
196
+ # drop path rate for each layer
197
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(config))]
198
+
199
+ self.m_head = [nn.Conv2d(in_nc, dim, 3, 1, 1, bias=False)]
200
+
201
+ begin = 0
202
+ self.m_down1 = [ConvTransBlock(dim//2, dim//2, self.head_dim, self.window_size, dpr[i+begin], 'W' if not i%2 else 'SW', input_resolution)
203
+ for i in range(config[0])] + \
204
+ [nn.Conv2d(dim, 2*dim, 2, 2, 0, bias=False)]
205
+
206
+ begin += config[0]
207
+ self.m_down2 = [ConvTransBlock(dim, dim, self.head_dim, self.window_size, dpr[i+begin], 'W' if not i%2 else 'SW', input_resolution//2)
208
+ for i in range(config[1])] + \
209
+ [nn.Conv2d(2*dim, 4*dim, 2, 2, 0, bias=False)]
210
+
211
+ begin += config[1]
212
+ self.m_down3 = [ConvTransBlock(2*dim, 2*dim, self.head_dim, self.window_size, dpr[i+begin], 'W' if not i%2 else 'SW',input_resolution//4)
213
+ for i in range(config[2])] + \
214
+ [nn.Conv2d(4*dim, 8*dim, 2, 2, 0, bias=False)]
215
+
216
+ begin += config[2]
217
+ self.m_body = [ConvTransBlock(4*dim, 4*dim, self.head_dim, self.window_size, dpr[i+begin], 'W' if not i%2 else 'SW', input_resolution//8)
218
+ for i in range(config[3])]
219
+
220
+ begin += config[3]
221
+ self.m_up3 = [nn.ConvTranspose2d(8*dim, 4*dim, 2, 2, 0, bias=False),] + \
222
+ [ConvTransBlock(2*dim, 2*dim, self.head_dim, self.window_size, dpr[i+begin], 'W' if not i%2 else 'SW',input_resolution//4)
223
+ for i in range(config[4])]
224
+
225
+ begin += config[4]
226
+ self.m_up2 = [nn.ConvTranspose2d(4*dim, 2*dim, 2, 2, 0, bias=False),] + \
227
+ [ConvTransBlock(dim, dim, self.head_dim, self.window_size, dpr[i+begin], 'W' if not i%2 else 'SW', input_resolution//2)
228
+ for i in range(config[5])]
229
+
230
+ begin += config[5]
231
+ self.m_up1 = [nn.ConvTranspose2d(2*dim, dim, 2, 2, 0, bias=False),] + \
232
+ [ConvTransBlock(dim//2, dim//2, self.head_dim, self.window_size, dpr[i+begin], 'W' if not i%2 else 'SW', input_resolution)
233
+ for i in range(config[6])]
234
+
235
+ self.m_tail = [nn.Conv2d(dim, out_nc, 3, 1, 1, bias=False)]
236
+
237
+ self.m_head = nn.Sequential(*self.m_head)
238
+ self.m_down1 = nn.Sequential(*self.m_down1)
239
+ self.m_down2 = nn.Sequential(*self.m_down2)
240
+ self.m_down3 = nn.Sequential(*self.m_down3)
241
+ self.m_body = nn.Sequential(*self.m_body)
242
+ self.m_up3 = nn.Sequential(*self.m_up3)
243
+ self.m_up2 = nn.Sequential(*self.m_up2)
244
+ self.m_up1 = nn.Sequential(*self.m_up1)
245
+ self.m_tail = nn.Sequential(*self.m_tail)
246
+ #self.apply(self._init_weights)
247
+
248
+ def layer_by_layer(self, modules, x, t):
249
+ # route the conditioning map t only to the modules that accept it
250
+ for layer in modules:
251
+ if isinstance(layer, ConvTransBlock):
252
+ x = layer(x, t)  # pass the conditioning map through
253
+ else:
254
+ x = layer(x)
255
+ return x
256
+
257
+ def forward(self, x0, t):
258
+ h, w = x0.size()[-2:]
259
+ if self.rectified:
260
+ t = t / (1 + t)  # map sigma to t = sigma / (1 + sigma) in [0, 1)
261
+ x0 = x0 * (1-t)
262
+
263
+ paddingBottom = int(np.ceil(h/64)*64-h)
264
+ paddingRight = int(np.ceil(w/64)*64-w)
265
+ x0 = nn.ReplicationPad2d((0, paddingRight, 0, paddingBottom))(x0)
266
+ if len(t.shape) > 0:
267
+ t = F.interpolate(t, size=x0.shape[2:], mode='bilinear', align_corners=False)
268
+ t = nn.ReplicationPad2d((0, paddingRight, 0, paddingBottom))(t)
269
+ else:
270
+ t = torch.zeros_like(x0[:, :1]) + t  # broadcast scalar t to a single-channel map matching x0
271
+
272
+ x1 = self.m_head(x0)
273
+ x2 = self.layer_by_layer(self.m_down1, x1, t)
274
+ x3 = self.layer_by_layer(self.m_down2, x2, t)
275
+ x4 = self.layer_by_layer(self.m_down3, x3, t)
276
+ x = self.layer_by_layer(self.m_body, x4, t)
277
+ x = self.layer_by_layer(self.m_up3, x+x4, t)
278
+ x = self.layer_by_layer(self.m_up2, x+x3, t)
279
+ x = self.layer_by_layer(self.m_up1, x+x2, t)
280
+ x = self.m_tail(x+x1)
281
+
282
+ if self.res: x = x + x0
283
+ x = x[..., :h, :w]
284
+
285
+ if self.rectified:
286
+ x = x / (1 - t[..., :h, :w])  # crop t to the unpadded size before rescaling
287
+
288
+ return x
289
+
290
+ def _init_weights(self, m):
291
+ if isinstance(m, nn.Linear):
292
+ trunc_normal_(m.weight, std=.02)
293
+ if m.bias is not None:
294
+ nn.init.constant_(m.bias, 0)
295
+ elif isinstance(m, nn.LayerNorm):
296
+ nn.init.constant_(m.bias, 0)
297
+ nn.init.constant_(m.weight, 1.0)
298
+
299
+
300
+ if __name__ == '__main__':
301
+
302
+ # torch.cuda.empty_cache()
303
+ net = SCUNet({})
304
+
305
+ x = torch.randn((2, 4, 640, 128))
306
+ x = net(x, torch.randn((2, 1, 1, 1)))
307
+ print(x.shape)
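The test above drives the network with a per-image scalar-like t. As an illustrative sketch (default config assumed), the same forward pass also accepts a coarse spatial noise map, which forward() upsamples and pads internally:

    import torch

    net = SCUNet({'in_nc': 4, 'out_nc': 4, 'res': True})
    x = torch.randn(1, 4, 256, 256)           # packed Bayer input
    sigma = torch.full((1, 1, 64, 64), 0.05)  # coarse noise map, resized inside forward()
    out = net(x, sigma)
    print(out.shape)                          # torch.Size([1, 4, 256, 256])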
archs/Unet.py ADDED
@@ -0,0 +1,263 @@
1
+ from .modules import *
2
+
3
+ def data_normalize(data, lower=None, upper=None):
+     if lower is None:
+         lower = 0  # per-sample minima were disabled; use a fixed lower bound
+     if upper is None:
+         # clip: nothing should realistically be darker overall than 1e-5
+         upper = torch.amax(data, dim=(1,2,3), keepdim=True).clip(1e-5, 1)
9
+ data = (data - lower) / (upper - lower)
10
+ return data, lower, upper
11
+
12
+ def data_inv_normalize(data, lower, upper):
13
+ data = data * (upper - lower) + lower
14
+ return data
15
+
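A quick round-trip sketch for the two helpers above: normalization maps each sample into [0, 1] by its clipped per-sample maximum, and the inverse exactly undoes it.

    import torch

    raw = torch.rand(2, 4, 8, 8) * 0.3        # dim raw-domain data
    normed, lb, ub = data_normalize(raw)
    restored = data_inv_normalize(normed, lb, ub)
    assert torch.allclose(raw, restored, atol=1e-6)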
16
+ # SID Unet
17
+ class UNetSeeInDark(nn.Module):
18
+ def __init__(self, args=None):
19
+ super().__init__()
20
+ self.args = args
21
+ self.nframes = nframes = args['nframes'] if 'nframes' in args else 1
22
+ self.cf = 0
23
+ self.res = args['res']
24
+ self.norm = args['norm'] if 'norm' in args else False
26
+ nf = args['nf']
27
+ in_nc = args['in_nc']
28
+ out_nc = args['out_nc']
29
+
30
+ self.conv1_1 = nn.Conv2d(in_nc*nframes, nf, kernel_size=3, stride=1, padding=1)
31
+ self.conv1_2 = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1)
32
+ self.pool1 = nn.MaxPool2d(kernel_size=2)
33
+
34
+ self.conv2_1 = nn.Conv2d(nf, nf*2, kernel_size=3, stride=1, padding=1)
35
+ self.conv2_2 = nn.Conv2d(nf*2, nf*2, kernel_size=3, stride=1, padding=1)
36
+ self.pool2 = nn.MaxPool2d(kernel_size=2)
37
+
38
+ self.conv3_1 = nn.Conv2d(nf*2, nf*4, kernel_size=3, stride=1, padding=1)
39
+ self.conv3_2 = nn.Conv2d(nf*4, nf*4, kernel_size=3, stride=1, padding=1)
40
+ self.pool3 = nn.MaxPool2d(kernel_size=2)
41
+
42
+ self.conv4_1 = nn.Conv2d(nf*4, nf*8, kernel_size=3, stride=1, padding=1)
43
+ self.conv4_2 = nn.Conv2d(nf*8, nf*8, kernel_size=3, stride=1, padding=1)
44
+ self.pool4 = nn.MaxPool2d(kernel_size=2)
45
+
46
+ self.conv5_1 = nn.Conv2d(nf*8, nf*16, kernel_size=3, stride=1, padding=1)
47
+ self.conv5_2 = nn.Conv2d(nf*16, nf*16, kernel_size=3, stride=1, padding=1)
48
+
49
+ self.upv6 = nn.ConvTranspose2d(nf*16, nf*8, 2, stride=2)
50
+ self.conv6_1 = nn.Conv2d(nf*16, nf*8, kernel_size=3, stride=1, padding=1)
51
+ self.conv6_2 = nn.Conv2d(nf*8, nf*8, kernel_size=3, stride=1, padding=1)
52
+
53
+ self.upv7 = nn.ConvTranspose2d(nf*8, nf*4, 2, stride=2)
54
+ self.conv7_1 = nn.Conv2d(nf*8, nf*4, kernel_size=3, stride=1, padding=1)
55
+ self.conv7_2 = nn.Conv2d(nf*4, nf*4, kernel_size=3, stride=1, padding=1)
56
+
57
+ self.upv8 = nn.ConvTranspose2d(nf*4, nf*2, 2, stride=2)
58
+ self.conv8_1 = nn.Conv2d(nf*4, nf*2, kernel_size=3, stride=1, padding=1)
59
+ self.conv8_2 = nn.Conv2d(nf*2, nf*2, kernel_size=3, stride=1, padding=1)
60
+
61
+ self.upv9 = nn.ConvTranspose2d(nf*2, nf, 2, stride=2)
62
+ self.conv9_1 = nn.Conv2d(nf*2, nf, kernel_size=3, stride=1, padding=1)
63
+ self.conv9_2 = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1)
64
+
65
+ self.conv10_1 = nn.Conv2d(nf, out_nc, kernel_size=1, stride=1)
66
+ self.relu = nn.LeakyReLU(0.2, inplace=True)
67
+
68
+ def forward(self, x):
69
+ if self.norm:
70
+ x, lb, ub = data_normalize(x)
71
+ conv1 = self.relu(self.conv1_1(x))
72
+ conv1 = self.relu(self.conv1_2(conv1))
73
+ pool1 = self.pool1(conv1)
74
+
75
+ conv2 = self.relu(self.conv2_1(pool1))
76
+ conv2 = self.relu(self.conv2_2(conv2))
77
+ pool2 = self.pool2(conv2)
78
+
79
+ conv3 = self.relu(self.conv3_1(pool2))
80
+ conv3 = self.relu(self.conv3_2(conv3))
81
+ pool3 = self.pool3(conv3)
82
+
83
+ conv4 = self.relu(self.conv4_1(pool3))
84
+ conv4 = self.relu(self.conv4_2(conv4))
85
+ pool4 = self.pool4(conv4)
86
+
87
+ conv5 = self.relu(self.conv5_1(pool4))
88
+ conv5 = self.relu(self.conv5_2(conv5))
89
+
90
+ up6 = self.upv6(conv5)
91
+ up6 = torch.cat([up6, conv4], 1)
92
+ conv6 = self.relu(self.conv6_1(up6))
93
+ conv6 = self.relu(self.conv6_2(conv6))
94
+
95
+ up7 = self.upv7(conv6)
96
+ up7 = torch.cat([up7, conv3], 1)
97
+ conv7 = self.relu(self.conv7_1(up7))
98
+ conv7 = self.relu(self.conv7_2(conv7))
99
+
100
+ up8 = self.upv8(conv7)
101
+ up8 = torch.cat([up8, conv2], 1)
102
+ conv8 = self.relu(self.conv8_1(up8))
103
+ conv8 = self.relu(self.conv8_2(conv8))
104
+
105
+ up9 = self.upv9(conv8)
106
+ up9 = torch.cat([up9, conv1], 1)
107
+ conv9 = self.relu(self.conv9_1(up9))
108
+ conv9 = self.relu(self.conv9_2(conv9))
109
+
110
+ out = self.conv10_1(conv9)
111
+ if self.res:
112
+ out = out + x[:, self.cf*4:self.cf*4+4]
113
+
114
+ if self.norm:
115
+ out = data_inv_normalize(out, lb, ub)
116
+
117
+ return out
118
+
119
+ def get_updown_module(nf, updown_type='conv', mode='up'):
120
+ if updown_type == 'conv':
121
+ if mode == 'down':
122
+ return conv3x3(nf, nf*2)
123
+ elif mode == 'up':
124
+ return nn.ConvTranspose2d(nf, nf//2, 2, stride=2)
125
+ elif updown_type in ['bilinear', 'bicubic', 'nearest']:
126
+ if mode == 'down':
127
+ return nn.Sequential(
128
+ nn.Upsample(scale_factor=0.5, mode=updown_type),
129
+ nn.Conv2d(nf, nf*2, kernel_size=3, stride=1, padding=1),
130
+ )
131
+ if mode == 'up':
132
+ return nn.Sequential(
133
+ nn.Upsample(scale_factor=2, mode=updown_type),
134
+ nn.Conv2d(nf, nf//2, kernel_size=3, stride=1, padding=1),
135
+ )
136
+ elif updown_type == 'shuffle':
137
+ if mode == 'down':
138
+ return nn.Sequential(
139
+ nn.PixelUnshuffle(2),
140
+ nn.Conv2d(nf*4, nf*2, kernel_size=3, stride=1, padding=1),
141
+ )
142
+ if mode == 'up':
143
+ return nn.Sequential(
144
+ nn.PixelShuffle(2),
145
+ nn.Conv2d(nf//4, nf//2, kernel_size=3, stride=1, padding=1),
146
+ )
147
+ elif updown_type in ['haar','db1','db2','db3']:
148
+ if mode == 'down':
149
+ return nn.Sequential(
150
+ WaveletDecompose(updown_type),
151
+ nn.Conv2d(nf*4, nf*2, kernel_size=3, stride=1, padding=1),
152
+ )
153
+ if mode == 'up':
154
+ return nn.Sequential(
155
+ WaveletReconstruct(updown_type),
156
+ nn.Conv2d(nf//4, nf//2, kernel_size=3, stride=1, padding=1),
157
+ )
+ raise ValueError('unsupported updown_type/mode: {}/{}'.format(updown_type, mode))
158
+
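An illustrative sketch of the factory above: each 'down' module doubles channels while halving resolution, and each 'up' does the reverse (shown here for the 'shuffle' variant).

    import torch

    down = get_updown_module(32, 'shuffle', mode='down')  # PixelUnshuffle + 3x3 conv
    up = get_updown_module(64, 'shuffle', mode='up')      # PixelShuffle + 3x3 conv
    feat = torch.randn(1, 32, 64, 64)
    print(down(feat).shape)                               # torch.Size([1, 64, 32, 32])
    print(up(down(feat)).shape)                           # torch.Size([1, 32, 64, 64])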
159
+ class GuidedResUnet(nn.Module):
160
+ def __init__(self, args=None):
161
+ super().__init__()
162
+ self.args = args
163
+ self.cf = 0
164
+ self.nframes = nframes = args.get('nframes', 1)
165
+ self.res = args.get('res', False)
166
+ self.norm = args.get('norm', False)
167
+ self.updown_type = args.get('updown_type', 'conv')
168
+ self.downsample = args.get('downsample', False)
169
+ if self.downsample == 'shuffle':
170
+ self.down_fn = nn.PixelUnshuffle(2)
171
+ self.up_fn = nn.PixelShuffle(2)
172
+ elif self.downsample:
173
+ self.down_fn = WaveletDecompose(mode=self.downsample)
174
+ self.up_fn = WaveletReconstruct(mode=self.downsample)
175
+ ext = 4 if self.downsample else 1
176
+ nf = args.get('nf', 32)
177
+ in_nc = args.get('in_nc', 4)
178
+ out_nc = args.get('out_nc', 4)
179
+
180
+ self.conv_in = nn.Conv2d(in_nc*nframes*ext, nf, kernel_size=3, stride=1, padding=1)
181
+
182
+ self.conv1 = GuidedResidualBlock(nf, nf, is_activate=False)
183
+ self.pool1 = get_updown_module(nf, self.updown_type, mode='down')
184
+
185
+ self.conv2 = GuidedResidualBlock(nf*2, nf*2, is_activate=False)
186
+ self.pool2 = get_updown_module(nf*2, self.updown_type, mode='down')
187
+
188
+ self.conv3 = GuidedResidualBlock(nf*4, nf*4, is_activate=False)
189
+ self.pool3 = get_updown_module(nf*4, self.updown_type, mode='down')
190
+
191
+ self.conv4 = GuidedResidualBlock(nf*8, nf*8, is_activate=False)
192
+ self.pool4 = get_updown_module(nf*8, self.updown_type, mode='down')
193
+
194
+ self.conv5 = GuidedResidualBlock(nf*16, nf*16, is_activate=False)
195
+
196
+ self.upv6 = get_updown_module(nf*16, self.updown_type, mode='up')
197
+ self.conv6 = GuidedResidualBlock(nf*16, nf*8, is_activate=False)
198
+
199
+ self.upv7 = get_updown_module(nf*8, self.updown_type, mode='up')
200
+ self.conv7 = GuidedResidualBlock(nf*8, nf*4, is_activate=False)
201
+
202
+ self.upv8 = get_updown_module(nf*4, self.updown_type, mode='up')
203
+ self.conv8 = GuidedResidualBlock(nf*4, nf*2, is_activate=False)
204
+
205
+ self.upv9 = get_updown_module(nf*2, self.updown_type, mode='up')
206
+ self.conv9 = GuidedResidualBlock(nf*2, nf, is_activate=False)
207
+
208
+ self.conv10 = nn.Conv2d(nf, out_nc*ext, kernel_size=1, stride=1)
209
+ self.lrelu = nn.LeakyReLU(0.01, inplace=True)
210
+
211
+ def forward(self, x, t):
212
+ # shape= x.size()
213
+ # x = x.view(-1,shape[-3],shape[-2],shape[-1])
214
+ if self.norm:
215
+ x, lb, ub = data_normalize(x)
216
+ t = t / (ub-lb)
217
+
218
+ if self.downsample:
219
+ x = self.down_fn(x)
220
+
221
+ conv_in = self.lrelu(self.conv_in(x))
222
+
223
+ conv1 = self.conv1(conv_in, t)
224
+ pool1 = self.pool1(conv1)
225
+
226
+ conv2 = self.conv2(pool1, t)
227
+ pool2 = self.pool2(conv2)
228
+
229
+ conv3 = self.conv3(pool2, t)
230
+ pool3 = self.pool3(conv3)
231
+
232
+ conv4 = self.conv4(pool3, t)
233
+ pool4 = self.pool4(conv4)
234
+
235
+ conv5 = self.conv5(pool4, t)
236
+
237
+ up6 = self.upv6(conv5)
238
+ up6 = torch.cat([up6, conv4], 1)
239
+ conv6 = self.conv6(up6, t)
240
+
241
+ up7 = self.upv7(conv6)
242
+ up7 = torch.cat([up7, conv3], 1)
243
+ conv7 = self.conv7(up7, t)
244
+
245
+ up8 = self.upv8(conv7)
246
+ up8 = torch.cat([up8, conv2], 1)
247
+ conv8 = self.conv8(up8, t)
248
+
249
+ up9 = self.upv9(conv8)
250
+ up9 = torch.cat([up9, conv1], 1)
251
+ conv9 = self.conv9(up9, t)
252
+
253
+ out = self.conv10(conv9)
254
+ if self.res:
255
+ out = out + x[:, self.cf*4:self.cf*4+4]
256
+
257
+ if self.downsample:
258
+ out = self.up_fn(out)
259
+
260
+ if self.norm:
261
+ out = data_inv_normalize(out, lb, ub)
262
+
263
+ return out
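A minimal end-to-end sketch for GuidedResUnet, assuming the defaults above and that GuidedResidualBlock (from archs/modules.py) accepts a broadcastable conditioning map t:

    import torch

    model = GuidedResUnet({'nf': 32, 'in_nc': 4, 'out_nc': 4})
    x = torch.randn(1, 4, 128, 128)
    t = torch.full((1, 1, 1, 1), 0.02)  # broadcastable noise-level map
    print(model(x, t).shape)            # torch.Size([1, 4, 128, 128])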
archs/__init__.py ADDED
@@ -0,0 +1,21 @@
1
+ import numpy as np
2
+ import cv2
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ from .Unet import *
7
+ from .comp import *
8
+ from .SCUNet import SCUNet
9
+ from .Restormer import Restormer
10
+
11
+ def initialize_weights(net):
12
+ for m in net.modules():
13
+ if isinstance(m, nn.Conv2d):
14
+ m.weight.data.normal_(0.0, 0.02)
15
+ if m.bias is not None:
16
+ m.bias.data.normal_(0.0, 0.02)
17
+ if isinstance(m, nn.ConvTranspose2d):
18
+ m.weight.data.normal_(0.0, 0.02)
19
+
20
+ if __name__ == '__main__':
21
+ pass
archs/comp.py ADDED
@@ -0,0 +1,1082 @@
1
+ from .modules import *
2
+
3
+ class DnCNN(nn.Module):
4
+ def __init__(self, args=None):
5
+ super().__init__()
6
+ self.args = args
7
+ self.res = args['res']
8
+ self.raw2rgb = args['in_nc'] == 4 and args['out_nc'] == 3
9
+ nf = args['nf']
10
+ in_nc = args['in_nc']
11
+ out_nc = args['out_nc']
12
+ depth = args['depth']
13
+ use_bn = args['use_bn']
14
+
15
+ layers = []
16
+
17
+ layers.append(nn.Conv2d(in_channels=in_nc, out_channels=nf, kernel_size=3, padding=1, bias=True))
18
+ layers.append(nn.ReLU(inplace=True))
19
+ for _ in range(depth-2):
20
+ layers.append(nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, padding=1, bias=False))
21
+ if use_bn:
22
+ layers.append(nn.BatchNorm2d(nf, eps=0.0001, momentum=0.95))
23
+ layers.append(nn.ReLU(inplace=True))
24
+ layers.append(nn.Conv2d(in_channels=nf, out_channels=out_nc, kernel_size=3, padding=1, bias=False))
25
+ self.dncnn = nn.Sequential(*layers)
26
+
27
+ def forward(self, x):
28
+ out = self.dncnn(x)
29
+ if self.raw2rgb:
30
+ out = nn.functional.pixel_shuffle(out, 2)
31
+ elif self.res:
32
+ out = x - out  # residual form: the network predicts noise, subtracted from the input
33
+ return out
34
+
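A smoke-test sketch for the residual DnCNN variant above (all argument keys assumed required, since the constructor indexes them directly):

    import torch

    dncnn = DnCNN({'res': True, 'in_nc': 4, 'out_nc': 4, 'nf': 64,
                   'depth': 17, 'use_bn': True})
    noisy = torch.randn(1, 4, 64, 64)
    print(dncnn(noisy).shape)  # torch.Size([1, 4, 64, 64])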
35
+ def conv33(in_channels, out_channels, stride=1,
36
+ padding=1, bias=True, groups=1):
37
+ return nn.Conv2d(
38
+ in_channels,
39
+ out_channels,
40
+ kernel_size=3,
41
+ stride=stride,
42
+ padding=padding,
43
+ bias=bias,
44
+ groups=groups)
45
+
46
+ def upconv2x2(in_channels, out_channels, mode='transpose'):
47
+ if mode == 'transpose':
48
+ return nn.ConvTranspose2d(
49
+ in_channels,
50
+ out_channels,
51
+ kernel_size=2,
52
+ stride=2)
53
+ else:
54
+ # out_channels is always going to be the same
55
+ # as in_channels
56
+ return nn.Sequential(
57
+ nn.Upsample(mode='bilinear', scale_factor=2),
58
+ conv1x1(in_channels, out_channels))
59
+
60
+ class DownConv(nn.Module):
61
+ """
62
+ A helper Module that performs 2 convolutions and 1 MaxPool.
63
+ A ReLU activation follows each convolution.
64
+ """
65
+ def __init__(self, in_channels, out_channels, pooling=True):
66
+ super(DownConv, self).__init__()
67
+
68
+ self.in_channels = in_channels
69
+ self.out_channels = out_channels
70
+ self.pooling = pooling
71
+
72
+ self.conv1 = conv33(self.in_channels, self.out_channels)
73
+ self.conv2 = conv33(self.out_channels, self.out_channels)
74
+
75
+ if self.pooling:
76
+ self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
77
+
78
+ def forward(self, x):
79
+ x = F.relu(self.conv1(x))
80
+ x = F.relu(self.conv2(x))
81
+ before_pool = x
82
+ if self.pooling:
83
+ x = self.pool(x)
84
+ return x, before_pool
85
+
86
+
87
+ class UpConv(nn.Module):
88
+ """
89
+ A helper Module that performs 2 convolutions and 1 UpConvolution.
90
+ A ReLU activation follows each convolution.
91
+ """
92
+ def __init__(self, in_channels, out_channels,
93
+ merge_mode='concat', up_mode='transpose'):
94
+ super(UpConv, self).__init__()
95
+
96
+ self.in_channels = in_channels
97
+ self.out_channels = out_channels
98
+ self.merge_mode = merge_mode
99
+ self.up_mode = up_mode
100
+
101
+ self.upconv = upconv2x2(self.in_channels, self.out_channels,
102
+ mode=self.up_mode)
103
+
104
+ if self.merge_mode == 'concat':
105
+ self.conv1 = conv33(
106
+ 2*self.out_channels, self.out_channels)
107
+ else:
108
+ # num of input channels to conv2 is same
109
+ self.conv1 = conv33(self.out_channels, self.out_channels)
110
+ self.conv2 = conv33(self.out_channels, self.out_channels)
111
+
112
+
113
+ def forward(self, from_down, from_up):
114
+ """ Forward pass
115
+ Arguments:
116
+ from_down: tensor from the encoder pathway
117
+ from_up: upconv'd tensor from the decoder pathway
118
+ """
119
+ from_up = self.upconv(from_up)
120
+ if self.merge_mode == 'concat':
121
+ x = torch.cat((from_up, from_down), 1)
122
+ else:
123
+ x = from_up + from_down
124
+ x = F.relu(self.conv1(x))
125
+ x = F.relu(self.conv2(x))
126
+ return x
127
+
128
+ class est_UNet(nn.Module):
129
+ """ `UNet` class is based on https://arxiv.org/abs/1505.04597
130
+ The U-Net is a convolutional encoder-decoder neural network.
131
+ Contextual spatial information (from the decoding,
132
+ expansive pathway) about an input tensor is merged with
133
+ information representing the localization of details
134
+ (from the encoding, compressive pathway).
135
+ Modifications to the original paper:
136
+ (1) padding is used in 3x3 convolutions to prevent loss
137
+ of border pixels
138
+ (2) merging outputs does not require cropping due to (1)
139
+ (3) residual connections can be used by specifying
140
+ UNet(merge_mode='add')
141
+ (4) if non-parametric upsampling is used in the decoder
142
+ pathway (specified by upmode='upsample'), then an
143
+ additional 1x1 2d convolution occurs after upsampling
144
+ to reduce channel dimensionality by a factor of 2.
145
+ This channel halving happens with the convolution in
146
+ the transpose convolution (specified by upmode='transpose')
147
+ """
148
+
149
+ def __init__(self, args):
150
+ """
151
+ Arguments:
152
+ in_channels: int, number of channels in the input tensor.
153
+ Default is 3 for RGB images.
154
+ depth: int, number of MaxPools in the U-Net.
155
+ start_filts: int, number of convolutional filters for the
156
+ first conv.
157
+ up_mode: string, type of upconvolution. Choices: 'transpose'
158
+ for transpose convolution or 'upsample' for nearest neighbour
159
+ upsampling.
160
+ """
161
+ super(est_UNet, self).__init__()
162
+
163
+ num_classes = args['out_nc']
164
+ in_channels = args['in_nc']
165
+ depth = args['depth']
166
+ start_filts = args['nf']
167
+ up_mode='transpose'
168
+ merge_mode='add'
169
+ use_type='optimize_gat'
170
+
171
+ self.use_type=use_type
172
+ if up_mode in ('transpose', 'upsample'):
173
+ self.up_mode = up_mode
174
+ else:
175
+ raise ValueError("\"{}\" is not a valid mode for "
176
+ "upsampling. Only \"transpose\" and "
177
+ "\"upsample\" are allowed.".format(up_mode))
178
+
179
+ if merge_mode in ('concat', 'add'):
180
+ self.merge_mode = merge_mode
181
+ else:
182
+ raise ValueError("\"{}\" is not a valid mode for "
+                  "merging up and down paths. "
+                  "Only \"concat\" and "
+                  "\"add\" are allowed.".format(merge_mode))
186
+
187
+ # NOTE: up_mode 'upsample' is incompatible with merge_mode 'add'
188
+ if self.up_mode == 'upsample' and self.merge_mode == 'add':
189
+ raise ValueError("up_mode \"upsample\" is incompatible "
190
+ "with merge_mode \"add\" at the moment "
191
+ "because it doesn't make sense to use "
192
+ "nearest neighbour to reduce "
193
+ "depth channels (by half).")
194
+
195
+ self.num_classes = num_classes
196
+ self.in_channels = in_channels
197
+ self.start_filts = start_filts
198
+ self.depth = depth
199
+
200
+ self.down_convs = []
201
+ self.up_convs = []
202
+
203
+ self.noiseSTD = nn.Parameter(data=torch.log(torch.tensor(0.5)))
204
+
205
+
206
+
207
+ # create the encoder pathway and add to a list
208
+ for i in range(depth):
209
+ ins = self.in_channels if i == 0 else outs
210
+ outs = self.start_filts*(2**i)
211
+ pooling = True if i < depth-1 else False
212
+
213
+ down_conv = DownConv(ins, outs, pooling=pooling)
214
+ self.down_convs.append(down_conv)
215
+
216
+ # create the decoder pathway and add to a list
217
+ # - careful! decoding only requires depth-1 blocks
218
+ for i in range(depth-1):
219
+ ins = outs
220
+ outs = ins // 2
221
+ up_conv = UpConv(ins, outs, up_mode=up_mode,
222
+ merge_mode=merge_mode)
223
+ self.up_convs.append(up_conv)
224
+
225
+ self.conv_final = conv1x1(outs, self.num_classes)
226
+ self.sigmoid=nn.Sigmoid().cuda()
227
+ # add the list of modules to current module
228
+ self.down_convs = nn.ModuleList(self.down_convs)
229
+ self.up_convs = nn.ModuleList(self.up_convs)
230
+
231
+ self.reset_params()
232
+
233
+ @staticmethod
234
+ def weight_init(m):
235
+ if isinstance(m, nn.Conv2d):
236
+ nn.init.xavier_normal_(m.weight)
+ if m.bias is not None:
+     nn.init.constant_(m.bias, 0)
238
+
239
+
240
+ def reset_params(self):
241
+ for i, m in enumerate(self.modules()):
242
+ self.weight_init(m)
243
+
244
+ def forward(self, x):
245
+ encoder_outs = []
246
+
247
+ # encoder pathway, save outputs for merging
248
+ for i, module in enumerate(self.down_convs):
249
+ x, before_pool = module(x)
250
+ encoder_outs.append(before_pool)
251
+
252
+ for i, module in enumerate(self.up_convs):
253
+ before_pool = encoder_outs[-(i+2)]
254
+ x = module(before_pool, x)
255
+
256
+ before_x=self.conv_final(x)
257
+ if self.use_type == 'optimize_gat':
258
+ x=before_x
259
+ else:
260
+ x = before_x**2
261
+
262
+ return torch.mean(x, dim=(2,3)).squeeze()
263
+
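A hedged usage sketch for est_UNet: it reduces an image batch to per-image estimates (e.g. noise parameters) via the spatial mean above. A GPU is assumed, since parts of the module are created with .cuda():

    import torch

    est = est_UNet({'out_nc': 2, 'in_nc': 4, 'depth': 3, 'nf': 16}).cuda()
    patch = torch.rand(8, 4, 64, 64).cuda()
    params = est(patch)
    print(params.shape)  # torch.Size([8, 2])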
264
+ class New1(nn.Module):
265
+ def __init__(self, in_ch, out_ch):
266
+ super(New1, self).__init__()
267
+
268
+ self.mask = torch.from_numpy(np.array([[1,1,1],[1,0,1],[1,1,1]], dtype=np.float32)).cuda()
269
+ self.conv1 = nn.Conv2d(in_channels=in_ch, out_channels=out_ch, padding = 1, kernel_size = 3)
270
+
271
+ def forward(self, x):
272
+ self.conv1.weight.data = self.conv1.weight * self.mask
273
+ x = self.conv1(x)
274
+
275
+ return x
276
+
277
+ class New2(nn.Module):
278
+ def __init__(self, in_ch, out_ch):
279
+ super(New2, self).__init__()
280
+
281
+ self.mask = torch.from_numpy(np.array([[0,1,0,1,0],[1,0,0,0,1],[0,0,1,0,0],[1,0,0,0,1],[0,1,0,1,0]], dtype=np.float32)).cuda()
282
+ self.conv1 = nn.Conv2d(in_channels=in_ch, out_channels=out_ch, padding = 2, kernel_size = 5)
283
+
284
+ def forward(self, x):
285
+ self.conv1.weight.data = self.conv1.weight * self.mask
286
+ x = self.conv1(x)
287
+
288
+ return x
289
+
290
+ class New3(nn.Module):
291
+ def __init__(self, in_ch, out_ch, dilated_value):
292
+ super(New3, self).__init__()
293
+
294
+ self.mask = torch.from_numpy(np.array([[1,0,1],[0,1,0],[1,0,1]], dtype=np.float32)).cuda()
295
+ self.conv1 = nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size = 3, padding=dilated_value, dilation=dilated_value)
296
+
297
+ def forward(self, x):
298
+ self.conv1.weight.data = self.conv1.weight * self.mask
299
+ x = self.conv1(x)
300
+
301
+ return x
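The three masked convolutions above give FBI-Net-style receptive fields that avoid or sparsify the center pixel. A small sketch checking that New1's kernel center is zeroed after a forward pass (GPU assumed, since the masks live on CUDA):

    import torch

    blind = New1(4, 16).cuda()
    _ = blind(torch.rand(1, 4, 8, 8).cuda())
    print(blind.conv1.weight[0, 0])  # 3x3 kernel with a zero at its center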
302
+
303
+ class Residual_module(nn.Module):
304
+ def __init__(self, in_ch, mul = 1):
305
+ super(Residual_module, self).__init__()
306
+
307
+ self.activation1 = nn.PReLU(in_ch*mul,0).cuda()
308
+ self.activation2 = nn.PReLU(in_ch,0).cuda()
309
+
310
+ self.conv1_1by1 = nn.Conv2d(in_channels=in_ch, out_channels=in_ch*mul, kernel_size = 1)
311
+ self.conv2_1by1 = nn.Conv2d(in_channels=in_ch*mul, out_channels=in_ch, kernel_size = 1)
312
+
313
+ def forward(self, input):
314
+
315
+ output_residual = self.conv1_1by1(input)
316
+ output_residual = self.activation1(output_residual)
317
+ output_residual = self.conv2_1by1(output_residual)
318
+
319
+ output = (input + output_residual) / 2.
320
+ output = self.activation2(output)
321
+
322
+ return output
323
+
324
+ class Gaussian(nn.Module):
325
+ def forward(self,input):
326
+ return torch.exp(-torch.mul(input,input))
327
+
328
+
329
+ class Receptive_attention(nn.Module):
330
+ def __init__(self, in_ch, at_type = 'softmax'):
331
+ super(Receptive_attention, self).__init__()
332
+
333
+ self.activation1 = nn.ReLU().cuda()
334
+ self.activation2 = nn.ReLU().cuda()
335
+ self.activation3 = nn.PReLU(in_ch,0).cuda()
336
+
337
+ self.conv1_1by1 = nn.Conv2d(in_channels=in_ch, out_channels=in_ch*4, kernel_size = 1)
338
+ self.conv2_1by1 = nn.Conv2d(in_channels=in_ch*4, out_channels=in_ch*4, kernel_size = 1)
339
+ self.conv3_1by1 = nn.Conv2d(in_channels=in_ch*4, out_channels=9, kernel_size = 1)
340
+ self.at_type = at_type
341
+ if at_type == 'softmax':
342
+ self.softmax = nn.Softmax()
343
+ else:
344
+ self.gaussian = Gaussian()
345
+ self.sigmoid = nn.Sigmoid()
346
+
347
+
348
+ def forward(self, input, receptive):
349
+
350
+ if self.at_type == 'softmax':
351
+ output_residual = self.conv1_1by1(input)
352
+ output_residual = self.activation1(output_residual)
353
+ output_residual = self.conv2_1by1(output_residual)
354
+ output_residual = self.activation2(output_residual)
355
+ output_residual = self.conv3_1by1(output_residual)
356
+ output_residual = F.adaptive_avg_pool2d(output_residual, (1, 1))
357
+ # output_residual = self.Gaussian(output_residual)
358
+ output_residual = self.softmax(output_residual).permute((1,0,2,3)).unsqueeze(-1)
359
+ else:
360
+
361
+ output_residual = self.conv1_1by1(input)
362
+ output_residual = self.activation1(output_residual)
363
+ output_residual = self.conv2_1by1(output_residual)
364
+ output_residual = self.activation2(output_residual)
365
+ output_residual = self.conv3_1by1(output_residual)
366
+ output_residual = F.adaptive_avg_pool2d(output_residual, (1, 1))
367
+ output_residual = self.gaussian(output_residual)
368
+ output_residual = self.sigmoid(output_residual).permute((1,0,2,3)).unsqueeze(-1)
369
+
370
+ output = torch.sum(receptive * output_residual, dim = 0)
371
+ output = self.activation3(output)
372
+
373
+ return output
374
+
375
+ class New1_layer(nn.Module):
376
+ def __init__(self, in_ch, out_ch, case = 'FBI_Net', mul = 1):
377
+ super(New1_layer, self).__init__()
378
+ self.case = case
379
+ self.new1 = New1(in_ch,out_ch).cuda()
380
+ if case == 'case1' or case == 'case2' or case == 'case7' or case == 'FBI_Net':
381
+ self.residual_module = Residual_module(out_ch, mul)
382
+
383
+ self.activation_new1 = nn.PReLU(in_ch,0).cuda()
384
+
385
+
386
+ def forward(self, x):
387
+
388
+
389
+ if self.case in ('case1', 'case2', 'case7', 'FBI_Net'):  # variants that include the residual module
390
+
391
+ output_new1 = self.new1(x)
392
+ output_new1 = self.activation_new1(output_new1)
393
+ output = self.residual_module(output_new1)
394
+
395
+ return output, output_new1
396
+
397
+ else: # final model
398
+
399
+ output_new1 = self.new1(x)
400
+ output = self.activation_new1(output_new1)
401
+
402
+ return output, output_new1
403
+
404
+ class New2_layer(nn.Module):
405
+ def __init__(self, in_ch, out_ch, case = 'FBI_Net', mul = 1):
406
+ super(New2_layer, self).__init__()
407
+
408
+ self.case = case
409
+
410
+ self.new2 = New2(in_ch,out_ch).cuda()
411
+ self.activation_new1 = nn.PReLU(in_ch,0).cuda()
412
+ if case == 'case1' or case == 'case2' or case == 'case7' or case == 'FBI_Net':
413
+ self.residual_module = Residual_module(out_ch, mul)
414
+ if case == 'case1' or case == 'case3' or case == 'case6' or case == 'FBI_Net':
415
+ self.activation_new2 = nn.PReLU(in_ch,0).cuda()
416
+
417
+
418
+ def forward(self, x, output_new):
419
+
420
+ if self.case == 'case1': #
421
+
422
+ output_new2 = self.new2(output_new)
423
+ output_new2 = self.activation_new1(output_new2)
424
+
425
+ output = (output_new2 + x) / 2.
426
+ output = self.activation_new2(output)
427
+ output = self.residual_module(output)
428
+
429
+ return output, output_new2
430
+
431
+
432
+ elif self.case == 'case2' or self.case == 'case7': #
433
+
434
+ output_new2 = self.new2(x)
435
+ output_new2 = self.activation_new1(output_new2)
436
+
437
+ output = output_new2
438
+ output = self.residual_module(output)
439
+
440
+ return output, output_new2
441
+
442
+ elif self.case == 'case3' or self.case == 'case6': #
443
+
444
+ output_new2 = self.new2(output_new)
445
+ output_new2 = self.activation_new1(output_new2)
446
+
447
+ output = (output_new2 + x) / 2.
448
+ output = self.activation_new2(output)
449
+
450
+ return output, output_new2
451
+
452
+ elif self.case == 'case4': #
453
+
454
+ output_new2 = self.new2(x)
455
+ output_new2 = self.activation_new1(output_new2)
456
+
457
+ output = output_new2
458
+
459
+ return output, output_new2
460
+
461
+ elif self.case == 'case5' : #
462
+
463
+ output_new2 = self.new2(x)
464
+ output_new2 = self.activation_new1(output_new2)
465
+
466
+ output = output_new2
467
+
468
+ return output, output_new2
469
+
470
+ else:
471
+
472
+ output_new2 = self.new2(output_new)
473
+ output_new2 = self.activation_new1(output_new2)
474
+
475
+ output = (output_new2 + x) / 2.
476
+ output = self.activation_new2(output)
477
+ output = self.residual_module(output)
478
+
479
+ return output, output_new2
480
+
481
+
482
+ class New3_layer(nn.Module):
483
+ def __init__(self, in_ch, out_ch, dilated_value=3, case = 'FBI_Net', mul = 1):
484
+ super(New3_layer, self).__init__()
485
+
486
+ self.case = case
487
+
488
+ self.new3 = New3(in_ch,out_ch,dilated_value).cuda()
489
+ self.activation_new1 = nn.PReLU(in_ch,0).cuda()
490
+ if case == 'case1' or case == 'case2' or case == 'case7' or case == 'FBI_Net':
491
+ self.residual_module = Residual_module(out_ch, mul)
492
+ if case == 'case1' or case == 'case3' or case == 'case6'or case == 'FBI_Net':
493
+ self.activation_new2 = nn.PReLU(in_ch,0).cuda()
494
+
495
+
496
+ def forward(self, x, output_new):
497
+
498
+ if self.case == 'case1': #
499
+
500
+ output_new3 = self.new3(output_new)
501
+ output_new3 = self.activation_new1(output_new3)
502
+
503
+ output = (output_new3 + x) / 2.
504
+ output = self.activation_new2(output)
505
+ output = self.residual_module(output)
506
+
507
+ return output, output_new3
508
+
509
+
510
+ elif self.case == 'case2' or self.case == 'case7': #
511
+
512
+ output_new3 = self.new3(x)
513
+ output_new3 = self.activation_new1(output_new3)
514
+
515
+ output = output_new3
516
+ output = self.residual_module(output)
517
+
518
+ return output, output_new3
519
+
520
+ elif self.case == 'case3' or self.case == 'case6': #
521
+
522
+ output_new3 = self.new3(output_new)
523
+ output_new3 = self.activation_new1(output_new3)
524
+
525
+ output = (output_new3 + x) / 2.
526
+ output = self.activation_new2(output)
527
+
528
+ return output, output_new3
529
+
530
+ elif self.case == 'case4': #
531
+
532
+ output_new3 = self.new3(x)
533
+ output_new3 = self.activation_new1(output_new3)
534
+
535
+ output = output_new3
536
+
537
+ return output, output_new3
538
+
539
+ elif self.case == 'case5': #
540
+
541
+ output_new3 = self.new3(x)
542
+ output_new3 = self.activation_new1(output_new3)
543
+
544
+ output = output_new3
545
+
546
+ return output, output_new3
547
+
548
+ else:
549
+
550
+ output_new3 = self.new3(output_new)
551
+ output_new3 = self.activation_new1(output_new3)
552
+
553
+ output = (output_new3 + x) / 2.
554
+ output = self.activation_new2(output)
555
+ output = self.residual_module(output)
556
+
557
+ return output, output_new3
558
+
559
+ class AttrProxy(object):
560
+ """Translates index lookups into attribute lookups."""
561
+ def __init__(self, module, prefix):
562
+ self.module = module
563
+ self.prefix = prefix
564
+
565
+ def __getitem__(self, i):
566
+ return getattr(self.module, self.prefix + str(i))
567
+
568
+ class FBI_Net(nn.Module):
569
+ def __init__(self, args):
570
+ super().__init__()
571
+ self.args = args
572
+ channel = args['channel']
573
+ output_channel = args['output_channel']
574
+ filters = args['nf']
575
+ mul = args['mul']
576
+ num_of_layers = args['num_of_layers']
577
+ case = args['case']
578
+ output_type = args['output_type']
579
+ sigmoid_value = args['sigmoid_value']
580
+ self.res = args['res']
581
+
582
+ self.case = case
583
+
584
+ self.new1 = New1_layer(channel, filters, mul = mul, case = case).cuda()
585
+ self.new2 = New2_layer(filters, filters, mul = mul, case = case).cuda()
586
+
587
+ self.num_layers = num_of_layers
588
+ self.output_type = output_type
589
+ self.sigmoid_value = sigmoid_value
590
+
591
+ dilated_value = 3
592
+
593
+ for layer in range (num_of_layers-2):
594
+ self.add_module('new_' + str(layer), New3_layer(filters, filters, dilated_value, mul = mul, case = case).cuda())
595
+
596
+ self.residual_module = Residual_module(filters, mul)
597
+ self.activation = nn.PReLU(filters,0).cuda()
598
+ self.output_layer = nn.Conv2d(in_channels=filters, out_channels=output_channel, kernel_size = 1).cuda()
599
+
600
+ if self.output_type == 'sigmoid':
601
+ self.sigmoid=nn.Sigmoid().cuda()
602
+
603
+ self.new = AttrProxy(self, 'new_')
604
+
605
+ def forward(self, x):
606
+
607
+ if self.case == 'FBI_Net' or self.case == 'case2' or self.case == 'case3' or self.case == 'case4':
608
+
609
+ output, output_new = self.new1(x)
610
+ output_sum = output
611
+ output, output_new = self.new2(output, output_new)
612
+ output_sum = output + output_sum
613
+
614
+ for i, (new_layer) in enumerate(self.new):
615
+
616
+ output, output_new = new_layer(output, output_new)
617
+ output_sum = output + output_sum
618
+
619
+ if i == self.num_layers - 3:
620
+ break
621
+
622
+ final_output = self.activation(output_sum/self.num_layers)
623
+ final_output = self.residual_module(final_output)
624
+ final_output = self.output_layer(final_output)
625
+
626
+ else:
627
+
628
+ output, output_new = self.new1(x)
629
+ output, output_new = self.new2(output, output_new)
630
+
631
+ for i, (new_layer) in enumerate(self.new):
632
+
633
+ output, output_new = new_layer(output, output_new)
634
+
635
+ if i == self.num_layers - 3:
636
+ break
637
+
638
+ final_output = self.activation(output)
639
+ final_output = self.residual_module(final_output)
640
+ final_output = self.output_layer(final_output)
641
+
642
+ if self.output_type=='sigmoid':
643
+ final_output[:, 0] = self.sigmoid_value * self.sigmoid(final_output[:, 0])
644
+
645
+ if self.res:
646
+ final_output = final_output[:,:1] * x + final_output[:,1:]
647
+
648
+ return final_output
649
+
650
+ class SelfSupUNet(nn.Module):
651
+ def __init__(self, args):
652
+ """
653
+ Args:
654
+ in_channels (int): number of input channels, Default 4
655
+ depth (int): depth of the network, Default 5
656
+ nf (int): number of filters in the first layer, Default 32
657
+ """
658
+ super().__init__()
659
+ in_channels = args['in_nc']
660
+ out_channels = args['out_nc']
661
+ depth = args['depth'] if 'depth' in args else 5
662
+ nf = args['nf'] if 'nf' in args else 32
663
+ slope = args['slope'] if 'slope' in args else 0.1
664
+ self.norm = args['norm'] if 'norm' in args else False
665
+ self.res = args['res'] if 'res' in args else False
666
+
667
+ self.depth = depth
668
+ self.head = nn.Sequential(
669
+ LR(in_channels, nf, 3, slope), LR(nf, nf, 3, slope))
670
+ self.down_path = nn.ModuleList()
671
+ for i in range(depth):
672
+ self.down_path.append(LR(nf, nf, 3, slope))
673
+
674
+ self.up_path = nn.ModuleList()
675
+ for i in range(depth):
676
+ if i != depth-1:
677
+ self.up_path.append(UP(nf*2 if i==0 else nf*3, nf*2, slope))
678
+ else:
679
+ self.up_path.append(UP(nf*2+in_channels, nf*2, slope))
680
+
681
+ self.last = nn.Sequential(LR(2*nf, 2*nf, 1, slope),
682
+ LR(2*nf, 2*nf, 1, slope), conv1x1(2*nf, out_channels, bias=True))
683
+
684
+ def forward(self, x):
685
+ if self.norm:
686
+ x, lb, ub = data_normalize(x)
687
+ blocks = []
688
+ blocks.append(x)
689
+ x = self.head(x)
690
+ for i, down in enumerate(self.down_path):
691
+ x = F.max_pool2d(x, 2)
692
+ if i != len(self.down_path) - 1:
693
+ blocks.append(x)
694
+ x = down(x)
695
+
696
+ for i, up in enumerate(self.up_path):
697
+ x = up(x, blocks[-i-1])
698
+
699
+ out = self.last(x)
700
+ if self.res:
701
+ out = out + blocks[0]  # residual to the network input; x is the last feature map here
702
+
703
+ if self.norm:
704
+ out = data_inv_normalize(out, lb, ub)
705
+
706
+ return out
707
+
708
+
709
+ class LR(nn.Module):
710
+ def __init__(self, in_size, out_size, ksize=3, slope=0.1):
711
+ super(LR, self).__init__()
712
+ block = []
713
+ block.append(nn.Conv2d(in_size, out_size,
714
+ kernel_size=ksize, padding=ksize//2, bias=True))
715
+ block.append(nn.LeakyReLU(slope, inplace=False))
716
+
717
+ self.block = nn.Sequential(*block)
718
+
719
+ def forward(self, x):
720
+ out = self.block(x)
721
+ return out
722
+
723
+
724
+ class UP(nn.Module):
725
+ def __init__(self, in_size, out_size, slope=0.1):
726
+ super(UP, self).__init__()
727
+ self.conv_1 = LR(in_size, out_size)
728
+ self.conv_2 = LR(out_size, out_size)
729
+
730
+ def up(self, x):
731
+ s = x.shape
732
+ x = x.reshape(s[0], s[1], s[2], 1, s[3], 1)
733
+ x = x.repeat(1, 1, 1, 2, 1, 2)
734
+ x = x.reshape(s[0], s[1], s[2]*2, s[3]*2)
735
+ return x
736
+
737
+ def forward(self, x, pool):
738
+ x = self.up(x)
739
+ x = torch.cat([x, pool], 1)
740
+ x = self.conv_1(x)
741
+ x = self.conv_2(x)
742
+
743
+ return x
744
+
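The reshape/repeat trick in UP.up is a dependency-free 2x nearest-neighbour upsample; a quick equivalence sketch against F.interpolate:

    import torch
    import torch.nn.functional as F

    x = torch.rand(1, 3, 4, 4)
    up = UP(6, 6)
    assert torch.equal(up.up(x), F.interpolate(x, scale_factor=2, mode='nearest'))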
745
+ class SelfResUNet(nn.Module):
746
+ def __init__(self, args):
747
+ """
748
+ Args:
749
+ in_channels (int): number of input channels, Default 4
750
+ depth (int): depth of the network, Default 5
751
+ nf (int): number of filters in the first layer, Default 32
752
+ """
753
+ super().__init__()
754
+ in_channels = args['in_nc']
755
+ out_channels = args['out_nc']
756
+ depth = args['depth'] if 'depth' in args else 5
757
+ nf = args['nf'] if 'nf' in args else 32
758
+ slope = args['slope'] if 'slope' in args else 0.1
759
+ self.norm = args['norm'] if 'norm' in args else False
760
+ self.res = args['res'] if 'res' in args else False
761
+
762
+ self.depth = depth
763
+ self.head = Res(in_channels, nf, slope)
764
+ self.down_path = nn.ModuleList()
765
+ for i in range(depth):
766
+ self.down_path.append(Res(nf, nf, slope, ksize=3))
767
+
768
+ self.up_path = nn.ModuleList()
769
+ for i in range(depth):
770
+ if i != depth-1:
771
+ self.up_path.append(RUP(nf*2 if i==0 else nf*3, nf*2, slope))
772
+ else:
773
+ self.up_path.append(RUP(nf*2+in_channels, nf*2, slope))
774
+
775
+ self.last = Res(2*nf, 2*nf, slope, ksize=1)
776
+ self.out = conv1x1(2*nf, out_channels, bias=True)
777
+
778
+ def forward(self, x):
779
+ if self.norm:
780
+ x, lb, ub = data_normalize(x)
781
+ inp = x
782
+ blocks = []
783
+ blocks.append(x)
784
+ x = self.head(x)
785
+ for i, down in enumerate(self.down_path):
786
+ x = F.max_pool2d(x, 2)
787
+ if i != len(self.down_path) - 1:
788
+ blocks.append(x)
789
+ x = down(x)
790
+
791
+ for i, up in enumerate(self.up_path):
792
+ x = up(x, blocks[-i-1])
793
+
794
+ out = self.last(x)
795
+ out = self.out(out)
796
+ if self.res:
797
+ out = out + inp
798
+
799
+ if self.norm:
800
+ out = data_inv_normalize(out, lb, ub)
801
+
802
+ return out
803
+
804
+ class RUP(nn.Module):
805
+ def __init__(self, in_size, out_size, slope=0.1, ksize=3):
806
+ super(RUP, self).__init__()
807
+ self.conv_1 = LR(out_size, out_size, ksize=ksize, slope=slope)
808
+ self.conv_2 = LR(out_size, out_size, ksize=ksize, slope=slope)
809
+ if in_size != out_size:
810
+ self.short_cut = nn.Sequential(conv1x1(in_size, out_size))
811
+ else:
812
+ self.short_cut = nn.Sequential(OrderedDict([]))
813
+
814
+ def up(self, x):
815
+ s = x.shape
816
+ x = x.reshape(s[0], s[1], s[2], 1, s[3], 1)
817
+ x = x.repeat(1, 1, 1, 2, 1, 2)
818
+ x = x.reshape(s[0], s[1], s[2]*2, s[3]*2)
819
+ return x
820
+
821
+ def forward(self, x, pool):
822
+ x = self.up(x)
823
+ x = torch.cat([x, pool], 1)
824
+ x = self.short_cut(x)
825
+ z = self.conv_1(x)
826
+ z = self.conv_2(z)
827
+ z += x
828
+ return z
829
+
830
+ class Res(nn.Module):
831
+ def __init__(self, in_size, out_size, slope=0.1, ksize=3):
832
+ super().__init__()
833
+ self.conv_1 = LR(out_size, out_size, ksize=ksize, slope=slope)
834
+ self.conv_2 = LR(out_size, out_size, ksize=ksize, slope=slope)
835
+ if in_size != out_size:
836
+ self.short_cut = nn.Sequential(conv1x1(in_size, out_size))
837
+ else:
838
+ self.short_cut = nn.Sequential(OrderedDict([]))
839
+
840
+ def forward(self, x):
841
+ x = self.short_cut(x)
842
+ z = self.conv_1(x)
843
+ z = self.conv_2(z)
844
+ z += x
845
+ return z
846
+
847
+ def conv1x1(in_chn, out_chn, bias=True):
848
+ layer = nn.Conv2d(in_chn, out_chn, kernel_size=1,
849
+ stride=1, padding=0, bias=bias)
850
+ return layer
851
+
852
+ class GuidedSelfUnet(nn.Module):
853
+ def __init__(self, args):
854
+ """
855
+ Args:
856
+ in_channels (int): number of input channels, Default 4
857
+ depth (int): depth of the network, Default 5
858
+ nf (int): number of filters in the first layer, Default 32
859
+ """
860
+ super().__init__()
861
+ in_channels = args['in_nc']
862
+ out_channels = args['out_nc']
863
+ depth = args['depth'] if 'depth' in args else 5
864
+ nf = args['nf'] if 'nf' in args else 32
865
+ slope = args['slope'] if 'slope' in args else 0.1
866
+ self.norm = args['norm'] if 'norm' in args else False
867
+ self.res = args['res'] if 'res' in args else False
868
+
869
+ self.depth = depth
870
+ self.head = GRes(in_channels, nf, slope)
871
+ self.down_path = nn.ModuleList()
872
+ for i in range(depth):
873
+ self.down_path.append(GLR(nf, nf, 3, slope))
874
+
875
+ self.up_path = nn.ModuleList()
876
+ for i in range(depth):
877
+ if i != depth-1:
878
+ self.up_path.append(GUP(nf*2 if i==0 else nf*3, nf*2, slope))
879
+ else:
880
+ self.up_path.append(GUP(nf*2+in_channels, nf*2, slope))
881
+
882
+ self.last = GRes(2*nf, 2*nf, slope, ksize=1)
883
+ self.out = conv1x1(2*nf, out_channels, bias=True)
884
+
885
+ def forward(self, x, t):
886
+ if self.norm:
887
+ x, lb, ub = data_normalize(x)
888
+ t = t / (ub-lb)
889
+ blocks = []
890
+ blocks.append(x)
891
+ x = self.head(x, t)
892
+ for i, down in enumerate(self.down_path):
893
+ x = F.max_pool2d(x, 2)
894
+ if i != len(self.down_path) - 1:
895
+ blocks.append(x)
896
+ x = down(x, t)
897
+
898
+ for i, up in enumerate(self.up_path):
899
+ x = up(x, blocks[-i-1], t)
900
+
901
+ out = self.last(x, t)
902
+ out = self.out(out)
903
+
904
+ if self.res:
905
+ out = out + blocks[0]  # residual to the network input; x is the last feature map here
906
+
907
+ if self.norm:
908
+ out = data_inv_normalize(out, lb, ub)
909
+
910
+ return out
911
+
912
+ class GLR(nn.Module):
913
+ def __init__(self, in_size, out_size, ksize=3, slope=0.1):
914
+ super(GLR, self).__init__()
915
+ self.block = nn.Conv2d(in_size, out_size,
916
+ kernel_size=ksize, padding=ksize//2, bias=True)
917
+ self.act = nn.LeakyReLU(slope, inplace=False)
918
+ self.gamma = nn.Sequential(
919
+ conv1x1(1, out_size),
920
+ nn.SiLU(),
921
+ conv1x1(out_size, out_size),
922
+ )
923
+ self.beta = nn.Sequential(
924
+ nn.SiLU(),
925
+ conv1x1(out_size, out_size),
926
+ )
927
+
928
+ def forward(self, x, t):
929
+ z = self.block(x)
930
+ tk = self.gamma(t)
931
+ tb = self.beta(tk)
932
+ z = z * tk + tb
933
+ out = self.act(z)
934
+ return out
935
+
936
+ class GRes(nn.Module):
937
+ def __init__(self, in_size, out_size, slope=0.1, ksize=3):
938
+ super(GRes, self).__init__()
939
+ self.conv_1 = LR(out_size, out_size, ksize=ksize)
940
+ self.conv_2 = GLR(out_size, out_size, ksize=ksize)
941
+ if in_size != out_size:
942
+ self.short_cut = nn.Sequential(
943
+ conv1x1(in_size, out_size)
944
+ )
945
+ else:
946
+ self.short_cut = nn.Sequential(OrderedDict([]))
947
+
948
+ def forward(self, x, t):
949
+ x = self.short_cut(x)
950
+ z = self.conv_1(x)
951
+ z = self.conv_2(z, t)
952
+ z += x
953
+
954
+ return z
955
+
956
+ class GUP(nn.Module):
957
+ def __init__(self, in_size, out_size, slope=0.1):
958
+ super(GUP, self).__init__()
959
+ self.conv_1 = LR(out_size, out_size)
960
+ self.conv_2 = GLR(out_size, out_size)
961
+ if in_size != out_size:
962
+ self.short_cut = nn.Sequential(
963
+ conv1x1(in_size, out_size)
964
+ )
965
+ else:
966
+ self.short_cut = nn.Sequential(OrderedDict([]))
967
+
968
+ def up(self, x):
969
+ s = x.shape
970
+ x = x.reshape(s[0], s[1], s[2], 1, s[3], 1)
971
+ x = x.repeat(1, 1, 1, 2, 1, 2)
972
+ x = x.reshape(s[0], s[1], s[2]*2, s[3]*2)
973
+ return x
974
+
975
+ def forward(self, x, pool, t):
976
+ x = self.up(x)
977
+ x = torch.cat([x, pool], 1)
978
+ x = self.short_cut(x)
979
+ z = self.conv_1(x)
980
+ z = self.conv_2(z, t)
981
+ z += x
982
+
983
+ return z
984
+
985
+
986
+ class N2NF_Unet(nn.Module):
987
+ def __init__(self, args=None):
988
+ super().__init__()
989
+ self.args = args
990
+ in_nc = args['in_nc']
991
+ out_nc = args['out_nc']
992
+ self.norm = args['norm'] if 'norm' in args else False
993
+
994
+ # Layers: enc_conv0, enc_conv1, pool1
995
+ self._block1 = nn.Sequential(
996
+ nn.Conv2d(in_nc, 48, 3, stride=1, padding=1),
997
+ nn.ReLU(inplace=True),
998
+ nn.Conv2d(48, 48, 3, padding=1),
999
+ nn.ReLU(inplace=True),
1000
+ nn.MaxPool2d(2))
1001
+
1002
+ # Layers: enc_conv(i), pool(i); i=2..5
1003
+ self._block2 = nn.Sequential(
1004
+ nn.Conv2d(48, 48, 3, stride=1, padding=1),
1005
+ nn.ReLU(inplace=True),
1006
+ nn.MaxPool2d(2))
1007
+
1008
+ # Layers: enc_conv6, upsample5
1009
+ self._block3 = nn.Sequential(
1010
+ nn.Conv2d(48, 48, 3, stride=1, padding=1),
1011
+ nn.ReLU(inplace=True),
1012
+ nn.ConvTranspose2d(48, 48, 3, stride=2, padding=1, output_padding=1))
1013
+ #nn.Upsample(scale_factor=2, mode='nearest'))
1014
+
1015
+ # Layers: dec_conv5a, dec_conv5b, upsample4
1016
+ self._block4 = nn.Sequential(
1017
+ nn.Conv2d(96, 96, 3, stride=1, padding=1),
1018
+ nn.ReLU(inplace=True),
1019
+ nn.Conv2d(96, 96, 3, stride=1, padding=1),
1020
+ nn.ReLU(inplace=True),
1021
+ nn.ConvTranspose2d(96, 96, 3, stride=2, padding=1, output_padding=1))
1022
+ #nn.Upsample(scale_factor=2, mode='nearest'))
1023
+
1024
+ # Layers: dec_deconv(i)a, dec_deconv(i)b, upsample(i-1); i=4..2
1025
+ self._block5 = nn.Sequential(
1026
+ nn.Conv2d(144, 96, 3, stride=1, padding=1),
1027
+ nn.ReLU(inplace=True),
1028
+ nn.Conv2d(96, 96, 3, stride=1, padding=1),
1029
+ nn.ReLU(inplace=True),
1030
+ nn.ConvTranspose2d(96, 96, 3, stride=2, padding=1, output_padding=1))
1031
+ #nn.Upsample(scale_factor=2, mode='nearest'))
1032
+
1033
+ # Layers: dec_conv1a, dec_conv1b, dec_conv1c,
1034
+ self._block6 = nn.Sequential(
1035
+ nn.Conv2d(96 + in_nc, 64, 3, stride=1, padding=1),
1036
+ nn.ReLU(inplace=True),
1037
+ nn.Conv2d(64, 32, 3, stride=1, padding=1),
1038
+ nn.ReLU(inplace=True),
1039
+ nn.Conv2d(32, out_nc, 3, stride=1, padding=1),
1040
+ nn.LeakyReLU(0.1))
1041
+
1042
+ # Initialize weights
1043
+ self._init_weights()
1044
+
1045
+
1046
+ def _init_weights(self):
1047
+ """Initializes weights using He et al. (2015)."""
1048
+
1049
+ for m in self.modules():
1050
+ if isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv2d):
1051
+ nn.init.kaiming_normal_(m.weight.data)
1052
+ m.bias.data.zero_()
1053
+
1054
+
1055
+ def forward(self, x):
1056
+ if self.norm:
1057
+ x, lb, ub = data_normalize(x)
1058
+
1059
+ # Encoder
1060
+ pool1 = self._block1(x)
1061
+ pool2 = self._block2(pool1)
1062
+ pool3 = self._block2(pool2)
1063
+ pool4 = self._block2(pool3)
1064
+ pool5 = self._block2(pool4)
1065
+
1066
+ # Decoder
1067
+ upsample5 = self._block3(pool5)
1068
+ concat5 = torch.cat((upsample5, pool4), dim=1)
1069
+ upsample4 = self._block4(concat5)
1070
+ concat4 = torch.cat((upsample4, pool3), dim=1)
1071
+ upsample3 = self._block5(concat4)
1072
+ concat3 = torch.cat((upsample3, pool2), dim=1)
1073
+ upsample2 = self._block5(concat3)
1074
+ concat2 = torch.cat((upsample2, pool1), dim=1)
1075
+ upsample1 = self._block5(concat2)
1076
+ concat1 = torch.cat((upsample1, x), dim=1)
1077
+
1078
+ # Final activation
1079
+ out = self._block6(concat1)
1080
+ if self.norm:
1081
+ out = data_inv_normalize(out, lb, ub)
1082
+ return out
archs/modules.py ADDED
@@ -0,0 +1,525 @@
1
+
2
+ import copy
+ import functools
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ import numpy as np
7
+ from collections import OrderedDict
8
+ # wavelet decomposition / reconstruction utilities
9
+ from pytorch_wavelets import DWTForward, DWTInverse # (or import DWT, IDWT)
10
+
11
+ class WaveletDecompose(nn.Module):
12
+ def __init__(self, mode='haar'):
13
+ super().__init__()
14
+ self.xfm = DWTForward(J=1, wave=mode, mode='reflect')
15
+
16
+ def forward(self, x):
17
+ """
18
+ Convert a one-level wavelet decomposition into channel-concatenated form.
+
+ Args:
+     x: input tensor of shape (B, C, H, W)
+
+ Returns:
+     output: concatenated tensor of shape (B, 4*C, H//2, W//2)
+         channel order: [LL, HL, LH, HH]
26
+ """
27
+ yl, yh = self.xfm(x)
28
+
29
+ # yl: (B, C, H//2, W//2) - LL subband
+ # yh[0]: (B, C, 3, H//2, W//2) - high-frequency coefficients
+
+ # extract the three directional detail subbands
+ hl = yh[0][:, :, 0, :, :]  # HL: horizontal detail
+ lh = yh[0][:, :, 1, :, :]  # LH: vertical detail
+ hh = yh[0][:, :, 2, :, :]  # HH: diagonal detail
+
+ # concatenate along the channel dimension
38
+ output = torch.cat([yl, hl, lh, hh], dim=1)
39
+
40
+ return output
41
+
42
+ class WaveletReconstruct(nn.Module):
43
+ def __init__(self, mode='haar'):
44
+ super().__init__()
45
+ self.ifm = DWTInverse(wave=mode, mode='reflect')
46
+
47
+ def forward(self, x):
48
+ """
49
+ Restore an image from channel-concatenated wavelet coefficients.
+
+ Args:
+     x: input tensor of shape (B, 4*C, H, W)
+
+ Returns:
+     reconstructed image of shape (B, C, 2*H, 2*W)
56
+ """
57
+ batch_size, total_channels, height, width = x.shape
58
+ channels = total_channels // 4
59
+
60
+ # split the concatenated channels back into subbands
61
+ yl = x[:, :channels, :, :] # LL
62
+ hl = x[:, channels:2*channels, :, :] # HL
63
+ lh = x[:, 2*channels:3*channels, :, :] # LH
64
+ hh = x[:, 3*channels:4*channels, :, :] # HH
65
+
66
+ # repack into the format expected by pytorch_wavelets:
+ # yh is a list whose first element has shape (B, C, 3, H, W)
+ yh_coeff = torch.stack([hl, lh, hh], dim=2)  # stack along dim=2
+ yh = [yh_coeff]  # must be wrapped in a list
70
+
71
+ # run the inverse transform
72
+ reconstructed = self.ifm((yl, yh))
73
+
74
+ return reconstructed
75
+
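A perfect-reconstruction sketch for the pair above: with matching wavelet and padding modes, decompose followed by reconstruct is the identity up to float precision.

    import torch

    dec = WaveletDecompose('haar')
    rec = WaveletReconstruct('haar')
    img = torch.rand(1, 4, 32, 32)
    coeffs = dec(img)  # (1, 16, 16, 16): [LL, HL, LH, HH] along channels
    assert torch.allclose(rec(coeffs), img, atol=1e-5)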
76
+ def make_layer(block, n_layers):
77
+ layers = []
78
+ for _ in range(n_layers):
79
+ layers.append(copy.deepcopy(block))  # deep-copy so stacked layers do not share parameters
80
+ return nn.Sequential(*layers)
81
+
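With the deep copy above, make_layer stacks independent clones of a template block; without it, every "layer" would share a single set of parameters. A quick check:

    import torch.nn as nn

    trunk = make_layer(nn.Conv2d(8, 8, 3, padding=1), 3)
    weights = {id(m.weight) for m in trunk}
    assert len(weights) == 3  # three independent copies, not one shared module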
82
+ class SiLU(nn.Module):
83
+ def forward(self, x):
84
+ return x * torch.sigmoid(x)
85
+
86
+ class GroupNorm32(nn.GroupNorm):
87
+ def forward(self, x):
88
+ return super().forward(x.float()).type(x.dtype)
89
+
90
+ class Module_with_Init(nn.Module):
91
+ def __init__(self,):
92
+ super().__init__()
93
+
94
+ def _initialize_weights(self):
95
+ for m in self.modules():
96
+ if isinstance(m, nn.Conv2d):
97
+ m.weight.data.normal_(0.0, 0.02)
98
+ if m.bias is not None:
99
+ m.bias.data.normal_(0.0, 0.02)
100
+ if isinstance(m, nn.ConvTranspose2d):
101
+ m.weight.data.normal_(0.0, 0.02)
102
+
103
+ def lrelu(self, x):
104
+ outt = torch.max(0.2*x, x)
105
+ return outt
106
+
107
+ class ResConvBlock_CBAM(nn.Module):
108
+ def __init__(self, in_nc, nf=64, res_scale=1):
109
+ super().__init__()
110
+ self.res_scale = res_scale
111
+ self.conv1 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
112
+ self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
113
+ self.cbam = CBAM(nf)
114
+ self.relu = nn.ReLU()
115
+
116
+ def forward(self, x):
117
+ x = self.relu(self.conv1(x))
118
+ out = self.cbam(self.relu(self.conv2(x)))
119
+ return x + out * self.res_scale
120
+
121
+ class ResidualBlockNoBN(nn.Module):
122
+ """Residual block without BN.
123
+
124
+ It has a style of:
125
+ ---Conv-ReLU-Conv-+-
126
+ |________________|
127
+
128
+ Args:
129
+ nf (int): Channel number of intermediate features.
130
+ Default: 64.
131
+ res_scale (float): Residual scale. Default: 1.
132
+ """
133
+
134
+ def __init__(self, nf=64, res_scale=1):
135
+ super(ResidualBlockNoBN, self).__init__()
136
+ self.res_scale = res_scale
137
+ self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
138
+ self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
139
+ self.relu = nn.ReLU()
140
+
141
+ def forward(self, x):
142
+ identity = x
143
+ out = self.conv2(self.relu(self.conv1(x)))
144
+ return identity + out * self.res_scale
145
+
146
+ def conv1x1(in_nc, out_nc, groups=1):
147
+ return nn.Conv2d(in_nc, out_nc,kernel_size=1,groups=groups,stride=1)
148
+
149
+ class Identity(nn.Identity):
150
+ def __init__(self, args):
151
+ super().__init__()
152
+
153
+ class ResidualBlock3D(nn.Module):
154
+ def __init__(self, in_c, out_c, is_activate=True):
155
+ super().__init__()
156
+ self.activation = nn.ReLU(inplace=True) if is_activate else nn.Sequential()
157
+ self.block = nn.Sequential(
158
+ nn.Conv3d(in_c, out_c, kernel_size=3, padding=1, stride=1),
159
+ self.activation,
160
+ nn.Conv3d(out_c, out_c, kernel_size=3, padding=1, stride=1)
161
+ )
162
+
163
+ if in_c != out_c:
164
+ self.short_cut = nn.Sequential(
165
+ nn.Conv3d(in_c, out_c, kernel_size=1, padding=0, stride=1)
166
+ )
167
+ else:
168
+ self.short_cut = nn.Sequential(OrderedDict([]))
169
+
170
+ def forward(self, x):
171
+ output = self.block(x)
172
+ output += self.short_cut(x)
173
+ output = self.activation(output)
174
+ return output
175
+
176
+ class conv3x3(nn.Module):
177
+ def __init__(self, in_nc, out_nc, stride=2, is_activate=True):
178
+ super().__init__()
179
+ self.conv = nn.Sequential(nn.Conv2d(in_nc, out_nc, kernel_size=3, padding=1, stride=stride))
180
+ if is_activate:
181
+ self.conv.add_module("relu", nn.ReLU(inplace=True))  # nn.Sequential actually applies it in forward
182
+
183
+ def forward(self, x):
184
+ return self.conv(x)
185
+
186
+ class convWithBN(nn.Module):
187
+ def __init__(self, in_c, out_c, kernel_size=3, padding=1, stride=1, is_activate=True, is_bn=True):
188
+ super(convWithBN, self).__init__()
189
+ self.conv = nn.Sequential(OrderedDict([
190
+ ("conv", nn.Conv2d(in_c, out_c, kernel_size=kernel_size, padding=padding,
191
+ stride=stride, bias=False)),
192
+ ]))
193
+ if is_bn:
194
+ self.conv.add_module("BN", nn.BatchNorm2d(out_c))
195
+ if is_activate:
196
+ self.conv.add_module("relu", nn.ReLU(inplace=True))
197
+
198
+ def forward(self, x):
199
+ return self.conv(x)
200
+
201
+
202
+ class DoubleCvBlock(nn.Module):
203
+ def __init__(self, in_c, out_c):
204
+ super(DoubleCvBlock, self).__init__()
205
+ self.block = nn.Sequential(
206
+ convWithBN(in_c, out_c, kernel_size=3, padding=1, stride=1, is_bn=False),
207
+ convWithBN(out_c, out_c, kernel_size=3, padding=1, stride=1, is_bn=False)
208
+ )
209
+
210
+ def forward(self, x):
211
+ output = self.block(x)
212
+ return output
213
+
214
+ class nResBlocks(nn.Module):
215
+ def __init__(self, nf, nlayers=2):
216
+ super().__init__()
217
+ self.blocks = make_layer(ResidualBlock(nf, nf), n_layers=nlayers)  # one shared-weight block applied nlayers times
218
+
219
+ def forward(self, x):
220
+ return self.blocks(x)
221
+
222
+ class GuidedResidualBlock(nn.Module):
223
+ def __init__(self, in_c, out_c, is_activate=False):
224
+ super().__init__()
225
+ # self.norm = nn.LayerNorm(out_c)
226
+ self.act = nn.SiLU()
227
+ self.conv1 = nn.Conv2d(out_c, out_c, 3, 1, 1, bias=True)
228
+ self.conv2 = nn.Conv2d(out_c, out_c, 3, 1, 1, bias=True)
229
+ self.gamma = nn.Sequential(
230
+ conv1x1(1, out_c),
231
+ nn.SiLU(),
232
+ conv1x1(out_c, out_c),
233
+ )
234
+ self.beta = nn.Sequential(
235
+ nn.SiLU(),
236
+ conv1x1(out_c, out_c),
237
+ )
238
+ if in_c != out_c:
239
+ self.short_cut = nn.Sequential(
240
+ conv1x1(in_c, out_c)
241
+ )
242
+ else:
243
+ self.short_cut = nn.Sequential(OrderedDict([]))
244
+
245
+ def forward(self, x, t):
246
+ if len(t.shape) > 0 and t.shape[-1] != 1:
247
+ t = F.interpolate(t, size=x.shape[2:], mode='bilinear', align_corners=False)
248
+ x = self.short_cut(x)
249
+ z = self.act(x)
250
+ z = self.conv1(z)
251
+ tk = self.gamma(t)
252
+ tb = self.beta(tk)
253
+ z = z * tk + tb
254
+ z = self.act(z)
255
+ z = self.conv2(z)
256
+ z += x
257
+ return z
258
+
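GuidedResidualBlock is FiLM-style conditioning: a one-channel guidance map t (for example an SNR or illumination map) is lifted by the gamma branch to per-channel scales and by the beta branch to shifts, applied between the two convolutions. A shape-level sketch:

blk = GuidedResidualBlock(32, 64)
x = torch.randn(1, 32, 64, 64)
t = torch.randn(1, 1, 64, 64)   # guidance map; resized to x's spatial size internally if needed
y = blk(x, t)                   # -> (1, 64, 64, 64)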
259
+ class GuidedConvBlock(nn.Module):
260
+ def __init__(self, in_c, out_c, is_activate=False):
261
+ super().__init__()
262
+ # self.norm = nn.LayerNorm(out_c)
263
+ self.act = nn.SiLU()
264
+ self.conv1 = nn.Conv2d(out_c, out_c, 3, 1, 1, bias=True)
265
+ self.conv2 = nn.Conv2d(out_c, out_c, 3, 1, 1, bias=True)
266
+ self.gamma = nn.Sequential(
267
+ conv1x1(1, out_c),
268
+ nn.SiLU(),
269
+ conv1x1(out_c, out_c),
270
+ )
271
+ self.beta = nn.Sequential(
272
+ nn.SiLU(),
273
+ conv1x1(out_c, out_c),
274
+ )
275
+ if in_c != out_c:
276
+ self.short_cut = nn.Sequential(
277
+ conv1x1(in_c, out_c)
278
+ )
279
+ else:
280
+ self.short_cut = nn.Sequential(OrderedDict([]))
281
+
282
+ def forward(self, x, t):
283
+ x = self.short_cut(x)
284
+ z = self.act(x)
285
+ z = self.conv1(z)
286
+ tk = self.gamma(t)
287
+ tb = self.beta(tk)
288
+ z = z * tk + tb
289
+ z = self.act(z)
290
+ z = self.conv2(z)
291
+ return z
292
+
293
+ class SNR_Block(nn.Module):
294
+ def __init__(self, in_c, out_c, is_activate=False):
295
+ super().__init__()
296
+ # self.norm = nn.LayerNorm(out_c)
297
+ self.act = nn.SiLU()
298
+ self.conv1 = nn.Conv2d(out_c, out_c, 3, 1, 1, bias=True)
299
+ self.conv2 = nn.Conv2d(out_c, out_c, 3, 1, 1, bias=True)
300
+ self.sfm1 = nn.Sequential(
301
+ conv1x1(1, out_c),
302
+ nn.SiLU(),
303
+ conv1x1(out_c, out_c),
304
+ )
305
+ self.sfm2 = nn.Sequential(
306
+ conv1x1(1, out_c),
307
+ nn.SiLU(),
308
+ conv1x1(out_c, out_c),
309
+ )
310
+ if in_c != out_c:
311
+ self.short_cut = nn.Sequential(
312
+ conv1x1(in_c, out_c)
313
+ )
314
+ else:
315
+ self.short_cut = nn.Sequential(OrderedDict([]))
316
+
317
+ def forward(self, x, t):
318
+ x = self.short_cut(x)
319
+ z = self.act(x)
320
+ z = self.conv1(z)
321
+ a1 = self.sfm1(t)
322
+ z *= a1
323
+ z = self.act(z)
324
+ z = self.conv2(z)
325
+ a2 = self.sfm2(t)
326
+ z *= a2
327
+ z += x
328
+ return z
329
+
330
+ class ResBlock(nn.Module):
331
+ def __init__(self, in_c, out_c, is_activate=False):
332
+ super().__init__()
333
+ # self.norm = nn.LayerNorm(out_c)
334
+ self.act = nn.LeakyReLU(0.2) if is_activate else nn.SiLU()
335
+ self.conv1 = nn.Conv2d(out_c, out_c, 3, 1, 1, bias=True)
336
+ self.conv2 = nn.Conv2d(out_c, out_c, 3, 1, 1, bias=True)
337
+ self.gamma = nn.Sequential(
338
+ conv1x1(1, out_c),
339
+ self.act,
340
+ conv1x1(out_c, out_c),
341
+ )
342
+ self.beta = nn.Sequential(
343
+ self.act,
344
+ conv1x1(out_c, out_c),
345
+ )
346
+ if in_c != out_c:
347
+ self.short_cut = nn.Sequential(
348
+ conv1x1(in_c, out_c)
349
+ )
350
+ else:
351
+ self.short_cut = nn.Sequential(OrderedDict([]))
352
+
353
+ def forward(self, x):
354
+ x = self.short_cut(x)
355
+ z = self.act(x)
356
+ z = self.conv1(z)
357
+ z = self.act(z)
358
+ z = self.conv2(z)
359
+ z += x
360
+ return z
361
+
362
+ class ResidualBlock(nn.Module):
363
+ def __init__(self, in_c, out_c, is_activate=True):
364
+ super(ResidualBlock, self).__init__()
365
+ self.block = nn.Sequential(
366
+ convWithBN(in_c, out_c, kernel_size=3, padding=1, stride=1, is_bn=False),
367
+ convWithBN(out_c, out_c, kernel_size=3, padding=1, stride=1, is_activate=False, is_bn=False)
368
+ )
369
+
370
+ if in_c != out_c:
371
+ self.short_cut = nn.Sequential(
372
+ convWithBN(in_c, out_c, kernel_size=1, padding=0, stride=1, is_activate=False, is_bn=False)
373
+ )
374
+ else:
375
+ self.short_cut = nn.Sequential(OrderedDict([]))
376
+
377
+ self.activation = nn.LeakyReLU(0.2, inplace=False) if is_activate else nn.Sequential()
378
+
379
+ def forward(self, x):
380
+ output = self.block(x)
381
+ output = self.activation(output)
382
+ output += self.short_cut(x)
383
+ return output
384
+
385
+ class ChannelAttention(nn.Module):
386
+ def __init__(self, in_planes, ratio=16):
387
+ super().__init__()
388
+ self.in_nc = in_planes
389
+ self.ratio = ratio
390
+ self.avg_pool = nn.AdaptiveAvgPool2d(1)
391
+ self.max_pool = nn.AdaptiveMaxPool2d(1)
392
+
393
+ self.sharedMLP = nn.Sequential(
394
+ nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False), nn.ReLU(),
395
+ nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False))
396
+ self.sigmoid = nn.Sigmoid()
397
+
398
+ def forward(self, x):
399
+ avgout = self.sharedMLP(self.avg_pool(x))
400
+ maxout = self.sharedMLP(self.max_pool(x))
401
+ return self.sigmoid(avgout + maxout)
402
+
403
+
404
+ class SpatialAttention(nn.Module):
405
+ def __init__(self, kernel_size=3):
406
+ super().__init__()
407
+ self.conv = nn.Conv2d(2, 1, kernel_size, padding=kernel_size//2, bias=False)
408
+ self.sigmoid = nn.Sigmoid()
409
+ self.concat = Concat()
410
+ self.mean = torch.mean
411
+ self.max = torch.max
412
+
413
+ def forward(self, x):
414
+ avgout = self.mean(x, 1, True)
415
+ maxout, _ = self.max(x, 1, True)
416
+ x = self.concat([avgout, maxout], 1)
417
+ x = self.conv(x)
418
+ return self.sigmoid(x)
419
+
420
+
421
+ class CBAM(nn.Module):
422
+ def __init__(self, planes):
423
+ super().__init__()
424
+ self.ca = ChannelAttention(planes)
425
+ self.sa = SpatialAttention()
426
+ def forward(self, x):
427
+ x = self.ca(x) * x
428
+ out = self.sa(x) * x
429
+ return out
430
+
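CBAM gates features with channel attention first, then spatial attention, each as an elementwise multiply that preserves the feature shape. A quick sketch:

cbam = CBAM(64)
feat = torch.randn(2, 64, 32, 32)
out = cbam(feat)   # (2, 64, 32, 32), re-weighted per channel, then per location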
431
+ class MaskMul(nn.Module):
432
+ def __init__(self, scale_factor=1):
433
+ super().__init__()
434
+ self.scale_factor = scale_factor
435
+
436
+ def forward(self, x, mask):
437
+ if mask.shape[1] != x.shape[1]:
438
+ mask = torch.mean(mask, dim=1, keepdim=True)
439
+ pooled_mask = F.avg_pool2d(mask, self.scale_factor)
440
+ out = torch.mul(x, pooled_mask)
441
+ return out
442
+
443
+ class UpsampleBLock(nn.Module):
444
+ def __init__(self, in_channels, out_channels=None, up_scale=2, mode='bilinear'):
445
+ super(UpsampleBLock, self).__init__()
446
+ if mode == 'pixel_shuffle':
447
+ self.conv = nn.Conv2d(in_channels, in_channels * up_scale ** 2, kernel_size=3, padding=1)
448
+ self.up = nn.PixelShuffle(up_scale)
449
+ elif mode=='bilinear':
450
+ self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
451
+ self.up = nn.UpsamplingBilinear2d(scale_factor=up_scale)
452
+ else:
453
+ print(f"Please tell me what is '{mode}' mode ????")
454
+ raise NotImplementedError
455
+ self.relu = nn.ReLU(inplace=True)
456
+
457
+ def forward(self, x):
458
+ x = self.conv(x)
459
+ x = self.up(x)
460
+ x = self.relu(x)
461
+ return x
462
+
463
+ def pixel_unshuffle(input, downscale_factor):
464
+ '''
465
+ input: batchSize * c * k*w * k*h
466
+ kdownscale_factor: k
467
+ batchSize * c * k*w * k*h -> batchSize * k*k*c * w * h
468
+ '''
469
+ c = input.shape[1]
470
+
471
+ kernel = torch.zeros(size=[downscale_factor * downscale_factor * c,
472
+ 1, downscale_factor, downscale_factor],
473
+ device=input.device)
474
+ for y in range(downscale_factor):
475
+ for x in range(downscale_factor):
476
+ kernel[x + y * downscale_factor::downscale_factor*downscale_factor, 0, y, x] = 1
477
+ return F.conv2d(input, kernel, stride=downscale_factor, groups=c)
478
+
479
+ class PixelUnshuffle(nn.Module):
480
+ def __init__(self, downscale_factor):
481
+ super(PixelUnshuffle, self).__init__()
482
+ self.downscale_factor = downscale_factor
483
+ def forward(self, input):
484
+ '''
485
+ input: batchSize * c * k*w * k*h
486
+ kdownscale_factor: k
487
+ batchSize * c * k*w * k*h -> batchSize * k*k*c * w * h
488
+ '''
489
+
490
+ return pixel_unshuffle(input, self.downscale_factor)
491
+
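A sanity check for the hand-rolled version above: its output channel ordering (c*k*k + y*k + x) matches torch's built-in, available as F.pixel_unshuffle since PyTorch 1.8:

x = torch.randn(1, 3, 8, 8)
assert torch.allclose(pixel_unshuffle(x, 2), F.pixel_unshuffle(x, 2))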
492
+ class Concat(nn.Module):
493
+ def __init__(self, dim=1):
494
+ super().__init__()
495
+ self.dim = dim
496
+ self.concat = torch.cat
497
+
498
+ def padding(self, tensors):
499
+ if len(tensors) > 2:
500
+ return tensors
501
+ x , y = tensors
502
+ xb, xc, xh, xw = x.size()
503
+ yb, yc, yh, yw = y.size()
504
+ diffY = xh - yh
505
+ diffX = xw - yw
506
+ y = F.pad(y, (diffX // 2, diffX - diffX//2,
507
+ diffY // 2, diffY - diffY//2))
508
+ return (x, y)
509
+
510
+ def forward(self, x, dim=None):
511
+ x = self.padding(x)
512
+ return self.concat(x, dim if dim is not None else self.dim)
513
+
514
+ # if __name__ == '__main__':
515
+ # from torchsummary import summary
516
+ # x = torch.randn((1,32,16,16))
517
+ # for k in range(1,3):
518
+ # # up = upsample(32, 2**k)
519
+ # # down = downsample(32//(2**k), 2**k)
520
+ # # x_up = up(x)
521
+ # # x_down = down(x_up)
522
+ # # s_up = (32,16,16)
523
+ # # summary(up,s,device='cpu')
524
+ # # summary(down,s,device='cpu')
525
+ # print(k)
checkpoints/Gaussian/Gaussian_GRU_mix_5to50_norm_last_model.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11e093d6437297060d0289cbfe6ed8b9db606ff469cc95cf9e0a5d1912fc98a7
3
+ size 44739485
checkpoints/Gaussian/Gaussian_GRU_mix_5to50_norm_noclip_last_model.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:926c88a5434e81c65913b19f289e70f073f60654a8e4825a381470f9d4714e6c
3
+ size 44740325
checkpoints/Gaussian/Gaussian_gru32n_lsdir2sid_last_model.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d9b7793d13cd91d1b42d2e00d3cc882dc5987807dc1d8e063e74f43853c9c15
3
+ size 44739736
checkpoints/Gaussian/Gaussian_gru64n_mix_noclip_last_model.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ef06926aa363d96fbc08e7c07c8a0005b9c2cc2ef2515e8ea4a8303220e9743
3
+ size 178726885
checkpoints/bias_lut_2d.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a81f9de5a2ff5ebc1e9131f825dcecbfc164d938daa49532cf0cd79cccfeb5e
3
+ size 8460212
data_process/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ from .unprocess import *
2
+ from .process import *
3
+ from .yond_datasets import *
data_process/process.py ADDED
@@ -0,0 +1,906 @@
1
+ """Forward processing of raw data to sRGB images.
2
+ Unprocessing Images for Learned Raw Denoising
3
+ http://timothybrooks.com/tech/unprocessing
4
+ """
5
+
6
+ import rawpy
7
+ import rawpy.enhance
8
+ import exifread
9
+ import numpy as np
10
+ import torch
11
+ import torch.nn as nn
12
+ import torch.distributions as tdist
13
+ from scipy import stats
14
+ from utils import *
15
+ import os
+ import random
+ import warnings
+ import matplotlib.pyplot as plt  # used by the __main__ demo below
16
+ from .unprocess import random_gains
17
+
18
+ Dual_ISO_Cameras = ['SonyA7S2']
19
+ HALF_CLIP = 2
20
+
21
+ def bpc_aug(data, ratio=1e-6, wp=1):  # bad-pixel augmentation: set a few random pixels to the white point
22
+ B, C, H, W = data.shape
23
+ npoints = int(2 * np.random.rand() * ratio * data.numel())
24
+ for i in range(npoints):
25
+ x = np.random.randint(H)
26
+ y = np.random.randint(W)
27
+ c = np.random.randint(C)
28
+ b = np.random.randint(B)
29
+ data[b, c, x, y] = wp
30
+ return data
31
+
32
+ def data_aug(data, choice, bias=0, rot=False):
33
+ if choice[0] == 1:
34
+ data = np.flip(data, axis=2+bias)
35
+ if choice[1] == 1:
36
+ data = np.flip(data, axis=3+bias)
37
+ return data
38
+
39
+ def inverse_VST_torch(x, noiseparam, iso_list, wp=1):
40
+ x = x * wp
41
+ b = len(iso_list)
42
+ for i in range(b):
43
+ iso = iso_list[i].item()
44
+ gain = noiseparam[iso]['Kmax']
45
+ sigma = noiseparam[iso]['sigGs']
46
+ x[i] = (x[i] / 2.0)**2 - 3.0/8.0 - sigma**2 / gain**2
47
+ x[i] = x[i] * gain
48
+ x = x / wp
49
+ return x
50
+
51
+ def pack_raw_bayer(raw, wp=1023, clip=True):
52
+ #pack Bayer image to 4 channels
53
+ im = raw.raw_image_visible.astype(np.float32)
54
+ raw_pattern = raw.raw_pattern
55
+ R = np.where(raw_pattern==0)
56
+ G1 = np.where(raw_pattern==1)
57
+ B = np.where(raw_pattern==2)
58
+ G2 = np.where(raw_pattern==3)
59
+
60
+ white_point = wp
61
+ img_shape = im.shape
62
+ H = img_shape[0]
63
+ W = img_shape[1]
64
+
65
+ out = np.stack((im[R[0][0]:H:2, R[1][0]:W:2], #RGBG
66
+ im[G1[0][0]:H:2, G1[1][0]:W:2],
67
+ im[B[0][0]:H:2, B[1][0]:W:2],
68
+ im[G2[0][0]:H:2, G2[1][0]:W:2]), axis=0).astype(np.float32)
69
+
70
+ black_level = np.array(raw.black_level_per_channel)[:,None,None].astype(np.float32)
71
+
72
+ out = (out - black_level) / (white_point - black_level)
73
+ out = np.clip(out, 0.0, 1.0) if clip else out
74
+
75
+ return out
76
+
77
+ def postprocess_bayer(rawpath, img4c, white_point=1023):
78
+ if torch.is_tensor(img4c):
79
+ img4c = img4c.detach()
80
+ img4c = img4c[0].cpu().float().numpy()
81
+ img4c = np.clip(img4c, 0, 1)
82
+
83
+ #unpack 4 channels to Bayer image
84
+ raw = rawpy.imread(rawpath)
85
+ raw_pattern = raw.raw_pattern
86
+ R = np.where(raw_pattern==0)
87
+ G1 = np.where(raw_pattern==1)
88
+ G2 = np.where(raw_pattern==3)
89
+ B = np.where(raw_pattern==2)
90
+
91
+ black_level = np.array(raw.black_level_per_channel)[:,None,None]
92
+
93
+ img4c = img4c * (white_point - black_level) + black_level
94
+
95
+ img_shape = raw.raw_image_visible.shape
96
+ H = img_shape[0]
97
+ W = img_shape[1]
98
+
99
+ raw.raw_image_visible[R[0][0]:H:2, R[1][0]:W:2] = img4c[0, :,:]
100
+ raw.raw_image_visible[G1[0][0]:H:2,G1[1][0]:W:2] = img4c[1, :,:]
101
+ raw.raw_image_visible[B[0][0]:H:2,B[1][0]:W:2] = img4c[2, :,:]
102
+ raw.raw_image_visible[G2[0][0]:H:2,G2[1][0]:W:2] = img4c[3, :,:]
103
+
104
+ # out = raw.postprocess(use_camera_wb=False, user_wb=[1,1,1,1], half_size=True, no_auto_bright=True, output_bps=8, bright=1, user_black=None, user_sat=None)
105
+ # out = raw.postprocess(use_camera_wb=False, user_wb=[1.96875, 1, 1.444, 1], half_size=True, no_auto_bright=True, output_bps=8, bright=1, user_black=None, user_sat=None)
106
+ out = raw.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=8, bright=1, user_black=None, user_sat=None)
107
+ return out
108
+
109
+ def postprocess_bayer_v2(rawpath, img4c):
110
+ with rawpy.imread(rawpath) as raw:
111
+ out_srgb = raw2rgb_postprocess(img4c.detach(), raw)
112
+
113
+ return out_srgb
114
+
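A sketch of the intended use of the pack/unpack pair above; the file path is hypothetical and the white point matches a 14-bit sensor:

raw = rawpy.imread('example.ARW')                  # hypothetical path
packed = pack_raw_bayer(raw, wp=16383)             # (4, H/2, W/2), normalized RGBG
img4c = torch.from_numpy(packed)[None]             # add a batch dimension
srgb = postprocess_bayer('example.ARW', img4c, white_point=16383)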
115
+ def apply_gains(bayer_images, wbs):
116
+ """Applies white balance to a batch of Bayer images."""
117
+ N, C, _, _ = bayer_images.shape
118
+ wbs = wbs.repeat((N,1)).view(N, C, 1, 1)
119
+ outs = bayer_images * wbs
120
+ return outs
121
+
122
+
123
+ def apply_ccms(images, ccms):
124
+ """Applies color correction matrices."""
125
+ images = images.permute(
126
+ 0, 2, 3, 1) # Permute the image tensor to BxHxWxC format from BxCxHxW format
127
+ images = images[:, :, :, None, :]
128
+ ccms = ccms[:, None, None, :, :]
129
+ outs = torch.sum(images * ccms, dim=-1)
130
+ # Re-Permute the tensor back to BxCxHxW format
131
+ outs = outs.permute(0, 3, 1, 2)
132
+ return outs
133
+
134
+
135
+ def gamma_compression(images, gamma=2.2):
136
+ """Converts from linear to gamma space."""
137
+ outs = torch.clamp(images, min=1e-8) ** (1 / gamma)
138
+ # outs = (1 + gamma[0]) * np.power(images, 1.0/gamma[1]) - gamma[0] + gamma[2]*images
139
+ outs = torch.clamp((outs*255).int(), min=0, max=255).float() / 255
140
+ return outs
141
+
142
+
143
+ def raw2LRGB(bayer_images):
144
+ """RGBG -> linear RGB"""
145
+ lin_rgb = torch.stack([
146
+ bayer_images[:,0,...],
147
+ torch.mean(bayer_images[:, [1,3], ...], dim=1),
148
+ bayer_images[:,2,...]], dim=1)
149
+
150
+ return lin_rgb
151
+
152
+
153
+ def process(bayer_images, wbs, cam2rgbs, gamma=2.2):
154
+ """Processes a batch of Bayer RGBG images into sRGB images."""
155
+ # White balance.
156
+ bayer_images = apply_gains(bayer_images, wbs)
157
+ # Binning
158
+ bayer_images = torch.clamp(bayer_images, min=0.0, max=1.0)
159
+ images = raw2LRGB(bayer_images)
160
+ # Color correction.
161
+ images = apply_ccms(images, cam2rgbs)
162
+ # Gamma compression.
163
+ images = torch.clamp(images, min=0.0, max=1.0)
164
+ images = gamma_compression(images, gamma)
165
+
166
+ return images
167
+
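A minimal sketch of calling the pipeline above on synthetic data (shapes only; the gains, color matrix, and input here are made up):

bayer = torch.rand(2, 4, 64, 64)                 # packed RGBG, values in [0, 1]
wb = torch.tensor([2.0, 1.0, 1.5, 1.0])          # per-channel white-balance gains
ccm = torch.eye(3).unsqueeze(0).repeat(2, 1, 1)  # identity color correction matrix
srgb = process(bayer, wbs=wb, cam2rgbs=ccm, gamma=2.2)  # -> (2, 3, 64, 64)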
168
+
169
+ def raw2rgb(packed_raw, raw):
170
+ """Raw2RGB pipeline (preprocess version)"""
171
+ wb = np.array(raw.camera_whitebalance)
172
+ wb /= wb[1]
173
+ cam2rgb = raw.color_matrix[:3, :3]
174
+ if cam2rgb[0,0] == 0:
175
+ cam2rgb = np.eye(3, dtype=np.float32)
176
+
177
+ if isinstance(packed_raw, np.ndarray):
178
+ packed_raw = torch.from_numpy(packed_raw).float()
179
+
180
+ wb = torch.from_numpy(wb).float().to(packed_raw.device)
181
+ cam2rgb = torch.from_numpy(cam2rgb).float().to(packed_raw.device)
182
+ out = process(packed_raw[None], wbs=wb[None], cam2rgbs=cam2rgb[None], gamma=2.2)[0, ...].numpy()
183
+
184
+ return out
185
+
186
+
187
+ def raw2rgb_v2(packed_raw, wb, ccm):
188
+ if torch.is_tensor(packed_raw):
189
+ packed_raw = packed_raw.detach().cpu().float()
190
+ else:
191
+ packed_raw = torch.from_numpy(packed_raw).float()
192
+ wb = torch.from_numpy(wb).float()
193
+ cam2rgb = torch.from_numpy(ccm).float()
194
+ out = process(packed_raw[None], wbs=wb[None], cam2rgbs=cam2rgb[None], gamma=2.2)[0, ...].numpy()
195
+ return out.transpose(1,2,0)
196
+
197
+
198
+ def raw2rgb_postprocess(packed_raw, raw):
199
+ """Raw2RGB pipeline (postprocess version)"""
200
+ assert packed_raw.ndimension() == 4
201
+ wb = np.array(raw.camera_whitebalance)
202
+ wb /= wb[1]
203
+ cam2rgb = raw.color_matrix[:3, :3]
204
+ if cam2rgb[0,0] == 0:
205
+ cam2rgb = np.eye(3, dtype=np.float32)
206
+
207
+ wb = torch.from_numpy(wb[None]).float().to(packed_raw.device)
208
+ cam2rgb = torch.from_numpy(cam2rgb[None]).float().to(packed_raw.device)
209
+ out = process(packed_raw, wbs=wb, cam2rgbs=cam2rgb, gamma=2.2)
210
+ # out = raw.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)
211
+ return out
212
+
213
+ # def get_specific_noise_params(camera_type=None, iso='100'):
214
+ # cam_noisy_params = {}
215
+ # cam_noisy_params['IMX686'] = {
216
+ # '100':{'K':0.1366021, 'sigGs':0.6926457, 'sigGssig':0.002096},
217
+ # '6400':{'K':8.7425333, 'sigGs':14.303619546153575, 'sigGssig':0.0696716845864088},
218
+
219
+ # }
220
+ # if camera_type in cam_noisy_params:
221
+ # return cam_noisy_params[camera_type][iso]
222
+ # else:
223
+ # log(f'''Warning: we have not test the noisy parameters of camera "{camera_type}". Now we use NikonD850's parameters to test.''')
224
+ # return cam_noisy_params['IMX686']
225
+
226
+ def get_camera_noisy_params(camera_type=None):
227
+ cam_noisy_params = {}
228
+ cam_noisy_params['OS04J10'] = {#K_ISO=(0.01134,0.56113)
229
+ 'K_ISO': (0.01143, 0.4127), 'ISOmin':100, 'ISOmax':1593, 'tmin':30, 'tmax':30, # exposure time(ms)
230
+ 'Kmin':0.44193, 'Kmax':2.92427, 'lam':-0.094, 'q':2.442e-04, 'wp':4095, 'bl':256,
231
+ 'sigRk':0.81696, 'sigRb':-2.69453, 'sigRsig':0.04015,
232
+ 'sigTLk':0.82984, 'sigTLb':-0.76241, 'sigTLsig':0.06671,
233
+ 'sigGsk':0.89423, 'sigGsb':-0.08060, 'sigGssig':0.04103,
234
+ 'sigReadk':0.00772, 'sigReadb':0.65775, 'sigReadsig':0.34109,
235
+ 'uReadk':0.00001, 'uReadb':0.00207, 'uReadsig':0.00059
236
+ }
237
+ cam_noisy_params['SC850AI'] = {
238
+ 'Kmin':1.42030, 'Kmax':1.70373, 'lam':-0.035, 'q':1/959, 'wp':1023, 'bl':64,
239
+ 'sigRk':0.85792, 'sigRb':-2.82338, 'sigRsig':0.01944,
240
+ 'sigTLk':-0.15504, 'sigTLb':0.71196, 'sigTLsig':0.04500,
241
+ 'sigGsk':0.60800, 'sigGsb':0.17009, 'sigGssig':0.02261,
242
+ 'sigReadk':0.60940, 'sigReadb':0.16932, 'sigReadsig':0.02257,
243
+ 'uReadk':0.00000, 'uReadb':-0.00292, 'uReadsig':0.00400
244
+ }
245
+ cam_noisy_params['SC450AI'] = { # ISO_3600_6052 (0.00065631, 0.02588613)
246
+ 'Kmin':0.87071, 'Kmax':1.38576, 'lam':0.042, 'q':1/959, 'wp':1023, 'bl':64,
247
+ 'sigRk':0.98972, 'sigRb':-2.69256, 'sigRsig':0.03678,
248
+ 'sigTLk':0.75890, 'sigTLb':-0.51098, 'sigTLsig':0.04700,
249
+ 'sigGsk':0.87135, 'sigGsb':-0.11635, 'sigGssig':0.04866,
250
+ 'sigReadk':0.00044, 'sigReadb':0.30172, 'sigReadsig':0.12209,
251
+ 'uReadk':-0.00005, 'uReadb':0.32066, 'uReadsig':0.01992
252
+ }
253
+ # new calibration (smaller)
254
+ cam_noisy_params['SC450'] = { # ISO_3600_6052 (0.00065631, 0.02588613)
255
+ 'Kmin':0.87071, 'Kmax':1.38576, 'lam':0.042, 'q':1/959, 'wp':1023, 'bl':64,
256
+ 'sigRk':0.97496, 'sigRb':-2.78752, 'sigRsig':0.02109,
257
+ 'sigTLk':0.57693, 'sigTLb':-0.44032, 'sigTLsig':0.01515,
258
+ 'sigGsk':0.88683, 'sigGsb':-0.26233, 'sigGssig':0.00695,
259
+ 'sigReadk':0.00040, 'sigReadb':0.23393, 'sigReadsig':0.01396,
260
+ 'uReadk':0.00000, 'uReadb':0.00076, 'uReadsig':0.00125
261
+ }
262
+ cam_noisy_params['NikonD850'] = {
263
+ 'Kmin':1.2, 'Kmax':2.4828, 'lam':-0.26, 'q':1/(2**14), 'wp':16383, 'bl':512,
264
+ 'sigTLk':0.906, 'sigTLb':-0.6754, 'sigTLsig':0.035165,
265
+ 'sigRk':0.8322, 'sigRb':-2.3326, 'sigRsig':0.301333,
266
+ 'sigGsk':0.906, 'sigGsb':-0.1754, 'sigGssig':0.035165,
267
+ }
268
+ cam_noisy_params['IMX686'] = { # ISO-640~6400
269
+ 'Kmin':-0.19118, 'Kmax':2.16820, 'lam':0.102, 'q':1/(2**10), 'wp':1023, 'bl':64,
270
+ 'sigTLk':0.85187, 'sigTLb':0.07991, 'sigTLsig':0.02921,
271
+ 'sigRk':0.87611, 'sigRb':-2.11455, 'sigRsig':0.03274,
272
+ 'sigGsk':0.85187, 'sigGsb':0.67991, 'sigGssig':0.02921,
273
+ }
274
+ cam_noisy_params['SonyA7S2_lowISO'] = {
275
+ 'Kmin':-1.67214, 'Kmax':0.42228, 'lam':-0.026, 'q':1/(2**14), 'wp':16383, 'bl':512,
276
+ 'sigRk':0.78782, 'sigRb':-0.34227, 'sigRsig':0.02832,
277
+ 'sigTLk':0.74043, 'sigTLb':0.86182, 'sigTLsig':0.00712,
278
+ 'sigGsk':0.82966, 'sigGsb':1.49343, 'sigGssig':0.00359,
279
+ 'sigReadk':0.82879, 'sigReadb':1.50601, 'sigReadsig':0.00362,
280
+ 'uReadk':0.01472, 'uReadb':0.01129, 'uReadsig':0.00034,
281
+ }
282
+ cam_noisy_params['SonyA7S2_highISO'] = {
283
+ 'Kmin':0.64567, 'Kmax':2.51606, 'lam':-0.025, 'q':1/(2**14), 'wp':16383, 'bl':512,
284
+ 'sigRk':0.62945, 'sigRb':-1.51040, 'sigRsig':0.02609,
285
+ 'sigTLk':0.74901, 'sigTLb':-0.12348, 'sigTLsig':0.00638,
286
+ 'sigGsk':0.82878, 'sigGsb':0.44162, 'sigGssig':0.00153,
287
+ 'sigReadk':0.82645, 'sigReadb':0.45061, 'sigReadsig':0.00156,
288
+ 'uReadk':0.00385, 'uReadb':0.00674, 'uReadsig':0.00039,
289
+ }
290
+ cam_noisy_params['CRVD'] = {
291
+ 'Kmin':1.31339, 'Kmax':3.95448, 'lam':0.015, 'q':1/(2**12), 'wp':4095, 'bl':240,
292
+ 'sigRk':0.93368, 'sigRb':-2.19692, 'sigRsig':0.02473,
293
+ 'sigGsk':0.95387, 'sigGsb':0.01552, 'sigGssig':0.00855,
294
+ 'sigTLk':0.95495, 'sigTLb':0.01618, 'sigTLsig':0.00790
295
+ }
296
+ if camera_type in cam_noisy_params:
297
+ return cam_noisy_params[camera_type]
298
+ else:
299
+ log(f'''Warning: the noise parameters of camera "{camera_type}" have not been calibrated; falling back to NikonD850's parameters.''')
300
+ return cam_noisy_params['NikonD850']
301
+
302
+ def get_specific_noise_params(camera_type=None, iso='100'):
303
+ iso = str(iso)
304
+ cam_noisy_params = {}
305
+ cam_noisy_params['SonyA7S2'] = {
306
+ '50': {'Kmax': 0.047815, 'lam': 0.1474653, 'sigGs': 1.0164667, 'sigGssig': 0.005272454, 'sigTL': 0.70727646, 'sigTLsig': 0.004360543, 'sigR': 0.13997398, 'sigRsig': 0.0064381803, 'bias': 0, 'biassig': 0.010093017, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
307
+ '64': {'Kmax': 0.0612032, 'lam': 0.13243394, 'sigGs': 1.0509665, 'sigGssig': 0.008081373, 'sigTL': 0.71535635, 'sigTLsig': 0.0056863446, 'sigR': 0.14346549, 'sigRsig': 0.006400559, 'bias': 0, 'biassig': 0.008690166, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
308
+ '80': {'Kmax': 0.076504, 'lam': 0.1121489, 'sigGs': 1.180899, 'sigGssig': 0.011333668, 'sigTL': 0.7799473, 'sigTLsig': 0.009347968, 'sigR': 0.19540153, 'sigRsig': 0.008197397, 'bias': 0, 'biassig': 0.0107246125, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
309
+ '100': {'Kmax': 0.09563, 'lam': 0.14875287, 'sigGs': 1.0067395, 'sigGssig': 0.0033682834, 'sigTL': 0.70181876, 'sigTLsig': 0.0037532174, 'sigR': 0.1391465, 'sigRsig': 0.006530218, 'bias': 0, 'biassig': 0.007235429, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
310
+ '125': {'Kmax': 0.1195375, 'lam': 0.12904578, 'sigGs': 1.0279676, 'sigGssig': 0.007364685, 'sigTL': 0.6961967, 'sigTLsig': 0.0048687346, 'sigR': 0.14485553, 'sigRsig': 0.006731584, 'bias': 0, 'biassig': 0.008026363, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
311
+ '160': {'Kmax': 0.153008, 'lam': 0.094135, 'sigGs': 1.1293099, 'sigGssig': 0.008340453, 'sigTL': 0.7258587, 'sigTLsig': 0.008032158, 'sigR': 0.19755602, 'sigRsig': 0.0082754735, 'bias': 0, 'biassig': 0.0101351, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
312
+ '200': {'Kmax': 0.19126, 'lam': 0.07902429, 'sigGs': 1.2926387, 'sigGssig': 0.012171176, 'sigTL': 0.8117464, 'sigTLsig': 0.010250768, 'sigR': 0.22815849, 'sigRsig': 0.010726711, 'bias': 0, 'biassig': 0.011413908, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
313
+ '250': {'Kmax': 0.239075, 'lam': 0.051688068, 'sigGs': 1.4345995, 'sigGssig': 0.01606571, 'sigTL': 0.8630922, 'sigTLsig': 0.013844714, 'sigR': 0.26271912, 'sigRsig': 0.0130637, 'bias': 0, 'biassig': 0.013569083, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
314
+ '320': {'Kmax': 0.306016, 'lam': 0.040700804, 'sigGs': 1.7481371, 'sigGssig': 0.019626873, 'sigTL': 1.0334468, 'sigTLsig': 0.017629284, 'sigR': 0.3097104, 'sigRsig': 0.016202712, 'bias': 0, 'biassig': 0.017825918, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
315
+ '400': {'Kmax': 0.38252, 'lam': 0.0222538, 'sigGs': 2.0595572, 'sigGssig': 0.024872316, 'sigTL': 1.1816813, 'sigTLsig': 0.02505812, 'sigR': 0.36209714, 'sigRsig': 0.01994737, 'bias': 0, 'biassig': 0.021005306, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
316
+ '500': {'Kmax': 0.47815, 'lam': -0.0031342343, 'sigGs': 2.3956928, 'sigGssig': 0.030144656, 'sigTL': 1.31772, 'sigTLsig': 0.028629242, 'sigR': 0.42528257, 'sigRsig': 0.025104137, 'bias': 0, 'biassig': 0.02981831, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
317
+ '640': {'Kmax': 0.612032, 'lam': 0.002566592, 'sigGs': 2.9662898, 'sigGssig': 0.045661453, 'sigTL': 1.6474211, 'sigTLsig': 0.04671843, 'sigR': 0.48839623, 'sigRsig': 0.031589635, 'bias': 0, 'biassig': 0.10000693, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
318
+ '800': {'Kmax': 0.76504, 'lam': -0.008199721, 'sigGs': 3.5475867, 'sigGssig': 0.052318197, 'sigTL': 1.9346539, 'sigTLsig': 0.046128694, 'sigR': 0.5723769, 'sigRsig': 0.037824076, 'bias': 0, 'biassig': 0.025339302, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
319
+ '1000': {'Kmax': 0.9563, 'lam': -0.021061005, 'sigGs': 4.2727833, 'sigGssig': 0.06972333, 'sigTL': 2.2795107, 'sigTLsig': 0.059203167, 'sigR': 0.6845563, 'sigRsig': 0.04879781, 'bias': 0, 'biassig': 0.027911892, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
320
+ '1250': {'Kmax': 1.195375, 'lam': -0.032423194, 'sigGs': 5.177596, 'sigGssig': 0.092677385, 'sigTL': 2.708437, 'sigTLsig': 0.07622563, 'sigR': 0.8177013, 'sigRsig': 0.06162229, 'bias': 0, 'biassig': 0.03293372, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
321
+ '1600': {'Kmax': 1.53008, 'lam': -0.0441045, 'sigGs': 6.29925, 'sigGssig': 0.1153261, 'sigTL': 3.2283993, 'sigTLsig': 0.09118158, 'sigR': 0.988786, 'sigRsig': 0.078567736, 'bias': 0, 'biassig': 0.03877672, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
322
+ '2000': {'Kmax': 1.9126, 'lam': -0.012963797, 'sigGs': 2.653871, 'sigGssig': 0.015890995, 'sigTL': 1.4356787, 'sigTLsig': 0.02178686, 'sigR': 0.33124214, 'sigRsig': 0.018801652, 'bias': 0, 'biassig': 0.01570677, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
323
+ '2500': {'Kmax': 2.39075, 'lam': -0.027097283, 'sigGs': 3.200225, 'sigGssig': 0.019307792, 'sigTL': 1.6897862, 'sigTLsig': 0.025873765, 'sigR': 0.38264316, 'sigRsig': 0.023769397, 'bias': 0, 'biassig': 0.018728448, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
324
+ '3200': {'Kmax': 3.06016, 'lam': -0.034863412, 'sigGs': 3.9193838, 'sigGssig': 0.02649232, 'sigTL': 2.0417721, 'sigTLsig': 0.032873377, 'sigR': 0.44543457, 'sigRsig': 0.030114045, 'bias': 0, 'biassig': 0.021355819, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
325
+ '4000': {'Kmax': 3.8252, 'lam': -0.043700505, 'sigGs': 4.8015847, 'sigGssig': 0.03781628, 'sigTL': 2.4629273, 'sigTLsig': 0.042401053, 'sigR': 0.52347374, 'sigRsig': 0.03929801, 'bias': 0, 'biassig': 0.026152484, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
326
+ '5000': {'Kmax': 4.7815, 'lam': -0.053150143, 'sigGs': 5.8995814, 'sigGssig': 0.0625814, 'sigTL': 2.9761007, 'sigTLsig': 0.061326735, 'sigR': 0.6190265, 'sigRsig': 0.05335372, 'bias': 0, 'biassig': 0.058574405, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
327
+ '6400': {'Kmax': 6.12032, 'lam': -0.07517104, 'sigGs': 7.1163535, 'sigGssig': 0.08435366, 'sigTL': 3.4502964, 'sigTLsig': 0.08226275, 'sigR': 0.7218788, 'sigRsig': 0.0642334, 'bias': 0, 'biassig': 0.059074216, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
328
+ '8000': {'Kmax': 7.6504, 'lam': -0.08208357, 'sigGs': 8.916516, 'sigGssig': 0.12763213, 'sigTL': 4.269624, 'sigTLsig': 0.13381928, 'sigR': 0.87760293, 'sigRsig': 0.07389065, 'bias': 0, 'biassig': 0.084842026, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
329
+ '10000': {'Kmax': 9.563, 'lam': -0.073289566, 'sigGs': 11.291476, 'sigGssig': 0.1639773, 'sigTL': 5.495318, 'sigTLsig': 0.16279395, 'sigR': 1.0522343, 'sigRsig': 0.094359785, 'bias': 0, 'biassig': 0.107438326, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
330
+ '12800': {'Kmax': 12.24064, 'lam': -0.06495205, 'sigGs': 14.245901, 'sigGssig': 0.17283991, 'sigTL': 7.038261, 'sigTLsig': 0.18822834, 'sigR': 1.2749791, 'sigRsig': 0.120479785, 'bias': 0, 'biassig': 0.0944684, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
331
+ '16000': {'Kmax': 15.3008, 'lam': -0.060692135, 'sigGs': 17.833515, 'sigGssig': 0.19809262, 'sigTL': 8.877547, 'sigTLsig': 0.23338738, 'sigR': 1.5559287, 'sigRsig': 0.15791349, 'bias': 0, 'biassig': 0.09725099, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
332
+ '20000': {'Kmax': 19.126, 'lam': -0.060213074, 'sigGs': 22.084776, 'sigGssig': 0.21820943, 'sigTL': 11.002351, 'sigTLsig': 0.28806436, 'sigR': 1.8810822, 'sigRsig': 0.18937257, 'bias': 0, 'biassig': 0.4984733, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512},
333
+ '25600': {'Kmax': 24.48128, 'lam': -0.09089118, 'sigGs': 25.853043, 'sigGssig': 0.35371417, 'sigTL': 12.175712, 'sigTLsig': 0.4215717, 'sigR': 2.2760193, 'sigRsig': 0.2609267, 'bias': 0, 'biassig': 0.37568903, 'q': 6.103515625e-05, 'wp': 16383, 'bl': 512}
334
+ }
335
+ cam_noisy_params['NikonD850'] = {
336
+ '800':{
337
+ 'Kmax':3.320, 'sigGs':4.858579, 'sigGssig':0.002096,
338
+ 'sigTL':1.509532, 'lam':-0.26, 'sigR':0.263432,
339
+ 'q':1/(2**14), 'wp':16383, 'bl':512,
340
+ 'bias':np.array([0,0,0,0])
341
+ },
342
+ '1600':{
343
+ 'Kmax':6.305, 'sigGs':8.695116, 'sigGssig':0.06967,
344
+ 'sigTL':2.699073, 'lam':-0.26, 'sigR':0.449245,
345
+ 'q':1/(2**14), 'wp':16383, 'bl':512,
346
+ 'bias':np.array([0,0,0,0])
347
+ },
348
+ '3200':{
349
+ 'Kmax':11.975, 'sigGs':15.514215, 'sigGssig':0.06967,
350
+ 'sigTL':4.825994, 'lam':-0.26, 'sigR':0.766122,
351
+ 'q':1/(2**14), 'wp':16383, 'bl':512,
352
+ 'bias':np.array([0,0,0,0])
353
+ }
354
+ }
355
+ cam_noisy_params['IMX686'] = {
356
+ '100':{
357
+ 'Kmax':0.083805, 'sigGs':0.6926457, 'sigGssig':0.002096,
358
+ 'sigTL':0.67998, 'lam':0.10229621, 'sigR':0.23668,
359
+ 'q':1/(2**10), 'wp':1023, 'bl':64,
360
+ 'bias':np.array([0,0,0,0])
361
+ },
362
+ '6400':{
363
+ 'Kmax':8.74253, 'sigGs':12.8901, 'sigGssig':0.06967,
364
+ 'sigTL':12.8901, 'lam':0.10229621, 'sigR':0,
365
+ 'q':1/(2**10), 'wp':1023, 'bl':64,
366
+ 'bias':np.array([-0.08113494,-0.04906388,-0.9408157,-1.2048522])
367
+ }
368
+ }
369
+ if camera_type in cam_noisy_params:
370
+ return cam_noisy_params[camera_type][iso]
371
+ else:
372
+ # log(f'''Warning: we have not test the noisy parameters of camera "{camera_type}".''')
373
+ return None
374
+
375
+ # Generate noise parameters for the camera's maximum ISO (or a user-specified ISO)
376
+ def sample_params_max(camera_type='NikonD850', ratio=None, iso=None):
377
+ # Fetch the pre-calibrated camera noise parameters
378
+ params = None
379
+ if iso is not None:
380
+ params = get_specific_noise_params(camera_type=camera_type, iso=iso)
381
+ if params is None:
382
+ if camera_type in Dual_ISO_Cameras:
383
+ choice = np.random.randint(2)
384
+ camera_type += '_lowISO' if choice<1 else '_highISO'
385
+ params = get_camera_noisy_params(camera_type=camera_type)
386
+ # Sample noise parameters from the least-squares regression model
387
+ bias = 0
388
+ if 'K_ISO' in params and iso is not None:
389
+ K_ISO = params['K_ISO']
390
+ K = np.array(K_ISO[0] * iso + K_ISO[1]) * (1 + np.random.uniform(low=-0.02, high=+0.02))
391
+ log_K = np.log(K)
392
+ else:
393
+ log_K = params['Kmax'] * (1 + np.random.uniform(low=-0.02, high=+0.02))  # small perturbation to absorb calibration error
394
+ K = np.exp(log_K)
395
+ mu_TL = params['sigTLk']*log_K + params['sigTLb']
396
+ mu_R = params['sigRk']*log_K + params['sigRb']
397
+ mu_Gs = params['sigGsk']*log_K + params['sigGsb'] if 'sigGsk' in params else 2**(-14)
398
+ # back out of log space
399
+ sigTL = np.exp(mu_TL)
400
+ sigR = np.exp(mu_R)
401
+ sigGs = np.exp(np.random.normal(loc=mu_Gs, scale=params['sigGssig']) if 'sigGssig' in params else mu_Gs)
402
+ else:
403
+ K = params['Kmax'] * (1 + np.random.uniform(low=-0.01, high=+0.01))  # small perturbation to absorb calibration error
404
+ sigGs = np.random.normal(loc=params['sigGs'], scale=params['sigGssig']) if 'sigGssig' in params else params['sigGs']
405
+ sigTL = np.random.normal(loc=params['sigTL'], scale=params['sigTLsig']) if 'sigTLsig' in params else params['sigTL']
406
+ sigR = np.random.normal(loc=params['sigR'], scale=params['sigRsig']) if 'sigRsig' in params else params['sigR']
407
+ bias = params['bias']
408
+ wp = params['wp']
409
+ bl = params['bl']
410
+ lam = params['lam']
411
+ q = params['q']
412
+
413
+ if ratio is None:
414
+ if 'SonyA7S2' in camera_type:
415
+ ratio = np.random.uniform(low=100, high=300)
416
+ else:
417
+ log_ratio = np.random.uniform(low=0, high=2.08)
418
+ ratio = np.exp(log_ratio)
419
+
420
+ return {'K':K, 'sigTL':sigTL, 'sigR':sigR, 'sigGs':sigGs, 'bias':bias,
421
+ 'lam':lam, 'q':q, 'ratio':ratio, 'wp':wp, 'bl':bl}
422
+
423
+ # Noise parameter sampling
424
+ def sample_params(camera_type='NikonD850', ln_ratio=False):
425
+ choice = 1
426
+ Dual_ISO_Cameras = ['SonyA7S2']
427
+ if camera_type in Dual_ISO_Cameras:
428
+ choice = np.random.randint(2)
429
+ camera_type += '_lowISO' if choice<1 else '_highISO'
430
+
431
+ # Fetch the pre-calibrated camera noise parameters
432
+ params = get_camera_noisy_params(camera_type=camera_type)
433
+ wp = params['wp']
434
+ bl = params['bl']
435
+ lam = params['lam']
436
+ q = params['q']
437
+ Point_ISO_Cameras = ['CRVD', 'BM3D']
438
+ if camera_type in Point_ISO_Cameras:
439
+ if camera_type == 'CRVD':
440
+ iso_list = [1600,3200,6400,12800,25600]
441
+ a_list = np.array([3.513262,6.955588,13.486051,26.585953,52.032536])
442
+ b_list = np.array([11.917691,38.117816,130.818508,484.539790,1819.818657])
443
+ bias_points = np.array([-1.12660, -1.69546, -3.25935, -6.68111, -12.66876])
444
+ K_points = np.log(a_list)
445
+ Gs_points = np.log(np.sqrt(b_list))
446
+ choice = np.random.randint(5)
447
+ # use point-calibrated noise parameters
448
+ log_K = K_points[choice]
449
+ K = a_list[choice]
450
+ mu_TL = params['sigTLk']*log_K + params['sigTLb'] if 'sigTLk' in params else 0
451
+ mu_R = params['sigRk']*log_K + params['sigRb'] if 'sigRk' in params else 0
452
+ mu_Gs = Gs_points[choice]
453
+ bias = bias_points[choice]
454
+ else:
455
+ # Sample noise parameters from the least-squares regression model
456
+ bias = 0
457
+ log_K = np.random.uniform(low=params['Kmin'], high=params['Kmax'])
458
+ K = np.exp(log_K)
459
+ mu_TL = params['sigTLk']*log_K + params['sigTLb'] if 'sigTLk' in params else q
460
+ mu_R = params['sigRk']*log_K + params['sigRb'] if 'sigRk' in params else q
461
+ mu_Gs = params['sigGsk']*log_K + params['sigGsb'] if 'sigGsk' in params else q
462
+ mu_bias = params['uReadk']*log_K + params['uReadb']
463
+
464
+ log_sigTL = np.random.normal(loc=mu_TL, scale=params['sigTLsig']) if 'sigTLk' in params else 0
465
+ log_sigR = np.random.normal(loc=mu_R, scale=params['sigRsig']) if 'sigRk' in params else 0
466
+ log_sigGs = np.random.normal(loc=mu_Gs, scale=params['sigGssig']) if 'sigGsk' in params else q
467
+ log_bias = np.random.normal(loc=mu_bias, scale=params['uReadsig']) if 'uReadk' in params else 0
468
+ # back out of log space
469
+ sigTL = np.exp(log_sigTL)
470
+ sigR = np.exp(log_sigR)
471
+ sigGs = np.exp(log_sigGs)
472
+ bias = np.exp(log_bias)
473
+ # Exposure-attenuation factor; ln_ratio mode also covers mildly noisy scenes, so it generalizes better
474
+ if ln_ratio:
475
+ high = 1 if 'CRVD' in camera_type else 5
476
+ log_ratio = np.random.uniform(low=-0.01, high=high)
477
+ ratio = np.exp(log_ratio) # np.random.uniform(low=1, high=200) if choice else np.exp(log_ratio)
478
+ else:
479
+ ratio = np.random.uniform(low=100, high=300)
480
+
481
+ return {'K':K, 'sigTL':sigTL, 'sigR':sigR, 'sigGs':sigGs, 'bias':bias,
482
+ 'lam':lam, 'q':q, 'ratio':ratio, 'wp':wp, 'bl':bl}
483
+
484
+
485
+ def get_aug_param_torch(data, b=8, command='augv5', numpy=False, camera_type='SonyA7S2'):
486
+ aug_r, aug_g, aug_b = torch.zeros(b), torch.zeros(b), torch.zeros(b)
487
+ r = np.random.randint(2) * 0.25 + 0.25
488
+ u = r
489
+ if np.random.randint(4):
490
+ if 'augv5' in command:
491
+ # v5: augment following empirical camera white-balance priors (the aggressive option)
492
+ rgb_gain, red_gain, blue_gain = random_gains(camera_type)
493
+ rgb_gain = 1 / rgb_gain
494
+ rg = data["wb"][:, 0] / red_gain[0]
495
+ bg = data["wb"][:, 2] / blue_gain[0]
496
+ aug_g = torch.rand(b) * r + rgb_gain[0] - 0.9
497
+ aug_r = torch.rand(b) * r + rg * (1+aug_g) - 1.1
498
+ aug_b = torch.rand(b) * r + bg * (1+aug_g) - 1.1
499
+ elif 'augv2' in command:
500
+ # v2: trust the color accuracy of the original data (the conservative option)
501
+ aug_g = torch.clamp(torch.randn(b) * r, 0, 4*u)
502
+ aug_r = torch.clamp((1+torch.randn(b)*r) * (1+aug_g) - 1, 0, 4*u)
503
+ aug_b = torch.clamp((1+torch.randn(b)*r) * (1+aug_g) - 1, 0, 4*u)
504
+
505
+ # keep all gains non-negative
506
+ daug, _ = torch.min(torch.stack((aug_r, aug_g, aug_b)), dim=0)
507
+ daug[daug>0] = 0
508
+ aug_r = (1+aug_r) / (1+daug) - 1
509
+ aug_g = (1+aug_g) / (1+daug) - 1
510
+ aug_b = (1+aug_b) / (1+daug) - 1
511
+ if numpy:
512
+ aug_r = np.squeeze(aug_r.numpy())
513
+ aug_g = np.squeeze(aug_g.numpy())
514
+ aug_b = np.squeeze(aug_b.numpy())
515
+ return aug_r, aug_g, aug_b
516
+
517
+ def raw_wb_aug(noisy, gt, aug_wb=None, camera_type='SonyA7S2', ratio=1, ori=True, iso=None):
518
+ # [c, h, w]
519
+ p = get_specific_noise_params(camera_type=camera_type, iso=iso)
520
+ if p is None:
521
+ assert camera_type == 'SonyA7S2'
522
+ camera_type += '_lowISO' if iso<=1600 else '_highISO'
523
+ p = get_camera_noisy_params(camera_type=camera_type)
524
+ # Sample noise parameters from the least-squares regression model
525
+ p['K'] = 0.0009546 * iso * (1 + np.random.uniform(low=-0.01, high=+0.01)) - 0.00193
526
+ log_K = np.log(p['K'])
527
+ mu_Gs = p['sigGsk']*log_K + p['sigGsb']
528
+ p['sigGs'] = np.exp(np.random.normal(loc=mu_Gs, scale=p['sigGssig']))
529
+ else:
530
+ p['K'] = p['Kmax'] * (1 + np.random.uniform(low=-0.01, high=+0.01))  # small perturbation to absorb calibration error
531
+ p['sigGs'] = np.random.normal(loc=p['sigGs'], scale=p['sigGssig']) if 'sigGssig' in p else p['sigGs']
532
+
533
+ if aug_wb is not None:
534
+ # The Bayer pattern is assumed to be RGGB! (channels packed as RGBG)
535
+ gt = gt * (p['wp'] - p['bl']) / ratio
536
+ noisy = noisy * (p['wp'] - p['bl'])
537
+ # add the compensating noise
538
+ daug = -np.minimum(np.min(aug_wb), 0)
539
+ # aug_wb = aug_wb + daug
540
+ if daug == 0:
541
+ # With pure gain this is easy: just add an extra Poisson component
542
+ dy = gt * aug_wb.reshape(-1,1,1)  # leaving dy/dn unquantized seems friendlier to sample diversity
543
+ dn = np.random.poisson(dy/p['K']).astype(np.float32) * p['K']
544
+ else:
545
+ # BiSNA: a compromise scheme that was hard to justify; abandoned
546
+ raise NotImplementedError
547
+ # Negative gain is messy: read noise must be modeled and the distribution topped up
548
+ scale = 1 - daug
549
+ # dyn must stay non-negative
550
+ aug_wb_new = aug_wb + daug
551
+ dy = gt * aug_wb.reshape(-1,1,1)
552
+ dyn = gt * aug_wb_new.reshape(-1,1,1)
553
+ # first shrink the noisy image by scaling
554
+ noisy *= scale
555
+ # top up the single-frame read noise
556
+ dn_read = np.random.randn(*gt.shape).astype(np.float32) * p['sigGs'] * np.sqrt(1-scale**2)
557
+ # compensate the distribution shift caused by the scaling, approximately restoring the shot-noise statistics
558
+ scale_sigma = scale - scale**2
559
+ dn_shot = np.random.poisson(scale_sigma * gt/p['K']).astype(np.float32)*p['K'] - gt * scale_sigma
560
+ # add the extra Poisson component
561
+ dn_aug = np.random.poisson(dyn/p['K']).astype(np.float32) * p['K']
562
+ dn = dn_read + dn_shot + dn_aug
563
+
564
+ gt = np.clip((gt + dy)*ratio, 0, (p['wp'] - p['bl']))
565
+ noisy = np.clip(noisy + dn, -p['bl'], (p['wp'] - p['bl']))
566
+ gt /= (p['wp'] - p['bl'])
567
+ noisy /= (p['wp'] - p['bl'])
568
+
569
+ if ori is False:
570
+ noisy *= ratio
571
+
572
+ return noisy.astype(np.float32), gt.astype(np.float32)
573
+
574
+ def raw_wb_aug_torch(noisy, gt, aug_wb=None, camera_type='IMX686', ratio=1, ori=True, iso=None, ratiofix=False):
575
+ p = get_specific_noise_params(camera_type=camera_type, iso=iso)
576
+ if p is None:
577
+ assert camera_type == 'SonyA7S2'
578
+ camera_type += '_lowISO' if iso<=1600 else '_highISO'
579
+ p = get_camera_noisy_params(camera_type=camera_type)
580
+ # Sample noise parameters from the least-squares regression model
581
+ p['K'] = 0.0009546 * iso * (1 + np.random.uniform(low=-0.01, high=+0.01)) - 0.00193
582
+ log_K = np.log(p['K'])
583
+ mu_Gs = p['sigGsk']*log_K + p['sigGsb']
584
+ p['sigGs'] = np.exp(np.random.normal(loc=mu_Gs, scale=p['sigGssig']))
585
+ else:
586
+ p['K'] = p['Kmax'] * (1 + np.random.uniform(low=-0.01, high=+0.01))  # small perturbation to absorb calibration error
587
+ p['sigGs'] = np.random.normal(loc=p['sigGs'], scale=p['sigGssig']) if 'sigGssig' in p else p['sigGs']
588
+
589
+ if aug_wb is not None:
590
+ # The Bayer pattern is assumed to be RGGB! (channels packed as RGBG)
591
+ gt = gt * (p['wp'] - p['bl']) / ratio
592
+ noisy = noisy * (p['wp'] - p['bl'])
593
+ # add the compensating noise
594
+ daug = -np.minimum(np.min(aug_wb), 0)
595
+ daug = torch.from_numpy(np.array(daug)).to(noisy.device)
596
+ aug_wb = torch.from_numpy(aug_wb).to(noisy.device)
597
+ dy = gt * aug_wb.reshape(-1,1,1)  # leaving dy/dn unquantized seems friendlier to sample diversity
598
+ if daug == 0:
599
+ # With pure gain this is easy: just add an extra Poisson component
600
+ dn = tdist.Poisson(dy/p['K']).sample() * p['K']
601
+ else:
602
+ # BiSNA: a compromise scheme that was hard to justify; abandoned
603
+ warnings.warn('You are using BiSNA!!!')
604
+ raise NotImplementedError
605
+ # Negative gain is messy: read noise must be modeled and the distribution topped up
606
+ scale = 1 - daug
607
+ # dyn must stay non-negative
608
+ aug_wb_new = aug_wb + daug
609
+ dyn = gt * aug_wb_new.reshape(-1,1,1)
610
+ # first shrink the noisy image by scaling
611
+ noisy *= scale
612
+ # top up the single-frame read noise
613
+ dn_read = tdist.Normal(0, p['sigGs']).sample() * torch.sqrt(1-scale**2)
614
+ # compensate the distribution shift caused by the scaling, approximately restoring the shot-noise statistics
615
+ scale_sigma = scale - scale**2
616
+ dn_shot = tdist.Poisson(scale_sigma * gt/p['K']).sample() *p['K'] - gt * scale_sigma
617
+ # add the extra Poisson component
618
+ dn_aug = tdist.Poisson(dyn/p['K']).sample() * p['K']
619
+ dn = dn_read + dn_shot + dn_aug
620
+ if ratiofix:
621
+ ratio = ratio / (1 + daug)
622
+ gt = torch.clamp((gt + dy)*ratio, 0, (p['wp'] - p['bl']))
623
+ noisy = torch.clamp(noisy + dn, -p['bl'], (p['wp'] - p['bl']))
624
+ gt /= (p['wp'] - p['bl'])
625
+ noisy /= (p['wp'] - p['bl'])
626
+
627
+ if ori is False:
628
+ noisy *= ratio
629
+
630
+ return noisy, gt
631
+
632
+ def SNA_torch(gt, aug_wb, camera_type='IMX686', ratio=1, black_lr=False, ori=True, iso=None):
633
+ p = get_specific_noise_params(camera_type=camera_type, iso=iso)
634
+ if p is None:
635
+ assert camera_type == 'SonyA7S2'
636
+ camera_type += '_lowISO' if iso<=1600 else '_highISO'
637
+ p = get_camera_noisy_params(camera_type=camera_type)
638
+ # Sample noise parameters from the least-squares regression model
639
+ p['K'] = 0.0009546 * iso * (1 + np.random.uniform(low=-0.01, high=+0.01)) - 0.00193
640
+ else:
641
+ p['K'] = p['Kmax'] * (1 + np.random.uniform(low=-0.01, high=+0.01))  # small perturbation to absorb calibration error
642
+
643
+ # The Bayer pattern is assumed to be RGGB! (channels packed as RGBG)
644
+ gt = gt * (p['wp'] - p['bl']) / ratio
645
+ # add the compensating noise
646
+ aug_wb = torch.from_numpy(aug_wb).to(gt.device)
647
+ dy = gt * aug_wb.reshape(-1,1,1)  # leaving dy/dn unquantized seems friendlier to sample diversity
648
+ # With pure gain this is easy: just add an extra Poisson component
649
+ dn = tdist.Poisson(dy/p['K']).sample() * p['K']
650
+ # the result is pasted onto black frames, so drop the extra Poisson mean already counted in gt
651
+ if black_lr: dy = dy - gt
652
+ dy = dy * ratio / (p['wp'] - p['bl'])
653
+ dn = dn / (p['wp'] - p['bl'])
654
+
655
+ if ori is False:
656
+ dn *= ratio
657
+
658
+ return dn, dy
659
+
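A shape-level sketch of SNA above: given a clean frame gt and non-negative per-channel white-balance deltas, it returns a brightness increment dy for the target and a matching Poisson noise increment dn for the input (the parameter values here are placeholders):

gt = torch.rand(4, 128, 128)
aug_wb = np.array([0.2, 0.0, 0.1, 0.0], dtype=np.float32)  # gains only, no reduction
dn, dy = SNA_torch(gt, aug_wb, camera_type='SonyA7S2', ratio=100, iso=1600)
# noisy_aug = noisy + dn; gt_aug = gt + dy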
660
+ # @ fn_timer
661
+ def generate_noisy_obs(y, camera_type=None, noise_code='p', param=None, MultiFrameMean=1, ori=False, clip=False):
662
+ p = param
663
+ y = y * (p['wp'] - p['bl'])
664
+ # p['ratio'] = 1/p['ratio']  # temporary hack for a quick MFM implementation
665
+ y = y / p['ratio']
666
+ MFM = MultiFrameMean ** 0.5
667
+
668
+ use_R = True if 'r' in noise_code.lower() else False
669
+ use_Q = True if 'q' in noise_code.lower() else False
670
+ use_TL = True if 'g' in noise_code.lower() else False
671
+ use_P = True if 'p' in noise_code.lower() else False
672
+ use_D = True if 'd' in noise_code.lower() else False
673
+ use_black = True if 'b' in noise_code.lower() else False
674
+
675
+ if use_P:  # Poisson shot noise
676
+ noisy_shot = np.random.poisson(MFM*y/p['K']).astype(np.float32) * p['K'] / MFM
677
+ else:  # Gaussian approximation of shot noise instead of Poisson
678
+ noisy_shot = y + np.random.randn(*y.shape).astype(np.float32) * np.sqrt(np.maximum(y/p['K'], 1e-10)) * p['K'] / MFM
679
+ if not use_black:
680
+ if use_TL:  # Tukey-lambda read noise
681
+ noisy_read = stats.tukeylambda.rvs(p['lam'], scale=p['sigTL']/MFM, size=y.shape).astype(np.float32)
682
+ else:  # Gaussian read noise
683
+ noisy_read = stats.norm.rvs(scale=p['sigGs']/MFM, size=y.shape).astype(np.float32)
684
+ # Row noise varies along the row dimension h; for [1, c, h, w] that is axis -2
685
+ noisy_row = np.random.randn(y.shape[-3], y.shape[-2], 1).astype(np.float32) * p['sigR']/MFM if use_R else 0
686
+ noisy_q = np.random.uniform(low=-0.5, high=0.5, size=y.shape) if use_Q else 0
687
+ noisy_bias = p['bias'].reshape(-1,1,1) if use_D else 0
688
+ else:
689
+ noisy_read = 0
690
+ noisy_row = 0
691
+ noisy_q = 0
692
+ noisy_bias = 0
693
+
694
+ # normalize back to [0, 1]
695
+ z = (noisy_shot + noisy_read + noisy_row + noisy_q + noisy_bias) / (p['wp'] - p['bl'])
696
+ # mimic the real sensor range (values may dip below black level unless clipped)
697
+ z = np.clip(z, -p['bl']/p['wp'], 1) if not clip else np.clip(z, 0, 1)
698
+ if ori is False:
699
+ z = z * p['ratio']
700
+
701
+ return z.astype(np.float32)
702
+
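Tying the pieces together: sample physics-based noise parameters at a calibrated ISO, then synthesize a noisy observation from a clean normalized raw frame (random data here, purely for illustration):

y = np.random.rand(1, 4, 128, 128).astype(np.float32)     # clean packed raw in [0, 1]
p = sample_params_max(camera_type='SonyA7S2', ratio=100, iso=1600)
noisy = generate_noisy_obs(y, noise_code='prq', param=p)  # Poisson + row + quantization noise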
703
+ # @fn_timer
704
+ def generate_noisy_torch(y, camera_type=None, noise_code='p', param=None, MultiFrameMean=1, ori=False, clip=False):
705
+ p = param
706
+ y = y * (p['wp'] - p['bl'])
707
+ # p['ratio'] = 1/p['ratio']  # temporary hack for a quick MFM implementation
708
+ y = y / p['ratio']
709
+ MFM = MultiFrameMean ** 0.5
710
+
711
+ use_R = True if 'r' in noise_code.lower() else False
712
+ use_Q = True if 'q' in noise_code.lower() else False
713
+ use_TL = True if 'g' in noise_code.lower() else False
714
+ use_P = True if 'p' in noise_code.lower() else False
715
+ use_D = True if 'd' in noise_code.lower() else False
716
+ use_black = True if 'b' in noise_code.lower() else False
717
+
718
+ if use_P:  # Poisson shot noise
719
+ noisy_shot = tdist.Poisson(MFM*y/p['K']).sample() * p['K'] / MFM
720
+ else:  # Gaussian approximation of shot noise instead of Poisson
721
+ noisy_shot = y + torch.randn_like(y) * torch.sqrt(torch.clamp(y/p['K'], min=1e-10)) * p['K'] / MFM
722
+ if not use_black:
723
+ if use_TL:  # Tukey-lambda read noise (no torch sampler available; not implemented)
724
+ raise NotImplementedError
725
+ # noisy_read = stats.tukeylambda.rvs(p['lam'], scale=p['sigTL'], size=y.shape).astype(np.float32)
726
+ else:  # Gaussian read noise
727
+ noisy_read = tdist.Normal(loc=torch.zeros_like(y), scale=p['sigGs']/MFM).sample()
728
+ else:
729
+ noisy_read = 0
730
+ # row noise
731
+ noisy_row = torch.randn(y.shape[-3], y.shape[-2], 1, device=y.device) * p['sigR'] / MFM if use_R else 0
732
+ noisy_q = (torch.rand(y.shape, device=y.device) - 0.5) * p['q'] * (p['wp'] - p['bl']) if use_Q else 0
733
+ noisy_bias = torch.from_numpy(p['bias'].reshape(-1,1,1)) if use_D else 0
734
+
735
+ # normalize back to [0, 1]
736
+ z = (noisy_shot + noisy_read + noisy_row + noisy_q + noisy_bias) / (p['wp'] - p['bl'])
737
+ # mimic the real sensor range (values may dip below black level unless clipped)
738
+ z = torch.clamp(z, -p['bl']/p['wp'], 1) if not clip else torch.clamp(z, 0, 1)
739
+ # ori_brightness
740
+ if ori is False:
741
+ z = z * p['ratio']
742
+
743
+ return z
744
+
745
+ class HighBitRecovery:
746
+ def __init__(self, camera_type='IMX686', noise_code='prq', param=None,
747
+ perturb=True, factor=6, float=True):
748
+ self.camera_type = camera_type
749
+ self.noise_code = noise_code
750
+ self.param = param
751
+ self.perturb = perturb
752
+ self.factor = factor
753
+ self.float = float
754
+ self.lut = {}
755
+
756
+ def get_lut(self, iso_list, blc_mean=None):
757
+ for iso in iso_list:
758
+ if blc_mean is None:
759
+ bias = 0
760
+ else:
761
+ bias = np.mean(blc_mean[iso])
762
+ if self.perturb:
763
+ sigma_t = 0.1
764
+ bias += np.random.randn() * sigma_t
765
+ self.lut[iso] = self.HB2LB_LUT(iso, bias)
766
+
767
+ def HB2LB_LUT(self, iso, bias=0, param=None):
768
+ # record the LUT range
769
+ lut_info = {}
770
+ # fetch the calibrated noise parameters
771
+ p = sample_params_max(self.camera_type, iso=iso) if param is None else param
772
+ lut_info['param'] = p
773
+ # pick a distribution and map to the high-bit domain according to it
774
+ if 'g' in self.noise_code.lower():
775
+ dist = stats.tukeylambda(p['lam'], loc=bias, scale=p['sigTL'])
776
+ sigma = p['sigTL']
777
+ lut_info['dist'] = dist
778
+ else:
779
+ dist = stats.norm(loc=bias, scale=p['sigGs'])
780
+ sigma = p['sigGs']
781
+ lut_info['dist'] = dist
782
+
783
+ # addressable range is [-factor*sigma, factor*sigma]; outliers are not remapped
784
+ low = max(int(-sigma*self.factor + bias), -p['bl']+1)
785
+ high = int(sigma*self.factor + bias)
786
+ for x in range(low, high):
787
+ lut_info[x] = {
788
+ # CDF at the bin's left edge
789
+ 'cdf': dist.cdf(x-0.5),
790
+ # CDF mass within the bin
791
+ 'range': dist.cdf(x+0.5) - dist.cdf(x-0.5),
792
+ }
793
+ lut_info['low'] = low
794
+ lut_info['high'] = high
795
+ lut_info['bias'] = bias
796
+ lut_info['sigma'] = sigma
797
+ return lut_info
798
+
799
+ def map(self, data, iso=6400, norm=True):  # map a low-bit image to a high-bit image
800
+ p = self.lut[iso]['param']
801
+ if np.max(data) <= 1: data = data * (p['wp'] - p['bl'])
802
+ data_float = data.copy()
803
+ data = np.round(data_float)
804
+ if self.float:
805
+ delta = data_float - data
806
+ rand = np.random.uniform(0, 1, size=data.shape)
807
+ # fast path: only [-factor*sigma, factor*sigma] is remapped; outliers stay untouched
808
+ for x in range(self.lut[iso]['low'], self.lut[iso]['high']):
809
+ keys = (data==x)
810
+ cdf = self.lut[iso][x]['cdf']
811
+ r = self.lut[iso][x]['range']
812
+ # invert through the ppf (quantile function)
813
+ data[keys] = self.lut[iso]['dist'].ppf(cdf + rand[keys] * r)
814
+ if self.float:
815
+ data = data + delta
816
+ if norm:
817
+ data = data / (p['wp'] - p['bl'])
818
+ else:
819
+ data = data + p['bl']
820
+
821
+ return data
822
+
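A usage sketch for HighBitRecovery, assuming the requested ISOs exist in the calibration tables: LUTs are built once per ISO, then map() dithers a quantized low-bit frame back to a continuous high-bit one:

hbr = HighBitRecovery(camera_type='SonyA7S2', noise_code='g')
hbr.get_lut(iso_list=[1600, 3200])
lb = np.round(np.random.rand(4, 64, 64) * 200).astype(np.float32)  # quantized ADUs above black level
hb = hbr.map(lb, iso=1600, norm=True)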
823
+ class ELDEvalDataset(torch.utils.data.Dataset):
824
+ # Args: basedir (dataset root), camera_suffix (camera name, file suffix), scenes, img_ids
825
+ def __init__(self, basedir, camera_suffix=('NikonD850','.nef'), scenes=None, img_ids=None):
826
+ super(ELDEvalDataset, self).__init__()
827
+ self.basedir = basedir
828
+ self.camera_suffix = camera_suffix # ('Canon', '.CR2')
829
+ self.scenes = scenes
830
+ self.img_ids = img_ids
831
+ # self.input_dict = {}
832
+ # self.target_dict = {}
833
+
834
+ # fetch one input/target pair
835
+ def __getitem__(self, i):
836
+ camera, suffix = self.camera_suffix
837
+
838
+ scene_id = i // len(self.img_ids)
839
+ img_id = i % len(self.img_ids)
840
+
841
+ scene ='scene-{}'.format(self.scenes[scene_id])
842
+
843
+ datadir = os.path.join(self.basedir, camera, scene)
844
+
845
+ input_path = os.path.join(datadir, 'IMG_{:04d}{}'.format(self.img_ids[img_id], suffix))
846
+
847
+ gt_ids = np.array([1, 6, 11, 16])
848
+ ind = np.argmin(np.abs(self.img_ids[img_id] - gt_ids))
849
+
850
+ target_path = os.path.join(datadir, 'IMG_{:04d}{}'.format(gt_ids[ind], suffix))
851
+
852
+ iso, expo = metainfo(target_path)
853
+ target_expo = iso * expo
854
+ iso, expo = metainfo(input_path)
855
+
856
+ ratio = target_expo / (iso * expo)
857
+
858
+ with rawpy.imread(input_path) as raw:
859
+ input = pack_raw_bayer(raw) * ratio
860
+
861
+ with rawpy.imread(target_path) as raw:
862
+ target = pack_raw_bayer(raw)
863
+
864
+ input = np.maximum(np.minimum(input, 1.0), 0)
865
+ target = np.maximum(np.minimum(target, 1.0), 0)
866
+ input = np.ascontiguousarray(input)
867
+ target = np.ascontiguousarray(target)
868
+
869
+ data = {'input': input, 'target': target, 'fn':input_path, 'rawpath': target_path}
870
+
871
+ return data
872
+
873
+ # 获取数据的长度
874
+ def __len__(self):
875
+ return len(self.scenes) * len(self.img_ids)
876
+
877
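A minimal evaluation sketch for ELDEvalDataset; the basedir, scene list, and image ids below are illustrative (ground-truth frames are IMG_0001/0006/0011/0016, so each noisy id is matched to the nearest of those):

    eval_set = ELDEvalDataset('/data/ELD', camera_suffix=('SonyA7S2', '.ARW'),
                              scenes=list(range(1, 11)), img_ids=[4, 9, 14])
    loader = torch.utils.data.DataLoader(eval_set, batch_size=1, shuffle=False)
    for batch in loader:
        inp, gt = batch['input'], batch['target']  # ratio-corrected noisy raw and clean raw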
+ if __name__ == '__main__':
+     path = 'F:/datasets/ELD/SonyA7S2/scene-8'
+     files = [os.path.join(path, name) for name in os.listdir(path) if '.ARW' in name]
+     for name in files:
+         print(name)
+         raw = rawpy.imread(name)
+         img = raw.raw_image_visible.astype(np.float32)[np.newaxis,:,:]
+         # img = img[:, 1000:1500, 2200:2700]
+         fig = plt.figure(figsize=(16,10))
+         img = np.clip((img-512) / (16383-512), 0, 1)
+         p = sample_params_max(camera_type='SonyA7S2', ratio=100, iso=1600)
+
+         noisy = generate_noisy_obs(img, camera_type='SonyA7S2', param=p)
+         # refer_path = path+'\\'+'DSC02750.ARW'
+         # raw_refer = rawpy.imread(refer_path)
+         # print(np.min(raw_refer.raw_image_visible), np.max(raw_refer.raw_image_visible), np.mean(raw_refer.raw_image_visible))
+         # raw_refer.raw_image_visible[:,:] = np.clip((raw_refer.raw_image_visible.astype(np.float32)-512) / (16383-512)*200, 0, 1)*16383
+         # print(np.min(raw_refer.raw_image_visible), np.max(raw_refer.raw_image_visible), np.mean(raw_refer.raw_image_visible))
+         # out1 = raw_refer.postprocess(use_camera_wb=True, no_auto_bright=True)
+         # print(np.min(out1), np.max(out1), np.mean(out1))
+         # plt.imsave('real.png', out1)
+         # plt.imshow(out1)
+         # plt.show()
+         raw.raw_image_visible[:,:] = noisy[0,:,:]*16383
+         out = raw.postprocess(use_camera_wb=True)
+         plt.imshow(out)
+         plt.show()
+         plt.imsave('gen.png', out)
+         print('test')
+     print("")
data_process/unprocess.py ADDED
@@ -0,0 +1,282 @@
+ import numpy as np
+ import torch
+ import torch.distributions as tdist
+ # from utils import fn_timer
+
+
+ def random_ccm(camera_type='IMX686'):
+     """Generates random RGB -> Camera color correction matrices."""
+     # Takes a random convex combination of XYZ -> Camera CCMs.
+     xyz2cams = [[[1.0234, -0.2969, -0.2266],
+                  [-0.5625, 1.6328, -0.0469],
+                  [-0.0703, 0.2188, 0.6406]],
+                 [[0.4913, -0.0541, -0.0202],
+                  [-0.613, 1.3513, 0.2906],
+                  [-0.1564, 0.2151, 0.7183]],
+                 [[0.838, -0.263, -0.0639],
+                  [-0.2887, 1.0725, 0.2496],
+                  [-0.0627, 0.1427, 0.5438]],
+                 [[0.6596, -0.2079, -0.0562],
+                  [-0.4782, 1.3016, 0.1933],
+                  [-0.097, 0.1581, 0.5181]]]
+     num_ccms = len(xyz2cams)
+     xyz2cams = torch.FloatTensor(xyz2cams)
+     weights = torch.FloatTensor(num_ccms, 1, 1).uniform_(1e-8, 1e8)
+     weights_sum = torch.sum(weights, dim=0)
+     xyz2cam = torch.sum(xyz2cams * weights, dim=0) / weights_sum
+
+     # Multiplies with RGB -> XYZ to get RGB -> Camera CCM.
+     rgb2xyz = torch.FloatTensor([[0.4124564, 0.3575761, 0.1804375],
+                                  [0.2126729, 0.7151522, 0.0721750],
+                                  [0.0193339, 0.1191920, 0.9503041]])
+     rgb2cam = torch.mm(xyz2cam, rgb2xyz)
+     # if camera_type == 'SonyA7S2':
+     #     # SonyA7S2 ccm's inv
+     #     rgb2cam = [[1.,0.,0.],
+     #                [0.,1.,0.],
+     #                [0.,0.,1.]]
+     # elif camera_type == 'IMX686':
+     #     # RedMi K30 ccm's inv
+     #     rgb2cam = [[0.61093086,0.31565922,0.07340994],
+     #                [0.09433191,0.7658969,0.1397712 ],
+     #                [0.03532438,0.3020709,0.6626047 ]]
+     # rgb2cam = torch.FloatTensor(rgb2cam)
+
+     # Normalizes each row.
+     rgb2cam = rgb2cam / torch.sum(rgb2cam, dim=-1, keepdim=True)
+     return rgb2cam
+
+
+ def random_gains():
+     """Generates random gains for brightening and white balance."""
+     # RGB gain represents brightening.
+     n = tdist.Normal(loc=torch.tensor([0.8]), scale=torch.tensor([0.1]))
+     rgb_gain = 1.0 / n.sample() if torch.rand(1) < 0.9 else 5 / n.sample()
+
+     # Red and blue gains represent white balance.
+     red_gain = torch.FloatTensor(1).uniform_(1.4, 2.5)   # (1.9, 2.4)
+     blue_gain = torch.FloatTensor(1).uniform_(1.5, 2.4)  # (1.5, 1.9)
+     return rgb_gain, red_gain, blue_gain
+
+ # def random_gains(camera_type='SonyA7S2'):
+ #     # return torch.FloatTensor(np.array([[1.],[1.],[1.]]))
+ #     n = tdist.Normal(loc=torch.tensor([0.8]), scale=torch.tensor([0.1]))
+ #     rgb_gain = 1.0 / n.sample()
+ #     # SonyA7S2
+ #     if camera_type == 'SonyA7S2':
+ #         red_gain = np.random.uniform(1.75, 2.65)
+ #         ployfit = [14.65, -9.63942308, 1.80288462]
+ #         blue_gain = ployfit[0] + ployfit[1] * red_gain + ployfit[2] * red_gain ** 2  # + np.random.uniform(0, 0.4)
+ #     elif camera_type == 'IMX686':
+ #         red_gain = np.random.uniform(1.4, 2.3)
+ #         ployfit = [6.14381188, -3.65620261, 0.70205967]
+ #         blue_gain = ployfit[0] + ployfit[1] * red_gain + ployfit[2] * red_gain ** 2  # + np.random.uniform(0, 0.4)
+ #     else:
+ #         raise NotImplementedError
+ #     red_gain = torch.FloatTensor(np.array([red_gain])).view(1)
+ #     blue_gain = torch.FloatTensor(np.array([blue_gain])).view(1)
+ #     return rgb_gain, red_gain, blue_gain
+
+ def inverse_smoothstep(image):
+     """Approximately inverts a global tone mapping curve."""
+     # image = image.permute(1, 2, 0)  # Permute the image tensor to HxWxC format from CxHxW format
+     image = torch.clamp(image, min=0.0, max=1.0)
+     out = 0.5 - torch.sin(torch.asin(1.0 - 2.0 * image) / 3.0)
+     # out = out.permute(2, 0, 1)  # Re-Permute the tensor back to CxHxW format
+     return out
+
+
+ def gamma_expansion(image):
+     """Converts from gamma to linear space."""
+     # Clamps to prevent numerical instability of gradients near zero.
+     # image = image.permute(1, 2, 0)  # Permute the image tensor to HxWxC format from CxHxW format
+     out = torch.clamp(image, min=1e-8) ** 2.2
+     # out = out.permute(2, 0, 1)  # Re-Permute the tensor back to CxHxW format
+     return out
+
+
+ def apply_ccm(image, ccm):
+     """Applies a color correction matrix."""
+     shape = image.size()
+     image = torch.reshape(image, [-1, 3])
+     image = torch.tensordot(image, ccm, dims=[[-1], [-1]])
+     out = torch.reshape(image, shape)
+     return out
+
+
+ def safe_invert_gains(image, rgb_gain, red_gain, blue_gain, use_gpu=False):
+     """Inverts gains while safely handling saturated pixels."""
+     # H, W, C
+     green = torch.tensor([1.0])
+     if use_gpu: green = green.cuda()
+     gains = torch.stack((1.0 / red_gain, green, 1.0 / blue_gain)) / rgb_gain
+     new_shape = (1,) * (len(image.shape) - 1) + (-1,)
+     gains = gains.view(new_shape)
+     # gains = gains[None, None, :]
+     # Prevents dimming of saturated pixels by smoothly masking gains near white.
+     gray = torch.mean(image, dim=-1, keepdim=True)
+     inflection = 0.9
+     mask = (torch.clamp(gray - inflection, min=0.0) / (1.0 - inflection)) ** 2.0
+
+     safe_gains = torch.max(mask + (1.0 - mask) * gains, gains)
+     out = image * safe_gains
+     return out
+
+ def mosaic(image):
+     """Extracts RGGB Bayer planes from an RGB image."""
+     if len(image.size()) == 3:  # [h, w, c] (bug fix: the original compared the size tuple itself to 3)
+         shape = image.size()
+         red = image[0::2, 0::2, 0]
+         green_red = image[0::2, 1::2, 1]
+         green_blue = image[1::2, 0::2, 1]
+         blue = image[1::2, 1::2, 2]
+         out = torch.stack((red, green_red, green_blue, blue), dim=-1)
+         # out = torch.reshape(out, (shape[0] // 2, shape[1] // 2, 4))
+         # out = out.permute(2, 0, 1)  # Re-Permute the tensor back to CxHxW format
+     else:  # [crops, t, h, w, c]
+         shape = image.size()
+         red = image[..., 0::2, 0::2, 0]
+         green_red = image[..., 0::2, 1::2, 1]
+         green_blue = image[..., 1::2, 0::2, 1]
+         blue = image[..., 1::2, 1::2, 2]
+         out = torch.stack((red, green_red, green_blue, blue), dim=-1)
+         # out = torch.reshape(out, (shape[0], shape[1], shape[-3] // 2, shape[-2] // 2, 4))
+         # out = out.permute(2, 0, 1)  # Re-Permute the tensor back to CxHxW format
+     return out
+
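A quick shape check for mosaic, which assumes channel-last input (HxWxC here, or [..., h, w, c] for batched input):

    rgb = torch.rand(8, 8, 3)   # tiny linear-RGB image
    rggb = mosaic(rgb)          # -> torch.Size([4, 4, 4]): R, Gr, Gb, B planes at half resolution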
+ # def mosaic(image, mode=0):
+ #     """Extracts Random Bayer planes from an RGB image."""
+ #     if mode == 0:    # RGGB
+ #         R, Gr, Gb, B = (0,0), (0,1), (1,0), (0,0)
+ #     elif mode == 1:  # GRBG
+ #         Gr, R, B, Gb = (0,0), (0,1), (1,0), (0,0)
+ #     elif mode == 2:  # GBRG
+ #         Gb, B, R, Gr = (0,0), (0,1), (1,0), (0,0)
+ #     elif mode == 3:  # BGGR
+ #         B, Gb, Gr, R = (0,0), (0,1), (1,0), (0,0)
+ #     shape = image.size()
+ #     red = image[..., R[0]::2, R[1]::2, 0]
+ #     green_red = image[..., Gr[0]::2, Gr[1]::2, 1]
+ #     green_blue = image[..., Gb[0]::2, Gb[1]::2, 1]
+ #     blue = image[..., B[0]::2, B[1]::2, 2]
+ #     out = torch.stack((red, green_red, green_blue, blue), dim=-1)
+ #     # out = torch.reshape(out, (shape[0], shape[1], shape[-3] // 2, shape[-2] // 2, 4))
+ #     # out = out.permute(2, 0, 1)  # Re-Permute the tensor back to CxHxW format
+ #     return out
+
+ # @ fn_timer
+ def unprocess(image, lock_wb=False, use_gpu=False, camera_type='IMX686', seed=None):
+     """Unprocesses an image from sRGB to realistic raw data."""
+     # Randomly creates image metadata.
+     rgb2cam = random_ccm()
+     cam2rgb = torch.inverse(rgb2cam)
+     # rgb_gain, red_gain, blue_gain = random_gains() if lock_wb is False else torch.FloatTensor(np.array([[1.],[2.],[2.]]))
+     rgb_gain, red_gain, blue_gain = random_gains() if lock_wb is False else torch.FloatTensor(np.array(lock_wb))
+     if use_gpu:
+         rgb_gain, red_gain, blue_gain = rgb_gain.cuda(), red_gain.cuda(), blue_gain.cuda()
+     if len(image.size()) >= 4:
+         res = image.clone()
+         for i in range(image.size()[0]):
+             temp = image[i]
+             temp = inverse_smoothstep(temp)
+             temp = gamma_expansion(temp)
+             temp = apply_ccm(temp, rgb2cam)
+             temp = safe_invert_gains(temp, rgb_gain, red_gain, blue_gain, use_gpu)
+             temp = torch.clamp(temp, min=0.0, max=1.0)
+             res[i] = temp.clone()
+
+         metadata = {
+             'cam2rgb': cam2rgb,
+             'rgb_gain': rgb_gain,
+             'red_gain': red_gain,
+             'blue_gain': blue_gain,
+         }
+         return res, metadata
+     else:
+         # Approximately inverts global tone mapping.
+         image = inverse_smoothstep(image)
+         # Inverts gamma compression.
+         image = gamma_expansion(image)
+         # Inverts color correction.
+         image = apply_ccm(image, rgb2cam)
+         # Approximately inverts white balance and brightening.
+         image = safe_invert_gains(image, rgb_gain, red_gain, blue_gain, use_gpu)
+         # Clips saturated pixels.
+         image = torch.clamp(image, min=0.0, max=1.0)
+         # Applies a Bayer mosaic.
+         # image = mosaic(image)
+
+         metadata = {
+             'cam2rgb': cam2rgb,
+             'rgb_gain': rgb_gain,
+             'red_gain': red_gain,
+             'blue_gain': blue_gain,
+         }
+         return image, metadata
+
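A minimal end-to-end sketch of the inverse ISP above (sRGB in, synthetic raw plus metadata out); mosaicking is left to the caller since the call inside unprocess is commented out:

    srgb = torch.rand(128, 128, 3)       # HxWxC sRGB in [0, 1]
    raw_rgb, meta = unprocess(srgb)      # inverse tone curve, gamma, CCM, WB/brightness gains
    raw_rggb = mosaic(raw_rgb)           # (64, 64, 4) RGGB planes
    cam2rgb = meta['cam2rgb']            # keep the metadata to re-process the result later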
+ def unprocess_rpdc(image, lock_wb=False, use_gpu=False, camera_type='IMX686', known=None):
+     """Unprocesses an image from sRGB to realistic raw data."""
+     # Randomly creates image metadata.
+     if known is not None:
+         cam2rgb = known['cam2rgb']
+         rgb2cam = known['rgb2cam']
+         rgb_gain = known['rgb_gain']
+         red_gain = known['red_gain']
+         blue_gain = known['blue_gain']
+     else:
+         rgb2cam = random_ccm()
+         cam2rgb = torch.inverse(rgb2cam)
+         rgb_gain, red_gain, blue_gain = random_gains() if lock_wb is False else torch.FloatTensor(np.array(lock_wb))
+
+     if use_gpu:
+         rgb_gain, red_gain, blue_gain = rgb_gain.cuda(), red_gain.cuda(), blue_gain.cuda()
+
+     res = image.clone()
+     for i in range(image.size()[0]):
+         temp = image[i]
+         temp = inverse_smoothstep(temp)
+         temp = gamma_expansion(temp)
+         temp = apply_ccm(temp, rgb2cam)
+         temp = safe_invert_gains(temp, rgb_gain, red_gain, blue_gain, use_gpu)
+         temp = torch.clamp(temp, min=0.0, max=1.0)
+         res[i] = temp.clone()
+
+     metadata = {
+         'rgb2cam': rgb2cam,
+         'cam2rgb': cam2rgb,
+         'rgb_gain': rgb_gain,
+         'red_gain': red_gain,
+         'blue_gain': blue_gain,
+     }
+     return res, metadata
+
+ def random_noise_levels():
+     """Generates random noise levels from a log-log linear distribution."""
+     log_min_shot_noise = np.log(0.0001)
+     log_max_shot_noise = np.log(0.012)
+     log_shot_noise = torch.FloatTensor(1).uniform_(log_min_shot_noise, log_max_shot_noise)
+     shot_noise = torch.exp(log_shot_noise)
+
+     line = lambda x: 2.18 * x + 1.20
+     n = tdist.Normal(loc=torch.tensor([0.0]), scale=torch.tensor([0.26]))
+     log_read_noise = line(log_shot_noise) + n.sample()
+     read_noise = torch.exp(log_read_noise)
+     return shot_noise, read_noise
+
+
+ def add_noise(image, shot_noise=0.01, read_noise=0.0005):
+     """Adds random shot (proportional to image) and read (independent) noise."""
+     image = image.permute(1, 2, 0)  # Permute the image tensor to HxWxC format from CxHxW format
+     variance = image * shot_noise + read_noise
+     n = tdist.Normal(loc=torch.zeros_like(variance), scale=torch.sqrt(variance))
+     noise = n.sample()
+     out = image + noise
+     out = out.permute(2, 0, 1)  # Re-Permute the tensor back to CxHxW format
+     return out
+
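A minimal pairing of random_noise_levels with add_noise (the input is CxHxW, as add_noise permutes internally):

    img = torch.rand(3, 64, 64)
    shot, read = random_noise_levels()                       # log-log linearly correlated levels
    noisy = add_noise(img, shot_noise=shot, read_noise=read)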
+ if __name__ == '__main__':
+     m = tdist.Poisson(torch.tensor([10., 100., 1000.]))
+     for i in range(10):
+         s = m.sample()
+         print(s.numpy())
data_process/yond_datasets.py ADDED
@@ -0,0 +1,1431 @@
+ import pickle as pkl  # the infos below are loaded via the `pkl` alias
+ import glob  # assumption: also re-exported by `from utils import *`, but imported explicitly here
+ import torch
+ import numpy as np
+ import cv2
+ import os
+ import h5py
+ import rawpy
+ import scipy.io as sio  # assumption: .mat loading for SIDD_Dataset below; may also come from utils
+ from matplotlib import pyplot as plt
+ from torch.utils.data import Dataset, DataLoader
+ from tqdm import tqdm
+ from .unprocess import mosaic, unprocess, random_gains
+ from .process import *
+ from utils import *
+
+ def bayer_aug(rggb, k=0):
+     # Rotate the underlying Bayer mosaic by k*90 degrees to change the CFA pattern
+     bayer = rggb2bayer(rggb)
+     bayer = np.rot90(bayer, k=k, axes=(-2,-1))
+     rggb = bayer2rggb(bayer)
+     return rggb
+
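A shape-preserving sketch of bayer_aug, assuming rggb2bayer/bayer2rggb (from utils) convert between HxWx4 RGGB planes and the 2Hx2W Bayer mosaic:

    rggb = np.random.rand(64, 64, 4).astype(np.float32)
    rotated = bayer_aug(rggb, k=1)   # same shape; the underlying CFA is rotated, changing the Bayer pattern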
+ # def get_threshold(data, step=1, mode='score2', print=False):
+ #     if mode == 'naive':
+ #         quants = list(range(step, 100, step))
+ #         ths = np.percentile(data.reshape(-1), quants, method='linear')
+ #         ths = np.array(ths.tolist() + [data.max()])
+ #         th = ths[0]
+ #         diffs = ths[1:] - ths[:-1]
+ #         gap = data.max() / (100/step)**2
+ #         for i in range(len(diffs)):
+ #             if diffs[i] > gap:
+ #                 if print: log(f'Adaptive percent: {quants[i]}% - th: {ths[i]*959:.2f}')
+ #                 th = ths[i]
+ #                 break
+ #     elif mode == 'score':
+ #         quants = np.linspace(step, 100, 100//step)
+ #         ths = np.percentile(data.reshape(-1), quants, method='linear')
+ #         diffs = ths[1:] - ths[:-1]
+ #         quants = quants[:-1]
+ #         score = diffs/quants
+ #         i = np.argmin(score)
+ #         th = ths[i]
+ #         if print: log(f'Adaptive percent: {quants[i]}% - th: {ths[i]*959:.2f}')
+ #     return th
+
+ # Raw AWGN Dataset (Raw->Raw)
+ class SID_Raw_Dataset(Dataset):
+     def __init__(self, args=None):
+         super().__init__()
+         self.default_args()
+         if args is not None:
+             for key in args:
+                 self.args[key] = args[key]
+         self.initialization()
+
+     def default_args(self):
+         self.args = {}
+         self.args['root_dir'] = 'SID'
+         self.args['crop_per_image'] = 8
+         self.args['crop_size'] = 512
+         self.args['ori'] = False
+         self.args['iso'] = None
+         self.args['dgain'] = None
+         self.args['params'] = None
+         self.args['lock_wb'] = False
+         self.args['gpu_preprocess'] = False
+         self.args['dstname'] = 'SID'
+         self.args['mode'] = 'train'
+         self.args['wp'] = 16383
+         self.args['bl'] = 512
+         self.args['command'] = ''
+
+     def initialization(self):
+         # Collect data paths
+         self.suffix = 'npy'
+         self.root_dir = self.args['root_dir']
+         self.mode = self.args['mode']
+         self.dataset_file = f'SID_{self.args["mode"]}.info'
+         with open(f"infos/{self.dataset_file}", 'rb') as info_file:
+             self.infos = pkl.load(info_file)
+         print(f'>> Successfully loaded "{self.dataset_file}" (Length: {len(self.infos)})')
+         self.datapath = [info['long'] for info in self.infos]
+         self.names = [info['name'] for info in self.infos]
+         self.buffer = [None] * len(self.infos)
+         if 'cache' in self.args['command']:
+             log(f'Loading {len(self.infos)} crops!!!')
+             self.buffer = [dataload(path) for path in tqdm(self.datapath)]
+         self.length = len(self.infos)
+         self.sigma = -1
+         self.get_shape()
+
+     def __len__(self):
+         return self.length
+
+     def get_shape(self):
+         self.H, self.W = self.args['H'], self.args['W']
+         self.C = 3
+         self.h = self.H // 2
+         self.w = self.W // 2
+         self.c = 4
+
+     def data_aug(self, data, mode=0):
+         if mode == 0: return data
+         rot = mode % 4
+         flip = mode // 4
+         data = np.rot90(data, k=rot, axes=(-2, -1))
+         if flip:
+             data = data[..., ::-1]
+         return data
+
+     def init_random_crop_point(self, mode='non-overlapped', raw_crop=False):
+         self.h_start = []
+         self.w_start = []
+         self.h_end = []
+         self.w_end = []
+         self.aug = np.random.randint(8, size=self.args['crop_per_image'])
+         h, w = self.h, self.w
+         if raw_crop:
+             h, w = self.H, self.W
+         if mode == 'non-overlapped':
+             nh = h // self.args["patch_size"]
+             nw = w // self.args["patch_size"]
+             h_start = np.random.randint(0, h - nh*self.args["patch_size"] + 1)
+             w_start = np.random.randint(0, w - nw*self.args["patch_size"] + 1)
+             for i in range(nh):
+                 for j in range(nw):
+                     self.h_start.append(h_start + i * self.args["patch_size"])
+                     self.w_start.append(w_start + j * self.args["patch_size"])
+                     self.h_end.append(h_start + (i+1) * self.args["patch_size"])
+                     self.w_end.append(w_start + (j+1) * self.args["patch_size"])
+
+         else:  # random_crop
+             for i in range(self.args['crop_per_image']):
+                 h_start = np.random.randint(0, h - self.args["patch_size"] + 1)
+                 w_start = np.random.randint(0, w - self.args["patch_size"] + 1)
+                 self.h_start.append(h_start)
+                 self.w_start.append(w_start)
+                 self.h_end.append(h_start + self.args["patch_size"])
+                 self.w_end.append(w_start + self.args["patch_size"])
+
+     def random_crop(self, img):
+         # Randomly crop the array into crop_per_image square patches of side patch_size
+         c, h, w = img.shape
+         # Allocate an empty canvas, [crops, c, h, w]
+         crops = np.empty((self.args["crop_per_image"], c, self.args["patch_size"], self.args["patch_size"]), dtype=np.float32)
+         # Paste the patches onto the canvas
+         for i in range(self.args["crop_per_image"]):
+             crop = img[:, self.h_start[i]:self.h_end[i], self.w_start[i]:self.w_end[i]]
+             # crop = self.data_aug(crop, mode=self.aug[i])  # disabled: causes grid artifacts
+             crops[i] = crop
+
+         return crops
+
+     def __getitem__(self, idx):
+         data = {}
+         # Load data
+         data['wb'] = self.infos[idx]['wb']
+         data['ccm'] = self.infos[idx]['ccm']
+         data['name'] = self.infos[idx]['name']
+
+         if self.buffer[idx] is None:
+             self.buffer[idx] = dataload(self.datapath[idx])
+         hr_raw = (self.buffer[idx].astype(np.float32) - self.args['bl']) / (self.args['wp'] - self.args['bl'])
+         # BayerAug: this rotation changes the Bayer pattern
+         data['pattern'] = np.random.randint(4) if self.args["mode"] == 'train' else idx%4
+         hr_raw = np.rot90(hr_raw, k=data['pattern'], axes=(-2,-1))
+         hr_raw = bayer2rggb(hr_raw).clip(0, 1).transpose(2,0,1)
+         # Mimic the GT value range after VST: y = sqrt(x + 3/8) ≈ sqrt(x)
+         data['vst_aug'] = True if np.random.randint(2) else False
+         hr_raw = hr_raw ** 0.5 if data['vst_aug'] else hr_raw
+
+         if self.args["mode"] == 'train':
+             # Randomly crop into crop_per_image patches
+             self.init_random_crop_point(mode=self.args['croptype'], raw_crop=False)
+             if data['pattern'] % 2:
+                 self.h_start, self.h_end, self.w_start, self.w_end = self.w_start, self.w_end, self.h_start, self.h_end
+             hr_crops = self.random_crop(hr_raw)
+         else:
+             setup_seed(idx)
+             hr_crops = hr_raw[None,:]
+
+         lr_shape = hr_crops.shape
+
+         if self.args["lock_wb"] is False and np.random.randint(2):
+             rgb_gain, red_gain, blue_gain = random_gains()
+             red_gain = data['wb'][0] / red_gain.numpy()
+             blue_gain = data['wb'][2] / blue_gain.numpy()
+             hr_crops *= rgb_gain.numpy()
+             hr_crops[:,0] = hr_crops[:,0] * red_gain
+             hr_crops[:,2] = hr_crops[:,2] * blue_gain
+             data['wb'][0] = red_gain
+             data['wb'][2] = blue_gain
+         lr_crops = hr_crops.copy()
+
+         # Add synthetic noise
+         if self.args['gpu_preprocess'] is False:
+             if self.args['mode'] == 'train':
+                 lower, upper = np.log(self.args['sigma_min']), np.log(self.args['sigma_max'])
+                 data['sigma'] = np.exp(np.random.rand()*(upper-lower)+lower) / 255.
+             else:
+                 data['sigma'] = self.sigma
+                 setup_seed(idx)
+             noise = np.random.randn(*lr_crops.shape) * data['sigma']
+             lr_crops += noise
+
+         data["lr"] = np.ascontiguousarray(lr_crops)
+         data["hr"] = np.ascontiguousarray(hr_crops)
+
+         if self.args['clip']:
+             data["lr"] = lr_crops.clip(0, 1)
+             data["hr"] = hr_crops.clip(0, 1)
+
+         return data
+
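A minimal training-loop sketch for this dataset family; the argument values are illustrative (the SID Sony raws are 2848x4256, and 'H'/'W', 'patch_size', 'croptype', the sigma range, and 'clip' are all read in the code above):

    args = {'root_dir': 'SID', 'mode': 'train', 'H': 2848, 'W': 4256,
            'patch_size': 512, 'crop_per_image': 8, 'croptype': 'non-overlapped',
            'sigma_min': 1, 'sigma_max': 50, 'clip': True}
    train_set = SID_Raw_Dataset(args)
    loader = DataLoader(train_set, batch_size=1, shuffle=True, num_workers=4)
    for batch in loader:
        lr, hr = batch['lr'], batch['hr']   # noisy/clean RGGB patch stacks
        break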
+ # Unprocess Synthetic Dataset (sRGB->Raw)
+ class RGB_Img2Raw_Dataset(Dataset):
+     def __init__(self, args=None):
+         super().__init__()
+         self.default_args()
+         if args is not None:
+             for key in args:
+                 self.args[key] = args[key]
+         self.initialization()
+
+     def default_args(self):
+         self.args = {}
+         self.args['root_dir'] = 'YOND'
+         self.args['crop_size'] = 256
+         self.args['ori'] = False
+         self.args['iso'] = None
+         self.args['dgain'] = None
+         self.args['params'] = None
+         self.args['lock_wb'] = False
+         self.args['gpu_preprocess'] = False
+         self.args['dstname'] = 'YOND'
+         self.args['mode'] = 'train'
+         self.args['command'] = ''
+
+     def initialization(self):
+         # Collect data paths
+         self.suffix = 'npy'
+         self.root_dir = self.args['root_dir']
+         self.mode = self.args['mode']
+         self.data_dir = f"{self.root_dir}/{self.mode}"
+         if self.mode == 'train':
+             self.data_dir += f'_{self.args["subname"]}'
+         self.datapath = sorted(glob.glob(f'{self.data_dir}/*.{self.suffix}'))
+         self.names = [os.path.basename(path)[:-4] for path in self.datapath]
+         self.infos = [{'name': name, 'path': path} for name, path in zip(self.names, self.datapath)]
+         self.buffer = [None] * len(self.infos)
+         if 'cache' in self.args['command']:
+             log(f'Loading {len(self.infos)} crops!!!')
+             self.buffer = [dataload(path) for path in tqdm(self.datapath)]
+         self.length = len(self.infos)
+         self.sigma = -1
+         self.get_shape()
+         log(f'Successfully cached {self.length} npy data!!!')
+
+     def __len__(self):
+         return self.length
+
+     def get_shape(self):
+         self.H, self.W = self.args['H'], self.args['W']
+         self.C = 3
+         self.h = self.H // 2
+         self.w = self.W // 2
+         self.c = 4
+
+     def data_aug(self, data, mode=0):
+         if mode == 0: return data
+         rot = mode % 4
+         flip = mode // 4
+         data = np.rot90(data, k=rot, axes=(-2, -1))
+         if flip:
+             data = data[..., ::-1]
+         return data
+
+     def __getitem__(self, idx):
+         data = {}
+         # Load data
+         data['name'] = self.infos[idx]['name']
+         if self.buffer[idx] is None:
+             max_val = 255.
+             hr_imgs = dataload(self.datapath[idx]).astype(np.float32) / max_val
+         else:
+             max_val = 65535. if self.buffer[idx].dtype == np.uint16 else 255.
+             hr_imgs = self.buffer[idx].astype(np.float32) / max_val
+         if self.args["mode"] == 'train':
+             data['aug_id1'] = np.random.randint(8)
+             self.data_aug(hr_imgs, data['aug_id1'])  # NOTE: return value discarded, so this aug is effectively a no-op
+         else:
+             setup_seed(idx)
+         hr_crops = hr_imgs
+
+         # RAW requires the full unprocess pipeline
+         lr_shape = hr_crops.shape
+         hr_crops = torch.from_numpy(hr_crops)
+
+         hr_crops, metadata = unprocess(hr_crops, lock_wb=self.args["lock_wb"], use_gpu=self.args['gpu_preprocess'])
+         data['wb'] = np.array([metadata['red_gain'].item(), 1., metadata['blue_gain'].item()])
+         data['ccm'] = metadata['cam2rgb'].numpy()
+         if 'blur' in self.args['command']:
+             lr_crops = hr_crops.clone().numpy()
+             lr_crops = cv2.GaussianBlur(lr_crops, (3,3), -1)
+             red = lr_crops[0::2, 0::2, 0]
+             green_red = lr_crops[0::2, 1::2, 1]
+             green_blue = lr_crops[1::2, 0::2, 1]
+             blue = lr_crops[1::2, 1::2, 2]
+             lr_crops = np.stack((red, green_red, green_blue, blue), axis=0)
+         hr_crops = mosaic(hr_crops).numpy()  # rggb
+         # This rotation changes the Bayer pattern
+         if 'no_bayeraug' in self.args["command"]:
+             data['pattern'] = 0
+         else:
+             if self.args["mode"] == 'train':
+                 data['pattern'] = np.random.randint(4)
+             else:
+                 data['pattern'] = idx%4
+         hr_crops = bayer_aug(hr_crops, k=data['pattern'])
+         data['vst_aug'] = False
+         hr_crops = hr_crops ** 0.5 if data['vst_aug'] else hr_crops
+         # [h, w, c] -> [c, h, w]
+         hr_crops = hr_crops.transpose(2,0,1)
+         if 'blur' not in self.args['command']:
+             lr_crops = hr_crops.copy()
+
+         # Add synthetic noise
+         if self.args['gpu_preprocess'] is False:
+             if self.args['mode'] == 'train':
+                 lower, upper = np.log(self.args['sigma_min']), np.log(self.args['sigma_max'])
+                 data['sigma'] = np.exp(np.random.rand()*(upper-lower)+lower) / 255.
+             else:
+                 data['sigma'] = self.sigma
+                 setup_seed(idx)
+             noise = np.random.randn(*lr_crops.shape) * data['sigma']
+             lr_crops += noise
+
+         data["lr"] = np.ascontiguousarray(lr_crops)
+         data["hr"] = np.ascontiguousarray(hr_crops)
+
+         if self.args['clip']:
+             data["lr"] = lr_crops.clip(0, 1)
+             data["hr"] = hr_crops.clip(0, 1)
+
+         return data
+
+ # Unprocess Synthetic Dataset (sRGB->Raw)
+ class RGB_Img2Raw_Syn_Dataset(Dataset):
+     def __init__(self, args=None):
+         super().__init__()
+         self.default_args()
+         if args is not None:
+             for key in args:
+                 self.args[key] = args[key]
+         self.initialization()
+
+     def default_args(self):
+         self.args = {}
+         self.args['root_dir'] = 'YOND'
+         self.args['crop_size'] = 256
+         self.args['ori'] = False
+         self.args['iso'] = None
+         self.args['dgain'] = None
+         self.args['params'] = None
+         self.args['lock_wb'] = False
+         self.args['gpu_preprocess'] = False
+         self.args['dstname'] = 'YOND'
+         self.args['mode'] = 'train'
+         self.args['command'] = ''
+
+     def initialization(self):
+         # Collect data paths
+         self.suffix = 'npy'
+         self.root_dir = self.args['root_dir']
+         self.mode = self.args['mode']
+         self.data_dir = f"{self.root_dir}/{self.mode}"
+         if self.mode == 'train':
+             self.data_dir += f'_{self.args["subname"]}'
+         self.datapath = sorted(glob.glob(f'{self.data_dir}/*.{self.suffix}'))
+         self.names = [os.path.basename(path)[:-4] for path in self.datapath]
+         self.infos = [{'name': name, 'path': path} for name, path in zip(self.names, self.datapath)]
+         self.buffer = [None] * len(self.infos)
+         if 'cache' in self.args['command']:
+             log(f'Loading {len(self.infos)} crops!!!')
+             self.buffer = [dataload(path) for path in tqdm(self.datapath)]
+         self.length = len(self.infos)
+         self.sigma = -1
+         self.get_shape()
+         log(f'Successfully cached {self.length} npy data!!!')
+
+     def __len__(self):
+         return self.length
+
+     def get_shape(self):
+         self.H, self.W = self.args['H'], self.args['W']
+         self.C = 3
+         self.h = self.H // 2
+         self.w = self.W // 2
+         self.c = 4
+
+     def data_aug(self, data, mode=0):
+         if mode == 0: return data
+         rot = mode % 4
+         flip = mode // 4
+         data = np.rot90(data, k=rot, axes=(-2, -1))
+         if flip:
+             data = data[..., ::-1]
+         return data
+
+     def __getitem__(self, idx):
+         data = {}
+         # Load data
+         data['name'] = self.infos[idx]['name']
+         if self.buffer[idx] is None:
+             self.buffer[idx] = dataload(self.datapath[idx])
+         max_val = 65535. if self.buffer[idx].dtype == np.uint16 else 255.
+         hr_imgs = self.buffer[idx].astype(np.float32) / max_val
+         if self.args["mode"] == 'train':
+             data['aug_id1'] = np.random.randint(8)
+             self.data_aug(hr_imgs, data['aug_id1'])  # NOTE: return value discarded, so this aug is effectively a no-op
+         else:
+             setup_seed(idx)
+         hr_crops = hr_imgs
+
+         # RAW requires the full unprocess pipeline
+         lr_shape = hr_crops.shape
+         hr_crops = torch.from_numpy(hr_crops)
+
+         hr_crops, metadata = unprocess(hr_crops, lock_wb=self.args["lock_wb"], use_gpu=self.args['gpu_preprocess'])
+         data['wb'] = np.array([metadata['red_gain'].item(), 1., metadata['blue_gain'].item()])
+         data['ccm'] = metadata['cam2rgb'].numpy()
+         hr_crops = mosaic(hr_crops).numpy()  # rggb
+         # This rotation changes the Bayer pattern
+         if 'no_bayeraug' in self.args["command"]:
+             data['pattern'] = 0
+         else:
+             if self.args["mode"] == 'train':
+                 data['pattern'] = np.random.randint(4)
+             else:
+                 data['pattern'] = idx%4
+         hr_crops = bayer_aug(hr_crops, k=data['pattern'])
+         data['vst_aug'] = False
+         hr_crops = hr_crops ** 0.5 if data['vst_aug'] else hr_crops
+         # [h, w, c] -> [c, h, w]
+         hr_crops = hr_crops.transpose(2,0,1)
+         lr_crops = hr_crops.copy()
+
+         # Add synthetic noise
+         if self.args['gpu_preprocess'] is False:
+             if self.args['mode'] == 'train':
+                 p = sample_params(self.args['camera_type'])
+                 p['ratio'] = np.random.rand(1) * 19 + 1
+                 lr_crops = generate_noisy_obs(lr_crops, param=p, noise_code=self.args['noise_code'], ori=self.args['ori'])
+                 if self.args['ori']:
+                     hr_crops = hr_crops / p['ratio']
+                 else:
+                     p['K'] = p['K'] * p['ratio']
+                     p['sigGs'] = p['sigGs'] * p['ratio']
+
+                 hr_crops = hr_crops * (p['wp'] - p['bl'])
+                 lr_crops = lr_crops * (p['wp'] - p['bl'])
+                 bias = close_form_bias(hr_crops, p['sigGs'], p['K'])
+                 hr_crops = VST(hr_crops, p['sigGs'], gain=p['K'])
+                 lr_crops = VST(lr_crops, p['sigGs'], gain=p['K'])
+                 lr_crops = lr_crops - bias
+
+                 lower = VST(0, p['sigGs'], gain=p['K'])
+                 upper = VST((p['wp'] - p['bl']), p['sigGs'], gain=p['K'])
+                 nsr = 1 / (upper - lower)
+                 hr_crops = (hr_crops - lower) / (upper - lower)
+                 lr_crops = (lr_crops - lower) / (upper - lower)
+
+                 data['sigma'] = nsr
+             else:
+                 data['sigma'] = self.sigma
+                 setup_seed(idx)
+                 noise = np.random.randn(*lr_crops.shape) * data['sigma']
+                 lr_crops += noise
+
+         data["lr"] = np.ascontiguousarray(lr_crops)
+         data["hr"] = np.ascontiguousarray(hr_crops)
+
+         if self.args['clip']:
+             data["lr"] = lr_crops.clip(0, 1)
+             data["hr"] = hr_crops.clip(0, 1)
+
+         return data
+
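The VST normalization in the training branch above maps Poisson-Gaussian noise to an approximately unit-variance Gaussian and then rescales to [0, 1], so a single scalar (nsr) describes the noise level of the whole patch. A sketch under the assumption that the repo's VST helper is the generalized Anscombe transform:

    import numpy as np

    def vst_sketch(x, sigma, gain=1.0):
        # Generalized Anscombe: f(x) = (2/gain) * sqrt(gain*x + (3/8)*gain^2 + sigma^2)
        return 2.0 / gain * np.sqrt(np.maximum(gain * x + 0.375 * gain**2 + sigma**2, 0.0))

    lower = vst_sketch(0.0, sigma=5.0, gain=10.0)
    upper = vst_sketch(959.0, sigma=5.0, gain=10.0)  # 959 = wp - bl for 10-bit data
    nsr = 1.0 / (upper - lower)  # constant noise std after rescaling the VST output to [0, 1]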
+ # Unprocess Synthetic Dataset (sRGB->Raw)
+ class RGB_Img2Raw_SFRN_Dataset(Dataset):
+     def __init__(self, args=None):
+         super().__init__()
+         self.default_args()
+         if args is not None:
+             for key in args:
+                 self.args[key] = args[key]
+         self.initialization()
+
+     def default_args(self):
+         self.args = {}
+         self.args['root_dir'] = 'YOND'
+         self.args['crop_size'] = 256
+         self.args['ori'] = False
+         self.args['iso'] = None
+         self.args['dgain'] = None
+         self.args['params'] = None
+         self.args['lock_wb'] = False
+         self.args['gpu_preprocess'] = False
+         self.args['dstname'] = 'YOND'
+         self.args['mode'] = 'train'
+         self.args['command'] = ''
+
+     def initialization(self):
+         # Collect data paths
+         self.suffix = 'npy'
+         self.root_dir = self.args['root_dir']
+         self.mode = self.args['mode']
+         self.data_dir = f"{self.root_dir}/{self.mode}"
+         if self.mode == 'train':
+             self.data_dir += f'_{self.args["subname"]}'
+         self.datapath = sorted(glob.glob(f'{self.data_dir}/*.{self.suffix}'))
+         self.names = [os.path.basename(path)[:-4] for path in self.datapath]
+         self.infos = [{'name': name, 'path': path} for name, path in zip(self.names, self.datapath)]
+         self.buffer = [None] * len(self.infos)
+         if 'cache' in self.args['command']:
+             log(f'Loading {len(self.infos)} crops!!!')
+             self.buffer = [dataload(path) for path in tqdm(self.datapath)]
+         self.legal_iso = sorted([int(iso) for iso in os.listdir(f"{self.args['bias_dir']}")])
+         self.bias_frames = {iso: sorted(glob.glob(f"{self.args['bias_dir']}/{iso}/*.npy")) for iso in self.legal_iso}
+         # self.buffer_lr = [bayer2rggb(dataload(path)).transpose(2,0,1) for path in tqdm(self.bias_frames)]
+         self.buffer_lr = {}
+         for iso in self.legal_iso:
+             log(f'Loading {len(self.bias_frames[iso])} bias frames (ISO-{iso})!!!')
+             self.buffer_lr[iso] = [bayer2rggb(dataload(path)).transpose(2,0,1) for path in tqdm(self.bias_frames[iso])]
+         self.bias_lut = BiasLUT()
+         self.length = len(self.infos)
+         self.sigma = -1
+         self.get_shape()
+         log(f'Successfully cached {self.length} npy data!!!')
+
+     def __len__(self):
+         return self.length
+
+     def get_shape(self):
+         self.H, self.W = self.args['H'], self.args['W']
+         self.C = 3
+         self.h = self.H // 2
+         self.w = self.W // 2
+         self.c = 4
+
+     def data_aug(self, data, mode=0):
+         if mode == 0: return data
+         rot = mode % 4
+         flip = mode // 4
+         data = np.rot90(data, k=rot, axes=(-2, -1))
+         if flip:
+             data = data[..., ::-1]
+         return data
+
+     def __getitem__(self, idx):
+         data = {}
+         # Load data
+         data['name'] = self.infos[idx]['name']
+         hr_imgs = dataload(self.datapath[idx])
+         max_val = 65535. if hr_imgs.dtype == np.uint16 else 255.
+         hr_imgs = hr_imgs.astype(np.float32) / max_val
+         if self.args["mode"] == 'train':
+             data['aug_id1'] = np.random.randint(8)
+             self.data_aug(hr_imgs, data['aug_id1'])  # NOTE: return value discarded, so this aug is effectively a no-op
+         else:
+             setup_seed(idx)
+         hr_crops = hr_imgs
+
+         # RAW requires the full unprocess pipeline
+         lr_shape = hr_crops.shape
+         hr_crops = torch.from_numpy(hr_crops)
+
+         hr_crops, metadata = unprocess(hr_crops, lock_wb=self.args["lock_wb"], use_gpu=self.args['gpu_preprocess'])
+         data['wb'] = np.array([metadata['red_gain'].item(), 1., metadata['blue_gain'].item()])
+         data['ccm'] = metadata['cam2rgb'].numpy()
+         if 'blur' in self.args['command']:
+             lr_crops = hr_crops.clone().numpy()
+             lr_crops = cv2.GaussianBlur(lr_crops, (3,3), 0.4)
+             red = lr_crops[0::2, 0::2, 0]
+             green_red = lr_crops[0::2, 1::2, 1]
+             green_blue = lr_crops[1::2, 0::2, 1]
+             blue = lr_crops[1::2, 1::2, 2]
+             lr_crops = np.stack((red, green_red, green_blue, blue), axis=0)
+         hr_crops = mosaic(hr_crops).numpy()  # rggb
+         # This rotation changes the Bayer pattern
+         if 'no_bayeraug' in self.args["command"]:
+             data['pattern'] = 0
+         else:
+             if self.args["mode"] == 'train':
+                 data['pattern'] = np.random.randint(4)
+             else:
+                 data['pattern'] = idx%4
+         hr_crops = bayer_aug(hr_crops, k=data['pattern'])
+         data['vst_aug'] = False
+         hr_crops = hr_crops ** 0.5 if data['vst_aug'] else hr_crops
+         # [h, w, c] -> [c, h, w]
+         hr_crops = hr_crops.transpose(2,0,1)
+         if 'blur' not in self.args['command']:
+             lr_crops = hr_crops.copy()
+         data['ISO'] = iso = self.legal_iso[np.random.randint(len(self.legal_iso))]
+         idr = np.random.randint(len(self.buffer_lr[iso]))
+         dh, dw = self.args['patch_size']//2, self.args['patch_size']//2
+         xx = np.random.randint(self.h - dh + 1)
+         yy = np.random.randint(self.w - dw + 1)
+         black_crops = self.buffer_lr[iso][idr][:, xx:xx+dh, yy:yy+dw].copy()
+
+         # Add synthetic noise
+         data['ratio'] = np.ones(1)
+         if self.args['gpu_preprocess'] is False:
+             if self.args['mode'] == 'train':
+                 p = sample_params_max(self.args['camera_type'], iso=iso)
+                 p['ratio'] = np.exp(np.random.rand() * 4)
+                 data['ratio'] = p['ratio']
+                 y = lr_crops * (p['wp'] - p['bl']) / p['ratio']
+                 y = np.random.poisson(y/p['K']).astype(np.float32) * p['K']
+                 n_read = black_crops
+                 n_read += np.random.randn(y.shape[-3], y.shape[-2], 1).astype(np.float32) * p['sigR']
+                 n_read += np.random.uniform(low=-0.5, high=0.5, size=y.shape)
+                 n_read += np.random.randn() * 0.03  # BLE (black level error)
+                 y += n_read
+                 lr_crops = y / (p['wp'] - p['bl'])
+
+                 p['sigGs'] = n_read.std()
+                 if self.args['ori']:
+                     hr_crops = hr_crops / p['ratio']
+                 else:
+                     lr_crops = lr_crops * p['ratio']
+                     p['K'] = p['K'] * p['ratio']
+                     p['sigGs'] = p['sigGs'] * p['ratio']
+
+                 hr_crops = hr_crops * (p['wp'] - p['bl'])
+                 lr_crops = lr_crops * (p['wp'] - p['bl'])
+                 bias = self.bias_lut.get_lut(lr_crops.clip(0,None), p['sigGs'], p['K'])
+                 # bias = close_form_bias(lr_crops.clip(0,None), p['sigGs'], p['K'])
+                 hr_crops = VST(hr_crops, p['sigGs'], gain=p['K'])
+                 lr_crops = VST(lr_crops, p['sigGs'], gain=p['K'])
+                 lr_crops = lr_crops - bias
+
+                 lower = VST(0, p['sigGs'], gain=p['K'])
+                 upper = VST((p['wp'] - p['bl']), p['sigGs'], gain=p['K'])
+                 nsr = 1 / (upper - lower)
+                 hr_crops = (hr_crops - lower) / (upper - lower)
+                 lr_crops = (lr_crops - lower) / (upper - lower)
+
+                 data['sigma'] = nsr
+             else:
+                 data['sigma'] = self.sigma
+                 setup_seed(idx)
+                 noise = np.random.randn(*lr_crops.shape) * data['sigma']
+                 lr_crops += noise
+
+         data["lr"] = np.ascontiguousarray(lr_crops)
+         data["hr"] = np.ascontiguousarray(hr_crops)
+
+         if self.args['clip']:
+             data["lr"] = lr_crops.clip(0, 1)
+             data["hr"] = hr_crops.clip(0, 1)
+
+         return data
+
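The SFRN-style synthesis above combines Poisson shot noise with a sampled real bias frame; a condensed, self-contained sketch (function name, shapes, and parameter values are illustrative):

    import numpy as np

    def sfrn_noise_sketch(clean_dn, black_frame, K, sigR, ratio=1.0):
        # clean_dn, black_frame: [c, h, w] arrays in DN (black level subtracted)
        c, h, w = clean_dn.shape
        y = np.random.poisson(clean_dn / ratio / K).astype(np.float32) * K  # shot noise with gain K
        n_read = black_frame.astype(np.float32).copy()        # real read noise + fixed-pattern noise
        n_read += np.random.randn(c, h, 1).astype(np.float32) * sigR  # row noise
        n_read += np.random.uniform(-0.5, 0.5, size=y.shape)          # quantization noise
        n_read += np.random.randn() * 0.03                            # black level error (BLE)
        return y + n_read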
+ # Synthetic Dataset (sRGB)
+ class RGB_Img_Dataset(Dataset):
+     def __init__(self, args=None):
+         super().__init__()
+         self.default_args()
+         if args is not None:
+             for key in args:
+                 self.args[key] = args[key]
+         self.initialization()
+
+     def default_args(self):
+         self.args = {}
+         self.args['root_dir'] = 'YOND'
+         self.args['crop_size'] = 256
+         self.args['ori'] = False
+         self.args['iso'] = None
+         self.args['dgain'] = None
+         self.args['params'] = None
+         self.args['lock_wb'] = False
+         self.args['gpu_preprocess'] = False
+         self.args['dstname'] = 'YOND'
+         self.args['mode'] = 'train'
+         self.args['command'] = ''
+
+     def initialization(self):
+         # Collect data paths
+         self.suffix = 'npy'
+         self.root_dir = self.args['root_dir']
+         self.mode = self.args['mode']
+         self.data_dir = f"{self.root_dir}/{self.mode}"
+         if self.mode == 'train':
+             self.data_dir += f'_{self.args["subname"]}'
+         self.datapath = sorted(glob.glob(f'{self.data_dir}/*.{self.suffix}'))
+         self.names = [os.path.basename(path)[:-4] for path in self.datapath]
+         self.infos = [{'name': name, 'path': path} for name, path in zip(self.names, self.datapath)]
+         self.buffer = [None] * len(self.infos)
+         if 'cache' in self.args['command']:
+             log(f'Loading {len(self.infos)} crops!!!')
+             self.buffer = [dataload(path) for path in tqdm(self.datapath)]
+         self.length = len(self.infos)
+         self.sigma = -1
+         self.get_shape()
+         log(f'Successfully cached {self.length} npy data!!!')
+
+     def __len__(self):
+         return self.length
+
+     def get_shape(self):
+         self.H, self.W = self.args['H'], self.args['W']
+         self.C = 3
+         self.h = self.H // 2
+         self.w = self.W // 2
+         self.c = 4
+
+     def data_aug(self, data, mode=0):
+         if mode == 0: return data
+         rot = mode % 4
+         flip = mode // 4
+         data = np.rot90(data, k=rot, axes=(-2, -1))
+         if flip:
+             data = data[..., ::-1]
+         return data
+
+     def __getitem__(self, idx):
+         data = {}
+         # Load data
+         data['name'] = self.infos[idx]['name']
+         if self.buffer[idx] is None:
+             self.buffer[idx] = dataload(self.datapath[idx])
+         max_val = 255. if self.buffer[idx].dtype == np.uint8 else 65535.
+         hr_imgs = self.buffer[idx].astype(np.float32) / max_val
+         if self.args["mode"] == 'train':
+             data['aug_id1'] = np.random.randint(8)
+             self.data_aug(hr_imgs, data['aug_id1'])  # NOTE: return value discarded, so this aug is effectively a no-op
+         else:
+             setup_seed(idx)
+         hr_crops = hr_imgs.transpose(2,0,1)
+         lr_crops = hr_crops.copy()
+
+         # Add synthetic noise
+         if self.args['gpu_preprocess'] is False:
+             if self.args['mode'] == 'train':
+                 # lower, upper = np.log(self.args['sigma_min']), np.log(self.args['sigma_max'])
+                 # data['sigma'] = np.exp(np.random.rand()*(upper-lower)+lower) / 255.
+                 lower, upper = self.args['sigma_min'], self.args['sigma_max']
+                 data['sigma'] = (np.random.rand()*(upper-lower)+lower) / 255.
+             else:
+                 data['sigma'] = self.sigma
+                 setup_seed(idx)
+             noise = np.random.randn(*lr_crops.shape) * data['sigma']
+             lr_crops += noise
+
+         data["lr"] = np.ascontiguousarray(lr_crops)
+         data["hr"] = np.ascontiguousarray(hr_crops)
+
+         if self.args['clip']:
+             data["lr"] = lr_crops.clip(0, 1)
+             data["hr"] = hr_crops.clip(0, 1)
+
+         return data
+
+ # Unprocess Synthetic Dataset (sRGB->Raw)
+ class DIV2K_Img2Raw_Dataset(Dataset):
+     def __init__(self, args=None):
+         super().__init__()
+         self.default_args()
+         if args is not None:
+             for key in args:
+                 self.args[key] = args[key]
+         self.initialization()
+
+     def default_args(self):
+         self.args = {}
+         self.args['root_dir'] = 'DIV2K'
+         self.args['crop_size'] = 256
+         self.args['ori'] = False
+         self.args['iso'] = None
+         self.args['dgain'] = None
+         self.args['params'] = None
+         self.args['lock_wb'] = False
+         self.args['gpu_preprocess'] = False
+         self.args['dstname'] = 'DIV2K'
+         self.args['mode'] = 'train'
+         self.args['command'] = ''
+
+     def initialization(self):
+         # Collect data paths
+         self.suffix = 'npy'
+         self.root_dir = self.args['root_dir']
+         self.mode = self.args['mode']
+         self.data_dir = f"{self.root_dir}/npy/{self.mode}"
+         self.datapath = sorted(glob.glob(f'{self.data_dir}/*.{self.suffix}'))
+         self.names = [os.path.basename(path)[:-4] for path in self.datapath]
+         self.infos = [{'name': name, 'path': path} for name, path in zip(self.names, self.datapath)]
+         self.buffer = [None] * len(self.infos)
+         if 'cache' in self.args['command']:
+             log(f'Loading {len(self.infos)} crops!!!')
+             self.buffer = [dataload(path) for path in tqdm(self.datapath)]
+         self.length = len(self.infos)
+         self.sigma = -1
+         self.get_shape()
+         log(f'Successfully cached {self.length} npy data!!!')
+
+     def __len__(self):
+         return self.length
+
+     def get_shape(self):
+         self.H, self.W = self.args['H'], self.args['W']
+         self.C = 3
+         self.h = self.H // 2
+         self.w = self.W // 2
+         self.c = 4
+
+     def data_aug(self, data, mode=0):
+         if mode == 0: return data
+         rot = mode % 4
+         flip = mode // 4
+         data = np.rot90(data, k=rot, axes=(-2, -1))
+         if flip:
+             data = data[..., ::-1]
+         return data
+
+     def __getitem__(self, idx):
+         data = {}
+         # Load data
+         data['name'] = self.infos[idx]['name']
+         if self.buffer[idx] is None:
+             self.buffer[idx] = dataload(self.datapath[idx])
+         hr_imgs = self.buffer[idx].astype(np.float32) / 255.
+         if self.args["mode"] == 'train':
+             data['aug_id1'] = np.random.randint(8)
+             self.data_aug(hr_imgs, data['aug_id1'])  # NOTE: return value discarded, so this aug is effectively a no-op
+         else:
+             setup_seed(idx)
+         hr_crops = hr_imgs
+
+         # RAW requires the full unprocess pipeline
+         lr_shape = hr_crops.shape
+         hr_crops = torch.from_numpy(hr_crops)
+
+         hr_crops, metadata = unprocess(hr_crops, lock_wb=self.args["lock_wb"], use_gpu=self.args['gpu_preprocess'])
+         data['wb'] = np.array([metadata['red_gain'].item(), 1., metadata['blue_gain'].item()])
+         data['ccm'] = metadata['cam2rgb'].numpy()
+         hr_crops = mosaic(hr_crops).numpy()  # rggb
+         # This rotation changes the Bayer pattern
+         # if self.args["mode"] == 'train':
+         data['pattern'] = np.random.randint(4) if self.args["mode"] == 'train' else idx%4
+         hr_crops = bayer_aug(hr_crops, k=data['pattern'])
+         data['vst_aug'] = False
+         hr_crops = hr_crops ** 0.5 if data['vst_aug'] else hr_crops
+         # [h, w, c] -> [c, h, w]
+         hr_crops = hr_crops.transpose(2,0,1)
+         lr_crops = hr_crops.copy()
+
+         # Add synthetic noise
+         if self.args['gpu_preprocess'] is False:
+             if self.args['mode'] == 'train':
+                 lower, upper = np.log(self.args['sigma_min']), np.log(self.args['sigma_max'])
+                 data['sigma'] = np.exp(np.random.rand()*(upper-lower)+lower) / 255.
+             else:
+                 data['sigma'] = self.sigma
+                 setup_seed(idx)
+             noise = np.random.randn(*lr_crops.shape) * data['sigma']
+             lr_crops += noise
+
+         data["lr"] = np.ascontiguousarray(lr_crops)
+         data["hr"] = np.ascontiguousarray(hr_crops)
+
+         if self.args['clip']:
+             data["lr"] = lr_crops.clip(0, 1)
+             data["hr"] = hr_crops.clip(0, 1)
+
+         return data
+
+ # Unprocess Synthetic Dataset (sRGB->Raw, 3 clean channels + 1 noisy channel)
+ class RGB_Img2Raw3c1n_Dataset(Dataset):
+     def __init__(self, args=None):
+         super().__init__()
+         self.default_args()
+         if args is not None:
+             for key in args:
+                 self.args[key] = args[key]
+         self.initialization()
+
+     def default_args(self):
+         self.args = {}
+         self.args['root_dir'] = 'YOND'
+         self.args['crop_size'] = 256
+         self.args['ori'] = False
+         self.args['iso'] = None
+         self.args['dgain'] = None
+         self.args['params'] = None
+         self.args['lock_wb'] = False
+         self.args['gpu_preprocess'] = False
+         self.args['dstname'] = 'YOND'
+         self.args['mode'] = 'train'
+         self.args['command'] = ''
+
+     def initialization(self):
+         # Collect data paths
+         self.suffix = 'npy'
+         self.root_dir = self.args['root_dir']
+         self.mode = self.args['mode']
+         self.data_dir = f"{self.root_dir}/{self.mode}"
+         if self.mode == 'train':
+             self.data_dir += f'_{self.args["subname"]}'
+         self.datapath = sorted(glob.glob(f'{self.data_dir}/*.{self.suffix}'))
+         self.names = [os.path.basename(path)[:-4] for path in self.datapath]
+         self.infos = [{'name': name, 'path': path} for name, path in zip(self.names, self.datapath)]
+         self.buffer = [None] * len(self.infos)
+         if 'cache' in self.args['command']:
+             log(f'Loading {len(self.infos)} crops!!!')
+             self.buffer = [dataload(path) for path in tqdm(self.datapath)]
+         self.length = len(self.infos)
+         self.sigma = -1
+         self.get_shape()
+         log(f'Successfully cached {self.length} npy data!!!')
+
+     def __len__(self):
+         return self.length
+
+     def get_shape(self):
+         self.H, self.W = self.args['H'], self.args['W']
+         self.C = 3
+         self.h = self.H // 2
+         self.w = self.W // 2
+         self.c = 4
+
+     def data_aug(self, data, mode=0):
+         if mode == 0: return data
+         rot = mode % 4
+         flip = mode // 4
+         data = np.rot90(data, k=rot, axes=(-2, -1))
+         if flip:
+             data = data[..., ::-1]
+         return data
+
+     def __getitem__(self, idx):
+         data = {}
+         # Load data
+         data['name'] = self.infos[idx]['name']
+         if self.buffer[idx] is None:
+             self.buffer[idx] = dataload(self.datapath[idx])
+         max_val = 255. if self.buffer[idx].dtype == np.uint8 else 65535.
+         hr_imgs = self.buffer[idx].astype(np.float32) / max_val
+         if self.args["mode"] == 'train':
+             data['aug_id1'] = np.random.randint(8)
+             self.data_aug(hr_imgs, data['aug_id1'])  # NOTE: return value discarded, so this aug is effectively a no-op
+         else:
+             setup_seed(idx)
+         hr_crops = hr_imgs
+
+         # RAW requires the full unprocess pipeline
+         lr_shape = hr_crops.shape
+         hr_crops = torch.from_numpy(hr_crops)
+
+         hr_crops, metadata = unprocess(hr_crops, lock_wb=self.args["lock_wb"], use_gpu=self.args['gpu_preprocess'])
+         data['wb'] = np.array([metadata['red_gain'].item(), 1., metadata['blue_gain'].item()])
+         data['ccm'] = metadata['cam2rgb'].numpy()
+         hr_crops = mosaic(hr_crops).numpy()  # rggb
+         # [h, w, c] -> [c, h, w]
+         hr_crops = hr_crops.transpose(2,0,1)
+         lr_crops = hr_crops.copy()
+
+         # Add synthetic noise (to a single channel only, hence "3c1n")
+         if self.args['gpu_preprocess'] is False:
+             if self.args['mode'] == 'train':
+                 lower, upper = np.log(self.args['sigma_min']), np.log(self.args['sigma_max'])
+                 data['sigma'] = np.exp(np.random.rand()*(upper-lower)+lower) / 255.
+             else:
+                 data['sigma'] = self.sigma
+                 setup_seed(idx)
+             noise = np.random.randn(*lr_crops.shape[-2:]) * data['sigma']
+             lr_crops[2] += noise
+
+         data["lr"] = np.ascontiguousarray(lr_crops)
+         data["hr"] = np.ascontiguousarray(hr_crops)
+
+         if self.args['clip']:
+             data["lr"] = lr_crops.clip(0, 1)
+             data["hr"] = hr_crops.clip(0, 1)
+
+         return data
+
+
+ # Unprocess Synthetic Dataset(sRGB->Raw)
991
+ class DIV2K_PG_Dataset(DIV2K_Img2Raw_Dataset):
992
+ def __init__(self, args=None):
993
+ super().__init__(args)
994
+ self.noise_params= {
995
+ 'Kmin': -2.5, 'Kmax': 3.5, 'lam': 0.102, 'q': 1/(2**10), 'wp': 1023, 'bl': 64,
996
+ 'sigTLk': 0.85187, 'sigTLb': 0.07991, 'sigTLsig': 0.02921,
997
+ 'sigRk': 0.87611, 'sigRb': -2.11455, 'sigRsig': 0.03274,
998
+ 'sigGsk': 0.85187, 'sigGsb': 0.67991, 'sigGssig': 0.02921,
999
+ }
1000
+ self.p = self.get_noise_params()
1001
+
1002
+ def get_noise_params(self):
1003
+ p = self.noise_params
1004
+ log_K = np.random.uniform(low=p['Kmin'], high=p['Kmax'])
1005
+ mu_Gs = (p['sigGsk'] + np.random.uniform(-0.2, 0.2))*log_K + (p['sigGsb'] + np.random.uniform(-1, 1))
1006
+ log_sigGs = np.random.normal(loc=mu_Gs, scale=p['sigGssig'])
1007
+ K = np.exp(log_K)
1008
+ sigma = np.exp(log_sigGs)
1009
+ scale = p['wp'] - p['bl']
1010
+ self.p = {'K':K, 'sigma':sigma, 'beta1':K/scale, 'beta2':(sigma/scale)**2,
1011
+ 'wp':p['wp'], 'bl':p['bl'], 'scale':p['wp']-p['bl']}
1012
+ return self.p
1013
+
1014
+ def __getitem__(self, idx):
1015
+ data = {}
1016
+ # 读取数据
1017
+ data['name'] = self.infos[idx]['name']
1018
+ if self.buffer[idx] is None:
1019
+ self.buffer[idx] = dataload(self.datapath[idx])
1020
+ hr_imgs = self.buffer[idx].astype(np.float32) / 255.
1021
+ if self.args["mode"] == 'train':
1022
+ data['aug_id1'] = np.random.randint(8)
1023
+ # self.data_aug(hr_imgs, data['aug_id1'])
1024
+ else:
1025
+ setup_seed(idx)
1026
+ hr_crops = hr_imgs
1027
+
1028
+ # RAW需要复杂的unproces
1029
+ lr_shape = hr_crops.shape
1030
+ hr_crops = torch.from_numpy(hr_crops)
1031
+
1032
+ hr_crops, metadata = unprocess(hr_crops, lock_wb=self.args["lock_wb"], use_gpu=False)
1033
+ data['wb'] = np.array([metadata['red_gain'].item(), 1., metadata['blue_gain'].item()])
1034
+ data['ccm'] = metadata['cam2rgb'].numpy()
1035
+ hr_crops = mosaic(hr_crops).numpy() # rgbg
1036
+ # 这个旋转是用来改变bayer模式的
1037
+ # if self.args["mode"] == 'train':
1038
+ data['pattern'] = np.random.randint(4) if self.args["mode"] == 'train' else idx%4
1039
+ hr_crops = bayer_aug(hr_crops, k=data['pattern'])
1040
+ lr_crops = hr_crops.copy()
1041
+
1042
+ # 人工加噪声
1043
+ if self.args['mode'] == 'train':
1044
+ p = self.get_noise_params()
1045
+ data.update(p)
1046
+ else:
1047
+ data.update(self.p)
1048
+
1049
+ if self.args['gpu_preprocess'] is False:
1050
+ lr_crops = np.random.poisson(lr_crops/p['beta1'])*p['beta1'] + np.random.randn(*lr_crops.shape) * data['beta2']**0.5
1051
+ if 'est' in self.args['command']:
1052
+ # 对噪图求blur
1053
+ k = 19
1054
+ lr_crops = lr_crops
1055
+ hr_crops = hr_crops
1056
+ lr_blur = cv2.blur(lr_crops, (k, k))
1057
+ hr_blur = cv2.blur(hr_crops, (k, k))
1058
+ lr_std = stdfilt(lr_crops, k)
1059
+ hr_std = stdfilt(hr_crops, k)
1060
+ hr_target = (p['beta1'] * hr_blur + p['beta2']) ** 0.5
1061
+
1062
+ var = lr_std**2
1063
+ mean = lr_blur
1064
+ th, percent = get_threshold(hr_std)
1065
+ mask = (hr_std <= th)
1066
+ # 按阈值分割图像平滑区域与非平滑区域
1067
+ if var[mask].size > 0:
1068
+ var, mean = var[mask], mean[mask]
1069
+ else:
1070
+ mask = (hr_std <= hr_std.max())
1071
+
1072
+ data['th'] = th
1073
+ data['hr_mask'] = mask
1074
+ data['lr_rggb'] = lr_crops
1075
+ data['hr_rggb'] = hr_crops
1076
+ data['lr_std'] = lr_std
1077
+ data['hr_std'] = hr_std
1078
+ data['lr_blur'] = lr_blur
1079
+ data['hr_blur'] = hr_blur
1080
+ data['lr'] = np.concatenate([lr_std, lr_blur, lr_crops], axis=-1)
1081
+ data['hr'] = hr_target
1082
+ else:
1083
+ data["lr"] = np.ascontiguousarray(lr_crops)
1084
+ data["hr"] = np.ascontiguousarray(hr_crops)
1085
+
1086
+ for key in data:
1087
+ if 'lr' in key or 'hr' in key:
1088
+ data[key] = data[key].transpose(2,0,1)
1089
+
1090
+ if self.args['clip']:
1091
+ data['lr'] = lr_crops.clip(0, 1)
1092
+ data['hr'] = hr_crops.clip(0, 1)
1093
+
1094
+ return data
+ 
+ # SIDD Paired Real Data
+ class SIDD_Dataset(Dataset):
+     def __init__(self, args=None):
+         super().__init__()
+         self.default_args()
+         if args is not None:
+             for key in args:
+                 self.args[key] = args[key]
+         self.initialization()
+ 
+     def default_args(self):
+         self.args = {}
+         self.args['root_dir'] = '/data/fenghansen/datasets/SIDD'
+         self.args['params'] = None
+         self.args['lock_wb'] = False
+         self.args['gpu_preprocess'] = False
+         self.args['dstname'] = 'SIDD'
+         self.args['mode'] = 'eval'
+         self.args['clip'] = True  # boolean, not the string 'True'
+         self.args['wp'] = 1023
+         self.args['bl'] = 64
+         self.args['patch_size'] = 256
+         self.args['H'] = 256
+         self.args['W'] = 256
+         self.args['command'] = ''
+ 
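+     # Data layout: SIDD_Medium_Raw supplies the training scenes, while the
+     # evaluation splits come as pre-cropped raw blocks in the official
+     # validation/benchmark .mat files loaded below.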
+     def initialization(self):
+         # collect data paths
+         self.suffix = 'npy'
+         self.root_dir = self.args['root_dir']
+         self.mode = self.args['mode']
+         if self.mode == 'train':
+             self.data_dir = f'{self.root_dir}/SIDD_Medium_Raw/data'
+         else:
+             self.data_dir = f'{self.root_dir}/SIDD_Benchmark_Data'
+         if self.mode == 'eval':
+             self.lr_data = sio.loadmat(f'{self.root_dir}/SIDD_Validation_Raw/ValidationNoisyBlocksRaw.mat')['ValidationNoisyBlocksRaw']
+             self.hr_data = sio.loadmat(f'{self.root_dir}/SIDD_Validation_Raw/ValidationGtBlocksRaw.mat')['ValidationGtBlocksRaw']
+         else:
+             self.lr_data = sio.loadmat(f'{self.root_dir}/SIDD_Validation_Raw/BenchmarkNoisyBlocksRaw.mat')['BenchmarkNoisyBlocksRaw']
+             self.hr_data = None
+             self.pos = sio.loadmat(f'{self.root_dir}/SIDD_Validation_Raw/BenchmarkBlocks32.mat')['BenchmarkBlocks32']
+         self.names = sorted(os.listdir(self.data_dir))
+         self.datapaths = sorted(glob.glob(f'{self.data_dir}/*/*_010.MAT'))
+         self.metapaths = sorted([path for path in self.datapaths if 'META' in path])
+         self.lr_paths = sorted([path for path in self.datapaths if 'NOISY' in path])
+         self.hr_paths = sorted([path for path in self.datapaths if 'GT' in path])
+         self.length = len(self.names)
+         self.infos = [None] * self.length
+         for i in range(self.length):
+             metadata = read_metadata(dataload(self.metapaths[i]))
+             self.infos[i] = {
+                 'name': self.names[i],
+                 'lr_path': self.lr_paths[i],
+                 'hr_path': self.hr_paths[i] if len(self.hr_paths) > 0 else None,
+                 'metadata': metadata,
+             }
+         self.sigma = -1
+         self.get_shape()
+         log(f'Successfully loaded {self.length} samples! ({self.mode})')
+ 
+     def __len__(self):
+         return self.length
+ 
+     def get_shape(self):
+         self.H, self.W = self.args['H'], self.args['W']
+         self.C = 3
+         self.h = self.H // 2
+         self.w = self.W // 2
+         self.c = 4
+ 
+     def data_aug(self, data, mode=0):
+         # mode in [0, 7]: 4 rotations x optional flip of the last axis
+         if mode == 0: return data
+         rot = mode % 4
+         flip = mode // 4
+         data = np.rot90(data, k=rot, axes=(-2, -1))
+         if flip:
+             data = data[..., ::-1]
+         return data
+ 
+     def __getitem__(self, idx):
+         data = {}
+         # load data
+         data['name'] = self.infos[idx]['name']
+         data['meta'] = self.infos[idx]['metadata']
+         data['lr_path_full'] = self.infos[idx]['lr_path']
+         data['hr_path_full'] = self.infos[idx]['hr_path']
+         data['wb'] = data['meta']['wb']
+         data['cfa'] = data['meta']['bayer_2by2']
+         data['ccm'] = data['meta']['cst2']
+         data['iso'] = data['meta']['iso']
+         data['reg'] = (data['meta']['beta1'], data['meta']['beta2'])
+ 
+         if self.args["mode"] == 'train':
+             raise NotImplementedError
+         else:
+             data['lr'] = self.lr_data[idx]
+             if self.mode == 'eval':
+                 data['hr'] = self.hr_data[idx]
+ 
+         return data
+ 
+ class LRID_Dataset(Dataset):
+     def __init__(self, args=None):
+         super().__init__()
+         self.default_args()
+         if args is not None:
+             for key in args:
+                 self.args[key] = args[key]
+         self.initialization()
+ 
+     def default_args(self):
+         self.args = {}
+         self.args['root_dir'] = 'LRID/'
+         self.args['suffix'] = 'dng'
+         self.args['dgain'] = 1
+         self.args['dstname'] = 'indoor_x5'
+         self.args['camera_type'] = 'IMX686'
+         self.args['params'] = None
+         self.args['mode'] = 'eval'
+         self.args['GT_type'] = 'GT_align_ours'
+         self.args['command'] = ''
+         self.args['H'] = 3472
+         self.args['W'] = 4624
+         self.args['wp'] = 1023
+         self.args['bl'] = 64
+         self.args['clip'] = False
+ 
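+     # LRID pairs one aligned long-exposure GT per scene with short-exposure
+     # frames at a chosen digital-gain ratio; both sides are indexed through
+     # the pre-built .info files read in change_eval_ratio().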
+     def initialization(self):
+         # collect data paths
+         self.suffix = 'dng'
+         self.change_eval_ratio(ratio=1)
+         self.iso = 6400
+ 
+     def __len__(self):
+         return self.length
+ 
+     def get_shape(self):
+         self.shape = self.templet_raw.raw_image_visible.shape
+         self.H, self.W = self.shape
+         if 'bl' not in self.args:
+             self.bl_all = np.array(self.templet_raw.black_level_per_channel)
+             if np.mean(self.bl_all - self.bl_all[0]) != 0:
+                 warnings.warn(f'The 4 channels have different black levels! ({self.bl_all})')
+             self.bl = self.bl_all[0]
+         else:
+             self.bl = self.args['bl']
+         self.wp = self.templet_raw.white_level
+ 
+     def change_eval_ratio(self, ratio):
+         self.ratio = ratio
+         self.infos_gt = []
+         self.infos_short = []
+         for dstname in self.args['dstname']:  # expects a list of subset names
+             with open(f"infos/{dstname}_{self.args['GT_type']}.info", 'rb') as info_file:
+                 info = pkl.load(info_file)
+             eval_id = self.get_eval_id(dstname)
+             for idx in eval_id:
+                 self.infos_gt.append(info[idx])
+             with open(f'infos/{dstname}_short.info', 'rb') as info_file:
+                 info = pkl.load(info_file)[ratio]
+             eval_id = self.get_eval_id(dstname)
+             for idx in eval_id:
+                 self.infos_short.append(info[idx])
+         self.infos = self.infos_gt
+         for i in range(len(self.infos)):
+             self.infos[i]['hr'] = self.infos[i]['data']
+             self.infos[i]['lr'] = self.infos_short[i]['data'][0]
+             if 'syn_noise' in self.args['command'].lower():
+                 nt = self.args['noise_type'].upper()
+                 self.infos[i]['lr'] = self.infos[i]['hr'].replace('GT_align_ours', f'Noisy_{nt}/{ratio}')
+             self.infos[i]['ExposureTime'] = self.infos_short[i]['metadata'][0]['ExposureTime']
+             del self.infos[i]['data']
+         print(f'>> Successfully loaded infos.pkl (Length: {len(self.infos)})')
+         self.iso = 6400
+         self.length = len(self.infos)
+         self.templet_raw_path = self.infos_short[0]['data'][0]
+         self.templet_raw = rawpy.imread(self.templet_raw_path)
+         self.get_shape()
+ 
+     def get_eval_id(self, dstname='indoor_x5'):
+         if dstname == 'indoor_x5':
+             eval_ids = [4,14,25,41,44,51,52,53,58]
+         elif dstname == 'indoor_x3':
+             eval_ids = []  # [0,6,15]
+         elif dstname == 'outdoor_x5':
+             eval_ids = [1,2,5]
+         elif dstname == 'outdoor_x3':
+             eval_ids = [9,21,22,32,44,51]
+         else:
+             eval_ids = []
+         return eval_ids
+ 
+     def __getitem__(self, idx):
+         data = {}
+         # dataload
+         hr_raw = np.array(dataload(self.infos[idx]['hr'])).reshape(self.H, self.W)
+         lr_raw = np.array(dataload(self.infos[idx]['lr'])).reshape(self.H, self.W)
+         data["hr"] = (hr_raw.astype(np.float32) - self.bl) / (self.wp - self.bl)
+         # lr_raw = bayer2rggb(lr_raw.astype(np.float32)) - self.bl_all.reshape(1,1,-1)
+         data["lr"] = (lr_raw.astype(np.float32) - self.bl) * self.ratio / (self.wp - self.bl)
+ 
+         data['name'] = f"{self.infos[idx]['name']}_x{self.ratio:02d}"
+         data['ratio'] = self.ratio
+         data['ccm'] = self.infos[idx]['ccm']
+         data['wb'] = self.infos[idx]['wb']
+         data['cfa'] = 'rggb'
+         data['ISO'] = self.iso
+         data['ExposureTime'] = self.infos[idx]['ExposureTime'] * 1000
+ 
+         if self.args['clip']:
+             data["hr"] = data["hr"].clip(0, 1)
+             data["lr"] = data["lr"].clip(0, 1)
+ 
+         return data
+ 
+ class ELD_Full_Dataset(Dataset):
+     def __init__(self, args=None):
+         super().__init__()
+         self.default_args()
+         if args is not None:
+             for key in args:
+                 self.args[key] = args[key]
+         self.initialization()
+ 
+     def default_args(self):
+         self.args = {}
+         self.args['root_dir'] = 'ELD/'
+         self.args['ratio'] = 1
+         self.args['dstname'] = 'ELD'
+         self.args['params'] = None
+         self.args['mode'] = 'eval'
+         self.args['command'] = ''
+         self.args['wp'] = 16383
+         self.args['bl'] = 512
+         self.args['clip'] = False
+ 
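+     # ELD enumerates 10 scenes per camera. Within a scene, frames are ordered
+     # so that lr_id = iso_id*5 + ratio_id + 2 picks the noisy shot, and the
+     # nearest of IDs [1, 6, 11, 16] serves as its long-exposure reference.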
+     def initialization(self):
+         # collect data paths
+         self.suffix = {'CanonEOS70D':'CR2', 'CanonEOS700D':'CR2', 'NikonD850':'nef', 'SonyA7S2':'ARW'}
+         self.infos_all = {'CanonEOS70D':[], 'CanonEOS700D':[], 'NikonD850':[], 'SonyA7S2':[]}
+         iso_list = [800, 1600, 3200]
+         ratio_list = [1, 10, 100, 200]
+         hr_ids = np.array([1, 6, 11, 16])
+         for camera_type in self.infos_all:
+             sub_dir = f'{self.args["root_dir"]}/{camera_type}'
+             for scene in range(1, 11):
+                 for iso_id, iso in enumerate(iso_list):
+                     for ratio_id, ratio in enumerate(ratio_list):
+                         lr_id = iso_id*5 + ratio_id + 2
+                         ind = np.argmin(np.abs(lr_id - hr_ids))
+                         hr_id = hr_ids[ind]
+                         name = f'IMG_{lr_id:04d}.{self.suffix[camera_type]}'
+                         hr_name = f'IMG_{hr_id:04d}.{self.suffix[camera_type]}'
+                         self.infos_all[camera_type].append({
+                             'cam': camera_type,
+                             'name': f'{camera_type}_{scene:02d}_{name[:-4]}',
+                             'hr': f'{sub_dir}/scene-{scene}/{hr_name}',
+                             'lr': f'{sub_dir}/scene-{scene}/{name}',
+                             'iso': iso,
+                             'ratio': ratio,
+                         })
+         self.change_eval_ratio('SonyA7S2', ratio=1)
+ 
+     def __len__(self):
+         return self.length
+ 
+     def change_eval_ratio(self, cam='SonyA7S2', ratio=1, iso_list=None):
+         if iso_list is None:
+             iso_list = [800, 1600, 3200]
+         self.infos = []
+         for i in range(len(self.infos_all[cam])):
+             if self.infos_all[cam][i]['iso'] in iso_list and self.infos_all[cam][i]['ratio'] == ratio:
+                 self.infos.append(self.infos_all[cam][i])
+         self.length = len(self.infos)
+         self.ratio = ratio
+         self.templet_raw_path = self.infos[0]['lr']
+         self.templet_raw = rawpy.imread(self.templet_raw_path)
+         self.get_shape()
+         log(f'Eval set changed to {cam} (length: {self.length}): ratio={ratio}, iso_list={iso_list}')
+ 
+     def get_shape(self):
+         self.shape = self.templet_raw.raw_image_visible.shape
+         self.H, self.W = self.shape
+         self.bl = np.array(self.templet_raw.black_level_per_channel)
+         if np.mean(self.bl - self.bl[0]) != 0:
+             warnings.warn(f'The 4 channels have different black levels! ({self.bl})')
+         self.bl = self.bl[0]
+         self.wp = self.templet_raw.white_level
+ 
+     def __getitem__(self, idx):
+         data = {}
+         # dataload
+         hr_raw = np.array(dataload(self.infos[idx]['hr'])).reshape(self.H, self.W)
+         lr_raw = np.array(dataload(self.infos[idx]['lr'])).reshape(self.H, self.W)
+         data["hr"] = (hr_raw.astype(np.float32) - self.bl) / (self.wp - self.bl)
+         data["lr"] = (lr_raw.astype(np.float32) - self.bl) * self.infos[idx]['ratio'] / (self.wp - self.bl)
+ 
+         data['name'] = self.infos[idx]['name']
+         data['wb'], data['ccm'] = read_wb_ccm(rawpy.imread(self.infos[idx]['hr']))
+         data['ratio'] = self.infos[idx]['ratio']
+         data['ISO'] = self.infos[idx]['iso']
+ 
+         if self.args['clip']:
+             data["hr"] = data["hr"].clip(0, 1)
+             data["lr"] = data["lr"].clip(0, 1)
+ 
+         return data
+ 
1405
+ '''
1406
+ ds = np.load('E:/datasets/LRID/resources/darkshading-iso-6400.npy')
1407
+ lr_paths = glob.glob()
1408
+ raw_hr = rawpy.imread('F:/datasets/SELD/indoor_x5/100/004/000071_exp-512000000_iso-100_2022_08_17_06_21_43_034_12562490542059_orientation_0_camera-0.dng')
1409
+ raw_lr = rawpy.imread('E:/datasets/LRID/indoor_x5/6400/1/004/000115_exp-8000000_iso-6400_2022_08_17_06_21_58_288_12578251173465_orientation_0_camera-0.dng')#.raw_image_visible
1410
+ p = get_ISO_ExposureTime('F:/datasets/SELD/indoor_x5/100/004/000071_exp-512000000_iso-100_2022_08_17_06_21_43_034_12562490542059_orientation_0_camera-0.dng')
1411
+ p['name'] = 'IMX686'
1412
+ bl = raw_lr.black_level_per_channel[0]
1413
+ print(raw_lr.black_level_per_channel)
1414
+ p['bl'] = raw_lr.black_level_per_channel[0]
1415
+ p['wp'] = 1023
1416
+ p['ratio'] = 1
1417
+ p['scale'] = (p['wp']-p['bl']) / p['ratio']
1418
+ print(p)
1419
+ lr_raw = (raw_lr.raw_image_visible.astype(np.float32) - p['bl']) / (p['wp'] - p['bl'])
1420
+ hr_raw = (raw_hr.raw_image_visible.astype(np.float32) - p['bl']) / (p['wp'] - p['bl'])
1421
+ hr_raw = np.load('E:/datasets/LRID/indoor_x5/npy/GT_align_ours/004.npy')
1422
+ hr_raw = (hr_raw.astype(np.float32) - p['bl']) / (p['wp'] - p['bl'])
1423
+ print(hr_raw.shape, hr_raw.min(), hr_raw.max())
1424
+ print(lr_raw.shape, lr_raw.min(), lr_raw.max())
1425
+
1426
+ raw_hr.raw_image_visible[:] = hr_raw * (p['wp'] - p['bl']) + p['bl']
1427
+ img_hr = raw_hr.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=8)
1428
+ plt.imsave(f"{p['name']}.png", img_hr)
1429
+
1430
+ data = {'lr': lr_raw, 'hr':hr_raw.clip(0,1), 'name':p['name']}
1431
+ '''
dist/isp_algos.py ADDED
@@ -0,0 +1,3 @@
+ from pytransform import pyarmor_runtime
+ pyarmor_runtime()
+ __pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x0a\x00\x6f\x0d\x0d\x0a...')  # PyArmor-obfuscated bytecode payload; the binary blob is garbled by this render and elided
6e\x8c\xa9\x3b\xe5\xe4\x38\xe8\x52\xd1\x4f\xfd\x46\x48\x53\xfb\xca\xbe\x5d\xc3\xc9\x78\xb0\xc9\x7b\xc6\x3b\x00\x49\xa8\x0b\xb9\xcf\xb6\xaa\x56\x49\xbf\x54\x62\x95\xd2\x47\x38\xe5\xa9\x99\x54\x22\x65\x49\x18\x0a\x73\x29\x2a\xf4\x46\x3f\xd1\xf7\x04\x4a\x47\x49\x24\x0b\xe3\xa0\xd6\x9b\xcd\xd3\xf6\x27\xcc\xf8\x0b\x64\xc3\x40\xad\x0b\x5d\x21\x02\xed\x64\xaf\x52\xea\xc3\x3b\x05\x12\xe1\xd1\xe1\x2b\x41\x2c\x60\x6a\x86\x3c\x14\x41\x71\xce\xfe\xef\xf1\x24\x16\xff\x05\xf6\x3e\x1d\xd0\x57\xb3\x5e\x6f\x1d\x20\xee\x80\x40\x1d\x40\x90\x66\x90\x0f\x0a\xae\x6f\x19\x95\x51\x8c\x3b\x6f\xe4\x15\xf0\x38\x1b\x4f\xca\xbe\x34\x15\x45\x3b\x91\x72\x70\xb3\x1a\x5d\xc6\xfb\x3a\xb8\x3c\x19\x80\x15\xea\x55\xcf\xf1\x49\x8a\x2f\x3e\x0d\x07\xb8\xa7\x0c\xea\xc6\xde\xdd\x2e\x79\x3e\x4f\xf8\x11\xa6\xc1\xfa\x81\x92\xdc\x93\xed\x43\x95\x35\x9e\x01\x90\x47\xca\x8f\xe8\x6e\x15\x89\xbf\x89\xc6\xb2\xdf\xbc\xaa\x92\x60\x24\x85\xc3\x54\xb8\x7a\x47\x77\x85\x9c\x12\x49\xdd\xd6\x65\x84\x8b\x61\x9e\x23\xdd\xf9\x39\x66\x50\x09\xd4\x57\x08\x6d\x9b\xd6\xfe\xf4\x41\x03\xba\x97\x2b\x40\x53\x4c\x2b\x96\x0b\x3a\x8b\xc0\x6d\x0e\x4c\xf3\xd8\xbb\xbc\xcd\xd1\xce\xc5\xe2\xd5\xdc\x36\xce\xd7\x8a\xef\x28\x1b\x95\x46\x13\xfd\x78\x97\x53\xf9\x3c\x6c\xa4\x63\x76\xdb\x9a\x33\x41\xc8\x67\x47\x63\xf6\x93\xcb\x4d\x11\xf2\x02\x42\xb8\xe4\xba\xed\x18\xb7\xc0\x04\x30\xde\xfd\x84\x91\x4d\xd1\xe9\xb3\xfb\xda\x07\x53\x2b\xb0\x78\xce\xcd\x57\x95\xd0\xfc\x86\x45\xd7\x19\xc2\x17\x32\x6a\x7b\xc4\xfb\x10\xd8\x8e\x31\x40\xe7\xef\x64\x75\x19\xa6\xae\xaf\x10\x1a\x8e\xc7\xf1\x03\x50\xf0\xc2\xa5\xdd\x02\x80\x3e\xe1\x84\xb1\x57\x86\x74\x8b\x5b\xfe\xfb\x20\x0c\x14\x84\x16\x97\x03\x1e\xa7\x17\x65\xd5\x8e\x2a\xcb\x06\x47\x6e\x16\x20\xc8\x4a\xe5\x50\x94\xfc\xfd\x45\xe4\x99\x2e\xb7\xfd\x73\x50\xde\x30\xdd\x67\x6a\xbf\x2a\xb2\x75\x04\x52\xe6\xbf\x16\x4d\xab\x88\xe8\x65\xdd\x90\xe3\x46\xa2\x05\xf4\x66\xd5\x8c\x05\x5f\xe7\x4a\xc7\x63\x2a\x79\x9b\x7f\xec\x3e\x59\x40\xa5\xb5\xb0\xfd\x6f\x0a\x62\x32\xc3\x12\xaf\x0b\x68\xb3\xae\x15\xb7\xe8\xb4\x1f\x2a\xa8\xbf\x3a\x57\xea\x76\x4e\x78\x0d\x8a\xdf\x00\x2a\xc4\xea\xb8\x97\xa7\x17\xaa\x65\x15\xfc\x62\x7a\x6c\x53\x2d\x55\x87\x7b\xf4\xde\x54\x12\xf9\x95\x42\x9a\xd8\xff\x81\x5f\xfe\x6e\xed\xef\x19\x37\xf1\x63\xe8\xda\xb9\x00\x9d\x24\x07\x7e\x39\xa8\xf4\x30\xf1\x5e\xc9\xf6\x44\xdc\x5c\x86\xb1\xfc\x0e\x02\xde\xf6\xe0\x34\xc1\x5a\xef\x7c\x92\x0d\x10\xf5\x1d\x9d\xec\xd9\xc2\x4d\x01\x6b\x25\xab\xde\x5e\xcb\xbd\xab\x5f\xc8\x81\xb0\x5f\x88\x48\xaf\xb4\x26\x24\x64\x94\x25\x4b\x27\x21\x6a\xaa\xa8\x85\x1d\x4f\x12\x6f\xdc\x5f\x63\xaf\x93\x79\x54\xf0\xc7\x30\x88\xb4\xa1\xa4\xce\xab\x39\x09\x49\xec\x1d\x6e\x72\x20\xad\xf5\x2e\x8f\xb8\x06\x43\x98\xa8\xb3\xa7\xb6\x69\xbc\x89\x42\x76\x00\x0a\xfb\xbd\x46\x9a\x21\x1e\xec\x6a\x83\xf5\xc2\xb0\x73\x93\x20\x37\x56\x0b\xde\x3c\xc7\xcd\x66\x9e\x4c\x09\x27\x2f\x42\x39\x4c\x83\x50\x82\x85\x17\xf6\xe1\x48\x3c\x50\xc2\x4b\xb3\xb2\x6c\xe5\x1a\xe4\x71\xaa\xae\x3e\x3e\x79\x55\x34\xc8\xa3\xf7\xc7\x2d\xdb\xa3\xba\x3e\x84\xb8\xd4\x7f\x35\x2f\x8c\x2b\x07\xd1\x8b\xa9\xfa\x04\x2b\x48\x86\xfd\x97\x44\xce\x61\xca\x4d\x5d\xaf\xc4\x8f\x4a\x29\xe1\xcb\x74\x79\x2b\x18\x56\xd5\x64\x90\x69\xd5\x0e\x8d\x9e\x18\xe6\x32\x05\x86\xfa\xa8\x89\x0a\x0e\xaf\xe0\x61\xb5\x12\xbb\x3a\xb5\x0a\x39\x74\x9a\xb5\xc3\xa8\x78\xa1\x97\x1e\x33\xc4\xf7\x30\x94\xa6\x6d\x80\x59\x15\x16\x17\x89\xc3\x4f\x39\xb7\xc3\x35\x44\x52\x9f\x58\xef\xba\xdf\x31\xc9\x99\x72\x57\xfc\xfb\x3b\xe5\xb7\x59\x55\x91\xc1\x91\x6f\xd2\x81\xd4\x9b\x1f\xa5\xe4\x19\xe0\x95\xdc\x5f\xa1\x55\xa6\x9f\x91\x4e\x1e\x71\x05\xa9\xca\x24\xc6\xa5\x66\x97\x9c\x90\xe2\xe2\xe5\xe5\xc9\xbc\xf5\x26\x13\
x95\xaa\x57\x92\x04\x89\x32\x77\x70\xee\x38\xc8\xb9\xa3\x0e\x18\x58\x25\x33\x88\x6d\x9e\xbf\xef\x4c\xc7\x0a\xbb\x2c\xed\x22\x61\x06\x33\xf7\x2e\xdb\x05\xa2\x32\xb0\xe6\x9f\x4c\xb7\x1c\xa7\xe1\x77\x4e\x85\xcf\x73\xef\xab\xf1\x03\xc0\x8d\x7c\x1e\x84\x3f\x31\xd6\x85\x5a\x78\x6b\x41\xc4\xa5\x60\xfc\x40\xc6\xe2\x7c\xda\x4d\x26\x21\x53\x9c\x7b\x86\xc2\x0d\xca\x6b\x18\x33\xb9\x0c\x47\x3e\xe2\xe3\x7b\x62\xba\x99\x46\xab\xf5\x93\x27\xe1\x0f\xd2\x71\x9e\xb0\xd4\x2c\x75\x8b\xb6\x7b\xdb\x64\xba\xc7\x0d\x50\x7f\x7c\xaf\x3c\x90\x03\x42\x0c\x79\x7c\xb1\xc0\xa0\xb4\xb8\x4d\xe2\x34\x54\x22\xc0\x79\x54\x0d\x19\xa1\x7a\x42\xff\x0c\x36\xd7\xf0\xb9\xab\x97\xe1\x82\xb0\xfd\x11\xfc\x78\x28\x16\x43\xd1\x35\x7d\xae\xe6\xca\xa0\x28\x69\x45\x46\x6f\x00\xbd\x8c\x55\x12\x35\x93\xbd\x80\xb0\x16\x3c\xba\x74\x71\x7a\xce\x77\xa5\x05\xf2\x1f\x38\xd1\xea\x49\x2c\x7c\x45\x15\x9e\xfd\x5b\xe3\xef\xb7\x07\xe5\xe5\xff\x05\x57\xb8\xae\x78\xa6\x01\xa1\x27\x66\x2b\xb0\x21\xce\x4a\x70\xb2\x0d\xe2\x23\xe0\xbf\x02\xc6\x5c\x41\x05\xb0\xae\xd7\x98\xd4\xbd\x6c\x4a\xdc\x27\xcd\xb1\x74\xbe\x27\x6d\x01\x97\xe1\x55\x4e\x15\x61\xec\x17\x8c\xd6\x51\x85\x23\x70\xfc\x2f\x87\xdb\x1c\xd8\xe0\x74\x1c\x06\x9b\xbc\xa7\x16\xda\x59\xfe\xf8\x8c\xdb\x24\x78\xff\x3a\x4a\x0d\x85\xc3\xbb\x19\x10\xa3\xf0\x45\xf3\x13\x47\x95\x68\xe4\xb9\x01\x79\x45\xe2\x0c\x2b\xdf\x1b\xff\xe7\xab\x4c\xa5\xdf\x8f\xf4\x58\xb0\x0f\x00\x26\x13\x6c\x5d\xbe\x7d\x83\x41\x7b\x99\x61\x59\x07\xe0\x67\xb4\x7c\xc6\x51\xce\x7f\x0d\xa1\xaf\xbf\xde\x88\xd3\x6b\x25\x08\x21\x93\xcf\xa0\xac\x35\x2b\x90\xc6\x71\x25\x7f\x1b\xbc\xa9\x15\x21\x96\x5f\x19\xc0\x07\x3e\xe6\xaa\x4c\xf4\x26\x9c\x5e\x8c\x55\xbe\x36\x96\xea\xe0\x9e\x59\x0e\x06\x24\xb3\x31\xca\x35\xf6\x91\xab\x96\x3f\x94\x19\xda\x4a\x19\x3b\xea\x41\x6c\x51\x00\x62\x85\x0a\x79\x18\x81\xf3\xfa\x8d\x4d\x5d\x9b\x31\x19\x1b\x01\x1d\x8e\x0b\x11\xa5\x26\xc7\xa1\xb5\x6a\x78\x0c\xb7\x9e\x43\x00\xfd\x7b\x70\xa3\x69\x31\x17\x9f\xff\x71\x2e\x1f\xe8\xf7\x30\x62\x9d\xac\xc5\xaf\x9c\x85\x8e\x00\x39\xa1\x88\xce\x94\x72\x26\x20\x3f\xe1\x7e\x7f\x87\xaa\xbf\x9b\x3f\xa4\x48\x07\x3b\x82\x65\x49\x86\x60\x59\xaf\xc7\x01\xfc\xa5\x4e\xbb\xeb\x91\x82\x98\xe4\x82\x66\x77\x66\xa1\x6f\xbb\xcf\x0b\xaa\x25\xeb\x38\xc8\x98\x98\x99\x3c\x1b\xa7\x5a\x21\xf3\x1e\x66\xa5\x2d\x88\x8b\xcd\x8f\xab\x57\x82\xc1\xde\xb5\x2c\x21\x08\xf6\x65\xaa\xc6\xaf\x3c\x3d\x38\x67\x5c\xfc\x0c\x4f\xea\xb7\x5d\x86\xf7\xc5\x38\xf2\x0f\x68\x88\x4c\x4a\x75\x50\xed\x73\xc9\x36\x81\x5d\x14\x9b\x4f\xf7\x7b\xa2\xd3\xca\x50\x44\xe9\xa5\x45\x61\xd3\xeb\x02\x02\x21\x1b\xa9\x46\x10\xa5\x94\x58\xe0\xf3\xd9\x03\xf5\x47\xb9\x11\xf3\x4a\x13\xce\x37\xb9\xce\xd5\x78\xe6\x1d\x13\xe8\x16\x7f\x29\xed\x93\x13\x04\xde\x9e\x72\xfb\x43\x85\x1d\xe6\xd4\xcb\x41\x80\xea\x17\xa3\xa1\xe1\x72\xf6\x50\xb9\xcc\x63\x68\xe5\xe6\x04\xe4\xa2\x8f\x55\x71\x4e\x01\xf1\x3d\x74\x8a\x9e\x4b\x3c\xcf\x98\x1a\x40\xae\xdd\xcd\xaa\x0d\x58\x01\x27\xec\x4c\x05\xe1\xd0\xff\x73\xee\x02\xee\x48\x51\x0d\xde\xac\xc3\x5e\x42\xf5\x69\xb6\x19\x0e\xcb\x0a\xb8\x0a\x68\x77\xbe\xb0\x0e\x37\x21\x43\xed\xa6\xa6\x69\x5f\x7a\x2d\xb5\xf1\xcd\xe6\x42\xbd\xdb\xe6\xb2\x62\x66\xbf\x29\x9a\xa3\xb5\x6f\x7a\x9e\x5d\xfe\x21\x95\x37\xab\x78\x6b\xe1\xcc\xf0\x37\x60\x36\x7d\xe8\x81\x15\x4e\x64\x90\x2d\x62\xcb\xdf\x7f\x91\x8c\xca\x17\x70\x2f\xf6\xe4\xda\xf5\x60\xaf\x0a\x70\x28\x07\x8e\x4d\x91\xeb\x28\x93\x83\x6d\xad\x6a\x17\x30\x98\xfc\xdb\xe3\x7e\x86\x26\x9a\x61\x8a\xca\x61\xf1\xbf\xf4\x77\x23\x6d\x76\x36\xe2\x13\xdd\xae\xc5\x3d\x46\x70\x80\x0b\x64\x50\x04\xd7\xa1\xb6\xdb\xdd\x44\x62\xa0\xcc\x9a\xa6\xa6\xbd\x52\xb9\xd9\x66\x49\x73\xf6\xe7\x9e\x5d\xd4\x03\x79\xab\x3b\x55\xb3
\xb5\x53\x6f\xa5\x03\x4c\x72\x32\x84\x39\xe0\xfd\xe2\xf6\xda\x3e\xd0\xb8\x8e\xac\x4f\x1d\xc6\x5c\xc8\x2c\xc9\xdf\x8f\x32\x53\x3a\x6c\x33\x80\xd2\xf7\x19\xbf\x0d\x88\xba\x4e\x76\x70\xb7\xe7\x21\x2b\x77\x59\xbd\xd8\x21\x8f\xc8\x12\xc4\xfd\xa8\x36\xc7\xe4\x7e\x80\x9e\xaf\x3a\xc2\xba\xa7\x1e\x96\x06\x8a\xa5\xdb\xf7\xd8\x47\x5c\x71\xff\xaa\x9f\x42\x67\x9c\xeb\x2b\x4b\x0b\x88\xee\x4d\xe7\x22\xc1\xf9\x97\xa7\xa6\x86\x4c\xc4\x8e\x91\x49\x92\xb6\xeb\x35\xee\xfa\x24\x04\xf9\x20\x6a\x85\xcd\x7d\x41\xa1\x19\xb1\x30\xf0\x18\xe9\xdb\x24\x5d\x6a\x52\xc6\x11\x2b\x51\x1a\xd8\x38\x10\x3a\xaf\xb1\xeb\x64\x18\x8b\xf9\x69\xbf\xb2\xed\x28\x30\xc6\xa6\x8a\xac\x95\x95\x74\xd4\xe8\xdc\x5c\x50\xf3\xde\xbf\xed\xf1\x1c\xf5\x10\x9c\x18\x2c\x45\x26\x37\xc3\x9f\xa2\x98\x3b\x1c\x9d\xcd\xfb\xde\xb1\xd1\x89\x3c\xd7\xde\xa6\x77\xb8\x30\x34\xd5\x51\xa6\x6c\xee\xb4\x83\x96\x13\xe9\x04\xa8\x01\x49\xf5\xff\x04\xf0\xd2\x52\xad\x65\x75\xd0\xe7\xc2\x62\xee\xb6\x08\xbb\xdd\xba\x91\x60\x59\x1f\xa3\xa0\xe7\x43\x09\x25\xe5\x61\x4c\x0c\x91\x10\xbe\xaf\xc5\xf7\x24\x00\xc1\xab\xf5\xc4\x66\xd1\x13\x6b\x50\xf0\xf2\x04\xb8\xc7\x08\xb0\x5b\x39\x2b\xb1\x75\x8c\x10\x1d\x1a\x45\x25\x70\xb0\xc0\xd8\xac\x8d\xbc\xdf\x55\x81\x64\xf1\xb3\x03\xa8\x05\xe6\xb9\xc5\xb4\x0a\x61\x66\x73\x9d\xd0\x03\x84\x4a\xcf\x8b\x9f\x50\x4d\xd9\x61\xe5\x19\xce\xfc\x5c\x27\xd2\x8a\xd9\x35\x76\x55\x1a\xf5\x36\x67\x06\x91\x85\x79\x03\xc6\x89\xa2\x0f\xb4\xb9\x2f\x33\xa3\x63\x5f\xa4\xa6\x25\xa2\x48\xce\x26\x40\x4a\x1c\xb6\xf9\xf9\x21\x51\xc0\x15\xb8\xbb\xe2\xc4\x45\x01\x86\xdf\xca\x36\x34\xef\x0d\x1e\x6a\x75\xa9\x8e\x7d\xa0\x8d\x98\x61\xcf\x79\x31\xe6\x03\xb7\x76\x89\x61\xc5\x74\x8a\xb1\xa7\xcc\x73\x40\x2b\x52\xe1\xc1\xf1\x13\x00\xdc\xd7\xa6\x74\xd6\xc3\x19\xdd\xd2\x20\x7b\x02\x09\xde\xb5\x87\xfc\xdf\xe3\xeb\xb5\x29\x29\x6e\xc2\x62\x64\xe7\x7c\x86\xf5\x78\x71\xad\xf8\x63\x27\xc5\x8c\xfa\x8b\x25\x29\x64\xd5\x44\x6c\xf2\x01\xba\xa1\xca\xf3\xd9\x54\xdb\x17\xf1\x88\x4e\x2b\x92\x75\x1e\x12\xa1\x83\x02\x1c\x48\xa6\x06\xe8\x4c\x7d\x35\x2a\x8a\x8e\xaa\x55\x12\x90\x8e\x6e\xec\x7f\x63\xb1\x1c\x1c\x35\x7b\x94\x66\x13\x0e\xbf\x83\x1c\x2c\xee\x2e\x72\xce\xee\x47\x71\x45\x6e\x9d\xad\xaf\xfb\xb0\x12\x71\xe5\x0a\x2f\x4d\xaa\x35\xfd\x52\x37\x71\x37\x34\xe4\x0d\x21\xb3\x01\x19\xbe\xa4\xb7\xee\x6c\x24\x2b\x0e\x18\xc7\x87\x52\x76\xf0\xab\xa2\x3a\x37\xd4\x92\x19\xb2\x2e\x9a\xbe\x28\x96\x0f\x5f\x56\xaa\x89\xe4\xe8\x2f\x6f\x20\x54\x39\x96\x5d\x16\x03\xef\xdb\x44\x84\x22\xf6\xda\xa4\xf6\x37\x31\x9c\xcd\x74\x87\x6e\xb9\xd0\xb3\x32\x43\x6f\x6e\xfc\x21\x02\x0d\x94\x80\x7e\x3f\x42\x5d\x51\x1f\x29\x9f\x42\x98\x4e\x8b\xd9\x73\x83\xc2\x3a\xcb\x0d\xbd\x31\x64\xcf\x69\xe4\xd7\xc0\xaf\x63\x2b\x67\x4a\x6a\xdc\x5e\x38\xae\xd3\x85\x6d\x24\xfb\xe2\xbc\x42\x4c\x4c\xe7\xd5\x28\x44\xc1\x72\x3a\xb7\x1a\x58\x51\xf0\xd4\x64\xe3\xa9\x60\x83\x0f\xe4\x37\x08\x52\xec\x11\x52\x79\x83\x8b\x2a\x35\xc6\x16\x30\x5d\x6c\x03\x4d\x85\x46\xd2\x69\xce\x68\x2d\x96\xce\x61\x2e\xb0\x61\x46\xe5\x4d\x8b\x2a\x51\x9a\x4c\xf7\x81\x65\x15\x8f\xd9\xa1\xaf\x2b\xd1\xd7\xb1\x8c\xdc\xcd\xd9\x2e\x06\x83\xa3\xfb\x88\x3e\xb0\x58\xc9\xce\x73\xd4\xf3\xa8\x14\xe1\x70\x3a\x92\x29\x05\x21\x76\x7d\x55\x3a\xa0\x78\x4e\xea\x1c\x3c\x0c\x56\xd0\x2f\x18\x23\xba\x70\x97\xb6\xb7\xeb\xdd\x97\xcc\xe0\xe8\xfd\xa8\xc6\x43\x59\xd0\xac\xa2\xf4\x13\x93\xdc\x05\x71\x78\xf3\x68\x83\x80\x47\x2c\xdc\x5f\xf9\x6c\x7b\x6a\x5a\x16\x30\xd4\x51\x0b\x78\xdd\xff\xe9\x32\x1a\xd3\xad\x4f\x7c\xb1\x6e\xa2\x41\x73\xb7\x69\x92\x31\x39\xe4\xf7\xbf\x2f\xeb\x1d\xd4\xc9\xfe\x9c\x8e\x5e\x3d\x9c\x3e\xcb\xb2\xa0\x59\x3d\xfe\xf3\x19\xaa\xb4\xee\x9f\x44\xe8\x83\xf4\xb6\x53\x92\x59\x5c\x05\x5c\x2
f\xe5\xca\x7b\x79\x0a\x51\xc6\xd9\x38\x3a\x7f\x26\x64\xc8\x68\x3c\x7c\x0e\x9f\x41\xb8\xcb\x84\xed\x53\x03\xd1\x60\xd5\x9d\x8c\x45\x66\x76\x4c\xf4\x91\x1b\xf3\xf3\x15\x73\x68\x8b\x71\xf7\x41\xb5\x94\x3e\xf7\xcf\xca\x21\xf5\x77\xbe\xa1\x29\xd7\x2f\x38\x91\xf2\xf2\x55\x61\x37\x62\x89\x60\xb8\x93\x8e\x1a\x4b\xad\x3b\x88\xe6\x3f\x10\xca\x94\xee\x0c\x13\x06\xec\x98\xde\x03\xa7\xef\x60\x17\xda\x83\x39\x6e\xba\xeb\xf2\x2f\xcd\xb2\xb7\x27\x37\x3f\x4b\xea\xcd\xb4\xcb\x6f\x65\xa0\xff\xac\x17\x53\x68\x13\x9f\xbc\x6c\x45\x16\xea\xdc\xd5\x1a\x82\x0b\xd9\xcd\xfd\x63\x2e\x3e\xf8\xe1\x4f\x27\x52\xa6\x83\x74\x0d\xae\xee\x82\x43\xa8\x16\x5f\x64\x48\xac\x85\x91\xb4\xfa\xf2\xd6\x48\x41\x15\x06\x68\x9a\x75\xcb\x58\xbc\x15\xd0\xe4\x83\x61\x89\x59\xab\x0f\x00\xd8\xac\xc8\xb3\xb8\x98\x3c\xed\x9f\xd6\x19\x6d\x35\x93\xf4\x86\x75\xbd\x0a\xa4\x02\x15\x2a\x63\x90\x39\x03\x22\x75\x7f\x62\xe5\xa8\xee\x0a\x1e\xc1\xd2\x4f\xba\xc1\x03\x39\x9c\x0c\xad\x12\xf7\xbe\x35\x5e\xc1\x7c\x9b\x7e\x3e\xaa\x24\x38\x17\xa3\x2a\xec\xee\x27\xd0\xc6\x7a\x7f\x30\x03\x08\xac\x82\x09\x48\xaa\x08\x8a\xc8\x24\x5f\x24\xf9\xa4\x39\xfc\xd1\x5a\x42\xe1\x74\x1a\x88\x92\xcf\x0e\x94\xc9\x4a\x10\xb9\x9c\x70\x25\x7c\xe8\x38\xc3\x92\x41\xc7\x7a\x9b\xc1\x12\x8d\xc6\xdc\x7c\x7f\xe9\x38\xd1\x6b\xf4\xeb\x41\xf1\x2d\x44\x2a\xa3\xd2\x4b\x29\xd0\xb1\x32\x85\x69\x4c\x3f\x5a\xdf\x90\xa2\xce\x29\xb2\xd0\xd3\x70\x6a\x4c\xa8\xb4\x9b\x95\x21\x28\x79\x2c\xd3\x3a\xfb\xff\xe1\x02\x8e\x67\x28\x54\x9b\xb5\x80\x61\x54\xb4\x37\xc5\x0c\x5f\x6a\x7e\x07\x2f\x44\x4e\x0c\xba\x9a\xe3\xf8\x47\xbe\xc4\xe8\xbf\xb0\x36\x27\xe1\x7c\xef\x8f\x08\x76\xe9\x5b\x69\xe4\x88\xdb\xcb\xa1\xb5\x14\x6f\x87\x95\x90\xe0\xcb\xf9\xc1\x15\xd0\xdb\x7d\x58\x23\x02\x4a\xe8\x0e\xb1\xac\x4d\x74\x51\x6a\x12\x58\x21\x4b\xcb\xa6\xc3\x37\x8a\xb2\xf2\x8b\xb6\x75\x30\x70\x82\xd5\x57\x17\xf2\xfa\xcf\x4c\x49\x42\x7a\xed\xae\x7e\xb5\xc1\x34\x76\xf9\xc8\xc8\x3c\xe8\x99\x9e\x1b\xac\x36\x84\x4e\x3b\x26\xfe\x9b\x18\xe5\xff\xb3\x1b\x2b\xf0\xb0\x5b\x9d\xd5\x00\x8f\xd1\x0a\xc1\x88\x5d\x7d\x58\xc5\x89\x4f\x3e\xec\xd8\x37\x32\x12\xb0\xcf\xd0\xa4\x51\x53\x4c\x29\x23\x48\x12\xf4\x27\xa7\xd3\x27\x10\x24\xf3\x1e\x3b\x4d\x33\xf0\x93\x3f\x7a\x5f\x10\xbd\xfc\x45\x10\xa5\xbc\x99\x4e\x1e\x79\xe9\x3d\xda\x71\x42\x5a\xc0\xad\xe2\xde\x53\x30\x4e\xa6\x75\xf8\xc6\x97\xda\xaa\x97\x6f\xf0\xf2\x39\xe1\x32\x61\xb8\x5e\x05\xbe\x86\x1a\x25\x84\xa5\x9c\xc7\x04\xd3\x2e\x1c\x08\x80\x02\x33\x12\xd2\x21\x99\x90\x87\x34\xdb\x9d\x66\x05\x50\x97\xdf\x9d\xd1\x9a\xf0\xd0\xee\x68\xd9\x10\xf2\x00\x05\x49\xef\xad\xcf\x68\x6c\x67\x4e\xb1\xb0\xe7\x20\x8a\x1b\xa2\x25\x28\x0c\x3a\x2e\x48\xaf\xb0\x8a\x43\xd2\xd4\x35\x14\xb8\xed\x12\xb0\x7a\x79\xb6\xf2\x33\x3f\xa0\x3b\x1e\x66\x29\xd8\xa9\xaa\x1c\x34\x55\xd2\x9f\x45\x7c\xc4\xb1\x3e\x14\x32\xff\x1a\x07\xb1\xcd\x47\xeb\x57\x01\xf5\xed\x01\x51\x20\xae\x5f\x4e\xf4\x60\x09\xfa\x20\x6f\x86\x5a\x61\xbe\x0e\xc4\xd5\x62\xcf\xfe\x23\x1a\xb1\xf5\xc5\x87\xf5\x9e\x3e\xda\x4b\xee\x55\xe6\xc5\x0c\x65\x95\x0d\x74\xeb\x61\x49\xee\xed\x80\xd2\x4f\x5d\x9f\xdf\x5e\x28\x5f\x11\x91\x22\xb0\x32\x45\x9c\x1e\x59\x49\x10\x9c\x89\xa3\x90\x3e\xda\x5d\xb6\x5e\xa1\x1d\x3f\xaa\x6e\xa7\x68\x0e\x07\x31\x79\xb4\xb3\x3e\x0f\xec\xde\xaa\x77\x22\x5f\x90\x74\xbc\xc6\x0f\x5f\x58\xdc\xde\xdb\x15\xa0\xd4\xa7\xf6\xcf\x67\x69\x18\x06\x3a\x38\x30\x62\x71\x4e\x0a\x40\x34\x3e\xf7\x19\xe5\x41\xe7\x83\xc2\x4d\xb0\xc1\xee\xdd\xc6\x03\x18\xc7\x15\xa6\x32\xdd\x5b\xf3\xb7\x2a\x6f\xb7\xb7\x95\xaa\x4a\xb3\x09\x06\x85\x5c\xf0\xf0\xa7\x02\x97\x11\x30\xa8\xd0\xf4\x3e\x63\xc8\xbe\x67\x8e\xa3\x75\x80\x52\xf2\x43\x9f\x69\x9b\xef\xe2\xfd\xbd\x0f\x1e\xac\xe2\xf0\xaf\xe4\x40\x
93\xac\xb1\xbb\xb8\x1c\x25\xaa\x9a\xc5\x01\x85\xe0\xda\x52\xfd\x38\x9e\x03\x9c\xae\xaf\x30\x32\x3c\x18\x7e\x74\x1b\xa8\x41\x2f\xd3\x31\xb3\xa1\xb8\xb0\xef\xe8\x0c\xf1\xcf\xbf\xa1\x35\x1c\x3a\xc5\x26\x7b\xcb\xe2\xfa\x7b\xf2\x8c\x61\xce\x4e\x17\xac\x3a\x25\xf3\x3f\xaa\x93\xed\x3f\x97\xf0\x0c\x16\x8e\x57\x0e\x46\x79\xf6\xa9\x68\x91\x84\x73\x5a\x99\xcb\xdd\x90\xc5\x57\x78\x38\xe0\xff\x72\x67\x8f\xaf\xf5\x55\xd8\x3b\x5b\x6f\xa6\x44\x4c\xd5\x22\xba\xde\x89\x4f\xa3\xee\x09\x5c\xd8\x1b\x43\xff\xaf\xc8\x18\x32\x27\xcf\xfb\xa7\x8d\xe7\x55\x51\x4b\x54\xd4\x02\x40\x2e\x50\x10\x83\x64\x14\x24\x81\x65\xb8\x71\xc2\x41\xeb\x17\xf4\xf0\x69\x21\x90\x9d\xb6\x61\x53\x75\x63\x9f\xec\xee\x99\x6c\x30\x88\x22\xb3\xbb\x2d\xfe\xf4\x67\xc2\x73\x9c\x7a\xe4\xff\xda\x31\xad\xfc\xb9\x18\x28\x83\x7f\xc7\x37\x0e\xb8\x44\xd5\x78\x97\x52\xd3\xbd\xf7\xcd\x63\x17\x8b\x7b\x42\x4d\x7b\x31\x95\xc9\xe6\xfb\x5d\x48\x62\x3d\x4d\x23\x7a\x2c\x93\xa4\xb6\x0f\xef\xc9\x11\xf7\xc6\x64\xdb\x96\x46\xb7\xb6\x40\x20\x3d\x62\xa2\xbd\xf7\x99\xf9\x2f\x0d\x6b\xfc\x62\xb6\x45\xe2\xba\x21\x20\x9d\xca\x55\xe2\x81\x16\x67\xdb\x91\x2e\xfe\x1a\x36\x50\xba\x70\xa8\x90\x91\xc1\x57\xc8\x99\x74\x41\x15\xd2\xb3\x4c\xf7\x48\x1e\x1c\x6a\xe2\x9e\x67\x62\x96\x28\x07\x90\xde\x42\xfe\x2a\xc7\xe2\x54\x4d\x06\x4b\x25\x2e\xb4\x33\x05\x30\x66\xf6\xee\x40\xd6\x3b\xc2\x69\xf0\x5f\x44\x2c\x6c\x61\x93\xfa\x61\x7e\x8a\x3a\x71\x9d\x11\x3a\xe9\xfe\xa0\x22\xe7\x5e\x3e\xdb\x43\xa5\xd3\x0f\xf8\x62\xb3\xb5\x86\x8c\x47\xde\x1b\xf1\x2d\x27\x48\xb5\x43\xfe\x24\x04\x4e\x1f\x88\x8d\xef\x2a\x75\x16\x5c\x6e\xba\xb5\x6e\x1d\xd5\xe8\xe1\xf4\xb8\xd2\x4b\x0b\x37\x03\xdd\x25\x95\x90\x7f\x59\x5a\xd2\x5f\x29\x5d\x4d\x23\x31\x10\x70\xda\x81\x37\x2a\xde\x6b\x27\xab\x89\x8f\xba\x36\xd5\xcd\x4d\x85\x1c\x5a\xa8\xdf\x80\x86\x77\x5e\xaf\x8e\x4e\x91\x7b\xd1\xbd\x51\x36\xe0\x63\x1a\x18\xbf\xbb\xec\xc6\x07\x7c\x2d\xb8\xde\xa6\x7b\xd8\xfd\x8e\xe6\xdf\xd4\x82\x4f\x0f\x00\x81\xf7\x4c\xe0\xf4\x7d\x80\xaf\xd2\xe4\x1b\xd9\x08\x40\x95\x2e\xa1\xb4\xa9\x4f\xbd\x91\xfb\x23\xc1\x9c\x7d\x88\x36\x20\x34\x0a\x41\x78\x2d\x2a\x45\xd6\xe1\x6b\xb0\xc9\x78\xac\x86\xe3\x8c\x95\x5c\x64\xe7\x16\x24\x98\x51\x8a\x57\xb2\x90\xce\xe1\x6c\xd0\x93\x8b\x16\xbe\x3b\xbd\x05\x2e\xa4\x20\x33\xd8\xa4\x5b\xeb\x9a\xc1\xe5\x8b\x16\xde\x8f\x88\x63\xca\xfa\xe2\x67\x35\xa8\x55\x78\x78\xfc\xea\xb5\x22\x16\x0f\xac\x17\xdb\x73\x99\x24\x27\x3b\x62\x26\x0f\x0c\x3a\x55\x0b\x93\x15\xee\x1e\xa1\x35\x51\xb2\x0c\x88\xca\xa9\x4f\xde\x9e\xae\x53\x8b\x3f\x0a\x9c\x21\xcf\x08\x25\x30\x2e\x4e\x9f\x4e\xcc\xae\xb1\xc3\xa3\x60\xeb\x28\xee\x6e\xdb\x44\x1d\x42\x7e\x13\x5c\xd3\x7f\xed\x31\x28\x40\x9c\x48\x26\x78\xc3\x4d\x7b\x11\x64\xb1\xf7\x11\x0a\x64\xb0\x3f\xcf\x92\x56\x9d\xad\x15\x1d\x4d\x48\xb4\x3e\x48\x96\xeb\xc0\x36\xa4\xcc\x42\xe9\xe9\x54\xc7\x2a\x22\x03\xfd\xf3\x00\x39\x60\x48\xb7\xc6\xb0\xd2\x69\x2c\x0b\x36\x7c\x10\xb8\x72\x65\xd7\xbe\xbc\xf8\x9b\x4e\x2c\x6d\x94\xe8\xe6\x35\x8c\x58\x35\xd4\x9b\xf3\x09\x1f\x98\x38\xdf\xf3\x50\xe8\xa6\x57\x64\x12\xe9\x80\x31\x9e\xc1\x79\x5b\x2c\xbd\xfc\x33\x86\x29\xd4\x4d\x9e\xa3\x8b\x49\xdb\xfa\x9a\x24\x10\x60\xb0\x16\xee\xf9\x20\x19\x31\x5d\x4f\x9a\xf7\x97\x9b\xd1\xa1\x04\x85\x11\xb1\x26\xd6\x2d\xe9\xf3\x8c\xf4\x9f\x4f\x83\x4c\xa2\x49\x4e\x6f\x5f\x37\xdf\xb3\x3c\x1e\x64\x04\xb1\x1e\xbf\xa6\x80\x29\xc9\x06\xd5\xa8\xec\x2f\xc6\xa6\x60\x15\x10\xc3\x5e\xe0\x0f\x0e\xcf\xa3\xdb\xcf\x86\x6d\x05\x87\x24\x57\xec\x73\x10\xff\x1a\x96\x17\x9e\x22\x5a\x14\x3f\xa0\x1e\xb2\xfc\x62\xbc\xaa\xa3\xff\xaf\xb1\xc5\x26\xb3\x88\x53\x98\x2a\x62\x44\x2d\xfb\x3d\x99\x8c\xab\xa6\x03\xe7\x42\x70\x10\x96\x15\x44\xcb\x3b\xac\x2f\x12\x61\x9e\xde\xf1\
xc1\x43\x9f\xc3\x8b\x13\xae\x0a\xd1\xb7\xb4\x2b\xa3\x8c\x29\xd4\x8e\xc9\x7f\xfc\xe7\xc6\xad\x84\xef\xd5\xf1\xbe\xa2\xb2\xdd\x63\x50\xbc\x67\x36\xfc\xed\xab\x11\xe8\x7b\x93\xa5\x2a\x9a\xa2\x19\xe3\xd7\x3f\xaf\x4e\x69\xaf\x85\xdc\xc1\xb0\x53\x41\x77\xa2\x8e\x6c\x5a\x47\xe6\xa1\x0a\x9b\x33\xae\x4e\xb8\x1e\xb6\x94\x77\xb4\x9a\x21\x2a\xf8\xc5\xea\xe9\xa2\x99\x95\xb8\x94\x32\xe5\x4d\x4b\xdd\x6e\x4e\x5f\x3c\x73\xd8\x4a\xef\x7a\x0c\x87\xd1\xcf\xed\x5b\xe1\xa9\x5e\x83\xf1\x39\x17\x1c\x96\x46\x2c\x52\xb0\x8b\x43\xdc\xb8\x82\x97\x2f\x97\xb8\xfc\x1f\x6e\x13\xf4\x65\xd7\xd2\x7e\xc7\xfb\x5a\x58\x63\x87\x0a\xe4\xc3\xf6\x01\x52\xb7\x3e\x9f\xd8\x25\x7f\xb7\x1d\xfc\x76\xf7\x4c\x6e\x9f\x55\x45\x8f\xa0\x9b\x74\x54\x27\x3f\x0f\x43\x56\x18\xb8\x72\x67\x1b\xf5\x0d\xa5\xfa\xba\x01\x50\x8d\x1a\xa6\x76\x86\x7c\xf8\x48\x82\x89\xa4\x50\x00\x68\x81\x48\xcb\x41\xea\x4a\x86\x1b\x9c\x62\xa6\xe8\xda\xb7\xf8\xa4\xd8\x0b\x43\x49\x4a\xf8\x45\x0b\x80\x18\x31\xf1\xab\x4f\x82\x65\x07\x31\x3e\x4c\x7d\x66\x90\x9c\xcc\xec\xe9\x08\xfd\xe2\xb8\x35\xb8\xe3\x53\x63\x19\x66\x41\x6f\x0d\x99\x5d\xd2\xd9\xbe\xfd\x19\x39\x7e\x95\xe1\x34\x75\x4a\x3a\x0a\xdf\x10\x2f\x1a\xc9\xe9\x6b\xd9\xbb\xf4\x7a\x3e\x66\xb7\xd0\x11\x3e\x82\xc8\x78\x1f\xef\xa0\xb8\x39\xbe\x7e\xfe\x78\x6c\x13\xd1\xd6\xa8\x8f\xe3\xc5\xd6\xfa\x79\x23\xe8\x9f\xe6\x7e\x93\x25\xbb\x55\x5c\x1a\x6e\xce\xf7\x16\x28\xfb\x25\x76\xa3\xd0\x40\x18\xda\x0a\x04\x50\x7b\x77\x00\xd9\xf7\x7d\xd3\xb3\xa3\x21\xe3\x62\x46\xad\xc0\x8e\x85\x72\xac\xf2\xaf\xf4\x17\x26\xec\xfa\xb0\xa2\x3b\xb3\x66\xa3\x6f\xcd\x1a\x1b\x2f\x69\x33\x95\xb4\x35\x69\x81\xf0\x79\x65\xcf\x5c\xb3\x4b\xee\x43\x67\xd8\x17\x39\xc7\xf4\xba\xfd\x9c\x37\x94\x7b\xd7\xce\x73\xf3\x49\xcd\xf5\xb7\x7e\xca\x6b\xad\xba\x48\x81\x08\x97\xa6\xe2\xf6\xe0\x06\x43\x9a\x30\x96\x93\x63\xec\x68\x81\x78\x1a\x6b\xd4\x3f\x7b\x51\x6f\x7d\x6e\x0e\x14\xf5\xe8\x53\x5d\x65\x8e\x55\x6a\x40\xe8\x12\xf7\x98\x44\x90\xc2\x4f\xcc\xfa\x7e\xd9\x16\x9f\xd9\x3a\x82\x72\xcc\x38\x7a\xf7\x86\x25\x7b\xcc\x4e\xf5\x53\xc5\x7f\x19\x10\x01\x59\xdd\xf3\x38\xf2\x99\xef\x1f\x99\xc6\x18\xd4\x0e\xc5\x98\x42\xdd\x90\x11\x01\x03\x21\xdc\xed\x3c\xba\x1c\xf9\x4d\xef\xfa\xc3\x44\xaa\xb6\x58\x6d\xc7\x78\x4f\x9c\x36\x19\x48\xd8\x80\x23\x2b\xfa\xdc\x3f\x97\x09\x3b\xdd\xda\x1e\xf5\x7b\xbf\xdf\x20\x97\x8e\xee\x62\x2c\x12\xcd\x84\x30\x7a\x8d\x41\x00\x84\xd2\xf9\x39\x82\x9e\x5b\x1a\x57\x17\xba\x47\xfd\x89\x4a\x6d\x64\xa5\x83\x32\x4e\x9d\xda\x30\x10\x4b\x26\xd4\x15\x68\x49\x1e\xee\x2d\xba\x9d\xd2\x61\xe8\xc6\x22\x52\x09\x70\x63\x67\x5d\x16\x6f\x13\x62\x54\xeb\xf2\x42\x6b\x43\x40\xaf\x7e\x86\x59\x3a\xbd\x7f\xd8\x78\xc6\x16\xf5\xbc\x9a\x48\xf7\xa7\xcd\x66\x0c\xac\xa5\xbb\x42\x94\xbc\xe0\xe9\x4b\xfa\xd3\xe8\x92\x8f\xe6\x1c\xb8\x53\x7e\xe7\x7b\x98\xb5\xb1\xcc\x86\x76\xa4\x2a\x3c\x6e\x9f\x86\x68\xc7\x3d\x70\xf7\xec\x73\x0f\xf1\xdb\x69\x76\xa0\x96\x09\x6c\xb1\x28\x8c\xc3\xe3\x9e\x7d\xc8\xaf\xfe\x9c\x5d\xb9\xe3\x3e\x81\xff\x71\x7c\x9c\xb1\x89\xb8\x4c\x34\xc8\x40\xc7\x82\xc9\x0f\xa0\x82\xd9\x57\xe0\x11\x45\xaa\x8c\x14\xcf\x54\xeb\xcc\xb6\x5f\xa6\x1b\xb1\xa6\x01\x45\xf7\xd3\xdd\x7d\x31\x44\x2f\x71\xfa\xc8\xd6\x53\x04\x5c\x95\xeb\x89\x48\xc2\xc1\xe4\xdc\x31\xe8\x4a\x12\xcf\x99\xcc\x17\x7b\xe7\xcb\x84\x9e\x0b\x91\x81\x9c\xa8\x1f\x96\x1e\x26\x92\x00\x62\x15\xd1\x40\x14\xd9\x10\x1c\xa7\xa0\x46\x6e\x40\xbb\xce\x78\xb1\x16\x35\x15\xb4\x3f\x62\x2c\x2d\x44\x97\xd0\xaa\x76\xb6\xf7\xbc\xe5\xb7\x61\x5c\xb7\x3e\x47\xaf\x1d\xd9\xe2\x84\x32\x82\x89\x7c\x86\x88\x57\x6b\xe9\x4c\xd4\xf4\xba\x2f\xb0\xc7\x56\x65\xfc\x4f\x72\x97\xc8\x95\x27\x8c\x9b\x66\xf6\xeb\xcd\xa8\xbc\x50\xe2\xbe\x94\x10\x48\x85\xdd\x8e\xf0\x0c\x7c
\xac\x57\xc9\x8b\xb1\x7a\x0e\x4e\x59\xb2\xf2\xc9\x73\x94\xd1\x6d\x16\x9d\x1c\xed\xcd\x24\xf1\x10\xb5\x4f\x34\xd7\x43\x00\x5d\xc1\x16\x33\x76\x01\x6d\xb3\xd0\x92\xaf\x82\x68\x4d\x47\xd9\xa7\x6c\x45\xb2\x91\x16\x17\x0f\x42\x85\xf3\xd8\x96\x4c\xbd\xa4\x43\x7d\x9c\x7b\x73\x1e\xfa\x2a\x08\x1a\x0c\x22\xcf\x5e\x1a\xee\x82\xfe\xdb\xb9\x44\xa1\x88\x7b\x9c\x80\x39\xe4\x2b\x64\x48\x14\xa1\xaa\x30\x0c\xab\x21\xe6\x66\xcb\x6a\x1c\x9b\xea\xc9\x83\xfa\xbd\xd3\x46\x84\xb1\x25\x92\x35\xa4\x52\x67\x23\x2f\x49\x2b\x56\x2c\xfc\xd3\x5a\x83\x8a\x8f\x11\x53\x1f\x1c\x5d\x52\x9e\xdc\xa2\x52\x5f\x40\xcf\x86\x54\x05\x80\x50\x92\x7a\xf2\xd5\x6f\x97\x3d\x5f\xc7\x40\x7a\x13\x97\x9c\x2f\x3b\xb6\x1f\xaf\xab\x87\x61\x47\xed\x20\x0f\xbc\xd4\x60\x83\x46\xca\x8d\x38\x31\x63\xf1\x94\x22\xfa\x06\xb9\x74\x11\xbb\x2f\x09\xb8\x81\x5b\xcf\x25\x2f\xb0\x5f\x2b\x80\xd9\x72\x82\x99\xb1\x31\x41\xef\xaf\x68\x97\x7d\xc0\x6a\x14\xce\x30\x40\x1d\x65\x47\x4f\x5f\x89\x71\x94\xe7\xe9\x66\x72\x22\x68\x83\x04\x59\x4d\x48\xf3\x7e\xd7\x0a\xb3\x0a\xf3\xf7\xe1\x98\x87\x12\xfc\xb7\xbf\x91\x85\x23\x8a\x1c\xd8\x70\x01\xfc\x6f\x96\x47\xde\x5c\x56\x89\x56\x0a\xcd\xf5\x72\x43\xe5\x82\x62\x6b\x86\xac\x56\x52\xc8\xb2\x06\x12\x17\x04\xbf\x95\x34\xa6\x61\xde\x43\xfa\x18\xa6\xf4\x59\x41\x91\xe3\x1b\xef\x58\x03\xc2\x5f\x5c\xdc\xea\xc5\x4d\xa5\x61\xfa\xc6\x6c\xf7\xb3\x92\x7c\xba\xac\x45\x53\xf2\x4c\x6c\xac\x2d\x4f\xdb\xef\xe8\x42\x10\x86\xcc\x4e\x7b\x6a\xec\x44\xc3\xea\x59\xac\x80\x7e\x3a\xcc\x65\x5d\x10\xc3\x54\x5f\xb2\x20\xed\x69\x26\xb6\x46\xca\xbe\x6d\x3f\xb2\xe5\xe4\xca\x12\xe9\x5b\x5f\x18\x09\xe5\xfa\x7d\xd1\x6f\x29\xde\x01\xdb\x1a\xf6\xbf\x34\xbf\x9c\x5f\xb0\x96\x53\x91\xa1\x71\x6b\xdd\x40\x59\xf6\x96\x3d\x80\x29\x62\x9a\x2e\x35\x02\xfe\x4e\x69\xb4\xa9\xce\x22\xa4\xb2\x39\x0b\x56\x1f\x81\x3f\x1f\xdf\x94\xe7\xb5\x6d\xd3\xa5\x50\xab\x8e\x62\x4c\x48\xf8\xff\x10\x54\x97\xea\x80\x10\xf1\xc9\x9d\x1f\x51\xd2\x7b\x93\xe0\x2c\x88\xf9\x0e\xde\xb1\x0c\x18\xbd\x6d\x88\xb9\xb9\xc7\x67\xb1\x1c\xc7\xaa\x19\x32\x81\x12\x92\xf9\xea\x62\xa9\x54\x9f\xba\xdb\x32\xea\x02\xd7\xd2\xb6\x66\x2c\x78\x24\xc2\x01\xdc\x4b\xc2\x46\x3f\x91\xa0\x99\x95\x39\x09\xbf\xb3\x8e\xbd\x60\x3e\x81\xf7\x31\xf2\x15\xad\x07\x57\x18\x7f\x81\xac\xc7\x83\x60\xac\x3d\xd2\x0c\x6d\xad\x5f\xf7\x1a\xfe\xb5\xfb\x49\x17\xa4\x59\x0f\x4f\x6a\x09\x42\xfe\x4d\xbb\x31\xe1\xeb\x85\x79\xd8\xc2\xbe\x52\xbd\x35\xcf\x3b\x8b\x33\x81\xd7\x90\x6b\x52\x78\x94\xfb\xb9\x86\x75\xe2\x75\xc5\xc8\x35\x36\xee\x59\x35\xe1\xa9\xaa\x33\xef\x17\x13\xde\x32\xda\xfd\x39\xc9\xb4\x29\xac\x52\x81\x26\xef\x3e\xb4\xb2\xd1\x2b\x8d\xa5\x6b\xdb\x98\x20\xbd\xdc\x2c\x17\x5f\xab\x8e\x4b\x60\x41\x93\x26\xa0\xe6\x0a\x74\x57\xe5\xe9\x5c\xc6\xf5\x58\x11\xdb\x4a\xa6\xf5\xdc\x41\xfb\xc6\xe2\xc7\xc2\x5c\x21\xb8\xbf\x09\x44\x74\x0f\xf0\xeb\xb7\x88\x1d\x12\x5a\x42\x73\xb2\x5d\xcb\x09\xad\x8c\xf7\x67\xac\x42\xe9\xad\x2d\xb5\x29\x8c\x2d\x84', 2)
dist/pytransform/__init__.py ADDED
@@ -0,0 +1,483 @@
1
+ # These modules are also used by the protection code, so that the protection
2
+ # code needn't import anything
3
+ import os
4
+ import platform
5
+ import sys
6
+ import struct
7
+
8
+ # ctypes was added in Python 2.5, so pytransform doesn't work
9
+ # before Python 2.5
10
+ #
11
+ from ctypes import cdll, c_char, c_char_p, c_int, c_void_p, \
12
+ pythonapi, py_object, PYFUNCTYPE, CFUNCTYPE
13
+ from fnmatch import fnmatch
14
+
15
+ #
16
+ # Supported platforms
17
+ #
18
+ plat_path = 'platforms'
19
+
20
+ plat_table = (
21
+ ('windows', ('windows', 'cygwin*')),
22
+ ('darwin', ('darwin',)),
23
+ ('ios', ('ios',)),
24
+ ('linux', ('linux*',)),
25
+ ('freebsd', ('freebsd*', 'openbsd*', 'isilon onefs')),
26
+ ('poky', ('poky',)),
27
+ )
28
+
29
+ arch_table = (
30
+ ('x86', ('i?86', )),
31
+ ('x86_64', ('x64', 'x86_64', 'amd64', 'intel')),
32
+ ('arm', ('armv5',)),
33
+ ('armv6', ('armv6l',)),
34
+ ('armv7', ('armv7l',)),
35
+ ('ppc64', ('ppc64le',)),
36
+ ('mips32', ('mips',)),
37
+ ('aarch32', ('aarch32',)),
38
+ ('aarch64', ('aarch64', 'arm64'))
39
+ )
40
+
41
+ #
42
+ # Hardware type
43
+ #
44
+ HT_HARDDISK, HT_IFMAC, HT_IPV4, HT_IPV6, HT_DOMAIN = range(5)
45
+
46
+ #
47
+ # Global
48
+ #
49
+ _pytransform = None
50
+
51
+
52
+ class PytransformError(Exception):
53
+ pass
54
+
55
+
56
+ def dllmethod(func):
57
+ def wrap(*args, **kwargs):
58
+ return func(*args, **kwargs)
59
+ return wrap
60
+
61
+
62
+ @dllmethod
63
+ def version_info():
64
+ prototype = PYFUNCTYPE(py_object)
65
+ dlfunc = prototype(('version_info', _pytransform))
66
+ return dlfunc()
67
+
68
+
69
+ @dllmethod
70
+ def init_pytransform():
71
+ major, minor = sys.version_info[0:2]
72
+ # Python 2.5 has sys.maxint instead of sys.maxsize
73
+ # bitness = 64 if sys.maxsize > 2**32 else 32
74
+ prototype = PYFUNCTYPE(c_int, c_int, c_int, c_void_p)
75
+ init_module = prototype(('init_module', _pytransform))
76
+ ret = init_module(major, minor, pythonapi._handle)
77
+ if (ret & 0xF000) == 0x1000:
78
+ raise PytransformError('Initialize python wrapper failed (%d)'
79
+ % (ret & 0xFFF))
80
+ return ret
81
+
82
+
83
+ @dllmethod
84
+ def init_runtime():
85
+ prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
86
+ _init_runtime = prototype(('init_runtime', _pytransform))
87
+ return _init_runtime(0, 0, 0, 0)
88
+
89
+
90
+ @dllmethod
91
+ def encrypt_code_object(pubkey, co, flags, suffix=''):
92
+ _pytransform.set_option(6, suffix.encode())
93
+ prototype = PYFUNCTYPE(py_object, py_object, py_object, c_int)
94
+ dlfunc = prototype(('encrypt_code_object', _pytransform))
95
+ return dlfunc(pubkey, co, flags)
96
+
97
+
98
+ @dllmethod
99
+ def generate_license_key(prikey, keysize, rcode):
100
+ prototype = PYFUNCTYPE(py_object, c_char_p, c_int, c_char_p)
101
+ dlfunc = prototype(('generate_license_key', _pytransform))
102
+ return dlfunc(prikey, keysize, rcode) if sys.version_info[0] == 2 \
103
+ else dlfunc(prikey, keysize, rcode.encode())
104
+
105
+
106
+ @dllmethod
107
+ def get_registration_code():
108
+ prototype = PYFUNCTYPE(py_object)
109
+ dlfunc = prototype(('get_registration_code', _pytransform))
110
+ return dlfunc()
111
+
112
+
113
+ @dllmethod
114
+ def get_expired_days():
115
+ prototype = PYFUNCTYPE(py_object)
116
+ dlfunc = prototype(('get_expired_days', _pytransform))
117
+ return dlfunc()
118
+
119
+
120
+ @dllmethod
121
+ def clean_obj(obj, kind):
122
+ prototype = PYFUNCTYPE(c_int, py_object, c_int)
123
+ dlfunc = prototype(('clean_obj', _pytransform))
124
+ return dlfunc(obj, kind)
125
+
126
+
127
+ def clean_str(*args):
128
+ tdict = {
129
+ 'str': 0,
130
+ 'bytearray': 1,
131
+ 'unicode': 2
132
+ }
133
+ for obj in args:
134
+ k = tdict.get(type(obj).__name__)
135
+ if k is None:
136
+ raise RuntimeError('Cannot clean object: %s' % obj)
137
+ clean_obj(obj, k)
138
+
139
+
140
+ def get_hd_info(hdtype, name=None):
141
+ if hdtype not in range(HT_DOMAIN + 1):
142
+ raise RuntimeError('Invalid parameter hdtype: %s' % hdtype)
143
+ size = 256
144
+ t_buf = c_char * size
145
+ buf = t_buf()
146
+ cname = c_char_p(0 if name is None
147
+ else name.encode('utf-8') if hasattr(name, 'encode')
148
+ else name)
149
+ if (_pytransform.get_hd_info(hdtype, buf, size, cname) == -1):
150
+ raise PytransformError('Get hardware information failed')
151
+ return buf.value.decode()
152
+
153
+
154
+ def show_hd_info():
155
+ return _pytransform.show_hd_info()
156
+
157
+
158
+ def assert_armored(*names):
159
+ prototype = PYFUNCTYPE(py_object, py_object)
160
+ dlfunc = prototype(('assert_armored', _pytransform))
161
+
162
+ def wrapper(func):
163
+ def wrap_execute(*args, **kwargs):
164
+ dlfunc(names)
165
+ return func(*args, **kwargs)
166
+ return wrap_execute
167
+ return wrapper
168
+
169
+
170
+ def check_armored(*names):
171
+ try:
172
+ prototype = PYFUNCTYPE(py_object, py_object)
173
+ prototype(('assert_armored', _pytransform))(names)
174
+ return True
175
+ except RuntimeError:
176
+ return False
177
+
178
+
179
+ def get_license_info():
180
+ info = {
181
+ 'ISSUER': None,
182
+ 'EXPIRED': None,
183
+ 'HARDDISK': None,
184
+ 'IFMAC': None,
185
+ 'IFIPV4': None,
186
+ 'DOMAIN': None,
187
+ 'DATA': None,
188
+ 'CODE': None,
189
+ }
190
+ rcode = get_registration_code().decode()
191
+ if rcode.startswith('*VERSION:'):
192
+ index = rcode.find('\n')
193
+ info['ISSUER'] = rcode[9:index].split('.')[0].replace('-sn-1.txt', '')
194
+ rcode = rcode[index+1:]
195
+
196
+ index = 0
197
+ if rcode.startswith('*TIME:'):
198
+ from time import ctime
199
+ index = rcode.find('\n')
200
+ info['EXPIRED'] = ctime(float(rcode[6:index]))
201
+ index += 1
202
+
203
+ if rcode[index:].startswith('*FLAGS:'):
204
+ index += len('*FLAGS:') + 1
205
+ info['FLAGS'] = ord(rcode[index - 1])
206
+
207
+ prev = None
208
+ start = index
209
+ for k in ['HARDDISK', 'IFMAC', 'IFIPV4', 'DOMAIN', 'FIXKEY', 'CODE']:
210
+ index = rcode.find('*%s:' % k)
211
+ if index > -1:
212
+ if prev is not None:
213
+ info[prev] = rcode[start:index]
214
+ prev = k
215
+ start = index + len(k) + 2
216
+ info['CODE'] = rcode[start:]
217
+ i = info['CODE'].find(';')
218
+ if i > 0:
219
+ info['DATA'] = info['CODE'][i+1:]
220
+ info['CODE'] = info['CODE'][:i]
221
+ return info
222
+
223
+
224
+ def get_license_code():
225
+ return get_license_info()['CODE']
226
+
227
+
228
+ def get_user_data():
229
+ return get_license_info()['DATA']
230
+
231
+
232
+ def _match_features(patterns, s):
233
+ for pat in patterns:
234
+ if fnmatch(s, pat):
235
+ return True
236
+
237
+
238
+ def _gnu_get_libc_version():
239
+ try:
240
+ prototype = CFUNCTYPE(c_char_p)
241
+ ver = prototype(('gnu_get_libc_version', cdll.LoadLibrary('')))()
242
+ return ver.decode().split('.')
243
+ except Exception:
244
+ pass
245
+
246
+
247
+ def format_platform(platid=None):
248
+ if platid:
249
+ return os.path.normpath(platid)
250
+
251
+ plat = platform.system().lower()
252
+ mach = platform.machine().lower()
253
+
254
+ for alias, platlist in plat_table:
255
+ if _match_features(platlist, plat):
256
+ plat = alias
257
+ break
258
+
259
+ if plat == 'linux':
260
+ cname, cver = platform.libc_ver()
261
+ if cname == 'musl':
262
+ plat = 'musl'
263
+ elif cname == 'libc':
264
+ plat = 'android'
265
+ elif cname == 'glibc':
266
+ v = _gnu_get_libc_version()
267
+ if v and len(v) >= 2 and (int(v[0]) * 100 + int(v[1])) < 214:
268
+ plat = 'centos6'
269
+
270
+ for alias, archlist in arch_table:
271
+ if _match_features(archlist, mach):
272
+ mach = alias
273
+ break
274
+
275
+ if plat == 'windows' and mach == 'x86_64':
276
+ bitness = struct.calcsize('P'.encode()) * 8
277
+ if bitness == 32:
278
+ mach = 'x86'
279
+
280
+ return os.path.join(plat, mach)
281
+
282
+
283
+ # Load _pytransform library
284
+ def _load_library(path=None, is_runtime=0, platid=None, suffix='', advanced=0):
285
+ path = os.path.dirname(__file__) if path is None \
286
+ else os.path.normpath(path)
287
+
288
+ plat = platform.system().lower()
289
+ for alias, platlist in plat_table:
290
+ if _match_features(platlist, plat):
291
+ plat = alias
292
+ break
293
+
294
+ name = '_pytransform' + suffix
295
+ if plat == 'linux':
296
+ filename = os.path.abspath(os.path.join(path, name + '.so'))
297
+ elif plat in ('darwin', 'ios'):
298
+ filename = os.path.join(path, name + '.dylib')
299
+ elif plat == 'windows':
300
+ filename = os.path.join(path, name + '.dll')
301
+ elif plat in ('freebsd', 'poky'):
302
+ filename = os.path.join(path, name + '.so')
303
+ else:
304
+ filename = None
305
+
306
+ if platid is not None and os.path.isfile(platid):
307
+ filename = platid
308
+ elif platid is not None or not os.path.exists(filename) or not is_runtime:
309
+ libpath = platid if platid is not None and os.path.isabs(platid) else \
310
+ os.path.join(path, plat_path, format_platform(platid))
311
+ filename = os.path.join(libpath, os.path.basename(filename))
312
+
313
+ if filename is None:
314
+ raise PytransformError('Platform %s not supported' % plat)
315
+
316
+ if not os.path.exists(filename):
317
+ raise PytransformError('Could not find "%s"' % filename)
318
+
319
+ try:
320
+ m = cdll.LoadLibrary(filename)
321
+ except Exception as e:
322
+ if sys.flags.debug:
323
+ print('Load %s failed:\n%s' % (filename, e))
324
+ raise
325
+
326
+ # Removed from v4.6.1
327
+ # if plat == 'linux':
328
+ # m.set_option(-1, find_library('c').encode())
329
+
330
+ if not os.path.abspath('.') == os.path.abspath(path):
331
+ m.set_option(1, path.encode() if sys.version_info[0] == 3 else path)
332
+ elif (not is_runtime) and sys.platform.startswith('cygwin'):
333
+ path = os.environ['PYARMOR_CYGHOME']
334
+ m.set_option(1, path.encode() if sys.version_info[0] == 3 else path)
335
+
336
+ # Required from Python3.6
337
+ m.set_option(2, sys.byteorder.encode())
338
+
339
+ if sys.flags.debug:
340
+ m.set_option(3, c_char_p(1))
341
+ m.set_option(4, c_char_p(not is_runtime))
342
+
343
+ # Disable advanced mode by default
344
+ m.set_option(5, c_char_p(not advanced))
345
+
346
+ # Set suffix for private package
347
+ if suffix:
348
+ m.set_option(6, suffix.encode())
349
+
350
+ return m
351
+
352
+
353
+ def pyarmor_init(path=None, is_runtime=0, platid=None, suffix='', advanced=0):
354
+ global _pytransform
355
+ _pytransform = _load_library(path, is_runtime, platid, suffix, advanced)
356
+ return init_pytransform()
357
+
358
+
359
+ def pyarmor_runtime(path=None, suffix='', advanced=0):
360
+ if _pytransform is not None:
361
+ return
362
+
363
+ try:
364
+ pyarmor_init(path, is_runtime=1, suffix=suffix, advanced=advanced)
365
+ init_runtime()
366
+ except Exception as e:
367
+ if sys.flags.debug or hasattr(sys, '_catch_pyarmor'):
368
+ raise
369
+ sys.stderr.write("%s\n" % str(e))
370
+ sys.exit(1)
371
+
372
+
373
+ # ----------------------------------------------------------
374
+ # End of pytransform
375
+ # ----------------------------------------------------------
376
+
377
+ #
378
+ # Unused
379
+ #
380
+
381
+
382
+ @dllmethod
383
+ def generate_license_file(filename, priname, rcode, start=-1, count=1):
384
+ prototype = PYFUNCTYPE(c_int, c_char_p, c_char_p, c_char_p, c_int, c_int)
385
+ dlfunc = prototype(('generate_project_license_files', _pytransform))
386
+ return dlfunc(filename.encode(), priname.encode(), rcode.encode(),
387
+ start, count) if sys.version_info[0] == 3 \
388
+ else dlfunc(filename, priname, rcode, start, count)
389
+
390
+ #
391
+ # Not available from v5.6
392
+ #
393
+
394
+
395
+ def generate_capsule(licfile):
396
+ prikey, pubkey, prolic = _generate_project_capsule()
397
+ capkey, newkey = _generate_pytransform_key(licfile, pubkey)
398
+ return prikey, pubkey, capkey, newkey, prolic
399
+
400
+
401
+ @dllmethod
402
+ def _generate_project_capsule():
403
+ prototype = PYFUNCTYPE(py_object)
404
+ dlfunc = prototype(('generate_project_capsule', _pytransform))
405
+ return dlfunc()
406
+
407
+
408
+ @dllmethod
409
+ def _generate_pytransform_key(licfile, pubkey):
410
+ prototype = PYFUNCTYPE(py_object, c_char_p, py_object)
411
+ dlfunc = prototype(('generate_pytransform_key', _pytransform))
412
+ return dlfunc(licfile.encode() if sys.version_info[0] == 3 else licfile,
413
+ pubkey)
414
+
415
+
416
+ #
417
+ # Deprecated functions from v5.1
418
+ #
419
+
420
+
421
+ @dllmethod
422
+ def encrypt_project_files(proname, filelist, mode=0):
423
+ prototype = PYFUNCTYPE(c_int, c_char_p, py_object, c_int)
424
+ dlfunc = prototype(('encrypt_project_files', _pytransform))
425
+ return dlfunc(proname.encode(), filelist, mode)
426
+
427
+
428
+ def generate_project_capsule(licfile):
429
+ prikey, pubkey, prolic = _generate_project_capsule()
430
+ capkey = _encode_capsule_key_file(licfile)
431
+ return prikey, pubkey, capkey, prolic
432
+
433
+
434
+ @dllmethod
435
+ def _encode_capsule_key_file(licfile):
436
+ prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p)
437
+ dlfunc = prototype(('encode_capsule_key_file', _pytransform))
438
+ return dlfunc(licfile.encode(), None)
439
+
440
+
441
+ @dllmethod
442
+ def encrypt_files(key, filelist, mode=0):
443
+ t_key = c_char * 32
444
+ prototype = PYFUNCTYPE(c_int, t_key, py_object, c_int)
445
+ dlfunc = prototype(('encrypt_files', _pytransform))
446
+ return dlfunc(t_key(*key), filelist, mode)
447
+
448
+
449
+ @dllmethod
450
+ def generate_module_key(pubname, key):
451
+ t_key = c_char * 32
452
+ prototype = PYFUNCTYPE(py_object, c_char_p, t_key, c_char_p)
453
+ dlfunc = prototype(('generate_module_key', _pytransform))
454
+ return dlfunc(pubname.encode(), t_key(*key), None)
455
+
456
+ #
457
+ # Compatible for PyArmor v3.0
458
+ #
459
+
460
+
461
+ @dllmethod
462
+ def old_init_runtime(systrace=0, sysprofile=1, threadtrace=0, threadprofile=1):
463
+ '''Only for old version, before PyArmor 3'''
464
+ pyarmor_init(is_runtime=1)
465
+ prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
466
+ _init_runtime = prototype(('init_runtime', _pytransform))
467
+ return _init_runtime(systrace, sysprofile, threadtrace, threadprofile)
468
+
469
+
470
+ @dllmethod
471
+ def import_module(modname, filename):
472
+ '''Only for old version, before PyArmor 3'''
473
+ prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p)
474
+ _import_module = prototype(('import_module', _pytransform))
475
+ return _import_module(modname.encode(), filename.encode())
476
+
477
+
478
+ @dllmethod
479
+ def exec_file(filename):
480
+ '''Only for old version, before PyArmor 3'''
481
+ prototype = PYFUNCTYPE(c_int, c_char_p)
482
+ _exec_file = prototype(('exec_file', _pytransform))
483
+ return _exec_file(filename.encode())
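For context, an entry script obfuscated with PyArmor 7.x bootstraps this module in two lines before handing its byte-string payload to the runtime. The sketch below shows the generated call shape; the payload literal is elided, but it is the kind of blob shown at the top of this commit, whose trailing ", 2)" matches this form:

    from pytransform import pyarmor_runtime
    pyarmor_runtime()                           # loads _pytransform.so/.dll and calls init_runtime()
    __pyarmor__(__name__, __file__, b'...', 2)  # builtin installed by the runtime; executes the payload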
dist/pytransform/_pytransform.dll ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36408d46959d79b1a949f0bd425c5204be797c967007dd7f8517cafe6cd79a3f
3
+ size 1165824
dist/pytransform/_pytransform.so ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:827135fa090d0b226050cba0d44ce81d69db628f446b2c77b5c2a1d71916275d
3
+ size 1198080
images/LRID_outdoor_x5_004_iso25600.dng ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3976c558d29f362364aa7cb425eb1edddd2d1a7187102d94b823f1fa4b1fa5d8
3
+ size 32142308
images/LRID_outdoor_x5_004_iso6400.dng ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:15e514215592deca0a048c59a84d2c5287d616b388b83f8aadd51d0781a1c314
3
+ size 32142308
pyarmor.sh ADDED
@@ -0,0 +1,54 @@
1
+ #!/bin/bash
2
+
3
+ # Define variables
4
+ PY_FILE="isp_algos.py"
5
+ PRIVATE_DIR="private"
6
+ PYARMOR_VERSION="7.7.4"
7
+
8
+ # Check that the source file exists
9
+ if [ ! -f "$PY_FILE" ]; then
10
+ echo "Error: file $PY_FILE does not exist!"
11
+ exit 1
12
+ fi
13
+
14
+ # Check that pyarmor is installed and the version matches
15
+ if ! command -v pyarmor &> /dev/null; then
16
+ echo "Error: pyarmor is not installed! Please install pyarmor $PYARMOR_VERSION first"
17
+ exit 1
18
+ fi
19
+
20
+ INSTALLED_VERSION=$(pyarmor --version | awk '{print $2}')
21
+ if [ "$INSTALLED_VERSION" != "$PYARMOR_VERSION" ]; then
22
+ echo "Warning: pyarmor version is not $PYARMOR_VERSION"
23
+ echo "Installed version: $INSTALLED_VERSION"
24
+ echo "Continuing with obfuscation..."
25
+ # Uncomment the next line to enforce a strict version check
26
+ # exit 1
27
+ fi
28
+
29
+ # Create the private directory (if it does not exist)
30
+ mkdir -p "$PRIVATE_DIR"
31
+
32
+ # Obfuscate the file with pyarmor
33
+ echo "Obfuscating $PY_FILE with pyarmor..."
34
+ pyarmor obfuscate --exact "$PY_FILE"
35
+
36
+ # Check whether obfuscation succeeded
37
+ if [ $? -ne 0 ]; then
38
+ echo "Error: pyarmor obfuscation failed!"
39
+ exit 1
40
+ fi
41
+
42
+ # Move the source file into the private directory
43
+ echo "Moving the source file to $PRIVATE_DIR..."
44
+ mv "$PY_FILE" "$PRIVATE_DIR/"
45
+
46
+ # Check whether the move succeeded
47
+ if [ $? -ne 0 ]; then
48
+ echo "Error: failed to move the source file!"
49
+ exit 1
50
+ fi
51
+
52
+ echo "Done!"
53
+ echo "The obfuscated file has been written to the dist directory"
54
+ echo "The source file has been saved to $PRIVATE_DIR/$PY_FILE"
requirements.txt ADDED
@@ -0,0 +1,22 @@
1
+ torch>=2.0.0 --extra-index-url https://download.pytorch.org/whl/cu121
2
+ matplotlib
3
+ opencv-python
4
+ bm3d
5
+ scipy
6
+ scikit-learn
7
+ scikit-image
8
+ lpips
9
+ rawpy
10
+ exifread
11
+ tqdm
12
+ pyyaml
13
+ h5py
14
+ natsort
15
+ kornia
16
+ gradio
17
+ numpy
18
+ Pillow
19
+ einops
20
+ timm
21
+ pyarmor
22
+ pytorch_wavelets
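Note that the first line couples torch to the CUDA 12.1 wheel index inline; the remaining packages are unpinned, so a plain "pip install -r requirements.txt" resolves to the latest compatible releases.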
runfiles/Gaussian/gru32n_ft.yml ADDED
@@ -0,0 +1,57 @@
1
+ mode: 'train'
2
+ checkpoint: 'saved_model/Gaussian'
3
+ fast_ckpt: 'checkpoints/Gaussian'
4
+ model_name: 'Gaussian_gru32n_lsdir2sid'
5
+ method_name: 'YOND_ANY_cal_gru32n_lsdir'
6
+ result_dir: './images/'
7
+ num_gpu: 1
8
+ num_workers: 2
9
+
10
+ pipeline:
11
+ data_type: "LRID" # ["SIDD", "DND"]
12
+ full_est: True
13
+ est_type: 'simple+full' # ["simple", "Foi", "Liu", "PGE", "Ours"]
14
+ full_dn: True
15
+ vst_type: 'exact' # ["exact", "asym", "nnVST", "Ours"] ("Ours" also has pre-processing and post-processing variants)
16
+ bias_corr: 'pre' # 'pre' = pre-processing, 'post' = post-processing
17
+ denoiser_type: 'unetn' # ["BM3D", "FBI", "DMID"]
18
+ iter: 'iter' # ["iter", "once"]
19
+ max_iter: 1
20
+ clip: False
21
+ epoch: 10
22
+ sigma_t: 0.8
23
+ eta_t: 0.85
24
+ history: False
25
+
26
+ dst: &base_dst
27
+ root_dir: 'LRID'
28
+ dataset: 'LRID_Dataset'
29
+ mode: 'eval'
30
+ dstname: ['indoor_x5', 'indoor_x3', 'outdoor_x3']
31
+ command: ''
32
+ H: 3472
33
+ W: 4624
34
+ wp: 1023
35
+ bl: 64
36
+ clip: False
37
+ gpu_preprocess: False
38
+ dst_eval:
39
+ <<: *base_dst
40
+ ratio_list: [1,2]
41
+ dstname: ['indoor_x5']
42
+ mode: 'eval'
43
+ dst_test:
44
+ <<: *base_dst
45
+ ratio_list: [1,2]
46
+ dstname: ['outdoor_x3']
47
+ mode: 'test'
48
+
49
+ arch:
50
+ name: 'GuidedResUnet'
51
+ guided: True
52
+ in_nc: 4
53
+ out_nc: 4
54
+ nf: 32
55
+ nframes: 1
56
+ res: False
57
+ norm: True
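As a quick sanity check, a runfile like this loads with plain PyYAML; safe_load resolves the &base_dst anchor and the << merge keys, so dst_eval and dst_test inherit every base field they do not override. A minimal sketch:

    import yaml

    with open('runfiles/Gaussian/gru32n_ft.yml') as f:
        cfg = yaml.safe_load(f)

    print(cfg['pipeline']['vst_type'])  # 'exact'
    print(cfg['dst_eval']['H'])         # 3472, inherited from base_dst
    print(cfg['dst_eval']['dstname'])   # ['indoor_x5'], overriding base_dst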
runfiles/Gaussian/gru32n_paper_clip.yml ADDED
@@ -0,0 +1,70 @@
1
+ mode: 'train'
2
+ checkpoint: 'saved_model/Gaussian'
3
+ fast_ckpt: 'checkpoints/Gaussian'
4
+ model_name: 'Gaussian_GRU_mix_5to50_norm'
5
+ method_name: 'YOND_ANY_cal_gru32n_lsdir'
6
+ result_dir: './images/'
7
+ num_gpu: 1
8
+ num_workers: 2
9
+
10
+ pipeline:
11
+ data_type: "LRID" # ["SIDD", "DND"]
12
+ full_est: True
13
+ est_type: 'simple+full' # ["simple", "Foi", "Liu", "PGE", "Ours"]
14
+ full_dn: True
15
+ vst_type: 'exact' # ["exact", "asym", "nnVST", "Ours"] ("Ours" also has pre-processing and post-processing variants)
16
+ bias_corr: 'pre' # 'pre' = pre-processing, 'post' = post-processing
17
+ denoiser_type: 'unetn' # ["BM3D", "FBI", "DMID"]
18
+ iter: 'iter' # ["iter", "once"]
19
+ max_iter: 1
20
+ clip: True
21
+ epoch: 10
22
+ sigma_t: 0.8
23
+ eta_t: 0.85
24
+ history: False
25
+
26
+ dst: &base_dst
27
+ root_dir: 'LRID'
28
+ dataset: 'LRID_Dataset'
29
+ mode: 'eval'
30
+ dstname: ['indoor_x5', 'indoor_x3', 'outdoor_x3']
31
+ command: ''
32
+ H: 3472
33
+ W: 4624
34
+ wp: 1023
35
+ bl: 64
36
+ clip: False
37
+ gpu_preprocess: False
38
+ dst_eval:
39
+ <<: *base_dst
40
+ ratio_list: [1,2]
41
+ dstname: ['indoor_x5']
42
+ mode: 'eval'
43
+ dst_test:
44
+ <<: *base_dst
45
+ ratio_list: [1,2]
46
+ dstname: ['outdoor_x3']
47
+ mode: 'test'
48
+
49
+ arch:
50
+ name: 'GuidedResUnet'
51
+ guided: True
52
+ in_nc: 4
53
+ out_nc: 4
54
+ nf: 32
55
+ nframes: 1
56
+ res: True
57
+ norm: True
58
+
59
+ mcgmask32n_vm240k_detach_sigbid_0527:
60
+ weights: 'checkpoints/Mask/Mask_mcgmn32n_vm240kmix_detach_sigbid_0527_18.40_last_model.pth'
61
+ name: 'MultiCascadeGuidedMaskNet'
62
+ mode: 'diff+detach+sigbid+meanmask'
63
+ guided: True
64
+ depth: 6
65
+ in_nc: 20
66
+ out_nc: 4
67
+ nc: 4
68
+ nf: 32
69
+ res: True
70
+ norm: True
runfiles/Gaussian/gru32n_paper_noclip.yml ADDED
@@ -0,0 +1,57 @@
1
+ mode: 'train'
2
+ checkpoint: 'saved_model/Gaussian'
3
+ fast_ckpt: 'checkpoints/Gaussian'
4
+ model_name: 'Gaussian_GRU_mix_5to50_norm_noclip'
5
+ method_name: 'YOND_ANY_cal_gru32n_lsdir'
6
+ result_dir: './images/'
7
+ num_gpu: 1
8
+ num_workers: 2
9
+
10
+ pipeline:
11
+ data_type: "LRID" # ["SIDD", "DND"]
12
+ full_est: True
13
+ est_type: 'simple+full' # ["simple", "Foi", "Liu", "PGE", "Ours"]
14
+ full_dn: True
15
+ vst_type: 'exact' # ["exact", "asym", "nnVST", "Ours"] ("Ours" also has pre-processing and post-processing variants)
16
+ bias_corr: 'pre' # 'pre' = pre-processing, 'post' = post-processing
17
+ denoiser_type: 'unetn' # ["BM3D", "FBI", "DMID"]
18
+ iter: 'iter' # ["iter", "once"]
19
+ max_iter: 1
20
+ clip: False
21
+ epoch: 10
22
+ sigma_t: 0.8
23
+ eta_t: 0.85
24
+ history: False
25
+
26
+ dst: &base_dst
27
+ root_dir: 'LRID'
28
+ dataset: 'LRID_Dataset'
29
+ mode: 'eval'
30
+ dstname: ['indoor_x5', 'indoor_x3', 'outdoor_x3']
31
+ command: ''
32
+ H: 3472
33
+ W: 4624
34
+ wp: 1023
35
+ bl: 64
36
+ clip: False
37
+ gpu_preprocess: False
38
+ dst_eval:
39
+ <<: *base_dst
40
+ ratio_list: [1,2]
41
+ dstname: ['indoor_x5']
42
+ mode: 'eval'
43
+ dst_test:
44
+ <<: *base_dst
45
+ ratio_list: [1,2]
46
+ dstname: ['outdoor_x3']
47
+ mode: 'test'
48
+
49
+ arch:
50
+ name: 'GuidedResUnet'
51
+ guided: True
52
+ in_nc: 4
53
+ out_nc: 4
54
+ nf: 32
55
+ nframes: 1
56
+ res: True
57
+ norm: True
runfiles/Gaussian/gru64n_paper_noclip.yml ADDED
@@ -0,0 +1,57 @@
1
+ mode: 'train'
2
+ checkpoint: 'saved_model/Gaussian'
3
+ fast_ckpt: 'checkpoints/Gaussian'
4
+ model_name: 'Gaussian_gru64n_mix_noclip'
5
+ method_name: 'YOND_ANY_cal_gru32n_lsdir'
6
+ result_dir: './images/'
7
+ num_gpu: 1
8
+ num_workers: 2
9
+
10
+ pipeline:
11
+ data_type: "LRID" # ["SIDD", "DND"]
12
+ full_est: True
13
+ est_type: 'simple+full' # ["simple", "Foi", "Liu", "PGE", "Ours"]
14
+ full_dn: True
15
+ vst_type: 'exact' # ["exact", "asym", "nnVST", "Ours"] ("Ours" also has pre-processing and post-processing variants)
16
+ bias_corr: 'pre' # 'pre' = pre-processing, 'post' = post-processing
17
+ denoiser_type: 'unetn' # ["BM3D", "FBI", "DMID"]
18
+ iter: 'iter' # ["iter", "once"]
19
+ max_iter: 1
20
+ clip: False
21
+ epoch: 10
22
+ sigma_t: 0.8
23
+ eta_t: 0.85
24
+ history: False
25
+
26
+ dst: &base_dst
27
+ root_dir: 'LRID'
28
+ dataset: 'LRID_Dataset'
29
+ mode: 'eval'
30
+ dstname: ['indoor_x5', 'indoor_x3', 'outdoor_x3']
31
+ command: ''
32
+ H: 3472
33
+ W: 4624
34
+ wp: 1023
35
+ bl: 64
36
+ clip: False
37
+ gpu_preprocess: False
38
+ dst_eval:
39
+ <<: *base_dst
40
+ ratio_list: [1,2]
41
+ dstname: ['indoor_x5']
42
+ mode: 'eval'
43
+ dst_test:
44
+ <<: *base_dst
45
+ ratio_list: [1,2]
46
+ dstname: ['outdoor_x3']
47
+ mode: 'test'
48
+
49
+ arch:
50
+ name: 'GuidedResUnet'
51
+ guided: True
52
+ in_nc: 4
53
+ out_nc: 4
54
+ nf: 64
55
+ nframes: 1
56
+ res: True
57
+ norm: True
utils/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ from .utils import *
2
+ from .isp_ops import *
3
+ # from .isp_algos import *
4
+ from .video_ops import *
5
+ from .visualization import *
6
+ from .sidd_utils import *
7
+ from .auto_rawread import rawread
utils/auto_rawread.py ADDED
@@ -0,0 +1,200 @@
1
+ import numpy as np
2
+ import cv2
3
+
4
+ def bit_check(bit_num):
5
+ return 2**bit_num-1, 2**(bit_num-4)
6
+
7
+ def rawread(name, pt = -1):
8
+ bl = 64
9
+ wh = 1023
10
+ if name.endswith('dng'):
11
+ import rawpy
12
+ a = rawpy.imread(name).raw_image_visible.copy()
13
+ h,w = a.shape[:2]
14
+ elif name.endswith('npy'):
15
+ a = np.load(name)
16
+ if len(a.shape) > 2:
17
+ a = rggb2bayer(a)
18
+ h,w = a.shape[:2]
19
+ else:
20
+ a = np.fromfile(name, 'uint16')
21
+ # Infer the bit depth from the maximum sample value
22
+ wh, bl = bit_check(10)
23
+ if a.max()>2*(2**14):
24
+ wh, bl = bit_check(16)
25
+ elif a.max()>2*(2**12):
26
+ wh, bl = bit_check(14)
27
+ elif a.max()>2*(2**10):
28
+ wh, bl = bit_check(12)
29
+ print('bl:', bl, 'wh:',wh, 'max=', a.max())
30
+ # Infer the resolution from the raw buffer length
31
+ if len(a) == 4512*6016:
32
+ h = 4512
33
+ w = 6016
34
+ elif len(a) == 3000*4000:
35
+ h = 3000
36
+ w = 4000
37
+ elif len(a) == 1824*2432:
38
+ h = 1824
39
+ w = 2432
40
+ elif np.abs(len(a) - 2448*3264)<=1024:
41
+ h = 2448
42
+ w = 3264
43
+ elif np.abs(len(a) - 2400*3200)<=1024:
44
+ h = 2400
45
+ w = 3200
46
+ elif np.abs(len(a) - 1824*2432)<=1024:
47
+ h = 1824
48
+ w = 2432
49
+ elif len(a) == 4640*3472:
50
+ h = 3472
51
+ w = 4640
52
+ elif len(a) == 2632*3504:
53
+ h = 2632
54
+ w = 3504
55
+ elif len(a) == 1940*2592:
56
+ h = 1940
57
+ w = 2592
58
+ elif len(a) == 3472*4624:
59
+ h = 3472
60
+ w = 4624
61
+ elif len(a) == 3072*4096:
62
+ h = 3072
63
+ w = 4096
64
+ elif len(a) == 2720*3648:
65
+ h = 2720
66
+ w = 3648
67
+ elif len(a) == 3072*4080:
68
+ h = 3072
69
+ w = 4080
70
+ elif len(a) == 2304*4096:
71
+ h = 2304
72
+ w = 4096
73
+ elif len(a) == 2304*1728:
74
+ h = 1728
75
+ w = 2304
76
+ elif len(a) == 4160*3120:
77
+ h = 3120
78
+ w = 4160
79
+ elif np.abs(len(a) - 3648*2736)<=1024:
80
+ h = 2736
81
+ w = 3648
82
+ elif np.abs(len(a) - 3648*2736*4)<=1024:
83
+ h = 2736*2
84
+ w = 3648*2
85
+ elif np.abs(len(a) - 4096*3072)<=1024:
86
+ h = 3072
87
+ w = 4096
88
+ elif np.abs(len(a) - 4160*3120)<=1024:
89
+ h = 3120
90
+ w = 4160
91
+ elif np.abs(len(a) - 3264*2432)<=1024:
92
+ h = 2432
93
+ w = 3264
94
+ elif len(a)==4032*3024:
95
+ h = 3024
96
+ w = 4032
97
+ elif len(a)==4208*3120:
98
+ h = 3120
99
+ w = 4208
100
+ elif len(a)==2944*2208:
101
+ h = 2208
102
+ w = 2944
103
+ elif len(a)==3840*2160:
104
+ h = 2160
105
+ w = 3840
106
+ elif len(a)==2880*1616:
107
+ h = 1616
108
+ w = 2880
109
+ elif len(a)==2880*1624:
110
+ h = 1624
111
+ w = 2880
112
+ elif len(a)==2880*1620:
113
+ h = 1620
114
+ w = 2880
115
+ elif len(a)==2688*1520:
116
+ h = 1520
117
+ w = 2688
118
+ elif len(a)==1920*1080:
119
+ h = 1080
120
+ w = 1920
121
+ print('h:',h,'w:',w)
122
+ a = a[:h*w].reshape([h, w])
123
+ m0 = a[::2, ::2].mean()
124
+ m1 = a[::2, 1::2].mean()
125
+ m2 = a[1::2, ::2].mean()
126
+ m3 = a[1::2, 1::2].mean()
127
+ m12 = max(m1, m2)/min(m1, m2)
128
+ m03 = max(m0, m3)/min(m0, m3)
129
+ if m12<m03:
130
+ # greens on the anti-diagonal:
131
+ # layout X G / G X (RGGB or BGGR)
132
+ if m0>m3:
133
+ #RGGB
134
+ mode = 0
135
+ else: # BGGR
136
+ mode = 3
137
+ else:
138
+ # greens on the main diagonal:
139
+ # layout G X / X G (GRBG or GBRG)
140
+ if m1>m2:
141
+ #GRBG
142
+ mode = 1
143
+ else:
144
+ #GBRG
145
+ mode = 2
146
+ if pt>=0:
147
+ mode = pt
148
+ print('mode:', mode)
149
+ # Flip rows/columns so any pattern becomes RGGB
150
+ if mode == 0:
151
+ pass
152
+ elif mode==1:
153
+ a = a[:, ::-1]
154
+ elif mode==2:
155
+ a = a[::-1, :]
156
+ elif mode==3:
157
+ a = a[::-1, ::-1]
158
+ return {'raw':a,
159
+ 'h':h,
160
+ 'w':w,
161
+ 'bl':bl,
162
+ 'wp':wh,
163
+ 'mode':mode}
164
+
165
+ def rgb2rggb(rgb):
166
+ R = rgb[::2, ::2, 0:1]
167
+ Gr = rgb[::2, 1::2, 1:2]
168
+ Gb = rgb[1::2, ::2, 1:2]
169
+ B = rgb[1::2, 1::2, 2:]
170
+ rggb = np.concatenate((R, Gr, Gb, B), axis=2)
171
+ return rggb
172
+
173
+ def rggb2bayer(rggb):
174
+ h,w = rggb.shape[:2]
175
+ bayer = rggb.reshape([h, w, 2, 2]).transpose([0, 2, 1, 3]).reshape([h*2, w*2])
176
+ return bayer
177
+
178
+ def bayer2rggb(bayer):
179
+ h,w = bayer.shape[:2]
180
+ rggb = bayer.reshape([h//2, 2, w//2, 2]).transpose([0, 2, 1, 3]).reshape([h//2, w//2, 4])
181
+ return rggb
182
+
183
+ def easydemoisac(bayer):
184
+ if len(bayer.shape)==3:
185
+ bayer = rggb2bayer(bayer)
186
+ h,w = bayer.shape
187
+ rgb = np.zeros([h//2, w//2, 3], bayer.dtype)
188
+ rgb[:, :, 0] = bayer[::2, ::2]
189
+ rgb[:, :, 1] = (bayer[::2, 1::2]+bayer[1::2, ::2])*0.5
190
+ rgb[:, :, 2] = bayer[1::2, 1::2]
191
+ rgb = cv2.GaussianBlur(rgb, (3, 3), 0)
192
+ return rgb
193
+
194
+ def easyremosaic(rgb):
195
+ bayer = np.zeros((rgb.shape[0], rgb.shape[1]))
196
+ bayer[0::2, 0::2] = rgb[0::2, 0::2, 0]
197
+ bayer[0::2, 1::2] = rgb[0::2, 1::2, 1]
198
+ bayer[1::2, 0::2] = rgb[1::2, 0::2, 1]
199
+ bayer[1::2, 1::2] = rgb[1::2, 1::2, 2]
200
+ return bayer
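A minimal usage sketch of the Bayer helpers above (assuming this module's functions are in scope; `easydemoisac` keeps the author's original spelling):

import numpy as np
bayer = (np.random.rand(16, 16) * 1023).astype(np.uint16)   # synthetic RGGB mosaic
rggb = bayer2rggb(bayer)                                    # pack into (8, 8, 4) planes
assert np.array_equal(rggb2bayer(rggb), bayer)              # lossless round trip
preview = easydemoisac(bayer.astype(np.float32) / 1023.)    # quick half-resolution RGB preview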
utils/isp_ops.py ADDED
@@ -0,0 +1,296 @@
1
+ from .utils import *
2
+
3
+ def read_wb_ccm(raw):
4
+ wb = np.array(raw.camera_whitebalance)
5
+ wb /= wb[1]
6
+ wb = wb.astype(np.float32)
7
+ ccm = raw.color_matrix[:3, :3].astype(np.float32)
8
+ if ccm[0,0] == 0:
9
+ ccm = np.eye(3, dtype=np.float32)
10
+ return wb, ccm
11
+
12
+ def get_ISO_ExposureTime(filepath):
13
+ # not limited to RAW; this also works for RGB images
14
+ raw_file = open(filepath, 'rb')
15
+ exif_file = exifread.process_file(raw_file, details=False, strict=True)
16
+ # get the exposure time
17
+ if 'EXIF ExposureTime' in exif_file:
18
+ exposure_str = exif_file['EXIF ExposureTime'].printable
19
+ else:
20
+ exposure_str = exif_file['Image ExposureTime'].printable
21
+ if '/' in exposure_str:
22
+ numerator = float(exposure_str.split('/')[0])
23
+ denominator = float(exposure_str.split('/')[-1])
24
+ exposure = numerator / denominator
25
+ else:
26
+ exposure = float(exposure_str)
27
+ # get the ISO
28
+ if 'EXIF ISOSpeedRatings' in exif_file:
29
+ ISO_str = exif_file['EXIF ISOSpeedRatings'].printable
30
+ else:
31
+ ISO_str = exif_file['Image ISOSpeedRatings'].printable
32
+ if '/' in ISO_str:
33
+ numerator = float(ISO_str.split('/')[0])
34
+ denominator = float(ISO_str.split('/')[-1])
35
+ ISO = numerator / denominator
36
+ else:
37
+ ISO = float(ISO_str)
38
+ info = {'ISO':int(ISO), 'ExposureTime':exposure, 'name':filepath.split('/')[-1]}
39
+ raw_file.close()  # close the handle opened above
+ return info
40
+
41
+ def metainfo(rawpath):
42
+ with open(rawpath, 'rb') as f:
43
+ tags = exifread.process_file(f)
44
+ _, suffix = os.path.splitext(os.path.basename(rawpath))
45
+
46
+ if suffix == '.dng':
47
+ expo = eval(str(tags['Image ExposureTime']))
48
+ iso = eval(str(tags['Image ISOSpeedRatings']))
49
+ else:
50
+ expo = eval(str(tags['EXIF ExposureTime']))
51
+ iso = eval(str(tags['EXIF ISOSpeedRatings']))
52
+
53
+ # print('ISO: {}, ExposureTime: {}'.format(iso, expo))
54
+ return iso, expo
55
+
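A short sketch of the two EXIF readers above; 'sample.dng' is a placeholder path, not a file shipped with this repo:

iso, expo = metainfo('sample.dng')          # (ISO, exposure time in seconds)
info = get_ISO_ExposureTime('sample.dng')   # {'ISO': ..., 'ExposureTime': ..., 'name': 'sample.dng'}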
56
+ # Yuzhi Wang's ISP
57
+ def bayer2rggb(bayer):
58
+ H, W = bayer.shape
59
+ return bayer.reshape(H//2, 2, W//2, 2).transpose(0, 2, 1, 3).reshape(H//2, W//2, 4)
60
+
61
+ def rggb2bayer(rggb):
62
+ H, W, _ = rggb.shape
63
+ return rggb.reshape(H, W, 2, 2).transpose(0, 2, 1, 3).reshape(H*2, W*2)
64
+
65
+ def bayer2rggbs(bayers):
66
+ H, W = bayers.shape[-2:]
67
+ return bayers.reshape(-1, H//2, 2, W//2, 2).permute(0, 1, 3, 2, 4).reshape(-1, H//2, W//2, 4)
68
+
69
+ def rggb2bayers(rggbs):
70
+ H, W, _ = rggbs.shape[-3:]
71
+ return rggbs.reshape(-1, H, W, 2, 2).permute(0, 1, 3, 2, 4).reshape(-1, H*2, W*2)
72
+
73
+ def bayer2rows(bayer):
74
+ # split the Bayer mosaic into its two row phases
75
+ H, W = bayer.shape
76
+ return np.stack((bayer[0:H:2], bayer[1:H:2]))
77
+
78
+ def bayer2gray(raw):
79
+ # bayer-to-gray, equivalent to bilinear interpolation
80
+ kernel = np.array([[1,2,1],[2,4,2],[1,2,1]], np.float32) / 16.
81
+ gray = cv2.filter2D(raw, -1, kernel, borderType=cv2.BORDER_REFLECT)
82
+ return gray
83
+
84
+ def rows2bayer(rows):
85
+ c, H, W = rows.shape
86
+ bayer = np.empty((H*2, W))
87
+ bayer[0:H*2:2] = rows[0]
88
+ bayer[1:H*2:2] = rows[1]
89
+ return bayer
90
+
91
+ # Kaixuan Wei's ISP
92
+ def raw2bayer(raw, wp=1023, bl=64, norm=True, clip=False, bias=np.array([0,0,0,0])):
93
+ raw = raw.astype(np.float32)
94
+ H, W = raw.shape
95
+ out = np.stack((raw[0:H:2, 0:W:2], #RGBG
96
+ raw[0:H:2, 1:W:2],
97
+ raw[1:H:2, 1:W:2],
98
+ raw[1:H:2, 0:W:2]), axis=0).astype(np.float32)
99
+ if norm:
100
+ bl = bias + bl
101
+ bl = bl.reshape(4, 1, 1)
102
+ out = (out - bl) / (wp - bl)
103
+ if clip: out = np.clip(out, 0, 1)
104
+ return out.astype(np.float32)
105
+
106
+ def bayer2raw(packed_raw, wp=16383, bl=512):
107
+ if torch.is_tensor(packed_raw):
108
+ packed_raw = packed_raw.detach()
109
+ packed_raw = packed_raw[0].cpu().float().numpy()
110
+ packed_raw = np.clip(packed_raw, 0, 1)
111
+ packed_raw = packed_raw * (wp - bl) + bl
112
+ C, H, W = packed_raw.shape
113
+ H *= 2
114
+ W *= 2
115
+ raw = np.empty((H, W), dtype=np.uint16)
116
+ raw[0:H:2, 0:W:2] = packed_raw[0, :,:]
117
+ raw[0:H:2, 1:W:2] = packed_raw[1, :,:]
118
+ raw[1:H:2, 1:W:2] = packed_raw[2, :,:]
119
+ raw[1:H:2, 0:W:2] = packed_raw[3, :,:]
120
+ return raw
121
+
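A round-trip sketch of the normalize/denormalize pair above, assuming Sony A7S2-style levels (wp=16383, bl=512):

import numpy as np
raw = np.random.randint(512, 16383, (8, 8)).astype(np.uint16)
packed = raw2bayer(raw, wp=16383, bl=512)         # (4, 4, 4) planes, normalized to [0, 1)
restored = bayer2raw(packed, wp=16383, bl=512)    # back to an (8, 8) uint16 mosaic
assert np.abs(restored.astype(np.int32) - raw.astype(np.int32)).max() <= 1  # up to float truncation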
122
+ # Hansen Feng's ISP
123
+ def repair_bad_pixels(raw, bad_points, method='median'):
124
+ fixed_raw = bayer2rggb(raw)
125
+ for i in range(4):
126
+ fixed_raw[:,:,i] = cv2.medianBlur(fixed_raw[:,:,i],3)
127
+ fixed_raw = rggb2bayer(fixed_raw)
128
+ # raw = (1-bpc_map) * raw + bpc_map * fixed_raw
129
+ for p in bad_points:
130
+ raw[p[0],p[1]] = fixed_raw[p[0],p[1]]
131
+ return raw
132
+
133
+ def SimpleISP(raw, bl=0, wp=1, wb=[2,1,1,2], gamma=2.2):
134
+ # rggb2RGB (SimpleISP)
135
+ raw = (raw.astype(np.float32) - bl) / (wp-bl)
136
+ wb = np.array(wb)
137
+ raw = raw * wb.reshape(1,1,-1)
138
+ raw = raw.clip(0, 1)[:,:,(0,1,3)]
139
+ raw = raw ** (1/gamma)
140
+ return raw
141
+
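A sketch of SimpleISP on a packed RGGB image; the white-balance gains here are illustrative, real ones come from read_wb_ccm:

import numpy as np
rggb = np.random.rand(64, 64, 4).astype(np.float32)   # packed RGGB in [0, 1]
rgb = SimpleISP(rggb, bl=0, wp=1, wb=[2, 1, 1, 2])    # (64, 64, 3), gamma-encoded
assert rgb.shape == (64, 64, 3) and rgb.max() <= 1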
142
+ def FastISP(img4c, wb=None, ccm=None, gamma=2.2, low_mem=True):
143
+ # rgbg2RGB (FastISP)
144
+ if torch.is_tensor(img4c):
145
+ img4c = img4c[0].detach().cpu().numpy()
146
+ h,w = img4c.shape[:2]
147
+ H = h * 2
148
+ W = w * 2
149
+ raw = np.zeros((H,W), np.float32)
150
+ red_gain = wb[0] if wb is not None else 2
151
+ blue_gain = wb[2] if wb is not None else 2
152
+ raw[0:H:2,0:W:2] = img4c[:,:,0] * red_gain # R
153
+ raw[0:H:2,1:W:2] = img4c[:,:,1] # G1
154
+ raw[1:H:2,0:W:2] = img4c[:,:,2] # G2
155
+ raw[1:H:2,1:W:2] = img4c[:,:,3] * blue_gain # B
156
+ raw = np.clip(raw, 0, 1)
157
+ white_point = 16383
158
+ raw = raw * white_point
159
+ img = cv2.cvtColor(raw.astype(np.uint16), cv2.COLOR_BAYER_BG2RGB_EA) / white_point
160
+ if ccm is None: ccm = np.eye(3, dtype=np.float32)
161
+ img = img[:, :, None, :].astype(np.float32)
162
+ ccm = ccm[None, None, :, :].astype(np.float32)
163
+ if low_mem:
164
+ n = 8
165
+ h = img.shape[0] // n
166
+ img_ccm = img.copy()
167
+ img = img[:,:,0]
168
+ for i in range(n):
169
+ img[h*i:h*(i+1)] = np.sum(img_ccm[h*i:h*(i+1)] * ccm, axis=-1)
170
+ img[h*n-h:] = np.sum(img_ccm[h*n-h:] * ccm, axis=-1)
171
+ else:
172
+ img = np.sum(img * ccm, axis=-1)
173
+ img = np.clip(img, 0, 1) ** (1/gamma)
174
+ return img
175
+
176
+ def raw2rgb_rawpy(packed_raw, wb=None, ccm=None, raw=None):
177
+ """Raw2RGB pipeline (rawpy postprocess version)"""
178
+ if raw is None:
179
+ if packed_raw.shape[-2] > 1500:
180
+ raw = rawpy.imread('templet.dng')
181
+ wp = 1023
182
+ bl = 64
183
+ else:
184
+ raw = rawpy.imread('templet.ARW')
185
+ wp = 16383
186
+ bl = 512
187
+ if wb is None:
188
+ wb = np.array(raw.camera_whitebalance)
189
+ wb /= wb[1]
190
+ wb = list(wb)
191
+ if ccm is None:
192
+ try:
193
+ ccm = raw.rgb_camera_matrix[:3, :3]
194
+ except:
195
+ warnings.warn("Without Kaixuan Wei's customized rawpy, the correct CCM of the Sony A7S2 cannot be read...")
196
+ ccm = raw.color_matrix[:3, :3]
197
+ elif np.max(np.abs(ccm - np.identity(3))) == 0:
198
+ ccm = np.array([[ 1.9712269,-0.6789218,-0.29230508],
199
+ [-0.29104823,1.748401,-0.45735288],
200
+ [ 0.02051281,-0.5380369,1.5175241 ]], dtype=np.float32)
201
+
202
+ if len(packed_raw.shape) >= 3:
203
+ raw.raw_image_visible[:] = bayer2raw(packed_raw, wp, bl)
204
+ else: # the input is already a full-resolution raw image
205
+ raw.raw_image_visible[:] = packed_raw
206
+
207
+ out = raw.postprocess(use_camera_wb=False, user_wb=wb, half_size=False, no_auto_bright=True,
208
+ output_bps=8, bright=1, user_black=None, user_sat=None)
209
+ return out
210
+
211
+ def mask_pos_init(seq=7, max_speed=20, patch_size=512, crop_size=256):
212
+ ps, cs = patch_size, crop_size
213
+ pos = np.zeros((seq, 2), np.int16)
214
+ v = np.random.randint(2*max_speed+1, size=2) - max_speed
215
+ max_a = (max_speed // 4) * 2 // 2 + 1
216
+ # simulate camera motion
217
+ for i in range(1, seq):
218
+ pxmin, pymin = pos.min(axis=0)
219
+ pxmax, pymax = pos.max(axis=0)
220
+ # keep the crop inside the patch
221
+ if pxmax - pxmin >= ps - cs: v[0] = -v[0]
222
+ if pymax - pymin >= ps - cs: v[1] = -v[1]
223
+ pos[i] = pos[i-1] + v
224
+ a = np.random.randint(2*max_a+1, size=2) - max_a
225
+ v = (v + a).clip(-max_speed, max_speed)
226
+
227
+ pxmin, pymin = pos.min(axis=0)
228
+ pxmax, pymax = pos.max(axis=0)
229
+ # keep the crop inside the patch
230
+ if pxmax - pxmin >= ps - cs:
231
+ pos[:,0] = np.int16(pos[:,0] / (pxmax-pxmin+1))
232
+ if pymax - pymin >= ps - cs:
233
+ pos[:,1] = np.int16(pos[:,1] / (pymax-pymin+1))
234
+ pxmin, pymin = pos.min(axis=0)
235
+ pxmax, pymax = pos.max(axis=0)
236
+ pxstart = np.random.randint(-pxmin, ps - cs - pxmax + 1)
237
+ pystart = np.random.randint(-pymin, ps - cs - pymax + 1)
238
+ pos[:,0] += pxstart
239
+ pos[:,1] += pystart
240
+ return pos
241
+
242
+ def generate_gradient_square(sx, sy, min_val_range=(0.1, 0.9), max_val_range=(0.1, 0.9), color_aug=0.5, device='cuda'):
243
+ """
244
+ Generate a three-channel gradient square image. min_val and max_val are drawn randomly from the given ranges;
245
+ the x- and y-gradients are shaped by randomly chosen functions, and each channel may be randomly rescaled.
246
+
247
+ Args:
248
+ sx (int): width of the square.
249
+ sy (int): height of the square.
250
+ min_val_range (tuple): range of the minimum value, default (0.1, 0.9).
251
+ max_val_range (tuple): range of the maximum value, default (0.1, 0.9).
252
+ color_aug: probability of random color augmentation, must lie in (0, 1).
253
+ device (str): device type, either 'cuda' or 'cpu'.
254
+
255
+ Returns:
256
+ torch.Tensor: the generated three-channel gradient image, shape (3, sy, sx).
257
+ """
258
+ # randomly pick the min and max values
259
+ min_val = random.uniform(*min_val_range)
260
+ max_val = random.uniform(max(min_val, max_val_range[0]), max_val_range[1])
261
+
262
+ # create linear gradients along x and y (from 0 to 1)
263
+ x = torch.linspace(0, 1, steps=sx, device=device, dtype=torch.float32)
264
+ y = torch.linspace(0, 1, steps=sy, device=device, dtype=torch.float32)
265
+
266
+ # randomly choose shaping functions and parameters
267
+ functions = [
268
+ lambda t: t.flip(0) if random.choice([True, False]) else t,
269
+ lambda t: torch.sin(2 * torch.pi * random.uniform(0.25, 4) * t) / 2 + 0.5,
270
+ lambda t: t ** random.uniform(0.25, 4)
271
+ ]
272
+ x_func = random.choice(functions)
273
+ y_func = random.choice(functions)
274
+
275
+ # apply the random functions
276
+ x_grad = x_func(x)
277
+ y_grad = y_func(y)
278
+
279
+ # expand the gradients into 2D matrices
280
+ x_matrix = x_grad.unsqueeze(0).expand(sy, sx)
281
+ y_matrix = y_grad.unsqueeze(1).expand(sy, sx)
282
+
283
+ # multiply the x and y gradients
284
+ combined_grad = x_matrix * y_matrix
285
+
286
+ # expand to three channels
287
+ grad_3c = combined_grad.unsqueeze(0).expand(3, sy, sx)
288
+
289
+ # randomly rescale each channel (with probability color_aug)
290
+ color_scales = torch.tensor([random.uniform(0, 1) for _ in range(3)], device=device, dtype=torch.float32).view(3, 1, 1)
291
+ grad_3c = grad_3c * color_scales / color_scales.max() if random.uniform(0, 1) < color_aug else grad_3c
292
+
293
+ # rescale to the chosen min/max (the base range is 0~1)
294
+ scaled_grad = min_val + (max_val - min_val) * grad_3c
295
+
296
+ return scaled_grad
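A usage sketch of the gradient generator above (run on the CPU so no GPU is assumed):

sq = generate_gradient_square(sx=256, sy=128, device='cpu')
print(sq.shape)   # torch.Size([3, 128, 256]); values lie within the sampled [min_val, max_val]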
utils/sidd_utils.py ADDED
@@ -0,0 +1,304 @@
1
+ from .isp_ops import *
2
+
3
+ def read_metadata(metadata):
4
+ meta = metadata['metadata'][0, 0]
5
+ beta1, beta2 = meta['UnknownTags'][7, 0][2][0][0:2]
6
+ cam = get_cam(meta)
7
+ bayer_pattern = get_bayer_pattern(meta)
8
+ # We found that the correct Bayer pattern is GBRG in S6
9
+ if cam == 'S6':
10
+ bayer_pattern = [1, 2, 0, 1]
11
+ bayer_2by2 = (np.asarray(bayer_pattern) + 1).reshape((2, 2)).tolist()
12
+ wb = get_wb(meta)
13
+ cst1, cst2 = get_csts(meta) # use cst2 for rendering
14
+ iso = get_iso(meta)
15
+ metadata = {
16
+ 'meta': meta, 'beta1': beta1, 'beta2': beta2,
17
+ 'bayer_2by2': bayer_2by2, 'wb':wb, 'cst2':cst2,
18
+ 'iso':iso, 'cam':cam
19
+ }
20
+ return metadata
21
+
22
+ def get_iso(metadata):
23
+ try:
24
+ iso = metadata['ISOSpeedRatings'][0][0]
25
+ except:
26
+ try:
27
+ iso = metadata['DigitalCamera'][0, 0]['ISOSpeedRatings'][0][0]
28
+ except:
29
+ raise Exception('ISO not found.')
30
+ return iso
31
+
32
+
33
+ def get_cam(metadata):
34
+ model = metadata['Make'][0]
35
+ cam_dict = {'Apple': 'IP', 'Google': 'GP', 'samsung': 'S6', 'motorola': 'N6', 'LGE': 'G4'}
36
+ return cam_dict[model]
37
+
38
+
39
+ def get_bayer_pattern(metadata):
40
+ bayer_id = 33422
41
+ bayer_tag_idx = 1
42
+ try:
43
+ unknown_tags = metadata['UnknownTags']
44
+ if unknown_tags[bayer_tag_idx]['ID'][0][0][0] == bayer_id:
45
+ bayer_pattern = unknown_tags[bayer_tag_idx]['Value'][0][0]
46
+ else:
47
+ raise Exception
48
+ except:
49
+ try:
50
+ unknown_tags = metadata['SubIFDs'][0, 0]['UnknownTags'][0, 0]
51
+ if unknown_tags[bayer_tag_idx]['ID'][0][0][0] == bayer_id:
52
+ bayer_pattern = unknown_tags[bayer_tag_idx]['Value'][0][0]
53
+ else:
54
+ raise Exception
55
+ except:
56
+ try:
57
+ unknown_tags = metadata['SubIFDs'][0, 1]['UnknownTags']
58
+ if unknown_tags[1]['ID'][0][0][0] == bayer_id:
59
+ bayer_pattern = unknown_tags[bayer_tag_idx]['Value'][0][0]
60
+ else:
61
+ raise Exception
62
+ except:
63
+ print('Bayer pattern not found. Assuming RGGB.')
64
+ bayer_pattern = [1, 2, 2, 3]
65
+ return bayer_pattern
66
+
67
+
68
+ def get_wb(metadata):
69
+ return metadata['AsShotNeutral']
70
+
71
+
72
+ def get_csts(metadata):
73
+ return metadata['ColorMatrix1'].reshape((3, 3)), metadata['ColorMatrix2'].reshape((3, 3))
74
+
75
+
76
+ def toTensor(patch, cam):
77
+ # Convert Bayer into BGGR
78
+ if cam == 'IP': # RGGB
79
+ patch = np.rot90(patch, 2)
80
+ elif cam == 'S6': # GBRG
81
+ patch = np.flip(patch, axis=1)
82
+ else: # GP, N6, G4: BGGR
83
+ patch = patch
84
+ # Space to depth
85
+ patch = space_to_depth(np.expand_dims(patch, axis=-1))
86
+ # Add the batch size channel
87
+ patch = np.expand_dims(patch, 0)
88
+ # To tensor
89
+ tensor = torch.from_numpy(patch.transpose((0, 3, 1, 2))).float()
90
+
91
+ return tensor
92
+
93
+
94
+ def toPatch(tensor, cam):
95
+ # To numpy
96
+ patch = tensor.cpu().detach().numpy()
97
+ # Depth to space and squeeze the batch size channel
98
+ patch = np.squeeze(depth_to_space(np.transpose(patch[0],(1,2,0))), axis=2)
99
+ # Convert back to the original Bayer pattern
100
+ if cam == 'IP': # BGGR
101
+ patch = np.rot90(patch, 2)
102
+ elif cam == 'S6': # GBRG
103
+ patch = np.flip(patch, axis=1)
104
+ else: # GP, N6, G4: BGGR
105
+ patch = patch
106
+
107
+ return patch
108
+
109
+ def toTensor_nf(patch, cam):
110
+ # Convert Bayer into RGGB
111
+ if cam == 'IP': # RGGB
112
+ patch = patch
113
+ elif cam == 'S6': # GBRG
114
+ patch = np.rot90(patch, 3)
115
+ else: # GP, N6, G4: BGGR
116
+ patch = np.rot90(patch, 2)
117
+ # Space to depth
118
+ patch = space_to_depth(np.expand_dims(patch, axis=-1))
119
+ # Add the batch size channel
120
+ patch = np.expand_dims(patch, 0)
121
+
122
+ return patch
123
+
124
+ def toPatch_nf(patch, cam):
125
+ # Depth to space and squeeze the batch size channel
126
+ patch = np.squeeze(depth_to_space(patch[0]), axis=2)
127
+ # Convert back to the original Bayer pattern
128
+ if cam == 'IP': # RGGB
129
+ patch = patch
130
+ elif cam == 'S6': # GBRG
131
+ patch = np.rot90(patch, 1)
132
+ else: # GP, N6, G4: BGGR
133
+ patch = np.rot90(patch, 2)
134
+
135
+ return patch
136
+
137
+ def space_to_depth(x, block_size=2):
138
+ x = np.asarray(x)
139
+ height, width, depth = x.shape
140
+ reduced_height = height // block_size
141
+ reduced_width = width // block_size
142
+ y = x.reshape(reduced_height, block_size, reduced_width, block_size, depth)
143
+ z = np.swapaxes(y, 1, 2).reshape(reduced_height, reduced_width, -1)
144
+ return z
145
+
146
+ def depth_to_space(x, block_size=2):
147
+ x = np.asarray(x)
148
+ height, width, _ = x.shape
149
+ increased_height = height * block_size
150
+ increased_width = width * block_size
151
+ y = x.reshape(height, width, block_size, block_size, -1)
152
+ z = np.swapaxes(y, 1, 2).reshape(increased_height, increased_width, -1)
153
+ return z
154
+
155
+
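The two rearrangement helpers above are exact inverses, e.g.:

import numpy as np
x = np.random.rand(8, 8, 1)                   # single-channel image
z = space_to_depth(x)                         # (4, 4, 4)
assert np.array_equal(depth_to_space(z), x)   # exact round trip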
156
+ def process_sidd_image(image, bayer_pattern, wb, cst, *, save_file_rgb=None):
157
+ """Simple processing pipeline"""
158
+ image = image.clip(0, 1)
159
+ image = flip_bayer(image, bayer_pattern)
160
+ image = stack_rggb_channels(image)
161
+ rgb2xyz = np.array(
162
+ [
163
+ [0.4124564, 0.3575761, 0.1804375],
164
+ [0.2126729, 0.7151522, 0.0721750],
165
+ [0.0193339, 0.1191920, 0.9503041],
166
+ ]
167
+ )
168
+ rgb2cam = np.matmul(cst, rgb2xyz)
169
+ cam2rgb = np.linalg.inv(rgb2cam)
170
+ cam2rgb = cam2rgb / np.sum(cam2rgb, axis=-1, keepdims=True)
171
+ image_srgb = process(image, 1 / wb[0][0], 1 / wb[0][1], 1 / wb[0][2], cam2rgb)
172
+ image_srgb = swap_channels(image_srgb)
173
+ image_srgb = image_srgb * 255.0
174
+ image_srgb = image_srgb.astype(np.uint8)
175
+
176
+ if save_file_rgb:
177
+ # Save
178
+ cv2.imwrite(save_file_rgb, image_srgb)
179
+
180
+ return image_srgb
181
+
182
+ def flip_bayer(image, bayer_pattern):
183
+ if (bayer_pattern == [[1, 2], [2, 3]]):
184
+ pass
185
+ elif (bayer_pattern == [[2, 1], [3, 2]]):
186
+ image = np.fliplr(image)
187
+ elif (bayer_pattern == [[2, 3], [1, 2]]):
188
+ image = np.flipud(image)
189
+ elif (bayer_pattern == [[3, 2], [2, 1]]):
190
+ image = np.fliplr(image)
191
+ image = np.flipud(image)
192
+ else:
193
+ raise ValueError('Unknown Bayer pattern.')
196
+ return image
197
+
198
+ def rot_bayer(image, bayer_pattern, rev=False, axis=(-2, -1)):
199
+ if (bayer_pattern == [[1, 2], [2, 3]]):
200
+ k = 0
201
+ elif (bayer_pattern == [[2, 1], [3, 2]]):
202
+ k = 3
203
+ elif (bayer_pattern == [[2, 3], [1, 2]]):
204
+ k = 1
205
+ elif (bayer_pattern == [[3, 2], [2, 1]]):
206
+ k = 2
207
+ else:
208
+ raise ValueError('Unknown Bayer pattern.')
211
+ if rev: k = (4-k) % 4
212
+ image = np.rot90(image, k=k, axes=axis)
213
+ return image
214
+
215
+ def stack_rggb_channels(raw_image):
216
+ """Stack the four RGGB channels of a Bayer raw image along a third dimension"""
217
+ height, width = raw_image.shape
218
+ channels = []
219
+ for yy in range(2):
220
+ for xx in range(2):
221
+ raw_image_c = raw_image[yy:height:2, xx:width:2].copy()
222
+ channels.append(raw_image_c)
223
+ channels = np.stack(channels, axis=-1)
224
+ return channels
225
+
226
+ def swap_channels(image):
227
+ """Swap the order of channels: RGB --> BGR"""
228
+ h, w, c = image.shape
229
+ image1 = np.zeros(image.shape)
230
+ for i in range(c):
231
+ image1[:, :, i] = image[:, :, c - i - 1]
232
+ return image1
233
+
234
+ def RGGB2Bayer(im):
235
+ # convert RGGB stacked image to one channel Bayer
236
+ bayer = np.zeros((im.shape[0] * 2, im.shape[1] * 2))
237
+ bayer[0::2, 0::2] = im[:, :, 0]
238
+ bayer[0::2, 1::2] = im[:, :, 1]
239
+ bayer[1::2, 0::2] = im[:, :, 2]
240
+ bayer[1::2, 1::2] = im[:, :, 3]
241
+ return bayer
242
+
243
+ def demosaic_CV2(rggb_channels_stack):
244
+ # using opencv demosaic
245
+ bayer = RGGB2Bayer(rggb_channels_stack)
246
+ dem = cv2.cvtColor(np.clip(bayer * 16383, 0, 16383).astype(dtype=np.uint16), cv2.COLOR_BayerBG2RGB_EA)
247
+ dem = dem.astype(dtype=np.float32) / 16383
248
+ return dem
249
+
250
+ def apply_gains(bayer_image, red_gains, green_gains, blue_gains):
251
+ gains = np.stack([red_gains, green_gains, green_gains, blue_gains], axis=-1)
252
+ gains = gains[np.newaxis, np.newaxis, :]
253
+ return bayer_image * gains
254
+
255
+ def demosaic_simple(rggb_channels_stack):
256
+ channels_rgb = rggb_channels_stack[:, :, :3].copy()  # copy so the input stack is not mutated
257
+ channels_rgb[:, :, 0] = rggb_channels_stack[:, :, 0]
258
+ channels_rgb[:, :, 1] = np.mean(rggb_channels_stack[:, :, 1:3], axis=2)
259
+ channels_rgb[:, :, 2] = rggb_channels_stack[:, :, 3]
260
+ return channels_rgb
261
+
262
+ def apply_ccm(image, ccm):
263
+ images = image[:, :, np.newaxis, :]
264
+ ccms = ccm[np.newaxis, np.newaxis, :, :]
265
+ return np.sum(images * ccms, axis=-1)
266
+
267
+ def gamma_compression(images, gamma=2.2):
268
+ return np.maximum(images, 1e-8) ** (1.0 / gamma)
269
+
270
+ def process(bayer_images, red_gains, green_gains, blue_gains, cam2rgbs):
271
+ bayer_images = apply_gains(bayer_images, red_gains, green_gains, blue_gains)
272
+ bayer_images = np.clip(bayer_images, 0.0, 1.0)
273
+ images = demosaic_CV2(bayer_images)
274
+ images = apply_ccm(images, cam2rgbs)
275
+ images = np.clip(images, 0.0, 1.0)
276
+ images = gamma_compression(images)
277
+ return images
278
+
279
+
280
+ def get_histogram(data, bin_edges=None, left_edge=0.0, right_edge=1.0, n_bins=1000):
281
+ data_range = right_edge - left_edge
282
+ bin_width = data_range / n_bins
283
+ if bin_edges is None:
284
+ bin_edges = np.arange(left_edge, right_edge + bin_width, bin_width)
285
+ bin_centers = bin_edges[:-1] + (bin_width / 2.0)
286
+ n = np.prod(data.shape)
287
+ hist, _ = np.histogram(data, bin_edges)
288
+ return hist / n, bin_centers
289
+
290
+ def cal_kld(p_data, q_data, left_edge=0.0, right_edge=1.0, n_bins=1000):
291
+ """Returns forward, inverse, and symmetric KL divergence between two sets of data points p and q"""
292
+ bw = 0.2 / 64
293
+ bin_edges = np.concatenate(([-1000.0], np.arange(-0.1, 0.1 + 1e-9, bw), [1000.0]), axis=0)
294
+ p, _ = get_histogram(p_data, bin_edges, left_edge, right_edge, n_bins)
295
+ q, _ = get_histogram(q_data, bin_edges, left_edge, right_edge, n_bins)
296
+ idx = (p > 0) & (q > 0)
297
+ p = p[idx]
298
+ q = q[idx]
299
+ logp = np.log(p)
300
+ logq = np.log(q)
301
+ kl_fwd = np.sum(p * (logp - logq))
302
+ kl_inv = np.sum(q * (logq - logp))
303
+ kl_sym = (kl_fwd + kl_inv) / 2.0
304
+ return kl_fwd #, kl_inv, kl_sym
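A usage sketch of cal_kld comparing two synthetic noise samples (the sigmas are illustrative only):

import numpy as np
p = np.random.normal(0, 0.010, 100000)   # e.g. real noise residuals
q = np.random.normal(0, 0.012, 100000)   # e.g. simulated noise residuals
print(cal_kld(p, q))                     # small value means similar distributions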
utils/utils.py ADDED
@@ -0,0 +1,556 @@
1
+ import os
2
+ # os.environ['OPENBLAS_WARNINGS'] = '0'
3
+ # os.environ["OMP_NUM_THREADS"] = "1"
4
+ # os.environ["MKL_NUM_THREADS"] = "1"
5
+ import cv2
6
+ cv2.setNumThreads(0)
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+ import torch.distributed as dist
11
+ from torch.utils.data import Dataset, DataLoader, DistributedSampler
12
+ import torch.multiprocessing as mp
13
+ from torch.nn.parallel import DistributedDataParallel as DDP
14
+ from torch.optim.lr_scheduler import *
15
+ import glob
16
+ import matplotlib
17
+ # matplotlib.use('AGG')
18
+ import matplotlib.pyplot as plt
19
+ import numpy as np
20
+ import gc
21
+ from PIL import Image
22
+ import time
23
+ import socket
24
+ import scipy
25
+ import scipy.io as sio
26
+ from scipy import stats
27
+ import argparse
28
+ from skimage.metrics import peak_signal_noise_ratio as compare_psnr
29
+ from skimage.metrics import structural_similarity as compare_ssim
30
+ from multiprocessing import Pool
31
+ from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed
32
+ import threading
33
+ from functools import wraps
34
+ from tqdm import tqdm
35
+ import exifread
36
+ import rawpy
37
+ import math
38
+ import random
39
+ import yaml
40
+ import pickle
41
+ import warnings
42
+ import h5py
44
+ import pickle as pkl
45
+ from natsort import natsort
46
+ import scipy.io
47
+ from scipy.stats import poisson, norm
48
+ from scipy.signal import convolve
49
+ from scipy.interpolate import interp1d
51
+ import kornia.filters as kf
52
+ from natsort import natsorted
53
+
54
+ def setup_seed(seed):
55
+ torch.manual_seed(seed)
56
+ torch.cuda.manual_seed_all(seed)
57
+ np.random.seed(seed)
58
+ random.seed(seed)
59
+ torch.backends.cudnn.deterministic = True
60
+ setup_seed(1997)
61
+
62
+ fn_time = {}
63
+
64
+ def timestamp(time_points, n):
65
+ time_points[n] = time.time()
66
+ return time_points[n] - time_points[n-1]
67
+
68
+ def fn_timer(function, print_log=False):
69
+ @wraps(function)
70
+ def function_timer(*args, **kwargs):
71
+ global fn_time
72
+ t0 = time.time()
73
+ result = function(*args, **kwargs)
74
+ t1 = time.time()
75
+ if print_log:
76
+ print ("Total time running %s: %.6f seconds" %
77
+ (function.__name__, t1-t0))
78
+ if function.__name__ in fn_time:
79
+ fn_time[function.__name__] += t1-t0
80
+ else:
81
+ fn_time[function.__name__] = t1-t0
82
+ return result
83
+ return function_timer
84
+
85
+ def log(string, log=None, str=False, end='\n', notime=False):
86
+ log_string = f'{time.strftime("%Y-%m-%d %H:%M:%S")} >> {string}' if not notime else string
87
+ print(log_string)
88
+ if log is not None:
89
+ with open(log,'a+') as f:
90
+ f.write(log_string+'\n')
91
+ else:
92
+ pass
93
+ # os.makedirs('worklog', exist_ok=True)
94
+ # log = f'worklog/worklog-{time.strftime("%Y-%m-%d")}.txt'
95
+ # with open(log,'a+') as f:
96
+ # f.write(log_string+'\n')
97
+ if str:
98
+ return string+end
99
+
100
+ class AverageMeter(object):
101
+ """Computes and stores the average and current value"""
102
+ def __init__(self, name, fmt=':f', log=True, last_epoch=0):
103
+ self.name = name
104
+ self.fmt = fmt
105
+ self.log = log
106
+ self.history = []
107
+ self.last_epoch = last_epoch
108
+ self.history_init_flag = False
109
+ self.reset()
110
+
111
+ def reset(self):
112
+ if self.log:
113
+ try:
114
+ if self.avg>0: self.history.append(self.avg)
115
+ except:
116
+ pass#print(f'Start log {self.name}!')
117
+ self.val = 0
118
+ self.avg = 0
119
+ self.sum = 0
120
+ self.count = 0
121
+
122
+ def update(self, val, n=1):
123
+ self.val = val
124
+ self.sum += val * n
125
+ self.count += n
126
+ self.avg = self.sum / self.count
127
+
128
+ def plot_history(self, savefile='log.jpg', logfile='log.pkl'):
129
+ # load the previous log
130
+ if os.path.exists(logfile) and not self.history_init_flag:
131
+ self.history_init_flag = True
132
+ with open(logfile, 'rb') as f:
133
+ history_old = pickle.load(f)
134
+ if self.last_epoch: # a value of 0 means restart from scratch
135
+ self.history = history_old + self.history[:self.last_epoch]
136
+ # save the log
137
+ with open(logfile, 'wb') as f:
138
+ pickle.dump(self.history, f)
139
+ # plot the curve
140
+ plt.figure(figsize=(12,9))
141
+ plt.title(f'{self.name} log')
142
+ x = list(range(len(self.history)))
143
+ plt.plot(x, self.history)
144
+ plt.xlabel('Epoch')
145
+ plt.ylabel(self.name)
146
+ plt.savefig(savefile, bbox_inches='tight')
147
+ plt.close()
148
+
149
+ def __str__(self):
150
+ fmtstr = '{name}:{val' + self.fmt + '}({avg' + self.fmt + '})'
151
+ return fmtstr.format(**self.__dict__)
152
+
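A usage sketch of AverageMeter (log=False keeps it from recording epoch history):

loss_meter = AverageMeter('loss', ':.4f', log=False)
for step in range(10):
    loss_meter.update(1.0 / (step + 1))
print(loss_meter)   # e.g. "loss:0.1000(0.2929)", i.e. current value (running average)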
153
+ def pkl_convert(param):
154
+ return {
155
+ k.replace("module.", ""): v
156
+ for k, v in param.items()
157
+ if "module." in k
158
+ }
159
+
160
+ def load_weights(model, pretrained_dict, multi_gpu=False, by_name=False):
161
+ model_dict = model.module.state_dict() if multi_gpu else model.state_dict()
162
+ # 1. filter out unnecessary keys
163
+ tsm_replace = []
164
+ for k in pretrained_dict:
165
+ if 'tsm_shift' in k:
166
+ k_new = k.replace('tsm_shift', 'tsm_buffer')
167
+ tsm_replace.append((k, k_new))
168
+ for k, k_new in tsm_replace:
169
+ pretrained_dict[k_new] = pretrained_dict[k]
170
+ if by_name:
171
+ del_list = []
172
+ for k, v in pretrained_dict.items():
173
+ if k in model_dict:
174
+ if model_dict[k].shape != pretrained_dict[k].shape:
175
+ # 1. Delete values not in key
176
+ del_list.append(k)
177
+ # 2. Cat it to the end
178
+ # diff = model_dict[k].size()[1] - pretrained_dict[k].size()[1]
179
+ # v = torch.cat((v, v[:,:diff]), dim=1)
180
+ # 3. Repeat it to same
181
+ # nframe = model_dict[k].shape[1] // pretrained_dict[k].shape[1]
182
+ # v = torch.repeat_interleave(v, nframe, dim=1)
183
+ # 4. Clip it to same
184
+ # c_model = model_dict[k].shape[1]
185
+ # c_save = pretrained_dict[k].shape[1]
186
+ # c_diff = c_model - c_save
187
+ # if c_model > c_save:
188
+ # v = torch.cat((v, torch.empty(b_model, c_diff, h_model, w_model).cuda()), dim=1)
189
+ # else:
190
+ # v = v[:,:c_diff]
191
+ log(f'Warning: "{k}":{pretrained_dict[k].shape}->{model_dict[k].shape}')
192
+ pretrained_dict[k] = v
193
+ else:
194
+ del_list.append(k)
195
+ log(f'Warning: "{k}" is not exist and has been deleted!!')
196
+ for k in del_list:
197
+ del pretrained_dict[k]
198
+ # 2. overwrite entries in the existing state dict
199
+ model_dict.update(pretrained_dict)
200
+ if multi_gpu:
201
+ model.module.load_state_dict(model_dict)
202
+ else:
203
+ model.load_state_dict(model_dict)
204
+ # for name, parameter in model.named_parameters():
205
+ # if name not in ["input_blocks.0.0.weight", "out.2.weight", "out.2.bias"]:
206
+ # # if name not in del_list:
207
+ # parameter.requires_grad = False
208
+ # log(f'Warning: layer except "{name}" is freezed...')
209
+ return model
210
+
211
+ def tensor_dimxto4(tensor):
212
+ c, h, w = tensor.shape[-3:]
213
+ tensor = tensor.reshape(-1, c, h, w)
214
+ return tensor
215
+
216
+ def tensor_dimxto5(tensor):
217
+ t, c, h, w = tensor.shape[-4:]
218
+ tensor = tensor.reshape(-1, t, c, h, w)
219
+ return tensor
220
+
221
+ def tensor_dim5to4(tensor):
222
+ batchsize, crops, c, h, w = tensor.shape
223
+ tensor = tensor.reshape(batchsize*crops, c, h, w)
224
+ return tensor
225
+
226
+ def tensor_dim6to5(tensor):
227
+ batchsize, crops, t, c, h, w = tensor.shape
228
+ tensor = tensor.reshape(batchsize*crops, t, c, h, w)
229
+ return tensor
230
+
231
+ def get_host_with_dir(dataset_name=''):
232
+ multi_gpu = False
233
+ hostname = socket.gethostname()
234
+ log(f"User's hostname is '{hostname}'")
235
+ if hostname == 'ubun':
236
+ host = '/data/fenghansen/datasets'
237
+ elif hostname == 'ubuntu':
238
+ host = '/data4/fenghansen/datasets'
239
+ elif hostname == 'DESKTOP-FCAMIOQ':
240
+ host = 'F:/datasets'
241
+ elif hostname == 'DESKTOP-LGD8S6F': # BIT-816
242
+ host = 'E:/datasets'
243
+ elif hostname[:6] == 'isp-gn':
244
+ host = '/home/fenghansen/datasets'
245
+ else:
246
+ host = '/data'
247
+ multi_gpu = True if torch.cuda.device_count() > 1 else False
248
+ return hostname, host + dataset_name, multi_gpu
249
+
250
+ def get_p2d(shape, base=16):
251
+ xb, xc, xh, xw = shape
252
+ yh, yw = ((xh-1)//base+1)*base, ((xw-1)//base+1)*base
253
+ diffY = yh - xh
254
+ diffX = yw - xw
255
+ p2d = (diffX // 2, diffX - diffX//2, diffY // 2, diffY - diffY//2)
256
+ return p2d
257
+
258
+ # def big_image_split(data, n=2, pad=64):
259
+ # # split a big image into small tiles
260
+ # p2d = get_p2d(data.shape, base=pad)
261
+ # data = F.pad(data, p2d, mode='reflect')
262
+ # data = torch.cat(torch.chunk(data, n, dim=2), dim=0)
263
+ # data = torch.cat(torch.chunk(data, n, dim=3), dim=0)
264
+ # return data, p2d
265
+
266
+ # def big_image_merge(data, n=2, p2d=[0,0,0,0]):
267
+ # # merge small tiles back into a big image
268
+ # data = torch.cat(torch.chunk(data, n, dim=0), dim=3)
269
+ # data = torch.cat(torch.chunk(data, n, dim=0), dim=2)
270
+ # H, W = data.shape[-2:]
271
+ # data = data[..., p2d[-2]:H-p2d[-1], p2d[0]:W-p2d[1]]
272
+ # return data
273
+
274
+ def calculate_padding(shape, target_size, overlap_ratio=0.25):
275
+ """计算需要的padding以确保图片能被均匀切块,只在右下角pad"""
276
+ h, w = shape[-2:]
277
+
278
+ # if the image is smaller than the target size, skip padding and cropping
279
+ if h <= target_size and w <= target_size:
280
+ return [0, 0, 0, 0], (1, 1), True # no padding, just a single tile
281
+
282
+ stride_h = int(target_size * (1 - overlap_ratio))
283
+ stride_w = int(target_size * (1 - overlap_ratio))
284
+
285
+ # number of tiles needed
286
+ n_h = max(1, (h - target_size + stride_h - 1) // stride_h + 1)
287
+ n_w = max(1, (w - target_size + stride_w - 1) // stride_w + 1)
288
+
289
+ # final padded size
290
+ final_h = (n_h - 1) * stride_h + target_size
291
+ final_w = (n_w - 1) * stride_w + target_size
292
+
293
+ # pad only at the bottom-right
294
+ pad_bottom = final_h - h
295
+ pad_right = final_w - w
296
+
297
+ return [0, pad_right, 0, pad_bottom], (n_h, n_w), False # [left, right, top, bottom]
298
+
299
+ def big_image_split(data, target_size=512, overlap_ratio=0.25, pad_mode='reflect'):
300
+ """
301
+ Split a big image into overlapping tiles, padding only at the bottom-right corner.
302
+
303
+ Args:
304
+ data: input image tensor [B,C,H,W]
305
+ target_size: target size of each tile
306
+ overlap_ratio: overlap ratio between neighboring tiles (0-1)
307
+ pad_mode: padding mode
308
+
309
+ Returns:
310
+ patches: tiled image tensor [B*num_patches,C,target_size,target_size]
311
+ metadata: metadata with the padding info and tile counts
312
+ """
313
+ # ensure the input is a 4D tensor
314
+ if data.dim() == 3:
315
+ data = data.unsqueeze(0)
316
+
317
+ # compute the padding (bottom-right only)
318
+ p2d, (n_h, n_w), is_original = calculate_padding(data.shape, target_size, overlap_ratio)
319
+
320
+ # keep the original shape
321
+ original_shape = data.shape
322
+
323
+ # if the image is smaller than the target size, skip padding and tiling
324
+ if is_original:
325
+ # keep the output format consistent; there is only one tile
326
+ patches = data
327
+ metadata = {
328
+ 'p2d': p2d,
329
+ 'n_h': n_h,
330
+ 'n_w': n_w,
331
+ 'stride_h': target_size, # the whole image is one tile
332
+ 'stride_w': target_size,
333
+ 'target_size': target_size,
334
+ 'original_shape': original_shape,
335
+ 'is_original': True
336
+ }
337
+ return patches, metadata
338
+
339
+ data = F.pad(data, p2d, mode=pad_mode)
340
+
341
+ # shape after padding
342
+ B, C, H, W = data.shape
343
+
344
+ # compute the strides
345
+ stride_h = int(target_size * (1 - overlap_ratio))
346
+ stride_w = int(target_size * (1 - overlap_ratio))
347
+
348
+ # create sliding windows
349
+ patches = data.unfold(2, target_size, stride_h).unfold(3, target_size, stride_w)
350
+ patches = patches.contiguous().view(B, C, n_h, n_w, target_size, target_size)
351
+
352
+ # rearrange dimensions
353
+ patches = patches.permute(0, 2, 3, 1, 4, 5).contiguous()
354
+ patches = patches.view(B * n_h * n_w, C, target_size, target_size)
355
+
356
+ # record the metadata
357
+ metadata = {
358
+ 'p2d': p2d,
359
+ 'n_h': n_h,
360
+ 'n_w': n_w,
361
+ 'stride_h': stride_h,
362
+ 'stride_w': stride_w,
363
+ 'target_size': target_size,
364
+ 'original_shape': original_shape,
365
+ 'is_original': False
366
+ }
367
+
368
+ return patches, metadata
369
+
370
+ def big_image_merge(patches, metadata, blend_mode='triangle'):
371
+ """
372
+ Merge the tiles back into one big image.
373
+
374
+ Args:
375
+ patches: tiled image tensor [B*num_patches,C,target_size,target_size]
376
+ metadata: metadata with the padding info and tile counts
377
+ blend_mode: blending mode; supports 'avg' (mean), 'max' (maximum), or 'triangle' (triangular feathering)
378
+
379
+ Returns:
380
+ merged: merged image tensor [B,C,H,W]
381
+ """
382
+ # if it is the original (untiled) image, return it directly
383
+ if metadata.get('is_original', False):
384
+ return patches
385
+
386
+ # unpack the metadata
387
+ p2d = metadata['p2d']
388
+ n_h = metadata['n_h']
389
+ n_w = metadata['n_w']
390
+ stride_h = metadata['stride_h']
391
+ stride_w = metadata['stride_w']
392
+ target_size = metadata['target_size']
393
+ original_shape = metadata['original_shape']
394
+ B_merged = patches.shape[0] // (n_h * n_w)
395
+
396
+ # reshape the patches
397
+ patches = patches.view(B_merged, n_h, n_w, patches.shape[1], target_size, target_size)
398
+ patches = patches.permute(0, 3, 1, 4, 2, 5).contiguous() # [B,C,n_h,target_size,n_w,target_size]
399
+
400
+ # allocate the merge tensor and weight map
401
+ merged = torch.zeros((B_merged, patches.shape[1],
402
+ (n_h-1)*stride_h+target_size,
403
+ (n_w-1)*stride_w+target_size), device=patches.device)
404
+
405
+ if blend_mode in ['avg', 'triangle']:
406
+ weight_map = torch.zeros_like(merged)
407
+
408
+ # build the triangular blending weight map (when that mode is used)
409
+ if blend_mode == 'triangle':
410
+ # horizontal weight ramp
411
+ overlap_w = target_size - stride_w
412
+ if overlap_w > 0:
413
+ h_weights = torch.ones(target_size, target_size, device=patches.device)
414
+ # left weights ramp linearly from 0 to 1
415
+ left_weights = torch.linspace(0, 1, overlap_w, device=patches.device).view(1, overlap_w)
416
+ h_weights[:, :overlap_w] = left_weights
417
+ # right weights ramp linearly from 1 to 0
418
+ right_weights = torch.linspace(1, 0, overlap_w, device=patches.device).view(1, overlap_w)
419
+ h_weights[:, -overlap_w:] = right_weights
420
+ else:
421
+ h_weights = torch.ones(target_size, target_size, device=patches.device)
422
+
423
+ # vertical weight ramp
424
+ overlap_h = target_size - stride_h
425
+ if overlap_h > 0:
426
+ v_weights = torch.ones(target_size, target_size, device=patches.device)
427
+ # top weights ramp linearly from 0 to 1
428
+ top_weights = torch.linspace(0, 1, overlap_h, device=patches.device).view(overlap_h, 1)
429
+ v_weights[:overlap_h, :] = top_weights
430
+ # bottom weights ramp linearly from 1 to 0
431
+ bottom_weights = torch.linspace(1, 0, overlap_h, device=patches.device).view(overlap_h, 1)
432
+ v_weights[-overlap_h:, :] = bottom_weights
433
+ else:
434
+ v_weights = torch.ones(target_size, target_size, device=patches.device)
435
+
436
+ # combine the horizontal and vertical weights
437
+ triangle_weights = h_weights * v_weights
438
+
439
+ # accumulate the tiles into the merged tensor
440
+ for i in range(n_h):
441
+ for j in range(n_w):
442
+ h_start = i * stride_h
443
+ w_start = j * stride_w
444
+
445
+ if blend_mode == 'avg':
446
+ # weight map for handling overlapping regions
447
+ weight = torch.ones_like(patches[:, :, i, :, j, :])
448
+ merged[:, :, h_start:h_start+target_size, w_start:w_start+target_size] += patches[:, :, i, :, j, :]
449
+ weight_map[:, :, h_start:h_start+target_size, w_start:w_start+target_size] += weight
450
+ elif blend_mode == 'triangle':
451
+ # use triangular blending weights
452
+ current_patch = patches[:, :, i, :, j, :]
453
+ current_weight = triangle_weights.expand_as(current_patch)
454
+ merged[:, :, h_start:h_start+target_size, w_start:w_start+target_size] += current_patch * current_weight
455
+ weight_map[:, :, h_start:h_start+target_size, w_start:w_start+target_size] += current_weight
456
+ elif blend_mode == 'max':
457
+ # max blending
458
+ current_patch = patches[:, :, i, :, j, :]
459
+ current_region = merged[:, :, h_start:h_start+target_size, w_start:w_start+target_size]
460
+ merged[:, :, h_start:h_start+target_size, w_start:w_start+target_size] = torch.max(current_region, current_patch)
461
+
462
+ # normalize for average or triangular blending
463
+ if blend_mode in ['avg', 'triangle']:
464
+ # avoid division by zero
465
+ weight_map = torch.clamp(weight_map, min=1e-8)
466
+ merged = merged / weight_map
467
+
468
+ # remove the padding (bottom-right only)
469
+ pad_left, pad_right, pad_top, pad_bottom = p2d
470
+ H, W = merged.shape[-2:]
471
+ merged = merged[..., :H-pad_bottom, :W-pad_right] # only the bottom-right padding is removed
472
+
473
+ return merged
474
+
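A round-trip sketch of the tile/merge pair above. 'avg' blending is used because the triangular weights taper to zero at the outer image border, so only averaging reproduces unchanged tiles exactly:

import torch
img = torch.rand(1, 4, 1000, 1500)   # e.g. a packed RGGB frame
patches, meta = big_image_split(img, target_size=512, overlap_ratio=0.25)
# ... a network would process `patches` here ...
restored = big_image_merge(patches, meta, blend_mode='avg')
assert restored.shape == img.shape and torch.allclose(restored, img, atol=1e-5)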
475
+ def tensor2numpy(data, eval=True, transpose=True):
476
+ if eval: data = data[0]
477
+ data = data.detach().cpu().numpy()
478
+ if transpose:
479
+ length = len(data.shape)
480
+ if length == 3:
481
+ data = data.transpose(1,2,0)
482
+ elif length == 4:
483
+ data = data.transpose(0,2,3,1)
484
+ return data
485
+
486
+ def numpy2tensor(data, device='cpu', eval=True, transpose=True, clone=False):
487
+ if clone: data = data.copy()
488
+ data = torch.from_numpy(np.ascontiguousarray(data)).float().to(device)
489
+ length = len(data.shape)
490
+ if transpose:
491
+ if length == 3:
492
+ data = data.permute(2,0,1)
493
+ elif length == 2:
494
+ data = data[None,:]
495
+ elif length == 4:
496
+ return data.permute(0,3,1,2)
497
+ if eval:
498
+ data = data[None,:]
499
+ return data
500
+
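The two converters above are inverses for HWC inputs, e.g.:

import numpy as np
img = np.random.rand(64, 64, 3).astype(np.float32)   # HWC image
t = numpy2tensor(img)      # (1, 3, 64, 64) torch tensor
back = tensor2numpy(t)     # (64, 64, 3) numpy array again
assert np.allclose(img, back)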
501
+ def read_paired_fns(filename):
502
+ with open(filename) as f:
503
+ fns = f.readlines()
504
+ fns = [tuple(fn.strip().split(' ')) for fn in fns]
505
+ return fns
506
+
507
+ def metrics_recorder(file, names, psnrs, ssims):
508
+ if os.path.exists(file):
509
+ with open(file, 'rb') as f:
510
+ metrics = pkl.load(f)
511
+ else:
512
+ metrics = {}
513
+ for name, psnr, ssim in zip(names, psnrs, ssims):
514
+ metrics[name] = [psnr, ssim]
515
+ with open(file, 'wb') as f:
516
+ pkl.dump(metrics, f)
517
+ return metrics
518
+
519
+ def mpop(func, idx, *args, **kwargs):
520
+ data = func(*args, **kwargs)
521
+ log(f'Finish task No.{idx}...')
522
+ return idx, data  # return the cached result instead of running the task twice
523
+
524
+ def dataload(path):
525
+ suffix = path[-4:].lower()
526
+ if suffix in ['.arw','.dng','.nef','.cr2']:
527
+ data = rawpy.imread(path).raw_image_visible
528
+ elif suffix in ['.raw']:
529
+ data = np.fromfile(path, np.uint16).reshape(1440, 2560)
530
+ elif suffix in ['.npy']:
531
+ data = np.load(path)
532
+ elif suffix in ['.mat']:
533
+ if 'metadata' in path.lower():
534
+ data = scipy.io.loadmat(path)
535
+ else:
536
+ with h5py.File(path, 'r') as f:
537
+ data = np.array(f['x'])
538
+ elif suffix in ['.jpg', '.png', '.bmp', 'tiff']:
539
+ data = cv2.imread(path)[:,:,::-1]
540
+ return data
541
+
542
+ # extract the UNet weights from an ELD checkpoint on their own
543
+ def pth_transfer(src_path='/data/ELD/checkpoints/sid-ours-inc4/model_200_00257600.pt',
544
+ dst_path='checkpoints/SonyA7S2_Official.pth',
545
+ reverse=False):
546
+ model_src = torch.load(src_path, map_location='cpu')
547
+ if reverse:
548
+ model_dst = torch.load(dst_path, map_location='cpu')
549
+ model_src['netG'] = model_dst
550
+ save_dir = os.path.join('pth_transfer', os.path.basename(dst_path)[9:-15])
551
+ os.makedirs(save_dir, exist_ok=True)
552
+ save_path = os.path.join(save_dir, os.path.basename(src_path))
553
+ torch.save(model_src, save_path)
554
+ else:
555
+ model_src = model_src['netG']
556
+ torch.save(model_src, dst_path)
utils/video_ops.py ADDED
@@ -0,0 +1,49 @@
1
+ from .utils import *
2
+
3
+ def frame_index_splitor(nframes=1, pad=True, reflect=True):
4
+ # [b, 7, c, h ,w]
5
+ r = nframes // 2
6
+ length = 7 if pad else 8-nframes
7
+ frames = []
8
+ for i in range(length):
9
+ frames.append([None]*nframes)
10
+ if pad:
11
+ for i in range(7):
12
+ for k in range(nframes):
13
+ frames[i][k] = i+k-r
14
+ else:
15
+ for i in range(8-nframes):
16
+ for k in range(nframes):
17
+ frames[i][k] = i+k
18
+ if reflect:
19
+ frames = num_reflect(frames,0,6)
20
+ else:
21
+ frames = num_clip(frames, 0, 6)
22
+ return frames
23
+
24
+ def multi_frame_loader(frames, index, gt=False, keepdims=False):
25
+ loader = []
26
+ for ind in index:
27
+ imgs = []
28
+ if gt:
29
+ r = len(index[0]) // 2
30
+ tensor = frames[:,ind[r],:,:,:]
31
+ if keepdims:
32
+ tensor = tensor.unsqueeze(dim=1)
33
+ else:
34
+ for i in ind:
35
+ imgs.append(frames[:,i,:,:,:])
36
+ tensor = torch.stack(imgs, dim=1)
37
+ loader.append(tensor)
38
+ return torch.stack(loader, dim=0)
39
+
40
+ def num_clip(nums, minimum, maximum):
41
+ nums = np.array(nums)
42
+ nums = np.clip(nums, minimum, maximum)
43
+ return nums
44
+
45
+ def num_reflect(nums, minimum, maximum):
46
+ nums = np.array(nums)
47
+ nums = np.abs(nums-minimum)
48
+ nums = maximum-np.abs(maximum-nums)
49
+ return nums
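A sketch of the index generator above for 7-frame clips with a 3-frame temporal window:

windows = frame_index_splitor(nframes=3, pad=True, reflect=True)
print(windows.shape)   # (7, 3); reflect padding yields [1, 0, 1] first and [5, 6, 5] last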
utils/visualization.py ADDED
@@ -0,0 +1,124 @@
1
+ from .utils import *
2
+
3
+ def scale_down(img):
4
+ return np.float32(img) / 255.
5
+
6
+ def scale_up(img):
7
+ return np.uint8(img.clip(0,1) * 255.)
8
+
9
+ def tensor2im(image_tensor, visualize=False, video=False):
10
+ image_tensor = image_tensor.detach()
11
+
12
+ if visualize:
13
+ image_tensor = image_tensor[:, 0:3, ...]
14
+
15
+ if not video:
16
+ image_numpy = image_tensor[0].cpu().float().numpy()
17
+ image_numpy = (np.transpose(image_numpy, (1, 2, 0))) * 255.0
18
+ else:
19
+ image_numpy = image_tensor.cpu().float().numpy()
20
+ image_numpy = (np.transpose(image_numpy, (0, 2, 3, 1))) * 255.0
21
+
22
+ image_numpy = np.clip(image_numpy, 0, 255)
23
+
24
+ return image_numpy
25
+
26
+ def quality_assess(X, Y, data_range=255):
27
+ # Y: correct; X: estimate
28
+ if X.ndim == 3:
29
+ psnr = compare_psnr(Y, X, data_range=data_range)
30
+ ssim = compare_ssim(Y, X, data_range=data_range, channel_axis=-1)
31
+ return {'PSNR':psnr, 'SSIM': ssim}
32
+ else:
33
+ raise NotImplementedError
34
+
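A usage sketch of quality_assess on uint8 images (synthetic data, illustrative only):

import numpy as np
gt = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
est = np.clip(gt.astype(np.int16) + np.random.randint(-5, 6, gt.shape), 0, 255).astype(np.uint8)
print(quality_assess(est, gt, data_range=255))   # {'PSNR': ..., 'SSIM': ...}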
35
+ def feature_vis(tensor, name='out', save=False, first_only=True):
36
+ feature = tensor.detach().cpu().numpy().transpose(0,2,3,1)
37
+ if save:
38
+ if feature.min() < 0 or feature.max()>1:
39
+ warnings.warn('Signals are clipped to [0, 1] for visualization!!!!')
40
+ os.makedirs('worklog/feature_vis', exist_ok=True)
41
+ for i in range(len(feature)):
42
+ cv2.imwrite(f'worklog/feature_vis/{name}_{i}.png', np.uint8(feature[i,:,:,::-1]*255))
43
+ if first_only: break
44
+ return feature
45
+
46
+ def plot_sample(img_lr, img_dn, img_hr, filename='result', model_name='Unet',
47
+ epoch=-1, print_metrics=False, save_plot=True, save_path='./', res=None):
48
+ if np.max(img_hr) <= 1:
49
+ # convert back to uint8
50
+ img_lr = scale_up(img_lr)
51
+ img_dn = scale_up(img_dn)
52
+ img_hr = scale_up(img_hr)
53
+ # compute PSNR and SSIM
54
+ if res is None:
55
+ psnr = []
56
+ ssim = []
57
+ psnr.append(compare_psnr(img_hr, img_lr))
58
+ psnr.append(compare_psnr(img_hr, img_dn))
59
+ ssim.append(compare_ssim(img_hr, img_lr, channel_axis=-1))
60
+ ssim.append(compare_ssim(img_hr, img_dn, channel_axis=-1))
61
+ psnr.append(-1)
62
+ ssim.append(-1)
63
+ else:
64
+ psnr = [res[0], res[2], -1]
65
+ ssim = [res[1], res[3], -1]
66
+ # Images and titles
67
+ images = {
68
+ 'Noisy Image': img_lr,
69
+ model_name: img_dn,
70
+ 'Ground Truth': img_hr
71
+ }
72
+ if os.path.exists(save_path) is False:
73
+ os.makedirs(save_path)
74
+ # Plot the images. Note: rescaling and using squeeze since we are getting batches of size 1
75
+ fig, axes = plt.subplots(1, 3, figsize=(20, 4.5))
76
+ for i, (title, img) in enumerate(images.items()):
77
+ axes[i].imshow(img)
78
+ axes[i].set_title("{}\n{} - psnr:{:.2f} - ssim:{:.4f}".format(title, img.shape, psnr[i], ssim[i]))
79
+ axes[i].axis('off')
80
+ plt.suptitle('{} - Epoch: {}'.format(filename, epoch))
81
+ if print_metrics:
82
+ log(f'PSNR: {psnr}')  # log's second positional argument is a file path, not a value
83
+ log(f'SSIM: {ssim}')
84
+ # Save directory
85
+ if os.path.exists(save_path) is False:
86
+ os.makedirs(save_path)
87
+ savefile = os.path.join(save_path, "{}-Epoch{}.jpg".format(filename, epoch))
88
+ if save_plot:
89
+ denoisedfile = os.path.join(save_path, "{}_denoised.png".format(filename))
90
+ cv2.imwrite(denoisedfile, img_dn[:,:,::-1])
91
+ fig.savefig(savefile, bbox_inches='tight')
92
+ plt.close()
93
+ return psnr, ssim, filename
94
+
95
+ def save_picture(img_sr, save_path='./images/test',frame_id='0000'):
96
+ # convert back to uint8
97
+ img_sr = scale_up(img_sr.transpose(1,2,0))
98
+ if not os.path.exists(save_path):
99
+ os.makedirs(save_path, exist_ok=True)
100
+ plt.imsave(os.path.join(save_path, frame_id+'.png'), img_sr)
101
+ gc.collect()
102
+
103
+ def save_tensor_to_numpy(tensor, name, save_path='./debug'):
104
+ """
105
+ Convert a PyTorch tensor to a NumPy array and save it to disk.
106
+
107
+ Args:
108
+ tensor (torch.Tensor): the PyTorch tensor to save.
109
+ name (str): output filename (without path or extension).
110
+ save_path (str): save directory, default './debug'.
111
+ """
112
+ # make sure the tensor is on the CPU
113
+ tensor = tensor.detach().clone().cpu()
114
+
115
+ # convert to a NumPy array
116
+ array = tensor.numpy()
117
+
118
+ # build the save path
119
+ os.makedirs(save_path, exist_ok=True)
120
+ full_path = f"{save_path}/{name}.npy"
121
+
122
+ # save to file
123
+ np.save(full_path, array)
124
+ print(f"Tensor saved as NumPy array to: {full_path}")