ZyKINvice committed on
Commit
f6a79cc
·
verified ·
1 Parent(s): 7352269

Initial Deployment

Browse files
Files changed (2) hide show
  1. README.md +7 -5
  2. index.html +370 -19
README.md CHANGED
@@ -1,10 +1,12 @@
1
  ---
2
- title: Githubpage
3
- emoji: 🌖
4
- colorFrom: red
5
- colorTo: blue
6
  sdk: static
7
  pinned: false
 
 
8
  ---
9
 
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: githubpage
3
+ emoji: 🐳
4
+ colorFrom: pink
5
+ colorTo: green
6
  sdk: static
7
  pinned: false
8
+ tags:
9
+ - deepsite
10
  ---
11
 
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
index.html CHANGED
@@ -1,19 +1,370 @@
1
- <!doctype html>
2
- <html>
3
- <head>
4
- <meta charset="utf-8" />
5
- <meta name="viewport" content="width=device-width" />
6
- <title>My static Space</title>
7
- <link rel="stylesheet" href="style.css" />
8
- </head>
9
- <body>
10
- <div class="card">
11
- <h1>Welcome to your static Space!</h1>
12
- <p>You can modify this app directly by editing <i>index.html</i> in the Files and versions tab.</p>
13
- <p>
14
- Also don't forget to check the
15
- <a href="https://huggingface.co/docs/hub/spaces" target="_blank">Spaces documentation</a>.
16
- </p>
17
- </div>
18
- </body>
19
- </html>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>Mingru Huang - Computer Vision Researcher</title>
7
+ <script src="https://cdn.tailwindcss.com"></script>
8
+ <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
9
+ <style>
10
+ @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
11
+
12
+ body {
13
+ font-family: 'Inter', sans-serif;
14
+ scroll-behavior: smooth;
15
+ }
16
+
17
+ .gradient-text {
18
+ background: linear-gradient(90deg, #3b82f6, #8b5cf6);
19
+ -webkit-background-clip: text;
20
+ background-clip: text;
21
+ color: transparent;
22
+ }
23
+
24
+ .hero-image {
25
+ clip-path: circle(50% at 50% 50%);
26
+ transition: all 0.3s ease;
27
+ }
28
+
29
+ .hero-image:hover {
30
+ transform: scale(1.05);
31
+ box-shadow: 0 10px 25px rgba(0, 0, 0, 0.1);
32
+ }
33
+
34
+ .publication-card {
35
+ transition: all 0.3s ease;
36
+ }
37
+
38
+ .publication-card:hover {
39
+ transform: translateY(-5px);
40
+ box-shadow: 0 10px 25px rgba(0, 0, 0, 0.1);
41
+ }
42
+
43
+ .timeline-item:not(:last-child)::after {
44
+ content: '';
45
+ position: absolute;
46
+ left: 23px;
47
+ top: 32px;
48
+ height: calc(100% - 32px);
49
+ width: 2px;
50
+ background: #e5e7eb;
51
+ }
52
+
53
+ .news-item {
54
+ position: relative;
55
+ padding-left: 2rem;
56
+ }
57
+
58
+ .news-item::before {
59
+ content: '';
60
+ position: absolute;
61
+ left: 0.5rem;
62
+ top: 0.5rem;
63
+ width: 0.75rem;
64
+ height: 0.75rem;
65
+ border-radius: 50%;
66
+ background: #3b82f6;
67
+ }
68
+ </style>
69
+ </head>
70
+ <body class="bg-gray-50 text-gray-800">
71
+ <!-- Navigation -->
72
+ <nav class="bg-white shadow-sm sticky top-0 z-50">
73
+ <div class="max-w-6xl mx-auto px-4 sm:px-6 lg:px-8">
74
+ <div class="flex justify-between h-16">
75
+ <div class="flex items-center">
76
+ <a href="#" class="text-xl font-bold gradient-text">Mingru Huang</a>
77
+ </div>
78
+ <div class="hidden md:flex items-center space-x-8">
79
+ <a href="#about" class="text-gray-700 hover:text-blue-600 transition">About</a>
80
+ <a href="#news" class="text-gray-700 hover:text-blue-600 transition">News</a>
81
+ <a href="#publications" class="text-gray-700 hover:text-blue-600 transition">Publications</a>
82
+ <a href="#contact" class="text-gray-700 hover:text-blue-600 transition">Contact</a>
83
+ </div>
84
+ <div class="md:hidden flex items-center">
85
+ <button id="menu-toggle" class="text-gray-700 focus:outline-none">
86
+ <svg class="h-6 w-6" fill="none" viewBox="0 0 24 24" stroke="currentColor">
87
+ <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M4 6h16M4 12h16M4 18h16"></path>
88
+ </svg>
89
+ </button>
90
+ </div>
91
+ </div>
92
+ </div>
93
+ <!-- Mobile menu -->
94
+ <div id="mobile-menu" class="hidden md:hidden bg-white shadow-lg">
95
+ <div class="px-2 pt-2 pb-3 space-y-1 sm:px-3">
96
+ <a href="#about" class="block px-3 py-2 rounded-md text-base font-medium text-gray-700 hover:text-blue-600 hover:bg-gray-50">About</a>
97
+ <a href="#news" class="block px-3 py-2 rounded-md text-base font-medium text-gray-700 hover:text-blue-600 hover:bg-gray-50">News</a>
98
+ <a href="#publications" class="block px-3 py-2 rounded-md text-base font-medium text-gray-700 hover:text-blue-600 hover:bg-gray-50">Publications</a>
99
+ <a href="#contact" class="block px-3 py-2 rounded-md text-base font-medium text-gray-700 hover:text-blue-600 hover:bg-gray-50">Contact</a>
100
+ </div>
101
+ </div>
102
+ </nav>
103
+
104
+ <!-- Hero Section -->
105
+ <section id="about" class="py-12 md:py-20 bg-gradient-to-r from-blue-50 to-purple-50">
106
+ <div class="max-w-6xl mx-auto px-4 sm:px-6 lg:px-8">
107
+ <div class="flex flex-col md:flex-row items-center">
108
+ <div class="md:w-1/3 mb-8 md:mb-0 flex justify-center">
109
+ <img src="https://huangmr0719.github.io/images/portrait.png" alt="Mingru Huang" class="hero-image w-64 h-64 object-cover border-4 border-white shadow-lg">
110
+ </div>
111
+ <div class="md:w-2/3 md:pl-12 text-center md:text-left">
112
+ <h1 class="text-4xl md:text-5xl font-bold mb-4">
113
+ <span class="gradient-text">Hello, I'm Mingru Huang</span>
114
+ </h1>
115
+ <p class="text-lg text-gray-700 mb-6 leading-relaxed">
116
+ I am a Master's degree student at Wuhan University of Technology. I'm passionate about computer vision research, particularly video understanding. My work spans video Q&A, video-text retrieval, and video captioning. I also explore large language models, prompt engineering, operator development, knowledge graphs, and Q&A systems. My goal is to develop an affordable, secure, and trustworthy generalized multimodal video model for everyone.
117
+ </p>
118
+ <div class="flex justify-center md:justify-start space-x-4">
119
+ <a href="https://huangmr0719.github.io/cv.html" class="px-6 py-2 bg-blue-600 text-white rounded-lg hover:bg-blue-700 transition flex items-center">
120
+ <i class="fas fa-file-alt mr-2"></i> CV
121
+ </a>
122
+ <a href="https://scholar.google.com/citations?user=NeFDR38AAAAJ" class="px-6 py-2 bg-gray-800 text-white rounded-lg hover:bg-gray-900 transition flex items-center">
123
+ <i class="fas fa-graduation-cap mr-2"></i> Scholar
124
+ </a>
125
+ <a href="https://github.com/Huangmr0719" class="px-6 py-2 bg-gray-700 text-white rounded-lg hover:bg-gray-800 transition flex items-center">
126
+ <i class="fab fa-github mr-2"></i> GitHub
127
+ </a>
128
+ </div>
129
+ </div>
130
+ </div>
131
+ </div>
132
+ </section>
133
+
134
+ <!-- News Section -->
135
+ <section id="news" class="py-12 md:py-20 bg-white">
136
+ <div class="max-w-6xl mx-auto px-4 sm:px-6 lg:px-8">
137
+ <h2 class="text-3xl font-bold text-center mb-12">
138
+ <span class="gradient-text">Latest News</span>
139
+ </h2>
140
+
141
+ <div class="grid grid-cols-1 md:grid-cols-2 gap-8">
142
+ <div class="bg-gray-50 p-6 rounded-xl shadow-sm">
143
+ <h3 class="text-xl font-semibold mb-4 text-blue-600">2024</h3>
144
+ <ul class="space-y-4">
145
+ <li class="news-item">
146
+ <span class="font-medium">Nov. 2024:</span> Invited as a reviewer for the ICME2025 conference.
147
+ </li>
148
+ <li class="news-item">
149
+ <span class="font-medium">Aug. 2024:</span> Incorporated a project on automotive maintenance inspection using a multimodal large model.
150
+ </li>
151
+ <li class="news-item">
152
+ <span class="font-medium">Jul. 2024:</span> Joined the SpConv operator optimization project based on MetaX MXMACA computing platform.
153
+ </li>
154
+ <li class="news-item">
155
+ <span class="font-medium">Jun. 2024:</span> Approved for the Chinese Software Copyright "Dermatology Clinical Feature Detection and Diagnosis System".
156
+ </li>
157
+ <li class="news-item">
158
+ <span class="font-medium">May 2024:</span> The paper "ST-CLIP" has been accepted at ICIC 2024 conference.
159
+ </li>
160
+ </ul>
161
+ </div>
162
+
163
+ <div class="bg-gray-50 p-6 rounded-xl shadow-sm">
164
+ <h3 class="text-xl font-semibold mb-4 text-blue-600">2023</h3>
165
+ <ul class="space-y-4">
166
+ <li class="news-item">
167
+ <span class="font-medium">Jan. 2024:</span> Invited as a reviewer for the ICME2024 conference.
168
+ </li>
169
+ <li class="news-item">
170
+ <span class="font-medium">Dec. 2023:</span> Joined the school-enterprise cooperation program of Haluo Corporation, responsible for the AI speech generation part.
171
+ </li>
172
+ <li class="news-item">
173
+ <span class="font-medium">Nov. 2023:</span> Completed the Transformer Heterogeneous Bisheng C++ Arithmetic Development Project of Huawei Crowd Intelligence Program.
174
+ </li>
175
+ <li class="news-item">
176
+ <span class="font-medium">Sept. 2023:</span> Joined a video understanding project focused on dense video captioning.
177
+ </li>
178
+ </ul>
179
+ </div>
180
+ </div>
181
+ </div>
182
+ </section>
183
+
184
+ <!-- Publications Section -->
185
+ <section id="publications" class="py-12 md:py-20 bg-gray-50">
186
+ <div class="max-w-6xl mx-auto px-4 sm:px-6 lg:px-8">
187
+ <h2 class="text-3xl font-bold text-center mb-12">
188
+ <span class="gradient-text">Publications</span>
189
+ </h2>
190
+
191
+ <div class="space-y-8">
192
+ <!-- Publication 1 -->
193
+ <div class="publication-card bg-white p-6 rounded-xl shadow-sm hover:shadow-md transition">
194
+ <div class="flex flex-col md:flex-row">
195
+ <div class="md:w-1/3 mb-4 md:mb-0">
196
+ <img src="https://huangmr0719.github.io/images/skmr.png" alt="Publication Image" class="w-full h-auto rounded-lg">
197
+ </div>
198
+ <div class="md:w-2/3 md:pl-6">
199
+ <h3 class="text-xl font-bold mb-2 text-blue-600">
200
+ <a href="https://github.com/Huangmr0719/MVSA" class="hover:underline">Scene Knowledge Enhanced Multimodal Retrieval Model for Dense Video Captioning</a>
201
+ </h3>
202
+ <p class="text-gray-700 mb-2">
203
+ Mingru Huang, <a href="http://cst.whut.edu.cn/xygk/szdw/201505/t20150527_876872.shtml" class="text-blue-600 hover:underline">Pengfei Duan</a>, Yifang Zhang, Huimin Chen, Jiawang Peng, <a href="http://cst.whut.edu.cn/xygk/szdw/201505/t20150527_876901.shtml" class="text-blue-600 hover:underline">Shengwu Xiong</a>
204
+ </p>
205
+ <p class="text-gray-600 mb-4">
206
+ 2025 Twenty-first International Conference on Intelligent Computing (ICIC 2025)
207
+ </p>
208
+ <p class="text-gray-700 mb-4">
209
+ Introducing a Memory Enhanced Visual-Speech Aggregation model for dense video captioning, inspired by cognitive informatics on human memory recall. The model enhances visual representations by merging them with relevant text features retrieved from a memory bank through multimodal retrieval involving transcribed speech and visual inputs.
210
+ </p>
211
+ <div class="flex flex-wrap gap-2">
212
+ <a href="https://github.com/Huangmr0719/MVSA" class="px-4 py-1 bg-blue-100 text-blue-700 rounded-full text-sm hover:bg-blue-200 transition flex items-center">
213
+ <i class="fas fa-link mr-1"></i> Project Page
214
+ </a>
215
+ <a href="https://arxiv.org/pdf/.pdf" class="px-4 py-1 bg-gray-100 text-gray-700 rounded-full text-sm hover:bg-gray-200 transition flex items-center">
216
+ <i class="fas fa-file-pdf mr-1"></i> PDF
217
+ </a>
218
+ <a href="https://arxiv.org/abs/" class="px-4 py-1 bg-gray-100 text-gray-700 rounded-full text-sm hover:bg-gray-200 transition flex items-center">
219
+ <i class="fas fa-book-open mr-1"></i> arXiv
220
+ </a>
221
+ </div>
222
+ </div>
223
+ </div>
224
+ </div>
225
+
226
+ <!-- Publication 2 -->
227
+ <div class="publication-card bg-white p-6 rounded-xl shadow-sm hover:shadow-md transition">
228
+ <div class="flex flex-col md:flex-row">
229
+ <div class="md:w-1/3 mb-4 md:mb-0">
230
+ <img src="https://huangmr0719.github.io/images/ldit.png" alt="Publication Image" class="w-full h-auto rounded-lg">
231
+ </div>
232
+ <div class="md:w-2/3 md:pl-6">
233
+ <h3 class="text-xl font-bold mb-2 text-blue-600">
234
+ LDIT: Pseudo-Label Noise Adaptation via Label Diffusion Transformer
235
+ </h3>
236
+ <p class="text-gray-700 mb-2">
237
+ Jiawang Peng, <a href="http://cst.whut.edu.cn/xygk/szdw/201505/t20150527_876872.shtml" class="text-blue-600 hover:underline">Pengfei Duan</a>, Mingru Huang, <a href="http://cst.whut.edu.cn/xygk/szdw/201505/t20150527_876901.shtml" class="text-blue-600 hover:underline">Shengwu Xiong</a>
238
+ </p>
239
+ <p class="text-gray-600 mb-4">
240
+ 2025 Twenty-first International Conference on Intelligent Computing (ICIC 2025)
241
+ </p>
242
+ <p class="text-gray-700 mb-4">
243
+ We reformulate label prediction as a progressive refinement process starting from an initial random guess, and propose LDiT (Label Diffusion Transformer) for pseudo-label noise adaptation. By modeling label uncertainty through a diffusion process, LDiT enables more robust learning under noisy supervision. In addition, to effectively capture the long-range dependencies in textual data, we adopt a Transformer-based latent denoising architecture with self-attention mechanisms.
244
+ </p>
245
+ <div class="flex flex-wrap gap-2">
246
+ <a href="https://arxiv.org/pdf/.pdf" class="px-4 py-1 bg-gray-100 text-gray-700 rounded-full text-sm hover:bg-gray-200 transition flex items-center">
247
+ <i class="fas fa-file-pdf mr-1"></i> PDF
248
+ </a>
249
+ <a href="https://arxiv.org/abs/" class="px-4 py-1 bg-gray-100 text-gray-700 rounded-full text-sm hover:bg-gray-200 transition flex items-center">
250
+ <i class="fas fa-book-open mr-1"></i> arXiv
251
+ </a>
252
+ </div>
253
+ </div>
254
+ </div>
255
+ </div>
256
+
257
+ <!-- Publication 3 -->
258
+ <div class="publication-card bg-white p-6 rounded-xl shadow-sm hover:shadow-md transition">
259
+ <div class="flex flex-col md:flex-row">
260
+ <div class="md:w-1/3 mb-4 md:mb-0">
261
+ <img src="https://huangmr0719.github.io/images/stclip.png" alt="Publication Image" class="w-full h-auto rounded-lg">
262
+ </div>
263
+ <div class="md:w-2/3 md:pl-6">
264
+ <h3 class="text-xl font-bold mb-2 text-blue-600">
265
+ <a href="https://link.springer.com/content/pdf/10.1007/978-981-97-5612-4.pdf" class="hover:underline">ST-CLIP: Spatio-Temporal enhanced CLIP towards Dense Video Captioning</a>
266
+ </h3>
267
+ <p class="text-gray-700 mb-2">
268
+ Huimin Chen, <a href="http://cst.whut.edu.cn/xygk/szdw/201505/t20150527_876872.shtml" class="text-blue-600 hover:underline">Pengfei Duan</a>, Mingru Huang, Jingyi Guo, <a href="http://cst.whut.edu.cn/xygk/szdw/201505/t20150527_876901.shtml" class="text-blue-600 hover:underline">Shengwu Xiong</a>
269
+ </p>
270
+ <p class="text-gray-600 mb-4">
271
+ 2024 Twentieth International Conference on Intelligent Computing (ICIC 2024)
272
+ </p>
273
+ <p class="text-gray-700 mb-4">
274
+ Proposing a new factorized spatio-temporal self-attention paradigm to address inaccurate event descriptions caused by insufficient temporal relationship modeling between video frames and apply it to dense video captioning tasks.
275
+ </p>
276
+ <div class="flex flex-wrap gap-2">
277
+ <a href="https://link.springer.com/content/pdf/10.1007/978-981-97-5612-4.pdf" class="px-4 py-1 bg-blue-100 text-blue-700 rounded-full text-sm hover:bg-blue-200 transition flex items-center">
278
+ <i class="fas fa-link mr-1"></i> Project Page
279
+ </a>
280
+ <a href="https://arxiv.org/pdf/.pdf" class="px-4 py-1 bg-gray-100 text-gray-700 rounded-full text-sm hover:bg-gray-200 transition flex items-center">
281
+ <i class="fas fa-file-pdf mr-1"></i> PDF
282
+ </a>
283
+ <a href="https://arxiv.org/abs/" class="px-4 py-1 bg-gray-100 text-gray-700 rounded-full text-sm hover:bg-gray-200 transition flex items-center">
284
+ <i class="fas fa-book-open mr-1"></i> arXiv
285
+ </a>
286
+ </div>
287
+ </div>
288
+ </div>
289
+ </div>
290
+ </div>
291
+ </div>
292
+ </section>
293
+
294
+ <!-- Contact Section -->
295
+ <section id="contact" class="py-12 md:py-20 bg-white">
296
+ <div class="max-w-6xl mx-auto px-4 sm:px-6 lg:px-8">
297
+ <h2 class="text-3xl font-bold text-center mb-12">
298
+ <span class="gradient-text">Get In Touch</span>
299
+ </h2>
300
+
301
+ <div class="flex flex-col items-center">
302
+ <p class="text-lg text-gray-700 mb-8 max-w-2xl text-center">
303
+ I'm always open to discussing research collaborations, new projects, or opportunities. Feel free to reach out!
304
+ </p>
305
+
306
+ <div class="flex space-x-6 mb-8">
307
+ <a href="mailto:[email protected]" class="w-12 h-12 rounded-full bg-blue-100 flex items-center justify-center text-blue-600 hover:bg-blue-200 transition">
308
+ <i class="fas fa-envelope text-xl"></i>
309
+ </a>
310
+ <a href="https://github.com/Huangmr0719" class="w-12 h-12 rounded-full bg-gray-100 flex items-center justify-center text-gray-700 hover:bg-gray-200 transition">
311
+ <i class="fab fa-github text-xl"></i>
312
+ </a>
313
+ <a href="https://scholar.google.com/citations?user=NeFDR38AAAAJ" class="w-12 h-12 rounded-full bg-gray-100 flex items-center justify-center text-gray-700 hover:bg-gray-200 transition">
314
+ <i class="fas fa-graduation-cap text-xl"></i>
315
+ </a>
316
+ <a href="https://twitter.com/ZyKINvice" class="w-12 h-12 rounded-full bg-blue-100 flex items-center justify-center text-blue-400 hover:bg-blue-200 transition">
317
+ <i class="fab fa-twitter text-xl"></i>
318
+ </a>
319
+ </div>
320
+
321
+ <div class="text-center text-gray-500 text-sm">
322
+ <p>© 2024 Mingru Huang. All rights reserved.</p>
323
+ <p class="mt-2">Template inspired by <a href="https://github.com/keunhong/keunhong.github.io" class="text-blue-600 hover:underline">Keunhong Park</a></p>
324
+ </div>
325
+ </div>
326
+ </div>
327
+ </section>
328
+
329
+ <script>
330
+ // Mobile menu toggle
331
+ document.getElementById('menu-toggle').addEventListener('click', function() {
332
+ const menu = document.getElementById('mobile-menu');
333
+ menu.classList.toggle('hidden');
334
+ });
335
+
336
+ // Smooth scrolling for anchor links
337
+ document.querySelectorAll('a[href^="#"]').forEach(anchor => {
338
+ anchor.addEventListener('click', function (e) {
339
+ e.preventDefault();
340
+
341
+ const targetId = this.getAttribute('href');
342
+ const targetElement = document.querySelector(targetId);
343
+
344
+ if (targetElement) {
345
+ window.scrollTo({
346
+ top: targetElement.offsetTop - 80,
347
+ behavior: 'smooth'
348
+ });
349
+
350
+ // Close mobile menu if open
351
+ const menu = document.getElementById('mobile-menu');
352
+ if (!menu.classList.contains('hidden')) {
353
+ menu.classList.add('hidden');
354
+ }
355
+ }
356
+ });
357
+ });
358
+
359
+ // Add shadow to navbar on scroll
360
+ window.addEventListener('scroll', function() {
361
+ const nav = document.querySelector('nav');
362
+ if (window.scrollY > 10) {
363
+ nav.classList.add('shadow-md');
364
+ } else {
365
+ nav.classList.remove('shadow-md');
366
+ }
367
+ });
368
+ </script>
369
+ <p style="border-radius: 8px; text-align: center; font-size: 12px; color: #fff; margin-top: 16px;position: fixed; left: 8px; bottom: 8px; z-index: 10; background: rgba(0, 0, 0, 0.8); padding: 4px 8px;">Made with <img src="https://enzostvs-deepsite.hf.space/logo.svg" alt="DeepSite Logo" style="width: 16px; height: 16px; vertical-align: middle;display:inline-block;margin-right:3px;filter:brightness(0) invert(1);"><a href="https://enzostvs-deepsite.hf.space" style="color: #fff;text-decoration: underline;" target="_blank" >DeepSite</a> - 🧬 <a href="https://enzostvs-deepsite.hf.space?remix=ZyKINvice/githubpage" style="color: #fff;text-decoration: underline;" target="_blank" >Remix</a></p></body>
370
+ </html>