Upload 130 files
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +86 -0
- README.md +359 -0
- app/app.py +1050 -0
- app/run_app.py +64 -0
- app/utils/__init__.py +1 -0
- app/utils/file_utils.py +127 -0
- app/utils/image_processing.py +511 -0
- app/utils/model_loader.py +379 -0
- app/utils/onnx_processing.py +729 -0
- app/utils/ui_components.py +137 -0
- camie-tagger-v2-metadata.json +0 -0
- camie-tagger-v2.onnx +3 -0
- camie-tagger-v2.safetensors +3 -0
- config.json +21 -0
- config.yaml +26 -0
- full_validation_results.json +178 -0
- game/dev_tools.py +580 -0
- game/essence_generator.py +0 -0
- game/game.py +0 -0
- game/game_constants.py +205 -0
- game/library_system.py +2010 -0
- game/mosaics/templates/1st_costume_template.png +3 -0
- game/mosaics/templates/animal_crossing_template.png +3 -0
- game/mosaics/templates/arknights_template.png +3 -0
- game/mosaics/templates/azur_lane_template.png +3 -0
- game/mosaics/templates/blue_archive_template.png +3 -0
- game/mosaics/templates/boku_no_hero_academia_template.png +3 -0
- game/mosaics/templates/casual_template.png +3 -0
- game/mosaics/templates/chainsaw_man_template.png +3 -0
- game/mosaics/templates/character_extended_template.png +3 -0
- game/mosaics/templates/company_template.png +3 -0
- game/mosaics/templates/cosplay_template.png +3 -0
- game/mosaics/templates/disgaea_template.png +3 -0
- game/mosaics/templates/disney_template.png +3 -0
- game/mosaics/templates/dragon_ball_template.png +3 -0
- game/mosaics/templates/dungeon_and_fighter_template.png +3 -0
- game/mosaics/templates/elsword_template.png +3 -0
- game/mosaics/templates/emblem_template.png +3 -0
- game/mosaics/templates/ensemble_stars!_template.png +3 -0
- game/mosaics/templates/fate_template.png +3 -0
- game/mosaics/templates/ff14_template.png +3 -0
- game/mosaics/templates/fire_emblem_template.png +3 -0
- game/mosaics/templates/flower_template.png +3 -0
- game/mosaics/templates/food_template.png +3 -0
- game/mosaics/templates/genshin_impact_template.png +3 -0
- game/mosaics/templates/girls'_frontline_template.png +3 -0
- game/mosaics/templates/girls_und_panzer_template.png +3 -0
- game/mosaics/templates/granblue_fantasy_template.png +3 -0
- game/mosaics/templates/honkai_impact_template.png +3 -0
- game/mosaics/templates/honkai_star_rail_template.png +3 -0
.gitattributes
CHANGED
@@ -119,3 +119,89 @@ camie-tagger-v2/images/training_monitor_overview.png filter=lfs diff=lfs merge=l
|
|
119 |
camie-tagger-v2/images/training_monitor_predictions.png filter=lfs diff=lfs merge=lfs -text
|
120 |
camie-tagger-v2/images/training_monitor_selection.png filter=lfs diff=lfs merge=lfs -text
|
121 |
camie-tagger-v2/training/val_dataset.csv filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
119 |
camie-tagger-v2/images/training_monitor_predictions.png filter=lfs diff=lfs merge=lfs -text
|
120 |
camie-tagger-v2/images/training_monitor_selection.png filter=lfs diff=lfs merge=lfs -text
|
121 |
camie-tagger-v2/training/val_dataset.csv filter=lfs diff=lfs merge=lfs -text
|
122 |
+
game/mosaics/templates/1st_costume_template.png filter=lfs diff=lfs merge=lfs -text
|
123 |
+
game/mosaics/templates/animal_crossing_template.png filter=lfs diff=lfs merge=lfs -text
|
124 |
+
game/mosaics/templates/arknights_template.png filter=lfs diff=lfs merge=lfs -text
|
125 |
+
game/mosaics/templates/azur_lane_template.png filter=lfs diff=lfs merge=lfs -text
|
126 |
+
game/mosaics/templates/blue_archive_template.png filter=lfs diff=lfs merge=lfs -text
|
127 |
+
game/mosaics/templates/boku_no_hero_academia_template.png filter=lfs diff=lfs merge=lfs -text
|
128 |
+
game/mosaics/templates/casual_template.png filter=lfs diff=lfs merge=lfs -text
|
129 |
+
game/mosaics/templates/chainsaw_man_template.png filter=lfs diff=lfs merge=lfs -text
|
130 |
+
game/mosaics/templates/character_extended_template.png filter=lfs diff=lfs merge=lfs -text
|
131 |
+
game/mosaics/templates/company_template.png filter=lfs diff=lfs merge=lfs -text
|
132 |
+
game/mosaics/templates/cosplay_template.png filter=lfs diff=lfs merge=lfs -text
|
133 |
+
game/mosaics/templates/disgaea_template.png filter=lfs diff=lfs merge=lfs -text
|
134 |
+
game/mosaics/templates/disney_template.png filter=lfs diff=lfs merge=lfs -text
|
135 |
+
game/mosaics/templates/dragon_ball_template.png filter=lfs diff=lfs merge=lfs -text
|
136 |
+
game/mosaics/templates/dungeon_and_fighter_template.png filter=lfs diff=lfs merge=lfs -text
|
137 |
+
game/mosaics/templates/elsword_template.png filter=lfs diff=lfs merge=lfs -text
|
138 |
+
game/mosaics/templates/emblem_template.png filter=lfs diff=lfs merge=lfs -text
|
139 |
+
game/mosaics/templates/ensemble_stars!_template.png filter=lfs diff=lfs merge=lfs -text
|
140 |
+
game/mosaics/templates/fate_template.png filter=lfs diff=lfs merge=lfs -text
|
141 |
+
game/mosaics/templates/ff14_template.png filter=lfs diff=lfs merge=lfs -text
|
142 |
+
game/mosaics/templates/fire_emblem_template.png filter=lfs diff=lfs merge=lfs -text
|
143 |
+
game/mosaics/templates/flower_template.png filter=lfs diff=lfs merge=lfs -text
|
144 |
+
game/mosaics/templates/food_template.png filter=lfs diff=lfs merge=lfs -text
|
145 |
+
game/mosaics/templates/genshin_impact_template.png filter=lfs diff=lfs merge=lfs -text
|
146 |
+
game/mosaics/templates/girls_und_panzer_template.png filter=lfs diff=lfs merge=lfs -text
|
147 |
+
game/mosaics/templates/girls'_frontline_template.png filter=lfs diff=lfs merge=lfs -text
|
148 |
+
game/mosaics/templates/granblue_fantasy_template.png filter=lfs diff=lfs merge=lfs -text
|
149 |
+
game/mosaics/templates/honkai_impact_template.png filter=lfs diff=lfs merge=lfs -text
|
150 |
+
game/mosaics/templates/honkai_star_rail_template.png filter=lfs diff=lfs merge=lfs -text
|
151 |
+
game/mosaics/templates/housamo_template.png filter=lfs diff=lfs merge=lfs -text
|
152 |
+
game/mosaics/templates/idolmaster_template.png filter=lfs diff=lfs merge=lfs -text
|
153 |
+
game/mosaics/templates/jojo_template.png filter=lfs diff=lfs merge=lfs -text
|
154 |
+
game/mosaics/templates/kancolle_template.png filter=lfs diff=lfs merge=lfs -text
|
155 |
+
game/mosaics/templates/kemono_friends_template.png filter=lfs diff=lfs merge=lfs -text
|
156 |
+
game/mosaics/templates/kirby_template.png filter=lfs diff=lfs merge=lfs -text
|
157 |
+
game/mosaics/templates/league_of_legends_template.png filter=lfs diff=lfs merge=lfs -text
|
158 |
+
game/mosaics/templates/love_live!_template.png filter=lfs diff=lfs merge=lfs -text
|
159 |
+
game/mosaics/templates/madoka_magica_template.png filter=lfs diff=lfs merge=lfs -text
|
160 |
+
game/mosaics/templates/main_template.gif filter=lfs diff=lfs merge=lfs -text
|
161 |
+
game/mosaics/templates/manga_template.png filter=lfs diff=lfs merge=lfs -text
|
162 |
+
game/mosaics/templates/mega_man_template.png filter=lfs diff=lfs merge=lfs -text
|
163 |
+
game/mosaics/templates/meme_template.png filter=lfs diff=lfs merge=lfs -text
|
164 |
+
game/mosaics/templates/monster_girl_encyclopedia_template.png filter=lfs diff=lfs merge=lfs -text
|
165 |
+
game/mosaics/templates/naruto_template.png filter=lfs diff=lfs merge=lfs -text
|
166 |
+
game/mosaics/templates/new_year_template.png filter=lfs diff=lfs merge=lfs -text
|
167 |
+
game/mosaics/templates/nijisanji_template.png filter=lfs diff=lfs merge=lfs -text
|
168 |
+
game/mosaics/templates/nikke_template.png filter=lfs diff=lfs merge=lfs -text
|
169 |
+
game/mosaics/templates/omori_template.png filter=lfs diff=lfs merge=lfs -text
|
170 |
+
game/mosaics/templates/pokemon_template.png filter=lfs diff=lfs merge=lfs -text
|
171 |
+
game/mosaics/templates/precure_template.png filter=lfs diff=lfs merge=lfs -text
|
172 |
+
game/mosaics/templates/princess_connect!_template.png filter=lfs diff=lfs merge=lfs -text
|
173 |
+
game/mosaics/templates/punishing_gray_raven_template.png filter=lfs diff=lfs merge=lfs -text
|
174 |
+
game/mosaics/templates/ragnarok_online_template.png filter=lfs diff=lfs merge=lfs -text
|
175 |
+
game/mosaics/templates/sailor_moon_template.png filter=lfs diff=lfs merge=lfs -text
|
176 |
+
game/mosaics/templates/sao_template.png filter=lfs diff=lfs merge=lfs -text
|
177 |
+
game/mosaics/templates/sekaiju_template.png filter=lfs diff=lfs merge=lfs -text
|
178 |
+
game/mosaics/templates/senran_kagura_template.png filter=lfs diff=lfs merge=lfs -text
|
179 |
+
game/mosaics/templates/series_template.png filter=lfs diff=lfs merge=lfs -text
|
180 |
+
game/mosaics/templates/show_by_rock!!_template.png filter=lfs diff=lfs merge=lfs -text
|
181 |
+
game/mosaics/templates/skullgirls_template.png filter=lfs diff=lfs merge=lfs -text
|
182 |
+
game/mosaics/templates/sousou_no_frierem_template.png filter=lfs diff=lfs merge=lfs -text
|
183 |
+
game/mosaics/templates/splatoon_template.png filter=lfs diff=lfs merge=lfs -text
|
184 |
+
game/mosaics/templates/stand_template.png filter=lfs diff=lfs merge=lfs -text
|
185 |
+
game/mosaics/templates/street_fighter_template.png filter=lfs diff=lfs merge=lfs -text
|
186 |
+
game/mosaics/templates/style_template.png filter=lfs diff=lfs merge=lfs -text
|
187 |
+
game/mosaics/templates/symbol_template.png filter=lfs diff=lfs merge=lfs -text
|
188 |
+
game/mosaics/templates/tarot_template.png filter=lfs diff=lfs merge=lfs -text
|
189 |
+
game/mosaics/templates/tf2_template.png filter=lfs diff=lfs merge=lfs -text
|
190 |
+
game/mosaics/templates/umamusume_template.png filter=lfs diff=lfs merge=lfs -text
|
191 |
+
game/mosaics/templates/vocaloid_template.png filter=lfs diff=lfs merge=lfs -text
|
192 |
+
game/mosaics/templates/vtuber_template.png filter=lfs diff=lfs merge=lfs -text
|
193 |
+
game/mosaics/templates/warship_girls_r_template.png filter=lfs diff=lfs merge=lfs -text
|
194 |
+
game/mosaics/templates/weapon_template.png filter=lfs diff=lfs merge=lfs -text
|
195 |
+
game/mosaics/templates/wuthering_wave_template.png filter=lfs diff=lfs merge=lfs -text
|
196 |
+
game/mosaics/templates/xenoblade_template.png filter=lfs diff=lfs merge=lfs -text
|
197 |
+
images/app_screenshot.png filter=lfs diff=lfs merge=lfs -text
|
198 |
+
images/collect_tags.PNG filter=lfs diff=lfs merge=lfs -text
|
199 |
+
images/essence_tab.PNG filter=lfs diff=lfs merge=lfs -text
|
200 |
+
images/lamp_essence.jpg filter=lfs diff=lfs merge=lfs -text
|
201 |
+
images/library.PNG filter=lfs diff=lfs merge=lfs -text
|
202 |
+
images/mosaics.PNG filter=lfs diff=lfs merge=lfs -text
|
203 |
+
images/tag_results_example.png filter=lfs diff=lfs merge=lfs -text
|
204 |
+
images/training_monitor_overview.png filter=lfs diff=lfs merge=lfs -text
|
205 |
+
images/training_monitor_predictions.png filter=lfs diff=lfs merge=lfs -text
|
206 |
+
images/training_monitor_selection.png filter=lfs diff=lfs merge=lfs -text
|
207 |
+
training/val_dataset.csv filter=lfs diff=lfs merge=lfs -text
|
README.md
CHANGED
@@ -1,3 +1,362 @@
|
|
1 |
---
|
2 |
license: gpl-3.0
|
|
|
|
|
|
|
|
|
|
|
3 |
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
---
|
2 |
license: gpl-3.0
|
3 |
+
datasets:
|
4 |
+
- p1atdev/danbooru-2024
|
5 |
+
language:
|
6 |
+
- en
|
7 |
+
pipeline_tag: image-classification
|
8 |
---
|
9 |
+
|
10 |
+
# Camie Tagger v2
|
11 |
+
|
12 |
+
An advanced deep learning model for automatically tagging anime/manga illustrations with relevant tags across multiple categories, achieving **67.3% micro F1 score** (50.6% macro F1 score using the macro optimized threshold preset) across 70,527 possible tags on a test set of 20,116 samples. Now with Vision Transformer backbone and significantly improved performance.
|
13 |
+
|
14 |
+
## 🚀 What's New in v2
|
15 |
+
|
16 |
+
### Major Performance Improvements
|
17 |
+
- **Micro F1**: 58.1% → **67.3%** (+9.2 percentage points)
|
18 |
+
- **Macro F1**: 31.5% → **50.6%** (+19.1 percentage points)
|
19 |
+
- **Model Size**: 424M → **143M parameters** (-66% reduction)
|
20 |
+
- **Architecture**: Switched from EfficientNetV2-L to Vision Transformer (ViT) backbone
|
21 |
+
- **Simplified Design**: Streamlined from dual-stage to single refined prediction model
|
22 |
+
|
23 |
+
### Training Innovations
|
24 |
+
- **Multi-Resolution Training**: Progressive scaling from 384px → 512px resolution
|
25 |
+
- **IRFS (Instance-Aware Repeat Factor Sampling)**: Significant macro F1 improvements for rare tags
|
26 |
+
- **Adaptive Training**: Models quickly adapt to resolution/distribution changes after initial pretraining
|
27 |
+
|
28 |
+
*v2 demonstrates that Vision Transformers can achieve superior anime image tagging performance with fewer parameters and cleaner architecture.*
|
29 |
+
|
30 |
+
## 🔑 Key Highlights
|
31 |
+
|
32 |
+
- **Efficient Training**: Completed on just a single RTX 3060 GPU (12GB VRAM)
|
33 |
+
- **Fast Adaptation**: Models adapt to new resolutions/distributions within partial epochs after pretraining
|
34 |
+
- **Comprehensive Coverage**: 70,527 tags across 7 categories (general, character, copyright, artist, meta, rating, year)
|
35 |
+
- **Modern Architecture**: Vision Transformer backbone with cross-attention refinement
|
36 |
+
- **User-Friendly Interface**: Easy-to-use application with customizable thresholds and tag collection game
|
37 |
+
|
38 |
+
## ✨ Features
|
39 |
+
|
40 |
+
- **Multi-category tagging system**: Handles general tags, characters, copyright (series), artists, meta information, and content ratings
|
41 |
+
- **High performance**: 67.3% micro F1 score (50.6% macro F1) across 70,527 possible tags
|
42 |
+
- **Windows compatibility**: Works on Windows without Flash Attention requirements
|
43 |
+
- **Streamlit web interface**: User-friendly UI for uploading and analyzing images and a tag collection game
|
44 |
+
- **Adjustable threshold profiles**: Micro, Macro, Balanced, Category-specific, High Precision, and High Recall profiles
|
45 |
+
- **Fine-grained control**: Per-category threshold adjustments for precision-recall tradeoffs
|
46 |
+
- **Safetensors and ONNX**: Original pickle files available in /models
|
47 |
+
- **Vision Transformer Backbone**: Modern architecture with superior performance-to-parameter ratio
|
48 |
+
|
49 |
+
## 📊 Performance Analysis
|
50 |
+
|
51 |
+
### Complete v1 vs v2 Performance Comparison
|
52 |
+
|
53 |
+
| CATEGORY | v1 Micro F1 | v2 Micro F1 | Micro Δ | v1 Macro F1 | v2 Macro F1 | Macro Δ |
|
54 |
+
|----------|-------------|-------------|---------|-------------|-------------|---------|
|
55 |
+
| **Overall** | 58.1% | **67.3%** | **+9.2pp** | 31.5% | **50.6%** | **+19.1pp** |
|
56 |
+
| **Artist** | 47.4% | **70.0%** | **+22.6pp** | 29.8% | **64.4%** | **+34.6pp** |
|
57 |
+
| **Character** | 74.6% | **83.4%** | **+8.8pp** | 47.8% | **64.5%** | **+16.7pp** |
|
58 |
+
| **Copyright** | 76.3% | **86.6%** | **+10.3pp** | 37.7% | **53.1%** | **+15.4pp** |
|
59 |
+
| **General** | 57.6% | **66.4%** | **+8.8pp** | 20.4% | **27.4%** | **+7.0pp** |
|
60 |
+
| **Meta** | 55.7% | **61.2%** | **+5.5pp** | 14.4% | **19.2%** | **+4.8pp** |
|
61 |
+
| **Rating** | 77.9% | **83.1%** | **+5.2pp** | 76.8% | **81.8%** | **+5.0pp** |
|
62 |
+
| **Year** | 33.1% | **30.8%** | **-2.3pp** | 28.6% | **21.3%** | **-7.3pp** |
|
63 |
+
|
64 |
+
*Both using the balanced preset.*
|
65 |
+
|
66 |
+
### Key Performance Insights
|
67 |
+
|
68 |
+
The v2 model shows remarkable improvements across nearly all categories:
|
69 |
+
|
70 |
+
- **Artist Recognition**: Massive +22.6pp micro F1 improvement, indicating much better artist identification
|
71 |
+
- **Character Detection**: Strong +8.8pp micro F1 and +16.7pp macro F1 gains
|
72 |
+
- **Copyright Recognition**: Excellent +10.3pp micro F1 improvement for series identification
|
73 |
+
- **General Tags**: Consistent +8.8pp micro F1 improvement for visual attributes
|
74 |
+
- **Overall Macro F1**: Exceptional +19.1pp improvement shows much better rare tag recognition
|
75 |
+
|
76 |
+
Only the year category shows slight regression, likely due to the reduced model complexity making temporal classification more challenging.
|
77 |
+
|
78 |
+
### Detailed v2 Performance
|
79 |
+
|
80 |
+
#### MACRO OPTIMIZED (Recommended)
|
81 |
+
|
82 |
+
| CATEGORY | THRESHOLD | MICRO-F1 | MACRO-F1 |
|
83 |
+
|----------|-----------|----------|----------|
|
84 |
+
| **overall** | 0.492 | **60.9%** | **50.6%** |
|
85 |
+
| artist | 0.492 | 62.3% | 66.1% |
|
86 |
+
| character | 0.492 | 79.9% | 66.2% |
|
87 |
+
| copyright | 0.492 | 81.8% | 56.2% |
|
88 |
+
| general | 0.492 | 60.2% | 34.6% |
|
89 |
+
| meta | 0.492 | 56.3% | 23.7% |
|
90 |
+
| rating | 0.492 | 78.7% | 77.5% |
|
91 |
+
| year | 0.492 | 37.2% | 32.6% |
|
92 |
+
|
93 |
+
#### MICRO OPTIMIZED
|
94 |
+
|
95 |
+
| CATEGORY | THRESHOLD | MICRO-F1 | MACRO-F1 |
|
96 |
+
|----------|-----------|----------|----------|
|
97 |
+
| **overall** | 0.614 | **67.3%** | **46.3%** |
|
98 |
+
| artist | 0.614 | 70.0% | 64.4% |
|
99 |
+
| character | 0.614 | 83.4% | 64.5% |
|
100 |
+
| copyright | 0.614 | 86.6% | 53.1% |
|
101 |
+
| general | 0.614 | 66.4% | 27.4% |
|
102 |
+
| meta | 0.614 | 61.2% | 19.2% |
|
103 |
+
| rating | 0.614 | 83.1% | 81.8% |
|
104 |
+
| year | 0.614 | 30.8% | 21.3% |
|
105 |
+
|
106 |
+
The model performs exceptionally well on character identification (83.4% F1 across 26,968 tags), copyright/series detection (86.6% F1 across 5,364 tags), and content rating classification (83.1% F1 across 4 tags).
|
107 |
+
|
108 |
+
### Real-world Tag Accuracy
|
109 |
+
|
110 |
+
The macro optimized threshold is recommended as many "false positives" according to the benchmark are actually correct tags missing from the Danbooru dataset. The model frequently identifies appropriate tags that weren't included in the original tagging, making perceived accuracy higher than formal metrics suggest.
|
111 |
+
|
112 |
+
## 🧠 Architecture Overview
|
113 |
+
|
114 |
+
### Vision Transformer Backbone
|
115 |
+
- **Base Model**: Vision Transformer (ViT) with patch-based image processing
|
116 |
+
- **Dual Output**: Patch feature map + CLS token for comprehensive image understanding
|
117 |
+
- **Efficient Design**: 86.4M backbone parameters vs previous 214M+ classifier layers
|
118 |
+
|
119 |
+
### Refined Prediction Pipeline
|
120 |
+
1. **Feature Extraction**: ViT processes image into patch tokens and global CLS token
|
121 |
+
2. **Global Pooling**: Combines mean-pooled patches with CLS token (dual-pool approach)
|
122 |
+
3. **Initial Predictions**: Shared weights between tag embeddings and classification layer
|
123 |
+
4. **Candidate Selection**: Top-K tag selection based on initial confidence
|
124 |
+
5. **Cross-Attention**: Tag embeddings attend to image patch features
|
125 |
+
6. **Final Scoring**: Refined predictions for selected candidate tags
|
126 |
+
|
127 |
+
### Key Improvements
|
128 |
+
- **Shared Weights**: Tag embeddings directly used for initial classification
|
129 |
+
- **Simplified Pipeline**: Single refined prediction stage (vs previous initial + refined)
|
130 |
+
- **Native PyTorch**: Uses optimized MultiheadAttention instead of Flash Attention
|
131 |
+
- **Custom Embeddings**: No dependency on external models like CLIP
|
132 |
+
- **Gradient Checkpointing**: Memory-efficient training on consumer hardware
|
133 |
+
|
134 |
+
## 🛠️ Training Details
|
135 |
+
|
136 |
+
### Multi-Resolution Training Strategy
|
137 |
+
|
138 |
+
The model was trained using an innovative multi-resolution approach:
|
139 |
+
|
140 |
+
1. **Phase 1**: 3 epochs at 384px resolution with learning rate 1e-4
|
141 |
+
2. **Phase 2**: IRFS (Instance-Aware Repeat Factor Sampling) - addresses long-tailed distribution imbalance
|
142 |
+
3. **Phase 3**: 512px resolution fine-tuning with learning rate 5e-5
|
143 |
+
|
144 |
+
### Key Training Insights
|
145 |
+
|
146 |
+
**Rapid Adaptation**: Once the model learns good general features during initial pretraining, it adapts to resolution changes and distribution shifts very quickly - often within a fraction of an epoch rather than requiring full retraining.
|
147 |
+
|
148 |
+
**IRFS Benefits**: Instance-Aware Repeat Factor Sampling provided substantial macro F1 improvements by addressing the long-tailed distribution of anime tags, where instance counts vary dramatically between classes even with similar image counts.
|
149 |
+
|
150 |
+
**Efficient Scaling**: The ViT architecture generalizes resolution and capacity changes to the entire dataset, making incremental training highly efficient.
|
151 |
+
|
152 |
+
#### Training Data:
|
153 |
+
- **Training subset**: 2,000,000 images
|
154 |
+
- **Training duration**: 3+ epochs with multi-resolution scaling
|
155 |
+
- **Final resolution**: 512x512 pixels
|
156 |
+
|
157 |
+
## 🛠️ Requirements
|
158 |
+
|
159 |
+
- **Python 3.11.9 specifically** (newer versions are incompatible)
|
160 |
+
- PyTorch 1.10+
|
161 |
+
- Streamlit
|
162 |
+
- PIL/Pillow
|
163 |
+
- NumPy
|
164 |
+
- Flash Attention (note: doesn't work properly on Windows; it is only needed for the refined model, which isn't heavily supported anyway)
|
165 |
+
|
166 |
+
## 🔧 Usage
|
167 |
+
|
168 |
+
Setup the application and game by executing `setup.bat`. This installs the required virtual environment:
|
169 |
+
|
170 |
+
- Upload your own images or select from example images
|
171 |
+
- Choose different threshold profiles
|
172 |
+
- Adjust category-specific thresholds
|
173 |
+
- View predictions organized by category
|
174 |
+
- Filter and sort tags based on confidence
|
175 |
+
|
176 |
+
Use run_app.bat and run_game.bat.
|
177 |
+
|
178 |
+
## 🎮 Tag Collector Game (Camie Collector)
|
179 |
+
|
180 |
+
Introducing a Tagging game - a gamified approach to anime image tagging that helps you understand the performance and limits of the model. This was a shower thought gone too far! Lots of Project Moon references.
|
181 |
+
|
182 |
+
### How to Play:
|
183 |
+
1. Upload an image
|
184 |
+
2. Scan for tags to discover them
|
185 |
+

|
186 |
+
3. Earn TagCoins for new discoveries
|
187 |
+
4. Spend TagCoins on upgrades to lower the threshold
|
188 |
+

|
189 |
+
5. Lower thresholds reveal rarer tags!
|
190 |
+
6. Collect sets of related tags for bonuses and reveal unique mosaics!
|
191 |
+

|
192 |
+
7. Visit the Library System to discover unique tags (not collect)
|
193 |
+

|
194 |
+
8. Use collected tags to either inspire new searches or generate essence
|
195 |
+
9. Use Enkephalin to generate Tag Essences
|
196 |
+

|
197 |
+
10. Use the Tag Essence Generator to collect the tag and tags related to it. Lamp Essence:
|
198 |
+

|
199 |
+
|
200 |
+
## 🖥️ Web Interface Guide
|
201 |
+
|
202 |
+
The interface is divided into three main sections:
|
203 |
+
|
204 |
+
1. **Model Selection** (Sidebar):
|
205 |
+
- Choose between Full Model, Initial-only Model or ONNX accelerated (initial only)
|
206 |
+
- View model information and memory usage
|
207 |
+
|
208 |
+
2. **Image Upload** (Left Panel):
|
209 |
+
- Upload your own images or select from examples
|
210 |
+
- View the selected image
|
211 |
+
|
212 |
+
3. **Tagging Controls** (Right Panel):
|
213 |
+
- Select threshold profile
|
214 |
+
- Adjust thresholds for precision-recall and micro/macro tradeoff
|
215 |
+
- Configure display options
|
216 |
+
- View predictions organized by category
|
217 |
+
|
218 |
+
### Display Options:
|
219 |
+
|
220 |
+
- **Show all tags**: Display all tags including those below threshold
|
221 |
+
- **Compact view**: Hide progress bars for cleaner display
|
222 |
+
- **Minimum confidence**: Filter out low-confidence predictions
|
223 |
+
- **Category selection**: Choose which categories to include in the summary
|
224 |
+
|
225 |
+
### Interface Screenshots:
|
226 |
+
|
227 |
+

|
228 |
+
|
229 |
+

|
230 |
+
|
231 |
+
## 🧠 Training Details
|
232 |
+
|
233 |
+
### Dataset
|
234 |
+
|
235 |
+
The model was trained on a carefully filtered subset of the [Danbooru 2024 dataset](https://huggingface.co/datasets/p1atdev/danbooru-2024), which contains a vast collection of anime/manga illustrations with comprehensive tagging.
|
236 |
+
|
237 |
+
#### Filtering Process:
|
238 |
+
|
239 |
+
The dataset was filtered with the following constraints:
|
240 |
+
|
241 |
+
```python
|
242 |
+
# Minimum tags per category required for each image
|
243 |
+
min_tag_counts = {
|
244 |
+
'general': 25,
|
245 |
+
'character': 1,
|
246 |
+
'copyright': 1,
|
247 |
+
'artist': 0,
|
248 |
+
'meta': 0
|
249 |
+
}
|
250 |
+
|
251 |
+
# Minimum samples per tag required for tag to be included
|
252 |
+
min_tag_samples = {
|
253 |
+
'general': 20,
|
254 |
+
'character': 40,
|
255 |
+
'copyright': 50,
|
256 |
+
'artist': 200,
|
257 |
+
'meta': 50
|
258 |
+
}
|
259 |
+
```
|
260 |
+
|
261 |
+
This filtering process:
|
262 |
+
1. First removed low-sample tags (tags with fewer occurrences than specified in `min_tag_samples`)
|
263 |
+
2. Then removed images with insufficient tags per category (as specified in `min_tag_counts`)
|
264 |
+
|
265 |
+
#### Training Data:
|
266 |
+
|
267 |
+
- **Starting dataset size**: ~3,000,000 filtered images
|
268 |
+
- **Training subset**: 2,000,000 images (due to storage and time constraints)
|
269 |
+
- **Training duration**: 3.5 epochs
|
270 |
+
|
271 |
+
#### Preprocessing:
|
272 |
+
|
273 |
+
Images were preprocessed with minimal transformations:
|
274 |
+
- Tensor normalization (scaled to 0-1 range)
|
275 |
+
- ImageNet normalization.
|
276 |
+
- Resized while maintaining original aspect ratio
|
277 |
+
- No additional augmentations were applied
|
278 |
+
|
279 |
+
#### Tag Categories:
|
280 |
+
|
281 |
+
The model recognizes tags across these categories:
|
282 |
+
- **General**: Visual elements, concepts, clothing, etc. (30,841 tags)
|
283 |
+
- **Character**: Individual characters appearing in the image (26,968 tags)
|
284 |
+
- **Copyright**: Source material (anime, manga, game) (5,364 tags)
|
285 |
+
- **Artist**: Creator of the artwork (7,007 tags)
|
286 |
+
- **Meta**: Meta information about the image (323 tags)
|
287 |
+
- **Rating**: Content rating (4 tags)
|
288 |
+
- **Year**: Year of upload (20 tags)
|
289 |
+
|
290 |
+
All supported tags are stored in `model/metadata.json`, which maps tag IDs to their names and categories.
|
291 |
+
|
292 |
+
### Training Notebooks
|
293 |
+
|
294 |
+
The repository includes the main training notebook:
|
295 |
+
|
296 |
+
1. **camie-tagger-v2.ipynb**:
|
297 |
+
- Main training notebook
|
298 |
+
- Dataset loading and preprocessing
|
299 |
+
- Model initialization
|
300 |
+
- Initial training loop with DeepSpeed integration
|
301 |
+
- Tag selection optimization
|
302 |
+
- Metric tracking and visualization
|
303 |
+
|
304 |
+
### Training Monitor
|
305 |
+
|
306 |
+
The project includes a real-time training monitor accessible via browser at `localhost:5000` during training:
|
307 |
+
|
308 |
+
#### Performance Tips:
|
309 |
+
|
310 |
+
⚠️ **Important**: For optimal training speed, keep VSCode minimized and the training monitor open in your browser. This can improve iteration speed by **3-5x** due to how the Windows/WSL graphics stack handles window focus and CUDA kernel execution.
|
311 |
+
|
312 |
+
#### Monitor Features:
|
313 |
+
|
314 |
+
The training monitor provides three main views:
|
315 |
+
|
316 |
+
##### 1. Overview Tab:
|
317 |
+
|
318 |
+

|
319 |
+
|
320 |
+
- **Training Progress**: Real-time metrics including epoch, batch, speed, and time estimates
|
321 |
+
- **Loss Chart**: Training and validation loss visualization
|
322 |
+
- **F1 Scores**: Initial and refined F1 metrics for both training and validation
|
323 |
+
|
324 |
+
##### 2. Predictions Tab:
|
325 |
+
|
326 |
+

|
327 |
+
|
328 |
+
- **Image Preview**: Shows the current sample being analyzed
|
329 |
+
- **Prediction Controls**: Toggle between initial and refined predictions
|
330 |
+
- **Tag Analysis**:
|
331 |
+
- Color-coded tag results (correct, incorrect, missing)
|
332 |
+
- Confidence visualization with probability bars
|
333 |
+
- Category-based organization
|
334 |
+
- Filtering options for error analysis
|
335 |
+
|
336 |
+
##### 3. Selection Analysis Tab:
|
337 |
+
|
338 |
+

|
339 |
+
|
340 |
+
- **Selection Metrics**: Statistics on tag selection quality
|
341 |
+
- Ground truth recall
|
342 |
+
- Average probability for ground truth vs. non-ground truth tags
|
343 |
+
- Unique tags selected
|
344 |
+
- **Selection Graph**: Trends in selection quality over time
|
345 |
+
- **Selected Tags Details**: Detailed view of model-selected tags with confidence scores
|
346 |
+
|
347 |
+
The monitor provides invaluable insights into how the two-stage prediction model is performing, particularly how the tag selection process is working between the initial and refined prediction stages.
|
348 |
+
|
349 |
+
### Training Notes:
|
350 |
+
|
351 |
+
- Training notebooks may require WSL and 32GB+ of RAM to handle the dataset
|
352 |
+
- With more computational resources, the model could be trained longer on the full dataset
|
353 |
+
|
354 |
+
## 🙏 Acknowledgments
|
355 |
+
|
356 |
+
- Claude Sonnet 3.5 and 4 for development assistance and architectural insights
|
357 |
+
- [Vision Transformer](https://arxiv.org/abs/2010.11929) for the foundational architecture
|
358 |
+
- [Danbooru](https://danbooru.donmai.us/) for the comprehensive tagged anime image dataset
|
359 |
+
- [p1atdev](https://huggingface.co/p1atdev) for the processed Danbooru 2024 dataset
|
360 |
+
- [IRFS paper](https://arxiv.org/abs/2305.08069) for Instance-Aware Repeat Factor Sampling methodology
|
361 |
+
- PyTorch team for optimized attention implementations and gradient checkpointing
|
362 |
+
- The open-source ML community for foundational tools and methods
|
app/app.py
ADDED
@@ -0,0 +1,1050 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
|
2 |
+
"""
|
3 |
+
Camie-Tagger-V2 Application
|
4 |
+
A Streamlit web app for tagging images using an AI model.
|
5 |
+
"""
|
6 |
+
|
7 |
+
import streamlit as st
|
8 |
+
import os
|
9 |
+
import sys
|
10 |
+
import traceback
|
11 |
+
import tempfile
|
12 |
+
import time
|
13 |
+
import platform
|
14 |
+
import subprocess
|
15 |
+
import webbrowser
|
16 |
+
import glob
|
17 |
+
import numpy as np
|
18 |
+
import matplotlib.pyplot as plt
|
19 |
+
import io
|
20 |
+
import base64
|
21 |
+
import json
|
22 |
+
from matplotlib.colors import LinearSegmentedColormap
|
23 |
+
from PIL import Image
|
24 |
+
from pathlib import Path
|
25 |
+
|
26 |
+
# Add parent directory to path to allow importing from utils
|
27 |
+
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
28 |
+
|
29 |
+
# Import utilities
|
30 |
+
from utils.image_processing import process_image, batch_process_images
|
31 |
+
from utils.file_utils import save_tags_to_file, get_default_save_locations
|
32 |
+
from utils.ui_components import display_progress_bar, show_example_images, display_batch_results
|
33 |
+
from utils.onnx_processing import batch_process_images_onnx
|
34 |
+
|
35 |
+
# Define the model directory
# NOTE(review): resolves two directory levels up from this file (the repo
# root), where the .onnx/.safetensors weights and metadata JSON are expected.
MODEL_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
print(f"Using model directory: {MODEL_DIR}")

# Define threshold profile descriptions and explanations
# One-line summaries for each threshold profile, keyed by the UI-facing
# profile name used throughout the app.
threshold_profile_descriptions = {
    "Micro Optimized": "Maximizes micro-averaged F1 score (best for dominant classes). Optimal for overall prediction quality.",
    "Macro Optimized": "Maximizes macro-averaged F1 score (equal weight to all classes). Better for balanced performance across all tags.",
    "Balanced": "Provides a trade-off between precision and recall with moderate thresholds. Good general-purpose setting.",
    "Overall": "Uses a single threshold value across all categories. Simplest approach for consistent behavior.",
    "Category-specific": "Uses different optimal thresholds for each category. Best for fine-tuning results."
}
|
47 |
+
|
48 |
+
# Longer-form markdown help text for each threshold profile, keyed by the same
# UI-facing names as threshold_profile_descriptions. Rendered as markdown in
# the app when the user expands the profile help.
threshold_profile_explanations = {
    "Micro Optimized": """
### Micro Optimized Profile

**Technical definition**: Maximizes micro-averaged F1 score, which calculates metrics globally across all predictions.

**When to use**: When you want the best overall accuracy, especially for common tags and dominant categories.

**Effects**:
- Optimizes performance for the most frequent tags
- Gives more weight to categories with many examples (like 'character' and 'general')
- Provides higher precision in most common use cases

**Performance from validation**:
- Micro F1: ~67.3%
- Macro F1: ~46.3%
- Threshold: ~0.614
""",

    "Macro Optimized": """
### Macro Optimized Profile

**Technical definition**: Maximizes macro-averaged F1 score, which gives equal weight to all categories regardless of size.

**When to use**: When balanced performance across all categories is important, including rare tags.

**Effects**:
- More balanced performance across all tag categories
- Better at detecting rare or unusual tags
- Generally has lower thresholds than micro-optimized

**Performance from validation**:
- Micro F1: ~60.9%
- Macro F1: ~50.6%
- Threshold: ~0.492
""",

    "Balanced": """
### Balanced Profile

**Technical definition**: Same as Micro Optimized but provides a good reference point for manual adjustment.

**When to use**: For general-purpose tagging when you don't have specific recall or precision requirements.

**Effects**:
- Good middle ground between precision and recall
- Works well for most common use cases
- Default choice for most users

**Performance from validation**:
- Micro F1: ~67.3%
- Macro F1: ~46.3%
- Threshold: ~0.614
""",

    "Overall": """
### Overall Profile

**Technical definition**: Uses a single threshold value across all categories.

**When to use**: When you want consistent behavior across all categories and a simple approach.

**Effects**:
- Consistent tagging threshold for all categories
- Simpler to understand than category-specific thresholds
- User-adjustable with a single slider

**Default threshold value**: 0.5 (user-adjustable)

**Note**: The threshold value is user-adjustable with the slider below.
""",

    "Category-specific": """
### Category-specific Profile

**Technical definition**: Uses different optimal thresholds for each category, allowing fine-tuning.

**When to use**: When you want to customize tagging sensitivity for different categories.

**Effects**:
- Each category has its own independent threshold
- Full control over category sensitivity
- Best for fine-tuning results when some categories need different treatment

**Default threshold values**: Starts with balanced thresholds for each category

**Note**: Use the category sliders below to adjust thresholds for individual categories.
"""
}
|
137 |
+
|
138 |
+
def load_validation_results(results_path):
    """Read validation results from a JSON file.

    Returns the parsed object on success; on any read/parse failure the
    error is printed and None is returned so callers can fall back to
    default thresholds.
    """
    try:
        with open(results_path, 'r') as handle:
            return json.load(handle)
    except Exception as e:
        print(f"Error loading validation results: {e}")
        return None
|
147 |
+
|
148 |
+
def extract_thresholds_from_results(validation_data):
    """Build a threshold lookup table from parsed validation results.

    Returns a dict with two keys:
      - 'overall': profile name -> {'threshold', 'micro_f1', 'macro_f1'}
      - 'categories': category -> profile name -> same metrics dict
    An empty dict is returned when the input is falsy or has no 'results'.
    """
    if not validation_data or 'results' not in validation_data:
        return {}

    # Short profile names used in the results file -> canonical keys.
    profile_aliases = {
        'micro_opt': 'micro_optimized',
        'macro_opt': 'macro_optimized',
    }

    thresholds = {'overall': {}, 'categories': {}}

    for entry in validation_data['results']:
        category = entry['CATEGORY'].lower()
        raw_profile = entry['PROFILE'].lower().replace(' ', '_')
        profile = profile_aliases.get(raw_profile, raw_profile)

        info = {
            'threshold': entry['THRESHOLD'],
            'micro_f1': entry['MICRO-F1'],
            'macro_f1': entry['MACRO-F1'],
        }

        if category == 'overall':
            thresholds['overall'][profile] = info
        else:
            thresholds['categories'].setdefault(category, {})[profile] = info

    return thresholds
|
186 |
+
|
187 |
+
def load_model_and_metadata():
    """Discover available model files and load shared metadata/thresholds.

    Probes MODEL_DIR for the SafeTensors weights, the ONNX export, the
    shared metadata JSON, and the validation-results JSON. Returns a tuple
    (model_info, metadata, thresholds):
      - model_info: availability flags for each artifact
      - metadata: parsed metadata JSON, or None if missing/unreadable
      - thresholds: extracted from validation results, else hard-coded defaults
    """
    safetensors_path = os.path.join(MODEL_DIR, "camie-tagger-v2.safetensors")
    metadata_path = os.path.join(MODEL_DIR, "camie-tagger-v2-metadata.json")
    onnx_path = os.path.join(MODEL_DIR, "camie-tagger-v2.onnx")
    validation_results_path = os.path.join(MODEL_DIR, "full_validation_results.json")

    # Both model formats need the same metadata file to be usable.
    have_metadata = os.path.exists(metadata_path)
    model_info = {
        'safetensors_available': os.path.exists(safetensors_path) and have_metadata,
        'onnx_available': os.path.exists(onnx_path) and have_metadata,
        'validation_results_available': os.path.exists(validation_results_path),
    }

    # Load metadata (shared by both model types); failures are non-fatal.
    metadata = None
    if have_metadata:
        try:
            with open(metadata_path, 'r') as f:
                metadata = json.load(f)
        except Exception as e:
            print(f"Error loading metadata: {e}")

    # Prefer thresholds derived from the validation run.
    thresholds = {}
    if model_info['validation_results_available']:
        validation_data = load_validation_results(validation_results_path)
        if validation_data:
            thresholds = extract_thresholds_from_results(validation_data)

    # Fall back to conservative defaults when no validation data is usable.
    if not thresholds:
        thresholds = {
            'overall': {
                'balanced': {'threshold': 0.5, 'micro_f1': 0, 'macro_f1': 0},
                'micro_optimized': {'threshold': 0.6, 'micro_f1': 0, 'macro_f1': 0},
                'macro_optimized': {'threshold': 0.4, 'micro_f1': 0, 'macro_f1': 0},
            },
            'categories': {},
        }

    return model_info, metadata, thresholds
|
233 |
+
|
234 |
+
def load_safetensors_model(safetensors_path, metadata_path):
    """Load the SafeTensors model weights plus their JSON metadata.

    Args:
        safetensors_path: Path to the .safetensors weight file.
        metadata_path: Path to the JSON metadata file; must contain
            'model_info' (architecture hyperparameters) and 'dataset_info'
            (including 'total_tags').

    Returns:
        (model, metadata): the reconstructed ImageTagger in eval mode, and
        the parsed metadata dict.

    Raises:
        Exception: wrapping any failure (missing dependencies, unreadable
            files, weight/architecture mismatch). The original error is
            chained via ``from e`` so the full traceback is preserved.
    """
    try:
        from safetensors.torch import load_file
        import torch

        # Metadata drives how the architecture is rebuilt below.
        with open(metadata_path, 'r') as f:
            metadata = json.load(f)

        # Import the model class (assuming it's available)
        from utils.model_loader import ImageTagger

        model_info = metadata['model_info']
        dataset_info = metadata['dataset_info']

        # Recreate the architecture so the saved state_dict lines up exactly.
        model = ImageTagger(
            total_tags=dataset_info['total_tags'],
            dataset=None,
            model_name=model_info['backbone'],
            num_heads=model_info['num_attention_heads'],
            dropout=0.0,                      # inference only: disable dropout
            pretrained=False,                 # weights come from the file below
            tag_context_size=model_info['tag_context_size'],
            use_gradient_checkpointing=False,
            img_size=model_info['img_size']
        )

        # Load weights and switch to inference mode.
        state_dict = load_file(safetensors_path)
        model.load_state_dict(state_dict)
        model.eval()

        return model, metadata
    except Exception as e:
        # Chain the cause so callers see the real failure, not just the wrapper.
        raise Exception(f"Failed to load SafeTensors model: {e}") from e
|
272 |
+
|
273 |
+
def get_profile_metrics(thresholds, profile_name):
    """Look up the overall metrics entry for a UI-facing profile name.

    Returns the {'threshold', 'micro_f1', 'macro_f1'} dict from
    thresholds['overall'], or None when the profile is unknown or absent.
    """
    # UI-friendly names -> internal profile keys. "Overall" and
    # "Category-specific" reuse the macro-optimized numbers by design.
    key_by_profile = {
        "Micro Optimized": "micro_optimized",
        "Macro Optimized": "macro_optimized",
        "Balanced": "balanced",
        "Overall": "macro_optimized",
        "Category-specific": "macro_optimized",
    }
    profile_key = key_by_profile.get(profile_name)

    if profile_key and 'overall' in thresholds and profile_key in thresholds['overall']:
        return thresholds['overall'][profile_key]

    return None
|
291 |
+
|
292 |
+
def on_threshold_profile_change():
    """Streamlit callback: sync stored thresholds when the profile selector changes.

    Reads the newly selected profile from st.session_state.threshold_profile
    and rewrites st.session_state.settings['active_threshold'] and
    ['active_category_thresholds'] from the precomputed
    st.session_state.thresholds table. Mutates session state in place;
    returns nothing.
    """
    new_profile = st.session_state.threshold_profile

    # NOTE(review): hasattr works on st.session_state via its attribute API;
    # both keys are set during model load before this callback can fire.
    if hasattr(st.session_state, 'thresholds') and hasattr(st.session_state, 'settings'):
        # Initialize category thresholds if needed
        if st.session_state.settings['active_category_thresholds'] is None:
            st.session_state.settings['active_category_thresholds'] = {}

        # Alias to the live dict so writes below persist in session state.
        current_thresholds = st.session_state.settings['active_category_thresholds']

        # Map profile names to keys
        profile_key = None
        if new_profile == "Micro Optimized":
            profile_key = "micro_optimized"
        elif new_profile == "Macro Optimized":
            profile_key = "macro_optimized"
        elif new_profile == "Balanced":
            profile_key = "balanced"

        # Update thresholds based on profile
        # Branch 1: a validation-backed profile — copy its overall threshold
        # and its per-category thresholds (falling back to the overall value).
        if profile_key and 'overall' in st.session_state.thresholds and profile_key in st.session_state.thresholds['overall']:
            st.session_state.settings['active_threshold'] = st.session_state.thresholds['overall'][profile_key]['threshold']

            # Set category thresholds
            for category in st.session_state.categories:
                if category in st.session_state.thresholds['categories'] and profile_key in st.session_state.thresholds['categories'][category]:
                    current_thresholds[category] = st.session_state.thresholds['categories'][category][profile_key]['threshold']
                else:
                    current_thresholds[category] = st.session_state.settings['active_threshold']

        # Branch 2: single global threshold; per-category overrides removed.
        elif new_profile == "Overall":
            # Use balanced threshold for Overall profile
            if 'overall' in st.session_state.thresholds and 'balanced' in st.session_state.thresholds['overall']:
                st.session_state.settings['active_threshold'] = st.session_state.thresholds['overall']['balanced']['threshold']
            else:
                st.session_state.settings['active_threshold'] = 0.5

            # Clear category-specific overrides
            st.session_state.settings['active_category_thresholds'] = {}

        # Branch 3: seed every category from the balanced profile so the user
        # can then tune each slider independently.
        elif new_profile == "Category-specific":
            # Initialize with balanced thresholds
            if 'overall' in st.session_state.thresholds and 'balanced' in st.session_state.thresholds['overall']:
                st.session_state.settings['active_threshold'] = st.session_state.thresholds['overall']['balanced']['threshold']
            else:
                st.session_state.settings['active_threshold'] = 0.5

            # Initialize category thresholds
            for category in st.session_state.categories:
                if category in st.session_state.thresholds['categories'] and 'balanced' in st.session_state.thresholds['categories'][category]:
                    current_thresholds[category] = st.session_state.thresholds['categories'][category]['balanced']['threshold']
                else:
                    current_thresholds[category] = st.session_state.settings['active_threshold']
|
346 |
+
|
347 |
+
def apply_thresholds(all_probs, threshold_profile, active_threshold, active_category_thresholds, min_confidence, selected_categories):
    """Filter raw per-category probabilities by their thresholds.

    Args:
        all_probs: category -> list of (tag, probability) pairs.
        threshold_profile: currently unused; kept for interface compatibility.
        active_threshold: fallback threshold for categories without overrides.
        active_category_thresholds: optional category -> threshold overrides
            (None is treated as no overrides).
        min_confidence: currently unused; kept for interface compatibility.
        selected_categories: category -> bool; missing categories count as
            selected.

    Returns:
        (tags, all_tags): tags maps every category to its surviving
        (tag, prob) pairs; all_tags is the flat list of tag names from the
        selected categories only.
    """
    overrides = active_category_thresholds if active_category_thresholds else {}
    tags = {}
    all_tags = []

    for category, cat_probs in all_probs.items():
        cutoff = overrides.get(category, active_threshold)
        kept = [pair for pair in cat_probs if pair[1] >= cutoff]
        tags[category] = kept

        # Only categories the user left enabled feed the flat tag list.
        if selected_categories.get(category, True):
            all_tags.extend(name for name, _ in kept)

    return tags, all_tags
|
368 |
+
|
369 |
+
def image_tagger_app():
|
370 |
+
"""Main Streamlit application for image tagging."""
|
371 |
+
st.set_page_config(layout="wide", page_title="Camie Tagger", page_icon="🖼️")
|
372 |
+
|
373 |
+
st.title("Camie-Tagger-v2 Interface")
|
374 |
+
st.markdown("---")
|
375 |
+
|
376 |
+
# Initialize settings
|
377 |
+
if 'settings' not in st.session_state:
|
378 |
+
st.session_state.settings = {
|
379 |
+
'show_all_tags': False,
|
380 |
+
'compact_view': True,
|
381 |
+
'min_confidence': 0.01,
|
382 |
+
'threshold_profile': "Macro",
|
383 |
+
'active_threshold': 0.5,
|
384 |
+
'active_category_thresholds': {}, # Initialize as empty dict, not None
|
385 |
+
'selected_categories': {},
|
386 |
+
'replace_underscores': False
|
387 |
+
}
|
388 |
+
st.session_state.show_profile_help = False
|
389 |
+
|
390 |
+
# Session state initialization for model
|
391 |
+
if 'model_loaded' not in st.session_state:
|
392 |
+
st.session_state.model_loaded = False
|
393 |
+
st.session_state.model = None
|
394 |
+
st.session_state.thresholds = None
|
395 |
+
st.session_state.metadata = None
|
396 |
+
st.session_state.model_type = "onnx" # Default to ONNX
|
397 |
+
|
398 |
+
# Sidebar for model selection and information
|
399 |
+
with st.sidebar:
|
400 |
+
# Support information
|
401 |
+
st.subheader("💡 Notes")
|
402 |
+
|
403 |
+
st.markdown("""
|
404 |
+
This tagger was trained on a subset of the available data due to hardware limitations.
|
405 |
+
|
406 |
+
A more comprehensive model trained on the full 3+ million image dataset would provide:
|
407 |
+
- More recent characters and tags.
|
408 |
+
- Improved accuracy.
|
409 |
+
|
410 |
+
If you find this tool useful and would like to support future development:
|
411 |
+
""")
|
412 |
+
|
413 |
+
# Add Buy Me a Coffee button with Star of the City-like glow effect
|
414 |
+
st.markdown("""
|
415 |
+
<style>
|
416 |
+
@keyframes coffee-button-glow {
|
417 |
+
0% { box-shadow: 0 0 5px #FFD700; }
|
418 |
+
50% { box-shadow: 0 0 15px #FFD700; }
|
419 |
+
100% { box-shadow: 0 0 5px #FFD700; }
|
420 |
+
}
|
421 |
+
|
422 |
+
.coffee-button {
|
423 |
+
display: inline-block;
|
424 |
+
animation: coffee-button-glow 2s infinite;
|
425 |
+
border-radius: 5px;
|
426 |
+
transition: transform 0.3s ease;
|
427 |
+
}
|
428 |
+
|
429 |
+
.coffee-button:hover {
|
430 |
+
transform: scale(1.05);
|
431 |
+
}
|
432 |
+
</style>
|
433 |
+
|
434 |
+
<a href="https://ko-fi.com/camais" target="_blank" class="coffee-button">
|
435 |
+
<img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png"
|
436 |
+
alt="Buy Me A Coffee"
|
437 |
+
style="height: 45px; width: 162px; border-radius: 5px;" />
|
438 |
+
</a>
|
439 |
+
""", unsafe_allow_html=True)
|
440 |
+
|
441 |
+
st.markdown("""
|
442 |
+
Your support helps with:
|
443 |
+
- GPU costs for training
|
444 |
+
- Storage for larger datasets
|
445 |
+
- Development of new features
|
446 |
+
- Future projects
|
447 |
+
|
448 |
+
Thank you! 🙏
|
449 |
+
|
450 |
+
Full Details: https://huggingface.co/Camais03/camie-tagger
|
451 |
+
""")
|
452 |
+
|
453 |
+
st.header("Model Selection")
|
454 |
+
|
455 |
+
# Load model information
|
456 |
+
model_info, metadata, thresholds = load_model_and_metadata()
|
457 |
+
|
458 |
+
# Determine available model options
|
459 |
+
model_options = []
|
460 |
+
if model_info['onnx_available']:
|
461 |
+
model_options.append("ONNX (Recommended)")
|
462 |
+
if model_info['safetensors_available']:
|
463 |
+
model_options.append("SafeTensors (PyTorch)")
|
464 |
+
|
465 |
+
if not model_options:
|
466 |
+
st.error("No model files found!")
|
467 |
+
st.info(f"Looking for models in: {MODEL_DIR}")
|
468 |
+
st.info("Expected files:")
|
469 |
+
st.info("- camie-tagger-v2.onnx")
|
470 |
+
st.info("- camie-tagger-v2.safetensors")
|
471 |
+
st.info("- camie-tagger-v2-metadata.json")
|
472 |
+
st.stop()
|
473 |
+
|
474 |
+
# Model type selection
|
475 |
+
default_index = 0 if model_info['onnx_available'] else 0
|
476 |
+
model_type = st.radio(
|
477 |
+
"Select Model Type:",
|
478 |
+
model_options,
|
479 |
+
index=default_index,
|
480 |
+
help="ONNX: Optimized for speed and compatibility\nSafeTensors: Native PyTorch format"
|
481 |
+
)
|
482 |
+
|
483 |
+
# Convert selection to internal model type
|
484 |
+
if model_type == "ONNX (Recommended)":
|
485 |
+
selected_model_type = "onnx"
|
486 |
+
else:
|
487 |
+
selected_model_type = "safetensors"
|
488 |
+
|
489 |
+
# If model type changed, reload
|
490 |
+
if selected_model_type != st.session_state.model_type:
|
491 |
+
st.session_state.model_loaded = False
|
492 |
+
st.session_state.model_type = selected_model_type
|
493 |
+
|
494 |
+
# Reload button
|
495 |
+
if st.button("Reload Model") and st.session_state.model_loaded:
|
496 |
+
st.session_state.model_loaded = False
|
497 |
+
st.info("Reloading model...")
|
498 |
+
|
499 |
+
# Try to load the model
|
500 |
+
if not st.session_state.model_loaded:
|
501 |
+
try:
|
502 |
+
with st.spinner(f"Loading {st.session_state.model_type.upper()} model..."):
|
503 |
+
if st.session_state.model_type == "onnx":
|
504 |
+
# Load ONNX model
|
505 |
+
import onnxruntime as ort
|
506 |
+
|
507 |
+
onnx_path = os.path.join(MODEL_DIR, "camie-tagger-v2.onnx")
|
508 |
+
|
509 |
+
# Check ONNX providers
|
510 |
+
providers = ort.get_available_providers()
|
511 |
+
gpu_available = any('CUDA' in provider for provider in providers)
|
512 |
+
|
513 |
+
# Create ONNX session
|
514 |
+
session = ort.InferenceSession(onnx_path, providers=providers)
|
515 |
+
|
516 |
+
st.session_state.model = session
|
517 |
+
st.session_state.device = f"ONNX Runtime ({'GPU' if gpu_available else 'CPU'})"
|
518 |
+
st.session_state.param_dtype = "float32"
|
519 |
+
|
520 |
+
else:
|
521 |
+
# Load SafeTensors model
|
522 |
+
safetensors_path = os.path.join(MODEL_DIR, "camie-tagger-v2.safetensors")
|
523 |
+
metadata_path = os.path.join(MODEL_DIR, "camie-tagger-v2-metadata.json")
|
524 |
+
|
525 |
+
model, loaded_metadata = load_safetensors_model(safetensors_path, metadata_path)
|
526 |
+
|
527 |
+
st.session_state.model = model
|
528 |
+
device = next(model.parameters()).device
|
529 |
+
param_dtype = next(model.parameters()).dtype
|
530 |
+
st.session_state.device = device
|
531 |
+
st.session_state.param_dtype = param_dtype
|
532 |
+
metadata = loaded_metadata # Use loaded metadata instead
|
533 |
+
|
534 |
+
# Store common info
|
535 |
+
st.session_state.thresholds = thresholds
|
536 |
+
st.session_state.metadata = metadata
|
537 |
+
st.session_state.model_loaded = True
|
538 |
+
|
539 |
+
# Get categories
|
540 |
+
if metadata and 'dataset_info' in metadata:
|
541 |
+
tag_mapping = metadata['dataset_info']['tag_mapping']
|
542 |
+
categories = list(set(tag_mapping['tag_to_category'].values()))
|
543 |
+
st.session_state.categories = categories
|
544 |
+
|
545 |
+
# Initialize selected categories
|
546 |
+
if not st.session_state.settings['selected_categories']:
|
547 |
+
st.session_state.settings['selected_categories'] = {cat: True for cat in categories}
|
548 |
+
|
549 |
+
# Set initial threshold from validation results
|
550 |
+
if 'overall' in thresholds and 'balanced' in thresholds['overall']:
|
551 |
+
st.session_state.settings['active_threshold'] = thresholds['overall']['macro_optimized']['threshold']
|
552 |
+
|
553 |
+
except Exception as e:
|
554 |
+
st.error(f"Error loading model: {str(e)}")
|
555 |
+
st.code(traceback.format_exc())
|
556 |
+
st.stop()
|
557 |
+
|
558 |
+
# Display model information in sidebar
|
559 |
+
with st.sidebar:
|
560 |
+
st.header("Model Information")
|
561 |
+
if st.session_state.model_loaded:
|
562 |
+
if st.session_state.model_type == "onnx":
|
563 |
+
st.success("Using ONNX Model")
|
564 |
+
else:
|
565 |
+
st.success("Using SafeTensors Model")
|
566 |
+
|
567 |
+
st.write(f"Device: {st.session_state.device}")
|
568 |
+
st.write(f"Precision: {st.session_state.param_dtype}")
|
569 |
+
|
570 |
+
if st.session_state.metadata:
|
571 |
+
if 'dataset_info' in st.session_state.metadata:
|
572 |
+
total_tags = st.session_state.metadata['dataset_info']['total_tags']
|
573 |
+
st.write(f"Total tags: {total_tags}")
|
574 |
+
elif 'total_tags' in st.session_state.metadata:
|
575 |
+
st.write(f"Total tags: {st.session_state.metadata['total_tags']}")
|
576 |
+
|
577 |
+
# Show categories
|
578 |
+
with st.expander("Available Categories"):
|
579 |
+
for category in sorted(st.session_state.categories):
|
580 |
+
st.write(f"- {category.capitalize()}")
|
581 |
+
|
582 |
+
# About section
|
583 |
+
with st.expander("About this app"):
|
584 |
+
st.write("""
|
585 |
+
This app uses a trained image tagging model to analyze and tag images.
|
586 |
+
|
587 |
+
**Model Options**:
|
588 |
+
- **ONNX (Recommended)**: Optimized for inference speed with broad compatibility
|
589 |
+
- **SafeTensors**: Native PyTorch format for advanced users
|
590 |
+
|
591 |
+
**Features**:
|
592 |
+
- Upload or process images in batches
|
593 |
+
- Multiple threshold profiles based on validation results
|
594 |
+
- Category-specific threshold adjustment
|
595 |
+
- Export tags in various formats
|
596 |
+
- Fast inference with GPU acceleration (when available)
|
597 |
+
|
598 |
+
**Threshold Profiles**:
|
599 |
+
- **Micro Optimized**: Best overall F1 score (67.3% micro F1)
|
600 |
+
- **Macro Optimized**: Balanced across categories (50.6% macro F1)
|
601 |
+
- **Balanced**: Good general-purpose setting
|
602 |
+
- **Overall**: Single adjustable threshold
|
603 |
+
- **Category-specific**: Fine-tune each category individually
|
604 |
+
""")
|
605 |
+
|
606 |
+
# Main content area - Image upload and processing
|
607 |
+
col1, col2 = st.columns([1, 1.5])
|
608 |
+
|
609 |
+
with col1:
|
610 |
+
st.header("Image")
|
611 |
+
|
612 |
+
upload_tab, batch_tab = st.tabs(["Upload Image", "Batch Processing"])
|
613 |
+
|
614 |
+
image_path = None
|
615 |
+
|
616 |
+
with upload_tab:
|
617 |
+
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
|
618 |
+
|
619 |
+
if uploaded_file:
|
620 |
+
# Create temporary file
|
621 |
+
with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as tmp_file:
|
622 |
+
tmp_file.write(uploaded_file.getvalue())
|
623 |
+
image_path = tmp_file.name
|
624 |
+
|
625 |
+
st.session_state.original_filename = uploaded_file.name
|
626 |
+
|
627 |
+
# Display image
|
628 |
+
image = Image.open(uploaded_file)
|
629 |
+
st.image(image, use_container_width=True)
|
630 |
+
|
631 |
+
with batch_tab:
|
632 |
+
st.subheader("Batch Process Images")
|
633 |
+
|
634 |
+
# Folder selection
|
635 |
+
batch_folder = st.text_input("Enter folder path containing images:", "")
|
636 |
+
|
637 |
+
# Save options
|
638 |
+
save_options = st.radio(
|
639 |
+
"Where to save tag files:",
|
640 |
+
["Same folder as images", "Custom location", "Default save folder"],
|
641 |
+
index=0
|
642 |
+
)
|
643 |
+
|
644 |
+
# Batch size control
|
645 |
+
st.subheader("Performance Options")
|
646 |
+
batch_size = st.number_input("Batch size", min_value=1, max_value=32, value=4,
|
647 |
+
help="Higher values may improve speed but use more memory")
|
648 |
+
|
649 |
+
# Category limits
|
650 |
+
enable_category_limits = st.checkbox("Limit tags per category in batch output", value=False)
|
651 |
+
|
652 |
+
if enable_category_limits and hasattr(st.session_state, 'categories'):
|
653 |
+
if 'category_limits' not in st.session_state:
|
654 |
+
st.session_state.category_limits = {}
|
655 |
+
|
656 |
+
st.markdown("**Limit Values:** -1 = no limit, 0 = exclude, N = top N tags")
|
657 |
+
|
658 |
+
limit_cols = st.columns(2)
|
659 |
+
for i, category in enumerate(sorted(st.session_state.categories)):
|
660 |
+
col_idx = i % 2
|
661 |
+
with limit_cols[col_idx]:
|
662 |
+
current_limit = st.session_state.category_limits.get(category, -1)
|
663 |
+
new_limit = st.number_input(
|
664 |
+
f"{category.capitalize()}:",
|
665 |
+
value=current_limit,
|
666 |
+
min_value=-1,
|
667 |
+
step=1,
|
668 |
+
key=f"limit_{category}"
|
669 |
+
)
|
670 |
+
st.session_state.category_limits[category] = new_limit
|
671 |
+
|
672 |
+
# Process batch button
|
673 |
+
if batch_folder and os.path.isdir(batch_folder):
|
674 |
+
image_files = []
|
675 |
+
for ext in ['*.jpg', '*.jpeg', '*.png']:
|
676 |
+
image_files.extend(glob.glob(os.path.join(batch_folder, ext)))
|
677 |
+
image_files.extend(glob.glob(os.path.join(batch_folder, ext.upper())))
|
678 |
+
|
679 |
+
if image_files:
|
680 |
+
st.write(f"Found {len(image_files)} images")
|
681 |
+
|
682 |
+
if st.button("🔄 Process All Images", type="primary"):
|
683 |
+
if not st.session_state.model_loaded:
|
684 |
+
st.error("Model not loaded")
|
685 |
+
else:
|
686 |
+
with st.spinner("Processing images..."):
|
687 |
+
progress_bar = st.progress(0)
|
688 |
+
status_text = st.empty()
|
689 |
+
|
690 |
+
def update_progress(current, total, image_path):
|
691 |
+
progress = current / total if total > 0 else 0
|
692 |
+
progress_bar.progress(progress)
|
693 |
+
status_text.text(f"Processing {current}/{total}: {os.path.basename(image_path) if image_path else 'Complete'}")
|
694 |
+
|
695 |
+
# Determine save directory
|
696 |
+
if save_options == "Same folder as images":
|
697 |
+
save_dir = batch_folder
|
698 |
+
elif save_options == "Custom location":
|
699 |
+
save_dir = st.text_input("Custom save directory:", batch_folder)
|
700 |
+
else:
|
701 |
+
save_dir = os.path.join(os.path.dirname(__file__), "saved_tags")
|
702 |
+
os.makedirs(save_dir, exist_ok=True)
|
703 |
+
|
704 |
+
# Get current settings
|
705 |
+
category_limits = st.session_state.category_limits if enable_category_limits else None
|
706 |
+
|
707 |
+
# Process based on model type
|
708 |
+
if st.session_state.model_type == "onnx":
|
709 |
+
batch_results = batch_process_images_onnx(
|
710 |
+
folder_path=batch_folder,
|
711 |
+
model_path=os.path.join(MODEL_DIR, "camie-tagger-v2.onnx"),
|
712 |
+
metadata_path=os.path.join(MODEL_DIR, "camie-tagger-v2-metadata.json"),
|
713 |
+
threshold_profile=st.session_state.settings['threshold_profile'],
|
714 |
+
active_threshold=st.session_state.settings['active_threshold'],
|
715 |
+
active_category_thresholds=st.session_state.settings['active_category_thresholds'],
|
716 |
+
save_dir=save_dir,
|
717 |
+
progress_callback=update_progress,
|
718 |
+
min_confidence=st.session_state.settings['min_confidence'],
|
719 |
+
batch_size=batch_size,
|
720 |
+
category_limits=category_limits
|
721 |
+
)
|
722 |
+
else:
|
723 |
+
# SafeTensors processing (would need to implement)
|
724 |
+
st.error("SafeTensors batch processing not implemented yet")
|
725 |
+
batch_results = None
|
726 |
+
|
727 |
+
if batch_results:
|
728 |
+
display_batch_results(batch_results)
|
729 |
+
|
730 |
+
# Column 2: Controls and Results
|
731 |
+
with col2:
|
732 |
+
st.header("Tagging Controls")
|
733 |
+
|
734 |
+
# Threshold profile selection
|
735 |
+
all_profiles = [
|
736 |
+
"Micro Optimized",
|
737 |
+
"Macro Optimized",
|
738 |
+
"Balanced",
|
739 |
+
"Overall",
|
740 |
+
"Category-specific"
|
741 |
+
]
|
742 |
+
|
743 |
+
profile_col1, profile_col2 = st.columns([3, 1])
|
744 |
+
|
745 |
+
with profile_col1:
|
746 |
+
threshold_profile = st.selectbox(
|
747 |
+
"Select threshold profile",
|
748 |
+
options=all_profiles,
|
749 |
+
index=1, # Default to Macro
|
750 |
+
key="threshold_profile",
|
751 |
+
on_change=on_threshold_profile_change
|
752 |
+
)
|
753 |
+
|
754 |
+
with profile_col2:
|
755 |
+
if st.button("ℹ️ Help", key="profile_help"):
|
756 |
+
st.session_state.show_profile_help = not st.session_state.get('show_profile_help', False)
|
757 |
+
|
758 |
+
# Show profile help
|
759 |
+
if st.session_state.get('show_profile_help', False):
|
760 |
+
st.markdown(threshold_profile_explanations[threshold_profile])
|
761 |
+
else:
|
762 |
+
st.info(threshold_profile_descriptions[threshold_profile])
|
763 |
+
|
764 |
+
# Show profile metrics if available
|
765 |
+
if st.session_state.model_loaded:
|
766 |
+
metrics = get_profile_metrics(st.session_state.thresholds, threshold_profile)
|
767 |
+
|
768 |
+
if metrics:
|
769 |
+
metrics_cols = st.columns(3)
|
770 |
+
|
771 |
+
with metrics_cols[0]:
|
772 |
+
st.metric("Threshold", f"{metrics['threshold']:.3f}")
|
773 |
+
|
774 |
+
with metrics_cols[1]:
|
775 |
+
st.metric("Micro F1", f"{metrics['micro_f1']:.1f}%")
|
776 |
+
|
777 |
+
with metrics_cols[2]:
|
778 |
+
st.metric("Macro F1", f"{metrics['macro_f1']:.1f}%")
|
779 |
+
|
780 |
+
# Threshold controls based on profile
|
781 |
+
if st.session_state.model_loaded:
|
782 |
+
active_threshold = st.session_state.settings.get('active_threshold', 0.5)
|
783 |
+
active_category_thresholds = st.session_state.settings.get('active_category_thresholds', {})
|
784 |
+
|
785 |
+
if threshold_profile in ["Micro Optimized", "Macro Optimized", "Balanced"]:
|
786 |
+
# Show reference threshold (disabled)
|
787 |
+
st.slider(
|
788 |
+
"Threshold (from validation)",
|
789 |
+
min_value=0.01,
|
790 |
+
max_value=1.0,
|
791 |
+
value=float(active_threshold),
|
792 |
+
step=0.01,
|
793 |
+
disabled=True,
|
794 |
+
help="This threshold is optimized from validation results"
|
795 |
+
)
|
796 |
+
|
797 |
+
elif threshold_profile == "Overall":
|
798 |
+
# Adjustable overall threshold
|
799 |
+
active_threshold = st.slider(
|
800 |
+
"Overall threshold",
|
801 |
+
min_value=0.01,
|
802 |
+
max_value=1.0,
|
803 |
+
value=float(active_threshold),
|
804 |
+
step=0.01
|
805 |
+
)
|
806 |
+
st.session_state.settings['active_threshold'] = active_threshold
|
807 |
+
|
808 |
+
elif threshold_profile == "Category-specific":
|
809 |
+
# Show reference overall threshold
|
810 |
+
st.slider(
|
811 |
+
"Overall threshold (reference)",
|
812 |
+
min_value=0.01,
|
813 |
+
max_value=1.0,
|
814 |
+
value=float(active_threshold),
|
815 |
+
step=0.01,
|
816 |
+
disabled=True
|
817 |
+
)
|
818 |
+
|
819 |
+
st.write("Adjust thresholds for individual categories:")
|
820 |
+
|
821 |
+
# Category sliders
|
822 |
+
slider_cols = st.columns(2)
|
823 |
+
|
824 |
+
if not active_category_thresholds:
|
825 |
+
active_category_thresholds = {}
|
826 |
+
|
827 |
+
for i, category in enumerate(sorted(st.session_state.categories)):
|
828 |
+
col_idx = i % 2
|
829 |
+
with slider_cols[col_idx]:
|
830 |
+
default_val = active_category_thresholds.get(category, active_threshold)
|
831 |
+
new_threshold = st.slider(
|
832 |
+
f"{category.capitalize()}",
|
833 |
+
min_value=0.01,
|
834 |
+
max_value=1.0,
|
835 |
+
value=float(default_val),
|
836 |
+
step=0.01,
|
837 |
+
key=f"slider_{category}"
|
838 |
+
)
|
839 |
+
active_category_thresholds[category] = new_threshold
|
840 |
+
|
841 |
+
st.session_state.settings['active_category_thresholds'] = active_category_thresholds
|
842 |
+
|
843 |
+
# Display options
|
844 |
+
with st.expander("Display Options", expanded=False):
|
845 |
+
col1, col2 = st.columns(2)
|
846 |
+
with col1:
|
847 |
+
show_all_tags = st.checkbox("Show all tags (including below threshold)",
|
848 |
+
value=st.session_state.settings['show_all_tags'])
|
849 |
+
compact_view = st.checkbox("Compact view (hide progress bars)",
|
850 |
+
value=st.session_state.settings['compact_view'])
|
851 |
+
replace_underscores = st.checkbox("Replace underscores with spaces",
|
852 |
+
value=st.session_state.settings.get('replace_underscores', False))
|
853 |
+
|
854 |
+
with col2:
|
855 |
+
min_confidence = st.slider("Minimum confidence to display", 0.0, 0.5,
|
856 |
+
st.session_state.settings['min_confidence'], 0.01)
|
857 |
+
|
858 |
+
# Update settings
|
859 |
+
st.session_state.settings.update({
|
860 |
+
'show_all_tags': show_all_tags,
|
861 |
+
'compact_view': compact_view,
|
862 |
+
'min_confidence': min_confidence,
|
863 |
+
'replace_underscores': replace_underscores
|
864 |
+
})
|
865 |
+
|
866 |
+
# Category selection
|
867 |
+
st.write("Categories to include in 'All Tags' section:")
|
868 |
+
|
869 |
+
category_cols = st.columns(3)
|
870 |
+
selected_categories = {}
|
871 |
+
|
872 |
+
if hasattr(st.session_state, 'categories'):
|
873 |
+
for i, category in enumerate(sorted(st.session_state.categories)):
|
874 |
+
col_idx = i % 3
|
875 |
+
with category_cols[col_idx]:
|
876 |
+
default_val = st.session_state.settings['selected_categories'].get(category, True)
|
877 |
+
selected_categories[category] = st.checkbox(
|
878 |
+
f"{category.capitalize()}",
|
879 |
+
value=default_val,
|
880 |
+
key=f"cat_select_{category}"
|
881 |
+
)
|
882 |
+
|
883 |
+
st.session_state.settings['selected_categories'] = selected_categories
|
884 |
+
|
885 |
+
# Run tagging button
|
886 |
+
if image_path and st.button("Run Tagging"):
|
887 |
+
if not st.session_state.model_loaded:
|
888 |
+
st.error("Model not loaded")
|
889 |
+
else:
|
890 |
+
with st.spinner("Analyzing image..."):
|
891 |
+
try:
|
892 |
+
# Process image based on model type
|
893 |
+
if st.session_state.model_type == "onnx":
|
894 |
+
from utils.onnx_processing import process_single_image_onnx
|
895 |
+
|
896 |
+
result = process_single_image_onnx(
|
897 |
+
image_path=image_path,
|
898 |
+
model_path=os.path.join(MODEL_DIR, "camie-tagger-v2.onnx"),
|
899 |
+
metadata=st.session_state.metadata,
|
900 |
+
threshold_profile=threshold_profile,
|
901 |
+
active_threshold=st.session_state.settings['active_threshold'],
|
902 |
+
active_category_thresholds=st.session_state.settings.get('active_category_thresholds', {}),
|
903 |
+
min_confidence=st.session_state.settings['min_confidence']
|
904 |
+
)
|
905 |
+
else:
|
906 |
+
# SafeTensors processing
|
907 |
+
result = process_image(
|
908 |
+
image_path=image_path,
|
909 |
+
model=st.session_state.model,
|
910 |
+
thresholds=st.session_state.thresholds,
|
911 |
+
metadata=st.session_state.metadata,
|
912 |
+
threshold_profile=threshold_profile,
|
913 |
+
active_threshold=st.session_state.settings['active_threshold'],
|
914 |
+
active_category_thresholds=st.session_state.settings.get('active_category_thresholds', {}),
|
915 |
+
min_confidence=st.session_state.settings['min_confidence']
|
916 |
+
)
|
917 |
+
|
918 |
+
if result['success']:
|
919 |
+
st.session_state.all_probs = result['all_probs']
|
920 |
+
st.session_state.tags = result['tags']
|
921 |
+
st.session_state.all_tags = result['all_tags']
|
922 |
+
st.success("Analysis completed!")
|
923 |
+
else:
|
924 |
+
st.error(f"Analysis failed: {result.get('error', 'Unknown error')}")
|
925 |
+
|
926 |
+
except Exception as e:
|
927 |
+
st.error(f"Error during analysis: {str(e)}")
|
928 |
+
st.code(traceback.format_exc())
|
929 |
+
|
930 |
+
# Display results
|
931 |
+
if image_path and hasattr(st.session_state, 'all_probs'):
|
932 |
+
st.header("Predictions")
|
933 |
+
|
934 |
+
# Apply current thresholds
|
935 |
+
filtered_tags, current_all_tags = apply_thresholds(
|
936 |
+
st.session_state.all_probs,
|
937 |
+
threshold_profile,
|
938 |
+
st.session_state.settings['active_threshold'],
|
939 |
+
st.session_state.settings.get('active_category_thresholds', {}),
|
940 |
+
st.session_state.settings['min_confidence'],
|
941 |
+
st.session_state.settings['selected_categories']
|
942 |
+
)
|
943 |
+
|
944 |
+
all_tags = []
|
945 |
+
|
946 |
+
# Display by category
|
947 |
+
for category in sorted(st.session_state.all_probs.keys()):
|
948 |
+
all_tags_in_category = st.session_state.all_probs.get(category, [])
|
949 |
+
filtered_tags_in_category = filtered_tags.get(category, [])
|
950 |
+
|
951 |
+
if all_tags_in_category:
|
952 |
+
expander_label = f"{category.capitalize()} ({len(filtered_tags_in_category)} tags)"
|
953 |
+
|
954 |
+
with st.expander(expander_label, expanded=True):
|
955 |
+
# Get threshold for this category (handle None case)
|
956 |
+
active_category_thresholds = st.session_state.settings.get('active_category_thresholds') or {}
|
957 |
+
threshold = active_category_thresholds.get(category, st.session_state.settings['active_threshold'])
|
958 |
+
|
959 |
+
# Determine tags to display
|
960 |
+
if st.session_state.settings['show_all_tags']:
|
961 |
+
tags_to_display = all_tags_in_category
|
962 |
+
else:
|
963 |
+
tags_to_display = [(tag, prob) for tag, prob in all_tags_in_category if prob >= threshold]
|
964 |
+
|
965 |
+
if not tags_to_display:
|
966 |
+
st.info(f"No tags above {st.session_state.settings['min_confidence']:.2f} confidence")
|
967 |
+
continue
|
968 |
+
|
969 |
+
# Display tags
|
970 |
+
if st.session_state.settings['compact_view']:
|
971 |
+
# Compact view
|
972 |
+
tag_list = []
|
973 |
+
replace_underscores = st.session_state.settings.get('replace_underscores', False)
|
974 |
+
|
975 |
+
for tag, prob in tags_to_display:
|
976 |
+
percentage = int(prob * 100)
|
977 |
+
display_tag = tag.replace('_', ' ') if replace_underscores else tag
|
978 |
+
tag_list.append(f"{display_tag} ({percentage}%)")
|
979 |
+
|
980 |
+
if prob >= threshold and st.session_state.settings['selected_categories'].get(category, True):
|
981 |
+
all_tags.append(tag)
|
982 |
+
|
983 |
+
st.markdown(", ".join(tag_list))
|
984 |
+
else:
|
985 |
+
# Expanded view with progress bars
|
986 |
+
for tag, prob in tags_to_display:
|
987 |
+
replace_underscores = st.session_state.settings.get('replace_underscores', False)
|
988 |
+
display_tag = tag.replace('_', ' ') if replace_underscores else tag
|
989 |
+
|
990 |
+
if prob >= threshold and st.session_state.settings['selected_categories'].get(category, True):
|
991 |
+
all_tags.append(tag)
|
992 |
+
tag_display = f"**{display_tag}**"
|
993 |
+
else:
|
994 |
+
tag_display = display_tag
|
995 |
+
|
996 |
+
st.write(tag_display)
|
997 |
+
st.markdown(display_progress_bar(prob), unsafe_allow_html=True)
|
998 |
+
|
999 |
+
# All tags summary
|
1000 |
+
st.markdown("---")
|
1001 |
+
st.subheader(f"All Tags ({len(all_tags)} total)")
|
1002 |
+
if all_tags:
|
1003 |
+
replace_underscores = st.session_state.settings.get('replace_underscores', False)
|
1004 |
+
if replace_underscores:
|
1005 |
+
display_tags = [tag.replace('_', ' ') for tag in all_tags]
|
1006 |
+
st.write(", ".join(display_tags))
|
1007 |
+
else:
|
1008 |
+
st.write(", ".join(all_tags))
|
1009 |
+
else:
|
1010 |
+
st.info("No tags detected above the threshold.")
|
1011 |
+
|
1012 |
+
# Save tags section
|
1013 |
+
st.markdown("---")
|
1014 |
+
st.subheader("Save Tags")
|
1015 |
+
|
1016 |
+
if 'custom_folders' not in st.session_state:
|
1017 |
+
st.session_state.custom_folders = get_default_save_locations()
|
1018 |
+
|
1019 |
+
selected_folder = st.selectbox(
|
1020 |
+
"Select save location:",
|
1021 |
+
options=st.session_state.custom_folders,
|
1022 |
+
format_func=lambda x: os.path.basename(x) if os.path.basename(x) else x
|
1023 |
+
)
|
1024 |
+
|
1025 |
+
if st.button("💾 Save to Selected Location"):
|
1026 |
+
try:
|
1027 |
+
original_filename = st.session_state.original_filename if hasattr(st.session_state, 'original_filename') else None
|
1028 |
+
|
1029 |
+
saved_path = save_tags_to_file(
|
1030 |
+
image_path=image_path,
|
1031 |
+
all_tags=all_tags,
|
1032 |
+
original_filename=original_filename,
|
1033 |
+
custom_dir=selected_folder,
|
1034 |
+
overwrite=True
|
1035 |
+
)
|
1036 |
+
|
1037 |
+
st.success(f"Tags saved to: {os.path.basename(saved_path)}")
|
1038 |
+
st.info(f"Full path: {saved_path}")
|
1039 |
+
|
1040 |
+
# Show file preview
|
1041 |
+
with st.expander("File Contents", expanded=True):
|
1042 |
+
with open(saved_path, 'r', encoding='utf-8') as f:
|
1043 |
+
content = f.read()
|
1044 |
+
st.code(content, language='text')
|
1045 |
+
|
1046 |
+
except Exception as e:
|
1047 |
+
st.error(f"Error saving tags: {str(e)}")
|
1048 |
+
|
1049 |
+
# Script entry point: start the Streamlit image-tagger UI defined above.
if __name__ == "__main__":
    image_tagger_app()
|
app/run_app.py
ADDED
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
|
2 |
+
"""
|
3 |
+
Launcher script for the Image Tagger application.
|
4 |
+
"""
|
5 |
+
import os
|
6 |
+
import sys
|
7 |
+
import subprocess
|
8 |
+
import webbrowser
|
9 |
+
import time
|
10 |
+
from pathlib import Path
|
11 |
+
|
12 |
+
def run_app():
    """Launch the Streamlit web UI for the Image Tagger.

    Looks for ``app.py`` in the current working directory and runs it with
    the Streamlit executable from the project's virtual environment (one
    directory above this script).  If the venv copy is missing, falls back
    to ``python -m streamlit`` with the current interpreter instead of
    failing outright.

    Returns:
        True if the app ran (or was stopped by the user), False on failure.
    """
    # The launcher is expected to be invoked from the app directory, so the
    # target script is resolved relative to the current working directory.
    app_path = "app.py"
    if not os.path.exists(app_path):
        print(f"Error: {app_path} not found")
        return False

    # The virtual environment lives in the parent directory (per setup.py).
    parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

    # Platform-specific location of the streamlit entry point inside the venv.
    if sys.platform == "win32":
        streamlit_path = os.path.join(parent_dir, "venv", "Scripts", "streamlit.exe")
    else:
        streamlit_path = os.path.join(parent_dir, "venv", "bin", "streamlit")

    if os.path.exists(streamlit_path):
        command = [streamlit_path, "run", app_path]
    else:
        # Robustness fix: the original hard-failed here.  Fall back to running
        # streamlit as a module with whichever interpreter launched us, so the
        # app can still start outside the expected venv layout.
        print(f"Note: Streamlit not found at {streamlit_path}")
        print("Falling back to 'python -m streamlit' with the current interpreter")
        command = [sys.executable, "-m", "streamlit", "run", app_path]

    print("=" * 60)
    print(" Starting Image Tagger Application")
    print("=" * 60)

    print("\nLaunching the web interface...")

    # Create a directory for example images if it doesn't exist.
    examples_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "examples")
    os.makedirs(examples_dir, exist_ok=True)

    # Nudge the user to add sample images on first run.
    example_files = [f for f in os.listdir(examples_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg'))]
    if not example_files:
        print("\nTip: Add some example images to the 'examples' directory for testing")

    # Run the Streamlit app; streamlit opens the browser itself, which avoids
    # the double browser-opening issue.
    try:
        subprocess.run(command, check=True)
        return True
    except subprocess.CalledProcessError as e:
        print(f"Error running the app: {e}")
        return False
    except KeyboardInterrupt:
        print("\nApplication stopped by user")
        return True
|
61 |
+
|
62 |
+
if __name__ == "__main__":
    # Exit status mirrors the launcher's success flag for shell scripting.
    sys.exit(0 if run_app() else 1)
|
app/utils/__init__.py
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
# Make utils a proper Python package
|
app/utils/file_utils.py
ADDED
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
File utilities for Image Tagger application.
|
3 |
+
"""
|
4 |
+
|
5 |
+
import os
|
6 |
+
import time
|
7 |
+
|
8 |
+
def save_tags_to_file(image_path, all_tags, original_filename=None, custom_dir=None, overwrite=False):
    """
    Save tags to a text file in a dedicated 'saved_tags' folder or custom directory.

    Args:
        image_path: Path to the original image (used to derive the filename)
        all_tags: List of all tags to save
        original_filename: Original filename if uploaded through Streamlit
        custom_dir: Directory to save tags into; falls back to the app's
            'saved_tags' folder when missing or not a directory
        overwrite: When False and the target file already exists, a
            timestamped filename is used instead of overwriting

    Returns:
        Path to the saved file
    """
    # Pick the target directory: the caller's folder when valid, otherwise
    # a dedicated 'saved_tags' folder in the app root.
    if custom_dir and os.path.isdir(custom_dir):
        target_dir = custom_dir
    else:
        app_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        target_dir = os.path.join(app_root, "saved_tags")
    os.makedirs(target_dir, exist_ok=True)

    # Uploaded files keep their original name; otherwise use the image's name.
    source_name = original_filename if original_filename else os.path.basename(image_path)
    stem = os.path.splitext(source_name)[0]

    out_path = os.path.join(target_dir, f"{stem}.txt")

    # Avoid clobbering an existing file unless overwrite was requested.
    if not overwrite and os.path.exists(out_path):
        stamp = time.strftime("%Y%m%d-%H%M%S")
        out_path = os.path.join(target_dir, f"{stem}_{stamp}.txt")

    with open(out_path, 'w', encoding='utf-8') as handle:
        if all_tags:
            # Keep a trailing comma after the last tag as well.
            handle.write(", ".join(all_tags) + ",")

    return out_path
|
56 |
+
|
57 |
+
def get_default_save_locations():
    """
    Get default save locations for tag files.

    The app's own 'saved_tags' folder is always created and listed first.
    Common user directories (Desktop, Downloads, Documents) are included
    only when they already exist — the original version created them
    unconditionally, leaving spurious folders in the user's home on
    systems (e.g. headless Linux) that lack them.

    Returns:
        List of existing directories that tag files can be saved to.
    """
    # App's dedicated folder — created on demand.
    app_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    save_dir = os.path.join(app_dir, "saved_tags")
    os.makedirs(save_dir, exist_ok=True)

    save_locations = [save_dir]

    # Offer common user directories, but only those that actually exist.
    for candidate in ("~/Desktop", "~/Downloads", "~/Documents"):
        expanded = os.path.expanduser(candidate)
        if os.path.isdir(expanded):
            save_locations.append(expanded)

    return save_locations
|
86 |
+
|
87 |
+
def apply_category_limits(result, category_limits):
    """
    Trim or drop tag categories in *result* according to per-category limits.

    NOTE: an identical helper exists in image_processing.py; keep the two
    in sync if either changes.

    Args:
        result: Result dictionary containing tags and all_tags
        category_limits: Mapping of category -> limit, where 0 excludes the
            category, -1 (the default) keeps everything, and a positive N
            keeps only the top N tags

    Returns:
        The same result dictionary, mutated in place with limits applied
    """
    # Nothing to do without limits or on a failed result.
    if not category_limits or not result['success']:
        return result

    tags_by_category = result['tags']

    # Iterate over a snapshot of the keys so categories can be removed.
    for category in list(tags_by_category):
        cap = category_limits.get(category, -1)
        if cap == 0:
            # A zero limit excludes the category outright.
            del tags_by_category[category]
        elif cap > 0 and len(tags_by_category[category]) > cap:
            # Keep only the top N entries.
            tags_by_category[category] = tags_by_category[category][:cap]

    # Rebuild the flat tag list from whatever survived.
    result['all_tags'] = [tag for cat_tags in tags_by_category.values() for tag, _ in cat_tags]
    result['tags'] = tags_by_category
    return result
|
app/utils/image_processing.py
ADDED
@@ -0,0 +1,511 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Image processing functions for the Image Tagger application.
|
3 |
+
"""
|
4 |
+
|
5 |
+
import os
|
6 |
+
import traceback
|
7 |
+
import glob
|
8 |
+
|
9 |
+
|
10 |
+
def process_image(image_path, model, thresholds, metadata, threshold_profile, active_threshold, active_category_thresholds, min_confidence=0.1):
    """
    Process a single image and return the tags.

    Args:
        image_path: Path to the image
        model: The image tagger model
        thresholds: Thresholds dictionary
        metadata: Metadata dictionary
        threshold_profile: Selected threshold profile
        active_threshold: Overall threshold value
        active_category_thresholds: Category-specific thresholds
        min_confidence: Minimum confidence to include in results

    Returns:
        Dictionary with tags, all probabilities, and other info
    """
    try:
        # Profiles with per-category thresholds hand them to the model;
        # everything else uses the single overall threshold.
        if threshold_profile in ["Category-specific", "High Precision", "High Recall"]:
            results = model.predict(
                image_path=image_path,
                category_thresholds=active_category_thresholds
            )
        else:
            results = model.predict(
                image_path=image_path,
                threshold=active_threshold
            )

        # Drop the batch dimension and bucket every probability at or above
        # min_confidence under its category.
        probs = results['refined_probabilities'][0]
        all_probs = {}
        for idx in range(len(probs)):
            confidence = probs[idx].item()
            if confidence < min_confidence:
                continue
            tag, category = model.dataset.get_tag_info(idx)
            all_probs.setdefault(category, []).append((tag, confidence))

        # Highest-confidence tags first within each category.
        for cat_entries in all_probs.values():
            cat_entries.sort(key=lambda pair: pair[1], reverse=True)

        # Filter each category by its effective threshold.
        tags = {}
        for category, cat_entries in all_probs.items():
            if active_category_thresholds:
                cutoff = active_category_thresholds.get(category, active_threshold)
            else:
                cutoff = active_threshold
            tags[category] = [entry for entry in cat_entries if entry[1] >= cutoff]

        # Flat list of every tag that passed its threshold.
        all_tags = [tag for cat_entries in tags.values() for tag, _ in cat_entries]

        return {
            'tags': tags,
            'all_probs': all_probs,
            'all_tags': all_tags,
            'success': True
        }

    except Exception as e:
        print(f"Error processing {image_path}: {str(e)}")
        traceback.print_exc()
        return {
            'tags': {},
            'all_probs': {},
            'all_tags': [],
            'success': False,
            'error': str(e)
        }
|
91 |
+
|
92 |
+
def apply_category_limits(result, category_limits):
    """
    Enforce per-category tag-count limits on a processing result.

    NOTE: file_utils.py carries an identical copy of this helper; changes
    here should be mirrored there.

    Args:
        result: Result dictionary containing tags and all_tags
        category_limits: Mapping of category -> limit where 0 excludes the
            category, -1 means no limit, and N > 0 keeps the top N tags

    Returns:
        The (mutated) result dictionary with limits applied
    """
    # Leave failed results and the no-limits case untouched.
    if not category_limits or not result['success']:
        return result

    limited = result['tags']

    for category in list(limited.keys()):
        max_tags = category_limits.get(category, -1)  # -1 => unlimited
        if max_tags == 0:
            # Drop the whole category.
            limited.pop(category)
            continue
        entries = limited[category]
        if 0 < max_tags < len(entries):
            # Entries arrive sorted by confidence (see process_image),
            # so slicing keeps the best ones.
            limited[category] = entries[:max_tags]

    # Flatten the remaining tags back into a single list.
    flat = []
    for entries in limited.values():
        for tag, _ in entries:
            flat.append(tag)

    result['tags'] = limited
    result['all_tags'] = flat
    return result
|
133 |
+
|
134 |
+
def batch_process_images(folder_path, model, thresholds, metadata, threshold_profile, active_threshold,
                         active_category_thresholds, save_dir=None, progress_callback=None,
                         min_confidence=0.1, batch_size=1, category_limits=None):
    """
    Process all images in a folder with optional batching for improved performance.

    Args:
        folder_path: Path to folder containing images
        model: The image tagger model
        thresholds: Thresholds dictionary
        metadata: Metadata dictionary
        threshold_profile: Selected threshold profile
        active_threshold: Overall threshold value
        active_category_thresholds: Category-specific thresholds
        save_dir: Directory to save tag files (if None uses default)
        progress_callback: Optional callback(done, total, current_path) for progress updates
        min_confidence: Minimum confidence threshold
        batch_size: Number of images to process at once (default: 1)
        category_limits: Dictionary mapping categories to their tag limits (0 = unlimited)

    Returns:
        Dictionary with 'success', 'total', 'processed', 'results' (per-image dicts),
        'save_dir' and 'time_elapsed'; or {'success': False, 'error': ..., 'results': {}}
        when the folder contains no images.
    """
    from .file_utils import save_tags_to_file  # Import here to avoid circular imports
    import time

    print(f"Starting batch processing on {folder_path} with batch size {batch_size}")
    start_time = time.time()

    # Find all image files in the folder (both lowercase and uppercase extensions)
    image_extensions = ['*.jpg', '*.jpeg', '*.png']
    image_files = []
    for ext in image_extensions:
        image_files.extend(glob.glob(os.path.join(folder_path, ext)))
        image_files.extend(glob.glob(os.path.join(folder_path, ext.upper())))

    # Windows filesystems are case-insensitive, so the two globs above can
    # return the same file twice — de-duplicate on the normalized lowercase path.
    if os.name == 'nt':
        unique_paths = set()
        unique_files = []
        for file_path in image_files:
            normalized_path = os.path.normpath(file_path).lower()
            if normalized_path not in unique_paths:
                unique_paths.add(normalized_path)
                unique_files.append(file_path)
        image_files = unique_files

    # Sort files for consistent processing order
    image_files.sort()

    if not image_files:
        return {
            'success': False,
            'error': f"No images found in {folder_path}",
            'results': {}
        }

    print(f"Found {len(image_files)} images to process")

    # Use the provided save directory or create a default one next to the app
    if save_dir is None:
        app_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        save_dir = os.path.join(app_dir, "saved_tags")
    os.makedirs(save_dir, exist_ok=True)

    results = {}
    total_images = len(image_files)
    processed = 0

    def _finalize(result, image_path):
        """Apply per-category limits (if any) and persist tags for one image result."""
        if category_limits and result['success']:
            result = apply_category_limits(result, category_limits)
        if result['success']:
            output_path = save_tags_to_file(
                image_path=image_path,
                all_tags=result['all_tags'],
                custom_dir=save_dir,
                overwrite=True
            )
            result['output_path'] = str(output_path)
        return result

    def _process_singly(batch_files, offset):
        """Tag, limit and save each image in batch_files one at a time."""
        for j, image_path in enumerate(batch_files):
            if progress_callback:
                progress_callback(offset + j, total_images, image_path)
            result = process_image(
                image_path=image_path,
                model=model,
                thresholds=thresholds,
                metadata=metadata,
                threshold_profile=threshold_profile,
                active_threshold=active_threshold,
                active_category_thresholds=active_category_thresholds,
                min_confidence=min_confidence
            )
            results[image_path] = _finalize(result, image_path)

    # Process in batches
    for i in range(0, total_images, batch_size):
        batch_start = time.time()
        batch_files = image_files[i:i + batch_size]
        batch_size_actual = len(batch_files)

        print(f"Processing batch {i//batch_size + 1}/{(total_images + batch_size - 1)//batch_size}: {batch_size_actual} images")

        if batch_size > 1:
            # True batch processing for multiple images at once
            try:
                batch_results = process_image_batch(
                    image_paths=batch_files,
                    model=model,
                    thresholds=thresholds,
                    metadata=metadata,
                    threshold_profile=threshold_profile,
                    active_threshold=active_threshold,
                    active_category_thresholds=active_category_thresholds,
                    min_confidence=min_confidence
                )

                for j, image_path in enumerate(batch_files):
                    if progress_callback:
                        progress_callback(processed + j, total_images, image_path)

                    if j < len(batch_results):
                        results[image_path] = _finalize(batch_results[j], image_path)
                    else:
                        # Batch processing returned fewer results than expected
                        results[image_path] = {
                            'success': False,
                            'error': 'Batch processing error: missing result',
                            'all_tags': []
                        }
            except Exception as e:
                print(f"Batch processing error: {str(e)}")
                traceback.print_exc()
                # Fall back to processing images one by one in this batch
                _process_singly(batch_files, processed)
        else:
            # Process one by one if batch_size is 1
            _process_singly(batch_files, processed)

        processed += batch_size_actual

        batch_end = time.time()
        batch_time = batch_end - batch_start
        print(f"Batch processed in {batch_time:.2f} seconds ({batch_time/batch_size_actual:.2f} seconds per image)")

    # Final progress update
    if progress_callback:
        progress_callback(total_images, total_images, None)

    end_time = time.time()
    total_time = end_time - start_time
    print(f"Batch processing finished. Total time: {total_time:.2f} seconds, Average: {total_time/total_images:.2f} seconds per image")

    return {
        'success': True,
        'total': total_images,
        'processed': len(results),
        'results': results,
        'save_dir': save_dir,
        'time_elapsed': end_time - start_time
    }
|
363 |
+
|
364 |
+
def process_image_batch(image_paths, model, thresholds, metadata, threshold_profile, active_threshold, active_category_thresholds, min_confidence=0.1):
    """
    Process a batch of images at once.

    Args:
        image_paths: List of paths to the images
        model: The image tagger model
        thresholds: Thresholds dictionary
        metadata: Metadata dictionary
        threshold_profile: Selected threshold profile
        active_threshold: Overall threshold value
        active_category_thresholds: Category-specific thresholds
        min_confidence: Minimum confidence to include in results

    Returns:
        List of dictionaries (tags, all_probs, all_tags, success[, error]) — one per
        image that could be loaded. Raises on unrecoverable setup errors so the
        caller's one-by-one fallback can take over.
    """
    try:
        import torch
        from PIL import Image
        import torchvision.transforms as transforms

        # Identify the model type we're using for better error handling
        model_type = model.__class__.__name__
        print(f"Running batch processing with model type: {model_type}")

        # Prepare the transformation for the images
        transform = transforms.Compose([
            transforms.Resize((512, 512)),  # Adjust based on your model's expected input
            transforms.ToTensor(),
        ])

        # Match the model's device and dtype so stacked tensors are compatible
        device = next(model.parameters()).device
        dtype = next(model.parameters()).dtype
        print(f"Model is using device: {device}, dtype: {dtype}")

        def _result_from_probs(probs):
            """Build one image's result dict from a (1, num_tags) probability tensor."""
            all_probs = {}
            for idx in range(probs.size(1)):
                prob_value = probs[0, idx].item()
                if prob_value >= min_confidence:
                    tag, category = model.dataset.get_tag_info(idx)
                    all_probs.setdefault(category, []).append((tag, prob_value))

            # Sort each category's tags by descending probability
            for category in all_probs:
                all_probs[category].sort(key=lambda x: x[1], reverse=True)

            # Keep only tags above the (possibly per-category) threshold
            tags = {}
            for category, cat_tags in all_probs.items():
                threshold = active_category_thresholds.get(category, active_threshold) if active_category_thresholds else active_threshold
                tags[category] = [(tag, prob) for tag, prob in cat_tags if prob >= threshold]

            all_tags = [tag for cat_tags in tags.values() for tag, _ in cat_tags]
            return {
                'tags': tags,
                'all_probs': all_probs,
                'all_tags': all_tags,
                'success': True
            }

        # Load and preprocess all images; unreadable files are skipped (logged)
        batch_tensor = []
        valid_images = []
        for img_path in image_paths:
            try:
                img = Image.open(img_path).convert('RGB')
                img_tensor = transform(img).to(device=device, dtype=dtype)
                batch_tensor.append(img_tensor)
                valid_images.append(img_path)
            except Exception as e:
                print(f"Error loading image {img_path}: {str(e)}")

        if not batch_tensor:
            return []

        # Stack all tensors into a single batch
        batch_input = torch.stack(batch_tensor)

        with torch.no_grad():
            try:
                # Forward pass on the whole batch; some models return (features, logits)
                output = model(batch_input)
                probs_batch = torch.sigmoid(output[1] if isinstance(output, tuple) else output)
                return [_result_from_probs(probs_batch[i].unsqueeze(0))
                        for i in range(len(valid_images))]
            except RuntimeError as e:
                # CUDA OOM or similar — fall back to processing one by one
                print(f"Error in batch processing: {str(e)}")
                print("Falling back to one-by-one processing...")

                results = []
                for img_tensor, img_path in zip(batch_tensor, valid_images):
                    try:
                        output = model(img_tensor.unsqueeze(0))
                        probs = torch.sigmoid(output[1] if isinstance(output, tuple) else output)
                        # Fix: the original fallback never appended successful
                        # results ("[Code omitted for brevity]"), so every image
                        # was reported as missing. Reuse the shared post-processing.
                        results.append(_result_from_probs(probs))
                    except Exception as e:
                        print(f"Error processing image {img_path}: {str(e)}")
                        results.append({
                            'tags': {},
                            'all_probs': {},
                            'all_tags': [],
                            'success': False,
                            'error': str(e)
                        })
                return results

    except Exception as e:
        print(f"Error in batch processing: {str(e)}")
        import traceback
        traceback.print_exc()
        # Fix: previously this fell through and implicitly returned None, which
        # made the caller crash on len(None). Re-raise so the caller's documented
        # exception handler triggers its one-by-one fallback deliberately.
        raise
|
app/utils/model_loader.py
ADDED
@@ -0,0 +1,379 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
import torch.nn as nn
|
3 |
+
from torch.nn import GroupNorm, LayerNorm
|
4 |
+
import torch.nn.functional as F
|
5 |
+
import torch.utils.checkpoint as checkpoint
|
6 |
+
import timm
|
7 |
+
|
8 |
+
class ViTWrapper(nn.Module):
    """Adapter exposing a timm ViT as a (patch feature map, CLS vector) extractor
    so it can plug into ImageTagger's feature-extraction interface."""

    def __init__(self, vit_model):
        super().__init__()
        self.vit = vit_model
        self.out_indices = (-1,)  # mimic timm.features_only

        # Cache geometry so downstream layers can be sized without poking the ViT.
        self.patch_size = vit_model.patch_embed.patch_size[0]
        self.embed_dim = vit_model.embed_dim

    def forward(self, x):
        """Return (patch_features [B, C, H, W], cls_token [B, C]) for an image batch."""
        batch = x.size(0)

        # Patch tokens, then prepend the CLS token
        tokens = self.vit.patch_embed(x)                    # (B, N, C)
        cls = self.vit.cls_token.expand(batch, -1, -1)      # (B, 1, C)
        tokens = torch.cat((cls, tokens), dim=1)            # (B, 1+N, C)

        # Positional encodings cover the full sequence, CLS included
        pos = self.vit.pos_embed
        if pos is not None:
            tokens = tokens + pos[:, :tokens.size(1), :]
        tokens = self.vit.pos_drop(tokens)

        for block in self.vit.blocks:
            tokens = block(tokens)
        tokens = self.vit.norm(tokens)                      # (B, 1+N, C)

        # Split CLS from patch tokens
        cls_final = tokens[:, 0]                            # (B, C)
        patch_tokens = tokens[:, 1:]                        # (B, N, C)

        # Fold the flat token sequence back into a square spatial map
        n_tokens, channels = patch_tokens.shape[1], patch_tokens.shape[2]
        side = int(n_tokens ** 0.5)  # assumes a square patch grid
        patch_features = patch_tokens.permute(0, 2, 1).reshape(batch, channels, side, side)

        return patch_features, cls_final

    def set_grad_checkpointing(self, enable=True):
        """Forward the checkpointing toggle to the wrapped ViT; True if supported."""
        toggle = getattr(self.vit, 'set_grad_checkpointing', None)
        if toggle is None:
            return False
        toggle(enable)
        return True
58 |
+
|
59 |
+
class ImageTagger(nn.Module):
|
60 |
+
"""
|
61 |
+
ImageTagger with Vision Transformer backbone
|
62 |
+
"""
|
63 |
+
def __init__(self, total_tags, dataset, model_name='vit_base_patch16_224',
|
64 |
+
num_heads=16, dropout=0.1, pretrained=True, tag_context_size=256,
|
65 |
+
use_gradient_checkpointing=False, img_size=224):
|
66 |
+
super().__init__()
|
67 |
+
|
68 |
+
# Store checkpointing config
|
69 |
+
self.use_gradient_checkpointing = use_gradient_checkpointing
|
70 |
+
self.model_name = model_name
|
71 |
+
self.img_size = img_size
|
72 |
+
|
73 |
+
# Debug and stats flags
|
74 |
+
self._flags = {
|
75 |
+
'debug': False,
|
76 |
+
'model_stats': True
|
77 |
+
}
|
78 |
+
|
79 |
+
# Core model config
|
80 |
+
self.dataset = dataset
|
81 |
+
self.tag_context_size = tag_context_size
|
82 |
+
self.total_tags = total_tags
|
83 |
+
|
84 |
+
print(f"🏗️ Building ImageTagger with ViT backbone and {total_tags} tags")
|
85 |
+
print(f" Backbone: {model_name}")
|
86 |
+
print(f" Image size: {img_size}x{img_size}")
|
87 |
+
print(f" Tag context size: {tag_context_size}")
|
88 |
+
print(f" Gradient checkpointing: {use_gradient_checkpointing}")
|
89 |
+
print(f" 🎯 Custom embeddings, PyTorch native attention, no ground truth inclusion")
|
90 |
+
|
91 |
+
# 1. Vision Transformer Backbone
|
92 |
+
print("📦 Loading Vision Transformer backbone...")
|
93 |
+
self._load_vit_backbone()
|
94 |
+
|
95 |
+
# Get backbone dimensions by running a test forward pass
|
96 |
+
self._determine_backbone_dimensions()
|
97 |
+
|
98 |
+
self.embedding_dim = self.backbone.embed_dim
|
99 |
+
|
100 |
+
# 2. Custom Tag Embeddings (no CLIP)
|
101 |
+
print("🎯 Using custom tag embeddings (no CLIP)")
|
102 |
+
self.tag_embedding = nn.Embedding(total_tags, self.embedding_dim)
|
103 |
+
|
104 |
+
# 3. Shared weights approach - tag bias for initial predictions
|
105 |
+
print("🔗 Using shared weights between initial head and tag embeddings")
|
106 |
+
self.tag_bias = nn.Parameter(torch.zeros(total_tags))
|
107 |
+
|
108 |
+
|
109 |
+
# 4. Image token extraction (for attention AND global pooling)
|
110 |
+
self.image_token_proj = nn.Identity()
|
111 |
+
|
112 |
+
# 5. Tags-as-queries cross-attention (using PyTorch's optimized implementation)
|
113 |
+
self.cross_attention = nn.MultiheadAttention(
|
114 |
+
embed_dim=self.embedding_dim,
|
115 |
+
num_heads=num_heads,
|
116 |
+
dropout=dropout,
|
117 |
+
batch_first=True # Use (batch, seq, feature) format
|
118 |
+
)
|
119 |
+
self.cross_norm = nn.LayerNorm(self.embedding_dim)
|
120 |
+
|
121 |
+
# Initialize weights
|
122 |
+
self._init_weights()
|
123 |
+
|
124 |
+
# Enable gradient checkpointing
|
125 |
+
if self.use_gradient_checkpointing:
|
126 |
+
self._enable_gradient_checkpointing()
|
127 |
+
|
128 |
+
print(f"✅ ImageTagger with ViT initialized!")
|
129 |
+
self._print_parameter_count()
|
130 |
+
|
131 |
+
def _load_vit_backbone(self):
|
132 |
+
"""Load Vision Transformer model from timm"""
|
133 |
+
print(f" Loading from timm: {self.model_name}")
|
134 |
+
|
135 |
+
# Load the ViT model (not features_only, we want the full model for token extraction)
|
136 |
+
vit_model = timm.create_model(
|
137 |
+
self.model_name,
|
138 |
+
pretrained=True,
|
139 |
+
img_size=self.img_size,
|
140 |
+
num_classes=0 # Remove classification head
|
141 |
+
)
|
142 |
+
|
143 |
+
# Wrap it in our compatibility layer
|
144 |
+
self.backbone = ViTWrapper(vit_model)
|
145 |
+
|
146 |
+
print(f" ✅ ViT loaded successfully")
|
147 |
+
print(f" Patch size: {self.backbone.patch_size}x{self.backbone.patch_size}")
|
148 |
+
print(f" Embed dim: {self.backbone.embed_dim}")
|
149 |
+
|
150 |
+
    def _determine_backbone_dimensions(self):
        """Probe the backbone with a dummy forward pass and record its geometry.

        Sets:
            self.backbone_dim: channel dimension of the patch feature map.
            self.feature_map_size: spatial side length (patches per row/column).
        """
        print("   🔍 Determining backbone dimensions...")

        # NOTE(review): autocast is pinned to 'cuda' while the dummy input lives
        # on the CPU; on CUDA-less hosts torch disables the context — confirm
        # this is the intended behavior for CPU-only deployments.
        with torch.no_grad(), torch.autocast('cuda', dtype=torch.bfloat16):
            # Create a dummy input
            dummy_input = torch.randn(1, 3, self.img_size, self.img_size)

            # Get features (patch map, CLS); only the patch map shape is needed
            backbone_features, cls_dummy = self.backbone(dummy_input)
            feature_tensor = backbone_features

            # Patch map is [B, C, H, W]: dim 1 is channels, dim 2 the grid side
            self.backbone_dim = feature_tensor.shape[1]
            self.feature_map_size = feature_tensor.shape[2]

            print(f"   Backbone output: {self.backbone_dim}D, {self.feature_map_size}x{self.feature_map_size} spatial")
            print(f"   Total patch tokens: {self.feature_map_size * self.feature_map_size}")
|
167 |
+
|
168 |
+
def _enable_gradient_checkpointing(self):
|
169 |
+
"""Enable gradient checkpointing for memory efficiency"""
|
170 |
+
print("🔄 Enabling gradient checkpointing...")
|
171 |
+
|
172 |
+
# Enable checkpointing for ViT backbone
|
173 |
+
if self.backbone.set_grad_checkpointing(True):
|
174 |
+
print(" ✅ ViT backbone checkpointing enabled")
|
175 |
+
else:
|
176 |
+
print(" ⚠️ ViT backbone doesn't support built-in checkpointing, will checkpoint manually")
|
177 |
+
|
178 |
+
def _checkpoint_backbone(self, x):
|
179 |
+
"""Wrapper for backbone with gradient checkpointing"""
|
180 |
+
if self.use_gradient_checkpointing and self.training:
|
181 |
+
return checkpoint.checkpoint(self.backbone, x, use_reentrant=False)
|
182 |
+
else:
|
183 |
+
return self.backbone(x)
|
184 |
+
|
185 |
+
def _checkpoint_image_proj(self, x):
|
186 |
+
"""Wrapper for image projection with gradient checkpointing"""
|
187 |
+
if self.use_gradient_checkpointing and self.training:
|
188 |
+
return checkpoint.checkpoint(self.image_token_proj, x, use_reentrant=False)
|
189 |
+
else:
|
190 |
+
return self.image_token_proj(x)
|
191 |
+
|
192 |
+
def _checkpoint_cross_attention(self, query, key, value):
|
193 |
+
"""Wrapper for cross attention with gradient checkpointing"""
|
194 |
+
def _attention_forward(q, k, v):
|
195 |
+
attended_features, _ = self.cross_attention(query=q, key=k, value=v)
|
196 |
+
return self.cross_norm(attended_features)
|
197 |
+
|
198 |
+
if self.use_gradient_checkpointing and self.training:
|
199 |
+
return checkpoint.checkpoint(_attention_forward, query, key, value, use_reentrant=False)
|
200 |
+
else:
|
201 |
+
return _attention_forward(query, key, value)
|
202 |
+
|
203 |
+
def _checkpoint_candidate_selection(self, initial_logits):
|
204 |
+
"""Wrapper for candidate selection with gradient checkpointing"""
|
205 |
+
def _candidate_forward(logits):
|
206 |
+
return self._get_candidate_tags(logits)
|
207 |
+
|
208 |
+
if self.use_gradient_checkpointing and self.training:
|
209 |
+
return checkpoint.checkpoint(_candidate_forward, initial_logits, use_reentrant=False)
|
210 |
+
else:
|
211 |
+
return _candidate_forward(initial_logits)
|
212 |
+
|
213 |
+
def _checkpoint_final_scoring(self, attended_features, candidate_indices):
|
214 |
+
"""Wrapper for final scoring with gradient checkpointing"""
|
215 |
+
def _scoring_forward(features, indices):
|
216 |
+
emb = self.tag_embedding(indices)
|
217 |
+
# BF16 in, BF16 out
|
218 |
+
return (features * emb).sum(dim=-1)
|
219 |
+
|
220 |
+
if self.use_gradient_checkpointing and self.training:
|
221 |
+
return checkpoint.checkpoint(_scoring_forward, attended_features, candidate_indices, use_reentrant=False)
|
222 |
+
else:
|
223 |
+
return _scoring_forward(attended_features, candidate_indices)
|
224 |
+
|
225 |
+
    def _init_weights(self):
        """Initialize weights for new modules (backbone keeps its own weights).

        Applies Xavier to Linear layers, Kaiming to Conv2d, and N(0, 0.02) to
        Embedding layers inside image_token_proj, then explicitly initializes
        the tag embedding table and the shared tag bias.
        """
        def _init_layer(layer):
            # Per-layer-type initialization used via Module.apply below
            if isinstance(layer, nn.Linear):
                nn.init.xavier_uniform_(layer.weight)
                if layer.bias is not None:
                    nn.init.zeros_(layer.bias)
            elif isinstance(layer, nn.Conv2d):
                nn.init.kaiming_normal_(layer.weight, mode='fan_out', nonlinearity='relu')
                if layer.bias is not None:
                    nn.init.zeros_(layer.bias)
            elif isinstance(layer, nn.Embedding):
                nn.init.normal_(layer.weight, mean=0, std=0.02)

        # Initialize new components. NOTE: image_token_proj is currently an
        # nn.Identity, so this apply() is a no-op placeholder kept for when a
        # real projection replaces it.
        self.image_token_proj.apply(_init_layer)

        # Initialize tag embeddings with normal distribution
        nn.init.normal_(self.tag_embedding.weight, mean=0, std=0.02)

        # Initialize tag bias (zero = no prior preference for any tag)
        nn.init.zeros_(self.tag_bias)
|
247 |
+
|
248 |
+
def _print_parameter_count(self):
|
249 |
+
"""Print parameter statistics"""
|
250 |
+
total_params = sum(p.numel() for p in self.parameters())
|
251 |
+
trainable_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
|
252 |
+
backbone_params = sum(p.numel() for p in self.backbone.parameters())
|
253 |
+
|
254 |
+
print(f"📊 Parameter Statistics:")
|
255 |
+
print(f" Total parameters: {total_params/1e6:.1f}M")
|
256 |
+
print(f" Trainable parameters: {trainable_params/1e6:.1f}M")
|
257 |
+
print(f" Frozen parameters: {(total_params-trainable_params)/1e6:.1f}M")
|
258 |
+
print(f" Backbone parameters: {backbone_params/1e6:.1f}M")
|
259 |
+
|
260 |
+
if self.use_gradient_checkpointing:
|
261 |
+
print(f" 🔄 Gradient checkpointing enabled for memory efficiency")
|
262 |
+
|
263 |
+
    @property
    def debug(self):
        # Read-only view of the debug flag stored in self._flags by __init__.
        return self._flags['debug']
|
266 |
+
|
267 |
+
    @property
    def model_stats(self):
        # Read-only view of the stats-collection flag stored in self._flags.
        return self._flags['model_stats']
|
270 |
+
|
271 |
+
def _get_candidate_tags(self, initial_logits, target_tags=None, hard_negatives=None):
|
272 |
+
"""Select candidate tags - no ground truth inclusion"""
|
273 |
+
batch_size = initial_logits.size(0)
|
274 |
+
|
275 |
+
# Simply select top K candidates based on initial predictions
|
276 |
+
top_probs, top_indices = torch.topk(
|
277 |
+
torch.sigmoid(initial_logits),
|
278 |
+
k=min(self.tag_context_size, self.total_tags),
|
279 |
+
dim=1, largest=True, sorted=True
|
280 |
+
)
|
281 |
+
|
282 |
+
return top_indices
|
283 |
+
|
284 |
+
    def _analyze_predictions(self, predictions, tag_indices):
        """Summarize confidence statistics over the selected candidate tags.

        Args:
            predictions: logits tensor [B, total_tags].
            tag_indices: candidate tag indices [B, K] to gather from predictions.

        Returns:
            Dict of scalar stats, or {} when stats are disabled or the call
            happens inside a torch.compile trace.
        """
        if not self.model_stats:
            return {}

        # .item() calls below would break a torch.compile graph — skip stats there.
        if torch._dynamo.is_compiling():
            return {}

        # NOTE(review): autocast is pinned to 'cuda'; on CUDA-less hosts torch
        # disables the context — confirm this is intended for CPU inference.
        with torch.no_grad(), torch.autocast('cuda', dtype=torch.bfloat16):
            probs = torch.sigmoid(predictions)
            relevant_probs = torch.gather(probs, 1, tag_indices)

            return {
                'prediction_confidence': relevant_probs.mean().item(),
                # Mean per-tag entropy term; the epsilon guards log(0)
                'prediction_entropy': -(relevant_probs * torch.log(relevant_probs + 1e-9)).mean().item(),
                'high_confidence_ratio': (relevant_probs > 0.7).float().mean().item(),
                'above_threshold_ratio': (relevant_probs > 0.5).float().mean().item(),
            }
|
302 |
+
|
303 |
+
    def forward(self, x, targets=None, hard_negatives=None):
        """
        Two-stage forward pass: coarse logits over all tags, then refined
        scores for the top-K candidates via tag/image cross-attention.

        Args:
            x: image batch [B, 3, H, W].
            targets: optional ground-truth tags — used ONLY to gate stats
                collection; never fed into candidate selection.
            hard_negatives: accepted for interface compatibility; unused here.

        Returns:
            Dict with 'initial_predictions' [B, T], 'refined_predictions' [B, T]
            (initial logits with candidate positions overwritten),
            'selected_candidates' [B, K], and 'model_stats'.

        All arithmetic tensors stay in the backbone's dtype (BF16 under
        autocast, FP32 otherwise); mixed-dtype spots are cast explicitly.
        """
        batch_size = x.size(0)  # NOTE(review): currently unused below
        # NOTE(review): both branches yield {} — the conditional is redundant
        model_stats = {} if self.model_stats else {}

        # ------------------------------------------------------------------
        # 1. Backbone → patch map + CLS token
        # ------------------------------------------------------------------
        patch_map, cls_token = self._checkpoint_backbone(x)  # patch_map: [B, C, H, W]
                                                             # cls_token: [B, C]

        # ------------------------------------------------------------------
        # 2. Tokens → global image vector
        # ------------------------------------------------------------------
        image_tokens_4d = self._checkpoint_image_proj(patch_map)   # [B, C, H, W]
        image_tokens = image_tokens_4d.flatten(2).transpose(1, 2)  # [B, N, C]

        # "Dual-pool": average of mean-pooled patch tokens and the CLS token
        global_features = 0.5 * (image_tokens.mean(dim=1, dtype=image_tokens.dtype) + cls_token)  # [B, C]

        compute_dtype = global_features.dtype  # BF16 or FP32

        # ------------------------------------------------------------------
        # 3. Initial logits (weights shared with the tag embedding table)
        # ------------------------------------------------------------------
        tag_weights = self.tag_embedding.weight.to(compute_dtype)  # [T, C]
        tag_bias = self.tag_bias.to(compute_dtype)                 # [T]

        initial_logits = global_features @ tag_weights.t() + tag_bias  # [B, T]
        initial_logits = initial_logits.to(compute_dtype)  # keep dtype uniform
        initial_preds = initial_logits  # alias

        # ------------------------------------------------------------------
        # 4. Candidate set: top-K tags refined by cross-attention
        # ------------------------------------------------------------------
        candidate_indices = self._checkpoint_candidate_selection(initial_logits)  # [B, K]

        tag_embeddings = self.tag_embedding(candidate_indices).to(compute_dtype)  # [B, K, C]

        attended_features = self._checkpoint_cross_attention(  # [B, K, C]
            tag_embeddings, image_tokens, image_tokens
        )

        # ------------------------------------------------------------------
        # 5. Score candidates & scatter back over the initial logits
        # ------------------------------------------------------------------
        candidate_logits = self._checkpoint_final_scoring(attended_features, candidate_indices)  # [B, K]

        # Align dtypes so scatter_ never throws on a BF16/FP32 mix
        if candidate_logits.dtype != initial_logits.dtype:
            candidate_logits = candidate_logits.to(initial_logits.dtype)

        refined_logits = initial_logits.clone()
        refined_logits.scatter_(1, candidate_indices, candidate_logits)
        refined_preds = refined_logits

        # ------------------------------------------------------------------
        # 6. Optional stats (skipped under torch.compile — .item() graph breaks)
        # ------------------------------------------------------------------
        if self.model_stats and targets is not None and not torch._dynamo.is_compiling():
            model_stats['initial_prediction_stats'] = self._analyze_predictions(initial_preds,
                                                                                candidate_indices)
            model_stats['refined_prediction_stats'] = self._analyze_predictions(refined_preds,
                                                                                candidate_indices)

        return {
            'initial_predictions': initial_preds,
            'refined_predictions': refined_preds,
            'selected_candidates': candidate_indices,
            'model_stats': model_stats
        }
|
378 |
+
|
379 |
+
def predict
|
app/utils/onnx_processing.py
ADDED
@@ -0,0 +1,729 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
ONNX-based batch image processing for the Image Tagger application.
|
3 |
+
Updated with proper ImageNet normalization and new metadata format.
|
4 |
+
"""
|
5 |
+
|
6 |
+
import os
|
7 |
+
import json
|
8 |
+
import time
|
9 |
+
import traceback
|
10 |
+
import numpy as np
|
11 |
+
import glob
|
12 |
+
import onnxruntime as ort
|
13 |
+
from PIL import Image
|
14 |
+
import torchvision.transforms as transforms
|
15 |
+
from concurrent.futures import ThreadPoolExecutor
|
16 |
+
|
17 |
+
def preprocess_image(image_path, image_size=512):
    """
    Preprocess an image for ImageTagger inference.

    The image is converted to RGB, resized to fit inside an
    ``image_size`` x ``image_size`` square while preserving aspect ratio,
    centered on a padded canvas, and normalized with ImageNet statistics.

    Args:
        image_path: Path to the image file on disk.
        image_size: Side length of the square model input (default 512).

    Returns:
        numpy.ndarray, CHW layout (3, image_size, image_size), float32,
        ImageNet-normalized.

    Raises:
        ValueError: If the file does not exist.
        Exception: If the image cannot be decoded/processed; the original
            error is chained as ``__cause__``.
    """
    if not os.path.exists(image_path):
        raise ValueError(f"Image not found at path: {image_path}")

    # ImageNet normalization - CRITICAL for the model
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        )
    ])

    try:
        with Image.open(image_path) as img:
            # Convert ANY non-RGB mode (RGBA, P, L, LA, CMYK, ...) to RGB.
            # The previous check only handled 'RGBA' and 'P', so grayscale
            # inputs produced a 1-channel tensor and crashed in Normalize,
            # which expects 3 channels.
            if img.mode != 'RGB':
                img = img.convert('RGB')

            # Get original dimensions
            width, height = img.size
            aspect_ratio = width / height

            # Fit the longer side to image_size, keeping aspect ratio
            if aspect_ratio > 1:
                new_width = image_size
                new_height = int(new_width / aspect_ratio)
            else:
                new_height = image_size
                new_width = int(new_height * aspect_ratio)

            # Resize with LANCZOS filter
            img = img.resize((new_width, new_height), Image.Resampling.LANCZOS)

            # Pad with a color close to the ImageNet mean so the padding
            # maps to roughly zero after normalization:
            # (0.485*255, 0.456*255, 0.406*255)
            pad_color = (124, 116, 104)
            new_image = Image.new('RGB', (image_size, image_size), pad_color)
            paste_x = (image_size - new_width) // 2
            paste_y = (image_size - new_height) // 2
            new_image.paste(img, (paste_x, paste_y))

            # Apply transforms (including ImageNet normalization)
            img_tensor = transform(new_image)
            return img_tensor.numpy()

    except Exception as e:
        # Chain the original exception so the root cause is preserved
        raise Exception(f"Error processing {image_path}: {str(e)}") from e
|
68 |
+
|
69 |
+
def process_single_image_onnx(image_path, model_path, metadata, threshold_profile="Overall",
                              active_threshold=0.35, active_category_thresholds=None,
                              min_confidence=0.1):
    """
    Process a single image using an ONNX model with the new metadata format.

    Args:
        image_path: Path to the image file
        model_path: Path to the ONNX model file
        metadata: Model metadata dictionary
        threshold_profile: The threshold profile being used
        active_threshold: Overall threshold value
        active_category_thresholds: Category-specific thresholds
        min_confidence: Minimum confidence to include in results

    Returns:
        Dictionary with tags and probabilities; 'success' is False and
        'error' is populated on failure.
    """
    try:
        # Reuse a cached tagger, but only if it was built for the same model
        # file. Previously the cache ignored model_path, so a later call with
        # a different model silently reused the old session.
        cached_tagger = getattr(process_single_image_onnx, 'tagger', None)
        cached_path = getattr(process_single_image_onnx, 'tagger_model_path', None)
        if cached_tagger is not None and cached_path == model_path:
            tagger = cached_tagger
        else:
            # Create a new tagger and cache it for future calls
            tagger = ONNXImageTagger(model_path, metadata)
            process_single_image_onnx.tagger = tagger
            process_single_image_onnx.tagger_model_path = model_path

        # Preprocess the image
        start_time = time.time()
        img_array = preprocess_image(image_path)

        # Run inference (batch of one)
        results = tagger.predict_batch(
            [img_array],
            threshold=active_threshold,
            category_thresholds=active_category_thresholds,
            min_confidence=min_confidence
        )
        inference_time = time.time() - start_time

        if results:
            result = results[0]
            result['inference_time'] = inference_time
            result['success'] = True
            return result
        else:
            return {
                'success': False,
                'error': 'Failed to process image',
                'all_tags': [],
                'all_probs': {},
                'tags': {}
            }

    except Exception as e:
        print(f"Error in process_single_image_onnx: {str(e)}")
        traceback.print_exc()
        return {
            'success': False,
            'error': str(e),
            'all_tags': [],
            'all_probs': {},
            'tags': {}
        }
|
134 |
+
|
135 |
+
def preprocess_images_parallel(image_paths, image_size=512, max_workers=8):
    """Preprocess many images concurrently.

    Returns a pair ``(arrays, paths)``: the successfully preprocessed
    image arrays and the matching subset of input paths, in input order.
    Failures are logged and dropped from both lists.
    """
    def _load(path):
        # Pair each path with its array; errors yield (None, path)
        try:
            return preprocess_image(path, image_size), path
        except Exception as e:
            print(f"Error processing {path}: {str(e)}")
            return None, path

    # Fan the work out over a thread pool; map preserves input order
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        outcomes = list(pool.map(_load, image_paths))

    # Keep only the successful loads, paths aligned with arrays
    processed_images = [arr for arr, _ in outcomes if arr is not None]
    valid_paths = [path for arr, path in outcomes if arr is not None]

    return processed_images, valid_paths
|
159 |
+
|
160 |
+
def apply_category_limits(result, category_limits):
    """
    Apply per-category tag-count limits to a result dictionary.

    Args:
        result: Result dictionary containing 'tags' (category -> list of
            (tag, prob) pairs, already sorted by probability) and 'all_tags'.
        category_limits: Dictionary mapping categories to their tag limits
            (0 = exclude category, -1 = no limit/include all)

    Returns:
        The same result dictionary, mutated in place with limits applied.
    """
    # Nothing to do without limits or for failed results; .get() also
    # tolerates result dicts that lack a 'success' key (previously a
    # KeyError).
    if not category_limits or not result.get('success'):
        return result

    # Get the filtered tags
    filtered_tags = result['tags']

    # Iterate over a snapshot since categories may be deleted mid-loop
    for category, cat_tags in list(filtered_tags.items()):
        # Get limit for this category, default to -1 (no limit)
        limit = category_limits.get(category, -1)

        if limit == 0:
            # Exclude this category entirely
            del filtered_tags[category]
        elif limit > 0 and len(cat_tags) > limit:
            # Keep only the top-N tags (lists arrive sorted by probability)
            filtered_tags[category] = cat_tags[:limit]

    # Rebuild the flat tag list to reflect the applied limits
    result['tags'] = filtered_tags
    result['all_tags'] = [tag
                          for cat_tags in filtered_tags.values()
                          for tag, _ in cat_tags]

    return result
|
201 |
+
|
202 |
+
class ONNXImageTagger:
    """ONNX-based image tagger for fast batch inference with updated metadata format"""

    def __init__(self, model_path, metadata):
        # Keep the model path for reference/debugging
        self.model_path = model_path

        # Prefer CUDA; fall back to a CPU-only session if creation fails
        try:
            self.session = ort.InferenceSession(
                model_path,
                providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
            )
            print(f"Using providers: {self.session.get_providers()}")
        except Exception as e:
            print(f"CUDA not available, using CPU: {e}")
            self.session = ort.InferenceSession(
                model_path,
                providers=['CPUExecutionProvider']
            )
            print(f"Using providers: {self.session.get_providers()}")

        # Metadata arrives as an already-parsed dict, not a file path
        self.metadata = metadata

        # Resolve tag mappings, supporting both metadata layouts
        if 'dataset_info' in metadata:
            # New metadata format
            dataset_info = metadata['dataset_info']
            self.tag_mapping = dataset_info['tag_mapping']
            self.idx_to_tag = self.tag_mapping['idx_to_tag']
            self.tag_to_category = self.tag_mapping['tag_to_category']
            self.total_tags = dataset_info['total_tags']
        else:
            # Fallback for older format
            self.idx_to_tag = metadata.get('idx_to_tag', {})
            self.tag_to_category = metadata.get('tag_to_category', {})
            self.total_tags = metadata.get('total_tags', len(self.idx_to_tag))

        # Cache the graph's input name for session.run feeds
        self.input_name = self.session.get_inputs()[0].name
        print(f"Model loaded successfully. Input name: {self.input_name}")
        print(f"Total tags: {self.total_tags}, Categories: {len(set(self.tag_to_category.values()))}")

    def predict_batch(self, image_arrays, threshold=0.5, category_thresholds=None, min_confidence=0.1):
        """Run batch inference on preprocessed image arrays"""
        # Stack the per-image arrays into one NCHW batch
        batch_input = np.stack(image_arrays)

        t0 = time.time()
        outputs = self.session.run(None, {self.input_name: batch_input})
        inference_time = time.time() - t0
        print(f"Batch inference completed in {inference_time:.4f} seconds ({inference_time/len(image_arrays):.4f} s/image)")

        # Multi-output models expose (initial, refined[, candidates]);
        # prefer the refined head when present
        if len(outputs) >= 2:
            refined_logits = outputs[1]
            main_logits = refined_logits
            print(f"Using refined predictions (shape: {refined_logits.shape})")
        else:
            main_logits = outputs[0]
            print(f"Using single output (shape: {main_logits.shape})")

        # Sigmoid turns raw logits into independent per-tag probabilities
        main_probs = 1.0 / (1.0 + np.exp(-main_logits))

        batch_results = []

        for probs in main_probs:
            # Bucket every tag above min_confidence by its category
            all_probs = {}
            for idx in range(probs.shape[0]):
                prob_value = float(probs[idx])
                if prob_value < min_confidence:
                    continue
                tag_name = self.idx_to_tag.get(str(idx), f"unknown-{idx}")
                category = self.tag_to_category.get(tag_name, "general")
                all_probs.setdefault(category, []).append((tag_name, prob_value))

            # Highest-probability tags first within each category
            for category in all_probs:
                all_probs[category].sort(key=lambda pair: pair[1], reverse=True)

            # Threshold each category (category-specific value wins over
            # the global threshold)
            tags = {}
            for category, cat_tags in all_probs.items():
                if category_thresholds and category in category_thresholds:
                    cat_threshold = category_thresholds[category]
                else:
                    cat_threshold = threshold
                tags[category] = [(tag, prob) for tag, prob in cat_tags if prob >= cat_threshold]

            # Flat list of every tag that survived thresholding
            all_tags = [tag for cat_tags in tags.values() for tag, _ in cat_tags]

            batch_results.append({
                'tags': tags,
                'all_probs': all_probs,
                'all_tags': all_tags,
                'success': True
            })

        return batch_results
|
323 |
+
|
324 |
+
def batch_process_images_onnx(folder_path, model_path, metadata_path, threshold_profile,
                              active_threshold, active_category_thresholds, save_dir=None,
                              progress_callback=None, min_confidence=0.1, batch_size=16,
                              category_limits=None):
    """
    Process all images in a folder using the ONNX model with new metadata format.

    Images are preprocessed in parallel and scored in batches; if a whole
    batch fails, each of its images is retried one at a time so a single bad
    file cannot sink the rest of the batch.

    Args:
        folder_path: Path to folder containing images
        model_path: Path to the ONNX model file
        metadata_path: Path to the model metadata file (JSON)
        threshold_profile: Selected threshold profile
        active_threshold: Overall threshold value
        active_category_thresholds: Category-specific thresholds
        save_dir: Directory to save tag files (if None uses default)
        progress_callback: Optional callback(done, total, current_path)
        min_confidence: Minimum confidence threshold
        batch_size: Number of images to process at once
        category_limits: Dictionary mapping categories to their tag limits

    Returns:
        Dictionary with 'success', 'total', 'processed', per-image 'results',
        'save_dir' and 'time_elapsed'; on setup failure, 'success' is False
        and 'error' describes the problem.
    """
    from utils.file_utils import save_tags_to_file  # Import here to avoid circular imports

    # Find all image files in the folder (both lower- and upper-case extensions)
    image_extensions = ['*.jpg', '*.jpeg', '*.png']
    image_files = []

    for ext in image_extensions:
        image_files.extend(glob.glob(os.path.join(folder_path, ext)))
        image_files.extend(glob.glob(os.path.join(folder_path, ext.upper())))

    # Remove duplicates (Windows case-insensitive filesystems return the
    # same file for both the lower- and upper-case glob)
    if os.name == 'nt':  # Windows
        unique_paths = set()
        unique_files = []
        for file_path in image_files:
            normalized_path = os.path.normpath(file_path).lower()
            if normalized_path not in unique_paths:
                unique_paths.add(normalized_path)
                unique_files.append(file_path)
        image_files = unique_files

    if not image_files:
        return {
            'success': False,
            'error': f"No images found in {folder_path}",
            'results': {}
        }

    # Use the provided save directory or create a default one next to the app
    if save_dir is None:
        app_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        save_dir = os.path.join(app_dir, "saved_tags")

    # Ensure the directory exists
    os.makedirs(save_dir, exist_ok=True)

    # Load metadata
    try:
        with open(metadata_path, 'r') as f:
            metadata = json.load(f)
    except Exception as e:
        return {
            'success': False,
            'error': f"Failed to load metadata: {e}",
            'results': {}
        }

    # Create ONNX tagger (loads the model once for the whole run)
    try:
        tagger = ONNXImageTagger(model_path, metadata)
    except Exception as e:
        return {
            'success': False,
            'error': f"Failed to load model: {e}",
            'results': {}
        }

    # Process images in batches
    results = {}
    total_images = len(image_files)
    processed = 0  # count of images whose batch has completed (for progress)

    start_time = time.time()

    # Process in batches
    for i in range(0, total_images, batch_size):
        batch_start = time.time()

        # Get current batch of images
        batch_files = image_files[i:i+batch_size]
        batch_size_actual = len(batch_files)

        # Update progress if callback provided
        if progress_callback:
            progress_callback(processed, total_images, batch_files[0] if batch_files else None)

        print(f"Processing batch {i//batch_size + 1}/{(total_images + batch_size - 1)//batch_size}: {batch_size_actual} images")

        try:
            # Preprocess images in parallel; unreadable files are dropped here
            processed_images, valid_paths = preprocess_images_parallel(batch_files)

            if processed_images:
                # Run batch prediction
                batch_results = tagger.predict_batch(
                    processed_images,
                    threshold=active_threshold,
                    category_thresholds=active_category_thresholds,
                    min_confidence=min_confidence
                )

                # Process results for each image (valid_paths and
                # batch_results are aligned by construction)
                for j, (image_path, result) in enumerate(zip(valid_paths, batch_results)):
                    # Update progress if callback provided
                    if progress_callback:
                        progress_callback(processed + j, total_images, image_path)

                    # Apply category limits if specified
                    if category_limits and result['success']:
                        print(f"Applying limits to {os.path.basename(image_path)}: {len(result['all_tags'])} → ", end="")
                        result = apply_category_limits(result, category_limits)
                        print(f"{len(result['all_tags'])} tags")

                    # Save the tags to a file; a save failure is recorded on
                    # the result but does not fail the image
                    if result['success']:
                        try:
                            output_path = save_tags_to_file(
                                image_path=image_path,
                                all_tags=result['all_tags'],
                                custom_dir=save_dir,
                                overwrite=True
                            )
                            result['output_path'] = str(output_path)
                        except Exception as e:
                            print(f"Error saving tags for {image_path}: {e}")
                            result['save_error'] = str(e)

                    # Store the result
                    results[image_path] = result

            processed += batch_size_actual

            # Calculate batch timing
            batch_end = time.time()
            batch_time = batch_end - batch_start
            print(f"Batch processed in {batch_time:.2f} seconds ({batch_time/batch_size_actual:.2f} seconds per image)")

        except Exception as e:
            print(f"Error processing batch: {str(e)}")
            traceback.print_exc()

            # Process failed images one by one as fallback, so one bad image
            # cannot take down the whole batch
            for j, image_path in enumerate(batch_files):
                try:
                    # Update progress if callback provided
                    if progress_callback:
                        progress_callback(processed + j, total_images, image_path)

                    # Preprocess single image
                    img_array = preprocess_image(image_path)

                    # Run inference on single image
                    single_results = tagger.predict_batch(
                        [img_array],
                        threshold=active_threshold,
                        category_thresholds=active_category_thresholds,
                        min_confidence=min_confidence
                    )

                    if single_results:
                        result = single_results[0]

                        # Apply category limits if specified
                        if category_limits and result['success']:
                            result = apply_category_limits(result, category_limits)

                        # Save the tags to a file
                        if result['success']:
                            try:
                                output_path = save_tags_to_file(
                                    image_path=image_path,
                                    all_tags=result['all_tags'],
                                    custom_dir=save_dir,
                                    overwrite=True
                                )
                                result['output_path'] = str(output_path)
                            except Exception as e:
                                print(f"Error saving tags for {image_path}: {e}")
                                result['save_error'] = str(e)

                        results[image_path] = result
                    else:
                        results[image_path] = {
                            'success': False,
                            'error': 'Failed to process image',
                            'all_tags': []
                        }

                except Exception as img_e:
                    print(f"Error processing single image {image_path}: {str(img_e)}")
                    results[image_path] = {
                        'success': False,
                        'error': str(img_e),
                        'all_tags': []
                    }

            processed += batch_size_actual

    # Final progress update
    if progress_callback:
        progress_callback(total_images, total_images, None)

    end_time = time.time()
    total_time = end_time - start_time
    print(f"Batch processing finished. Total time: {total_time:.2f} seconds, Average: {total_time/total_images:.2f} seconds per image")

    return {
        'success': True,
        'total': total_images,
        'processed': len(results),
        'results': results,
        'save_dir': save_dir,
        'time_elapsed': end_time - start_time
    }
|
551 |
+
|
552 |
+
def test_onnx_imagetagger(model_path, metadata_path, image_path, threshold=0.5, top_k=256):
    """
    Test ImageTagger ONNX model with proper handling of all outputs and new metadata format.

    Loads the model and metadata, runs a single image through the network,
    prints the predictions grouped by category, and returns them.

    Args:
        model_path: Path to ONNX model file
        metadata_path: Path to metadata JSON file
        image_path: Path to test image
        threshold: Confidence threshold for predictions
        top_k: Maximum number of predictions to show (per category)

    Returns:
        dict mapping category -> sorted list of (tag, probability) pairs;
        empty dict when nothing clears the threshold.
    """
    # Local imports keep this diagnostic helper self-contained
    import onnxruntime as ort
    import numpy as np
    import json
    import time
    from collections import defaultdict

    print(f"Loading ImageTagger ONNX model from {model_path}")

    # Load metadata with proper error handling
    try:
        with open(metadata_path, 'r') as f:
            metadata = json.load(f)
    except Exception as e:
        raise ValueError(f"Failed to load metadata: {e}")

    # Extract tag mappings from new metadata structure
    try:
        if 'dataset_info' in metadata:
            # New metadata format
            dataset_info = metadata['dataset_info']
            tag_mapping = dataset_info['tag_mapping']
            idx_to_tag = tag_mapping['idx_to_tag']
            tag_to_category = tag_mapping['tag_to_category']
            total_tags = dataset_info['total_tags']
        else:
            # Fallback for older format
            idx_to_tag = metadata.get('idx_to_tag', {})
            tag_to_category = metadata.get('tag_to_category', {})
            total_tags = metadata.get('total_tags', len(idx_to_tag))

        print(f"Model info: {total_tags} tags, {len(set(tag_to_category.values()))} categories")

    except KeyError as e:
        raise ValueError(f"Invalid metadata structure, missing key: {e}")

    # Initialize ONNX session with robust provider handling:
    # request CUDA only when onnxruntime reports a GPU build
    providers = []
    if ort.get_device() == 'GPU':
        providers.append('CUDAExecutionProvider')
    providers.append('CPUExecutionProvider')

    try:
        session = ort.InferenceSession(model_path, providers=providers)
        active_provider = session.get_providers()[0]
        print(f"Using provider: {active_provider}")

        # Print model info
        inputs = session.get_inputs()
        outputs = session.get_outputs()
        print(f"Model inputs: {len(inputs)}")
        print(f"Model outputs: {len(outputs)}")
        for i, output in enumerate(outputs):
            print(f"  Output {i}: {output.name} {output.shape}")

    except Exception as e:
        raise RuntimeError(f"Failed to create ONNX session: {e}")

    # Preprocess image
    print(f"Processing image: {image_path}")
    try:
        # Get image size from metadata (falls back to 512)
        img_size = metadata.get('model_info', {}).get('img_size', 512)
        img_tensor = preprocess_image(image_path, image_size=img_size)
        img_numpy = img_tensor[np.newaxis, :]  # Add batch dimension
        print(f"Input shape: {img_numpy.shape}, dtype: {img_numpy.dtype}")

    except Exception as e:
        raise ValueError(f"Image preprocessing failed: {e}")

    # Run inference
    input_name = session.get_inputs()[0].name
    print("Running inference...")

    start_time = time.time()
    try:
        outputs = session.run(None, {input_name: img_numpy})
        inference_time = time.time() - start_time
        print(f"Inference completed in {inference_time:.4f} seconds")

    except Exception as e:
        raise RuntimeError(f"Inference failed: {e}")

    # Handle outputs properly: multi-output models expose
    # (initial, refined[, candidates]); prefer the refined head
    if len(outputs) >= 2:
        initial_logits = outputs[0]
        refined_logits = outputs[1]
        selected_candidates = outputs[2] if len(outputs) > 2 else None

        # Use refined predictions as main output
        main_logits = refined_logits
        print(f"Using refined predictions (shape: {refined_logits.shape})")

    else:
        # Fallback to single output
        main_logits = outputs[0]
        print(f"Using single output (shape: {main_logits.shape})")

    # Apply sigmoid to get probabilities
    main_probs = 1.0 / (1.0 + np.exp(-main_logits))

    # Apply threshold and get predictions for the (single) batch item
    predictions_mask = (main_probs >= threshold)
    indices = np.where(predictions_mask[0])[0]

    if len(indices) == 0:
        print(f"No predictions above threshold {threshold}")
        # Show top 5 regardless of threshold so the caller sees something
        top_indices = np.argsort(main_probs[0])[-5:][::-1]
        print("Top 5 predictions:")
        for idx in top_indices:
            idx_str = str(idx)
            tag_name = idx_to_tag.get(idx_str, f"unknown-{idx}")
            prob = float(main_probs[0, idx])
            print(f"  {tag_name}: {prob:.3f}")
        return {}

    # Group by category (metadata keys are string indices)
    tags_by_category = defaultdict(list)

    for idx in indices:
        idx_str = str(idx)
        tag_name = idx_to_tag.get(idx_str, f"unknown-{idx}")
        category = tag_to_category.get(tag_name, "general")
        prob = float(main_probs[0, idx])

        tags_by_category[category].append((tag_name, prob))

    # Sort by probability within each category
    for category in tags_by_category:
        tags_by_category[category] = sorted(
            tags_by_category[category],
            key=lambda x: x[1],
            reverse=True
        )[:top_k]  # Limit per category

    # Print results
    total_predictions = sum(len(tags) for tags in tags_by_category.values())
    print(f"\nPredicted tags (threshold: {threshold}): {total_predictions} total")

    # Category order for consistent display
    category_order = ['general', 'character', 'copyright', 'artist', 'meta', 'year', 'rating']

    for category in category_order:
        if category in tags_by_category:
            tags = tags_by_category[category]
            print(f"\n{category.upper()} ({len(tags)}):")
            for tag, prob in tags:
                print(f"  {tag}: {prob:.3f}")

    # Show any other categories not in standard order
    for category in sorted(tags_by_category.keys()):
        if category not in category_order:
            tags = tags_by_category[category]
            print(f"\n{category.upper()} ({len(tags)}):")
            for tag, prob in tags:
                print(f"  {tag}: {prob:.3f}")

    # Performance stats
    print(f"\nPerformance:")
    print(f"  Inference time: {inference_time:.4f}s")
    print(f"  Provider: {active_provider}")
    print(f"  Max confidence: {main_probs.max():.3f}")
    if total_predictions > 0:
        avg_conf = np.mean([prob for tags in tags_by_category.values() for _, prob in tags])
        print(f"  Average confidence: {avg_conf:.3f}")

    # Plain dict so callers don't receive a defaultdict
    return dict(tags_by_category)
|
app/utils/ui_components.py
ADDED
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
UI components for the Image Tagger application.
|
3 |
+
"""
|
4 |
+
|
5 |
+
import os
|
6 |
+
import streamlit as st
|
7 |
+
from PIL import Image
|
8 |
+
|
9 |
+
|
10 |
+
def display_progress_bar(prob):
    """
    Create an HTML progress bar for displaying probability.

    Args:
        prob: Probability value, nominally between 0 and 1.

    Returns:
        HTML string for the progress bar, color-coded by confidence
        (green >= 0.8, orange >= 0.5, red otherwise).
    """
    # Convert probability to a truncated integer percentage, clamped to
    # [0, 100] so an out-of-range input cannot render a broken bar width.
    percentage = max(0, min(100, int(prob * 100)))

    # Choose color based on confidence level
    if prob >= 0.8:
        color = "green"
    elif prob >= 0.5:
        color = "orange"
    else:
        color = "red"

    # Return HTML for a styled progress bar
    return f"""
    <div style="margin-bottom: 5px; display: flex; align-items: center;">
        <div style="flex-grow: 1; background-color: #f0f0f0; border-radius: 3px; height: 8px; position: relative;">
            <div style="position: absolute; width: {percentage}%; background-color: {color}; height: 8px; border-radius: 3px;"></div>
        </div>
        <div style="margin-left: 8px; min-width: 40px; text-align: right; font-size: 0.9em;">{percentage}%</div>
    </div>
    """
|
40 |
+
|
41 |
+
|
42 |
+
def show_example_images(examples_dir):
    """Show thumbnails of bundled example images and let the user pick one.

    Args:
        examples_dir: Directory containing example images (.png/.jpg/.jpeg).

    Returns:
        Path of the example selected during this run, or None.
    """
    # Guard: no examples directory at all.
    if not os.path.exists(examples_dir):
        st.info("Examples directory not found.")
        st.write("Create an 'examples' directory and add some JPG or PNG images.")
        return None

    image_names = [
        name for name in os.listdir(examples_dir)
        if name.lower().endswith(('.png', '.jpg', '.jpeg'))
    ]

    # Guard: directory exists but holds no usable images.
    if not image_names:
        st.info("No example images found.")
        st.write("Add some JPG or PNG images to the 'examples' directory.")
        return None

    st.write("Select an example image:")

    # Two-column thumbnail grid.
    grid = st.columns(2)

    chosen_path = None
    for idx, name in enumerate(image_names):
        with grid[idx % 2]:
            path = os.path.join(examples_dir, name)

            try:
                thumbnail = Image.open(path)
                st.image(thumbnail, width=150, caption=name)

                # A selection only takes effect during the rerun in which
                # its button reports True.
                if st.button(f"Use", key=f"example_{idx}"):
                    chosen_path = path
                    st.session_state.original_filename = name

                    # Echo the chosen image at full width.
                    st.image(thumbnail, use_container_width=True)
                    st.success(f"Example '{name}' selected!")
            except Exception as e:
                st.error(f"Error loading {name}: {str(e)}")

    return chosen_path
|
91 |
+
|
92 |
+
|
93 |
+
def display_batch_results(batch_results):
    """Summarize a batch-tagging run in the Streamlit UI.

    Args:
        batch_results: Dict with 'success', 'processed', 'total',
            'results' (per-image outcome dicts) and optionally 'error'.
    """
    # Whole-batch failure: report and bail out.
    if not batch_results['success']:
        st.error(f"Batch processing failed: {batch_results.get('error', 'Unknown error')}")
        return

    st.success(f"✅ Processed {batch_results['processed']} of {batch_results['total']} images")

    per_image = batch_results['results']

    with st.expander("Batch Processing Results", expanded=True):
        # Tally per-image outcomes.
        ok_count = sum(1 for outcome in per_image.values() if outcome['success'])
        failed_count = batch_results['total'] - ok_count

        st.write(f"- Successfully tagged: {ok_count}")
        st.write(f"- Failed to process: {failed_count}")

        if failed_count > 0:
            # List every failed image with its error message.
            st.write("### Processing Errors")
            for image_path, outcome in per_image.items():
                if not outcome['success']:
                    st.write(f"- **{os.path.basename(image_path)}**: {outcome.get('error', 'Unknown error')}")

        if ok_count > 0:
            st.write("### Output Files")
            st.write(f"Tag files have been saved to the 'saved_tags' folder.")

            # Preview the first few generated tag files.
            st.write("Example outputs:")
            samples = [(p, r) for p, r in per_image.items() if r['success']][:3]
            for image_path, outcome in samples:
                tag_file = outcome.get('output_path', '')
                if tag_file and os.path.exists(tag_file):
                    st.write(f"- **{os.path.basename(tag_file)}**")

                    with open(tag_file, 'r', encoding='utf-8') as f:
                        content = f.read()
                    st.code(content, language='text')
|
camie-tagger-v2-metadata.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
camie-tagger-v2.onnx
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ab0aaf253e3d546090001bec9bebc776c354ab6800f442ab9167af87b4a953ac
|
3 |
+
size 788983561
|
camie-tagger-v2.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:81b1f1caaf84afc2e91204e1d01163487b90720f797af8b64eea331bf16b3f25
|
3 |
+
size 572151796
|
config.json
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"model_name": "camie-tagger",
|
3 |
+
"model_type": "pytorch",
|
4 |
+
"models": [
|
5 |
+
{
|
6 |
+
"name": "camie-tagger-v2-safetensors",
|
7 |
+
"path": "camie-tagger-v2.safetensors",
|
8 |
+
"description": "Safetensors"
|
9 |
+
},
|
10 |
+
{
|
11 |
+
"name": "camie-tagger-v2-onnx",
|
12 |
+
"path": "camie-tagger-v2.onnx",
|
13 |
+
"description": "ONNX"
|
14 |
+
}
|
15 |
+
],
|
16 |
+
"default_model": "camie-tagger-v2-onnx",
|
17 |
+
"description": "A custom tagging model",
|
18 |
+
"version": "1.0.0",
|
19 |
+
"tags": ["tagger", "classification"],
|
20 |
+
"license": "gpl-3.0"
|
21 |
+
}
|
config.yaml
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
model_info:
|
2 |
+
name: camie-tagger
|
3 |
+
version: 1.0.0
|
4 |
+
type: pytorch
|
5 |
+
|
6 |
+
models:
|
7 |
+
- name: camie-tagger-v2-safetensors
|
8 |
+
path: camie-tagger-v2.safetensors
|
9 |
+
description: Safetensors
|
10 |
+
|
11 |
+
- name: camie-tagger-v2-onnx
|
12 |
+
path: camie-tagger-v2.onnx
|
13 |
+
description: ONNX
|
14 |
+
|
15 |
+
default_model: camie-tagger-v2-onnx
|
16 |
+
|
17 |
+
framework:
|
18 |
+
name: pytorch
|
19 |
+
version: "1.X"
|
20 |
+
|
21 |
+
metadata:
|
22 |
+
task: classification
|
23 |
+
tags:
|
24 |
+
- tagger
|
25 |
+
- classification
|
26 |
+
license: GPL-3.0
|
full_validation_results.json
ADDED
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"metadata": {
|
3 |
+
"total_samples": 20116,
|
4 |
+
"num_profiles": 3,
|
5 |
+
"num_categories": 8,
|
6 |
+
"evaluation_date": "2025-08-30T19:03:30.091334"
|
7 |
+
},
|
8 |
+
"results": [
|
9 |
+
{
|
10 |
+
"CATEGORY": "OVERALL",
|
11 |
+
"PROFILE": "MICRO OPT",
|
12 |
+
"THRESHOLD": 0.6142857142857143,
|
13 |
+
"MICRO-F1": 67.34597711325426,
|
14 |
+
"MACRO-F1": 46.29084825934598
|
15 |
+
},
|
16 |
+
{
|
17 |
+
"CATEGORY": "YEAR",
|
18 |
+
"PROFILE": "MICRO OPT",
|
19 |
+
"THRESHOLD": 0.6142857142857143,
|
20 |
+
"MICRO-F1": 30.752530027663965,
|
21 |
+
"MACRO-F1": 21.295489610362065
|
22 |
+
},
|
23 |
+
{
|
24 |
+
"CATEGORY": "RATING",
|
25 |
+
"PROFILE": "MICRO OPT",
|
26 |
+
"THRESHOLD": 0.6142857142857143,
|
27 |
+
"MICRO-F1": 83.067447729746,
|
28 |
+
"MACRO-F1": 81.83200483395994
|
29 |
+
},
|
30 |
+
{
|
31 |
+
"CATEGORY": "GENERAL",
|
32 |
+
"PROFILE": "MICRO OPT",
|
33 |
+
"THRESHOLD": 0.6142857142857143,
|
34 |
+
"MICRO-F1": 66.41444399192685,
|
35 |
+
"MACRO-F1": 27.40425414261813
|
36 |
+
},
|
37 |
+
{
|
38 |
+
"CATEGORY": "CHARACTER",
|
39 |
+
"PROFILE": "MICRO OPT",
|
40 |
+
"THRESHOLD": 0.6142857142857143,
|
41 |
+
"MICRO-F1": 83.42532813054274,
|
42 |
+
"MACRO-F1": 64.54679672170435
|
43 |
+
},
|
44 |
+
{
|
45 |
+
"CATEGORY": "COPYRIGHT",
|
46 |
+
"PROFILE": "MICRO OPT",
|
47 |
+
"THRESHOLD": 0.6142857142857143,
|
48 |
+
"MICRO-F1": 86.57996129008917,
|
49 |
+
"MACRO-F1": 53.128368093914744
|
50 |
+
},
|
51 |
+
{
|
52 |
+
"CATEGORY": "ARTIST",
|
53 |
+
"PROFILE": "MICRO OPT",
|
54 |
+
"THRESHOLD": 0.6142857142857143,
|
55 |
+
"MICRO-F1": 70.03317039106145,
|
56 |
+
"MACRO-F1": 64.35697070900147
|
57 |
+
},
|
58 |
+
{
|
59 |
+
"CATEGORY": "META",
|
60 |
+
"PROFILE": "MICRO OPT",
|
61 |
+
"THRESHOLD": 0.6142857142857143,
|
62 |
+
"MICRO-F1": 61.17673478553783,
|
63 |
+
"MACRO-F1": 19.199087616327382
|
64 |
+
},
|
65 |
+
{
|
66 |
+
"CATEGORY": "OVERALL",
|
67 |
+
"PROFILE": "MACRO OPT",
|
68 |
+
"THRESHOLD": 0.49183673469387756,
|
69 |
+
"MICRO-F1": 60.90569893586309,
|
70 |
+
"MACRO-F1": 50.61684357922967
|
71 |
+
},
|
72 |
+
{
|
73 |
+
"CATEGORY": "YEAR",
|
74 |
+
"PROFILE": "MACRO OPT",
|
75 |
+
"THRESHOLD": 0.49183673469387756,
|
76 |
+
"MICRO-F1": 37.21798791030943,
|
77 |
+
"MACRO-F1": 32.62783637958324
|
78 |
+
},
|
79 |
+
{
|
80 |
+
"CATEGORY": "RATING",
|
81 |
+
"PROFILE": "MACRO OPT",
|
82 |
+
"THRESHOLD": 0.49183673469387756,
|
83 |
+
"MICRO-F1": 78.70415252867991,
|
84 |
+
"MACRO-F1": 77.4818397805115
|
85 |
+
},
|
86 |
+
{
|
87 |
+
"CATEGORY": "GENERAL",
|
88 |
+
"PROFILE": "MACRO OPT",
|
89 |
+
"THRESHOLD": 0.49183673469387756,
|
90 |
+
"MICRO-F1": 60.23192293959081,
|
91 |
+
"MACRO-F1": 34.578761327417006
|
92 |
+
},
|
93 |
+
{
|
94 |
+
"CATEGORY": "CHARACTER",
|
95 |
+
"PROFILE": "MACRO OPT",
|
96 |
+
"THRESHOLD": 0.49183673469387756,
|
97 |
+
"MICRO-F1": 79.91802201844503,
|
98 |
+
"MACRO-F1": 66.17825085829298
|
99 |
+
},
|
100 |
+
{
|
101 |
+
"CATEGORY": "COPYRIGHT",
|
102 |
+
"PROFILE": "MACRO OPT",
|
103 |
+
"THRESHOLD": 0.49183673469387756,
|
104 |
+
"MICRO-F1": 81.84935439943561,
|
105 |
+
"MACRO-F1": 56.18607804162489
|
106 |
+
},
|
107 |
+
{
|
108 |
+
"CATEGORY": "ARTIST",
|
109 |
+
"PROFILE": "MACRO OPT",
|
110 |
+
"THRESHOLD": 0.49183673469387756,
|
111 |
+
"MICRO-F1": 62.32690082490389,
|
112 |
+
"MACRO-F1": 66.10039817274198
|
113 |
+
},
|
114 |
+
{
|
115 |
+
"CATEGORY": "META",
|
116 |
+
"PROFILE": "MACRO OPT",
|
117 |
+
"THRESHOLD": 0.49183673469387756,
|
118 |
+
"MICRO-F1": 56.30398796087284,
|
119 |
+
"MACRO-F1": 23.673363465036253
|
120 |
+
},
|
121 |
+
{
|
122 |
+
"CATEGORY": "OVERALL",
|
123 |
+
"PROFILE": "BALANCED",
|
124 |
+
"THRESHOLD": 0.6142857142857143,
|
125 |
+
"MICRO-F1": 67.34597711325426,
|
126 |
+
"MACRO-F1": 46.29084825934598
|
127 |
+
},
|
128 |
+
{
|
129 |
+
"CATEGORY": "YEAR",
|
130 |
+
"PROFILE": "BALANCED",
|
131 |
+
"THRESHOLD": 0.6142857142857143,
|
132 |
+
"MICRO-F1": 30.752530027663965,
|
133 |
+
"MACRO-F1": 21.295489610362065
|
134 |
+
},
|
135 |
+
{
|
136 |
+
"CATEGORY": "RATING",
|
137 |
+
"PROFILE": "BALANCED",
|
138 |
+
"THRESHOLD": 0.6142857142857143,
|
139 |
+
"MICRO-F1": 83.067447729746,
|
140 |
+
"MACRO-F1": 81.83200483395994
|
141 |
+
},
|
142 |
+
{
|
143 |
+
"CATEGORY": "GENERAL",
|
144 |
+
"PROFILE": "BALANCED",
|
145 |
+
"THRESHOLD": 0.6142857142857143,
|
146 |
+
"MICRO-F1": 66.41444399192685,
|
147 |
+
"MACRO-F1": 27.40425414261813
|
148 |
+
},
|
149 |
+
{
|
150 |
+
"CATEGORY": "CHARACTER",
|
151 |
+
"PROFILE": "BALANCED",
|
152 |
+
"THRESHOLD": 0.6142857142857143,
|
153 |
+
"MICRO-F1": 83.42532813054274,
|
154 |
+
"MACRO-F1": 64.54679672170435
|
155 |
+
},
|
156 |
+
{
|
157 |
+
"CATEGORY": "COPYRIGHT",
|
158 |
+
"PROFILE": "BALANCED",
|
159 |
+
"THRESHOLD": 0.6142857142857143,
|
160 |
+
"MICRO-F1": 86.57996129008917,
|
161 |
+
"MACRO-F1": 53.128368093914744
|
162 |
+
},
|
163 |
+
{
|
164 |
+
"CATEGORY": "ARTIST",
|
165 |
+
"PROFILE": "BALANCED",
|
166 |
+
"THRESHOLD": 0.6142857142857143,
|
167 |
+
"MICRO-F1": 70.03317039106145,
|
168 |
+
"MACRO-F1": 64.35697070900147
|
169 |
+
},
|
170 |
+
{
|
171 |
+
"CATEGORY": "META",
|
172 |
+
"PROFILE": "BALANCED",
|
173 |
+
"THRESHOLD": 0.6142857142857143,
|
174 |
+
"MICRO-F1": 61.17673478553783,
|
175 |
+
"MACRO-F1": 19.199087616327382
|
176 |
+
}
|
177 |
+
]
|
178 |
+
}
|
game/dev_tools.py
ADDED
@@ -0,0 +1,580 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
|
2 |
+
"""
|
3 |
+
Developer Tools for Tag Collector Game
|
4 |
+
A hidden panel with tools for testing and debugging game features.
|
5 |
+
"""
|
6 |
+
|
7 |
+
import streamlit as st
|
8 |
+
import random
|
9 |
+
import time
|
10 |
+
from game_constants import (
|
11 |
+
TAG_CURRENCY_NAME,
|
12 |
+
RARITY_LEVELS,
|
13 |
+
ACHIEVEMENTS,
|
14 |
+
)
|
15 |
+
|
16 |
+
from tag_categories import (
|
17 |
+
TAG_CATEGORIES,
|
18 |
+
TAG_DETECTOR_UPGRADES,
|
19 |
+
PROGRESSION_ACHIEVEMENTS
|
20 |
+
)
|
21 |
+
|
22 |
+
def display_dev_tools():
    """Render the hidden developer tools panel.

    Should stay hidden in production builds or behind a developer toggle.
    """
    st.title("🛠️ Developer Tools")
    st.warning("These tools are for testing and debugging only. They can break game balance!")

    # Pair each tab label with the function that renders its content.
    labels = ["Resources", "Tag Management", "Progression", "Mosaic Tools", "Reset Tools"]
    renderers = [
        display_resource_tools,
        display_tag_tools,
        display_progression_tools,
        display_mosaic_tools,
        display_reset_tools,
    ]

    for tab, render in zip(st.tabs(labels), renderers):
        with tab:
            render()
|
49 |
+
|
50 |
+
def display_resource_tools():
    """Render developer controls for currency, threshold and tag power."""
    st.subheader("Currency and Resources")

    # Grant TagCoins on demand (also counted as earned in the stats).
    input_col, action_col = st.columns([3, 1])
    with input_col:
        amount = st.number_input("Amount of TagCoins to add:", min_value=0, max_value=1000000, value=1000, step=100)
    with action_col:
        if st.button("Add TagCoins", key="add_currency"):
            st.session_state.tag_currency += amount
            st.session_state.game_stats["total_currency_earned"] += amount
            st.success(f"Added {amount} {TAG_CURRENCY_NAME}!")

    # Override the detection threshold directly.
    input_col, action_col = st.columns([3, 1])
    with input_col:
        threshold = st.slider("Set threshold value:", min_value=0.0, max_value=1.0, value=st.session_state.threshold, step=0.01)
    with action_col:
        if st.button("Set Threshold", key="set_threshold"):
            st.session_state.threshold = threshold
            st.success(f"Set threshold to {threshold:.2f}")

    # Grant a cumulative tag power bonus (lazily initialized).
    input_col, action_col = st.columns([3, 1])
    with input_col:
        power = st.number_input("Add tag power bonus:", min_value=0.0, max_value=0.1, value=0.01, step=0.001, format="%.3f")
    with action_col:
        if st.button("Add Power", key="add_power"):
            if not hasattr(st.session_state, 'tag_power_bonus'):
                st.session_state.tag_power_bonus = 0
            st.session_state.tag_power_bonus += power
            st.success(f"Added {power:.3f} tag power!")
|
83 |
+
|
84 |
+
def display_tag_tools():
    """Display developer tools for adding specific or random tags.

    Mutates ``st.session_state.collected_tags``, a dict mapping tag name
    to a record with count / rarity / category / discovery_time.
    """
    st.subheader("Tag Management")

    # Add specific tag
    with st.expander("Add Specific Tag", expanded=True):
        col1, col2, col3 = st.columns([4, 2, 1])

        with col1:
            tag_name = st.text_input("Tag name:", value="custom_tag")

        with col2:
            rarities = list(RARITY_LEVELS.keys())
            rarity = st.selectbox("Rarity:", rarities)

        with col3:
            # Hard-coded category list; presumably mirrors the game's
            # taxonomy — TODO confirm against TAG_CATEGORIES.
            categories = ["general", "character", "copyright", "meta", "rating", "artist", "year"]
            category = st.selectbox("Category:", categories)

        if st.button("Add Tag", key="add_specific_tag"):
            # Check if tag already exists
            is_new = tag_name not in st.session_state.collected_tags

            if is_new:
                # Create a fresh record for a never-seen tag.
                st.session_state.collected_tags[tag_name] = {
                    "count": 1,
                    "rarity": rarity,
                    "category": category,
                    "discovery_time": time.strftime("%Y-%m-%d %H:%M:%S")
                }
                st.success(f"Added new tag '{tag_name}' ({rarity}, {category})")
            else:
                # FIX: the record used to be unconditionally overwritten with
                # count=1 (losing the prior count, rarity, category and
                # discovery time) before being incremented. Existing tags now
                # only get their count bumped.
                st.session_state.collected_tags[tag_name]["count"] += 1
                st.info(f"Incremented count for existing tag '{tag_name}'")

    # Generate random tags
    with st.expander("Generate Random Tags", expanded=False):
        col1, col2 = st.columns([3, 1])

        with col1:
            num_tags = st.number_input("Number of random tags to generate:", min_value=1, max_value=1000, value=10)

            # Optional custom rarity distribution.
            advanced = st.checkbox("Advanced options")
            if advanced:
                st.write("Rarity distribution:")
                common_pct = st.slider("Common tags %:", 0, 100, 70)
                uncommon_pct = st.slider("Uncommon tags %:", 0, 100, 20)
                rare_pct = st.slider("Rare tags %:", 0, 100, 8)
                super_rare_pct = st.slider("Super rare tags %:", 0, 100, 2)

                # Ensure total is 100%
                total = common_pct + uncommon_pct + rare_pct + super_rare_pct
                if total != 100:
                    st.warning(f"Distribution totals {total}%, should be 100%")

        with col2:
            generate_button = st.button("Generate", key="generate_random_tags")

        if generate_button:
            generated_count = 0

            # `total` only exists when `advanced` is on; the short-circuit
            # below guarantees it is never read otherwise.
            if advanced and total == 100:
                # Custom distribution: the coarse percentage buckets are
                # split across the game's finer-grained rarity tiers.
                rarity_weights = {
                    "Whispered Word": common_pct / 100,
                    "Common Canard": uncommon_pct / 100 * 0.6,
                    "Urban Footnote": uncommon_pct / 100 * 0.4,
                    "Urban Myth": rare_pct / 100 * 0.5,
                    "Urban Legend": rare_pct / 100 * 0.5,
                    "Urban Nightmare": super_rare_pct / 100 * 0.8,
                    "Impuritas Civitas": super_rare_pct / 100 * 0.2
                }
            else:
                # Default distribution
                rarity_weights = {
                    "Whispered Word": 0.70,
                    "Common Canard": 0.15,
                    "Urban Footnote": 0.08,
                    "Urban Myth": 0.04,
                    "Urban Legend": 0.02,
                    "Urban Nightmare": 0.008,
                    "Impuritas Civitas": 0.002
                }

            # Generate the tags
            for i in range(num_tags):
                # Synthetic name; the time component has second resolution,
                # so names can collide across rapid repeated runs (collisions
                # are handled by the count-increment branch below).
                tag_name = f"random_tag_{int(time.time() % 10000)}_{i}"

                # Determine rarity
                rarity = random.choices(
                    list(rarity_weights.keys()),
                    weights=list(rarity_weights.values()),
                    k=1
                )[0]

                # Determine category
                categories = list(TAG_CATEGORIES.keys())
                category = random.choice(categories)

                timestamp = time.strftime("%Y-%m-%d %H:%M:%S")

                # Check if this is a new tag
                is_new = tag_name not in st.session_state.collected_tags

                if is_new:
                    st.session_state.collected_tags[tag_name] = {
                        "count": 1,
                        "rarity": rarity,
                        "category": category,
                        "discovery_time": timestamp
                    }
                    generated_count += 1
                else:
                    # Increment count if already exists
                    st.session_state.collected_tags[tag_name]["count"] += 1

            # Show confirmation
            st.success(f"Generated {generated_count} new random tags!")
|
210 |
+
|
211 |
+
def display_progression_tools():
    """Display developer tools for the progression system.

    Covers category unlocks, detector level, and achievement management.
    Mutates ``st.session_state.unlocked_tag_categories``,
    ``st.session_state.detector_level`` and ``st.session_state.achievements``.
    """
    st.subheader("Progression System Tools")

    # Unlock categories
    with st.expander("Unlock Categories", expanded=True):
        st.write("Select categories to unlock:")

        # Get currently unlocked categories
        unlocked = []
        if hasattr(st.session_state, 'unlocked_tag_categories'):
            unlocked = st.session_state.unlocked_tag_categories

        # One checkbox per non-default category, pre-checked if unlocked.
        category_checkboxes = {}
        for category, info in TAG_CATEGORIES.items():
            # Default-unlocked categories cannot be toggled here.
            if info["unlocked_by_default"]:
                continue

            is_unlocked = category in unlocked
            category_checkboxes[category] = st.checkbox(
                f"{info['name']} ({info['cost']} {TAG_CURRENCY_NAME})",
                value=is_unlocked,
                key=f"cat_{category}"
            )

        # Button to apply changes
        if st.button("Apply Category Changes", key="apply_categories"):
            # Initialize if needed
            if not hasattr(st.session_state, 'unlocked_tag_categories'):
                st.session_state.unlocked_tag_categories = []

            # FIX: the default-unlocked categories used to be appended
            # unconditionally on every click, filling the list with
            # duplicates. Only add them if they are not present yet.
            for cat, info in TAG_CATEGORIES.items():
                if info["unlocked_by_default"] and cat not in st.session_state.unlocked_tag_categories:
                    st.session_state.unlocked_tag_categories.append(cat)

            # Sync the list with the checkbox states.
            for category, checked in category_checkboxes.items():
                # If checked but not unlocked, add it
                if checked and category not in st.session_state.unlocked_tag_categories:
                    st.session_state.unlocked_tag_categories.append(category)
                    st.success(f"Unlocked {TAG_CATEGORIES[category]['name']}!")

                # If unchecked but unlocked, remove it
                elif not checked and category in st.session_state.unlocked_tag_categories:
                    st.session_state.unlocked_tag_categories.remove(category)
                    st.info(f"Locked {TAG_CATEGORIES[category]['name']}")

    # Upgrade detector level
    with st.expander("Set Detector Level", expanded=False):
        # Get current level
        current_level = 0
        if hasattr(st.session_state, 'detector_level'):
            current_level = st.session_state.detector_level

        # Display slider for detector level
        new_level = st.slider(
            "Detector Level:",
            min_value=0,
            max_value=len(TAG_DETECTOR_UPGRADES) - 1,
            value=current_level
        )

        # Show info about selected level; max_tags == 0 means unlimited.
        upgrade = TAG_DETECTOR_UPGRADES[new_level]
        max_tags = upgrade["max_tags"]
        if max_tags == 0:
            st.write(f"Selected: {upgrade['name']} (Unlimited tags)")
        else:
            st.write(f"Selected: {upgrade['name']} ({max_tags} tags)")

        # Button to apply changes
        if st.button("Set Detector Level", key="set_detector_level"):
            st.session_state.detector_level = new_level
            st.success(f"Set detector level to {new_level} ({upgrade['name']})")

    # Unlock achievements
    with st.expander("Manage Achievements", expanded=False):
        # Combine standard and progression achievements
        all_achievements = {**ACHIEVEMENTS, **PROGRESSION_ACHIEVEMENTS}

        # Initialize achievements if needed
        if not hasattr(st.session_state, 'achievements'):
            st.session_state.achievements = set()

        # Create tabs for unlocked and locked
        unlocked_tab, locked_tab = st.tabs(["Unlocked", "Locked"])

        with unlocked_tab:
            st.write("Currently unlocked achievements:")

            # Show unlocked achievements with option to remove
            for achievement_id in sorted(st.session_state.achievements):
                if achievement_id in all_achievements:
                    col1, col2 = st.columns([3, 1])

                    with col1:
                        achievement = all_achievements[achievement_id]
                        st.write(f"**{achievement['name']}**: {achievement['description']}")

                    with col2:
                        if st.button("Remove", key=f"remove_{achievement_id}"):
                            st.session_state.achievements.remove(achievement_id)
                            st.info(f"Removed achievement: {achievement['name']}")
                            st.rerun()

        with locked_tab:
            st.write("Currently locked achievements:")

            # Show locked achievements with option to add
            locked_achievements = [a for a in all_achievements if a not in st.session_state.achievements]

            for achievement_id in sorted(locked_achievements):
                col1, col2 = st.columns([3, 1])

                with col1:
                    achievement = all_achievements[achievement_id]
                    st.write(f"**{achievement['name']}**: {achievement['description']}")

                with col2:
                    if st.button("Unlock", key=f"unlock_{achievement_id}"):
                        st.session_state.achievements.add(achievement_id)

                        # Apply rewards if applicable
                        if "reward" in achievement:
                            from scan_handler import apply_achievement_reward
                            apply_achievement_reward(achievement_id, achievement["reward"])

                        st.success(f"Unlocked achievement: {achievement['name']}")
                        st.rerun()
|
344 |
+
|
345 |
+
def display_mosaic_tools():
    """Display developer tools for inspecting and mutating the tag mosaic.

    Requires ``st.session_state.tag_mosaic`` to exist (created by the Tag
    Collection tab); otherwise shows a warning and returns early.
    """
    st.subheader("Tag Mosaic Tools")

    # Check if mosaic exists
    if not hasattr(st.session_state, 'tag_mosaic'):
        st.warning("Tag Mosaic not initialized yet. Visit the Tag Collection tab first.")
        return

    # Fill random portions of the mosaic
    with st.expander("Fill Random Portions", expanded=True):
        col1, col2 = st.columns([3, 1])

        with col1:
            fill_percentage = st.slider(
                "Percentage to fill:",
                min_value=0,
                max_value=100,
                value=10,
                step=1
            )

            # Which rarities the synthetic fill tags may use.
            st.write("Fill with tags of rarity:")
            fill_rarities = {}
            for rarity in RARITY_LEVELS:
                fill_rarities[rarity] = st.checkbox(rarity, value=True, key=f"fill_{rarity}")

        with col2:
            fill_button = st.button("Fill Mosaic", key="fill_mosaic")

        if fill_button:
            # Get the mosaic from session state
            mosaic = st.session_state.tag_mosaic

            # How many extra cells are needed to reach the target level.
            total_cells = mosaic.total_cells
            existing_filled = len(mosaic.filled_cells)
            target_filled = int(total_cells * fill_percentage / 100)
            cells_to_add = max(0, target_filled - existing_filled)

            # Get active rarities
            active_rarities = [r for r, checked in fill_rarities.items() if checked]
            if not active_rarities:
                st.error("Select at least one rarity to fill with")
                return

            # Create artificial tags; these are handed to the mosaic only
            # and are NOT added to the player's collection.
            added_count = 0
            added_tags = {}

            # Candidate positions = every grid cell not already filled.
            all_positions = [(x, y) for x in range(mosaic.grid_width) for y in range(mosaic.grid_height)]
            available_positions = [pos for pos in all_positions if pos not in mosaic.filled_cells]

            # Never request more cells than remain unfilled.
            cells_to_add = min(cells_to_add, len(available_positions))

            # Randomly select positions
            selected_positions = random.sample(available_positions, cells_to_add)

            # Create tags for each position
            for pos in selected_positions:
                x, y = pos

                # NOTE: the time component is identical for every cell of one
                # fill; uniqueness comes from the (x, y) coordinates.
                tag_name = f"mosaic_fill_{x}_{y}_{int(time.time() % 10000)}"

                # Select a random rarity from active rarities
                rarity = random.choice(active_rarities)

                added_tags[tag_name] = {
                    "count": 1,
                    "rarity": rarity,
                    "category": "general"
                }

                added_count += 1

            # Update the mosaic (this does save to disk)
            if added_count > 0:
                mosaic.update_with_tags(added_tags)
                st.success(f"Added {added_count} random cells to the mosaic!")

                # Show updated stats
                stats = mosaic.get_stats()
                st.write(f"New completion: {stats['completion_percentage']:.2f}%")
                st.write(f"Emerging pattern: {stats['completion_pattern']}")

                # Show image
                mosaic_img = mosaic.get_image(show_highlights=True)
                st.image(mosaic_img, caption="Updated Mosaic", width=400)
            else:
                st.info("No new cells added. Mosaic may already be filled to the requested level.")

    # Reset mosaic without affecting collection
    with st.expander("Reset Mosaic", expanded=False):
        # FIX: the confirmation checkbox used to be created inside the button
        # branch, so it only existed during the single rerun in which the
        # button fired and could never be checked — the reset was
        # unreachable. Render the checkbox unconditionally and require it
        # when the button is pressed.
        confirm = st.checkbox("I understand this will clear the mosaic visualization (not your collection)")

        if st.button("Reset Mosaic", key="reset_mosaic"):
            if confirm:
                # Reset the mosaic by creating a new one
                from tag_mosaic import TagMosaic
                st.session_state.tag_mosaic = TagMosaic()

                # Delete the mosaic save file
                import os
                if os.path.exists("tag_mosaic.png"):
                    try:
                        os.remove("tag_mosaic.png")
                    except Exception as e:
                        st.error(f"Error removing mosaic file: {e}")

                st.success("Mosaic has been reset!")
            else:
                st.warning("Tick the confirmation checkbox first.")
|
467 |
+
|
468 |
+
def display_reset_tools():
    """Display tools for resetting parts (or all) of the game state.

    Renders three Streamlit expanders:
      * Reset Currency    - set ``st.session_state.tag_currency`` to a chosen value.
      * Reset Collection  - delete collected tags, optionally keeping chosen rarities.
      * Reset ENTIRE Game - restore every tracked session value to its initial state.

    Fix: confirmation checkboxes are now rendered *before* their buttons are
    checked.  A checkbox created inside an ``if st.button(...)`` body can never
    be observed as checked on the same rerun in which the button fires (the
    button reads False on the following rerun), so the original confirmation
    flow made the collection and full-game resets unreachable.
    """
    st.subheader("Reset Tools")
    st.warning("These tools will reset parts of your game progress. Use with caution!")

    # Reset currency
    with st.expander("Reset Currency", expanded=False):
        col1, col2 = st.columns([3, 1])

        with col1:
            new_amount = st.number_input("Set currency to:", min_value=0, value=0)

        with col2:
            if st.button("Reset Currency", key="reset_currency"):
                st.session_state.tag_currency = new_amount
                st.success(f"Reset currency to {new_amount} {TAG_CURRENCY_NAME}")

    # Reset collection
    with st.expander("Reset Collection", expanded=False):
        st.write("This will remove all collected tags or specific rarities.")

        # Options to keep certain rarities
        st.write("Keep tags with these rarities:")
        keep_rarities = {}
        for rarity in RARITY_LEVELS:
            keep_rarities[rarity] = st.checkbox(rarity, value=False, key=f"keep_{rarity}")

        # Render the confirmation BEFORE checking the button so its state is
        # already committed on the rerun in which the button fires.
        confirm = st.checkbox("I understand this will delete collected tags")

        if st.button("Reset Collection", key="reset_collection") and confirm:
            # Get rarities to keep
            rarities_to_keep = [r for r, checked in keep_rarities.items() if checked]

            # If keeping some rarities, filter the collection
            if rarities_to_keep:
                # Create a new collection with only the kept rarities
                kept_tags = {
                    tag: info
                    for tag, info in st.session_state.collected_tags.items()
                    if info.get("rarity") in rarities_to_keep
                }

                # Replace the collection
                removed_count = len(st.session_state.collected_tags) - len(kept_tags)
                st.session_state.collected_tags = kept_tags
                st.success(f"Removed {removed_count} tags. Kept {len(kept_tags)} tags with rarities: {', '.join(rarities_to_keep)}")
            else:
                # Remove all tags
                removed_count = len(st.session_state.collected_tags)
                st.session_state.collected_tags = {}
                st.success(f"Removed all {removed_count} tags from your collection")

    # Reset complete game
    with st.expander("Reset ENTIRE Game", expanded=False):
        st.error("This will reset ALL game progress including collection, currency, achievements, and upgrades.")

        # Double confirm (rendered before the button check -- see docstring)
        confirm1 = st.checkbox("I understand ALL progress will be lost")
        confirm2 = st.checkbox("This cannot be undone")

        if st.button("Reset EVERYTHING", key="reset_everything") and confirm1 and confirm2:
            # Reset everything
            st.session_state.threshold = 0.25  # Default starting threshold
            st.session_state.tag_currency = 0
            st.session_state.collected_tags = {}
            st.session_state.purchased_upgrades = []
            st.session_state.achievements = set()
            st.session_state.tag_history = []
            st.session_state.current_scan = None
            st.session_state.game_stats = {
                "images_processed": 0,
                "total_tags_found": 0,
                "total_currency_earned": 0,
                "currency_spent": 0
            }

            # Reset progression back to only the default-unlocked categories
            if hasattr(st.session_state, 'unlocked_tag_categories'):
                st.session_state.unlocked_tag_categories = [
                    cat for cat, info in TAG_CATEGORIES.items()
                    if info["unlocked_by_default"]
                ]

            if hasattr(st.session_state, 'detector_level'):
                st.session_state.detector_level = 0

            if hasattr(st.session_state, 'tag_power_bonus'):
                st.session_state.tag_power_bonus = 0

            if hasattr(st.session_state, 'coin_multiplier'):
                st.session_state.coin_multiplier = 1.0

            if hasattr(st.session_state, 'essence_generator_count'):
                st.session_state.essence_generator_count = 0

            # Reset mosaic: delete the persisted image, then rebuild in memory
            import os
            if os.path.exists("tag_mosaic.png"):
                try:
                    os.remove("tag_mosaic.png")
                except Exception as e:
                    st.error(f"Error removing mosaic file: {e}")

            if hasattr(st.session_state, 'tag_mosaic'):
                from tag_mosaic import TagMosaic
                st.session_state.tag_mosaic = TagMosaic()

            st.success("Game completely reset to initial state!")
            st.info("Refresh the page to see changes take effect")
|
game/essence_generator.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
game/game.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
game/game_constants.py
ADDED
@@ -0,0 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
"""
Game Constants for Tag Collector Game - Updated with new tag rarity names
This file contains shared constants used by both the main game and the library system.
"""

# Game currency names
TAG_CURRENCY_NAME = "TagCoins"
ENKEPHALIN_CURRENCY_NAME = "Enkephalin"
ENKEPHALIN_ICON = "💧"

# Scanner threshold bounds: play begins at STARTING_THRESHOLD and upgrades
# may lower it, but never below MIN_THRESHOLD.
STARTING_THRESHOLD = 0.55
MIN_THRESHOLD = 0.1

# Tag animations and theme settings
# Maps a rarity name to the CSS class applied to its tags plus the raw CSS
# (keyframes and class rules) injected into the page for that rarity.
TAG_ANIMATIONS = {
    "Star of the City": {
        "css_class": "star-of-city",
        "animation": """
        @keyframes glowing {
            0% { box-shadow: 0 0 5px #FFD700; }
            50% { box-shadow: 0 0 20px #FFD700; }
            100% { box-shadow: 0 0 5px #FFD700; }
        }
        .star-of-city {
            background-color: rgba(255, 215, 0, 0.2);
            padding: 8px;
            border-radius: 5px;
            border: 2px solid gold;
            animation: glowing 2s infinite;
        }
        """
    },
    "Impuritas Civitas": {
        "css_class": "impuritas-civitas",
        "animation": """
        @keyframes rainbow-border {
            0% { border-color: red; }
            14% { border-color: orange; }
            28% { border-color: yellow; }
            42% { border-color: green; }
            57% { border-color: blue; }
            71% { border-color: indigo; }
            85% { border-color: violet; }
            100% { border-color: red; }
        }

        @keyframes rainbow-text {
            0% { color: red; }
            14% { color: orange; }
            28% { color: yellow; }
            42% { color: green; }
            57% { color: blue; }
            71% { color: indigo; }
            85% { color: violet; }
            100% { color: red; }
        }

        @keyframes rainbow-bg {
            0% { background-color: rgba(255,0,0,0.1); }
            14% { background-color: rgba(255,165,0,0.1); }
            28% { background-color: rgba(255,255,0,0.1); }
            42% { background-color: rgba(0,128,0,0.1); }
            57% { background-color: rgba(0,0,255,0.1); }
            71% { background-color: rgba(75,0,130,0.1); }
            85% { background-color: rgba(238,130,238,0.1); }
            100% { background-color: rgba(255,0,0,0.1); }
        }

        .impuritas-civitas {
            background-color: rgba(0, 0, 0, 0.1);
            padding: 10px;
            border-radius: 5px;
            border: 3px solid red;
            animation: rainbow-border 4s linear infinite, rainbow-bg 4s linear infinite;
        }

        .impuritas-text {
            font-weight: bold;
            animation: rainbow-text 4s linear infinite;
        }
        """
    }
}

# Rarity levels with appropriate colors (updated to match new rarity tiers)
# "value" is the TagCoin payout for a tag of that rarity; tiers are ordered
# from most to least common.
RARITY_LEVELS = {
    "Canard": {"color": "#AAAAAA", "value": 1},  # Gray
    "Urban Myth": {"color": "#5D9C59", "value": 5},  # Green
    "Urban Legend": {"color": "#2196F3", "value": 10},  # Blue
    "Urban Plague": {"color": "#9C27B0", "value": 25},  # Purple
    "Urban Nightmare": {"color": "#FF9800", "value": 50},  # Orange
    "Star of the City": {"color": "#FFEB3B", "value": 250},  # Yellow/Gold
    "Impuritas Civitas": {"color": "#F44336", "value": 1000}  # Red
}

# Essence generation costs in enkephalin
ESSENCE_COSTS = {
    "Canard": 10,  # Common tags
    "Urban Myth": 30,  # Uncommon tags
    "Urban Legend": 75,  # Rare tags
    "Urban Plague": 150,  # Very rare tags
    "Urban Nightmare": 300,  # Extremely rare tags
    "Star of the City": 600,  # Nearly mythical tags
    "Impuritas Civitas": 1200  # Legendary tags
}

# Tag power system
# Per-rarity passive bonuses: "coin_multiplier" is an additive boost to coin
# earnings per owned tag of that rarity; "enkephalin_reward" is the enkephalin
# granted for that rarity.
TAG_POWER_BONUSES = {
    "Canard": {"coin_multiplier": 0, "enkephalin_reward": 0},
    "Urban Myth": {"coin_multiplier": 0, "enkephalin_reward": 0},
    "Urban Legend": {"coin_multiplier": 0, "enkephalin_reward": 1},
    "Urban Plague": {"coin_multiplier": 0.001, "enkephalin_reward": 3},
    "Urban Nightmare": {"coin_multiplier": 0.0025, "enkephalin_reward": 5},
    "Star of the City": {"coin_multiplier": 0.005, "enkephalin_reward": 10},
    "Impuritas Civitas": {"coin_multiplier": 0.01, "enkephalin_reward": 25}
}

# Purchasable scanner upgrades, ordered by increasing cost.  Each entry
# replaces the active detection threshold with "threshold_setting"; lower
# thresholds detect more tags (higher recall, lower precision).
THRESHOLD_UPGRADES = [
    {
        "name": "Pattern Recognition Module",
        "threshold_setting": 0.48367345,  # High precision threshold
        "cost": 300,
        "description": "Basic algorithm focused on high-precision identification. Reduces false positives but may miss some tags."
    },
    {
        "name": "Neural Network Enhancement",
        "threshold_setting": 0.40000000,
        "cost": 500,
        "description": "Improved tag detection using multi-layer perceptrons. Offers good precision with moderate recall."
    },
    {
        "name": "Deep Learning Framework",
        "threshold_setting": 0.35000000,
        "cost": 1000,
        "description": "Advanced algorithms that learn from previous scans. Provides better balance between precision and recall."
    },
    {
        "name": "Quantum Probability Engine",
        "threshold_setting": 0.32857141,  # Balanced optimal F1 score threshold
        "cost": 2500,
        "description": "Leverages quantum uncertainty for optimal detection balance. Perfect calibration point for F1 score."
    },
    {
        "name": "Recursive Self-Improvement",
        "threshold_setting": 0.31224489,  # Weighted F1 threshold
        "cost": 7500,
        "description": "Scanner enhances its own detection capabilities. Optimized for weighted tag discovery."
    },
    {
        "name": "Consciousness Emulation",
        "threshold_setting": 0.25000000,
        "cost": 15000,
        "description": "Scanner develops intuition-like abilities. Favors higher recall while maintaining reasonable precision."
    },
    {
        "name": "Technological Singularity",
        "threshold_setting": 0.20612246,  # High recall threshold
        "cost": 50000,
        "description": "The scanner transcends conventional limitations. Maximizes tag discovery at the cost of some precision."
    }
]

# Achievements
# Keyed by achievement id.  "requirement" (where present) is the count needed
# to unlock; "reward" values are additive bonuses ("coin_bonus",
# "enkephalin_bonus", cost reductions) or one-off grants ("tagcoins",
# "enkephalin").
ACHIEVEMENTS = {
    # Collection achievements
    "tag_collector_beginner": {"name": "Novice Archivist", "requirement": 50, "description": "Collect 50 different tags", "reward": {"coin_bonus": 0.01}},
    "tag_collector_expert": {"name": "Senior Cataloger", "requirement": 250, "description": "Collect 250 different tags", "reward": {"coin_bonus": 0.01}},
    "tag_collector_master": {"name": "Master Librarian", "requirement": 500, "description": "Collect 500 different tags", "reward": {"coin_bonus": 0.01}},
    "tag_master": {"name": "Grand Archivist", "requirement": 1000, "description": "Collect 1000 different tags", "reward": {"coin_bonus": 0.01}},

    # Rarity achievements
    "legendary_hunter": {"name": "Impuritas Seeker", "requirement": 1, "description": "Find your first Impuritas Civitas tag", "reward": {"coin_bonus": 0.01, "enkephalin": 50}},
    "multi_legendary": {"name": "Forbidden Collection", "requirement": 5, "description": "Collect 5 Impuritas Civitas tags", "reward": {"coin_bonus": 0.01, "enkephalin": 100}},
    "canard_collector": {"name": "Canard Chronicler", "requirement": 30, "description": "Collect 30 Canard tags", "reward": {"coin_bonus": 0.01}},
    "urban_myth_collector": {"name": "Myth Curator", "requirement": 15, "description": "Collect 15 Urban Myth tags", "reward": {"coin_bonus": 0.01}},
    "urban_legend_collector": {"name": "Legend Preserver", "requirement": 10, "description": "Collect 10 Urban Legend tags", "reward": {"coin_bonus": 0.01}},
    "urban_plague_collector": {"name": "Plague Archivist", "requirement": 5, "description": "Collect 5 Urban Plague tags", "reward": {"coin_bonus": 0.01}},
    "urban_nightmare_collector": {"name": "Nightmare Keeper", "requirement": 5, "description": "Collect 5 Urban Nightmare tags", "reward": {"coin_bonus": 0.01}},
    "star_collector": {"name": "Star Collector", "requirement": 3, "description": "Collect 3 Star of the City tags", "reward": {"coin_bonus": 0.01, "enkephalin": 30}},
    "impuritas_collector": {"name": "Impuritas Scholar", "requirement": 3, "description": "Collect 3 Impuritas Civitas tags", "reward": {"coin_bonus": 0.01, "enkephalin": 75}},

    # Progress achievements
    "perfect_scanner": {"name": "Omniscient Observer", "description": "Reach the minimum threshold", "reward": {"coin_bonus": 0.01}},
    "optimal_threshold": {"name": "Perfect Calibration", "description": "Reach the optimal F1 score threshold of 0.328", "reward": {"coin_bonus": 0.01}},
    "collection_milestone_100": {"name": "Century Collector", "requirement": 100, "description": "Collect 100 different tags", "reward": {"tagcoins": 100, "coin_bonus": 0.01}},
    "collection_milestone_1000": {"name": "Millennium Collector", "requirement": 1000, "description": "Collect 1000 different tags", "reward": {"tagcoins": 1000, "coin_bonus": 0.01}},
    "collection_milestone_5000": {"name": "Epic Collector", "requirement": 5000, "description": "Collect 5000 different tags", "reward": {"tagcoins": 5000, "coin_bonus": 0.01}},

    # Essence & library achievements
    "essence_creator": {"name": "Essence Creator", "requirement": 5, "description": "Generate 5 tag essences", "reward": {"essence_cost_reduction": 0.2, "coin_bonus": 0.01}},
    "tag_explorer": {"name": "Tag Explorer", "requirement": 20, "description": "Explore all library tiers", "reward": {"library_cost_reduction": 0.15, "coin_bonus": 0.01}},
    "enkephalin_master": {"name": "Enkephalin Master", "requirement": 5000, "description": "Generate 5000 Enkephalin", "reward": {"essence_cost_reduction": 0.25, "coin_bonus": 0.01}},
    "sacrifice_devotee": {"name": "Sacrifice Devotee", "requirement": 100, "description": "Sacrifice 100 tags", "reward": {"enkephalin_bonus": 0.2, "coin_bonus": 0.01}},

    # New achievements
    "category_explorer": {"name": "Category Explorer", "requirement": 10, "description": "Collect tags from 10 different categories", "reward": {"coin_bonus": 0.01}},
    "series_collector": {"name": "Series Collector", "requirement": 3, "description": "Complete 3 series mosaics", "reward": {"coin_bonus": 0.01, "enkephalin": 25}},
    "rapid_tagger": {"name": "Rapid Tagger", "requirement": 100, "description": "Scan 100 images", "reward": {"coin_bonus": 0.01}},
    "enkephalin_harvester": {"name": "Enkephalin Harvester", "requirement": 1000, "description": "Generate 1000 Enkephalin", "reward": {"enkephalin_bonus": 0.1, "coin_bonus": 0.01}},
    "library_scholar": {"name": "Library Scholar", "requirement": 50, "description": "Extract 50 tags from the library", "reward": {"library_cost_reduction": 0.1, "coin_bonus": 0.01}},
    "rarity_hunter": {"name": "Rarity Hunter", "description": "Find tags of all rarity levels", "reward": {"coin_bonus": 0.02}},
    "essence_master": {"name": "Essence Master", "requirement": 25, "description": "Generate 25 tag essences", "reward": {"essence_cost_reduction": 0.15, "coin_bonus": 0.01}},
    "legendary_librarian": {"name": "Legendary Librarian", "description": "Extract an Impuritas Civitas tag from the library", "reward": {"library_cost_reduction": 0.2, "coin_bonus": 0.01, "enkephalin": 100}}
}
|
game/library_system.py
ADDED
@@ -0,0 +1,2010 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Redesigned Library System with Instant Discovery and Cooldown for Tag Collector Game
|
3 |
+
"""
|
4 |
+
|
5 |
+
import streamlit as st
|
6 |
+
import random
|
7 |
+
import time
|
8 |
+
import math
|
9 |
+
import pandas as pd
|
10 |
+
import datetime
|
11 |
+
from game_constants import (
|
12 |
+
TAG_CURRENCY_NAME,
|
13 |
+
RARITY_LEVELS,
|
14 |
+
ENKEPHALIN_CURRENCY_NAME,
|
15 |
+
ENKEPHALIN_ICON,
|
16 |
+
TAG_POWER_BONUSES
|
17 |
+
)
|
18 |
+
from essence_generator import display_essence_generator
|
19 |
+
from tag_categories import (
|
20 |
+
TAG_CATEGORIES,
|
21 |
+
get_collection_power_level
|
22 |
+
)
|
23 |
+
import tag_storage
|
24 |
+
|
25 |
+
# Define library properties
|
26 |
+
LIBRARY_INFO = {
|
27 |
+
"name": "The Library",
|
28 |
+
"description": "A vast repository of knowledge where tags are discovered through patient exploration and research.",
|
29 |
+
"color": "#4A148C", # Deep purple
|
30 |
+
"rarities_available": ["Canard", "Urban Myth", "Urban Legend", "Urban Plague", "Urban Nightmare", "Star of the City", "Impuritas Civitas"],
|
31 |
+
"odds_multiplier": 2.0
|
32 |
+
}
|
33 |
+
|
34 |
+
# Define library floors with their unlocking requirements and rarity boosts
|
35 |
+
LIBRARY_FLOORS = [
|
36 |
+
{
|
37 |
+
"name": "Floor of General Works",
|
38 |
+
"description": "The foundation of knowledge. Contains basic tags with limited rarity.",
|
39 |
+
"required_tags": 0, # Available from the start
|
40 |
+
"rarity_boost": 0.0,
|
41 |
+
"color": "#8D99AE", # Light blue-gray
|
42 |
+
"unlocked": True, # Always unlocked
|
43 |
+
"rarities": ["Canard", "Urban Myth", "Urban Legend"],
|
44 |
+
"odds_multiplier": 1.0 # Base odds multiplier
|
45 |
+
},
|
46 |
+
{
|
47 |
+
"name": "Floor of History",
|
48 |
+
"description": "Archives of past knowledge. Offers more access to uncommon tags.",
|
49 |
+
"required_tags": 25, # Unlocked after collecting 25 tags
|
50 |
+
"rarity_boost": 0.2,
|
51 |
+
"color": "#457B9D", # Moderate blue
|
52 |
+
"rarities": ["Canard", "Urban Myth", "Urban Legend", "Urban Plague"],
|
53 |
+
"odds_multiplier": 1.2
|
54 |
+
},
|
55 |
+
{
|
56 |
+
"name": "Floor of Technological Sciences",
|
57 |
+
"description": "Repository of technical knowledge. Access to rare tags begins here.",
|
58 |
+
"required_tags": 75, # Unlocked after collecting 75 tags
|
59 |
+
"rarity_boost": 0.4,
|
60 |
+
"color": "#2B9348", # Green
|
61 |
+
"rarities": ["Canard", "Urban Myth", "Urban Legend", "Urban Plague"],
|
62 |
+
"odds_multiplier": 1.5
|
63 |
+
},
|
64 |
+
{
|
65 |
+
"name": "Floor of Literature",
|
66 |
+
"description": "A vast collection of narrative concepts. Higher chance of rare discoveries.",
|
67 |
+
"required_tags": 150, # Unlocked after collecting 150 tags
|
68 |
+
"rarity_boost": 0.6,
|
69 |
+
"color": "#6A0572", # Purple
|
70 |
+
"rarities": ["Canard", "Urban Myth", "Urban Legend", "Urban Plague", "Urban Nightmare"],
|
71 |
+
"odds_multiplier": 1.8
|
72 |
+
},
|
73 |
+
{
|
74 |
+
"name": "Floor of Art",
|
75 |
+
"description": "The realm of aesthetic concepts. First access to Urban Nightmare tags.",
|
76 |
+
"required_tags": 250, # Unlocked after collecting 250 tags
|
77 |
+
"rarity_boost": 0.8,
|
78 |
+
"color": "#D90429", # Red
|
79 |
+
"rarities": ["Canard", "Urban Myth", "Urban Legend", "Urban Plague", "Urban Nightmare"],
|
80 |
+
"odds_multiplier": 2.2
|
81 |
+
},
|
82 |
+
{
|
83 |
+
"name": "Floor of Natural Sciences",
|
84 |
+
"description": "Where empirical knowledge is cataloged. Significant chance of very rare tags.",
|
85 |
+
"required_tags": 500, # Unlocked after collecting 400 tags
|
86 |
+
"rarity_boost": 1.0,
|
87 |
+
"color": "#1A759F", # Deep blue
|
88 |
+
"rarities": ["Urban Myth", "Urban Legend", "Urban Plague", "Urban Nightmare", "Star of the City"],
|
89 |
+
"odds_multiplier": 2.5
|
90 |
+
},
|
91 |
+
{
|
92 |
+
"name": "Floor of Language",
|
93 |
+
"description": "The domain of linguistic concepts. First glimpse of Star of the City tags.",
|
94 |
+
"required_tags": 1000, # Unlocked after collecting 600 tags
|
95 |
+
"rarity_boost": 1.2,
|
96 |
+
"color": "#FF8C00", # Orange
|
97 |
+
"rarities": ["Urban Myth", "Urban Legend", "Urban Plague", "Urban Nightmare", "Star of the City"],
|
98 |
+
"odds_multiplier": 3.0
|
99 |
+
},
|
100 |
+
{
|
101 |
+
"name": "Floor of Social Sciences",
|
102 |
+
"description": "Complex social patterns and abstractions. Notable chance of exceptional rarities.",
|
103 |
+
"required_tags": 2000, # Unlocked after collecting 1000 tags
|
104 |
+
"rarity_boost": 1.4,
|
105 |
+
"color": "#76B041", # Brighter green
|
106 |
+
"rarities": ["Urban Legend", "Urban Plague", "Urban Nightmare", "Star of the City"],
|
107 |
+
"odds_multiplier": 3.5
|
108 |
+
},
|
109 |
+
{
|
110 |
+
"name": "Floor of Philosophy",
|
111 |
+
"description": "The realm of profound thought. First access to the rarest 'Impuritas Civitas' tags.",
|
112 |
+
"required_tags": 5000, # Unlocked after collecting 1500 tags
|
113 |
+
"rarity_boost": 1.6,
|
114 |
+
"color": "#7209B7", # Deep purple
|
115 |
+
"rarities": ["Urban Legend", "Urban Plague", "Urban Nightmare", "Star of the City", "Impuritas Civitas"],
|
116 |
+
"odds_multiplier": 5.0
|
117 |
+
},
|
118 |
+
{
|
119 |
+
"name": "Floor of Religion",
|
120 |
+
"description": "The ultimate repository of the most profound conceptual territories.",
|
121 |
+
"required_tags": 10000,
|
122 |
+
"rarity_boost": 2.0,
|
123 |
+
"color": "#FFBD00", # Gold
|
124 |
+
"rarities": ["Urban Plague", "Urban Nightmare", "Star of the City", "Impuritas Civitas"],
|
125 |
+
"odds_multiplier": 10.0
|
126 |
+
}
|
127 |
+
]
|
128 |
+
|
129 |
+
def start_instant_expedition():
    """
    Launch an expedition immediately, enforcing the cooldown window.

    Returns:
        List of discoveries, or None when the cooldown has not elapsed yet.
    """
    now = time.time()

    # Refuse to run while a previous expedition's cooldown is still active.
    if hasattr(st.session_state, 'last_expedition_time'):
        cooldown = calculate_expedition_duration()
        since_last = now - st.session_state.last_expedition_time
        if since_last < cooldown:
            remaining = cooldown - since_last
            mins, secs = divmod(int(remaining), 60)
            st.error(f"Expedition on cooldown. {mins:02d}:{secs:02d} remaining.")
            return None

    # Roll the discoveries, then restart the cooldown clock.
    found = generate_expedition_discoveries()
    st.session_state.last_expedition_time = now

    # Persist progress immediately.
    tag_storage.save_game(st.session_state)

    # Keep the user on the same library tab after the Streamlit rerun.
    if 'library_tab_index' not in st.session_state:
        st.session_state.library_tab_index = 0

    return found
|
164 |
+
|
165 |
+
def generate_expedition_discoveries():
    """
    Generate expedition discoveries instantly.

    Picks the player's highest unlocked library floor, rolls a rarity for
    each discovery slot (capacity comes from upgrades), then tries to find
    an undiscovered tag of that rarity, falling back to already-discovered
    tags, then to the model's full tag list, then to a hard-coded list.
    Saves library and game state before returning.

    Returns:
        List of discovery dicts with keys: tag, rarity, is_new, timestamp, library
    """
    # Get current library floor
    current_floor = None
    collection_size = len(st.session_state.collected_tags) if hasattr(st.session_state, 'collected_tags') else 0

    if hasattr(st.session_state, 'library_floors'):
        # Find the highest unlocked floor (floors are assumed ordered by
        # required_tags ascending, so the first match when reversed wins)
        for floor in reversed(st.session_state.library_floors):
            if collection_size >= floor["required_tags"]:
                current_floor = floor
                break

    # Default to first floor if we couldn't find one
    if not current_floor:
        current_floor = st.session_state.library_floors[0] if hasattr(st.session_state, 'library_floors') else {
            "name": "Archival Records",
            "rarities": ["Canard", "Urban Myth"],
            "rarity_boost": 0.0
        }

    # Calculate rarity odds for discoveries
    rarity_odds = calculate_rarity_odds()

    # Calculate capacity from upgrades
    tags_capacity = calculate_expedition_capacity()

    # Generate discoveries
    discoveries = []
    for _ in range(tags_capacity):
        # Select a rarity based on calculated odds
        rarities = list(rarity_odds.keys())
        weights = list(rarity_odds.values())
        selected_rarity = random.choices(rarities, weights=weights, k=1)[0]

        # Now select a random tag with this rarity that hasn't been discovered yet
        possible_tags = []

        # Check if we have tag metadata with rarity info
        if hasattr(st.session_state, 'tag_rarity_metadata') and st.session_state.tag_rarity_metadata:
            # Find all tags of the selected rarity
            for tag, tag_info in st.session_state.tag_rarity_metadata.items():
                # Skip if already discovered
                if tag in st.session_state.discovered_tags:
                    continue

                # Handle both formats - new (dict with rarity) and old (just rarity string)
                if isinstance(tag_info, dict) and "rarity" in tag_info:
                    if tag_info["rarity"] == selected_rarity:
                        possible_tags.append(tag)
                elif tag_info == selected_rarity:
                    possible_tags.append(tag)

        # If no undiscovered tags found in the selected rarity, fallback to already discovered tags
        # (re-discovering a known tag is allowed; is_new below will be False)
        if not possible_tags:
            if hasattr(st.session_state, 'tag_rarity_metadata') and st.session_state.tag_rarity_metadata:
                for tag, tag_info in st.session_state.tag_rarity_metadata.items():
                    # Skip if in different rarity
                    tag_rarity = tag_info.get("rarity", tag_info) if isinstance(tag_info, dict) else tag_info
                    if tag_rarity != selected_rarity:
                        continue

                    possible_tags.append(tag)

        # If still no tags found (or no metadata), create a fallback
        if not possible_tags:
            # If we have the model's full tag list, use it
            if hasattr(st.session_state, 'metadata') and 'idx_to_tag' in st.session_state.metadata:
                all_tags = list(st.session_state.metadata['idx_to_tag'].values())
                # Just pick a random tag and assign the selected rarity
                # (the sampled tags' true rarity is ignored here)
                possible_tags = random.sample(all_tags, min(20, len(all_tags)))
            else:
                # Complete fallback - use some generic tags
                possible_tags = ["portrait", "landscape", "digital_art", "anime", "realistic",
                               "fantasy", "sci-fi", "city", "nature", "character"]

        # If we found possible tags, select one randomly
        if possible_tags:
            selected_tag = random.choice(possible_tags)

            # Get category from metadata if available
            category = "unknown"
            if hasattr(st.session_state, 'metadata') and 'tag_to_category' in st.session_state.metadata:
                if selected_tag in st.session_state.metadata['tag_to_category']:
                    category = st.session_state.metadata['tag_to_category'][selected_tag]

            # Use the enhanced tag storage function to add the discovered tag
            is_new = tag_storage.add_discovered_tag(
                tag=selected_tag,
                rarity=selected_rarity,
                session_state=st.session_state,
                library_floor=current_floor["name"],
                category=category # Pass the category we found
            )

            # Record for library growth
            # NOTE(review): library_growth is accessed without a hasattr guard,
            # unlike other session_state fields here — assumed initialized elsewhere
            st.session_state.library_growth["total_discoveries"] += 1
            st.session_state.library_growth["last_discovery_time"] = time.time()

            # Create timestamp for display
            timestamp = time.strftime("%Y-%m-%d %H:%M:%S")

            # Add to results
            discoveries.append({
                "tag": selected_tag,
                "rarity": selected_rarity,
                "is_new": is_new,
                "timestamp": timestamp,
                "library": current_floor["name"]
            })

    # Save the game state after discoveries
    tag_storage.save_library_state(session_state=st.session_state)
    tag_storage.save_game(st.session_state)

    return discoveries
|
286 |
+
|
287 |
+
def update_discovered_tag_categories():
    """Backfill 'unknown' categories on discovered tags from available metadata.

    Tries the model metadata's tag->category map first, then any category
    carried on per-tag rarity metadata. Saves the library state when at
    least one tag was updated.

    Returns the number of tags whose category was filled in.
    """
    discovered = getattr(st.session_state, 'discovered_tags', None)
    if not discovered:
        return 0

    fixed = 0

    # Pass 1: the model metadata's tag -> category mapping.
    if hasattr(st.session_state, 'metadata') and 'tag_to_category' in st.session_state.metadata:
        category_map = st.session_state.metadata['tag_to_category']
        for name, entry in discovered.items():
            if entry.get('category', 'unknown') == 'unknown' and name in category_map:
                entry['category'] = category_map[name]
                fixed += 1

    # Pass 2: per-tag rarity metadata, which may also carry a category.
    rarity_meta = getattr(st.session_state, 'tag_rarity_metadata', None)
    if rarity_meta:
        for name, entry in discovered.items():
            if entry.get('category', 'unknown') != 'unknown' or name not in rarity_meta:
                continue
            meta = rarity_meta[name]
            if isinstance(meta, dict) and "category" in meta:
                entry['category'] = meta["category"]
                fixed += 1

    if fixed > 0:
        print(f"Updated categories for {fixed} discovered tags")
        tag_storage.save_library_state(session_state=st.session_state)

    return fixed
|
317 |
+
|
318 |
+
def calculate_expedition_duration():
    """
    Calculate the duration of cooldown after an expedition based on upgrades.

    Each speed upgrade level above 1 compounds a 10% reduction on the
    10-second base cooldown, with a hard floor of 1 second.

    Returns:
        Duration in seconds for the cooldown
    """
    if hasattr(st.session_state, 'library_upgrades'):
        level = st.session_state.library_upgrades.get("speed", 1)
    else:
        level = 1

    # 10s base, multiplied by 0.9 per level past the first; never below 1s.
    return max(1, 10 * 0.9 ** (level - 1))
|
339 |
+
|
340 |
+
def calculate_expedition_capacity():
    """
    Calculate how many tags can be discovered in one expedition.

    The base capacity of 1 grows by one discovery per capacity upgrade
    level above the first (so the capacity equals the upgrade level).

    Returns:
        Number of tags that can be discovered
    """
    if hasattr(st.session_state, 'library_upgrades'):
        level = st.session_state.library_upgrades.get("capacity", 1)
    else:
        level = 1

    # 1 base discovery + 1 per upgrade level past the first.
    return 1 + (level - 1)
|
358 |
+
|
359 |
+
def calculate_rarity_odds():
    """
    Calculate rarity odds based on library floor level and upgrades.

    Returns:
        Dictionary of {rarity: probability} for the rarities available on
        the player's current floor. Probabilities are non-negative and sum
        to 1, so the result is always safe to feed to random.choices().
    """
    # Get current library floor
    current_floor = None
    if hasattr(st.session_state, 'library_floors'):
        collection_size = len(st.session_state.collected_tags) if hasattr(st.session_state, 'collected_tags') else 0

        # Find the highest unlocked floor
        for floor in reversed(st.session_state.library_floors):
            if collection_size >= floor["required_tags"]:
                current_floor = floor
                break

    # Default to first floor if we couldn't find one
    if not current_floor:
        current_floor = st.session_state.library_floors[0] if hasattr(st.session_state, 'library_floors') else {
            "rarities": ["Canard", "Urban Myth"],
            "rarity_boost": 0.0,
            "odds_multiplier": 1.0
        }

    # Get available rarities from current floor
    available_rarities = current_floor.get("rarities", ["Canard", "Urban Myth"])
    odds_multiplier = current_floor.get("odds_multiplier", 1.0)

    # Base weights for each rarity (relative weights, not probabilities)
    base_weights = {
        "Canard": 70,
        "Urban Myth": 20,
        "Urban Legend": 7,
        "Urban Plague": 2,
        "Urban Nightmare": 1,
        "Star of the City": 0.1,
        "Impuritas Civitas": 0.01
    }

    # Apply floor's rarity boost
    floor_rarity_boost = current_floor.get("rarity_boost", 0.0)

    # Apply rarity upgrades if they exist
    rarity_level = 1
    if hasattr(st.session_state, 'library_upgrades'):
        rarity_level = st.session_state.library_upgrades.get("rarity", 1)

    # Calculate boost based on rarity level
    upgrade_rarity_boost = (rarity_level - 1) * 0.2  # Each level gives 20% more chance for rare tags

    # Combine boosts
    total_boost = floor_rarity_boost + upgrade_rarity_boost

    # Adjust weights based on rarity boost. Each weight is clamped at zero:
    # with a high total_boost the (1.0 - total_boost * factor) terms for the
    # common rarities go negative, and random.choices() raises ValueError on
    # negative weights.
    adjusted_weights = {}
    for rarity in available_rarities:
        if rarity == "Canard":
            # Reduce common tag odds as rarity level increases
            weight = base_weights[rarity] * (1.0 - total_boost * 0.7)
        elif rarity == "Urban Myth":
            # Slight reduction for uncommon as rarity level increases
            weight = base_weights[rarity] * (1.0 - total_boost * 0.3)
        else:
            # Increase rare tag odds as rarity level increases;
            # higher rarities get larger boosts
            rarity_index = list(RARITY_LEVELS.keys()).index(rarity)
            boost_factor = 1.0 + (total_boost * odds_multiplier * (rarity_index + 1))
            weight = base_weights[rarity] * boost_factor
        adjusted_weights[rarity] = max(0.0, weight)

    # Normalize weights into probabilities; if everything was clamped to
    # zero, fall back to a uniform distribution rather than dividing by 0.
    total = sum(adjusted_weights.values())
    if total <= 0:
        uniform = 1.0 / len(adjusted_weights)
        return {r: uniform for r in adjusted_weights}
    return {r: w / total for r, w in adjusted_weights.items()}
|
435 |
+
|
436 |
+
def format_time_remaining(seconds):
    """
    Render a seconds count as a coarse, human-readable duration.

    Args:
        seconds: Seconds remaining

    Returns:
        String with formatted time ("N seconds", "N minutes",
        "N hours" or "N hours, M minutes")
    """
    if seconds < 60:
        return f"{int(seconds)} seconds"
    if seconds < 3600:
        return f"{int(seconds / 60)} minutes"

    whole_hours = int(seconds / 3600)
    leftover_minutes = (seconds % 3600) / 60
    if leftover_minutes > 0:
        return f"{whole_hours} hours, {int(leftover_minutes)} minutes"
    return f"{whole_hours} hours"
|
458 |
+
|
459 |
+
def display_cooldown_timer():
    """Display a countdown timer until the next expedition is available.

    Returns:
        bool: True if the cooldown is still active (timer was rendered),
        False if a new expedition can be started immediately.
    """
    # Check if on cooldown
    current_time = time.time()
    cooldown_remaining = 0
    cooldown_duration = calculate_expedition_duration()

    if hasattr(st.session_state, 'last_expedition_time'):
        elapsed_time = current_time - st.session_state.last_expedition_time
        if elapsed_time < cooldown_duration:
            cooldown_remaining = cooldown_duration - elapsed_time

    # If on cooldown, show timer
    if cooldown_remaining > 0:
        minutes, seconds = divmod(int(cooldown_remaining), 60)

        # Create a timer display with dark-mode styling
        st.markdown("""
        <div style="background-color: rgba(255, 152, 0, 0.15);
                    border: 1px solid #FF9800;
                    border-radius: 5px;
                    padding: 10px;
                    text-align: center;
                    margin-bottom: 15px;
                    color: #ffffff;">
            <p style="margin: 0; font-weight: bold;">⏱️ Next expedition available in:</p>
            <p style="font-size: 1.2em; margin: 5px 0;">{:02d}:{:02d}</p>
        </div>
        """.format(minutes, seconds), unsafe_allow_html=True)

        # Add refresh button for the timer; the displayed time only updates
        # when the script reruns, so the user refreshes manually
        if st.button("🔄 Refresh Timer", key="refresh_timer"):
            st.rerun()

        return True # Still on cooldown

    return False # Not on cooldown
|
496 |
+
|
497 |
+
def display_library_exploration_interface():
    """Display the unified interface for library exploration using Streamlit elements.

    Shows the current floor card, expedition stats and rarity odds, the
    cooldown timer, the "Start Expedition" button, and the upgrades panel.
    Returns early with a warning when the player has no collected tags yet.
    """
    # Tag collection progress
    tag_count = len(st.session_state.collected_tags) if hasattr(st.session_state, 'collected_tags') else 0

    # Check if we have tags to start exploring
    if not hasattr(st.session_state, 'collected_tags') or not st.session_state.collected_tags:
        st.warning("Start scanning images to collect tags first. The library will grow as you collect more tags!")
        return

    # Get current library floor
    current_floor = None
    if hasattr(st.session_state, 'library_floors'):
        # Find the highest unlocked floor
        for floor in reversed(st.session_state.library_floors):
            if tag_count >= floor["required_tags"]:
                current_floor = floor
                break

    # Default to first floor if we couldn't find one
    if not current_floor:
        current_floor = st.session_state.library_floors[0] if hasattr(st.session_state, 'library_floors') else {
            "name": "Floor of General Works",
            "description": "The foundational level of knowledge.",
            "color": "#607D8B",
            "rarities": ["Canard", "Urban Myth"]
        }

    # Library growth progress
    total_discoveries = st.session_state.library_growth["total_discoveries"]

    # Create container with colored border for current floor
    floor_container = st.container()
    with floor_container:
        # Use a stylized container with dark mode theme; the floor's hex
        # color is split into R/G/B pairs to build a translucent rgba()
        st.markdown(f"""
        <div style="border-left: 5px solid {current_floor['color']};
                    border-radius: 5px;
                    background-color: rgba({int(current_floor['color'][1:3], 16)},
                                          {int(current_floor['color'][3:5], 16)},
                                          {int(current_floor['color'][5:7], 16)}, 0.15);
                    padding: 15px 10px 10px 15px;
                    margin-bottom: 15px;
                    color: #ffffff;">
            <h3 style="margin-top: 0; color: {current_floor['color']};">{current_floor['name']}</h3>
            <p>{current_floor['description']}</p>
            <p>Total Discoveries: <strong>{total_discoveries}</strong></p>
        </div>
        """, unsafe_allow_html=True)

    # Create a nice divider for dark theme
    st.markdown("<hr style='margin: 20px 0; border: 0; height: 1px; background-image: linear-gradient(to right, rgba(255, 255, 255, 0), rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 0));'>", unsafe_allow_html=True)

    # Display expedition details
    st.subheader("Expedition Details")

    # Calculate capacity
    capacity = calculate_expedition_capacity()

    # Two columns for expedition stats
    col1, col2 = st.columns(2)

    with col1:
        # Expedition duration/timer
        cooldown_duration = calculate_expedition_duration()
        st.write(f"📊 Cooldown: {format_time_remaining(cooldown_duration)}")
        st.write(f"🔍 Tag Discoveries: {capacity} per expedition")

    with col2:
        # Calculate and display rarity odds with Streamlit elements
        rarity_odds = calculate_rarity_odds()
        available_rarities = current_floor.get("rarities", ["Canard", "Urban Myth"])

        # Display rarity chances with dark theme styling; bar width is
        # scaled 5x (capped at 100%) so small percentages stay visible
        for rarity in available_rarities:
            if rarity in rarity_odds:
                color = RARITY_LEVELS[rarity]["color"]
                percentage = rarity_odds[rarity]*100

                # Custom styling based on rarity
                if rarity == "Impuritas Civitas":
                    st.markdown(f"""
                    <div style="display: flex; align-items: center; margin-bottom: 5px;">
                        <span style="animation: rainbow-text 4s linear infinite; font-weight: bold; width: 140px;">{rarity}:</span>
                        <div style="flex-grow: 1; background-color: #2c2c2c; border-radius: 5px; height: 10px;">
                            <div style="width: {min(percentage*5, 100)}%; background: linear-gradient(to right, red, orange, yellow, green, blue, indigo, violet); height: 10px; border-radius: 5px;"></div>
                        </div>
                        <span style="margin-left: 10px;">{percentage:.1f}%</span>
                    </div>
                    """, unsafe_allow_html=True)
                elif rarity == "Star of the City":
                    st.markdown(f"""
                    <div style="display: flex; align-items: center; margin-bottom: 5px;">
                        <span style="color:{color}; text-shadow: 0 0 3px gold; font-weight: bold; width: 140px;">{rarity}:</span>
                        <div style="flex-grow: 1; background-color: #2c2c2c; border-radius: 5px; height: 10px;">
                            <div style="width: {min(percentage*5, 100)}%; background-color: {color}; box-shadow: 0 0 5px gold; height: 10px; border-radius: 5px;"></div>
                        </div>
                        <span style="margin-left: 10px;">{percentage:.1f}%</span>
                    </div>
                    """, unsafe_allow_html=True)
                elif rarity == "Urban Nightmare":
                    st.markdown(f"""
                    <div style="display: flex; align-items: center; margin-bottom: 5px;">
                        <span style="color:{color}; text-shadow: 0 0 1px #FF5722; font-weight: bold; width: 140px;">{rarity}:</span>
                        <div style="flex-grow: 1; background-color: #2c2c2c; border-radius: 5px; height: 10px;">
                            <div style="width: {min(percentage*5, 100)}%; background-color: {color}; animation: pulse-bar 3s infinite; height: 10px; border-radius: 5px;"></div>
                        </div>
                        <span style="margin-left: 10px;">{percentage:.1f}%</span>
                    </div>
                    """, unsafe_allow_html=True)
                else:
                    st.markdown(f"""
                    <div style="display: flex; align-items: center; margin-bottom: 5px;">
                        <span style="color:{color}; font-weight: bold; width: 140px;">{rarity}:</span>
                        <div style="flex-grow: 1; background-color: #2c2c2c; border-radius: 5px; height: 10px;">
                            <div style="width: {min(percentage*5, 100)}%; background-color: {color}; height: 10px; border-radius: 5px;"></div>
                        </div>
                        <span style="margin-left: 10px;">{percentage:.1f}%</span>
                    </div>
                    """, unsafe_allow_html=True)

    # Check for cooldown and display timer if needed
    on_cooldown = display_cooldown_timer()

    # Add button to start expedition regardless of cooldown status
    # The button will always be displayed, but if on cooldown, expedition won't start
    if st.button("🚀 Start Expedition", key="start_expedition", use_container_width=True, disabled=on_cooldown):
        if not on_cooldown:
            discoveries = start_instant_expedition()
            if discoveries:
                # Store discovery results for display
                st.session_state.expedition_results = discoveries
                # Show success message
                st.success(f"Expedition completed! Discovered {len(discoveries)} new tags!")
                # Show balloons for celebration
                st.balloons()
                # Display the results
                display_expedition_results(discoveries)
                # Save state
                tag_storage.save_game(st.session_state)
        else:
            # This should not be reached due to disabled button, but just in case
            st.error("Expedition on cooldown. Please wait until the timer expires.")

    # Display library upgrades
    display_library_upgrades()
|
643 |
+
|
644 |
+
def display_expedition_results(results):
    """Display results from completed expeditions using Streamlit elements with enhanced dark-mode visuals.

    Args:
        results: List of discovery dicts (keys: tag, rarity, is_new, library)
            as produced by generate_expedition_discoveries().

    Renders a rarity-grouped view, rarest first, with per-rarity banner
    cards and a 3-column grid of styled tag cards.
    """
    st.subheader("Expedition Discoveries")

    # Add animations CSS for dark theme
    st.markdown("""
    <style>
    @keyframes rainbow-text {
        0% { color: red; }
        14% { color: orange; }
        28% { color: yellow; }
        42% { color: green; }
        57% { color: blue; }
        71% { color: indigo; }
        85% { color: violet; }
        100% { color: red; }
    }

    @keyframes rainbow-border {
        0% { border-color: red; }
        14% { border-color: orange; }
        28% { border-color: yellow; }
        42% { border-color: green; }
        57% { border-color: blue; }
        71% { border-color: indigo; }
        85% { border-color: violet; }
        100% { border-color: red; }
    }

    @keyframes star-glow {
        0% { box-shadow: 0 0 5px #FFD700; }
        50% { box-shadow: 0 0 15px #FFD700; }
        100% { box-shadow: 0 0 5px #FFD700; }
    }

    @keyframes nightmare-pulse {
        0% { border-color: #FF9800; }
        50% { border-color: #FF5722; }
        100% { border-color: #FF9800; }
    }

    @keyframes pulse-bar {
        0% { opacity: 0.8; }
        50% { opacity: 1; }
        100% { opacity: 0.8; }
    }

    .expedition-tag-impuritas {
        animation: rainbow-text 4s linear infinite;
        font-weight: bold;
    }

    .expedition-card-impuritas {
        background-color: rgba(255, 0, 0, 0.15);
        border-radius: 8px;
        border: 3px solid red;
        padding: 12px;
        animation: rainbow-border 4s linear infinite;
        color: #ffffff;
    }

    .expedition-card-star {
        background-color: rgba(255, 215, 0, 0.15);
        border-radius: 8px;
        border: 2px solid gold;
        padding: 12px;
        animation: star-glow 2s infinite;
        color: #ffffff;
    }

    .expedition-card-nightmare {
        background-color: rgba(255, 152, 0, 0.15);
        border-radius: 8px;
        border: 2px solid #FF9800;
        padding: 12px;
        animation: nightmare-pulse 3s infinite;
        color: #ffffff;
    }

    .expedition-card-plague {
        background-color: rgba(156, 39, 176, 0.12);
        border-radius: 8px;
        border: 1px solid #9C27B0;
        padding: 12px;
        box-shadow: 0 0 3px #9C27B0;
        color: #ffffff;
    }

    .expedition-card-legend {
        background-color: rgba(33, 150, 243, 0.15);
        border-radius: 8px;
        border: 1px solid #2196F3;
        padding: 12px;
        color: #ffffff;
    }

    .expedition-card-myth {
        background-color: rgba(76, 175, 80, 0.15);
        border-radius: 8px;
        border: 1px solid #4CAF50;
        padding: 12px;
        color: #ffffff;
    }

    .expedition-card-canard {
        background-color: rgba(170, 170, 170, 0.15);
        border-radius: 8px;
        border: 1px solid #AAAAAA;
        padding: 12px;
        color: #ffffff;
    }
    </style>
    """, unsafe_allow_html=True)

    # Map each rarity to its CSS card-class suffix. The <style> block above
    # defines single-word suffixes ("nightmare", "star", ...), so deriving
    # the class by lowercasing the full rarity name would produce classes
    # like "expedition-card-urban-nightmare" that have no CSS rule, leaving
    # every multi-word rarity's card unstyled.
    card_class_suffixes = {
        "Canard": "canard",
        "Urban Myth": "myth",
        "Urban Legend": "legend",
        "Urban Plague": "plague",
        "Urban Nightmare": "nightmare",
        "Star of the City": "star",
        "Impuritas Civitas": "impuritas",
    }

    # Group by rarity first for better organization
    results_by_rarity = {}
    for result in results:
        rarity = result["rarity"]
        if rarity not in results_by_rarity:
            results_by_rarity[rarity] = []
        results_by_rarity[rarity].append(result)

    # Get ordered rarities (rarest first)
    ordered_rarities = list(RARITY_LEVELS.keys())
    ordered_rarities.reverse() # Reverse to display rarest first

    # Display rare discoveries first
    for rarity in ordered_rarities:
        if rarity not in results_by_rarity:
            continue

        rarity_results = results_by_rarity[rarity]
        if not rarity_results:
            continue

        color = RARITY_LEVELS[rarity]["color"]

        # Special styling for rare discoveries
        if rarity == "Impuritas Civitas":
            st.markdown(f"""
            <div style="background-color: rgba(255, 0, 0, 0.15); border-radius: 10px; padding: 15px; margin-bottom: 20px; border: 2px solid red; animation: rainbow-border 4s linear infinite; color: #ffffff;">
                <h3 style="margin-top: 0; animation: rainbow-text 4s linear infinite;">✨ EXTRAORDINARY DISCOVERY!</h3>
                <p>You found {len(rarity_results)} Impuritas Civitas tag(s)!</p>
            </div>
            """, unsafe_allow_html=True)
            st.balloons() # Add celebration effect
        elif rarity == "Star of the City":
            st.markdown(f"""
            <div style="background-color: rgba(255, 215, 0, 0.15); border-radius: 10px; padding: 15px; margin-bottom: 20px; border: 2px solid gold; animation: star-glow 2s infinite; color: #ffffff;">
                <h3 style="margin-top: 0; color: {color}; text-shadow: 0 0 3px gold;">🌟 EXCEPTIONAL DISCOVERY!</h3>
                <p>You found {len(rarity_results)} Star of the City tag(s)!</p>
            </div>
            """, unsafe_allow_html=True)
        elif rarity == "Urban Nightmare":
            st.markdown(f"""
            <div style="background-color: rgba(255, 152, 0, 0.15); border-radius: 10px; padding: 15px; margin-bottom: 20px; border: 2px solid #FF9800; animation: nightmare-pulse 3s infinite; color: #ffffff;">
                <h3 style="margin-top: 0; color: {color}; text-shadow: 0 0 1px #FF5722;">👑 RARE DISCOVERY!</h3>
                <p>You found {len(rarity_results)} Urban Nightmare tag(s)!</p>
            </div>
            """, unsafe_allow_html=True)
        elif rarity == "Urban Plague":
            st.markdown(f"""
            <div style="background-color: rgba(156, 39, 176, 0.12); border-radius: 10px; padding: 15px; margin-bottom: 20px; border: 1px solid #9C27B0; box-shadow: 0 0 3px #9C27B0; color: #ffffff;">
                <h3 style="margin-top: 0; color: {color}; text-shadow: 0 0 1px #9C27B0;">⚔️ UNCOMMON DISCOVERY!</h3>
                <p>You found {len(rarity_results)} Urban Plague tag(s)!</p>
            </div>
            """, unsafe_allow_html=True)
        else:
            st.markdown(f"### {rarity} ({len(rarity_results)} discoveries)")

        # Display tags in this rarity as a 3-column grid
        cols = st.columns(3)
        for i, result in enumerate(rarity_results):
            col_idx = i % 3
            with cols[col_idx]:
                tag = result["tag"]
                floor_name = result.get("library", "Library")

                # Get the appropriate card class based on rarity
                # (fall back to the old lowercased form for unknown rarities)
                suffix = card_class_suffixes.get(rarity, rarity.lower().replace(' ', '-'))
                card_class = f"expedition-card-{suffix}"

                # Create styled card for each tag
                tag_html = f"""<div class="{card_class}">"""

                # Special styling for the tag name based on rarity
                if rarity == "Impuritas Civitas":
                    tag_html += f"""<p style="font-size: 1.1em; margin-bottom: 5px;"><span class="expedition-tag-impuritas">✨ {tag}</span></p>"""
                elif rarity == "Star of the City":
                    tag_html += f"""<p style="font-size: 1.1em; margin-bottom: 5px;"><span style="color: {color}; text-shadow: 0 0 3px gold; font-weight: bold;">🌟 {tag}</span></p>"""
                elif rarity == "Urban Nightmare":
                    tag_html += f"""<p style="font-size: 1.1em; margin-bottom: 5px;"><span style="color: {color}; text-shadow: 0 0 1px #FF5722; font-weight: bold;">👑 {tag}</span></p>"""
                elif rarity == "Urban Plague":
                    tag_html += f"""<p style="font-size: 1.1em; margin-bottom: 5px;"><span style="color: {color}; text-shadow: 0 0 1px #9C27B0; font-weight: bold;">⚔️ {tag}</span></p>"""
                else:
                    tag_html += f"""<p style="font-size: 1.1em; margin-bottom: 5px;"><span style="color: {color}; font-weight: bold;">{tag}</span></p>"""

                # Mark as new if it is
                is_new = result.get("is_new", False)
                new_badge = """<span style="background-color: #4CAF50; color: white; padding: 2px 6px; border-radius: 10px; font-size: 0.7em; margin-left: 5px;">NEW</span>""" if is_new else ""

                # Add other tag details
                tag_html += f"""
                <p style="margin: 0; font-size: 0.9em;">Found in: {floor_name} {new_badge}</p>
                <p style="margin: 5px 0 0 0; font-size: 0.9em;">Rarity: <span style="color: {color};">{rarity}</span></p>
                </div>
                """

                st.markdown(tag_html, unsafe_allow_html=True)

        # Add separator between rarity groups
        st.markdown("<hr style='margin: 20px 0; border: 0; height: 1px; background-image: linear-gradient(to right, rgba(255, 255, 255, 0), rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 0));'>", unsafe_allow_html=True)
|
856 |
+
|
857 |
+
def display_library_building():
    """Display a visual representation of the library building with all floors.

    Renders, top to bottom: a roof banner, one row per library floor in
    descending floor order, an entrance banner, and an expander holding a
    detail table of every floor. Reads ``st.session_state.collected_tags``
    and ``st.session_state.library_floors`` plus the module-level
    ``RARITY_LEVELS`` mapping; writes nothing back to session state.
    """
    st.subheader("The Great Library Building")

    # Get collection size (0 when the player has not collected any tags yet)
    collection_size = len(st.session_state.collected_tags) if hasattr(st.session_state, 'collected_tags') else 0

    # Determine current floor: highest floor whose tag requirement is met.
    # No break on purpose — the last satisfying index wins.
    current_floor_index = 0
    for i, floor in enumerate(st.session_state.library_floors):
        if collection_size >= floor["required_tags"]:
            current_floor_index = i

    # Create a visual representation of the library building
    total_floors = len(st.session_state.library_floors)

    # Enhanced CSS for the library building with dark theme.
    # Injected on every render — Streamlit reruns the script per interaction.
    st.markdown("""
    <style>
    @keyframes floor-glow {
        0% { box-shadow: 0 0 5px rgba(13, 110, 253, 0.5); }
        50% { box-shadow: 0 0 15px rgba(13, 110, 253, 0.8); }
        100% { box-shadow: 0 0 5px rgba(13, 110, 253, 0.5); }
    }

    .library-roof {
        background: linear-gradient(90deg, #8D6E63, #A1887F);
        height: 35px;
        width: 90%;
        margin: 0 auto;
        border-radius: 8px 8px 0 0;
        display: flex;
        justify-content: center;
        align-items: center;
        color: white;
        font-weight: bold;
        box-shadow: 0 -2px 10px rgba(0,0,0,0.4);
    }

    .library-floor {
        height: 65px;
        width: 80%;
        margin: 0 auto;
        border: 1px solid #444;
        display: flex;
        align-items: center;
        padding: 0 20px;
        position: relative;
        transition: all 0.3s ease;
        color: #ffffff;
        background-color: #2c2c2c;
    }

    .library-floor:hover {
        transform: translateX(10px);
    }

    .library-floor.current {
        box-shadow: 0 0 10px rgba(13, 110, 253, 0.5);
        z-index: 2;
        animation: floor-glow 2s infinite;
        border-left: 5px solid #0d6efd;
    }

    .library-floor.locked {
        background-color: #1e1e1e;
        color: #777;
        filter: grayscale(50%);
    }

    .library-floor-number {
        position: absolute;
        left: -30px;
        width: 25px;
        height: 25px;
        background-color: #0d6efd;
        color: white;
        border-radius: 50%;
        display: flex;
        justify-content: center;
        align-items: center;
        font-weight: bold;
    }

    .library-floor.locked .library-floor-number {
        background-color: #555;
    }

    .library-entrance {
        background: linear-gradient(90deg, #5D4037, #795548);
        height: 45px;
        width: 35%;
        margin: 0 auto;
        border-radius: 10px 10px 0 0;
        display: flex;
        justify-content: center;
        align-items: center;
        color: white;
        font-weight: bold;
        box-shadow: 0 -2px 10px rgba(0,0,0,0.4);
    }

    .library-floor-details {
        flex: 1;
    }

    .library-floor-name {
        font-weight: bold;
        margin: 0;
    }

    .library-floor-description {
        font-size: 0.85em;
        margin: 3px 0 0 0;
        opacity: 0.9;
    }

    .library-floor-status {
        display: flex;
        align-items: center;
        font-weight: bold;
    }

    .library-floor-rarities {
        font-size: 0.8em;
        margin-top: 4px;
    }

    .rarity-dot {
        display: inline-block;
        width: 8px;
        height: 8px;
        border-radius: 50%;
        margin-right: 3px;
    }

    /* Special animations for rarer floors */
    .library-floor.star {
        background-color: rgba(255, 215, 0, 0.15);
    }

    .library-floor.impuritas {
        background-color: rgba(255, 0, 0, 0.15);
    }

    @keyframes rainbow-border {
        0% { border-color: red; }
        14% { border-color: orange; }
        28% { border-color: yellow; }
        42% { border-color: green; }
        57% { border-color: blue; }
        71% { border-color: indigo; }
        85% { border-color: violet; }
        100% { border-color: red; }
    }

    .rainbow-border {
        animation: rainbow-border 4s linear infinite;
    }
    </style>
    """, unsafe_allow_html=True)

    # Roof
    st.markdown('<div class="library-roof">🏛️ The Great Library</div>', unsafe_allow_html=True)

    # Display floors from top (highest) to bottom
    for i in reversed(range(total_floors)):
        floor = st.session_state.library_floors[i]
        is_current = i == current_floor_index
        is_unlocked = collection_size >= floor["required_tags"]

        # Style based on floor status
        floor_class = "library-floor"
        if is_current:
            floor_class += " current"
        if not is_unlocked:
            floor_class += " locked"

        # Add special classes for highest floors.
        # NOTE(review): the hardcoded indices 8/6 (and 9 below) assume a
        # 10-floor layout — confirm against LIBRARY_FLOORS.
        if i >= 8 and is_unlocked:  # Impuritas Civitas level floors
            floor_class += " impuritas"
        elif i >= 6 and is_unlocked:  # Star of the City level floors
            floor_class += " star"

        # Determine rarity dots HTML (one colored dot per rarity on this floor)
        rarity_dots = ""
        for rarity in floor.get("rarities", []):
            color = RARITY_LEVELS[rarity]["color"]
            rarity_dots += f'<span class="rarity-dot" style="background-color:{color};"></span>'

        # Floor style based on color.
        # Assumes floor['color'] is a "#RRGGBB" hex string — TODO confirm.
        if is_unlocked:
            floor_style = f"background-color: rgba({int(floor['color'][1:3], 16)}, {int(floor['color'][3:5], 16)}, {int(floor['color'][5:7], 16)}, 0.25);"
        else:
            floor_style = ""

        # Special border animation for highest floor
        border_class = ""
        if i == 9 and is_unlocked:  # Top floor
            border_class = "rainbow-border"

        # Display the floor
        floor_content = f"""
        <div class="{floor_class} {border_class}" style="{floor_style}">
            <span class="library-floor-number">{i+1}</span>
            <div class="library-floor-details">
                <p class="library-floor-name">{floor['name']}</p>
                <p class="library-floor-description">{floor['description'] if is_unlocked else 'Locked'}</p>
                <div class="library-floor-rarities">{rarity_dots}</div>
            </div>
            <div class="library-floor-status">
                {"🔓" if is_unlocked else "🔒"} {floor['required_tags']} tags
            </div>
        </div>
        """
        st.markdown(floor_content, unsafe_allow_html=True)

    # Entrance
    st.markdown('<div class="library-entrance">📚 Entrance</div>', unsafe_allow_html=True)

    # Floor details expander
    with st.expander("Floor Details", expanded=False):
        # Create a table with styled rarities for dark theme.
        # The table is opened here and closed by a separate st.markdown below;
        # each row is emitted as its own markdown call in the loop.
        st.markdown("""
        <style>
        .floor-details-table {
            width: 100%;
            border-collapse: collapse;
            color: #ffffff;
        }

        .floor-details-table th {
            background-color: #333;
            padding: 8px;
            text-align: left;
            border: 1px solid #444;
        }

        .floor-details-table td {
            padding: 8px;
            border: 1px solid #444;
        }

        .floor-details-table tr:nth-child(even) {
            background-color: rgba(255,255,255,0.03);
        }

        .floor-details-table tr:nth-child(odd) {
            background-color: rgba(0,0,0,0.2);
        }

        .floor-details-table tr:hover {
            background-color: rgba(13, 110, 253, 0.1);
        }

        .current-floor {
            background-color: rgba(13, 110, 253, 0.15) !important;
        }
        </style>

        <table class="floor-details-table">
            <tr>
                <th>Floor</th>
                <th>Name</th>
                <th>Status</th>
                <th>Req. Tags</th>
                <th>Rarities</th>
                <th>Rarity Boost</th>
            </tr>
        """, unsafe_allow_html=True)

        # Add each floor to the table
        for i, floor in enumerate(st.session_state.library_floors):
            is_unlocked = collection_size >= floor["required_tags"]
            is_current = i == current_floor_index

            # Format rarities with colors (trailing ", " trimmed when rendered)
            rarity_text = ""
            for rarity in floor.get("rarities", []):
                color = RARITY_LEVELS[rarity]["color"]

                # Special styling based on rarity
                if rarity == "Impuritas Civitas":
                    rarity_text += f"<span style='animation: rainbow-text 4s linear infinite;'>{rarity}</span>, "
                elif rarity == "Star of the City":
                    rarity_text += f"<span style='color:{color}; text-shadow: 0 0 3px gold;'>{rarity}</span>, "
                elif rarity == "Urban Nightmare":
                    rarity_text += f"<span style='color:{color}; text-shadow: 0 0 1px #FF5722;'>{rarity}</span>, "
                elif rarity == "Urban Plague":
                    rarity_text += f"<span style='color:{color}; text-shadow: 0 0 1px #9C27B0;'>{rarity}</span>, "
                else:
                    rarity_text += f"<span style='color:{color};'>{rarity}</span>, "

            # Current floor class
            row_class = "current-floor" if is_current else ""

            # Add the floor row
            st.markdown(f"""
            <tr class="{row_class}">
                <td>{i+1}</td>
                <td>{floor["name"]}</td>
                <td>{"🔓 Unlocked" if is_unlocked else "🔒 Locked"}</td>
                <td>{floor["required_tags"]}</td>
                <td>{rarity_text[:-2] if rarity_text else ""}</td>
                <td>+{int(floor.get('rarity_boost', 0) * 100)}%</td>
            </tr>
            """, unsafe_allow_html=True)

        # Close the table
        st.markdown("</table>", unsafe_allow_html=True)
|
1167 |
+
|
1168 |
+
def add_discovered_tag(tag, rarity, library_floor=None):
    """
    Add a tag to the discovered tags with enriched metadata.

    New tags get a full metadata record; previously discovered tags have
    their discovery count and last-seen timestamp bumped. In both cases
    the library state is persisted via ``tag_storage.save_library_state``.

    Args:
        tag: The tag name
        rarity: The tag rarity level (stored as-is; not re-validated here)
        library_floor: The library floor where it was discovered (optional)

    Returns:
        bool: True if it's a new discovery, False if already discovered
    """
    is_new = tag not in st.session_state.discovered_tags

    # Get current time (local time, human-readable string)
    timestamp = time.strftime("%Y-%m-%d %H:%M:%S")

    # Get tag category if metadata is available; falls back to "unknown"
    category = "unknown"
    if hasattr(st.session_state, 'tag_rarity_metadata') and st.session_state.tag_rarity_metadata:
        if tag in st.session_state.tag_rarity_metadata:
            tag_info = st.session_state.tag_rarity_metadata[tag]
            if isinstance(tag_info, dict) and "category" in tag_info:
                category = tag_info["category"]

    # Get or update tag info
    if is_new:
        # Fresh record for a first-time discovery
        tag_info = {
            "rarity": rarity,
            "discovery_time": timestamp,
            "category": category,
            "discovery_count": 1,
            "last_seen": timestamp
        }

        if library_floor:
            tag_info["library_floor"] = library_floor

        st.session_state.discovered_tags[tag] = tag_info

        # Track exploration of library tiers (lazy-init the set)
        if 'explored_library_tiers' not in st.session_state:
            st.session_state.explored_library_tiers = set()

        if library_floor:
            st.session_state.explored_library_tiers.add(library_floor)
    else:
        # Update existing tag: bump count and refresh last-seen time.
        # Mutates the dict already stored in session state in place.
        tag_info = st.session_state.discovered_tags[tag]
        tag_info["discovery_count"] = tag_info.get("discovery_count", 1) + 1
        tag_info["last_seen"] = timestamp

        # Only update library floor if provided
        if library_floor:
            tag_info["library_floor"] = library_floor

            # Track exploration (lazy-init the set here too)
            if 'explored_library_tiers' not in st.session_state:
                st.session_state.explored_library_tiers = set()

            st.session_state.explored_library_tiers.add(library_floor)

    # Save state after updating — persists on every call, new or repeat
    tag_storage.save_library_state(session_state=st.session_state)

    return is_new
|
1234 |
+
|
1235 |
+
def calculate_upgrade_cost(upgrade_type, current_level):
    """Return the tag-currency price of the next level of an upgrade.

    The price scales linearly with the player's current level: the base
    price for the upgrade type multiplied by ``1.5 * current_level`` and
    truncated to an integer.

    Args:
        upgrade_type: One of "speed", "capacity", or "rarity".
        current_level: The level the player currently holds.

    Returns:
        int: The cost of moving to the next level.

    Raises:
        KeyError: If ``upgrade_type`` is not a known upgrade.
    """
    base_prices = {"speed": 50, "capacity": 100, "rarity": 150}
    scaled_price = base_prices[upgrade_type] * current_level * 1.5
    return int(scaled_price)
|
1243 |
+
|
1244 |
+
def purchase_library_upgrade(upgrade_type):
    """
    Purchase a library upgrade.

    Deducts the upgrade price from the player's tag currency, advances the
    upgrade one level, records the spend in the running game statistics,
    and persists both the library state and the overall game save.

    Args:
        upgrade_type: The type of upgrade ("speed", "capacity", or "rarity")

    Returns:
        bool: True if purchased successfully, False otherwise
    """
    state = st.session_state

    # Price is determined by the level the player currently holds.
    level = state.library_upgrades.get(upgrade_type, 1)
    price = calculate_upgrade_cost(upgrade_type, level)

    # Reject the purchase outright when the player cannot cover the price.
    if state.tag_currency < price:
        return False

    # Deduct the currency and advance the upgrade one level.
    state.tag_currency -= price
    state.library_upgrades[upgrade_type] = level + 1

    # Fold the spend into the lifetime "currency_spent" statistic.
    if hasattr(state, 'game_stats'):
        stats = state.game_stats
        stats["currency_spent"] = stats.get("currency_spent", 0) + price

    # Persist both the library-specific state and the full game save.
    tag_storage.save_library_state(session_state=state)
    tag_storage.save_game(state)

    return True
|
1277 |
+
|
1278 |
+
def _render_upgrade_option(upgrade_type, label, icon, effect, current_value, next_value):
    """Render one upgrade card plus its purchase button.

    Looks up the current level and price for *upgrade_type*, draws the
    styled card (level badge, progress bar, effect/current/next rows,
    cost line), then renders a purchase button wired to
    purchase_library_upgrade().

    Args:
        upgrade_type: Upgrade key ("speed", "capacity", or "rarity").
        label: Human-readable name used in the title, button, and messages.
        icon: Emoji shown before the title.
        effect: Short description of what the upgrade does.
        current_value: Pre-formatted string for the current effect value.
        next_value: Pre-formatted string for the next level's effect value.
    """
    level = st.session_state.library_upgrades.get(upgrade_type, 1)
    cost = calculate_upgrade_cost(upgrade_type, level)

    # Progress toward an informal cap of 10 levels — used only for the
    # visual level bar; purchases are not actually capped here.
    max_level = 10
    progress_percentage = min(100, (level / max_level) * 100)

    # Create an upgrade card with progress bar in dark theme
    st.markdown(f"""
    <div class="upgrade-card">
        <div class="upgrade-title">{icon} {label} Upgrade</div>
        <div class="upgrade-level">Level {level}</div>
        <div class="level-bar">
            <div class="level-progress" style="width: {progress_percentage}%;"></div>
        </div>
        <div class="upgrade-stat">
            <span class="upgrade-stat-label">Effect:</span>
            <span class="upgrade-stat-value">{effect}</span>
        </div>
        <div class="upgrade-stat">
            <span class="upgrade-stat-label">Current:</span>
            <span class="upgrade-stat-value">{current_value}</span>
        </div>
        <div class="upgrade-stat">
            <span class="upgrade-stat-label">Next Level:</span>
            <span class="upgrade-stat-value">{next_value}</span>
        </div>
        <div class="upgrade-cost" style="color: #9D4EDD;">Cost: {cost} {TAG_CURRENCY_NAME}</div>
    </div>
    """, unsafe_allow_html=True)

    # Upgrade button — disabled when the player cannot afford the cost.
    can_afford = st.session_state.tag_currency >= cost
    if st.button(f"Upgrade {label}", key=f"upgrade_{upgrade_type}", disabled=not can_afford, use_container_width=True):
        if purchase_library_upgrade(upgrade_type):
            st.success(f"{label} upgraded to level {level + 1}!")
            st.rerun()
        else:
            st.error(f"Not enough {TAG_CURRENCY_NAME}. Need {cost}.")

def display_library_upgrades():
    """Display and manage upgrades for the library using Streamlit elements with enhanced visuals.

    Shows one card per upgrade type (speed, capacity, rarity) in three
    columns via _render_upgrade_option(), followed by an info box about
    library growth. Fixes the previous "Rapacity" typo in the rarity
    upgrade's success message.
    """
    st.subheader("Library Upgrades")

    # Add styling for upgrade cards with dark theme
    st.markdown("""
    <style>
    .upgrade-card {
        border: 1px solid #444;
        border-radius: 10px;
        padding: 15px;
        margin-bottom: 20px;
        background-color: #222;
        color: #ffffff;
        transition: transform 0.2s, box-shadow 0.2s;
    }

    .upgrade-card:hover {
        transform: translateY(-2px);
        box-shadow: 0 4px 8px rgba(0,0,0,0.3);
    }

    .upgrade-title {
        font-size: 1.2em;
        font-weight: bold;
        margin-bottom: 10px;
        color: #ffffff;
    }

    .upgrade-level {
        display: inline-block;
        background-color: #0d6efd;
        color: white;
        padding: 3px 8px;
        border-radius: 10px;
        font-size: 0.8em;
        margin-bottom: 10px;
    }

    .upgrade-stat {
        display: flex;
        align-items: center;
        margin-bottom: 5px;
    }

    .upgrade-stat-label {
        width: 100px;
        font-size: 0.9em;
        color: #adb5bd;
    }

    .upgrade-stat-value {
        font-weight: bold;
    }

    .upgrade-cost {
        margin-top: 10px;
        font-weight: bold;
        color: #6610f2;
    }

    .level-bar {
        height: 6px;
        background-color: #333;
        border-radius: 3px;
        margin-bottom: 10px;
        overflow: hidden;
    }

    .level-progress {
        height: 100%;
        background-color: #0d6efd;
        border-radius: 3px;
    }

    @keyframes pulse-button {
        0% { transform: scale(1); }
        50% { transform: scale(1.05); }
        100% { transform: scale(1); }
    }

    .pulse-button {
        animation: pulse-button 2s infinite;
    }
    </style>
    """, unsafe_allow_html=True)

    st.write("Improve your expeditions with these upgrades:")

    # Create columns for each upgrade type
    col1, col2, col3 = st.columns(3)

    # Speed upgrade: reduces the expedition cooldown time
    with col1:
        _render_upgrade_option(
            "speed", "Speed", "⏱️", "Reduces cooldown time",
            f"{format_time_remaining(calculate_expedition_duration())}",
            f"{format_time_remaining(calculate_expedition_duration() * 0.9)}",
        )

    # Capacity upgrade: more tags per expedition
    with col2:
        _render_upgrade_option(
            "capacity", "Capacity", "🔍", "Increases tags discovered",
            f"{calculate_expedition_capacity()} tags",
            f"{calculate_expedition_capacity() + 1} tags",
        )

    # Rarity upgrade: +20% rare-tag chance per level beyond the first
    with col3:
        rarity_level = st.session_state.library_upgrades.get("rarity", 1)
        _render_upgrade_option(
            "rarity", "Rarity", "💎", "Improves rare tag chance",
            f"+{(rarity_level - 1) * 20}% boost",
            f"+{rarity_level * 20}% boost",
        )

    # Add a styled info box about library growth with dark theme
    st.markdown("""
    <div style="background-color: rgba(13, 110, 253, 0.15);
                border-left: 4px solid #0d6efd;
                border-radius: 4px;
                padding: 15px;
                margin-top: 20px;
                color: #ffffff;">
        <h4 style="margin-top: 0; color: #6495ED;">📚 Library Growth</h4>
        <p style="margin-bottom: 0;">
            Your library will grow as you collect more tags. Each floor of the library unlocks new rarities and
            improves your chances of finding rare tags. Continue collecting tags to unlock deeper levels of the library!
        </p>
    </div>
    """, unsafe_allow_html=True)
|
1520 |
+
|
1521 |
+
def initialize_library_system():
    """Initialize the library system state in session state if not already present.

    On the first call of a session this loads the persisted library state
    (or seeds defaults when none exists). On every call it ensures the
    floor definitions are present, refreshes "unknown" tag categories, and
    injects the rarity-styling CSS (Streamlit reruns the script, so the
    CSS must be re-emitted each time).
    """
    if 'library_system_initialized' not in st.session_state:
        st.session_state.library_system_initialized = True

        # Try to load from storage first
        library_state = tag_storage.load_library_state(st.session_state)

        if library_state:
            # We already have the state loaded into session_state by the load function
            print("Library system loaded from storage.")
        else:
            # Initialize with defaults
            st.session_state.discovered_tags = {}  # {tag_name: {"rarity": str, "discovery_time": timestamp, "category": str}}
            st.session_state.library_exploration_history = []  # List of recent library explorations

            # Initialize enkephalin if not present
            if 'enkephalin' not in st.session_state:
                st.session_state.enkephalin = 0

            # For the library interface
            st.session_state.expedition_results = []  # Results from completed expeditions

            # Library growth system
            st.session_state.library_growth = {
                "total_discoveries": 0,
                "last_discovery_time": time.time()
            }

            # Upgrade system for library
            st.session_state.library_upgrades = {
                "speed": 1,     # Expedition speed (reduces cooldown time)
                "capacity": 1,  # Tags discovered per expedition
                "rarity": 1     # Rare tag chance
            }

            # Set of explored library tiers
            st.session_state.explored_library_tiers = set()

            print("Library system initialized with defaults.")

    # Store library floors in session state if not already there
    if 'library_floors' not in st.session_state:
        st.session_state.library_floors = LIBRARY_FLOORS

    # Update categories for any "unknown" category tags
    update_discovered_tag_categories()

    # Add CSS animations for styling (per-rarity text/border effects)
    st.markdown("""
    <style>
    /* Star of the City animation */
    @keyframes star-glow {
        0% { text-shadow: 0 0 5px #FFD700; }
        50% { text-shadow: 0 0 15px #FFD700; }
        100% { text-shadow: 0 0 5px #FFD700; }
    }

    /* Impuritas Civitas animation */
    @keyframes rainbow-text {
        0% { color: red; }
        14% { color: orange; }
        28% { color: yellow; }
        42% { color: green; }
        57% { color: blue; }
        71% { color: indigo; }
        85% { color: violet; }
        100% { color: red; }
    }

    @keyframes rainbow-border {
        0% { border-color: red; }
        14% { border-color: orange; }
        28% { border-color: yellow; }
        42% { border-color: green; }
        57% { border-color: blue; }
        71% { border-color: indigo; }
        85% { border-color: violet; }
        100% { border-color: red; }
    }

    /* Urban Nightmare animation */
    @keyframes nightmare-pulse {
        0% { border-color: #FF9800; }
        50% { border-color: #FF5722; }
        100% { border-color: #FF9800; }
    }

    /* Urban Plague subtle effect */
    .glow-purple {
        text-shadow: 0 0 3px #9C27B0;
    }

    /* Apply the animations to specific rarity classes */
    .star-of-city {
        animation: star-glow 2s infinite;
        font-weight: bold;
    }

    .impuritas-civitas {
        animation: rainbow-text 4s linear infinite;
        font-weight: bold;
    }

    .urban-nightmare {
        animation: nightmare-pulse 3s infinite;
        font-weight: bold;
    }

    .urban-plague {
        text-shadow: 0 0 3px #9C27B0;
        font-weight: bold;
    }
    </style>
    """, unsafe_allow_html=True)
|
1636 |
+
|
1637 |
+
def display_library_extraction():
    """Display the library exploration interface.

    Top-level entry point for the library page: initializes library state,
    shows the library banner, and lays out three tabs (exploration,
    discovered tags, building view).
    """
    initialize_library_system()

    st.title(f"Welcome to {LIBRARY_INFO['name']}")
    st.markdown(f"""
    <div style="background-color: rgba(74, 20, 140, 0.15);
                border-radius: 10px;
                padding: 15px;
                margin-bottom: 20px;
                border-left: 5px solid {LIBRARY_INFO['color']};
                color: #ffffff;">
        <p style="margin: 0;">{LIBRARY_INFO['description']}</p>
    </div>
    """, unsafe_allow_html=True)

    # Create tabs with enhanced styling for dark theme
    st.markdown("""
    <style>
    /* Custom styling for tabs */
    .stTabs [data-baseweb="tab-list"] {
        gap: 2px;
    }

    .stTabs [data-baseweb="tab"] {
        border-radius: 5px 5px 0 0;
        padding: 10px 16px;
        font-weight: 600;
    }
    </style>
    """, unsafe_allow_html=True)

    # Store current tab index in session state if not present.
    # NOTE(review): every `with` block below runs on each rerun regardless of
    # which tab is selected, so this index always ends up at 2 — confirm
    # whether anything actually relies on it.
    if 'library_tab_index' not in st.session_state:
        st.session_state.library_tab_index = 0

    # Create expanded tabs - removed the essence tab
    explore_tab, discovered_tab, building_tab = st.tabs([
        "📚 Library Exploration",
        "🔍 Discovered Tags",
        "🏛️ Library Building"
    ])

    with explore_tab:
        st.session_state.library_tab_index = 0
        display_library_exploration_interface()

    with discovered_tab:
        st.session_state.library_tab_index = 1
        st.subheader("Your Discovered Tags")
        st.write("These are tags you've discovered through the library system. They differ from your collected tags, which are obtained from scanning images.")

        # Display discovered tags using our new function
        display_discovered_tags()

    with building_tab:
        st.session_state.library_tab_index = 2
        display_library_building()
|
1695 |
+
|
1696 |
+
def display_discovered_tags() -> None:
    """Display the user's discovered tags with the same visual style as the tag collection.

    Reads `st.session_state.discovered_tags` (mapping tag -> info dict with keys such as
    "rarity", "category", "discovery_time"; exact schema set by the discovery code — not
    visible here, so treat key names as assumptions to confirm against the writer side).
    Offers three sort modes and renders each group via display_discovered_tag_grid().
    """
    # Bail out early when nothing has been discovered yet (attribute may not exist
    # before the first discovery, hence the hasattr guard).
    if not hasattr(st.session_state, 'discovered_tags') or not st.session_state.discovered_tags:
        st.info("Explore the library to discover new tags!")
        return

    # Show total unique discovered tags.
    unique_tags = len(st.session_state.discovered_tags)
    st.write(f"You have discovered {unique_tags} unique tags.")

    # Count tags by rarity.
    rarity_counts = {}
    for tag_info in st.session_state.discovered_tags.values():
        rarity = tag_info.get("rarity", "Unknown")
        if rarity not in rarity_counts:
            rarity_counts[rarity] = 0
        rarity_counts[rarity] += 1

    # Only display rarity categories that have tags.
    # (Counts are built by incrementing, so every entry is > 0 already; the filter
    # is defensive and keeps the dict shape explicit.)
    active_rarities = {r: c for r, c in rarity_counts.items() if c > 0}

    # If there are active rarities to display, render the summary row.
    if active_rarities:
        display_discovered_rarity_distribution(active_rarities)

    # Add a sorting option; the key keeps this selectbox's state separate from
    # other selectboxes on the page.
    sort_options = ["Category (rarest first)", "Rarity", "Discovery Time"]
    selected_sort = st.selectbox("Sort tags by:", sort_options, key="discovered_tags_sort")

    # Group tags by the selected method.
    if selected_sort == "Category (rarest first)":
        # Group tags by category.
        categories = {}
        for tag, info in st.session_state.discovered_tags.items():
            category = info.get("category", "unknown")
            if category not in categories:
                categories[category] = []
            categories[category].append((tag, info))

        # Display tags by category in expanders.
        for category, tags in sorted(categories.items()):
            # RARITY_LEVELS key order defines the rarity scale (presumably
            # commonest -> rarest; defined elsewhere in this file — confirm).
            rarity_order = list(RARITY_LEVELS.keys())

            # Sort key for (tag, info) tuples; unknown rarities sort last.
            # NOTE(review): this returns len - index, and is then used with
            # reverse=True — the two inversions cancel, so rarest tags end up
            # LAST in sorted_tags, not first as the surrounding comments claim.
            # Display order is ultimately driven by the reversed(rarity_order)
            # loop below, so this only affects within-group ordering — confirm
            # whether the double inversion is intentional.
            def get_rarity_index(tag_tuple):
                tag, info = tag_tuple
                rarity = info.get("rarity", "Unknown")
                if rarity in rarity_order:
                    return len(rarity_order) - rarity_order.index(rarity)
                return 0

            sorted_tags = sorted(tags, key=get_rarity_index, reverse=True)

            # Check if category has any rare tags (used to auto-expand below).
            has_rare_tags = any(info.get("rarity") in ["Impuritas Civitas", "Star of the City"]
                                for _, info in sorted_tags)

            # Get category info (icon + color) if available.
            category_display = category.capitalize()
            if category in TAG_CATEGORIES:
                category_info = TAG_CATEGORIES[category]
                category_icon = category_info.get("icon", "")
                category_color = category_info.get("color", "#888888")
                category_display = f"<span style='color:{category_color};'>{category_icon} {category.capitalize()}</span>"

            # Create header with information about rare tags if present.
            header = f"{category_display} ({len(tags)} tags)"
            if has_rare_tags:
                header += " ✨ Contains rare tags!"

            # Display the header and expander. The header is a separate markdown
            # call because st.expander labels cannot render HTML.
            st.markdown(header, unsafe_allow_html=True)
            with st.expander("Show/Hide", expanded=has_rare_tags):
                # Group by rarity within category.
                rarity_groups = {}
                for tag, info in sorted_tags:
                    rarity = info.get("rarity", "Unknown")
                    if rarity not in rarity_groups:
                        rarity_groups[rarity] = []
                    rarity_groups[rarity].append((tag, info))

                # Display each rarity group in order (rarest first, assuming
                # RARITY_LEVELS is ordered commonest -> rarest).
                for rarity in reversed(rarity_order):
                    if rarity in rarity_groups:
                        tags_in_rarity = rarity_groups[rarity]
                        if tags_in_rarity:
                            color = RARITY_LEVELS[rarity]["color"]

                            # Special styling for the rarest tiers; "rainbow-text"
                            # keyframes are expected to be injected elsewhere on
                            # the page (not defined in this function).
                            if rarity == "Impuritas Civitas":
                                rarity_style = f"animation:rainbow-text 4s linear infinite;font-weight:bold;"
                            elif rarity == "Star of the City":
                                rarity_style = f"color:{color};text-shadow:0 0 3px gold;font-weight:bold;"
                            elif rarity == "Urban Nightmare":
                                rarity_style = f"color:{color};text-shadow:0 0 1px #FF5722;font-weight:bold;"
                            else:
                                rarity_style = f"color:{color};font-weight:bold;"

                            st.markdown(f"<span style='{rarity_style}'>{rarity.capitalize()}</span> ({len(tags_in_rarity)} tags)", unsafe_allow_html=True)
                            display_discovered_tag_grid(tags_in_rarity)
                            st.markdown("---")

    elif selected_sort == "Rarity":
        # Group tags by rarity level.
        rarity_groups = {}
        for tag, info in st.session_state.discovered_tags.items():
            rarity = info.get("rarity", "Unknown")
            if rarity not in rarity_groups:
                rarity_groups[rarity] = []
            rarity_groups[rarity].append((tag, info))

        # Get ordered rarities (rarest first).
        ordered_rarities = list(RARITY_LEVELS.keys())
        ordered_rarities.reverse()  # Reverse to show rarest first

        # Display tags by rarity.
        for rarity in ordered_rarities:
            if rarity in rarity_groups:
                tags = rarity_groups[rarity]
                color = RARITY_LEVELS[rarity]["color"]

                # Add special styling for rare rarities (default set first,
                # then overridden for the three rarest tiers).
                rarity_html = f"<span style='color:{color};font-weight:bold;'>{rarity.capitalize()}</span>"
                if rarity == "Impuritas Civitas":
                    rarity_html = f"<span style='animation:rainbow-text 4s linear infinite;font-weight:bold;'>{rarity.capitalize()}</span>"
                elif rarity == "Star of the City":
                    rarity_html = f"<span style='color:{color};text-shadow:0 0 3px gold;font-weight:bold;'>{rarity.capitalize()}</span>"
                elif rarity == "Urban Nightmare":
                    rarity_html = f"<span style='color:{color};text-shadow:0 0 1px #FF5722;font-weight:bold;'>{rarity.capitalize()}</span>"

                # First create the title with HTML, then use it in the expander
                # (expander labels cannot contain HTML). Rarest tiers start expanded.
                st.markdown(f"### {rarity_html} ({len(tags)} tags)", unsafe_allow_html=True)
                with st.expander("Show/Hide", expanded=rarity in ["Impuritas Civitas", "Star of the City"]):
                    # Group by category within rarity.
                    category_groups = {}
                    for tag, info in tags:
                        category = info.get("category", "unknown")
                        if category not in category_groups:
                            category_groups[category] = []
                        category_groups[category].append((tag, info))

                    # Display each category within this rarity level.
                    for category, category_tags in sorted(category_groups.items()):
                        # Get category info (icon + color) if available.
                        category_display = category.capitalize()
                        if category in TAG_CATEGORIES:
                            category_info = TAG_CATEGORIES[category]
                            category_icon = category_info.get("icon", "")
                            category_color = category_info.get("color", "#888888")
                            category_display = f"<span style='color:{category_color};'>{category_icon} {category.capitalize()}</span>"

                        st.markdown(f"#### {category_display} ({len(category_tags)} tags)", unsafe_allow_html=True)
                        display_discovered_tag_grid(category_tags)
                        st.markdown("---")

    elif selected_sort == "Discovery Time":
        # Sort all tags by discovery time (newest first). discovery_time is a
        # string timestamp; lexicographic sort assumes a sortable format like
        # "YYYY-MM-DD HH:MM" — confirm against the code that writes it.
        sorted_tags = []
        for tag, info in st.session_state.discovered_tags.items():
            discovery_time = info.get("discovery_time", "")
            sorted_tags.append((tag, info, discovery_time))

        sorted_tags.sort(key=lambda x: x[2], reverse=True)  # Sort by time, newest first

        # Group by date.
        date_groups = {}
        for tag, info, time_str in sorted_tags:
            # Extract just the date part if timestamp has date and time.
            date = time_str.split()[0] if " " in time_str else time_str

            if date not in date_groups:
                date_groups[date] = []
            date_groups[date].append((tag, info))

        # Display tags grouped by discovery date. dicts preserve insertion
        # order, so groups appear newest-date first.
        for date, tags in date_groups.items():
            date_display = date if date else "Unknown date"
            st.markdown(f"### Discovered on {date_display} ({len(tags)} tags)")

            with st.expander("Show/Hide", expanded=date == list(date_groups.keys())[0]):  # Expand most recent by default
                display_discovered_tag_grid(tags)
                st.markdown("---")
def display_discovered_rarity_distribution(active_rarities):
    """Render one column per rarity tier showing how many discovered tags it holds.

    Injects the CSS keyframes/classes used by the themed badges, then lays the
    counts out side by side with st.columns. `active_rarities` maps rarity
    name -> count and must be non-empty.
    """
    # CSS for the animated rarity badges (glow / rainbow / pulse effects).
    st.markdown("""
    <style>
    @keyframes grid-glow {
        0% { text-shadow: 0 0 2px gold; }
        50% { text-shadow: 0 0 6px gold; }
        100% { text-shadow: 0 0 2px gold; }
    }

    @keyframes grid-rainbow {
        0% { color: red; }
        14% { color: orange; }
        28% { color: yellow; }
        42% { color: green; }
        57% { color: blue; }
        71% { color: indigo; }
        85% { color: violet; }
        100% { color: red; }
    }

    @keyframes grid-pulse {
        0% { opacity: 0.8; }
        50% { opacity: 1; }
        100% { opacity: 0.8; }
    }

    .grid-star {
        text-shadow: 0 0 3px gold;
        animation: grid-glow 2s infinite;
    }

    .grid-impuritas {
        animation: grid-rainbow 4s linear infinite;
    }

    .grid-nightmare {
        text-shadow: 0 0 1px #FF5722;
        animation: grid-pulse 3s infinite;
    }

    .grid-plague {
        text-shadow: 0 0 1px #9C27B0;
    }
    </style>
    """, unsafe_allow_html=True)

    # CSS class per special tier; common tiers fall through to inline color styling.
    special_classes = {
        "Impuritas Civitas": "grid-impuritas",
        "Star of the City": "grid-star",
        "Urban Nightmare": "grid-nightmare",
        "Urban Plague": "grid-plague",
    }

    columns = st.columns(len(active_rarities))
    for column, (rarity, count) in zip(columns, active_rarities.items()):
        with column:
            # Fall back to neutral grey when the rarity is unknown.
            color = RARITY_LEVELS.get(rarity, {}).get("color", "#888888")
            css_class = special_classes.get(rarity, "")

            if css_class:
                badge = f"<span class='{css_class}' style='font-weight:bold;'>{rarity.capitalize()}</span>"
            else:
                badge = f"<span style='color:{color};font-weight:bold;'>{rarity.capitalize()}</span>"

            st.markdown(
                f"<div style='text-align:center;'>{badge}<br>{count}</div>",
                unsafe_allow_html=True
            )
1958 |
+
def display_discovered_tag_grid(tags):
|
1959 |
+
"""Display discovered tags in a grid layout with discovery information"""
|
1960 |
+
# Create a grid layout for tags
|
1961 |
+
cols = st.columns(3)
|
1962 |
+
for i, (tag, info) in enumerate(sorted(tags)):
|
1963 |
+
col_idx = i % 3
|
1964 |
+
with cols[col_idx]:
|
1965 |
+
rarity = info.get("rarity", "Unknown")
|
1966 |
+
discovery_time = info.get("discovery_time", "")
|
1967 |
+
library_floor = info.get("library_floor", "")
|
1968 |
+
discovery_count = info.get("discovery_count", 1)
|
1969 |
+
|
1970 |
+
color = RARITY_LEVELS.get(rarity, {}).get("color", "#888888")
|
1971 |
+
|
1972 |
+
# Get sample count if available
|
1973 |
+
sample_count = None
|
1974 |
+
if hasattr(st.session_state, 'tag_rarity_metadata') and st.session_state.tag_rarity_metadata:
|
1975 |
+
if tag in st.session_state.tag_rarity_metadata:
|
1976 |
+
tag_info = st.session_state.tag_rarity_metadata[tag]
|
1977 |
+
if isinstance(tag_info, dict) and "sample_count" in tag_info:
|
1978 |
+
sample_count = tag_info["sample_count"]
|
1979 |
+
|
1980 |
+
# Format sample count
|
1981 |
+
sample_display = ""
|
1982 |
+
if sample_count is not None:
|
1983 |
+
if sample_count >= 1000000:
|
1984 |
+
sample_display = f"<span style='font-size:0.8em;color:#666;'>({sample_count/1000000:.1f}M)</span>"
|
1985 |
+
elif sample_count >= 1000:
|
1986 |
+
sample_display = f"<span style='font-size:0.8em;color:#666;'>({sample_count/1000:.1f}K)</span>"
|
1987 |
+
else:
|
1988 |
+
sample_display = f"<span style='font-size:0.8em;color:#666;'>({sample_count})</span>"
|
1989 |
+
|
1990 |
+
# Apply special styling for rare tags
|
1991 |
+
tag_html = tag
|
1992 |
+
if rarity == "Impuritas Civitas":
|
1993 |
+
tag_html = f"<span style='animation: rainbow-text 4s linear infinite;'>{tag}</span>"
|
1994 |
+
elif rarity == "Star of the City":
|
1995 |
+
tag_html = f"<span style='text-shadow: 0 0 3px gold;'>{tag}</span>"
|
1996 |
+
elif rarity == "Urban Nightmare":
|
1997 |
+
tag_html = f"<span style='text-shadow: 0 0 1px #FF9800;'>{tag}</span>"
|
1998 |
+
|
1999 |
+
# Display tag with rarity badge and discovery info
|
2000 |
+
st.markdown(
|
2001 |
+
f"{tag_html} <span style='background-color:{color};color:white;padding:2px 6px;border-radius:10px;font-size:0.8em;'>{rarity.capitalize()}</span> {sample_display}",
|
2002 |
+
unsafe_allow_html=True
|
2003 |
+
)
|
2004 |
+
|
2005 |
+
# Show discovery details
|
2006 |
+
if library_floor:
|
2007 |
+
st.markdown(f"<span style='font-size:0.85em;'>Found in: {library_floor}</span>", unsafe_allow_html=True)
|
2008 |
+
|
2009 |
+
if discovery_count > 1:
|
2010 |
+
st.markdown(f"<span style='font-size:0.85em;'>Seen {discovery_count} times</span>", unsafe_allow_html=True)
|
game/mosaics/templates/1st_costume_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/animal_crossing_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/arknights_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/azur_lane_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/blue_archive_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/boku_no_hero_academia_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/casual_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/chainsaw_man_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/character_extended_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/company_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/cosplay_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/disgaea_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/disney_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/dragon_ball_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/dungeon_and_fighter_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/elsword_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/emblem_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/ensemble_stars!_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/fate_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/ff14_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/fire_emblem_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/flower_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/food_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/genshin_impact_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/girls'_frontline_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/girls_und_panzer_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/granblue_fantasy_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/honkai_impact_template.png
ADDED
![]() |
Git LFS Details
|
game/mosaics/templates/honkai_star_rail_template.png
ADDED
![]() |
Git LFS Details
|