EGYADMIN committed (verified)
Commit 1249af0 · 1 parent: 18f5e7f

Upload 13 files

Files changed (4)
  1. .gitignore +1 -1
  2. LICENSE +21 -0
  3. README.md +1 -1
  4. app.py +455 -257
.gitignore CHANGED
@@ -12,4 +12,4 @@ env/
 
 # Ignore system files
 .DS_Store
-Thumbs.db
+Thumbs.db.env
LICENSE ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 Tamer ELGOHARY
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
README.md CHANGED
@@ -127,4 +127,4 @@ streamlit run main.py
 
 ## Contact
 
-For inquiries or support, please reach out by email: support@example.com
+For inquiries or support, please reach out by email: tgohary@sajco.com.sa
app.py CHANGED
@@ -1,277 +1,475 @@
 """
-Tender Analysis, Supply Chain, and Future Forecasting System
-A dedicated application for Shibh Al-Jazira Contracting Company
 
-Development Engineer: Eng. Tamer ELGOHARY
 """
 
 import os
 import sys
-import yaml
-import json
-import logging
-from datetime import datetime
-from pathlib import Path
-
-import streamlit as st
-
-# Configure the page up front (this must be the first Streamlit command)
-st.set_page_config(
-    page_title="نظام تحليل المناقصات - شركة شبه الجزيرة للمقاولات",
-    page_icon="📊",
-    layout="wide",
-    initial_sidebar_state="expanded",
-)
-
-# Import the remaining libraries (after set_page_config)
-import pandas as pd
 import numpy as np
-import plotly.express as px
-import plotly.graph_objects as go
-
-# Add the project root directory to the path
-current_dir = Path(__file__).parent
-sys.path.append(str(current_dir))
-
-# Import the required components
-from web.pages.home import show_home_page
-from web.pages.tender_analysis import show_tender_analysis
-from web.pages.requirements_analysis import show_requirements_analysis
-from web.pages.cost_estimation import show_cost_estimation
-from web.pages.risk_analysis import show_risk_analysis
-from web.pages.timeline import show_timeline
-from web.pages.local_content import show_local_content
-from web.pages.supply_chain import show_supply_chain
-from web.pages.procurement import show_procurement
-from web.pages.vendors import show_vendors
-from web.pages.future_projects import show_future_projects
-from web.pages.success_prediction import show_success_prediction
-from web.pages.reports import show_reports
-from web.pages.ai_settings import show_ai_settings
-
-from web.components.sidebar import create_sidebar
-from web.components.header import create_header
-
-from utils.file_handler import setup_logging
-from utils.permissions import initialize_permissions
-from utils.ai_helper import initialize_ai_helper
 
-# Set up logging
-setup_logging()
-logger = logging.getLogger("TenderAnalysisSystem")
 
-# Load the configuration
-def load_config():
-    config_path = os.path.join(current_dir, "config", "config.yaml")
-    if os.path.exists(config_path):
-        with open(config_path, 'r', encoding='utf-8') as f:
-            return yaml.safe_load(f)
-    return {}
 
-# Initialize session state
-def initialize_session():
-    if 'config' not in st.session_state:
-        st.session_state.config = load_config()
-
-    if 'page' not in st.session_state:
-        st.session_state.page = "الرئيسية"
-
-    if 'uploaded_files' not in st.session_state:
-        st.session_state.uploaded_files = []
-
-    if 'analysis_results' not in st.session_state:
-        st.session_state.analysis_results = {}
 
-    if 'current_tender' not in st.session_state:
-        st.session_state.current_tender = None
 
-    if 'user_info' not in st.session_state:
-        st.session_state.user_info = {
-            "company": "شركة شبه الجزيرة للمقاولات",
-            "user_name": "المستخدم الحالي",
-            "role": "مدير النظام"  # default role set to system administrator to grant access to all permissions
         }
-
-    if 'latest_predictions' not in st.session_state:
-        st.session_state.latest_predictions = None
-
-    # Default date range
-    if 'date_range' not in st.session_state:
-        today = datetime.today()
-        st.session_state.date_range = {
-            "start_date": today.replace(month=1, day=1),
-            "end_date": today
        }
 
-    # Initialize the permissions system
-    initialize_permissions()
-
-    # Initialize the AI assistant
-    initialize_ai_helper()
-
-    # Determine whether the device is mobile
-    if 'is_mobile' not in st.session_state:
-        st.session_state.is_mobile = False
-
-# Add mobile support
-def setup_mobile_support():
-    # Add mobile CSS
-    mobile_css_path = os.path.join(current_dir, "web", "styles", "mobile.css")
-    if os.path.exists(mobile_css_path):
-        with open(mobile_css_path) as f:
-            st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
-
-    # Add a script that detects mobile devices and injects the menu
-    mobile_script = """
-    <script>
-        // Detect mobile devices
-        function detectMobile() {
-            return (window.innerWidth <= 768) ||
-                   (navigator.userAgent.match(/Android/i) ||
-                    navigator.userAgent.match(/webOS/i) ||
-                    navigator.userAgent.match(/iPhone/i) ||
-                    navigator.userAgent.match(/iPad/i) ||
-                    navigator.userAgent.match(/iPod/i) ||
-                    navigator.userAgent.match(/BlackBerry/i) ||
-                    navigator.userAgent.match(/Windows Phone/i));
-        }
-
-        // Send the mobile status to Streamlit
-        if (detectMobile()) {
-            const data = {
-                is_mobile: true
-            };
-            window.parent.postMessage({
-                type: "streamlit:setComponentValue",
-                value: data
-            }, "*");
-        }
-    </script>
-    """
-    st.markdown(mobile_script, unsafe_allow_html=True)
-
-    # Add the mobile menu script
-    mobile_menu_path = os.path.join(current_dir, "web", "scripts", "mobile_menu.js")
-    if os.path.exists(mobile_menu_path):
-        with open(mobile_menu_path) as f:
-            st.markdown(f"<script>{f.read()}</script>", unsafe_allow_html=True)
-
-# Main application
-def main():
-    # Initialize the session
-    initialize_session()
-
-    # Add custom CSS
-    with open(os.path.join(current_dir, "web", "styles", "main.css")) as f:
-        st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
-
-    # Add RTL CSS for Arabic
-    with open(os.path.join(current_dir, "web", "styles", "rtl.css")) as f:
-        st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
-
-    # Add mobile support
-    setup_mobile_support()
-
-    # Add a Streamlit component variable to read the mobile status
-    mobile_detector = st.empty()
-    mobile_data = mobile_detector.text_input("", key="mobile_detector", label_visibility="collapsed")
-
-    try:
-        if mobile_data:
-            mobile_data_json = json.loads(mobile_data)
-            st.session_state.is_mobile = mobile_data_json.get('is_mobile', False)
-    except:
-        pass
-
-    # Create the sidebar
-    selected_page = create_sidebar()
-
-    # Create the page header
-    create_header()
-
-    # Add the mobile menu button if a mobile device was detected
-    if st.session_state.is_mobile:
-        st.markdown('<div id="mobile-menu-container"></div>', unsafe_allow_html=True)
-
-    # Render the selected page
-    if selected_page == "الرئيسية":
-        show_home_page()
-
-    elif selected_page == "تحليل المناقصات":
-        show_tender_analysis()
-
-    elif selected_page == "تحليل المتطلبات":
-        show_requirements_analysis()
-
-    elif selected_page == "تقدير التكاليف":
-        show_cost_estimation()
-
-    elif selected_page == "تحليل المخاطر":
-        show_risk_analysis()
-
-    elif selected_page == "الجدول الزمني":
-        show_timeline()
-
-    elif selected_page == "المحتوى المحلي":
-        show_local_content()
-
-    elif selected_page == "سلاسل الإمداد":
-        show_supply_chain()
-
-    elif selected_page == "المشتريات":
-        show_procurement()
-
-    elif selected_page == "الموردون والمقاولون":
-        show_vendors()
-
-    elif selected_page == "المشاريع المستقبلية":
-        show_future_projects()
-
-    elif selected_page == "توقع احتمالية النجاح":
-        from utils.permissions import check_permission
-        if check_permission("access_ai"):
-            show_success_prediction()
        else:
-            st.error("ليس لديك صلاحية للوصول إلى هذه الصفحة")
-
-    elif selected_page == "التقارير":
-        show_reports()
-
-    elif selected_page == "إعدادات الذكاء الاصطناعي":
-        from utils.permissions import check_permission
-        if check_permission("configure_system"):
-            show_ai_settings()
-        else:
-            st.error("ليس لديك صلاحية للوصول إلى هذه الصفحة")
 
-    # Add a mobile/desktop mode toggle icon
-    if st.session_state.is_mobile:
-        st.markdown(
-            """
-            <style>
-            .desktop-switch {
-                position: fixed;
-                bottom: 10px;
-                right: 10px;
-                background-color: #4a86e8;
-                color: white;
-                width: 40px;
-                height: 40px;
-                border-radius: 50%;
-                text-align: center;
-                line-height: 40px;
-                box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2);
-                z-index: 9999;
-                cursor: pointer;
            }
-            </style>
-            <div class="desktop-switch" onclick="window.open(window.location.href, '_blank')">🖥️</div>
-            """,
-            unsafe_allow_html=True
-        )
-
-    # Log the page visit
-    logger.info(f"تمت زيارة صفحة {selected_page}")
 
 if __name__ == "__main__":
-    main()
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
 """
+Hybrid Face Environment System
 
+This module integrates multiple face recognition and analysis techniques
+into a comprehensive system capable of performing 2D and 3D face recognition,
+landmark detection, pose estimation, and emotion analysis.
 """
 
 import os
 import sys
+import cv2
 import numpy as np
+import logging
+from typing import Dict, List, Tuple, Union, Optional, Any
 
+# Import component modules
+from face_detector import FaceDetector
+from feature_extractor import FeatureExtractor
+from image_preprocessor import ImagePreprocessor
+from face_recognition_2d import FaceRecognition2D
+from face_recognition_3d import FaceRecognition3D
+from landmark_detector import LandmarkDetector
+from pose_estimator import PoseEstimator
+from emotion_analyzer import EmotionAnalyzer
+from database_manager import DatabaseManager
 
+class HybridFaceSystem:
+    """
+    Main class that integrates all components of the hybrid face recognition system.
 
+    This system combines 2D and 3D face recognition techniques with landmark detection,
+    pose estimation, and emotion analysis for a comprehensive face analysis solution.
+    """
 
+    def __init__(self, config: Dict = None):
+        """
+        Initialize the hybrid face system with configuration parameters.
+
+        Args:
+            config: Dictionary containing configuration parameters for the system
+        """
+        # Setup logging
+        logging.basicConfig(
+            level=logging.INFO,
+            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+        )
+        self.logger = logging.getLogger("HybridFaceSystem")
+        self.logger.info("Initializing Hybrid Face System...")
+
+        # Set default configuration if none provided
+        self.config = config or {
+            'min_face_size': (60, 60),
+            'confidence_threshold': 0.7,
+            'use_gpu': True,
+            'recognition_mode': 'hybrid',  # Options: '2d', '3d', 'hybrid'
+            'fusion_weights': {'2d': 0.4, '3d': 0.6},
+            'database_path': 'face_database',
         }
+
+        # Initialize components
+        self._init_components()
+
+        self.logger.info("Hybrid Face System initialized successfully")
+
+    def _init_components(self):
+        """Initialize all system components with appropriate configurations."""
+        try:
+            # Initialize preprocessing component
+            self.preprocessor = ImagePreprocessor(
+                target_size=(224, 224),
+                normalize=True,
+                augmentation_enabled=False
+            )
+
+            # Initialize face detection component
+            self.face_detector = FaceDetector(
+                min_size=self.config['min_face_size'],
+                use_gpu=self.config['use_gpu']
+            )
+
+            # Initialize facial landmark detection
+            self.landmark_detector = LandmarkDetector(
+                model_type='deep',
+                num_landmarks=68
+            )
+
+            # Initialize feature extraction
+            self.feature_extractor = FeatureExtractor(
+                embedding_size=512,
+                use_deep_features=True
+            )
+
+            # Initialize pose estimation
+            self.pose_estimator = PoseEstimator(
+                landmark_detector=self.landmark_detector
+            )
+
+            # Initialize emotion analysis
+            self.emotion_analyzer = EmotionAnalyzer(
+                model_path='models/emotion_model.h5'
+            )
+
+            # Initialize database manager
+            self.db_manager = DatabaseManager(
+                database_path=self.config['database_path']
+            )
+
+            # Initialize recognition systems
+            self.face_recognition_2d = FaceRecognition2D(
+                feature_extractor=self.feature_extractor,
+                db_manager=self.db_manager
+            )
+
+            self.face_recognition_3d = FaceRecognition3D(
+                feature_extractor=self.feature_extractor,
+                db_manager=self.db_manager
+            )
+
+        except Exception as e:
+            self.logger.error(f"Error initializing components: {str(e)}")
+            raise
+
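Note on configuration: __init__ assigns self.config = config or {...}, which replaces the defaults wholesale rather than merging them, so a partial dictionary would raise KeyError inside _init_components. A caller who overrides anything should supply every key; a minimal sketch, with illustrative values:

    # Hypothetical override: every key the components read must be present.
    custom_config = {
        'min_face_size': (80, 80),
        'confidence_threshold': 0.8,
        'use_gpu': False,                           # CPU-only deployment
        'recognition_mode': '2d',                   # skip the 3D pipeline entirely
        'fusion_weights': {'2d': 1.0, '3d': 0.0},
        'database_path': 'face_database',
    }
    system = HybridFaceSystem(config=custom_config)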
+    def process_image(self, image: np.ndarray, depth_data: Optional[np.ndarray] = None) -> Dict:
+        """
+        Process an input image through the hybrid face recognition pipeline.
+
+        Args:
+            image: Input RGB image as numpy array
+            depth_data: Optional depth data for 3D recognition
+
+        Returns:
+            Dictionary containing recognition results and analysis data
+        """
+        try:
+            # Step 1: Preprocess the image
+            preprocessed_image = self.preprocessor.preprocess(image)
+
+            # Step 2: Detect faces
+            faces = self.face_detector.detect(preprocessed_image)
+
+            if not faces:
+                return {'status': 'no_face_detected'}
+
+            results = []
+            for face_idx, face_data in enumerate(faces):
+                result = self._process_single_face(
+                    image=preprocessed_image,
+                    face_data=face_data,
+                    depth_data=depth_data,
+                    face_idx=face_idx
+                )
+                results.append(result)
+
+            return {
+                'status': 'success',
+                'num_faces': len(faces),
+                'results': results
+            }
+
+        except Exception as e:
+            self.logger.error(f"Error processing image: {str(e)}")
+            return {'status': 'error', 'message': str(e)}
+
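For callers, process_image returns plain dictionaries. Under the default hybrid configuration, a successful single-face call comes back shaped roughly like this (values are illustrative; see _process_single_face below for the exact fields):

    {
        'status': 'success',
        'num_faces': 1,
        'results': [{
            'face_id': 0,
            'bbox': (120, 80, 160, 160),     # x, y, width, height
            'landmarks': ...,                # 68 points from LandmarkDetector
            'pose': {'pitch': ..., 'yaw': ..., 'roll': ...},
            'emotion': ...,
            'person_id': 'person_001',       # or None if no match
            'confidence': 0.84,
            'recognition_details': {'2d': {...}, '3d': {...}},
        }],
    }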
+    def _process_single_face(self, image: np.ndarray, face_data: Dict,
+                             depth_data: Optional[np.ndarray], face_idx: int) -> Dict:
+        """
+        Process a single detected face through the recognition pipeline.
+
+        Args:
+            image: Preprocessed image
+            face_data: Dictionary with face detection results
+            depth_data: Optional depth data for 3D recognition
+            face_idx: Index of the face in the image
+
+        Returns:
+            Dictionary with recognition and analysis results for this face
+        """
+        # Extract face region
+        face_img = face_data['face_image']
+
+        # Step 3: Detect landmarks
+        landmarks = self.landmark_detector.detect_landmarks(face_img)
+
+        # Step 4: Calculate pose
+        pose = self.pose_estimator.estimate_pose(face_img, landmarks)
+
+        # Step 5: Analyze emotion
+        emotion = self.emotion_analyzer.analyze(face_img, landmarks)
+
+        # Step 6: Extract features
+        features_2d = self.feature_extractor.extract_features(face_img, mode='2d')
+
+        # Step 7: Perform recognition based on config
+        recognition_results = {}
+        confidence = 0.0
+        person_id = None
+
+        # 2D recognition
+        if self.config['recognition_mode'] in ['2d', 'hybrid']:
+            recognition_2d = self.face_recognition_2d.recognize(features_2d)
+            recognition_results['2d'] = recognition_2d
+
+            if self.config['recognition_mode'] == '2d':
+                confidence = recognition_2d['confidence']
+                person_id = recognition_2d['person_id']
+
+        # 3D recognition (if depth data available)
+        if self.config['recognition_mode'] in ['3d', 'hybrid'] and depth_data is not None:
+            # Extract depth data for this face
+            face_depth = self._extract_face_depth(depth_data, face_data['bbox'])
+
+            # Extract 3D features and perform recognition
+            features_3d = self.feature_extractor.extract_features(
+                face_img, depth_data=face_depth, mode='3d'
+            )
+            recognition_3d = self.face_recognition_3d.recognize(features_3d)
+            recognition_results['3d'] = recognition_3d
+
+            if self.config['recognition_mode'] == '3d':
+                confidence = recognition_3d['confidence']
+                person_id = recognition_3d['person_id']
+
+        # Perform fusion for hybrid mode
+        if self.config['recognition_mode'] == 'hybrid' and '2d' in recognition_results and '3d' in recognition_results:
+            fusion_result = self._fuse_recognition_results(
+                recognition_results['2d'],
+                recognition_results['3d']
+            )
+            confidence = fusion_result['confidence']
+            person_id = fusion_result['person_id']
+
+        # Step 8: Compile results
+        return {
+            'face_id': face_idx,
+            'bbox': face_data['bbox'],
+            'landmarks': landmarks,
+            'pose': pose,
+            'emotion': emotion,
+            'person_id': person_id,
+            'confidence': confidence,
+            'recognition_details': recognition_results
         }
 
+    def _extract_face_depth(self, depth_data: np.ndarray, bbox: Tuple[int, int, int, int]) -> np.ndarray:
+        """
+        Extract depth data for a specific face region.
+
+        Args:
+            depth_data: Full depth map
+            bbox: Bounding box of the face (x, y, width, height)
+
+        Returns:
+            Depth data corresponding to the face region
+        """
+        x, y, w, h = bbox
+
+        # Ensure coordinates are within bounds
+        x = max(0, x)
+        y = max(0, y)
+
+        # Extract depth data for the face region
+        face_depth = depth_data[y:y+h, x:x+w]
+        return face_depth
+
+    def _fuse_recognition_results(self, result_2d: Dict, result_3d: Dict) -> Dict:
+        """
+        Fuse 2D and 3D recognition results using weighted confidence.
+
+        Args:
+            result_2d: Results from 2D recognition
+            result_3d: Results from 3D recognition
+
+        Returns:
+            Dictionary with fused recognition results
+        """
+        # Get weights from config
+        w_2d = self.config['fusion_weights']['2d']
+        w_3d = self.config['fusion_weights']['3d']
+
+        # Extract confidences
+        conf_2d = result_2d['confidence']
+        conf_3d = result_3d['confidence']
+
+        # Extract person IDs
+        id_2d = result_2d['person_id']
+        id_3d = result_3d['person_id']
+
+        # Check if the IDs match
+        if id_2d == id_3d:
+            # Same person identified by both methods
+            fused_confidence = (w_2d * conf_2d + w_3d * conf_3d) / (w_2d + w_3d)
+            fused_id = id_2d
         else:
+            # Different persons identified, use the one with higher weighted confidence
+            weighted_conf_2d = w_2d * conf_2d
+            weighted_conf_3d = w_3d * conf_3d
+
+            if weighted_conf_2d > weighted_conf_3d:
+                fused_confidence = conf_2d
+                fused_id = id_2d
+            else:
+                fused_confidence = conf_3d
+                fused_id = id_3d
+
+        return {
+            'person_id': fused_id,
+            'confidence': fused_confidence,
+            'method': 'hybrid_fusion'
+        }
 
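To make the fusion rule concrete: with the default weights ('2d': 0.4, '3d': 0.6), agreeing IDs receive the weighted average of the two confidences, while disagreeing IDs fall back to whichever result has the larger weighted score. A standalone sketch with hypothetical confidences:

    w_2d, w_3d = 0.4, 0.6            # default fusion weights
    conf_2d, conf_3d = 0.90, 0.80    # hypothetical per-modality confidences

    # IDs agree: (0.4 * 0.90 + 0.6 * 0.80) / (0.4 + 0.6) = 0.84
    fused = (w_2d * conf_2d + w_3d * conf_3d) / (w_2d + w_3d)

    # IDs disagree: weighted scores are 0.36 vs 0.48, so the 3D match wins
    # and its raw confidence (0.80) is reported as the fused confidence.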
+    def register_face(self, image: np.ndarray, person_id: str,
+                      depth_data: Optional[np.ndarray] = None) -> Dict:
+        """
+        Register a new face in the database.
+
+        Args:
+            image: Input image containing the face
+            person_id: Unique identifier for the person
+            depth_data: Optional depth data for 3D registration
+
+        Returns:
+            Dictionary indicating registration status
+        """
+        try:
+            # Preprocess the image
+            preprocessed_image = self.preprocessor.preprocess(image)
+
+            # Detect face
+            faces = self.face_detector.detect(preprocessed_image)
+
+            if not faces:
+                return {'status': 'error', 'message': 'No face detected for registration'}
+
+            # Use the largest face if multiple are detected
+            face_data = max(faces, key=lambda x: x['face_area'])
+            face_img = face_data['face_image']
+
+            # Extract landmarks
+            landmarks = self.landmark_detector.detect_landmarks(face_img)
+
+            # Extract 2D features
+            features_2d = self.feature_extractor.extract_features(face_img, mode='2d')
+
+            # Register in 2D database
+            self.db_manager.add_face(person_id, features_2d, '2d', metadata={
+                'landmarks': landmarks,
+                'registration_time': self.db_manager.get_current_timestamp()
+            })
+
+            # If depth data is available, also register in 3D database
+            if depth_data is not None:
+                face_depth = self._extract_face_depth(depth_data, face_data['bbox'])
+                features_3d = self.feature_extractor.extract_features(
+                    face_img, depth_data=face_depth, mode='3d'
+                )
+                self.db_manager.add_face(person_id, features_3d, '3d', metadata={
+                    'landmarks': landmarks,
+                    'registration_time': self.db_manager.get_current_timestamp()
+                })
+
+                return {
+                    'status': 'success',
+                    'message': 'Face registered in both 2D and 3D databases',
+                    'person_id': person_id
+                }
+            else:
+                return {
+                    'status': 'partial_success',
+                    'message': 'Face registered in 2D database only (no depth data provided)',
+                    'person_id': person_id
+                }
+
+        except Exception as e:
+            self.logger.error(f"Error registering face: {str(e)}")
+            return {'status': 'error', 'message': str(e)}
+
+    def update_model(self, model_type: str = 'all') -> Dict:
+        """
+        Update/retrain the recognition models with the current database.
+
+        Args:
+            model_type: Type of model to update ('2d', '3d', or 'all')
+
+        Returns:
+            Dictionary with update status
+        """
+        try:
+            if model_type in ['2d', 'all']:
+                self.face_recognition_2d.update_model()
+
+            if model_type in ['3d', 'all']:
+                self.face_recognition_3d.update_model()
+
+            return {
+                'status': 'success',
+                'message': f'Successfully updated {model_type} recognition models'
             }
+
+        except Exception as e:
+            self.logger.error(f"Error updating models: {str(e)}")
+            return {'status': 'error', 'message': str(e)}
+
+    def get_system_info(self) -> Dict:
+        """Get information about the current system configuration and status."""
+        return {
+            'system_name': 'Hybrid Face Recognition System',
+            'version': '1.0.0',
+            'recognition_mode': self.config['recognition_mode'],
+            'fusion_weights': self.config['fusion_weights'],
+            'database_status': self.db_manager.get_database_stats(),
+            'components': {
+                'face_detector': self.face_detector.get_info(),
+                'landmark_detector': self.landmark_detector.get_info(),
+                'feature_extractor': self.feature_extractor.get_info(),
+                '2d_recognition': self.face_recognition_2d.get_info(),
+                '3d_recognition': self.face_recognition_3d.get_info(),
+            }
+        }
+
+
+def demo():
+    """Simple demonstration of the hybrid face system."""
+    # Initialize the system
+    system = HybridFaceSystem()
+
+    # Load a test image
+    image_path = "test_data/sample_face.jpg"
+    if not os.path.exists(image_path):
+        print(f"Error: Test image not found at {image_path}")
+        return
+
+    image = cv2.imread(image_path)
+    if image is None:
+        print(f"Error: Could not load image from {image_path}")
+        return
+
+    # For demo purposes, we'll use a dummy depth map
+    # In a real application, this would come from a depth sensor
+    depth_data = np.random.rand(*image.shape[:2]) * 255
+    depth_data = depth_data.astype(np.uint8)
+
+    # Process the image
+    results = system.process_image(image, depth_data)
+
+    # Print results
+    print("Hybrid Face System Demo Results:")
+    print(f"Status: {results['status']}")
+
+    if results['status'] == 'success':
+        print(f"Number of faces detected: {results['num_faces']}")
+
+        for i, face_result in enumerate(results['results']):
+            print(f"\nFace #{i+1}:")
+            print(f"  Bounding Box: {face_result['bbox']}")
+            print(f"  Person ID: {face_result['person_id']}")
+            print(f"  Confidence: {face_result['confidence']:.2f}")
+            print(f"  Emotion: {face_result['emotion']}")
+            print(f"  Head Pose: Pitch={face_result['pose']['pitch']:.2f}°, "
+                  f"Yaw={face_result['pose']['yaw']:.2f}°, "
+                  f"Roll={face_result['pose']['roll']:.2f}°")
+
+    # Display system info
+    system_info = system.get_system_info()
+    print("\nSystem Information:")
+    print(f"  System Name: {system_info['system_name']}")
+    print(f"  Version: {system_info['version']}")
+    print(f"  Recognition Mode: {system_info['recognition_mode']}")
+
 
 if __name__ == "__main__":
+    demo()
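Beyond demo(), a typical enrollment-then-recognition round trip might look like the sketch below, assuming the component modules are importable and this file is saved as app.py (the image path reuses the one from demo(); the person ID is illustrative):

    import cv2
    from app import HybridFaceSystem

    system = HybridFaceSystem()

    # Enroll from a 2D image; with no depth data this lands in the 2D
    # database only, so register_face reports 'partial_success'.
    enroll = cv2.imread("test_data/sample_face.jpg")
    print(system.register_face(enroll, person_id="person_001"))

    # Retrain the matchers so the new enrollment becomes searchable.
    print(system.update_model(model_type="2d"))

    # Recognize; without depth data the hybrid branch skips 3D, and the
    # 2D match appears under results[i]['recognition_details']['2d'].
    results = system.process_image(cv2.imread("test_data/sample_face.jpg"))
    print(results["status"], results.get("num_faces"))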