Rudra Rahul Chothe committed
Initial upload

Changed files:
- .gitignore +93 -0
- README.md +109 -0
- data/embeddings.pkl +3 -0
- data/sample-test-images/MEN-Jackets_Vests-id_00003336-06_1_front.jpg +0 -0
- data/sample-test-images/MEN-Shirts_Polos-id_00000204-01_1_front.jpg +0 -0
- data/sample-test-images/MEN-Shirts_Polos-id_00000226-04_1_front.jpg +0 -0
- data/sample-test-images/MEN-Shirts_Polos-id_00000226-05_7_additional.jpg +0 -0
- data/sample-test-images/MEN-Shirts_Polos-id_00000562-02_1_front.jpg +0 -0
- data/sample-test-images/MEN-Shirts_Polos-id_00000649-01_7_additional.jpg +0 -0
- data/sample-test-images/MEN-Shirts_Polos-id_00000846-02_4_full.jpg +0 -0
- data/sample-test-images/MEN-Shirts_Polos-id_00000846-03_1_front.jpg +0 -0
- data/sample-test-images/MEN-Shirts_Polos-id_00000846-06_4_full.jpg +0 -0
- data/sample-test-images/MEN-Shirts_Polos-id_00001919-13_1_front.jpg +0 -0
- data/sample-test-images/MEN-Shirts_Polos-id_00002808-01_1_front.jpg +0 -0
- data/sample-test-images/MEN-Shirts_Polos-id_00003706-01_1_front.jpg +0 -0
- data/sample-test-images/MEN-Shirts_Polos-id_00005489-01_1_front.jpg +0 -0
- data/sample-test-images/MEN-Shorts-id_00000638-01_4_full.jpg +0 -0
- data/sample-test-images/MEN-Shorts-id_00002767-09_1_front.jpg +0 -0
- data/sample-test-images/MEN-Shorts-id_00005151-04_1_front.jpg +0 -0
- data/sample-test-images/WOMEN-Cardigans-id_00003686-01_1_front.jpg +0 -0
- data/sample-test-images/WOMEN-Cardigans-id_00003696-01_1_front.jpg +0 -0
- data/sample-test-images/WOMEN-Cardigans-id_00003778-05_3_back.jpg +0 -0
- data/sample-test-images/WOMEN-Cardigans-id_00003924-10_2_side.jpg +0 -0
- data/sample-test-images/WOMEN-Cardigans-id_00003993-02_1_front.jpg +0 -0
- data/sample-test-images/WOMEN-Cardigans-id_00004057-02_7_additional.jpg +0 -0
- data/sample-test-images/WOMEN-Cardigans-id_00004490-01_1_front.jpg +0 -0
- data/sample-test-images/WOMEN-Cardigans-id_00004765-08_4_full.jpg +0 -0
- data/sample-test-images/WOMEN-Cardigans-id_00005302-01_1_front.jpg +0 -0
- data/sample-test-images/WOMEN-Cardigans-id_00005604-01_1_front.jpg +0 -0
- data/sample-test-images/WOMEN-Cardigans-id_00006279-02_1_front.jpg +0 -0
- data/sample-test-images/WOMEN-Dresses-id_00000002-02_1_front.jpg +0 -0
- data/sample-test-images/WOMEN-Dresses-id_00000021-05_1_front.jpg +0 -0
- data/sample-test-images/WOMEN-Dresses-id_00000088-02_1_front.jpg +0 -0
- data/sample-test-images/WOMEN-Dresses-id_00000170-01_1_front.jpg +0 -0
- requirements.txt +7 -0
- src/__init__.py +29 -0
- src/feature_extractor.py +22 -0
- src/main.py +50 -0
- src/preprocessing.py +62 -0
- src/similarity_search.py +22 -0
- temp_query_image.jpg +0 -0
.gitignore
ADDED
@@ -0,0 +1,93 @@
```
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# Virtual Environment
venv/
env/
ENV/
.env
.venv
env.bak/
venv.bak/

# IDE specific files
.idea/
.vscode/
*.swp
*.swo
.project
.pydevproject
.settings/

# Project specific
# data/
# *.pkl
# temp_query_image.jpg
# embeddings.pkl
*.h5
models/
temp/
logs/

# OS specific
.DS_Store
Thumbs.db
*.db
*.sqlite3

# Jupyter Notebook
.ipynb_checkpoints
*.ipynb

# Distribution / packaging
.Python
*.manifest
*.spec
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Logs
*.log
local_settings.py
db.sqlite3

# Environment variables
.env
.env.local
.env.*.local

# Docker
Dockerfile
docker-compose.yml
.docker/
```
README.md
ADDED
@@ -0,0 +1,109 @@
## Image Similarity Search Engine

A deep learning-based image similarity search engine that uses EfficientNetB0 for feature extraction and FAISS for fast similarity search. The application provides a web interface built with Streamlit for easy interaction.

## Features

- Deep Feature Extraction: Uses EfficientNetB0 (pre-trained on ImageNet) to extract meaningful features from images
- Fast Similarity Search: Implements FAISS for efficient nearest-neighbor search
- Interactive Web Interface: User-friendly interface built with Streamlit
- Real-time Processing: Shows progress and time estimates during feature extraction
- Scalable Architecture: Designed to handle large image datasets efficiently

## Installation

### Prerequisites

- Python 3.8 or higher
- pip package manager

### Setup

1. Clone the repository:
   ```
   git clone https://github.com/yourusername/image-similarity-search.git
   cd image-similarity-search
   ```
2. Create and activate a virtual environment:
   ```
   python -m venv venv
   source venv/bin/activate  # On Windows use: venv\Scripts\activate
   ```
3. Install required packages:
   ```
   pip install -r requirements.txt
   ```

## Project Structure

```
image-similarity-search/
├── data/
│   ├── images/               # Directory for train dataset images
│   ├── sample-test-images/   # Directory for test dataset images
│   └── embeddings.pkl        # Pre-computed image embeddings
├── src/
│   ├── feature_extractor.py  # EfficientNetB0 feature extraction
│   ├── preprocessing.py      # Image preprocessing and embedding computation
│   ├── similarity_search.py  # FAISS-based similarity search
│   └── main.py               # Streamlit web interface
├── requirements.txt
├── README.md
└── .gitignore
```

## Usage

1. **Prepare Your Dataset:**
   Download the training image dataset from Google Drive:
   ```
   https://drive.google.com/file/d/1U2PljA7NE57jcSSzPs21ZurdIPXdYZtN/view?usp=drive_link
   ```
   Place your image dataset in the data/images directory.
   Supported formats: JPG, JPEG, PNG

2. **Generate Embeddings:**
   ```
   python -m src.preprocessing
   ```
   This will:
   - Process all images in the dataset
   - Show progress and time estimates
   - Save embeddings to data/embeddings.pkl

3. **Run the Web Interface:**
   ```
   streamlit run src/main.py
   ```

4. **Use the Interface:**
   - Upload a query image using the file uploader
   - Click "Search Similar Images"
   - View the most similar images from your dataset

## Technical Details

**Feature Extraction**
- Uses EfficientNetB0 without top layers
- Input image size: 224x224 pixels
- Output feature dimension: 1280

**Similarity Search**
- Uses FAISS IndexFlatL2 for L2 distance-based search
- Returns the top-k most similar images (default k=5)

**Web Interface**
- Responsive design with Streamlit
- Displays the query image and the most similar images with their distance scores
- Progress tracking during processing

**Dependencies**
- TensorFlow 2.x
- faiss-cpu (or faiss-gpu for GPU support)
- Streamlit
- Pillow
- NumPy
- tqdm

**Performance**
- Feature extraction: ~1 second per image on CPU
- Similarity search: near real-time for datasets up to 100k images
- Memory usage grows with dataset size (a 1280-dimensional float32 embedding is roughly 5 KB per image: 1280 × 4 bytes)
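
For reference (this snippet is not one of the uploaded files), here is a minimal sketch of the pipeline the Technical Details describe: EfficientNetB0 with global average pooling produces 1280-dimensional vectors, and a FAISS IndexFlatL2 performs exact L2 search over them. The gallery paths below are placeholders; in the app the gallery embeddings are loaded from data/embeddings.pkl rather than recomputed.

```
import numpy as np
import faiss
from tensorflow.keras.applications import EfficientNetB0
from tensorflow.keras.applications.efficientnet import preprocess_input
from tensorflow.keras.preprocessing import image

# EfficientNetB0 without its classification head; global average pooling
# turns each 224x224 image into a 1280-dimensional feature vector.
model = EfficientNetB0(weights="imagenet", include_top=False, pooling="avg")

def embed(img_path):
    img = image.load_img(img_path, target_size=(224, 224))
    arr = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
    return model.predict(arr, verbose=0).flatten().astype("float32")

# Placeholder gallery; the real app loads these vectors from data/embeddings.pkl.
gallery_paths = ["data/images/example_1.jpg", "data/images/example_2.jpg"]
gallery = np.stack([embed(p) for p in gallery_paths])

# Exact (brute-force) L2 search over the 1280-dimensional vectors.
index = faiss.IndexFlatL2(gallery.shape[1])
index.add(gallery)

# Query with k=2 nearest neighbours; a smaller distance means more similar.
distances, indices = index.search(embed("temp_query_image.jpg")[None, :], 2)
print([gallery_paths[i] for i in indices[0]], distances[0])
```

Because IndexFlatL2 is an exact brute-force index, search cost grows linearly with the number of stored vectors, which is why the README scopes "near real-time" to datasets up to roughly 100k images.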
data/embeddings.pkl
ADDED
@@ -0,0 +1,3 @@
```
version https://git-lfs.github.com/spec/v1
oid sha256:bdc230f965cc668bcb0bb7d024c3ef0cde267dca763df634b93b58f512aedc73
size 229887733
```
data/sample-test-images/MEN-Jackets_Vests-id_00003336-06_1_front.jpg
ADDED
data/sample-test-images/MEN-Shirts_Polos-id_00000204-01_1_front.jpg
ADDED
data/sample-test-images/MEN-Shirts_Polos-id_00000226-04_1_front.jpg
ADDED
data/sample-test-images/MEN-Shirts_Polos-id_00000226-05_7_additional.jpg
ADDED
data/sample-test-images/MEN-Shirts_Polos-id_00000562-02_1_front.jpg
ADDED
data/sample-test-images/MEN-Shirts_Polos-id_00000649-01_7_additional.jpg
ADDED
data/sample-test-images/MEN-Shirts_Polos-id_00000846-02_4_full.jpg
ADDED
data/sample-test-images/MEN-Shirts_Polos-id_00000846-03_1_front.jpg
ADDED
data/sample-test-images/MEN-Shirts_Polos-id_00000846-06_4_full.jpg
ADDED
data/sample-test-images/MEN-Shirts_Polos-id_00001919-13_1_front.jpg
ADDED
data/sample-test-images/MEN-Shirts_Polos-id_00002808-01_1_front.jpg
ADDED
data/sample-test-images/MEN-Shirts_Polos-id_00003706-01_1_front.jpg
ADDED
data/sample-test-images/MEN-Shirts_Polos-id_00005489-01_1_front.jpg
ADDED
data/sample-test-images/MEN-Shorts-id_00000638-01_4_full.jpg
ADDED
data/sample-test-images/MEN-Shorts-id_00002767-09_1_front.jpg
ADDED
data/sample-test-images/MEN-Shorts-id_00005151-04_1_front.jpg
ADDED
data/sample-test-images/WOMEN-Cardigans-id_00003686-01_1_front.jpg
ADDED
data/sample-test-images/WOMEN-Cardigans-id_00003696-01_1_front.jpg
ADDED
data/sample-test-images/WOMEN-Cardigans-id_00003778-05_3_back.jpg
ADDED
data/sample-test-images/WOMEN-Cardigans-id_00003924-10_2_side.jpg
ADDED
data/sample-test-images/WOMEN-Cardigans-id_00003993-02_1_front.jpg
ADDED
data/sample-test-images/WOMEN-Cardigans-id_00004057-02_7_additional.jpg
ADDED
data/sample-test-images/WOMEN-Cardigans-id_00004490-01_1_front.jpg
ADDED
data/sample-test-images/WOMEN-Cardigans-id_00004765-08_4_full.jpg
ADDED
data/sample-test-images/WOMEN-Cardigans-id_00005302-01_1_front.jpg
ADDED
data/sample-test-images/WOMEN-Cardigans-id_00005604-01_1_front.jpg
ADDED
data/sample-test-images/WOMEN-Cardigans-id_00006279-02_1_front.jpg
ADDED
data/sample-test-images/WOMEN-Dresses-id_00000002-02_1_front.jpg
ADDED
data/sample-test-images/WOMEN-Dresses-id_00000021-05_1_front.jpg
ADDED
data/sample-test-images/WOMEN-Dresses-id_00000088-02_1_front.jpg
ADDED
data/sample-test-images/WOMEN-Dresses-id_00000170-01_1_front.jpg
ADDED
requirements.txt
ADDED
@@ -0,0 +1,7 @@
```
tensorflow
numpy
opencv-python
scikit-learn
streamlit
Pillow
faiss-cpu
```
src/__init__.py
ADDED
@@ -0,0 +1,29 @@
```
from dotenv import load_dotenv
import os
from huggingface_hub import HfApi, HfFolder, create_repo, upload_folder

# Load environment variables from .env file
load_dotenv()

# Load the access token from the environment variable
access_token = os.getenv("HF_ACCESS_TOKEN")

if access_token is None:
    raise ValueError("HF_ACCESS_TOKEN environment variable is not set")

# Save the token to the Hugging Face folder
HfFolder.save_token(access_token)

# Authenticate
HfFolder.get_token()

# Create a new repository
create_repo("image-search-engine-fashion", private=False)

# Upload the folder
upload_folder(
    folder_path=r"C:\Users\rudra\Documents\GitHub\image-search-engine",
    repo_id="rudra0410/image-search-engine-fashion",
    repo_type="model",
    commit_message="Initial upload"
)
```
src/feature_extractor.py
ADDED
@@ -0,0 +1,22 @@
```
import tensorflow as tf
from tensorflow.keras.applications import EfficientNetB0
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.efficientnet import preprocess_input
import numpy as np

class FeatureExtractor:
    def __init__(self):
        # Load pretrained EfficientNetB0 model without top layers
        base_model = EfficientNetB0(weights='imagenet', include_top=False, pooling='avg')
        self.model = tf.keras.Model(inputs=base_model.input, outputs=base_model.output)

    def extract_features(self, img_path):
        # Load and preprocess the image
        img = image.load_img(img_path, target_size=(224, 224))
        img_array = image.img_to_array(img)
        expanded_img = np.expand_dims(img_array, axis=0)
        preprocessed_img = preprocess_input(expanded_img)

        # Extract features as a flat 1280-dimensional vector
        features = self.model.predict(preprocessed_img)
        return features.flatten()
```
src/main.py
ADDED
@@ -0,0 +1,50 @@
```
import streamlit as st
from PIL import Image
from feature_extractor import FeatureExtractor
from similarity_search import SimilaritySearchEngine

def main():
    st.title('Image Similarity Search')

    # Upload query image
    uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"])

    if uploaded_file is not None:
        # Load the uploaded image
        query_img = Image.open(uploaded_file)

        # Resize and display the query image
        query_img_resized = query_img.resize((263, 385))
        st.image(query_img_resized, caption='Uploaded Image', use_container_width=False)

        # Feature extraction and similarity search
        if st.button("Search Similar Images"):
            with st.spinner("Analyzing query image..."):
                try:
                    # Initialize feature extractor and search engine
                    extractor = FeatureExtractor()
                    search_engine = SimilaritySearchEngine()

                    # Save the uploaded image temporarily (convert to RGB so
                    # PNG/RGBA uploads can be written as JPEG)
                    query_img_path = 'temp_query_image.jpg'
                    query_img.convert('RGB').save(query_img_path)

                    # Extract features from the query image
                    query_embedding = extractor.extract_features(query_img_path)

                    # Perform similarity search
                    similar_images, distances = search_engine.search_similar_images(query_embedding)

                    # Display similar images
                    st.subheader('Similar Images')
                    cols = st.columns(len(similar_images))
                    for i, (img_path, dist) in enumerate(zip(similar_images, distances)):
                        with cols[i]:
                            similar_img = Image.open(img_path).resize((375, 550))
                            st.image(similar_img, caption=f'Distance: {dist:.2f}', use_container_width=True)

                except Exception as e:
                    st.error(f"Error during similarity search: {e}")

if __name__ == '__main__':
    main()
```
src/preprocessing.py
ADDED
@@ -0,0 +1,62 @@
```
import os
import pickle
from .feature_extractor import FeatureExtractor
import time
from tqdm import tqdm

def precompute_embeddings(image_dir='data/images', output_path='data/embeddings.pkl'):
    # Initialize the feature extractor
    extractor = FeatureExtractor()

    embeddings = []
    image_paths = []

    # Get total number of valid images (extension filtering is done here,
    # case-insensitively)
    valid_images = [f for f in os.listdir(image_dir)
                    if f.lower().endswith(('.png', '.jpg', '.jpeg'))]
    total_images = len(valid_images)

    print(f"\nFound {total_images} images to process")

    # Estimate time (assuming ~1 second per image for EfficientNetB0)
    estimated_time = total_images * 1  # 1 second per image
    print(f"Estimated time: {estimated_time//60} minutes and {estimated_time%60} seconds\n")

    # Use tqdm for progress bar
    start_time = time.time()
    for idx, filename in enumerate(tqdm(valid_images, desc="Processing images")):
        img_path = os.path.join(image_dir, filename)
        try:
            # Show current image being processed
            print(f"\rProcessing image {idx+1}/{total_images}: {filename}", end="")

            embedding = extractor.extract_features(img_path)
            embeddings.append(embedding)
            image_paths.append(img_path)

            # Calculate and show remaining time
            elapsed_time = time.time() - start_time
            avg_time_per_image = elapsed_time / (idx + 1)
            remaining_images = total_images - (idx + 1)
            estimated_remaining_time = remaining_images * avg_time_per_image

            print(f" | Remaining time: {estimated_remaining_time//60:.0f}m {estimated_remaining_time%60:.0f}s")

        except Exception as e:
            print(f"\nError processing {filename}: {e}")

    # Save embeddings and paths
    with open(output_path, 'wb') as f:
        pickle.dump({'embeddings': embeddings, 'image_paths': image_paths}, f)

    total_time = time.time() - start_time
    print("\nProcessing complete!")
    print(f"Total time taken: {total_time//60:.0f} minutes and {total_time%60:.0f} seconds")
    print(f"Successfully processed {len(embeddings)}/{total_images} images")
    print(f"Embeddings saved to {output_path}")

    return embeddings, image_paths

if __name__ == "__main__":
    precompute_embeddings()
```
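
As a usage note (not part of the upload): besides `python -m src.preprocessing`, the function can be called directly from the repository root, here with its default arguments made explicit.

```
# Hypothetical direct invocation from the repository root.
from src.preprocessing import precompute_embeddings

embeddings, image_paths = precompute_embeddings(
    image_dir="data/images",           # default location for the training images
    output_path="data/embeddings.pkl"  # default file later read by SimilaritySearchEngine
)
print(f"Stored {len(embeddings)} embeddings for {len(image_paths)} images")
```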
src/similarity_search.py
ADDED
@@ -0,0 +1,22 @@
```
import faiss
import numpy as np
import pickle
import os

class SimilaritySearchEngine:
    def __init__(self, embeddings_path='data/embeddings.pkl'):
        # Load precomputed embeddings
        with open(embeddings_path, 'rb') as f:
            data = pickle.load(f)
            self.embeddings = data['embeddings']
            self.image_paths = data['image_paths']

        # Create FAISS index
        dimension = len(self.embeddings[0])
        self.index = faiss.IndexFlatL2(dimension)
        self.index.add(np.array(self.embeddings))

    def search_similar_images(self, query_embedding, top_k=5):
        # Perform similarity search
        distances, indices = self.index.search(np.array([query_embedding]), top_k)
        return [self.image_paths[idx] for idx in indices[0]], distances[0]
```
temp_query_image.jpg
ADDED