📝 Update README: Replace pickle examples with secure JSON format
README.md (changed)
```diff
@@ -186,19 +186,43 @@ For production ATS scoring, combine the semantic model with the ensemble score m
 
 ```python
 from sentence_transformers import SentenceTransformer
+from sklearn.linear_model import Ridge
+from sklearn.neural_network import MLPRegressor
+from sklearn.preprocessing import PolynomialFeatures
 import numpy as np
-import pickle
+import json
 
 # Load semantic model
 model = SentenceTransformer('0xnbk/nbk-ats-semantic-v1-en')
 
-# Load ensemble weights (
-with open('
-
-with open('
-
-with open('poly_features.
-
+# Load ensemble weights from JSON (secure format, no pickle warnings)
+with open('ridge_weights.json', 'r') as f:
+    ridge_data = json.load(f)
+with open('neural_weights.json', 'r') as f:
+    neural_data = json.load(f)
+with open('poly_features.json', 'r') as f:
+    poly_data = json.load(f)
+
+# Reconstruct models from JSON
+score_mapper = Ridge(alpha=ridge_data['alpha'])
+score_mapper.coef_ = np.array(ridge_data['coefficients'])
+score_mapper.intercept_ = ridge_data['intercept']
+score_mapper.n_features_in_ = ridge_data['n_features_in']
+
+neural_mapper = MLPRegressor(
+    hidden_layer_sizes=tuple(neural_data['hidden_layer_sizes']),
+    activation=neural_data['activation']
+)
+neural_mapper.coefs_ = [np.array(c) for c in neural_data['coefs']]
+neural_mapper.intercepts_ = [np.array(i) for i in neural_data['intercepts']]
+neural_mapper.n_features_in_ = neural_data['n_features_in']
+
+poly_features = PolynomialFeatures(
+    degree=poly_data['degree'],
+    include_bias=poly_data['include_bias']
+)
+poly_features.n_features_in_ = poly_data['n_features_in']
+poly_features.n_output_features_ = poly_data['n_output_features']
 
 def predict_ats_score(resume_text, job_text):
     # Generate embeddings
```
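For reference, below is a minimal export-side sketch showing one way the three JSON weight files read above could be produced from already-fitted scikit-learn objects. The `export_ensemble_weights` helper and the fitted-object variable names are hypothetical and not part of this commit; only the filenames and JSON keys are taken from the loading code in the diff, so the repository's actual export script may differ.

```python
# Hypothetical export sketch (not part of this commit): writes the same JSON
# files that the README's loading code reads. Assumes score_mapper,
# neural_mapper and poly_features are already-fitted scikit-learn objects.
import json
import numpy as np

def export_ensemble_weights(score_mapper, neural_mapper, poly_features):
    """Serialize the fitted ensemble components to plain JSON (no pickle)."""
    ridge_data = {
        'alpha': score_mapper.alpha,
        'coefficients': np.asarray(score_mapper.coef_).tolist(),
        'intercept': float(score_mapper.intercept_),
        'n_features_in': int(score_mapper.n_features_in_),
    }
    neural_data = {
        'hidden_layer_sizes': list(neural_mapper.hidden_layer_sizes),
        'activation': neural_mapper.activation,
        'coefs': [c.tolist() for c in neural_mapper.coefs_],
        'intercepts': [i.tolist() for i in neural_mapper.intercepts_],
        'n_features_in': int(neural_mapper.n_features_in_),
    }
    poly_data = {
        'degree': poly_features.degree,
        'include_bias': poly_features.include_bias,
        'n_features_in': int(poly_features.n_features_in_),
        'n_output_features': int(poly_features.n_output_features_),
    }
    # Write each component to its own JSON file, matching the filenames
    # expected by the README's loading code.
    for path, data in [('ridge_weights.json', ridge_data),
                       ('neural_weights.json', neural_data),
                       ('poly_features.json', poly_data)]:
        with open(path, 'w') as f:
            json.dump(data, f)
```

Because these files hold only plain lists, numbers, and booleans, they can be read back with `json.load` without executing arbitrary code, which is the security advantage over pickle that this change is about.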