wayne-chi committed on
Commit d36373e · verified · 1 Parent(s): c26c1d1

Upload 4 files

Files changed (4)
  1. eagleblend.db +1 -1
  2. inference.py +209 -0
  3. predictor.py +35 -2
  4. test_app.py +492 -90
eagleblend.db CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a8300999f2674af7455b937bb6105f7b126790b5c6a4ce2cc27ec0b9c13a9151
3
  size 3096576
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:38cccc7257ad01d8d5ca0ac26a68090f4ac7c26fd2c3a76544fb27cfadb344e4
3
  size 3096576
inference.py ADDED
@@ -0,0 +1,209 @@
1
+ import pandas as pd
2
+ import numpy as np
3
+ import torch
4
+ import joblib
5
+ import argparse
6
+ import os
7
+ import glob
8
+ from sklearn.multioutput import MultiOutputRegressor
9
+ from tabpfn_extensions.post_hoc_ensembles.sklearn_interface import AutoTabPFNRegressor
10
+
11
+ class TabPFNEnsemblePredictor:
12
+ """
13
+ A class to load an ensemble of TabPFN models and generate averaged predictions.
14
+
15
+ This class is designed to find and load all k-fold models from a specified
16
+ directory, handle the necessary feature engineering, and produce a single,
17
+ ensembled prediction from various input types (DataFrame, numpy array, or CSV file path).
18
+
19
+ Attributes:
20
+ model_paths (list): A list of file paths for the loaded models.
21
+ models (list): A list of the loaded model objects.
22
+ target_cols (list): The names of the target columns for the output DataFrame.
23
+ """
24
+
25
+ def __init__(self, model_dir: str, model_pattern: str = "Fold_*_best_model.tabpfn_fit"):
26
+ """
27
+ Initializes the predictor by finding and loading the ensemble of models.
28
+
29
+ Args:
30
+ model_dir (str): The directory containing the saved .tabpfn_fit model files.
31
+ model_pattern (str, optional): The glob pattern to find model files.
32
+ Defaults to "Fold_*_best_model.tabpfn_fit".
33
+
34
+ Raises:
35
+ FileNotFoundError: If no models matching the pattern are found in the directory.
36
+ """
37
+ print("Initializing the TabPFN Ensemble Predictor...")
38
+ self.model_paths = sorted(glob.glob(os.path.join(model_dir, model_pattern)))
39
+ if not self.model_paths:
40
+ raise FileNotFoundError(
41
+ f"Error: No models found in '{model_dir}' matching the pattern '{model_pattern}'"
42
+ )
43
+
44
+ print(f"Found {len(self.model_paths)} models to form the ensemble.")
45
+ self.models = self._load_models()
46
+ self.target_cols = [f"BlendProperty{i}" for i in range(1, 11)]
47
+
48
+ def _load_models(self) -> list:
49
+ """
50
+ Loads the TabPFN models from the specified paths and moves them to the CPU.
51
+
52
+ This is a private method called during initialization.
53
+ """
54
+ loaded_models = []
55
+ for model_path in self.model_paths:
56
+ print(f"Loading model: {os.path.basename(model_path)}...")
57
+ try:
58
+ # Move model components to CPU for inference to avoid potential CUDA errors
59
+ # and ensure compatibility on machines without a GPU.
60
+ if not torch.cuda.is_available():
61
+ print("CUDA not available; using CPU.")
62
+ model = joblib.load(model_path)
63
+ for estimator in model.estimators_:
64
+ if hasattr(estimator, "predictor_") and hasattr(estimator.predictor_, "predictors"):
65
+ for p in estimator.predictor_.predictors:
66
+ p.to("cpu")
67
+ else:
68
+ print("CUDA is available.")
69
+ model = joblib.load(model_path)
70
+
71
+ loaded_models.append(model)
72
+ print(f"Successfully loaded {os.path.basename(model_path)}")
73
+ except Exception as e:
74
+ print(f"Warning: Could not load model from {model_path}. Skipping. Error: {e}")
75
+ return loaded_models
76
+
77
+ @staticmethod
78
+ def _feature_engineering(df: pd.DataFrame) -> pd.DataFrame:
79
+ """
80
+ Applies feature engineering to the input dataframe. This is a static method
81
+ as it does not depend on the state of the class instance.
82
+
83
+ Args:
84
+ df (pd.DataFrame): The input dataframe.
85
+
86
+ Returns:
87
+ pd.DataFrame: The dataframe with new engineered features.
88
+ """
89
+ components = ['Component1', 'Component2', 'Component3', 'Component4', 'Component5']
90
+ properties = [f'Property{i}' for i in range(1, 11)]
91
+ df_featured = df.copy()
92
+
93
+ for prop in properties:
94
+ df_featured[f'Weighted_{prop}'] = sum(
95
+ df_featured[f'{comp}_fraction'] * df_featured[f'{comp}_{prop}'] for comp in components
96
+ )
97
+ cols = [f'{comp}_{prop}' for comp in components]
98
+ df_featured[f'{prop}_variance'] = df_featured[cols].var(axis=1)
99
+ df_featured[f'{prop}_range'] = df_featured[cols].max(axis=1) - df_featured[cols].min(axis=1)
100
+
101
+ return df_featured
102
+
103
+ def predict(self, input_data: pd.DataFrame or np.ndarray or str) -> (np.ndarray, pd.DataFrame):
104
+ """
105
+ Generates ensembled predictions for the given input data.
106
+
107
+ This method takes input data, preprocesses it if necessary, generates a
108
+ prediction from each model in the ensemble, and returns the averaged result.
109
+
110
+ Args:
111
+ input_data (pd.DataFrame or np.ndarray or str): The input data for prediction.
112
+ Can be a pandas DataFrame, a numpy array (must be pre-processed),
113
+ or a string path to a CSV file.
114
+
115
+ Returns:
116
+ tuple: A tuple containing:
117
+ - np.ndarray: The averaged predictions as a numpy array.
118
+ - pd.DataFrame: The averaged predictions as a pandas DataFrame.
119
+ """
120
+ if not self.models:
121
+ print("Error: No models were loaded. Cannot make predictions.")
122
+ return None, None
123
+
124
+ # --- Data Preparation ---
125
+ if isinstance(input_data, str) and os.path.isfile(input_data):
126
+ print(f"Loading and processing data from CSV: {input_data}")
127
+ test_df = pd.read_csv(input_data)
128
+ processed_df = self._feature_engineering(test_df)
129
+ elif isinstance(input_data, pd.DataFrame):
130
+ print("Processing input DataFrame...")
131
+ processed_df = self._feature_engineering(input_data)
132
+ elif isinstance(input_data, np.ndarray):
133
+ print("Using input numpy array directly (assuming it's pre-processed).")
134
+ sub = input_data
135
+ else:
136
+ raise TypeError("Input data must be a pandas DataFrame, a numpy array, or a path to a CSV file.")
137
+
138
+ if isinstance(input_data, (str, pd.DataFrame)):
139
+ if "ID" in processed_df.columns:
140
+ sub = processed_df.drop(columns=["ID"]).values
141
+ else:
142
+ sub = processed_df.values
143
+
144
+ # --- Prediction Loop ---
145
+ all_fold_predictions = []
146
+ print("\nGenerating predictions from the model ensemble...")
147
+ for i, model in enumerate(self.models):
148
+ try:
149
+ y_sub = model.predict(sub)
150
+ all_fold_predictions.append(y_sub)
151
+ print(f" - Prediction from model {i+1} completed.")
152
+ except Exception as e:
153
+ print(f" - Warning: Could not predict with model {i+1}. Skipping. Error: {e}")
154
+
155
+ if not all_fold_predictions:
156
+ print("\nError: No predictions were generated from any model.")
157
+ return None, None
158
+
159
+ # --- Averaging ---
160
+ print("\nAveraging predictions from all models...")
161
+ averaged_preds_array = np.mean(all_fold_predictions, axis=0)
162
+ averaged_preds_df = pd.DataFrame(averaged_preds_array, columns=self.target_cols)
163
+ print("Ensemble prediction complete.")
164
+
165
+ return averaged_preds_array, averaged_preds_df
166
+
167
+ # This block allows the script to be run directly from the command line
168
+ if __name__ == "__main__":
169
+ parser = argparse.ArgumentParser(
170
+ description="""
171
+ Command-line interface for the TabPFNEnsemblePredictor.
172
+
173
+ Example Usage:
174
+ python inference.py --model_dir ./saved_models/ --input_path ./test_data.csv --output_path ./final_preds.csv
175
+ """,
176
+ formatter_class=argparse.RawTextHelpFormatter
177
+ )
178
+
179
+ parser.add_argument("--model_dir", type=str, required=True,
180
+ help="Directory containing the saved .tabpfn_fit model files.")
181
+ parser.add_argument("--input_path", type=str, required=True,
182
+ help="Path to the input CSV file for prediction.")
183
+ parser.add_argument("--output_path", type=str, default="predictions_ensembled.csv",
184
+ help="Path to save the final ensembled predictions CSV file.")
185
+
186
+ args = parser.parse_args()
187
+
188
+ if not os.path.isdir(args.model_dir):
189
+ print(f"Error: Model directory not found at {args.model_dir}")
190
+ elif not os.path.exists(args.input_path):
191
+ print(f"Error: Input file not found at {args.input_path}")
192
+ else:
193
+ try:
194
+ # 1. Instantiate the predictor class
195
+ predictor = TabPFNEnsemblePredictor(model_dir=args.model_dir)
196
+
197
+ # 2. Call the predict method
198
+ preds_array, preds_df = predictor.predict(args.input_path)
199
+
200
+ # 3. Save the results
201
+ if preds_df is not None:
202
+ preds_df.to_csv(args.output_path, index=False)
203
+ print(f"\nEnsembled predictions successfully saved to {args.output_path}")
204
+ print("\n--- Sample of Final Averaged Predictions ---")
205
+ print(preds_df.head())
206
+ print("------------------------------------------")
207
+
208
+ except Exception as e:
209
+ print(f"\nAn error occurred during the process: {e}")
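For reference, a minimal sketch of how the new TabPFNEnsemblePredictor can be driven from Python instead of the CLI entry point above; the directory and file names are the same illustrative placeholders used in the script's own help text:

    from inference import TabPFNEnsemblePredictor

    # Point the predictor at a directory of Fold_*_best_model.tabpfn_fit files.
    predictor = TabPFNEnsemblePredictor(model_dir="./saved_models/")

    # predict() accepts a CSV path, a DataFrame, or a pre-processed numpy array and
    # returns the averaged ensemble predictions as both an array and a DataFrame
    # (or (None, None) if no model could produce a prediction).
    preds_array, preds_df = predictor.predict("./test_data.csv")
    if preds_df is not None:
        preds_df.to_csv("./final_preds.csv", index=False)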
predictor.py CHANGED
@@ -45,7 +45,7 @@ from scipy.special import comb
45
 
46
 
47
  class EagleBlendPredictor:
48
- def __init__(self, model_sources = './Models'):
49
  """
50
  model_sources: Dict[str, Any]
51
  A dictionary where keys are 'BlendProperty1', ..., 'BlendProperty10'
@@ -121,7 +121,7 @@ class EagleBlendPredictor:
121
  self.model_10 = joblib.load(os.path.join(self.home, self.saved_files_map[10]["model"]))
122
  self.poly_10 = joblib.load(os.path.join(self.home, self.saved_files_map[10]["transform"]))
123
 
124
- self.model_3489 = TabPFNEnsemblePredictor(model_dir="Models")
125
  pass
126
 
127
 
@@ -260,6 +260,39 @@ class EagleBlendPredictor:
260
  return predictions_df
261
 
262
 
263
 
264
 
265
 
 
45
 
46
 
47
  class EagleBlendPredictor:
48
+ def __init__(self, model_sources = 'Models'):
49
  """
50
  model_sources: Dict[str, Any]
51
  A dictionary where keys are 'BlendProperty1', ..., 'BlendProperty10'
 
121
  self.model_10 = joblib.load(os.path.join(self.home, self.saved_files_map[10]["model"]))
122
  self.poly_10 = joblib.load(os.path.join(self.home, self.saved_files_map[10]["transform"]))
123
 
124
+ self.model_3489 = TabPFNEnsemblePredictor(model_dir=self.home)
125
  pass
126
 
127
 
 
260
  return predictions_df
261
 
262
 
263
+ def predict_fast(self, df: pd.DataFrame) -> pd.DataFrame:
264
+ """
265
+ Generates predictions for the faster blend properties using the individual prediction methods.
266
+
267
+ Args:
268
+ df: Input DataFrame containing the features.
269
+
270
+ Returns:
271
+ DataFrame with predicted blend properties for 1,2,5,6,7,10.
272
+ """
273
+ predictions_list = []
274
+
275
+ # Predict individual properties
276
+ predictions_list.append(self.predict_BlendProperty1(df, full=True))
277
+ predictions_list.append(self.predict_BlendProperty2(df, full=True))
278
+
279
+ predictions_list.append(self.predict_BlendProperty5(df, full=True))
280
+ predictions_list.append(self.predict_BlendProperty6(df, full=True))
281
+ predictions_list.append(self.predict_BlendProperty7(df, full=True))
282
+
283
+ predictions_list.append(self.predict_BlendProperty10(df, full=True))
284
+
285
+
286
+ # Concatenate the list of single-column DataFrames into a single DataFrame
287
+ predictions_df = pd.concat(predictions_list, axis=1)
288
+
289
+ # Ensure columns are in the desired order
290
+ ordered_cols = [f'BlendProperty{i}' for i in [1,2,5,6,7,10]]
291
+ # Reindex to ensure columns are in order, dropping any not generated (though all should be)
292
+ predictions_df = predictions_df.reindex(columns=ordered_cols)
293
+
294
+
295
+ return predictions_df
296
 
297
 
298
 
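A minimal usage sketch for the updated EagleBlendPredictor, assuming the bundled 'Models' directory is present and df is a hypothetical DataFrame carrying the component fraction and property columns the predictor expects:

    from predictor import EagleBlendPredictor

    predictor = EagleBlendPredictor()       # loads models from the default 'Models' directory
    fast_df = predictor.predict_fast(df)    # BlendProperty 1, 2, 5, 6, 7 and 10 only
    full_df = predictor.predict_all(df)     # all ten blend properties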
test_app.py CHANGED
@@ -10,7 +10,8 @@ from typing import Optional, Dict, Any
10
  from datetime import datetime, timedelta
11
  import re
12
  from pathlib import Path
13
- # from blend_logic import run_dummy_prediction
 
14
 
15
##---- functions ------
16
  # Load fuel data from CSV (create this file if it doesn't exist)
@@ -64,7 +65,14 @@ st.markdown("""
64
  background-color: #f8f5f0;
65
  overflow: visible;
66
  padding-top: 0
67
-
68
  }
69
  /* Remove unnecessary space at the top */
70
  /* Remove any fixed headers */
@@ -264,7 +272,19 @@ st.markdown("""
264
  }
265
 
266
 
267
-
268
 
269
 
270
  /* Color scale adjustments */
@@ -836,8 +856,6 @@ with tabs[0]:
836
  # Blend Designer Tab
837
  # ----------------------------------------------------------------------------------------------------------------------------------------------
838
 
839
- from inference import EagleBlendPredictor # Add this import at the top of your main script
840
-
841
  # --- Add these new functions to your functions section ---
842
 
843
  @st.cache_data
@@ -919,8 +937,11 @@ with tabs[1]:
919
 
920
  # 4. Run prediction
921
  predictor = st.session_state.predictor
922
- results = predictor.predict_all(df_model.drop(columns=['blend_name']))
923
- st.session_state.prediction_results = results[0] # Get the first (and only) row of results
 
 
 
924
 
925
  # --- Conditional cost calculation ---
926
  # 5. Calculate cost only if all unit costs are provided and greater than zero
@@ -974,12 +995,74 @@ with tabs[1]:
974
  with col_header[1]:
975
  batch_blend = st.checkbox("Batch Blend Mode", value=False, key="batch_blend_mode")
976
 
 
977
  if batch_blend:
978
  st.subheader("📤 Batch Processing")
979
- uploaded_file = st.file_uploader("Upload CSV File", type=["csv"], key="Batch_upload")
980
- if uploaded_file:
981
- st.info("Batch processing functionality can be implemented here.")
982
- # Add batch processing logic here
983
  else:
984
  # --- Manual Blend Designer UI ---
985
  all_components_df = get_components_from_db()
@@ -1202,72 +1285,291 @@ with tabs[1]:
1202
  # Optimization Engine Tab
1203
  # ----------------------------------------------------------------------------------------------------------------------------------------------
1204
 
1205
- with tabs[2]:
1206
- st.subheader("⚙️ Optimization Engine")
1207
 
1208
- # Pareto frontier demo
1209
- st.markdown("#### Cost vs Performance Trade-off")
1210
- np.random.seed(42)
1211
- optimization_data = pd.DataFrame({
1212
- 'Cost ($/ton)': np.random.uniform(100, 300, 50),
1213
- 'Performance Score': np.random.uniform(70, 95, 50)
1214
- })
1215
 
1216
- fig3 = px.scatter(
1217
- optimization_data,
1218
- x='Cost ($/ton)',
1219
- y='Performance Score',
1220
- title="Potential Blend Formulations",
1221
- color='Performance Score',
1222
- color_continuous_scale='YlOrBr'
1223
- )
1224
 
1225
- # Add dummy pareto frontier
1226
- x_pareto = np.linspace(100, 300, 10)
1227
- y_pareto = 95 - 0.1*(x_pareto-100)
1228
- fig3.add_trace(px.line(
1229
- x=x_pareto,
1230
- y=y_pareto,
1231
- color_discrete_sequence= ['#8B4513', '#CFB53B', '#654321']
1232
- ).data[0])
1233
 
1234
- fig3.update_layout(
1235
- showlegend=False,
1236
- annotations=[
1237
- dict(
1238
- x=200,
1239
- y=88,
1240
- text="Pareto Frontier",
1241
- showarrow=True,
1242
- arrowhead=1,
1243
- ax=-50,
1244
- ay=-30
1245
- )
1246
- ]
1247
- )
1248
- st.plotly_chart(fig3, use_container_width=True)
1249
 
1250
- # Blend optimization history
1251
- st.markdown("#### Optimization Progress")
1252
- iterations = np.arange(20)
1253
- performance = np.concatenate([np.linspace(70, 85, 10), np.linspace(85, 89, 10)])
1254
 
1255
- fig4 = px.line(
1256
- x=iterations,
1257
- y=performance,
1258
- title="Best Performance by Iteration",
1259
- markers=True
1260
- )
1261
- fig4.update_traces(
1262
- line_color='#1d3b58',
1263
- marker_color='#2c5282',
1264
- line_width=2.5
1265
- )
1266
- fig4.update_layout(
1267
- yaxis_title="Performance Score",
1268
- xaxis_title="Iteration"
1269
- )
1270
- st.plotly_chart(fig4, use_container_width=True)
1271
 
1272
  # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------
1273
  # Blend Comparison Tab
@@ -1503,6 +1805,40 @@ with tabs[3]:
1503
  )
1504
  st.plotly_chart(fig_composite, use_container_width=True)
1505
 
1506
  # ----------------------------------------------------------------------------------------------------------------------------------------------
1507
  # Fuel Registry Tab
1508
  # ---------------------------------------------------------------------------------------------------------------------------------------------
@@ -1713,6 +2049,39 @@ with tabs[4]:
1713
  else:
1714
  del st.session_state.blends
1715
  st.rerun()
1716
 
1717
 
1718
  # ----------------------------------------------------------------------------------------------------------------------------------------------
@@ -1776,27 +2145,60 @@ with tabs[5]:
1776
  </style>
1777
  """, unsafe_allow_html=True)
1778
 
1779
- # --- Floating "How to Use" Button and Panel ---
1780
  st.markdown("""
1781
- <input id="help-toggle" type="checkbox" />
1782
- <label for="help-toggle" class="help-button">💬 Help</label>
1783
-
1784
- <div class="help-panel" aria-hidden="true">
1785
- <div class="head">
1786
- <div class="title">Interpreting Model Insights</div>
1787
- <label for="help-toggle" class="help-close">Close</label>
1788
- </div>
1789
- <div class="help-body">
1790
- <p><b>KPI Cards:</b> These four cards give you a quick summary of the model's overall health.</p>
1791
- <ul>
1792
- <li><b>Overall Score:</b> Think of this as the model's accuracy grade. A score of 92.4% means the model's predictions are highly accurate.</li>
1793
- <li><b>MSE (Mean Squared Error):</b> This measures the average size of the model's mistakes. A smaller number is better.</li>
1794
- <li><b>MAPE (Mean Absolute % Error):</b> This tells you the average error in percentage terms. A value of 0.112 means predictions are off by about 11.2% on average.</li>
1795
- </ul>
1796
- <p><b>R² Score by Blend Property Chart:</b> This chart shows how well the model predicts each specific property.</p>
1797
- <p>A <b>longer bar</b> means the model is very good at predicting that property. A <b>shorter bar</b> indicates a property that is harder for the model to predict accurately. This helps you trust predictions for some properties more than others.</p>
1798
  </div>
1799
- </div>
1800
  """, unsafe_allow_html=True)
1801
 
1802
  # --- Main Title ---
 
10
  from datetime import datetime, timedelta
11
  import re
12
  from pathlib import Path
13
+ from predictor import EagleBlendPredictor
14
+
15
 
16
##---- functions ------
17
  # Load fuel data from CSV (create this file if it doesn't exist)
 
65
  background-color: #f8f5f0;
66
  overflow: visible;
67
  padding-top: 0
68
+
69
+ /* --- ADD THIS CSS FOR THE NEW HELP BUTTONS --- */
70
+ #help-toggle-insights:checked ~ .help-panel-insights,
71
+ #help-toggle-registry:checked ~ .help-panel-registry,
72
+ #help-toggle-comparison:checked ~ .help-panel-comparison {
73
+ opacity: 1; visibility: visible; transform: translateY(0);
74
+ }
75
+
76
  }
77
  /* Remove unnecessary space at the top */
78
  /* Remove any fixed headers */
 
272
  }
273
 
274
 
275
+ /* --- Add this CSS class for the spinner --- */
276
+ @keyframes spin {
277
+ 0% { transform: rotate(0deg); }
278
+ 100% { transform: rotate(360deg); }
279
+ }
280
+ .spinner {
281
+ border: 4px solid rgba(0,0,0,0.1);
282
+ border-left-color: #8B4513;
283
+ border-radius: 50%;
284
+ width: 24px;
285
+ height: 24px;
286
+ animation: spin 1s linear infinite;
287
+ }
288
 
289
 
290
  /* Color scale adjustments */
 
856
  # Blend Designer Tab
857
  # ----------------------------------------------------------------------------------------------------------------------------------------------
858
 
 
 
859
  # --- Add these new functions to your functions section ---
860
 
861
  @st.cache_data
 
937
 
938
  # 4. Run prediction
939
  predictor = st.session_state.predictor
940
+ # results = predictor.predict_all(df_model.drop(columns=['blend_name']))
941
+ # st.session_state.prediction_results = results[0] # Get the first (and only) row of results
942
+ # --- FIX: Handles DataFrame output and converts it to an array for single prediction ---
943
+ results_df = predictor.predict_all(df_model.drop(columns=['blend_name']))
944
+ st.session_state.prediction_results = results_df.iloc[0].values
945
 
946
  # --- Conditional cost calculation ---
947
  # 5. Calculate cost only if all unit costs are provided and greater than zero
 
995
  with col_header[1]:
996
  batch_blend = st.checkbox("Batch Blend Mode", value=False, key="batch_blend_mode")
997
 
998
+ # --- This is the new, fully functional batch mode block ---
999
  if batch_blend:
1000
  st.subheader("📤 Batch Processing")
1001
+ st.markdown("Upload a CSV file with blend recipes to predict their properties in bulk. The file must contain the 55 feature columns required by the model.")
1002
+
1003
+ # Provide a template for download
1004
+ # NOTE: You will need to create a dummy CSV file named 'batch_template.csv'
1005
+ # with the 55 required column headers for this to work.
1006
+ try:
1007
+ with open("assets/batch_template.csv", "rb") as f:
1008
+ st.download_button(
1009
+ label="📥 Download Batch Template (CSV)",
1010
+ data=f,
1011
+ file_name="batch_template.csv",
1012
+ mime="text/csv"
1013
+ )
1014
+ except FileNotFoundError:
1015
+ st.warning("Batch template file not found. Please create 'assets/batch_template.csv'.")
1016
+
1017
+
1018
+ uploaded_file = st.file_uploader("Upload your CSV file", type=["csv"], key="batch_upload")
1019
+
1020
+ if uploaded_file is not None:
1021
+ try:
1022
+ input_df = pd.read_csv(uploaded_file)
1023
+ st.markdown("##### Uploaded Data Preview")
1024
+ st.dataframe(input_df.head())
1025
+
1026
+ if st.button("🧪 Run Batch Prediction", use_container_width=True, type="primary"):
1027
+ # Basic validation: check for at least the fraction columns
1028
+ required_cols = [f'Component{i+1}_fraction' for i in range(5)]
1029
+ if not all(col in input_df.columns for col in required_cols):
1030
+ st.error(f"Invalid file format. The uploaded CSV is missing one or more required columns like: {', '.join(required_cols)}")
1031
+ else:
1032
+ with st.spinner("Running batch prediction... This may take a moment."):
1033
+ # Run prediction on the entire DataFrame
1034
+ predictor = st.session_state.predictor
1035
+ results_df = predictor.predict_all(input_df)
1036
+
1037
+ # Combine original data with the results
1038
+ # Ensure column names for results are clear
1039
+ results_df.columns = [f"BlendProperty{i+1}" for i in range(results_df.shape[1])]
1040
+
1041
+ # Combine input and output dataframes
1042
+ final_df = pd.concat([input_df.reset_index(drop=True), results_df.reset_index(drop=True)], axis=1)
1043
+
1044
+ st.session_state['batch_results'] = final_df
1045
+ st.success("Batch prediction complete!")
1046
+
1047
+ except Exception as e:
1048
+ st.error(f"An error occurred while processing the file: {e}")
1049
+
1050
+ # Display results and download button if they exist in the session state
1051
+ if 'batch_results' in st.session_state:
1052
+ st.markdown("---")
1053
+ st.subheader("✅ Batch Prediction Results")
1054
+
1055
+ results_to_show = st.session_state['batch_results']
1056
+ st.dataframe(results_to_show)
1057
+
1058
+ csv_data = results_to_show.to_csv(index=False).encode('utf-8')
1059
+ st.download_button(
1060
+ label="📥 Download Full Results (CSV)",
1061
+ data=csv_data,
1062
+ file_name="batch_prediction_results.csv",
1063
+ mime="text/csv",
1064
+ use_container_width=True
1065
+ )
1066
  else:
1067
  # --- Manual Blend Designer UI ---
1068
  all_components_df = get_components_from_db()
 
1285
  # Optimization Engine Tab
1286
  # ----------------------------------------------------------------------------------------------------------------------------------------------
1287
 
1288
+ import time # Add this import to the top of your script
1289
+
1290
+ # --- Add this new function to your functions section ---
1291
+ def dummy_optimization_function(targets, fixed_targets, components_data):
1292
+ """
1293
+ Placeholder for your actual optimization algorithm.
1294
+ This function simulates a multi-objective optimization.
1295
 
1296
+ Returns:
1297
+ A list of dictionaries, where each dictionary represents a solution.
1298
+ """
1299
+ print("--- Running Dummy Optimization ---")
1300
+ print("Targets:", targets)
1301
+ print("Fixed Targets:", fixed_targets)
1302
+ print("---------------------------------")
1303
 
1304
+ # Simulate a process that takes a few seconds
1305
+ time.sleep(3)
 
1306
 
1307
+ # Generate 3 dummy solutions
1308
+ solutions = []
1309
+ for i in range(3):
1310
+ # Create slightly different results for each solution
1311
+ base_frac = 0.2 + (i * 0.05)
1312
+ fractions = np.random.rand(5)
1313
+ fractions = fractions / fractions.sum() # Normalize to sum to 1
1314
+
1315
+ blend_properties = [val + np.random.uniform(-0.5, 0.5) for val in targets.values()]
1316
+
1317
+ # Ensure fixed targets are met in the dummy result
1318
+ for prop, val in fixed_targets.items():
1319
+ prop_index = int(prop.replace('Property', '')) - 1
1320
+ blend_properties[prop_index] = val
1321
+
1322
+ solution = {
1323
+ "component_fractions": fractions,
1324
+ "blend_properties": np.array(blend_properties),
1325
+ "optimized_cost": 150.0 - (i * 10),
1326
+ "error": 0.05 + (i * 0.02) # Dummy error for the Pareto plot
1327
+ }
1328
+ solutions.append(solution)
1329
+
1330
+ return solutions
1331
 
1332
+
1333
+ with tabs[2]:
1334
+ st.subheader("⚙️ Optimization Engine")
1335
+ st.markdown("Define your property goals, select base components, and run the optimizer to find the ideal blend recipe.")
1336
+
1337
+ # --- State Initialization ---
1338
+ if 'optimization_running' not in st.session_state:
1339
+ st.session_state.optimization_running = False
1340
+ if 'optimization_results' not in st.session_state:
1341
+ st.session_state.optimization_results = None
1342
+ if 'optimization_time' not in st.session_state:
1343
+ st.session_state.optimization_time = 0.0
1344
+
1345
+ # --- Optimization Goals ---
1346
+ st.markdown("#### 1. Define Optimization Goals")
1347
 
1348
+ # Using a container to group the goal inputs
1349
+ with st.container(border=True):
1350
+ cols_row1 = st.columns(5)
1351
+ cols_row2 = st.columns(5)
1352
+
1353
+ for i in range(1, 11):
1354
+ col = cols_row1[(i-1)] if i <= 5 else cols_row2[(i-6)]
1355
+ with col:
1356
+ st.number_input(f"Property {i}", key=f"opt_target_{i}", value=0.0, step=0.01, format="%.4f")
1357
+ st.toggle("Fix Target", key=f"opt_fix_{i}", help=f"Toggle on to make Property {i} a fixed constraint.")
1358
+
1359
+ # --- Component Selection (Copied and Adapted) ---
1360
+ st.markdown("#### 2. Select Initial Components")
1361
+ all_components_df_opt = get_components_from_db() # Use a different variable to avoid conflicts
1362
 
1363
+ main_cols = st.columns(2)
1364
+ with main_cols[0]: # Left side for first 3 components
1365
+ for i in range(3):
1366
+ with st.expander(f"**Component {i+1}**", expanded=(i==0)):
1367
+ # Auto-population and input fields logic (reused from Blend Designer)
1368
+ # Note: Keys are prefixed with 'opt_' to ensure they are unique to this tab
1369
+ select_key, name_key, frac_key, cost_key = f"opt_c{i}_select", f"opt_c{i}_name", f"opt_c{i}_fraction", f"opt_c{i}_cost"
1370
+
1371
+ # Auto-population logic...
1372
+ if select_key in st.session_state and st.session_state[select_key] != "---":
1373
+ selected_name = st.session_state[select_key]
1374
+ comp_data = all_components_df_opt[all_components_df_opt['component_name'] == selected_name].iloc[0]
1375
+ st.session_state[name_key] = comp_data['component_name']
1376
+ st.session_state[frac_key] = comp_data.get('component_fraction', 0.2)
1377
+ cost_val = comp_data.get('unit_cost', 0.0)
1378
+ st.session_state[cost_key] = 0.0 if pd.isna(cost_val) else float(cost_val)
1379
+ for j in range(1, 11):
1380
+ st.session_state[f"opt_c{i}_prop{j}"] = comp_data.get(f'property{j}', 0.0)
1381
+ st.session_state[select_key] = "---"
1382
+
1383
+ # UI for component
1384
+ component_options = ["---"] + all_components_df_opt['component_name'].tolist()
1385
+ st.selectbox("Load from Registry", options=component_options, key=select_key)
1386
+ c1, c2, c3 = st.columns([1.5, 2, 2])
1387
+ with c1:
1388
+ st.text_input("Component Name", key=name_key)
1389
+ st.number_input("Unit Cost ($)", min_value=0.0, step=0.01, key=cost_key, format="%.2f")
1390
+ with c2:
1391
+ for j in range(1, 6): st.number_input(f"Property {j}", key=f"opt_c{i}_prop{j}", format="%.4f")
1392
+ with c3:
1393
+ for j in range(6, 11): st.number_input(f"Property {j}", key=f"opt_c{i}_prop{j}", format="%.4f")
1394
+
1395
+ with main_cols[1]: # Right side for last 2 components and controls
1396
+ for i in range(3, 5):
1397
+ with st.expander(f"**Component {i+1}**", expanded=False):
1398
+ # Auto-population and input fields logic...
1399
+ select_key, name_key, frac_key, cost_key = f"opt_c{i}_select", f"opt_c{i}_name", f"opt_c{i}_fraction", f"opt_c{i}_cost"
1400
+ if select_key in st.session_state and st.session_state[select_key] != "---":
1401
+ selected_name = st.session_state[select_key]
1402
+ comp_data = all_components_df_opt[all_components_df_opt['component_name'] == selected_name].iloc[0]
1403
+ st.session_state[name_key] = comp_data['component_name']
1404
+ st.session_state[frac_key] = comp_data.get('component_fraction', 0.2)
1405
+ cost_val = comp_data.get('unit_cost', 0.0)
1406
+ st.session_state[cost_key] = 0.0 if pd.isna(cost_val) else float(cost_val)
1407
+ for j in range(1, 11):
1408
+ st.session_state[f"opt_c{i}_prop{j}"] = comp_data.get(f'property{j}', 0.0)
1409
+ st.session_state[select_key] = "---"
1410
+ component_options = ["---"] + all_components_df_opt['component_name'].tolist()
1411
+ st.selectbox("Load from Registry", options=component_options, key=select_key)
1412
+ c1, c2, c3 = st.columns([1.5, 2, 2])
1413
+ with c1:
1414
+ st.text_input("Component Name", key=name_key)
1415
+ st.number_input("Unit Cost ($)", min_value=0.0, step=0.01, key=cost_key, format="%.2f")
1416
+ with c2:
1417
+ for j in range(1, 6): st.number_input(f"Property {j}", key=f"opt_c{i}_prop{j}", format="%.4f")
1418
+ with c3:
1419
+ for j in range(6, 11): st.number_input(f"Property {j}", key=f"opt_c{i}_prop{j}", format="%.4f")
1420
+
1421
+ # --- Optimization Controls ---
1422
+ with st.container(border=True):
1423
+ st.markdown("##### 3. Configure & Run")
1424
+ st.checkbox("Include Cost in Optimization", value=True, key="opt_include_cost")
1425
+
1426
+ # Run button and spinner logic
1427
+ run_button_col, spinner_col = st.columns([3, 1])
1428
+ with run_button_col:
1429
+ if st.button("🚀 Run Optimization", use_container_width=True, type="primary", disabled=st.session_state.optimization_running):
1430
+ st.session_state.optimization_running = True
1431
+ start_time = time.time()
1432
+
1433
+ # Gather data for the optimization function
1434
+ targets = {f"Property{i}": st.session_state[f"opt_target_{i}"] for i in range(1, 11)}
1435
+ fixed_targets = {f"Property{i}": targets[f"Property{i}"] for i in range(1, 11) if st.session_state[f"opt_fix_{i}"]}
1436
+ components_data = [] # You would gather component data similarly if your function needs it
1437
+
1438
+ # Call the (dummy) optimization function
1439
+ st.session_state.optimization_results = dummy_optimization_function(targets, fixed_targets, components_data)
1440
+ st.session_state.optimization_time = time.time() - start_time
1441
+ st.session_state.optimization_running = False
1442
+ st.rerun() # Rerun to display results
1443
+
1444
+ with spinner_col:
1445
+ if st.session_state.optimization_running:
1446
+ st.markdown('<div class="spinner"></div>', unsafe_allow_html=True)
1447
+
1448
+ if st.session_state.optimization_time > 0:
1449
+ st.success(f"Optimization complete in {st.session_state.optimization_time:.2f} seconds.")
1450
+
1451
+ # --- Results Section ---
1452
+ if st.session_state.optimization_results:
1453
+ st.markdown('<hr class="custom-divider">', unsafe_allow_html=True)
1454
+ st.subheader("🏆 Optimization Results")
1455
+
1456
+ results = st.session_state.optimization_results
1457
+
1458
+ # Dropdown to select which result to view
1459
+ result_options = {i: f"Solution {i+1}" for i in range(len(results))}
1460
+ selected_idx = st.selectbox("View Solution", options=list(result_options.keys()), format_func=lambda x: result_options[x])
1461
+
1462
+ selected_solution = results[selected_idx]
1463
+
1464
+ # Display best fractions and properties
1465
+ res_cols = st.columns([3, 2])
1466
+ with res_cols[0]:
1467
+ st.markdown("##### Optimal Component Fractions")
1468
+ frac_cols = st.columns(5)
1469
+ for i, frac in enumerate(selected_solution["component_fractions"]):
1470
+ with frac_cols[i]:
1471
+ comp_name = st.session_state.get(f"opt_c{i}_name", f"Component {i+1}")
1472
+ st.markdown(f"""
1473
+ <div class="metric-card" style="padding: 0.8rem;">
1474
+ <div class="metric-label" style="font-size: 0.8rem;">{comp_name}</div>
1475
+ <div class="metric-value" style="font-size: 1.5rem;">{frac*100:.2f}%</div>
1476
+ </div>
1477
+ """, unsafe_allow_html=True)
1478
+
1479
+ # --- FIX: New, readable KPI cards for blend properties ---
1480
+ with res_cols[1]:
1481
+ st.markdown("##### Resulting Blend Properties")
1482
+ prop_kpi_cols = st.columns(5)
1483
+ for i, prop_val in enumerate(selected_solution["blend_properties"]):
1484
+ col = prop_kpi_cols[i % 5]
1485
+ with col:
1486
+ st.markdown(f"""
1487
+ <div class="metric-card" style="margin-bottom: 10px; padding: 0.5rem;">
1488
+ <div class="metric-label" style="font-size: 0.7rem;">Property {i+1}</div>
1489
+ <div class="metric-value" style="font-size: 1.1rem;">{prop_val:.4f}</div>
1490
+ </div>
1491
+ """, unsafe_allow_html=True)
1492
+
1493
+ # Expander for full results table
1494
+ with st.expander("Show Full Results Table"):
1495
+ table_data = []
1496
+ for i in range(5):
1497
+ row = {
1498
+ "Composition": st.session_state.get(f"opt_c{i}_name", f"C{i+1}"),
1499
+ "Fraction": selected_solution["component_fractions"][i],
1500
+ "Unit Cost": st.session_state.get(f"opt_c{i}_cost", 0.0)
1501
+ }
1502
+ for j in range(1, 11):
1503
+ row[f"Property {j}"] = st.session_state.get(f"opt_c{i}_prop{j}", 0.0)
1504
+ table_data.append(row)
1505
+
1506
+ # Add blend row
1507
+ blend_row = {"Composition": "Optimized Blend", "Fraction": 1.0, "Unit Cost": selected_solution["optimized_cost"]}
1508
+ for i, prop in enumerate(selected_solution["blend_properties"]):
1509
+ blend_row[f"Property {i+1}"] = prop
1510
+ table_data.append(blend_row)
1511
+
1512
+ st.dataframe(pd.DataFrame(table_data), use_container_width=True)
1513
+
1514
+ # Pareto Plot and Save Section
1515
+ pareto_col, save_col = st.columns([2, 1])
1516
+ with pareto_col:
1517
+ st.markdown("##### Pareto Front: Cost vs. Error")
1518
+ pareto_df = pd.DataFrame({
1519
+ 'Cost': [r['optimized_cost'] for r in results],
1520
+ 'Error': [r['error'] for r in results],
1521
+ 'Solution': [f'Sol {i+1}' for i in range(len(results))]
1522
+ })
1523
+ # --- FIX: Inverted the axes to show Error vs. Cost ---
1524
+ fig_pareto = px.scatter(
1525
+ pareto_df, x='Error', y='Cost', text='Solution', title="<b>Pareto Front: Error vs. Cost</b>"
1526
+ )
1527
+ fig_pareto.update_traces(textposition='top center', marker=dict(size=12, color='#8B4513'))
1528
+ st.plotly_chart(fig_pareto, use_container_width=True)
1529
+
1530
+ with save_col:
1531
+ st.markdown("##### Save Result")
1532
+ st.text_input("Save as Blend Name", value=f"Optimized_Blend_{selected_idx+1}", key="opt_save_name")
1533
+ if st.button("💾 Save to Database", use_container_width=True):
1534
+ st.info("Save functionality can be implemented here.") # Placeholder for save logic
1535
+
1536
+ # Placeholder for download button logic
1537
+ st.download_button("📥 Download All Solutions (CSV)", data="dummy_csv_data", file_name="optimization_results.csv", use_container_width=True)
1538
+
1539
+ # --- Floating Help Button ---
1540
+ # (Using a different key to avoid conflict with other tabs)
1541
+ # --- FIX: Complete working version of the help button ---
1542
+ st.markdown("""
1543
+ <style>
1544
+ #help-toggle-optimizer { display: none; }
1545
+ #help-toggle-optimizer:checked ~ .help-panel-optimizer {
1546
+ opacity: 1; visibility: visible; transform: translateY(0);
1547
+ }
1548
+ .help-panel-optimizer {
1549
+ position:fixed; right:25px; bottom:100px; z-index:9998;
1550
+ width:520px; max-height:70vh; overflow-y:auto;
1551
+ background: linear-gradient(135deg, #FFFDF5 0%, #F8EAD9 100%);
1552
+ border:1px solid #CFB53B; border-radius:12px; padding:20px;
1553
+ box-shadow:0 14px 34px rgba(0,0,0,0.22);
1554
+ color:#4a2f1f; transform: translateY(12px); opacity:0;
1555
+ visibility:hidden; transition: all .22s ease-in-out;
1556
+ }
1557
+ </style>
1558
+ <input id="help-toggle-optimizer" type="checkbox" />
1559
+ <label for="help-toggle-optimizer" class="help-button">💬 Help</label>
1560
+ <div class="help-panel help-panel-optimizer"> <div class="head">
1561
+ <div class="title">How to Use the Optimizer</div>
1562
+ <label for="help-toggle-optimizer" class="help-close">Close</label>
1563
+ </div>
1564
+ <div class="help-body">
1565
+ <p><b>1. Define Goals:</b> Enter your desired target values for each of the 10 blend properties. Use the 'Fix Target' toggle for any property that must be met exactly.</p>
1566
+ <p><b>2. Select Components:</b> Choose up to 5 base components. You can load them from the registry to auto-fill their data or enter them manually.</p>
1567
+ <p><b>3. Configure & Run:</b> Decide if cost should be a factor in the optimization, then click 'Run Optimization'. A spinner will appear while the process runs.</p>
1568
+ <p><b>4. Analyze Results:</b> After completion, the best solution is shown by default. You can view other potential solutions from the dropdown. The results include optimal component fractions and the final blend properties.</p>
1569
+ <p><b>5. Save & Download:</b> Give your chosen solution a name and save it to the blends database for future use in the Comparison tab.</p>
1570
+ </div>
1571
+ </div>
1572
+ """, unsafe_allow_html=True)
1573
 
1574
  # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------
1575
  # Blend Comparison Tab
 
1805
  )
1806
  st.plotly_chart(fig_composite, use_container_width=True)
1807
 
1808
+ # --- ADD: Floating Help Button for Blend Comparison ---
1809
+ st.markdown("""
1810
+ <style>
1811
+ #help-toggle-comparison { display: none; }
1812
+ #help-toggle-comparison:checked ~ .help-panel-comparison {
1813
+ opacity: 1; visibility: visible; transform: translateY(0);
1814
+ }
1815
+ .help-panel-comparison {
1816
+ position:fixed; right:25px; bottom:100px; z-index:9998;
1817
+ width:520px; max-height:70vh; overflow-y:auto;
1818
+ background: linear-gradient(135deg, #FFFDF5 0%, #F8EAD9 100%);
1819
+ border:1px solid #CFB53B; border-radius:12px; padding:20px;
1820
+ box-shadow:0 14px 34px rgba(0,0,0,0.22);
1821
+ color:#4a2f1f; transform: translateY(12px); opacity:0;
1822
+ visibility:hidden; transition: all .22s ease-in-out;
1823
+ }
1824
+ </style>
1825
+ <input id="help-toggle-comparison" type="checkbox" />
1826
+ <label for="help-toggle-comparison" class="help-button">💬 Help</label>
1827
+ <div class="help-panel help-panel-comparison">
1828
+ <div class="head">
1829
+ <div class="title">Using the Blend Comparison Tool</div>
1830
+ <label for="help-toggle-comparison" class="help-close">Close</label>
1831
+ </div>
1832
+ <div class="help-body">
1833
+ <p>This tab allows you to perform a side-by-side analysis of up to three saved blends.</p>
1834
+ <p><b>1. Select Scenarios:</b> Use the three dropdown menus at the top to select the saved blends you wish to compare.</p>
1835
+ <p><b>2. Review Overviews:</b> Key information for each selected blend, including its composition and final properties, will be displayed in summary cards.</p>
1836
+ <p><b>3. Analyze Charts:</b> The charts provide a deep dive into how the blends compare on cost, property profiles, quality, and composition.</p>
1837
+ <p><b>4. Export:</b> Click the 'Export to PDF' button to generate a downloadable report containing all the charts and data for your selected comparison.</p>
1838
+ </div>
1839
+ </div>
1840
+ """, unsafe_allow_html=True)
1841
+
1842
  # ----------------------------------------------------------------------------------------------------------------------------------------------
1843
  # Fuel Registry Tab
1844
  # ---------------------------------------------------------------------------------------------------------------------------------------------
 
2049
  else:
2050
  del st.session_state.blends
2051
  st.rerun()
2052
+
2053
+ # --- ADD: Floating Help Button for Fuel Registry ---
2054
+ st.markdown("""
2055
+ <style>
2056
+ #help-toggle-registry { display: none; }
2057
+ #help-toggle-registry:checked ~ .help-panel-registry {
2058
+ opacity: 1; visibility: visible; transform: translateY(0);
2059
+ }
2060
+ .help-panel-registry {
2061
+ position:fixed; right:25px; bottom:100px; z-index:9998;
2062
+ width:520px; max-height:70vh; overflow-y:auto;
2063
+ background: linear-gradient(135deg, #FFFDF5 0%, #F8EAD9 100%);
2064
+ border:1px solid #CFB53B; border-radius:12px; padding:20px;
2065
+ box-shadow:0 14px 34px rgba(0,0,0,0.22);
2066
+ color:#4a2f1f; transform: translateY(12px); opacity:0;
2067
+ visibility:hidden; transition: all .22s ease-in-out;
2068
+ }
2069
+ </style>
2070
+ <input id="help-toggle-registry" type="checkbox" />
2071
+ <label for="help-toggle-registry" class="help-button">💬 Help</label>
2072
+ <div class="help-panel help-panel-registry">
2073
+ <div class="head">
2074
+ <div class="title">Using the Fuel Registry</div>
2075
+ <label for="help-toggle-registry" class="help-close">Close</label>
2076
+ </div>
2077
+ <div class="help-body">
2078
+ <p>This tab is your central database for managing all blend components and saved blends.</p>
2079
+ <p><b>1. Add Components/Blends:</b> You can add a single component manually using the form or upload a CSV file for batch additions of components or blends. Download the templates to ensure your file format is correct.</p>
2080
+ <p><b>2. View & Manage Data:</b> Use the dropdown to switch between viewing 'Components' and 'Blends'. The table shows all saved records.</p>
2081
+ <p><b>3. Search & Delete:</b> Use the search bar to filter the table. To delete records, check the 'Select' box next to the desired rows and click the 'Delete Selected' button that appears.</p>
2082
+ </div>
2083
+ </div>
2084
+ """, unsafe_allow_html=True)
2085
 
2086
 
2087
  # ----------------------------------------------------------------------------------------------------------------------------------------------
 
2145
  </style>
2146
  """, unsafe_allow_html=True)
2147
 
2148
+ # # --- Floating "How to Use" Button and Panel ---
2149
+ # st.markdown("""
2150
+ # <input id="help-toggle" type="checkbox" />
2151
+ # <label for="help-toggle" class="help-button">💬 Help</label>
2152
+
2153
+ # <div class="help-panel" aria-hidden="true">
2154
+ # <div class="head">
2155
+ # <div class="title">Interpreting Model Insights</div>
2156
+ # <label for="help-toggle" class="help-close">Close</label>
2157
+ # </div>
2158
+ # <div class="help-body">
2159
+ # <p><b>KPI Cards:</b> These four cards give you a quick summary of the model's overall health.</p>
2160
+ # <ul>
2161
+ # <li><b>Overall R² Score:</b> Think of this as the model's accuracy grade. A score of 92.4% means the model's predictions are highly accurate.</li>
2162
+ # <li><b>MSE (Mean Squared Error):</b> This measures the average size of the model's mistakes. A smaller number is better.</li>
2163
+ # <li><b>MAPE (Mean Absolute % Error):</b> This tells you the average error in percentage terms. A value of 0.112 means predictions are off by about 11.2% on average.</li>
2164
+ # </ul>
2165
+ # <p><b>R² Score by Blend Property Chart:</b> This chart shows how well the model predicts each specific property.</p>
2166
+ # <p>A <b>longer bar</b> means the model is very good at predicting that property. A <b>shorter bar</b> indicates a property that is harder for the model to predict accurately. This helps you trust predictions for some properties more than others.</p>
2167
+ # </div>
2168
+ # </div>
2169
+ # """, unsafe_allow_html=True)
2170
+
2171
+ # --- FIX: Complete working version of the help button ---
2172
+ # --- FIX: Complete working version of the help button ---
2173
  st.markdown("""
2174
+ <style>
2175
+ /* Styles for the help panel and button */
2176
+ #help-toggle-insights { display: none; }
2177
+ #help-toggle-insights:checked ~ .help-panel-insights {
2178
+ opacity: 1; visibility: visible; transform: translateY(0);
2179
+ }
2180
+ .help-panel-insights {
2181
+ position:fixed; right:25px; bottom:100px; z-index:9998;
2182
+ width:520px; max-height:70vh; overflow-y:auto;
2183
+ background: linear-gradient(135deg, #FFFDF5 0%, #F8EAD9 100%);
2184
+ border:1px solid #CFB53B; border-radius:12px; padding:20px;
2185
+ box-shadow:0 14px 34px rgba(0,0,0,0.22);
2186
+ color:#4a2f1f; transform: translateY(12px); opacity:0;
2187
+ visibility:hidden; transition: all .22s ease-in-out;
2188
+ }
2189
+ </style>
2190
+ <input id="help-toggle-insights" type="checkbox" />
2191
+ <label for="help-toggle-insights" class="help-button">💬 Help</label>
2192
+ <div class="help-panel help-panel-insights">
2193
+ <div class="head">
2194
+ <div class="title">Interpreting Model Insights</div>
2195
+ <label for="help-toggle-insights" class="help-close">Close</label>
2196
+ </div>
2197
+ <div class="help-body">
2198
+ <p><b>KPI Cards:</b> These cards give a quick summary of the model's health. <b>R² Score</b> is its accuracy grade, while <b>MSE</b> and <b>MAPE</b> measure the average size of its errors.</p>
2199
+ <p><b>R² Score by Blend Property Chart:</b> This chart shows how well the model predicts each specific property. A longer bar means the model is very good at predicting that property.</p>
2200
+ </div>
2201
  </div>
 
2202
  """, unsafe_allow_html=True)
2203
 
2204
  # --- Main Title ---