HaryaniAnjali committed on
Commit 6d73ebd · verified · 1 Parent(s): 2e261c8

Update app.py

Files changed (1)
  1. app.py +477 -191
app.py CHANGED
@@ -12,14 +12,36 @@ import os
 import json
 import requests
 import re
-from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 import torch
 import openai

 # Set plot styling
 sns.set(style="whitegrid")
 plt.rcParams["figure.figsize"] = (10, 6)

 # Initialize AI Models
 def initialize_ai_models():
     """Initialize the AI models for data analysis."""
@@ -28,52 +50,86 @@ def initialize_ai_models():

     # Initialize Hugging Face model for data recommendations
     try:
-        tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
-        model = AutoModelForCausalLM.from_pretrained("google/flan-t5-base")
         data_assistant = pipeline("text-generation", model=model, tokenizer=tokenizer)
-    except:
         # Fallback to a smaller model if the main one fails to load
-        data_assistant = pipeline("text-generation", model="distilgpt2")

     return data_assistant

-# Global variables for AI models
-data_assistant = None
-
 def read_file(file):
-    """Read different file formats into a pandas DataFrame."""
     if file is None:
         return None

     file_name = file.name if hasattr(file, 'name') else ''

     try:
         # Handle different file types
         if file_name.endswith('.csv'):
-            # For wine quality dataset which uses semicolons
-            if "winequality" in file_name.lower():
-                return pd.read_csv(file, sep=';')
-            else:
-                # Try standard comma separator first
                 try:
-                    df = pd.read_csv(file)
-                    # If we got only one column but it contains semicolons, try again with semicolon separator
-                    if len(df.columns) == 1 and ';' in df.columns[0]:
-                        return pd.read_csv(file, sep=';')
                     return df
                 except:
-                    # Fall back to semicolon if comma fails
-                    return pd.read_csv(file, sep=';')

         elif file_name.endswith(('.xls', '.xlsx')):
             return pd.read_excel(file)
         elif file_name.endswith('.json'):
             return pd.read_json(file)
         elif file_name.endswith('.txt'):
-            return pd.read_csv(file, delimiter='\t')
         else:
             return "Unsupported file format. Please upload .csv, .xlsx, .xls, .json, or .txt files."
     except Exception as e:
         return f"Error reading file: {str(e)}"

 def analyze_data(df):
@@ -202,8 +258,11 @@ def detect_outliers(df, numeric_cols):
 def generate_visualizations(df):
     """Generate appropriate visualizations based on the data types."""
     if not isinstance(df, pd.DataFrame):
         return df  # Return error message if df is not a DataFrame

     visualizations = {}

     # Identify column types
@@ -212,108 +271,226 @@ def generate_visualizations(df):
     date_cols = [col for col in df.columns if df[col].dtype == 'datetime64[ns]' or
                  (df[col].dtype == 'object' and pd.to_datetime(df[col], errors='coerce').notna().all())]

-    # 1. Distribution plots for numeric columns (first 5)
-    if numeric_cols:
-        for i, col in enumerate(numeric_cols[:5]):  # Limit to first 5 numeric columns
-            fig = px.histogram(df, x=col, marginal="box", title=f"Distribution of {col}")
-            visualizations[f'dist_{col}'] = fig

-    # 2. Bar charts for categorical columns (first 5)
-    if categorical_cols:
-        for i, col in enumerate(categorical_cols[:5]):  # Limit to first 5 categorical columns
-            value_counts = df[col].value_counts().nlargest(10)  # Top 10 categories
-            fig = px.bar(x=value_counts.index, y=value_counts.values,
-                         title=f"Top 10 categories in {col}")
-            fig.update_xaxes(title=col)
-            fig.update_yaxes(title="Count")
-            visualizations[f'bar_{col}'] = fig
-
-    # 3. Correlation heatmap for numeric columns
-    if len(numeric_cols) > 1:
-        corr_matrix = df[numeric_cols].corr()
-        fig = px.imshow(corr_matrix, text_auto=True, aspect="auto",
-                        title="Correlation Heatmap")
-        visualizations['correlation'] = fig
-
-    # 4. Scatter plot matrix (first 4 numeric columns)
-    if len(numeric_cols) >= 2:
-        plot_cols = numeric_cols[:4]  # Limit to first 4 numeric columns
-        fig = px.scatter_matrix(df, dimensions=plot_cols, title="Scatter Plot Matrix")
-        visualizations['scatter_matrix'] = fig
-
-    # 5. Time series plot if date column exists
-    if date_cols and numeric_cols:
-        date_col = date_cols[0]  # Use the first date column
-        # Convert to datetime if not already
-        if df[date_col].dtype != 'datetime64[ns]':
-            df[date_col] = pd.to_datetime(df[date_col], errors='coerce')
-
-        # Sort by date
-        df_sorted = df.sort_values(by=date_col)
-
-        # Create time series for first numeric column
-        num_col = numeric_cols[0]
-        fig = px.line(df_sorted, x=date_col, y=num_col,
-                      title=f"{num_col} over Time")
-        visualizations['time_series'] = fig
-
-    # 6. PCA visualization if enough numeric columns
-    if len(numeric_cols) >= 3:
-        # Apply PCA to numeric data
-        numeric_data = df[numeric_cols].select_dtypes(include=[np.number])
-        # Fill NaN values with mean for PCA
-        numeric_data = numeric_data.fillna(numeric_data.mean())
-
-        # Standardize the data
-        scaler = StandardScaler()
-        scaled_data = scaler.fit_transform(numeric_data)
-
-        # Apply PCA with 2 components
-        pca = PCA(n_components=2)
-        pca_result = pca.fit_transform(scaled_data)
-
-        # Create a DataFrame with PCA results
-        pca_df = pd.DataFrame(data=pca_result, columns=['PC1', 'PC2'])
-
-        # If categorical column exists, use it for color
         if categorical_cols:
-            cat_col = categorical_cols[0]
-            pca_df[cat_col] = df[cat_col].values
-            fig = px.scatter(pca_df, x='PC1', y='PC2', color=cat_col,
-                             title="PCA Visualization")
-        else:
-            fig = px.scatter(pca_df, x='PC1', y='PC2',
-                             title="PCA Visualization")
-
-        variance_ratio = pca.explained_variance_ratio_
-        fig.update_layout(
-            annotations=[
-                dict(
-                    text=f"PC1 explained variance: {variance_ratio[0]:.2f}",
-                    showarrow=False,
-                    x=0.5,
-                    y=1.05,
-                    xref="paper",
-                    yref="paper"
-                ),
-                dict(
-                    text=f"PC2 explained variance: {variance_ratio[1]:.2f}",
-                    showarrow=False,
-                    x=0.5,
-                    y=1.02,
-                    xref="paper",
-                    yref="paper"
-                )
-            ]
-        )

-        visualizations['pca'] = fig

     return visualizations

 def get_ai_cleaning_recommendations(df):
     """Get AI-powered recommendations for data cleaning using OpenAI."""
     try:
         # Prepare the dataset summary
         summary = {
             "shape": df.shape,
@@ -340,47 +517,49 @@ def get_ai_cleaning_recommendations(df):
     Format your response as markdown and ONLY include the cleaning recommendations.
     """

-        # Check if OpenAI API key is available
-        api_key = os.environ.get("OPENAI_API_KEY")
-        if api_key:
-            openai.api_key = api_key
-            response = openai.ChatCompletion.create(
-                model="gpt-3.5-turbo",
-                messages=[
-                    {"role": "system", "content": "You are a data science assistant focused on data cleaning recommendations."},
-                    {"role": "user", "content": prompt}
-                ],
-                max_tokens=700
-            )
-            return response.choices[0].message.content
-        else:
-            # Fallback to Hugging Face model if OpenAI key is not available
-            global data_assistant
-            if data_assistant is None:
-                data_assistant = initialize_ai_models()
-
             # Shorten the prompt for the smaller model
             short_prompt = f"Data cleaning recommendations for dataset with {df.shape[0]} rows, {df.shape[1]} columns, and columns: {', '.join(df.columns[:5])}..."

-            # Generate recommendations
-            recommendations = data_assistant(
-                short_prompt,
-                max_length=500,
-                num_return_sequences=1
-            )[0]['generated_text']
-
-            return f"""
-            ## Data Cleaning Recommendations
-
-            * Handle missing values in columns with appropriate imputation techniques
-            * Check for and remove duplicate records
-            * Standardize text fields and correct spelling errors
-            * Convert columns to appropriate data types
-            * Check for and handle outliers in numerical columns
-
-            Note: These are generic recommendations as AI model access is limited.
-            """
-    except Exception as e:
         return f"""
         ## Data Cleaning Recommendations

396
  def get_hf_model_insights(df):
397
  """Get dataset insights using Hugging Face model."""
398
  try:
399
- global data_assistant
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
400
  if data_assistant is None:
401
  data_assistant = initialize_ai_models()
402
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
403
  # Prepare a brief summary of the dataset
404
  numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
405
  categorical_cols = df.select_dtypes(include=['object', 'category']).columns.tolist()
@@ -454,7 +666,7 @@ def process_file(file):
     # Read the file
     df = read_file(file)

-    if isinstance(df, str):  # If error message
         return df, None, None, None

     # Convert date columns to datetime
@@ -638,38 +850,6 @@ def apply_data_cleaning(df, cleaning_options):

     return cleaned_df, cleaning_log

-def app_ui(file):
-    """Main function for the Gradio interface."""
-    if file is None:
-        return "Please upload a file to begin analysis.", None, None, None
-
-    # Process the file
-    analysis, visualizations, cleaning_recommendations, analysis_insights = process_file(file)
-
-    if isinstance(analysis, str):  # If error message
-        return analysis, None, None, None
-
-    # Format analysis for display
-    analysis_html = display_analysis(analysis)
-
-    # Prepare visualizations for display
-    viz_html = ""
-    if visualizations and not isinstance(visualizations, str):
-        for viz_name, fig in visualizations.items():
-            # Convert plotly figure to HTML
-            viz_html += f'<div style="margin-bottom: 30px;">{fig.to_html(full_html=False, include_plotlyjs="cdn")}</div>'
-
-    # Combine analysis and visualizations
-    result_html = f"""
-    <div style="display: flex; flex-direction: column;">
-        <div>{analysis_html}</div>
-        <h2>Data Visualizations</h2>
-        <div>{viz_html}</div>
-    </div>
-    """
-
-    return result_html, visualizations, cleaning_recommendations, analysis_insights
-
 def apply_cleaning_ui(file, handle_missing, missing_method, remove_duplicates,
                       handle_outliers, outlier_method, convert_dates, date_columns,
                       normalize_numeric):
@@ -680,7 +860,7 @@ def apply_cleaning_ui(file, handle_missing, missing_method, remove_duplicates,
     # Read the file
     df = read_file(file)

-    if isinstance(df, str):  # If error message
         return df, None

     # Configure cleaning options
@@ -721,19 +901,87 @@ def apply_cleaning_ui(file, handle_missing, missing_method, remove_duplicates,

     return result_summary, buffer

 # Create Gradio interface
 with gr.Blocks(title="Data Visualization & Cleaning AI") as demo:
     gr.Markdown("# Data Visualization & Cleaning AI")
     gr.Markdown("Upload your data file (CSV, Excel, JSON, or TXT) and get automatic analysis, visualizations, and AI-powered insights.")

-    with gr.Row():
-        file_input = gr.File(label="Upload Data File")
-
-    with gr.Tabs():
         with gr.TabItem("Data Analysis"):
             with gr.Row():
                 analyze_button = gr.Button("Analyze Data")

             with gr.Tabs():
                 with gr.TabItem("Analysis & Visualizations"):
                     output = gr.HTML(label="Results")
@@ -772,6 +1020,32 @@ with gr.Blocks(title="Data Visualization & Cleaning AI") as demo:
                     clean_button = gr.Button("Clean Data")
                     cleaning_output = gr.HTML(label="Cleaning Results")
                     cleaned_file_output = gr.File(label="Download Cleaned Data")

     # Connect the buttons to functions
     analyze_button.click(
@@ -789,6 +1063,18 @@ with gr.Blocks(title="Data Visualization & Cleaning AI") as demo:
         ],
         outputs=[cleaning_output, cleaned_file_output]
     )

 # Initialize AI models
 try:
12
  import json
13
  import requests
14
  import re
 
15
  import torch
16
  import openai
17
+ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
18
+ import base64
19
+ from io import BytesIO
20
 
21
  # Set plot styling
22
  sns.set(style="whitegrid")
23
  plt.rcParams["figure.figsize"] = (10, 6)
24
 
25
+ # Global variables for API keys and AI models
26
+ OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
27
+ HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "")
28
+ data_assistant = None
29
+
30
+ def set_openai_key(api_key):
31
+ """Set the OpenAI API key."""
32
+ global OPENAI_API_KEY
33
+ OPENAI_API_KEY = api_key
34
+ openai.api_key = api_key
35
+ return "OpenAI API key set successfully!"
36
+
37
+ def set_hf_token(api_token):
38
+ """Set the Hugging Face API token."""
39
+ global HF_API_TOKEN, data_assistant
40
+ HF_API_TOKEN = api_token
41
+ os.environ["TRANSFORMERS_TOKEN"] = api_token
42
+ data_assistant = initialize_ai_models()
43
+ return "Hugging Face token set successfully!"
44
+
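A side note on `set_hf_token` above: as far as I can tell, `TRANSFORMERS_TOKEN` is not an environment variable that `transformers` or `huggingface_hub` read; the hub libraries look for `HF_TOKEN` / `HUGGING_FACE_HUB_TOKEN`. A minimal sketch of an alternative that authenticates explicitly, assuming the token is meant to authorize model downloads (this is not the committed code):

```python
# Sketch only, not part of this commit.
from huggingface_hub import login

def set_hf_token(api_token):
    """Store the token and log in so later from_pretrained() downloads are authenticated."""
    global HF_API_TOKEN, data_assistant
    HF_API_TOKEN = api_token
    login(token=api_token)  # huggingface_hub picks this up for all subsequent downloads
    data_assistant = initialize_ai_models()
    return "Hugging Face token set successfully!"
```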
45
  # Initialize AI Models
46
  def initialize_ai_models():
47
  """Initialize the AI models for data analysis."""
 
50
 
51
  # Initialize Hugging Face model for data recommendations
52
  try:
53
+ tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
54
+ model = AutoModelForCausalLM.from_pretrained("distilgpt2")
55
  data_assistant = pipeline("text-generation", model=model, tokenizer=tokenizer)
56
+ except Exception as e:
57
+ print(f"Error loading model: {e}")
58
  # Fallback to a smaller model if the main one fails to load
59
+ try:
60
+ data_assistant = pipeline("text-generation", model="distilgpt2")
61
+ except:
62
+ data_assistant = None
63
 
64
  return data_assistant
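For context on the model swap above: the earlier code paired `google/flan-t5-base` with `AutoModelForCausalLM` and the `text-generation` task, but FLAN-T5 is a seq2seq model, so that combination fails to load, which is presumably why the commit falls back to `distilgpt2`. If a FLAN-T5 model were still wanted here, a sketch of the seq2seq route (not part of the commit):

```python
# Sketch only: FLAN-T5 needs the text2text-generation task (seq2seq), not text-generation.
from transformers import pipeline

data_assistant = pipeline("text2text-generation", model="google/flan-t5-base")
print(data_assistant("Suggest three data cleaning steps for a table with missing values.")[0]["generated_text"])
```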
65
 
 
 
 
66
  def read_file(file):
67
+ """Read different file formats into a pandas DataFrame with robust separator detection."""
68
  if file is None:
69
  return None
70
 
71
  file_name = file.name if hasattr(file, 'name') else ''
72
+ print(f"Reading file: {file_name}")
73
 
74
  try:
75
  # Handle different file types
76
  if file_name.endswith('.csv'):
77
+ # First try with comma
78
+ try:
79
+ df = pd.read_csv(file)
80
+
81
+ # Check if we got only one column but it contains semicolons
82
+ if len(df.columns) == 1 and ';' in str(df.columns[0]):
83
+ print("Detected potential semicolon-separated file")
84
+ # Reset file position
85
+ file.seek(0)
86
+ # Try with semicolon
87
+ df = pd.read_csv(file, sep=';')
88
+ print(f"Read file with semicolon separator: {df.shape}")
89
+ else:
90
+ print(f"Read file with comma separator: {df.shape}")
91
+
92
+ # Convert columns to appropriate types
93
+ for col in df.columns:
94
+ # Try to convert string columns to numeric
95
+ if df[col].dtype == 'object':
96
+ df[col] = pd.to_numeric(df[col], errors='ignore')
97
+
98
+ return df
99
+ except Exception as e:
100
+ print(f"Error with standard separators: {e}")
101
+ # Try with semicolon
102
+ file.seek(0)
103
  try:
104
+ df = pd.read_csv(file, sep=';')
105
+ print(f"Read file with semicolon separator after error: {df.shape}")
 
 
106
  return df
107
  except:
108
+ # Final attempt with Python's csv sniffer
109
+ file.seek(0)
110
+ return pd.read_csv(file, sep=None, engine='python')
111
 
112
  elif file_name.endswith(('.xls', '.xlsx')):
113
  return pd.read_excel(file)
114
  elif file_name.endswith('.json'):
115
  return pd.read_json(file)
116
  elif file_name.endswith('.txt'):
117
+ # Try tab separator first for text files
118
+ try:
119
+ df = pd.read_csv(file, delimiter='\t')
120
+ if len(df.columns) <= 1:
121
+ # If tab doesn't work well, try with separator detection
122
+ file.seek(0)
123
+ df = pd.read_csv(file, sep=None, engine='python')
124
+ return df
125
+ except:
126
+ # Fall back to separator detection
127
+ file.seek(0)
128
+ return pd.read_csv(file, sep=None, engine='python')
129
  else:
130
  return "Unsupported file format. Please upload .csv, .xlsx, .xls, .json, or .txt files."
131
  except Exception as e:
132
+ print(f"Error reading file: {str(e)}")
133
  return f"Error reading file: {str(e)}"
134
 
135
  def analyze_data(df):
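One caveat on the type-coercion loop in `read_file` above: `pd.to_numeric(..., errors='ignore')` is deprecated in recent pandas releases. A small sketch of a roughly equivalent guarded conversion (the helper name `coerce_numeric_columns` is illustrative, not part of the commit):

```python
# Sketch only: "convert a column if the whole column parses" without errors='ignore'.
import pandas as pd

def coerce_numeric_columns(df: pd.DataFrame) -> pd.DataFrame:
    for col in df.select_dtypes(include="object").columns:
        converted = pd.to_numeric(df[col], errors="coerce")
        # Keep the conversion only when every non-null value parsed as a number.
        if converted.notna().sum() == df[col].notna().sum():
            df[col] = converted
    return df
```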
 
258
  def generate_visualizations(df):
259
  """Generate appropriate visualizations based on the data types."""
260
  if not isinstance(df, pd.DataFrame):
261
+ print(f"Not a DataFrame: {type(df)}")
262
  return df # Return error message if df is not a DataFrame
263
 
264
+ print(f"Starting visualization generation for DataFrame with shape: {df.shape}")
265
+
266
  visualizations = {}
267
 
268
  # Identify column types
 
271
  date_cols = [col for col in df.columns if df[col].dtype == 'datetime64[ns]' or
272
  (df[col].dtype == 'object' and pd.to_datetime(df[col], errors='coerce').notna().all())]
273
 
274
+ print(f"Found {len(numeric_cols)} numeric columns: {numeric_cols}")
275
+ print(f"Found {len(categorical_cols)} categorical columns: {categorical_cols}")
276
+ print(f"Found {len(date_cols)} date columns: {date_cols}")
 
 
277
 
278
+ try:
279
+ # Simple test plot to verify Plotly is working
280
+ if len(df) > 0 and len(df.columns) > 0:
281
+ col = df.columns[0]
282
+ try:
283
+ test_data = df[col].head(100)
284
+ fig = px.histogram(x=test_data, title=f"Test Plot for {col}")
285
+ visualizations['test_plot'] = fig
286
+ print(f"Generated test plot for column: {col}")
287
+ except Exception as e:
288
+ print(f"Error creating test plot: {e}")
289
+
290
+ # 1. Distribution plots for numeric columns (first 5)
291
+ if numeric_cols:
292
+ for i, col in enumerate(numeric_cols[:5]): # Limit to first 5 numeric columns
293
+ try:
294
+ fig = px.histogram(df, x=col, marginal="box", title=f"Distribution of {col}")
295
+ visualizations[f'dist_{col}'] = fig
296
+ print(f"Generated distribution plot for {col}")
297
+ except Exception as e:
298
+ print(f"Error creating histogram for {col}: {e}")
299
+
300
+ # 2. Bar charts for categorical columns (first 5)
301
  if categorical_cols:
302
+ for i, col in enumerate(categorical_cols[:5]): # Limit to first 5 categorical columns
303
+ try:
304
+ # Get value counts and handle potential large number of categories
305
+ value_counts = df[col].value_counts().nlargest(10) # Top 10 categories
306
+
307
+ # Convert indices to strings to ensure they can be plotted
308
+ value_counts.index = value_counts.index.astype(str)
309
+
310
+ fig = px.bar(x=value_counts.index, y=value_counts.values,
311
+ title=f"Top 10 categories in {col}")
312
+ fig.update_xaxes(title=col)
313
+ fig.update_yaxes(title="Count")
314
+ visualizations[f'bar_{col}'] = fig
315
+ print(f"Generated bar chart for {col}")
316
+ except Exception as e:
317
+ print(f"Error creating bar chart for {col}: {e}")
318
+
319
+ # 3. Correlation heatmap for numeric columns
320
+ if len(numeric_cols) > 1:
321
+ try:
322
+ corr_matrix = df[numeric_cols].corr()
323
+ fig = px.imshow(corr_matrix, text_auto=True, aspect="auto",
324
+ title="Correlation Heatmap")
325
+ visualizations['correlation'] = fig
326
+ print("Generated correlation heatmap")
327
+ except Exception as e:
328
+ print(f"Error creating correlation heatmap: {e}")
 
 
329
 
330
+ # 4. Scatter plot matrix (first 3 numeric columns to keep it manageable)
331
+ if len(numeric_cols) >= 2:
332
+ try:
333
+ plot_cols = numeric_cols[:3] # Limit to first 3 numeric columns
334
+ fig = px.scatter_matrix(df, dimensions=plot_cols, title="Scatter Plot Matrix")
335
+ visualizations['scatter_matrix'] = fig
336
+ print("Generated scatter plot matrix")
337
+ except Exception as e:
338
+ print(f"Error creating scatter matrix: {e}")
339
+
340
+ # 5. Time series plot if date column exists
341
+ if date_cols and numeric_cols:
342
+ try:
343
+ date_col = date_cols[0] # Use the first date column
344
+ # Convert to datetime if not already
345
+ if df[date_col].dtype != 'datetime64[ns]':
346
+ df[date_col] = pd.to_datetime(df[date_col], errors='coerce')
347
+
348
+ # Sort by date
349
+ df_sorted = df.sort_values(by=date_col)
350
+
351
+ # Create time series for first numeric column
352
+ num_col = numeric_cols[0]
353
+ fig = px.line(df_sorted, x=date_col, y=num_col,
354
+ title=f"{num_col} over Time")
355
+ visualizations['time_series'] = fig
356
+ print("Generated time series plot")
357
+ except Exception as e:
358
+ print(f"Error creating time series plot: {e}")
359
+
360
+ # 6. PCA visualization if enough numeric columns
361
+ if len(numeric_cols) >= 3:
362
+ try:
363
+ # Apply PCA to numeric data
364
+ numeric_data = df[numeric_cols].select_dtypes(include=[np.number])
365
+ # Fill NaN values with mean for PCA
366
+ numeric_data = numeric_data.fillna(numeric_data.mean())
367
+
368
+ # Standardize the data
369
+ scaler = StandardScaler()
370
+ scaled_data = scaler.fit_transform(numeric_data)
371
+
372
+ # Apply PCA with 2 components
373
+ pca = PCA(n_components=2)
374
+ pca_result = pca.fit_transform(scaled_data)
375
+
376
+ # Create a DataFrame with PCA results
377
+ pca_df = pd.DataFrame(data=pca_result, columns=['PC1', 'PC2'])
378
+
379
+ # If categorical column exists, use it for color
380
+ if categorical_cols:
381
+ cat_col = categorical_cols[0]
382
+ pca_df[cat_col] = df[cat_col].values
383
+ fig = px.scatter(pca_df, x='PC1', y='PC2', color=cat_col,
384
+ title="PCA Visualization")
385
+ else:
386
+ fig = px.scatter(pca_df, x='PC1', y='PC2',
387
+ title="PCA Visualization")
388
+
389
+ variance_ratio = pca.explained_variance_ratio_
390
+ fig.update_layout(
391
+ annotations=[
392
+ dict(
393
+ text=f"PC1 explained variance: {variance_ratio[0]:.2f}",
394
+ showarrow=False,
395
+ x=0.5,
396
+ y=1.05,
397
+ xref="paper",
398
+ yref="paper"
399
+ ),
400
+ dict(
401
+ text=f"PC2 explained variance: {variance_ratio[1]:.2f}",
402
+ showarrow=False,
403
+ x=0.5,
404
+ y=1.02,
405
+ xref="paper",
406
+ yref="paper"
407
+ )
408
+ ]
409
+ )
410
+
411
+ visualizations['pca'] = fig
412
+ print("Generated PCA visualization")
413
+ except Exception as e:
414
+ print(f"Error creating PCA visualization: {e}")
415
+
416
+ except Exception as e:
417
+ print(f"Error in visualization generation: {e}")
418
+
419
+ print(f"Generated {len(visualizations)} visualizations")
420
+
421
+ # If no visualizations were created, add a fallback
422
+ if not visualizations:
423
+ visualizations['fallback'] = generate_fallback_visualization(df)
424
 
425
  return visualizations
426
 
427
+ def generate_fallback_visualization(df):
428
+ """Generate a simple fallback visualization using matplotlib."""
429
+ try:
430
+ plt.figure(figsize=(10, 6))
431
+
432
+ # Choose what to plot based on data types
433
+ numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
434
+ if numeric_cols:
435
+ # Plot first numeric column
436
+ col = numeric_cols[0]
437
+ plt.hist(df[col].dropna(), bins=20)
438
+ plt.title(f"Distribution of {col}")
439
+ plt.xlabel(col)
440
+ plt.ylabel("Count")
441
+ else:
442
+ # Plot count of first column values
443
+ col = df.columns[0]
444
+ value_counts = df[col].value_counts().nlargest(10)
445
+ plt.bar(value_counts.index.astype(str), value_counts.values)
446
+ plt.title(f"Top values for {col}")
447
+ plt.xticks(rotation=45)
448
+ plt.ylabel("Count")
449
+
450
+ # Create a plotly figure from matplotlib
451
+ fig = go.Figure()
452
+
453
+ # Add trace based on the type of plot
454
+ if numeric_cols:
455
+ hist, bin_edges = np.histogram(df[numeric_cols[0]].dropna(), bins=20)
456
+ bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
457
+ fig.add_trace(go.Bar(x=bin_centers, y=hist, name=numeric_cols[0]))
458
+ fig.update_layout(title=f"Distribution of {numeric_cols[0]}")
459
+ else:
460
+ col = df.columns[0]
461
+ counts = df[col].value_counts().nlargest(10)
462
+ fig.add_trace(go.Bar(x=counts.index.astype(str), y=counts.values, name=col))
463
+ fig.update_layout(title=f"Top values for {col}")
464
+
465
+ return fig
466
+ except Exception as e:
467
+ print(f"Error generating fallback visualization: {e}")
468
+ # Create an empty plotly figure as last resort
469
+ fig = go.Figure()
470
+ fig.add_annotation(text="Could not generate visualization", showarrow=False)
471
+ fig.update_layout(title="Visualization Error")
472
+ return fig
473
+
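The commit adds `base64` and `BytesIO` imports while the fallback above still returns a Plotly figure; if a pure-matplotlib fallback were ever rendered instead, those imports would typically be used along these lines (the helper `fig_to_img_html` is hypothetical, not part of the commit):

```python
# Sketch only: embed a matplotlib figure as an inline <img> for a gr.HTML component.
import base64
from io import BytesIO
import matplotlib.pyplot as plt

def fig_to_img_html(fig: plt.Figure) -> str:
    buf = BytesIO()
    fig.savefig(buf, format="png", bbox_inches="tight")
    buf.seek(0)
    encoded = base64.b64encode(buf.read()).decode("utf-8")
    return f'<img src="data:image/png;base64,{encoded}" alt="fallback plot"/>'
```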
474
  def get_ai_cleaning_recommendations(df):
475
  """Get AI-powered recommendations for data cleaning using OpenAI."""
476
  try:
477
+ # Check if OpenAI API key is available
478
+ global OPENAI_API_KEY
479
+ if not OPENAI_API_KEY:
480
+ return """
481
+ ## OpenAI API Key Not Configured
482
+
483
+ Please set your OpenAI API key in the Settings tab to get AI-powered data cleaning recommendations.
484
+
485
+ Without an API key, here are some general recommendations:
486
+
487
+ * Handle missing values by either removing rows or imputing with mean/median/mode
488
+ * Remove duplicate rows if present
489
+ * Convert date-like string columns to proper datetime format
490
+ * Standardize text data by removing extra spaces and converting to lowercase
491
+ * Check for and handle outliers in numerical columns
492
+ """
493
+
494
  # Prepare the dataset summary
495
  summary = {
496
  "shape": df.shape,
 
517
  Format your response as markdown and ONLY include the cleaning recommendations.
518
  """
519
 
520
+ # Use the OpenAI API key
521
+ openai.api_key = OPENAI_API_KEY
522
+ response = openai.ChatCompletion.create(
523
+ model="gpt-3.5-turbo",
524
+ messages=[
525
+ {"role": "system", "content": "You are a data science assistant focused on data cleaning recommendations."},
526
+ {"role": "user", "content": prompt}
527
+ ],
528
+ max_tokens=700
529
+ )
530
+ return response.choices[0].message.content
531
+ except Exception as e:
532
+ # Fallback to Hugging Face model if OpenAI call fails
533
+ global data_assistant
534
+ if data_assistant is None:
535
+ data_assistant = initialize_ai_models()
536
+
537
+ if data_assistant:
 
538
  # Shorten the prompt for the smaller model
539
  short_prompt = f"Data cleaning recommendations for dataset with {df.shape[0]} rows, {df.shape[1]} columns, and columns: {', '.join(df.columns[:5])}..."
540
 
541
+ try:
542
+ # Generate recommendations
543
+ recommendations = data_assistant(
544
+ short_prompt,
545
+ max_length=500,
546
+ num_return_sequences=1
547
+ )[0]['generated_text']
548
+
549
+ return f"""
550
+ ## Data Cleaning Recommendations
551
+
552
+ * Handle missing values in columns with appropriate imputation techniques
553
+ * Check for and remove duplicate records
554
+ * Standardize text fields and correct spelling errors
555
+ * Convert columns to appropriate data types
556
+ * Check for and handle outliers in numerical columns
557
+
558
+ Note: Using basic AI model as OpenAI API encountered an error: {str(e)}
559
+ """
560
+ except:
561
+ pass
562
+
563
  return f"""
564
  ## Data Cleaning Recommendations
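For reference, the `openai.ChatCompletion.create` call above only exists in the pre-1.0 `openai` SDK, so this code assumes an older pinned dependency; with the 1.x client the equivalent call would look roughly like this (a sketch, not part of the commit):

```python
# Sketch only: openai>=1.0 equivalent of the ChatCompletion call used above.
from openai import OpenAI

client = OpenAI(api_key=OPENAI_API_KEY)
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a data science assistant focused on data cleaning recommendations."},
        {"role": "user", "content": prompt},
    ],
    max_tokens=700,
)
recommendations = response.choices[0].message.content
```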
565
 
 
575
  def get_hf_model_insights(df):
576
  """Get dataset insights using Hugging Face model."""
577
  try:
578
+ global data_assistant, HF_API_TOKEN
579
+
580
+ # Check if HF token is set
581
+ if not HF_API_TOKEN and not data_assistant:
582
+ return """
583
+ ## Hugging Face API Token Not Configured
584
+
585
+ Please set your Hugging Face API token in the Settings tab to get AI-powered data analysis insights.
586
+
587
+ Without an API token, here are some general analysis suggestions:
588
+
589
+ 1. Examine the distribution of each numeric column
590
+ 2. Analyze correlations between numeric features
591
+ 3. Look for patterns in categorical data
592
+ 4. Consider creating visualizations like histograms and scatter plots
593
+ 5. Explore relationships between different variables
594
+ """
595
+
596
+ # Initialize the model if not already done
597
  if data_assistant is None:
598
  data_assistant = initialize_ai_models()
599
 
600
+ if not data_assistant:
601
+ return """
602
+ ## AI Model Not Available
603
+
604
+ Could not initialize the Hugging Face model. Please check your API token or try again later.
605
+
606
+ Here are some general analysis suggestions:
607
+
608
+ 1. Examine the distribution of each numeric column
609
+ 2. Analyze correlations between numeric features
610
+ 3. Look for patterns in categorical data
611
+ 4. Consider creating pivot tables to understand relationships
612
+ 5. Look for time-based patterns if datetime columns are present
613
+ """
614
+
615
  # Prepare a brief summary of the dataset
616
  numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
617
  categorical_cols = df.select_dtypes(include=['object', 'category']).columns.tolist()
 
666
  # Read the file
667
  df = read_file(file)
668
 
669
+ if isinstance(df, str): # Error message
670
  return df, None, None, None
671
 
672
  # Convert date columns to datetime
 
850
 
851
  return cleaned_df, cleaning_log
852
 
 
853
  def apply_cleaning_ui(file, handle_missing, missing_method, remove_duplicates,
854
  handle_outliers, outlier_method, convert_dates, date_columns,
855
  normalize_numeric):
 
860
  # Read the file
861
  df = read_file(file)
862
 
863
+ if isinstance(df, str): # Error message
864
  return df, None
865
 
866
  # Configure cleaning options
 
901
 
902
  return result_summary, buffer
903
 
904
+ def app_ui(file):
905
+ """Main function for the Gradio interface."""
906
+ if file is None:
907
+ return "Please upload a file to begin analysis.", None, None, None
908
+
909
+ print(f"Processing file in app_ui: {file.name if hasattr(file, 'name') else 'unknown'}")
910
+
911
+ # Process the file
912
+ analysis, visualizations, cleaning_recommendations, analysis_insights = process_file(file)
913
+
914
+ if isinstance(analysis, str): # Error message
915
+ print(f"Error in analysis: {analysis}")
916
+ return analysis, None, None, None
917
+
918
+ # Format analysis for display
919
+ analysis_html = display_analysis(analysis)
920
+
921
+ # Prepare visualizations for display
922
+ viz_html = ""
923
+ if visualizations and not isinstance(visualizations, str):
924
+ print(f"Processing {len(visualizations)} visualizations for display")
925
+ for viz_name, fig in visualizations.items():
926
+ try:
927
+ # For debugging, print visualization object info
928
+ print(f"Visualization {viz_name}: type={type(fig)}")
929
+
930
+ # Convert plotly figure to HTML
931
+ html_content = fig.to_html(full_html=False, include_plotlyjs="cdn")
932
+ print(f"Generated HTML for {viz_name}, length: {len(html_content)}")
933
+
934
+ viz_html += f'<div style="margin-bottom: 30px;">{html_content}</div>'
935
+ print(f"Added visualization: {viz_name}")
936
+ except Exception as e:
937
+ print(f"Error rendering visualization {viz_name}: {e}")
938
+ else:
939
+ print(f"No visualizations to display: {visualizations}")
940
+ viz_html = "<p>No visualizations could be generated for this dataset.</p>"
941
+
942
+ # Combine analysis and visualizations
943
+ result_html = f"""
944
+ <div style="display: flex; flex-direction: column;">
945
+ <div>{analysis_html}</div>
946
+ <h2>Data Visualizations</h2>
947
+ <div>{viz_html}</div>
948
+ </div>
949
+ """
950
+
951
+ return result_html, visualizations, cleaning_recommendations, analysis_insights
952
+
953
+ def test_visualization():
954
+ """Create a simple test visualization to verify plotly is working."""
955
+ import plotly.express as px
956
+ import numpy as np
957
+
958
+ # Create sample data
959
+ x = np.random.rand(100)
960
+ y = np.random.rand(100)
961
+
962
+ # Create a simple scatter plot
963
+ fig = px.scatter(x=x, y=y, title="Test Plot")
964
+
965
+ # Convert to HTML
966
+ html = fig.to_html(full_html=False, include_plotlyjs="cdn")
967
+
968
+ return html
969
+
970
  # Create Gradio interface
971
  with gr.Blocks(title="Data Visualization & Cleaning AI") as demo:
972
  gr.Markdown("# Data Visualization & Cleaning AI")
973
  gr.Markdown("Upload your data file (CSV, Excel, JSON, or TXT) and get automatic analysis, visualizations, and AI-powered insights.")
974
 
975
+ with gr.Tabs() as tabs:
 
 
 
976
  with gr.TabItem("Data Analysis"):
977
  with gr.Row():
978
+ file_input = gr.File(label="Upload Data File")
979
  analyze_button = gr.Button("Analyze Data")
980
 
981
+ # Add test visualization to verify Plotly is working
982
+ test_viz_html = test_visualization()
983
+ gr.HTML(f"<details><summary>Plotly Test (Click to expand)</summary>{test_viz_html}</details>", visible=True)
984
+
985
  with gr.Tabs():
986
  with gr.TabItem("Analysis & Visualizations"):
987
  output = gr.HTML(label="Results")
 
1020
  clean_button = gr.Button("Clean Data")
1021
  cleaning_output = gr.HTML(label="Cleaning Results")
1022
  cleaned_file_output = gr.File(label="Download Cleaned Data")
1023
+
1024
+ with gr.TabItem("Settings"):
1025
+ gr.Markdown("### API Key Configuration")
1026
+ gr.Markdown("Enter your API keys to enable AI-powered features.")
1027
+
1028
+ with gr.Group():
1029
+ gr.Markdown("#### OpenAI API Key")
1030
+ gr.Markdown("Required for advanced data cleaning recommendations.")
1031
+ openai_key_input = gr.Textbox(
1032
+ label="OpenAI API Key",
1033
+ placeholder="sk-...",
1034
+ type="password"
1035
+ )
1036
+ openai_key_button = gr.Button("Save OpenAI API Key")
1037
+ openai_key_status = gr.Markdown("Status: Not configured")
1038
+
1039
+ with gr.Group():
1040
+ gr.Markdown("#### Hugging Face API Token")
1041
+ gr.Markdown("Required for AI-powered data analysis insights.")
1042
+ hf_token_input = gr.Textbox(
1043
+ label="Hugging Face API Token",
1044
+ placeholder="hf_...",
1045
+ type="password"
1046
+ )
1047
+ hf_token_button = gr.Button("Save Hugging Face Token")
1048
+ hf_token_status = gr.Markdown("Status: Not configured")
1049
 
1050
  # Connect the buttons to functions
1051
  analyze_button.click(
 
1063
  ],
1064
  outputs=[cleaning_output, cleaned_file_output]
1065
  )
1066
+
1067
+ openai_key_button.click(
1068
+ fn=set_openai_key,
1069
+ inputs=[openai_key_input],
1070
+ outputs=[openai_key_status]
1071
+ )
1072
+
1073
+ hf_token_button.click(
1074
+ fn=set_hf_token,
1075
+ inputs=[hf_token_input],
1076
+ outputs=[hf_token_status]
1077
+ )
1078
 
1079
  # Initialize AI models
1080
  try: