eyupipler commited on
Commit
44734a1
1 Parent(s): 3537a0b

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +99 -1
README.md CHANGED
@@ -18,6 +18,8 @@ base_model:
18
  pipeline_tag: video-text-to-text
19
  ---
20
 
 
 
21
  # Xbai-Epilepsy 1.0 Sürümü (TR)
22
 
23
  ## Tanım
@@ -49,4 +51,100 @@ It is available for personal use for everyone, primarily hospitals, health and s
49
  ### Classes
50
 
51
  - **No Epileptic Seizure**: The person is not in a seizure.
52
- - **Epilepsy Seizure Detected**: The person is having or about to have a seizure.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  pipeline_tag: video-text-to-text
19
  ---
20
 
21
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/65e21f5133d3600496498125/Jlt301igJbSI0K_wqCjVf.png)
22
+
23
  # Xbai-Epilepsy 1.0 Sürümü (TR)
24
 
25
  ## Tanım
 
51
  ### Classes
52
 
53
  - **No Epileptic Seizure**: The person is not in a seizure.
54
+ - **Epilepsy Seizure Detected**: The person is having or about to have a seizure.
55
+
56
+ ---
57
+
58
+ # Kullanım / Usage
59
+
60
+ ```python
61
+ import tensorflow as tf
62
+ import numpy as np
63
+ import pandas as pd
64
+ from tensorflow.keras.models import load_model
65
+ from tensorflow.keras import layers
66
+ from PIL import Image
67
+ import io
68
+
69
+
70
def build_cnn(input_shape=(224, 224, 3)):
    """Build the image branch: a small 3-stage CNN binary classifier.

    Args:
        input_shape: Shape of one input image; defaults to 224x224 RGB.

    Returns:
        A compiled-free ``tf.keras.Model`` mapping an image tensor to a
        2-way softmax (seizure / no seizure).
    """
    cnn_input = layers.Input(shape=input_shape)
    # Three conv + max-pool stages with widening filter counts (16 -> 32 -> 64).
    x = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(cnn_input)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Flatten()(x)
    x = layers.Dense(256, activation='relu')(x)
    x = layers.Dropout(0.5)(x)
    cnn_output = layers.Dense(2, activation='softmax')(x)

    # BUG FIX: the original called `models.Model(...)` but `models` was never
    # imported (only `load_model` and `layers` are) -> NameError. Use the
    # fully-qualified tf.keras.Model instead.
    cnn_model = tf.keras.Model(inputs=cnn_input, outputs=cnn_output)
    return cnn_model
85
+
86
+
87
def build_lstm(input_shape=(178, 2)):
    """Build the EEG time-series branch: a single-layer LSTM classifier.

    Args:
        input_shape: (timesteps, channels) of one EEG sample; defaults to
            178 timesteps with 2 channels, matching the preprocessing in the
            usage script below.

    Returns:
        A ``tf.keras.Model`` mapping an EEG tensor to a 2-way softmax.
    """
    lstm_input = layers.Input(shape=input_shape)
    x = layers.LSTM(64, activation='relu')(lstm_input)
    lstm_output = layers.Dense(2, activation='softmax')(x)

    # BUG FIX: the original called `models.Model(...)` but `models` was never
    # imported -> NameError. Use the fully-qualified tf.keras.Model instead.
    lstm_model = tf.keras.Model(inputs=lstm_input, outputs=lstm_output)
    return lstm_model
94
+
95
+
96
def bytes_to_image_array(byte_data):
    """Decode raw image bytes into a 224x224x3 RGB numpy array.

    Args:
        byte_data: Encoded image bytes (e.g. the ``bytes`` field of a
            parquet image cell).

    Returns:
        A numpy array of shape (224, 224, 3).

    Raises:
        ValueError: If the decoded array does not have the expected shape.
    """
    decoded = Image.open(io.BytesIO(byte_data)).resize((224, 224)).convert('RGB')
    array = np.array(decoded)
    # Guard clause: reject anything that did not decode to the expected shape.
    if array.shape != (224, 224, 3):
        raise ValueError(f"Unexpected image shape: {array.shape}")
    return array
105
+
106
+
107
import ast

# --- Load the trained multimodal (image + EEG) model -----------------------
model_path = 'model/path'
multimodal_model = load_model(model_path)
print("Model loaded successfully.")  # fixed wording of the status message

multimodal_model.summary()

# --- Image branch input: parquet with an 'image' column of {'bytes': ...} --
data_path = 'image/parquet/data/path'
data = pd.read_parquet(data_path)

try:
    X_images = np.array([bytes_to_image_array(img['bytes']) for img in data['image']])
except ValueError as e:
    print(e)

# --- EEG branch input: CSV with stringified-list 'data' and 'label' columns -
eeg_data_path = 'epileptic/seizure/data/path'
eeg_data = pd.read_csv(eeg_data_path)

eeg_data.columns = eeg_data.columns.str.strip()
# SECURITY FIX: ast.literal_eval parses list literals without executing
# arbitrary code; the original used eval(), which runs anything in the CSV.
eeg_data['data'] = eeg_data['data'].apply(ast.literal_eval)
X_time_series = np.array(eeg_data['data'].to_list())
y = eeg_data['label'].values

X_images = X_images / 255.0  # scale pixels to [0, 1]

# Evaluate on the first 32 samples of each modality.
X_test_images = X_images[:32]
X_test_time_series = X_time_series[:32]
y_test = y[:32]

X_test_time_series = np.expand_dims(X_test_time_series, axis=-1)  # (None, 178, 1)
X_test_time_series = np.repeat(X_test_time_series, 2, axis=-1)    # (None, 178, 2)

test_loss, test_accuracy = multimodal_model.evaluate([X_test_images, X_test_time_series], y_test)
# FIX: loss is not a percentage; print it as a plain value.
print(f"Test Loss: {test_loss:.4f}")
print(f"Test Accuracy: {100 * test_accuracy}%")

y_pred = multimodal_model.predict([X_test_images, X_test_time_series])

# BUG FIX: the original looped range(50) over a 32-sample slice, raising
# IndexError at i == 32. Iterate over the actual test-set size instead.
for i in range(len(y_test)):
    print(f"Sample {i+1}: Real Label: {y_test[i]}, Prediction: {np.argmax(y_pred[i])}")
146
+ ```
147
+
148
+ # Python Sürümü / Python Version
146
+
147
+ ### Supported: Python 3.9 – 3.13