import tensorflow as tf
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split

from utils import load_dataset
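# Assumption (not verified here): utils.load_dataset("data") is expected to return
# X as an array of spectrogram-like images shaped (N, 128, 128, 3), matching the
# MobileNetV2 input below, and y as binary labels (0/1).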
# 1. Load the data
X, y = load_dataset("data")
print(f"✅ Data loaded: {X.shape[0]} audio clips (shape {X.shape[1:]})")
# 2. Split into training/validation sets
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
# 3. Build the model (MobileNetV2 + fine-tuning)
base_model = MobileNetV2(
    weights='imagenet',
    include_top=False,
    input_shape=(128, 128, 3)
)
# Freeze the base layers (optional when data is scarce)
for layer in base_model.layers:
    layer.trainable = False
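# If true fine-tuning is wanted later (step 3 mentions it), a common variant is to
# unfreeze only the last few base layers instead of keeping everything frozen,
# e.g. (layer count is an illustrative assumption):
# for layer in base_model.layers[-20:]:
#     layer.trainable = True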
# Add a custom classification head
x = GlobalAveragePooling2D()(base_model.output)
x = Dense(128, activation='relu')(x)
predictions = Dense(1, activation='sigmoid')(x)  # Binary output
model = Model(inputs=base_model.input, outputs=predictions)
# 4. Compile (emphasis on recall)
# Naming the Recall metric explicitly guarantees the 'val_recall' key that the
# checkpoint below monitors.
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy', tf.keras.metrics.Recall(name='recall')]
)
# 5. Callbacks (save the best model)
checkpoint = ModelCheckpoint(
    "model.h5",
    monitor='val_recall',  # Prioritize recall on the validation set
    mode='max',
    save_best_only=True,
    verbose=1
)
# 6. Train
history = model.fit(
    X_train, y_train,
    epochs=15,
    batch_size=32,
    validation_data=(X_val, y_val),
    callbacks=[checkpoint],
    class_weight={0: 1, 1: 4}  # Adjust if the classes are imbalanced
)
print("✅ Entrenamiento completado. Modelo guardado como 'model.h5'")