import os
import json

import gradio as gr
from TTS.api import TTS

# Initialise the TTS model (XTTS v2, CPU only here)
tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2", gpu=False)

# Base folder for all projects
base_output_folder = "projects"
os.makedirs(base_output_folder, exist_ok=True)


# Create a new project or load an existing one
def create_or_load_project(project_name, speaker, agree):
    if not agree:
        raise gr.Error("❗ Veuillez accepter les conditions d'utilisation.")

    project_folder = os.path.join(base_output_folder, project_name)
    os.makedirs(project_folder, exist_ok=True)

    project_data = {
        "name": project_name,
        "speaker": speaker,
        "sections": []
    }

    # Load the project if it already exists, otherwise create its data file
    project_file = os.path.join(project_folder, "project_data.json")
    if os.path.exists(project_file):
        with open(project_file, "r") as f:
            project_data = json.load(f)
    else:
        with open(project_file, "w") as f:
            json.dump(project_data, f)

    return project_data


# Add a new section or update an existing one
def update_section(project_data, section_name, section_text):
    for section in project_data["sections"]:
        if section["name"] == section_name:
            section["text"] = section_text
            return project_data

    project_data["sections"].append({
        "name": section_name,
        "text": section_text,
        "audio_path": None
    })
    return project_data


# Generate the audio for one section. XTTS voice cloning expects reference
# clips named like "examples/<speaker>*.wav" to be present.
def generate_section_audio(section_name, text, speaker):
    output_path = os.path.join(base_output_folder, f"{section_name}.wav")
    speaker_wavs = [
        os.path.join("examples", f)
        for f in os.listdir("examples")
        if f.startswith(speaker) and f.endswith(".wav")
    ]
    tts.tts_to_file(
        text=text,
        file_path=output_path,
        speaker_wav=speaker_wavs,
        language="fr"
    )
    return output_path


# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# 🎙️ Projet Margaux TTS")

    # Step 1: create or load a project
    with gr.Tab("🔧 Création du Projet"):
        project_name = gr.Textbox(label="📝 Nom du projet")
        speaker = gr.Dropdown(label="🔊 Voix", choices=["Margaux"], value="Margaux")
        agree = gr.Checkbox(label="✅ J'accepte les conditions d'utilisation")
        create_btn = gr.Button("🚀 Créer/Charger le Projet")

    # Step 2: manage sections
    with gr.Tab("📋 Gestion des Sections"):
        project_info = gr.JSON(label="📁 Informations du projet", visible=False)
        sections_list = gr.List(label="📜 Sections du projet")

        # Inputs for adding or updating a section
        section_name = gr.Textbox(label="📝 Nom de la section")
        section_text = gr.Textbox(label="✍️ Texte de la section", lines=5)
        add_section_btn = gr.Button("➕ Ajouter/Mettre à jour la section")

        # Audio for each section is generated through the dynamically
        # rendered controls defined below the tabs.

    # Callbacks
    def load_project(project_name, speaker, agree):
        project_data = create_or_load_project(project_name, speaker, agree)
        # gr.List expects rows, i.e. a list of lists
        sections = [[section["name"]] for section in project_data["sections"]]
        return project_data, sections

    def add_section(project_data, section_name, section_text):
        if not project_data:
            raise gr.Error("❗ Créez ou chargez d'abord un projet.")
        updated_project = update_section(project_data, section_name, section_text)
        sections = [[section["name"]] for section in updated_project["sections"]]
        return updated_project, sections

    # Event wiring
    create_btn.click(load_project,
                     inputs=[project_name, speaker, agree],
                     outputs=[project_info, sections_list])
    add_section_btn.click(add_section,
                          inputs=[project_info, section_name, section_text],
                          outputs=[project_info, sections_list])
    # Dynamically render one "generate audio" button and one audio player per
    # section. gr.render() re-runs this block whenever project_info changes
    # (requires a Gradio release that provides gr.render, i.e. Gradio 4.x).
    @gr.render(inputs=[project_info])
    def render_audio_controls(project_data):
        if not project_data or not project_data.get("sections"):
            return
        for section in project_data["sections"]:
            sec_name, sec_text = section["name"], section["text"]
            audio_btn = gr.Button(f"🎤 Générer l'audio pour {sec_name}")
            audio_out = gr.Audio(label=f"🔊 Audio de {sec_name}", type="filepath")
            audio_btn.click(
                lambda spk, name=sec_name, text=sec_text: generate_section_audio(name, text, spk),
                inputs=speaker,
                outputs=audio_out,
            )

# Launch the interface
demo.launch()
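
# Sketch: persisting project updates back to disk.
# The app writes project_data.json only when a project is first created;
# section edits and generated audio paths are kept in memory only. A helper
# along these lines (illustrative, not wired into the interface above) could
# be called from add_section() and from the per-section click handler to keep
# the JSON file in sync with the "audio_path" field each section carries.
def save_project(project_data):
    # Assumes the same "projects/<name>/project_data.json" layout used above.
    project_folder = os.path.join(base_output_folder, project_data["name"])
    with open(os.path.join(project_folder, "project_data.json"), "w") as f:
        json.dump(project_data, f, ensure_ascii=False, indent=2)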