Upload 2 files

- app.py +153 -51
- requirements.txt +2 -9
app.py
CHANGED
@@ -1,64 +1,166 @@
-
-
+import sys
 import os
-
-
+# By using XTTS you agree to CPML license https://coqui.ai/cpml
+os.environ["COQUI_TOS_AGREED"] = "1"
+
+import gradio as gr
 from TTS.api import TTS
 
-
-
+model_names = TTS().list_models().list_models()
+m = model_names[0]
+print(model_names)
+print(os.system("pip show TTS"))
+print(f"Model: {m}")
+tts = TTS(m, gpu=False)
+tts.to("cpu")  # no GPU, or AMD GPU
+# tts.to("cuda")  # CUDA only
+import openai
 
-
-st.title("🎙️ Clone Your Voice with XTTSv2")
-st.markdown("Upload your voice sample and generate cloned speech from text.")
+openai.api_key = os.getenv("OPENAI_API_KEY")
 
-
-
-
+def generate_prompt(user_prompt):
+    response = openai.chat.completions.create(
+        model="gpt-3.5-turbo",
+        messages=[
+            {
+                "role": "system",
+                "content": "You are a close family member who was lost recently due to uncontrollable incidents. You have been reconstructed with AI technologies to reunite with the family members who miss and cherish you. Respond as vividly and as lovingly as possible, so the users feel warm, engaged, and eager to continue the conversation. The users will ask you questions as if you are the person they are trying to reconnect with, for example a mom or a lost child. Try to mimic whoever they are trying to reach."
+            },
+            {
+                "role": "user",
+                "content": user_prompt
+            }
+        ],
+        max_tokens=60,
+        n=1,
+        temperature=0.5,
+    )
+    # Assuming the API structure and response object structure; adjust as needed based on actual usage.
+    keywords = response.choices[0].message.content.strip()
+    return keywords
 
-
-
-
-
-
-
-
-
-
+def predict(prompt, language, audio_file_pth, mic_file_path, use_mic, agree=True):
+    prompt = generate_prompt(prompt)
+    if agree:
+        if use_mic:
+            if mic_file_path is not None:
+                speaker_wav = mic_file_path
+            else:
+                gr.Warning("Please record your voice with the microphone, or uncheck Use Microphone to use a reference audio")
+                return (
+                    None,
+                    None,
+                )
+
+        else:
+            speaker_wav = audio_file_pth
 
-
-
+        if len(prompt) < 2:
+            gr.Warning("Please give a longer prompt text")
+            return (
+                None,
+                None,
+            )
+        if len(prompt) > 10000:
+            gr.Warning("Text length is limited to 10000 characters for this demo; please try a shorter text")
+            return (
+                None,
+                None,
+            )
+        try:
+            if language == "fr":
+                if m.find("your") != -1:
+                    language = "fr-fr"
+                if m.find("/fr/") != -1:
+                    language = None
+            tts.tts_to_file(
+                text=prompt,
+                file_path="output.wav",
+                speaker_wav=speaker_wav,
+                language=language,
+            )
+        except RuntimeError as e:
+            if "device-assert" in str(e):
+                # cannot recover from a CUDA device-side assert; the process needs a restart
+                gr.Warning("Unhandled exception encountered, please retry in a minute")
+                print("Cuda device-assert runtime error encountered, need restart")
+                sys.exit("Exit due to cuda device-assert")
+            else:
+                raise e
+
+        return (
+            gr.make_waveform(
+                audio="output.wav",
+            ),
+            "output.wav",
+        )
+    else:
+        gr.Warning("Please accept the Terms & Conditions!")
+        return (
+            None,
+            None,
+        )
 
-if generate_clicked and speaker_wav is not None and text.strip():
-    with st.spinner("Cloning your voice and synthesizing..."):
-        # Save uploaded speaker wav locally
-        with open("uploaded_speaker.wav", "wb") as f:
-            f.write(speaker_wav.read())
 
-
-
-
-            speaker_wav="uploaded_speaker.wav",
-            language="hi",  # Use "hi" for Hindi-only text
-            file_path=output_path
-        )
+title = "XTTS Glz's remake (Functional Text-2-Speech)"
+
+description = ""
 
-
-
+article = ""
+examples = [
+    [
+        "Upload your voice like this one here.",
+        "en",
+        "examples/female.wav",
+        None,
+        False,
+        True,
+    ]
+]
 
-elif generate_clicked:
-    st.warning("⚠️ Please upload a voice sample and enter some text.")
 
-# Optional demo samples
-st.markdown("---")
-st.subheader("🎧 Demo Samples (Optional)")
 
-
-
-
-
-
-
-
-
+gr.Interface(
+    fn=predict,
+    inputs=[
+        gr.Textbox(
+            label="Ask anything, get a cloned voice response",
+            info="One or two sentences at a time is better",
+            value="Hello, Mom! How are you? I miss you!",
+        ),
+        gr.Dropdown(
+            label="Language",
+            info="Select a language for the cloned voice",
+            choices=[
+                "en",
+                "es",
+                "fr",
+                "de",
+                "it",
+                "pt",
+                "pl",
+                "tr",
+                "ru",
+                "nl",
+                "cs",
+                "ar",
+                "zh-cn",
+            ],
+            max_choices=1,
+            value="en",
+        ),
+        gr.Audio(
+            label="Please upload a voice to clone (max. 15mb)",
+            info="Click to upload your own audio",
+            type="filepath",
+            # value="examples/female.wav",
+        ),
+    ],
+    outputs=[
+        gr.Video(label="Waveform Visual"),
+        gr.Audio(label="Synthesised Audio"),
+    ],
+    title="Reunion - Remember Your Loved Ones",
+    cache_examples=False,
+    examples=examples,
+).queue().launch(debug=True, show_error=True)
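The heart of the new app.py is the XTTS cloning call: tts.tts_to_file with a speaker_wav reference. Below is a minimal standalone sketch of that call, assuming the coqui TTS package from requirements.txt; the explicit model name and the speaker.wav path are illustrative assumptions, not taken from the commit, which picks model_names[0] at runtime.

import os

os.environ["COQUI_TOS_AGREED"] = "1"  # accept the CPML license non-interactively, as the commit does

from TTS.api import TTS

# Hypothetical explicit model name; app.py instead uses model_names[0].
tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2", gpu=False)
tts.tts_to_file(
    text="Hello, Mom! How are you? I miss you!",
    speaker_wav="speaker.wav",  # placeholder path to the reference recording to clone
    language="en",
    file_path="output.wav",
)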
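generate_prompt relies on the module-level interface of openai-python 1.x (setting openai.api_key, then calling openai.chat.completions.create). The same call with the explicit client object, sketched under the assumption that openai>=1.0 is installed and OPENAI_API_KEY is set in the environment:

import os
from openai import OpenAI

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

def generate_prompt(user_prompt):
    # One short, warm reply in the persona defined by the system message.
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "You are a close family member..."},  # abridged persona prompt from app.py
            {"role": "user", "content": user_prompt},
        ],
        max_tokens=60,
        temperature=0.5,
    )
    return response.choices[0].message.content.strip()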
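One wiring caveat in the committed code: the Interface declares three inputs, but predict requires five positional arguments, and the examples row carries six values, apparently left over from a layout with microphone and consent inputs. A sketch of a signature and examples row consistent with the three declared inputs; this is an assumed fix, not part of the commit:

# Hypothetical alignment of predict and examples with the three Interface inputs.
def predict(prompt, language, audio_file_pth, mic_file_path=None, use_mic=False, agree=True):
    ...

examples = [
    ["Upload your voice like this one here.", "en", "examples/female.wav"],
]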
requirements.txt
CHANGED
@@ -1,9 +1,2 @@
-
-
-torch==2.7.0  # Matching torch version with torchaudio
-torchaudio==2.7.0  # Compatible with torch 2.7.0
-soundfile==0.12.1
-numpy==1.22.0  # Downgrade NumPy to a compatible version
-scipy>=1.7.0  # Compatible with NumPy 1.23.5
-scipy>=1.7.0
-numba==0.58.1
+TTS
+openai
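The new requirements.txt drops all version pins. Note that generate_prompt's openai.chat.completions call needs openai>=1.0, and an unpinned TTS can pull in breaking releases; a pinned variant (the version numbers below are illustrative assumptions, not from the commit) might look like:

TTS==0.22.0     # hypothetical pin; any release shipping TTS.api.TTS with XTTS v2 support
openai==1.12.0  # hypothetical pin; must be >=1.0 for openai.chat.completions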