Spaces: ryantts (Runtime error)
- .gitignore  +2 -0
- app.py      +17 -77
.gitignore CHANGED
@@ -170,3 +170,5 @@ vocoder/*
 models
 *.wav
 /wav_files/
+*.db
+flagged
app.py CHANGED
@@ -5,99 +5,39 @@ import scipy.io.wavfile
 from espnet2.bin.tts_inference import Text2Speech
 from espnet2.utils.types import str_or_none
 
-tagen = '
-vocoder_tagen = "
-
+tagen = 'espnet/english_male_ryanspeech_tacotron'
+vocoder_tagen = "parallel_wavegan/ljspeech_melgan.v1.long"
 
 text2speechen = Text2Speech.from_pretrained(
     model_tag=str_or_none(tagen),
     vocoder_tag=str_or_none(vocoder_tagen),
     device="cpu",
-    # Only for Tacotron 2 & Transformer
-    threshold=0.5,
-    # Only for Tacotron 2
-    minlenratio=0.0,
-    maxlenratio=10.0,
-    use_att_constraint=False,
-    backward_window=1,
-    forward_window=3,
-    # Only for FastSpeech & FastSpeech2 & VITS
-    speed_control_alpha=1.0,
-    # Only for VITS
-    noise_scale=0.333,
-    noise_scale_dur=0.333,
-)
-
-
-tagjp = 'kan-bayashi/jsut_full_band_vits_prosody'
-vocoder_tagjp = 'none'
-
-text2speechjp = Text2Speech.from_pretrained(
-    model_tag=str_or_none(tagjp),
-    vocoder_tag=str_or_none(vocoder_tagjp),
-    device="cpu",
-    # Only for Tacotron 2 & Transformer
-    threshold=0.5,
-    # Only for Tacotron 2
-    minlenratio=0.0,
-    maxlenratio=10.0,
-    use_att_constraint=False,
-    backward_window=1,
-    forward_window=3,
-    # Only for FastSpeech & FastSpeech2 & VITS
-    speed_control_alpha=1.0,
-    # Only for VITS
-    noise_scale=0.333,
-    noise_scale_dur=0.333,
 )
 
-
-vocoder_tagch = "none"
-
-text2speechch = Text2Speech.from_pretrained(
-    model_tag=str_or_none(tagch),
-    vocoder_tag=str_or_none(vocoder_tagch),
-    device="cpu",
-    # Only for Tacotron 2 & Transformer
-    threshold=0.5,
-    # Only for Tacotron 2
-    minlenratio=0.0,
-    maxlenratio=10.0,
-    use_att_constraint=False,
-    backward_window=1,
-    forward_window=3,
-    # Only for FastSpeech & FastSpeech2 & VITS
-    speed_control_alpha=1.0,
-    # Only for VITS
-    noise_scale=0.333,
-    noise_scale_dur=0.333,
-)
-
-def inference(text,lang):
+def inference(text, gender):
     with torch.no_grad():
-        if
+        if gender == "male":
             wav = text2speechen(text)["wav"]
-            scipy.io.wavfile.write("out.wav",text2speechen.fs
-
-
-
-
-
-
-            return "out.wav"
-title = "ESPnet2-TTS"
-description = "Gradio demo for ESPnet2-TTS: Extending the Edge of TTS Research. To use it, simply add your audio, or click one of the examples to load them. Read more at the links below."
-article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2110.07840' target='_blank'>ESPnet2-TTS: Extending the Edge of TTS Research</a> | <a href='https://github.com/espnet/espnet' target='_blank'>Github Repo</a></p>"
+            scipy.io.wavfile.write("out.wav", text2speechen.fs, wav.view(-1).cpu().numpy())
+    return "out.wav"
+
+
+title = "RyanSpeech TTS"
+description = "Gradio demo for RyanSpeech: the first high-quality speech dataset in the domain of conversation (the female voice will be added in the future). You get much better outputs when you use our <a href='https://www.kaggle.com/datasets/roholazandie/conformer-fastspeech2-ryanspeech'>pre-trained vocoder</a>. To use it, simply input a text, or click one of the examples to load it. Please <a href='https://www.isca-speech.org/archive/interspeech_2021/zandie21_interspeech.html'>cite</a> our work."
+article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2106.08468' target='_blank'>RyanSpeech-TTS</a> | <a href='http://mohammadmahoor.com/ryanspeech/' target='_blank'>Website</a> | <a href='https://www.kaggle.com/datasets/roholazandie/ryanspeech' target='_blank'>Download Dataset</a> | <a href='https://github.com/roholazandie/ryan-tts'>Github</a></p>"
 
-examples=[['
+examples = [['When he reached the suburbs, the light of homes was shining through curtains of all colors', "male"],
+            ['I am a fully autonomous social robot. I can talk, listen, express, understand, and remember. My programming lets me have a conversation with just about anyone.', "male"],
+            ['When in the very midst of our victory, here comes an order to halt.', "male"]]
 
 gr.Interface(
     inference,
-    [gr.inputs.Textbox(label="input text",lines=10),
+    [gr.inputs.Textbox(label="input text", lines=10),
+     gr.inputs.Radio(choices=["male", "female"], type="value", default="male", label="Gender")],
     gr.outputs.Audio(type="file", label="Output"),
     title=title,
    description=description,
     article=article,
     enable_queue=True,
     examples=examples
-).launch(debug=True)
+).launch(debug=True)
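For reference, below is a minimal standalone sketch of the same inference path outside Gradio. It reuses only calls that appear in app.py (Text2Speech.from_pretrained, the text2speech call, and scipy.io.wavfile.write) and assumes the espnet2, parallel_wavegan, torch, and scipy packages are installed and that the model and vocoder tags above can be downloaded.

import torch
import scipy.io.wavfile
from espnet2.bin.tts_inference import Text2Speech
from espnet2.utils.types import str_or_none

# Load the RyanSpeech Tacotron model and the MelGAN vocoder used by the Space.
tts = Text2Speech.from_pretrained(
    model_tag=str_or_none('espnet/english_male_ryanspeech_tacotron'),
    vocoder_tag=str_or_none("parallel_wavegan/ljspeech_melgan.v1.long"),
    device="cpu",
)

with torch.no_grad():
    # Text2Speech returns a dict; "wav" is a 1-D float tensor sampled at tts.fs Hz.
    wav = tts("When he reached the suburbs, the light of homes was shining through curtains of all colors")["wav"]

# Flatten the tensor to a NumPy array and write it out as a 16 kHz-or-higher WAV file.
scipy.io.wavfile.write("sample.wav", tts.fs, wav.view(-1).cpu().numpy())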