Update app.py
app.py CHANGED
@@ -8,8 +8,6 @@ import gradio as gr
 import yt_dlp
 import subprocess
 from pydub import AudioSegment
-from scipy.signal import convolve
-
 from audio_separator.separator import Separator
 from lib.infer import infer_audio
 import edge_tts
@@ -17,17 +15,22 @@ import tempfile
 import anyio
 from pathlib import Path
 from lib.language_tts import language_dict
+import os
+import zipfile
 import shutil
+import urllib.request
+import gdown
+import subprocess
 import time
 from argparse import ArgumentParser
 from download_model import download_online_model
-
 main_dir = Path().resolve()
 print(main_dir)
 
 os.chdir(main_dir)
 models_dir = main_dir / "rvc_models"
 audio_separat_dir = main_dir / "audio_input"
+
 AUDIO_DIR = main_dir / 'audio_input'
 
 
@@ -37,28 +40,35 @@ def get_folders():
         return [folder.name for folder in models_dir.iterdir() if folder.is_dir()]
     return []
 
-
 # Function to refresh and return the list of folders
 def refresh_folders():
     return gr.Dropdown.update(choices=get_folders())
 
 
+
+
 # Function to get the list of audio files in the specified directory
 def get_audio_files():
     if not os.path.exists(AUDIO_DIR):
         os.makedirs(AUDIO_DIR)
+    # List all supported audio file formats
     return [f for f in os.listdir(AUDIO_DIR) if f.lower().endswith(('.mp3', '.wav', '.flac', '.ogg', '.aac'))]
 
-
 # Function to return the full path of audio files for playback
 def load_audio_files():
     audio_files = get_audio_files()
     return [os.path.join(AUDIO_DIR, f) for f in audio_files]
 
-
+# Refresh function to update the list of files
 def refresh_audio_list():
     audio_files = load_audio_files()
-    return gr.Dropdown.update(choices=audio_files)
+    return gr.update(choices=audio_files)
+
+# Function to play selected audio file
+def play_audio(file_path):
+    return file_path
+
+
 
 
 def download_audio(url):
@@ -75,47 +85,47 @@ def download_audio(url):
     with yt_dlp.YoutubeDL(ydl_opts) as ydl:
         info_dict = ydl.extract_info(url, download=True)
         file_path = ydl.prepare_filename(info_dict).rsplit('.', 1)[0] + '.wav'
-    …
+    sample_rate, audio_data = read(file_path)
+    audio_array = np.asarray(audio_data, dtype=np.int16)
 
+    return sample_rate, audio_array
 
-async def text_to_speech_edge(text, language_code):
-    voice = language_dict.get(language_code, "default_voice")
-    communicate = edge_tts.Communicate(text, voice)
-    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
-        tmp_path = tmp_file.name
-    await communicate.save(tmp_path)
-    return tmp_path
 
 
+# Define a function to handle the entire separation process
+def separate_audio(input_audio, model_voc_inst, model_deecho, model_back_voc):
+    output_dir = audio_separat_dir
+    separator = Separator(output_dir=output_dir)
 
+    # Define output files
+    vocals = os.path.join(output_dir, 'Vocals.wav')
+    instrumental = os.path.join(output_dir, 'Instrumental.wav')
+    vocals_reverb = os.path.join(output_dir, 'Vocals (Reverb).wav')
+    vocals_no_reverb = os.path.join(output_dir, 'Vocals (No Reverb).wav')
+    lead_vocals = os.path.join(output_dir, 'Lead Vocals.wav')
+    backing_vocals = os.path.join(output_dir, 'Backing Vocals.wav')
 
-# …
-def add_simple_reverb(audio_path):
-    …
-    sound = AudioSegment.from_file(audio_path)
-
-    # Convert AudioSegment to numpy array
-    samples = np.array(sound.get_array_of_samples())
-
-    # Define a simple impulse response for reverb (can be customized)
-    impulse_response = np.concatenate([np.zeros(5000), np.array([0.5**i for i in range(1000)])])
-
-    # Apply convolution (reverb effect)
-    reverbed_samples = convolve(samples, impulse_response, mode='full')
-    reverbed_samples = reverbed_samples[:len(samples)]  # trim to original length
-
-    # Convert numpy array back to AudioSegment
-    reverbed_sound = sound._spawn(reverbed_samples.astype(np.int16).tobytes())
-
-    # Export the reverbed sound to a new file-like object (in-memory)
-    output_path = "vocals_with_reverb.wav"
-    reverbed_sound.export(output_path, format='wav')
-
-    return output_path
+    # Splitting a track into Vocal and Instrumental
+    separator.load_model(model_filename=model_voc_inst)
+    voc_inst = separator.separate(input_audio)
+    os.rename(os.path.join(output_dir, voc_inst[0]), instrumental)  # Rename to "Instrumental.wav"
+    os.rename(os.path.join(output_dir, voc_inst[1]), vocals)        # Rename to "Vocals.wav"
 
+    # Applying DeEcho-DeReverb to Vocals
+    separator.load_model(model_filename=model_deecho)
+    voc_no_reverb = separator.separate(vocals)
+    os.rename(os.path.join(output_dir, voc_no_reverb[0]), vocals_no_reverb)  # Rename to "Vocals (No Reverb).wav"
+    os.rename(os.path.join(output_dir, voc_no_reverb[1]), vocals_reverb)     # Rename to "Vocals (Reverb).wav"
 
+    # Separating Back Vocals from Main Vocals
+    separator.load_model(model_filename=model_back_voc)
+    backing_voc = separator.separate(vocals_no_reverb)
+    os.rename(os.path.join(output_dir, backing_voc[0]), backing_vocals)  # Rename to "Backing Vocals.wav"
+    os.rename(os.path.join(output_dir, backing_voc[1]), lead_vocals)     # Rename to "Lead Vocals.wav"
 
-
+    return "separation done..."
+
+# Main function to process audio (Inference)
 def process_audio(MODEL_NAME, SOUND_PATH, F0_CHANGE, F0_METHOD, MIN_PITCH, MAX_PITCH, CREPE_HOP_LENGTH, INDEX_RATE,
                   FILTER_RADIUS, RMS_MIX_RATE, PROTECT, SPLIT_INFER, MIN_SILENCE, SILENCE_THRESHOLD, SEEK_STEP,
                   KEEP_SILENCE, FORMANT_SHIFT, QUEFRENCY, TIMBRE, F0_AUTOTUNE, OUTPUT_FORMAT, upload_audio=None):
@@ -130,7 +140,7 @@ def process_audio(MODEL_NAME, SOUND_PATH, F0_CHANGE, F0_METHOD, MIN_PITCH, MAX_P
     if not MODEL_NAME:
         return "Please provide a model name."
 
-    # Run the inference
+    # Run the inference
     os.system("chmod +x stftpitchshift")
     inferred_audio = infer_audio(
         MODEL_NAME,
@@ -159,144 +169,157 @@ def process_audio(MODEL_NAME, SOUND_PATH, F0_CHANGE, F0_METHOD, MIN_PITCH, MAX_P
     return inferred_audio
 
 
+async def text_to_speech_edge(text, language_code):
+    voice = language_dict.get(language_code, "default_voice")
+    communicate = edge_tts.Communicate(text, voice)
+    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
+        tmp_path = tmp_file.name
+    await communicate.save(tmp_path)
+    return tmp_path
+
 
 
 
 
 if __name__ == '__main__':
-    parser = ArgumentParser()
-    parser.add_argument("--share", action="store_true", dest="share_enabled", default=False)
-    parser.add_argument("--listen", action="store_true", default=False)
-    parser.add_argument('--listen-host', type=str)
-    parser.add_argument('--listen-port', type=int)
+    parser = ArgumentParser(description='Generate a AI song in the song_output/id directory.', add_help=True)
+    parser.add_argument("--share", action="store_true", dest="share_enabled", default=False, help="Enable sharing")
+    parser.add_argument("--listen", action="store_true", default=False, help="Make the UI reachable from your local network.")
+    parser.add_argument('--listen-host', type=str, help='The hostname that the server will use.')
+    parser.add_argument('--listen-port', type=int, help='The listening port that the server will use.')
     args = parser.parse_args()
 
-    # Gradio Interface
-    with gr.Blocks(title="Hex RVC", theme=gr.themes.Base(primary_hue="red", secondary_hue="pink")) as app:
-        gr.Markdown("# Hex RVC - AI Audio Inference")
-        gr.Markdown("Join [AIHub](https://discord.gg/aihub) to get the RVC model!")
 
-
+
+
+    # Gradio Blocks Interface with Tabs
+    with gr.Blocks(title="Hex RVC", theme=gr.themes.Base(primary_hue="red", secondary_hue="pink")) as app:
+        gr.Markdown("# Hex RVC")
+        gr.Markdown(" join [AIHub](https://discord.gg/aihub) to get the rvc model!")
+
         with gr.Tab("Inference"):
-            gr.Markdown("## Inference Settings")
             with gr.Row():
                 MODEL_NAME = gr.Dropdown(
-                    label="Select …
+                    label="Select a Model",
                     choices=get_folders(),
                     interactive=True,
-
+                    elem_id="model_folder"
                 )
                 SOUND_PATH = gr.Dropdown(
                     choices=load_audio_files(),
-                    label="Select …
+                    label="Select an audio file",
                     interactive=True,
-
-                )
-                upload_audio = gr.Audio(
-                    label="Upload Your Own Audio",
-                    type='filepath',
-                    info="Upload an audio file if not using existing ones"
+                    value=None,
                 )
+            # Button to refresh the list of folders
+
+            with gr.Row():
+                upload_audio = gr.Audio(label="Upload Audio", type='filepath', visible=False)
 
-
-
+
+
+
+            with gr.Accordion("Conversion Settings"):
                 with gr.Row():
-                    F0_CHANGE = gr.Number(
-                        …
-                        value=0,
-                        info="Adjust the pitch of the output audio"
-                    )
-                    F0_METHOD = gr.Dropdown(
-                        choices=["crepe", "harvest", "mangio-crepe", "rmvpe", "rmvpe_legacy", "fcpe", "fcpe_legacy", "hybrid[rmvpe+fcpe]"],
-                        label="F0 Method",
-                        value="fcpe",
-                        info="Select the fundamental frequency extraction method"
-                    )
+                    F0_CHANGE = gr.Number(label="Pitch Change (semitones)", value=0)
+                    F0_METHOD = gr.Dropdown(choices=["crepe", "harvest", "mangio-crepe", "rmvpe", "rmvpe_legacy", "fcpe", "fcpe_legacy", "hybrid[rmvpe+fcpe]"], label="F0 Method", value="fcpe")
                 with gr.Row():
-                    MIN_PITCH = gr.…
-                    MAX_PITCH = gr.…
-                    CREPE_HOP_LENGTH = gr.Number(label="Crepe Hop Length", value=120)
+                    MIN_PITCH = gr.Textbox(label="Min Pitch", value="50")
+                    MAX_PITCH = gr.Textbox(label="Max Pitch", value="1100")
+                    CREPE_HOP_LENGTH = gr.Number(label="Crepe Hop Length", value=120)
                     INDEX_RATE = gr.Slider(label="Index Rate", minimum=0, maximum=1, value=0.75)
-                    FILTER_RADIUS = gr.Number(label="Filter Radius", value=3)
+                    FILTER_RADIUS = gr.Number(label="Filter Radius", value=3)
                     RMS_MIX_RATE = gr.Slider(label="RMS Mix Rate", minimum=0, maximum=1, value=0.25)
-                    PROTECT = gr.Slider(label="Protect…
-
-                    gr.…
-
+                    PROTECT = gr.Slider(label="Protect", minimum=0, maximum=1, value=0.33)
+
+            with gr.Accordion("Hex TTS", open=False):
+                input_text = gr.Textbox(lines=5, label="Input Text")
+                #output_text = gr.Textbox(label="Output Text")
+                #output_audio = gr.Audio(type="filepath", label="Exported Audio")
+                language = gr.Dropdown(choices=list(language_dict.keys()), label="Choose the Voice Model")
+                tts_convert = gr.Button("Convert")
+                tts_convert.click(fn=text_to_speech_edge, inputs=[input_text, language], outputs=[upload_audio])
+            with gr.Accordion("Advanced Settings", open=False):
+                SPLIT_INFER = gr.Checkbox(label="Enable Split Inference", value=False)
+                MIN_SILENCE = gr.Number(label="Min Silence (ms)", value=500)
+                SILENCE_THRESHOLD = gr.Number(label="Silence Threshold (dBFS)", value=-50)
+                SEEK_STEP = gr.Slider(label="Seek Step (ms)", minimum=1, maximum=10, value=1)
+                KEEP_SILENCE = gr.Number(label="Keep Silence (ms)", value=200)
+                FORMANT_SHIFT = gr.Checkbox(label="Enable Formant Shift", value=False)
+                QUEFRENCY = gr.Number(label="Quefrency", value=0)
+                TIMBRE = gr.Number(label="Timbre", value=1)
+                F0_AUTOTUNE = gr.Checkbox(label="Enable F0 Autotune", value=False)
+                OUTPUT_FORMAT = gr.Dropdown(choices=["wav", "flac", "mp3"], label="Output Format", value="wav")
+
+            output_audio = gr.Audio(label="Generated Audio", type='filepath')
 
             with gr.Row():
-                refresh_btn = gr.Button("Refresh…
-                run_button = gr.Button("…
-
-            # …
+                refresh_btn = gr.Button("Refresh")
+                run_button = gr.Button("Convert")
+
+            #ref_btn.click(update_models_list, None, outputs=MODEL_NAME)
             refresh_btn.click(
-                lambda: (refresh_audio_list(), refresh_folders()),
+                lambda: (refresh_audio_list(), refresh_folders()),
                 outputs=[SOUND_PATH, MODEL_NAME]
            )
-
-            # Run Inference and Display Result
            run_button.click(
-                …
+                process_audio,
                inputs=[MODEL_NAME, SOUND_PATH, F0_CHANGE, F0_METHOD, MIN_PITCH, MAX_PITCH, CREPE_HOP_LENGTH, INDEX_RATE,
-                        FILTER_RADIUS, RMS_MIX_RATE, PROTECT, MIN_SILENCE, SILENCE_THRESHOLD, SEEK_STEP,
+                        FILTER_RADIUS, RMS_MIX_RATE, PROTECT, SPLIT_INFER, MIN_SILENCE, SILENCE_THRESHOLD, SEEK_STEP,
                        KEEP_SILENCE, FORMANT_SHIFT, QUEFRENCY, TIMBRE, F0_AUTOTUNE, OUTPUT_FORMAT, upload_audio],
                outputs=output_audio
            )
 
-        # Other Tabs (Download Model, Audio Separation)
        with gr.Tab("Download RVC Model"):
-            gr.…
-            url = gr.Textbox(…)
-            dirname = gr.Textbox(…)
-            download_output = gr.…
-            download_button = gr.Button(…)
-
-            download_button.click(
-                download_online_model,
-                inputs=[url, dirname],
-                outputs=download_output
-            )
-
-        with gr.Tab("Audio Effect (demo)"):
-            input_audio = gr.Textbox(label="Path Audio File")
-            output_audio = gr.Audio(type="filepath", label="Processed Audio with Reverb")
-
-            reverb_btn = gr.Button("Add Reverb")
-
-            reverb_btn.click(add_simple_reverb, inputs=input_audio, outputs=output_audio)
-
+            with gr.Row():
+                url = gr.Textbox(label="Your model URL")
+                dirname = gr.Textbox(label="Your Model name")
+                outout_pah = gr.Textbox(label="output download", interactive=False)
+                button_model = gr.Button("Download model")
 
+            button_model.click(fn=download_online_model, inputs=[url, dirname], outputs=[outout_pah])
        with gr.Tab("Audio Separation"):
-            gr.…
-            …
-            …
-            …
-                        … = gr.Textbox(
+            with gr.Row():
+                input_audio = gr.Audio(type="filepath", label="Upload Audio File")
+
+            with gr.Row():
+                with gr.Accordion("Separation by Link", open = False):
+                    with gr.Row():
+                        roformer_link = gr.Textbox(
                            label = "Link",
                            placeholder = "Paste the link here",
                            interactive = True
                        )
                    with gr.Row():
-                        …
+                        gr.Markdown("You can paste the link to the video/audio from many sites, check the complete list [here](https://github.com/yt-dlp/yt-dlp/blob/master/supportedsites.md)")
                    with gr.Row():
                        roformer_download_button = gr.Button(
-                            …
-                            …
-                            …
-            separate_button = gr.Button("Separate Audio")
-            separation_output = gr.Textbox(label="Separation Output Path")
+                            "Download!",
+                            variant = "primary"
+                        )
 
-
+            roformer_download_button.click(download_audio, [roformer_link], [input_audio])
+
+            with gr.Row():
+                model_voc_inst = gr.Textbox(value='model_bs_roformer_ep_317_sdr_12.9755.ckpt', label="Vocal & Instrumental Model", visible=False)
+                model_deecho = gr.Textbox(value='UVR-DeEcho-DeReverb.pth', label="DeEcho-DeReverb Model", visible=False)
+                model_back_voc = gr.Textbox(value='mel_band_roformer_karaoke_aufr33_viperx_sdr_10.1956.ckpt', label="Backing Vocals Model", visible=False)
+
+            separate_button = gr.Button("Separate Audio")
+
+            with gr.Row():
+                outout_paht = gr.Textbox(label="output download", interactive=False)
+
            separate_button.click(
-                …
-                inputs=[input_audio, …
-                        …
-                        "mel_band_roformer_karaoke_aufr33_viperx_sdr_10.1956.ckpt"],
-                outputs=[separation_output]
+                separate_audio,
+                inputs=[input_audio, model_voc_inst, model_deecho, model_back_voc],
+                outputs=[outout_paht]
            )
 
+
+    # Launch the Gradio app
    app.launch(
        share=args.share_enabled,
        server_name=None if not args.listen else (args.listen_host or '0.0.0.0'),
-        server_port=args.listen_port
-    )
+        server_port=args.listen_port,
+    )
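For reference, the three-stage chain built by the new `separate_audio()` (vocal/instrumental split, then de-echo/de-reverb, then lead/backing split) can be exercised outside the UI. A minimal sketch, assuming the same `audio_separator` calls and checkpoint filenames used in the diff; `song.wav` is a hypothetical input path:

```python
# Sketch: drive the new separate_audio() chain directly.
# Assumes app.py is importable from the repo root (importing it prints the
# working directory and chdirs there); "song.wav" is a hypothetical input,
# and the three checkpoints are the ones the UI passes in hidden textboxes.
from app import separate_audio

status = separate_audio(
    "song.wav",                                                  # hypothetical input file
    "model_bs_roformer_ep_317_sdr_12.9755.ckpt",                 # vocal/instrumental model
    "UVR-DeEcho-DeReverb.pth",                                   # de-echo/de-reverb model
    "mel_band_roformer_karaoke_aufr33_viperx_sdr_10.1956.ckpt",  # backing-vocals model
)
print(status)  # "separation done..."; the renamed stems land under audio_input/
```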
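Similarly, the new `text_to_speech_edge()` coroutine that feeds the hidden `upload_audio` component can be driven directly with `asyncio`. A sketch, assuming `"English"` is a key that exists in `lib.language_tts.language_dict` (substitute one the repo actually defines):

```python
import asyncio

from app import text_to_speech_edge

# "English" is an assumed example key in language_dict; unknown keys fall
# back to "default_voice", which edge_tts would reject at synthesis time.
wav_path = asyncio.run(text_to_speech_edge("Hello from Hex RVC", "English"))
print(wav_path)  # path to the temporary .wav written by Communicate.save()
```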
|