Commit 4b5c6f0
SayaSS committed · 1 Parent(s): c17721a

update

Files changed:
- app.py +4 -4
- inference/__pycache__/infer_tool.cpython-38.pyc +0 -0
- inference/infer_tool.py +2 -2
- requirements.txt +1 -1
- utils.py +0 -1
app.py CHANGED

@@ -1,9 +1,8 @@
-import io
 import os
 import gradio as gr
 import librosa
 import numpy as np
-import
+import utils
 from inference.infer_tool import Svc
 import logging
 import webbrowser
@@ -40,6 +39,7 @@ def create_vc_fn(model, sid):
         if sampling_rate != 44100:
             audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=44100)
         out_audio, out_sr = model.infer(sid, vc_transform, audio, auto_predict_f0=auto_f0)
+        model.clear_empty()
         return "Success", (44100, out_audio.cpu().numpy())
     return vc_fn
@@ -50,11 +50,11 @@ if __name__ == '__main__':
     parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
     parser.add_argument("--colab", action="store_true", default=False, help="share gradio app")
     args = parser.parse_args()
-
+    hubert_model = utils.get_hubert_model().to(args.device)
     models = []
     for f in os.listdir("models"):
         name = f
-        model = Svc(fr"models/{f}/{f}.pth", f"models/{f}/config.json", device=args.device)
+        model = Svc(fr"models/{f}/{f}.pth", f"models/{f}/config.json", device=args.device, hubert_model=hubert_model)
         cover = f"models/{f}/cover.png" if os.path.exists(f"models/{f}/cover.png") else None
         models.append((name, cover, create_vc_fn(model, name)))
     with gr.Blocks() as app:
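In short, app.py now loads the hubert encoder once, injects the same instance into every Svc it creates, and calls clear_empty() after each conversion. A minimal sketch of that calling pattern, assuming utils.get_hubert_model() and the Svc/clear_empty API shown in this diff (the models/ directory layout and the device string are illustrative, not part of the commit):

# Sketch of the shared-hubert pattern this commit moves app.py to.
# Assumes utils.get_hubert_model() and Svc(..., hubert_model=...) as in the
# diff above; the directory layout and device string are placeholders.
import os

import utils
from inference.infer_tool import Svc

device = "cuda:0"  # or "cpu"

# Load the hubert encoder exactly once, up front...
hubert_model = utils.get_hubert_model().to(device)

models = {}
for name in os.listdir("models"):
    # ...and hand the same instance to every Svc instead of letting each
    # one load its own copy.
    models[name] = Svc(fr"models/{name}/{name}.pth",
                       f"models/{name}/config.json",
                       device=device,
                       hubert_model=hubert_model)

With several voice models loaded in one Space, sharing a single encoder avoids keeping duplicate hubert weights in memory; the added model.clear_empty() call after inference presumably releases cached memory between requests for the same reason.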
inference/__pycache__/infer_tool.cpython-38.pyc CHANGED

Binary files a/inference/__pycache__/infer_tool.cpython-38.pyc and b/inference/__pycache__/infer_tool.cpython-38.pyc differ
inference/infer_tool.py CHANGED

@@ -109,7 +109,7 @@ def split_list_by_n(list_collection, n, pre=0):
 
 
 class Svc(object):
-    def __init__(self, net_g_path, config_path,
+    def __init__(self, net_g_path, config_path, hubert_model,
                  device=None,
                  cluster_model_path="logs/44k/kmeans_10000.pt"):
         self.net_g_path = net_g_path
@@ -123,7 +123,7 @@ class Svc(object):
         self.hop_size = self.hps_ms.data.hop_length
         self.spk2id = self.hps_ms.spk
         # load hubert
-        self.hubert_model =
+        self.hubert_model = hubert_model
         self.load_model()
         if os.path.exists(cluster_model_path):
             self.cluster_model = cluster.get_cluster_model(cluster_model_path)
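Note that hubert_model becomes a required positional parameter of Svc.__init__, so any call site other than app.py has to be updated to pass an encoder in. A hedged example of the new call, assuming the signature above (the checkpoint and config paths are placeholders):

# Hypothetical call site updated for the new Svc signature; the paths below
# are placeholders, not files shipped with this Space.
import utils
from inference.infer_tool import Svc

hubert = utils.get_hubert_model().to("cpu")
svc = Svc("logs/44k/G_0.pth", "configs/config.json",
          hubert_model=hubert, device="cpu")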
requirements.txt CHANGED

@@ -1,6 +1,6 @@
 Flask
 Flask_Cors
-gradio
+gradio==3.18.0
 numpy
 playsound
 pydub
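Pinning gradio to 3.18.0 keeps the Space build reproducible and, presumably, avoids breaking changes in newer gradio releases. An illustrative sanity check that the resolved environment matches the pin:

# Illustrative check that the installed gradio matches the pinned version.
import gradio

assert gradio.__version__ == "3.18.0", f"unexpected gradio {gradio.__version__}"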
utils.py CHANGED

@@ -244,7 +244,6 @@ def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False
         model.module.load_state_dict(new_state_dict)
     else:
         model.load_state_dict(new_state_dict)
-    print("load ")
     logger.info("Loaded checkpoint '{}' (iteration {})".format(
         checkpoint_path, iteration))
     return model, optimizer, learning_rate, iteration