
Commit a488c7e

remove unused variables and functions
1 parent 516eb1e commit a488c7e

File tree

4 files changed: +7 -97 lines changed


lib/rvc/pipeline.py

Lines changed: 2 additions & 5 deletions
@@ -11,8 +11,6 @@
 
 # from faiss.swigfaiss_avx2 import IndexIVFFlat # cause crash on windows' faiss-cpu installed from pip
 from fairseq.models.hubert import HubertModel
-from transformers import HubertModel as TrHubertModel
-from transformers import Wav2Vec2FeatureExtractor
 
 from .models import SynthesizerTrnMs256NSFSid
 
@@ -61,7 +59,6 @@ def get_f0(
         f0_method: str,
         inp_f0: np.ndarray = None,
     ):
-        time_step = self.window / self.sr * 1000
         f0_min = 50
         f0_max = 1100
         f0_mel_min = 1127 * np.log(1 + f0_min / 700)
@@ -114,7 +111,7 @@ def get_f0(
 
     def _convert(
         self,
-        model: Union[HubertModel, Tuple[Wav2Vec2FeatureExtractor, TrHubertModel]],
+        model: HubertModel,
         embedding_output_layer: int,
         net_g: SynthesizerTrnMs256NSFSid,
         sid: int,
@@ -231,7 +228,7 @@ def _convert(
 
     def __call__(
         self,
-        model: Union[HubertModel, Tuple[Wav2Vec2FeatureExtractor, TrHubertModel]],
+        model: HubertModel,
         embedding_output_layer: int,
         net_g: SynthesizerTrnMs256NSFSid,
         sid: int,
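
With the transformers fallback removed, _convert and __call__ only ever receive a fairseq HubertModel, leaving a single feature-extraction path. A minimal sketch of that path follows; the tensor shapes and padding-mask handling are illustrative assumptions, not code from this commit.

import torch
from fairseq.models.hubert import HubertModel


def extract_embedding(
    model: HubertModel,
    feats: torch.Tensor,            # (1, T) waveform at 16 kHz
    padding_mask: torch.Tensor,     # (1, T) bool, True where padded
    embedding_output_layer: int,
) -> torch.Tensor:
    with torch.no_grad():
        # HubertModel.extract_features returns (features, padding_mask)
        features, _ = model.extract_features(
            source=feats,
            padding_mask=padding_mask,
            output_layer=embedding_output_layer,
        )
    return features                 # (1, frames, hidden_dim)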

lib/rvc/preprocessing/extract_f0.py

Lines changed: 0 additions & 1 deletion
@@ -19,7 +19,6 @@ def compute_f0(
     f0_min: float,
 ):
     x = load_audio(path, fs)
-    p_len = x.shape[0] // hop
     if f0_method == "harvest":
         f0, t = pyworld.harvest(
             x.astype(np.double),

lib/rvc/preprocessing/extract_feature.py

Lines changed: 1 addition & 44 deletions
@@ -10,8 +10,6 @@
 import torch.nn.functional as F
 from fairseq import checkpoint_utils
 from tqdm import tqdm
-from transformers import HubertModel as TrHubertModel
-from transformers import Wav2Vec2FeatureExtractor
 
 
 def load_embedder(embedder_path: str, device):
@@ -34,42 +32,6 @@ def load_embedder(embedder_path: str, device):
     return embedder_model, cfg
 
 
-def load_transformers_hubert(repo_name: str, device):
-    try:
-        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(repo_name)
-        embedder_model = TrHubertModel.from_pretrained(repo_name).to(device)
-        if device != "cpu":
-            embedder_model = embedder_model.half()
-        else:
-            embedder_model = embedder_model.float()
-        embedder_model.eval()
-    except Exception as e:
-        print(f"Error: {e} {repo_name}")
-        traceback.print_exc()
-
-    return (feature_extractor, embedder_model), None
-
-
-def load_transformers_hubert_local(embedder_path: str, device):
-    try:
-        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
-            embedder_path, local_files_only=True
-        )
-        embedder_model = TrHubertModel.from_pretrained(
-            embedder_path, local_files_only=True
-        ).to(device)
-        if device != "cpu":
-            embedder_model = embedder_model.half()
-        else:
-            embedder_model = embedder_model.float()
-        embedder_model.eval()
-    except Exception as e:
-        print(f"Error: {e} {embedder_path}")
-        traceback.print_exc()
-
-    return (feature_extractor, embedder_model), None
-
-
 # wave must be 16k, hop_size=320
 def readwave(wav_path, normalize=False):
     wav, sr = sf.read(wav_path)
@@ -104,12 +66,7 @@ def processor(
     if embedder_load_from == "local" and not os.path.exists(embedder_path):
         return f"Embedder not found: {embedder_path}"
 
-    if embedder_load_from == "hf":
-        model, cfg = load_transformers_hubert(embedder_path, device)
-    elif embedder_load_from == "tr-local":
-        model, cfg = load_transformers_hubert_local(embedder_path, device)
-    else:
-        model, cfg = load_embedder(embedder_path, device)
+    model, cfg = load_embedder(embedder_path, device)
 
     for file in tqdm(todo, position=1 + process_id):
         try:
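
After this change, extract_feature.py loads every embedder through load_embedder alone. Its body is not shown in this diff; the sketch below is a guess at what such a fairseq-based loader typically looks like (load_embedder_sketch is an illustrative name, not the repository's actual code).

from fairseq import checkpoint_utils


def load_embedder_sketch(embedder_path: str, device: str):
    # Load a single fairseq checkpoint; returns (models, cfg, task)
    models, cfg, _ = checkpoint_utils.load_model_ensemble_and_task(
        [embedder_path], suffix=""
    )
    embedder_model = models[0].to(device)
    # fp16 on GPU, fp32 on CPU -- mirrors the removed transformers loaders
    embedder_model = embedder_model.half() if device != "cpu" else embedder_model.float()
    embedder_model.eval()
    return embedder_model, cfg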

modules/models.py

Lines changed: 4 additions & 47 deletions
@@ -6,8 +6,6 @@
 from fairseq import checkpoint_utils
 from fairseq.models.hubert.hubert import HubertModel
 from pydub import AudioSegment
-from transformers import HubertModel as TrHubertModel
-from transformers import Wav2Vec2FeatureExtractor
 
 from lib.rvc.models import SynthesizerTrnMs256NSFSid, SynthesizerTrnMs256NSFSidNono
 from lib.rvc.pipeline import VocalConvertPipeline
@@ -20,16 +18,12 @@
 
 
 EMBEDDINGS_LIST = {
-    # "hubert_base": ("hubert_base.pt", "hubert_base", "local"),
     "hubert-base-japanese": (
         "rinna_hubert_base_jp.pt",
         "hubert-base-japanese",
         "local",
     ),
     "contentvec": ("checkpoint_best_legacy_500.pt", "contentvec", "local"),
-    # "distilhubert": ("ntu-spml/distilhubert", "distilhubert", "hf"),
-    # "distilhubert-ja": ("TylorShine/distilhubert-ft-japanese-50k", "distilhubert-ja", "hf"),
-    # "distilhubert-ja_dev": ("models/pretrained/feature_extractors/distilhubert-ja-en", "distilhubert-ja_dev", "tr-local"),
 }
 
 
@@ -129,10 +123,13 @@ def single(
     )
     if embedder_model_name.endswith("768"):
         embedder_model_name = embedder_model_name[:-3]
+
     if embedder_model_name == "hubert_base":
         embedder_model_name = "contentvec"
+
     if not embedder_model_name in EMBEDDINGS_LIST.keys():
         raise Exception(f"Not supported embedder: {embedder_model_name}")
+
     if (
         embedder_model == None
         or loaded_embedder_model != EMBEDDINGS_LIST[embedder_model_name][1]
@@ -141,12 +138,7 @@ def single(
         embedder_filename, embedder_name, load_from = get_embedder(
             embedder_model_name
         )
-        if load_from == "hf":
-            load_transformers_hubert(embedder_filename, embedder_name)
-        elif load_from == "tr-local":
-            load_transformers_hubert_local(embedder_filename, embedder_name)
-        else:
-            load_embedder(embedder_filename, embedder_name)
+        load_embedder(embedder_filename, embedder_name)
 
     if embedding_output_layer == "auto":
         embedding_output_layer = (
@@ -255,41 +247,6 @@ def load_embedder(emb_file: str, emb_name: str):
     loaded_embedder_model = emb_name
 
 
-def load_transformers_hubert(repo_name: str, emb_name: str):
-    global embedder_model, loaded_embedder_model
-    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(repo_name)
-    embedder = TrHubertModel.from_pretrained(repo_name).to(device)
-
-    if is_half:
-        embedder = embedder.half()
-    else:
-        embedder = embedder.float()
-    embedder.eval()
-
-    embedder_model = (feature_extractor, embedder)
-
-    loaded_embedder_model = emb_name
-
-
-def load_transformers_hubert_local(emb_file: str, emb_name: str):
-    global embedder_model, loaded_embedder_model
-    emb_file = os.path.join(ROOT_DIR, emb_file)
-    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
-        emb_file, local_files_only=True
-    )
-    embedder = TrHubertModel.from_pretrained(emb_file, local_files_only=True).to(device)
-
-    if is_half:
-        embedder = embedder.half()
-    else:
-        embedder = embedder.float()
-    embedder.eval()
-
-    embedder_model = (feature_extractor, embedder)
-
-    loaded_embedder_model = emb_name
-
-
 def get_vc_model(model_name: str):
     model_path = os.path.join(MODELS_DIR, "checkpoints", model_name)
     weight = torch.load(model_path, map_location="cpu")
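
With the commented-out "hf" and "tr-local" entries gone, every remaining EMBEDDINGS_LIST entry resolves to a local fairseq checkpoint, so the get_embedder call in single() can stay a plain dictionary lookup. A hypothetical version of that lookup is sketched below; the real function body is not part of this diff.

# Hypothetical lookup mirroring the surviving EMBEDDINGS_LIST entries;
# every value is (checkpoint filename, canonical name, load_from="local").
EMBEDDINGS_LIST = {
    "hubert-base-japanese": (
        "rinna_hubert_base_jp.pt",
        "hubert-base-japanese",
        "local",
    ),
    "contentvec": ("checkpoint_best_legacy_500.pt", "contentvec", "local"),
}


def get_embedder(embedder_name: str):
    if embedder_name in EMBEDDINGS_LIST:
        return EMBEDDINGS_LIST[embedder_name]
    return None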
