wasmdashai committed
Commit 2d850c2 · verified · 1 Parent(s): 217cd0c

Update app.py

Files changed (1)
  1. app.py +22 -7
app.py CHANGED
@@ -93,20 +93,35 @@ def _inference_forward_stream(
         yield spectrogram


-def get_model(name_model):
+def get_model(name_model):
     global models
-    if name_model in models:
-        tokenizer = AutoTokenizer.from_pretrained(name_model, token=token)
-        return models[name_model], tokenizer
-
-    models[name_model] = VitsModel.from_pretrained(name_model, token=token)
+    if name_model in models:
+        if name_model == 'wasmdashai/vits-en-v1':
+            tokenizer = AutoTokenizer.from_pretrained("wasmdashai/vits-en-v1", token=token)
+        else:
+            tokenizer = AutoTokenizer.from_pretrained("wasmdashai/vtk", token=token)
+
+        return models[name_model], tokenizer
+
+    models[name_model] = VitsModel.from_pretrained(name_model, token=token)
+
     models[name_model].decoder.apply_weight_norm()
+    # torch.nn.utils.weight_norm(self.decoder.conv_pre)
+    # torch.nn.utils.weight_norm(self.decoder.conv_post)
     for flow in models[name_model].flow.flows:
         torch.nn.utils.weight_norm(flow.conv_pre)
         torch.nn.utils.weight_norm(flow.conv_post)
-
-    tokenizer = AutoTokenizer.from_pretrained(name_model, token=token)
-    return models[name_model], tokenizer
+
+    if name_model == 'wasmdashai/vits-en-v1':
+        tokenizer = AutoTokenizer.from_pretrained("wasmdashai/vits-en-v1", token=token)
+    else:
+        tokenizer = AutoTokenizer.from_pretrained("wasmdashai/vtk", token=token)
+
+    return models[name_model], tokenizer


 TXT = """السلام عليكم ورحمة الله وبركاته يا هلا وسهلا ومراحب بالغالي اخباركم طيبين ان شاء الله ارحبوا على العين والراس"""
 def process_chunk(chunk_id, spectrogram_chunk, speaker_embeddings, decoder):
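
The new get_model repeats the same tokenizer-selection if/else in two places (the cache-hit branch and the first-load path). Below is a minimal sketch of how that duplication could be factored out; it is not part of the commit, it assumes the module-level models cache and token (Hugging Face access token) that app.py already defines, and the helper name _load_tokenizer is hypothetical.

from transformers import AutoTokenizer, VitsModel
import torch

models = {}   # assumption: app.py already keeps this module-level model cache
token = None  # assumption: app.py already defines an HF access token

def _load_tokenizer(name_model):
    # Same rule as the commit: the English model uses its own tokenizer,
    # every other model falls back to the "wasmdashai/vtk" tokenizer.
    if name_model == 'wasmdashai/vits-en-v1':
        return AutoTokenizer.from_pretrained("wasmdashai/vits-en-v1", token=token)
    return AutoTokenizer.from_pretrained("wasmdashai/vtk", token=token)

def get_model(name_model):
    global models
    if name_model not in models:
        model = VitsModel.from_pretrained(name_model, token=token)
        # Same weight-norm setup as the committed code.
        model.decoder.apply_weight_norm()
        for flow in model.flow.flows:
            torch.nn.utils.weight_norm(flow.conv_pre)
            torch.nn.utils.weight_norm(flow.conv_post)
        models[name_model] = model
    return models[name_model], _load_tokenizer(name_model)

Calls keep the same shape as the committed function, e.g. model, tokenizer = get_model("wasmdashai/vits-en-v1").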