frimelle HF Staff committed on
Commit c5d4931 · 1 Parent(s): 63d0469

add audio cloning functionality (test)

Files changed (6)
  1. app.py +171 -116
  2. requirements.txt +2 -0
  3. src/generate.py +46 -0
  4. src/process.py +88 -0
  5. src/prompts.py +47 -0
  6. src/tts.py +43 -0
app.py CHANGED
@@ -1,118 +1,128 @@
  import gradio as gr
- import random
- import re
- import difflib
- import torch
- from functools import lru_cache
- from transformers import pipeline
-
- # ------------------- Sentence Bank (customize freely) -------------------
- SENTENCE_BANK = [
-     "The quick brown fox jumps over the lazy dog.",
-     "I promise to speak clearly and at a steady pace.",
-     "Open source makes AI more transparent and inclusive.",
-     "Hugging Face Spaces make demos easy to share.",
-     "Today the weather in Berlin is pleasantly cool.",
-     "Privacy and transparency should go hand in hand.",
-     "Please generate a new sentence for me to read.",
-     "Machine learning can amplify or reduce inequality.",
-     "Responsible AI requires participation from everyone.",
-     "This microphone test checks my pronunciation accuracy.",
- ]
-
- # ------------------- Utilities -------------------
- def normalize_text(t: str) -> str:
-     # English-only normalization: lowercase, keep letters/digits/' and -
-     t = t.lower()
-     t = re.sub(r"[^a-z0-9'\-]+", " ", t)
-     t = re.sub(r"\s+", " ", t).strip()
-     return t
-
- def similarity_and_diff(ref: str, hyp: str):
-     """Return similarity ratio (0..1) and HTML diff highlighting changes."""
-     ref_tokens = ref.split()
-     hyp_tokens = hyp.split()
-     sm = difflib.SequenceMatcher(a=ref_tokens, b=hyp_tokens)
-     ratio = sm.ratio()
      out = []
-     for op, i1, i2, j1, j2 in sm.get_opcodes():
          if op == "equal":
-             out.append(" " + " ".join(ref_tokens[i1:i2]))
          elif op == "delete":
-             out.append(
-                 ' <span style="background:#ffe0e0;text-decoration:line-through;">'
-                 + " ".join(ref_tokens[i1:i2]) + "</span>"
-             )
          elif op == "insert":
-             out.append(
-                 ' <span style="background:#e0ffe0;">'
-                 + " ".join(hyp_tokens[j1:j2]) + "</span>"
-             )
          elif op == "replace":
-             out.append(
-                 ' <span style="background:#ffe0e0;text-decoration:line-through;">'
-                 + " ".join(ref_tokens[i1:i2]) + "</span>"
-             )
-             out.append(
-                 ' <span style="background:#e0ffe0;">'
-                 + " ".join(hyp_tokens[j1:j2]) + "</span>"
-             )
      html = '<div style="line-height:1.6;font-size:1rem;">' + "".join(out).strip() + "</div>"
-     return ratio, html
-
- @lru_cache(maxsize=2)
- def get_asr(model_id: str, device_preference: str):
-     """Cache an ASR pipeline. device_preference: 'auto'|'cpu'|'cuda'."""
-     if device_preference == "cuda" and torch.cuda.is_available():
-         device = 0
-     elif device_preference == "auto":
-         device = 0 if torch.cuda.is_available() else -1
-     else:
-         device = -1
-     return pipeline(
-         "automatic-speech-recognition",
-         model=model_id,  # use English-only Whisper models (.en)
-         device=device,
-         chunk_length_s=30,
-         return_timestamps=False,
-     )

- def gen_sentence():
-     return random.choice(SENTENCE_BANK)

- def clear_all():
-     # target, hyp_out, score_out, diff_out, summary_out
-     return "", "", "", "", ""

  # ------------------- Core Check (English-only) -------------------
- def check_pronunciation(audio_path, target_sentence, model_id, device_pref, pass_threshold):
      if not target_sentence:
-         return "", "", "", "Please generate a sentence first."
-
-     asr = get_asr(model_id, device_pref)
-
-     try:
-         # IMPORTANT: For English-only Whisper (.en), do NOT pass language/task args.
-         result = asr(audio_path)
-         hyp_raw = result["text"].strip()
-     except Exception as e:
-         return "", "", "", f"Transcription failed: {e}"
-
-     ref_norm = normalize_text(target_sentence)
-     hyp_norm = normalize_text(hyp_raw)
-
-     ratio, diff_html = similarity_and_diff(ref_norm, hyp_norm)
-     passed = ratio >= pass_threshold

-     summary = (
-         f"✅ Correct (≥ {int(pass_threshold*100)}%)"
-         if passed else
-         f"❌ Not a match (need ≥ {int(pass_threshold*100)}%)"
-     )
-     score = f"Similarity: {ratio*100:.1f}%"

-     return hyp_raw, score, diff_html, summary

  # ------------------- UI -------------------
  with gr.Blocks(title="Say the Sentence (English)") as demo:
@@ -122,25 +132,28 @@ with gr.Blocks(title="Say the Sentence (English)") as demo:
          1) Generate a sentence.
          2) Record yourself reading it.
          3) Transcribe & check your accuracy.
          """
      )

      with gr.Row():
-         target = gr.Textbox(label="Target sentence", interactive=False, placeholder="Click 'Generate sentence'")

      with gr.Row():
          btn_gen = gr.Button("🎲 Generate sentence", variant="primary")
          btn_clear = gr.Button("🧹 Clear")

      with gr.Row():
-         audio = gr.Audio(sources=["microphone"], type="filepath", label="Record your voice")

      with gr.Accordion("Advanced settings", open=False):
          model_id = gr.Dropdown(
              choices=[
-                 "openai/whisper-tiny.en",         # fastest (CPU-friendly)
-                 "openai/whisper-base.en",         # better accuracy, a bit slower
-                 "distil-whisper/distil-small.en"  # optional distil English model
              ],
              value="openai/whisper-tiny.en",
              label="ASR model (English only)",
@@ -150,26 +163,68 @@ with gr.Blocks(title="Say the Sentence (English)") as demo:
              value="auto",
              label="Device preference"
          )
-         pass_threshold = gr.Slider(0.50, 1.00, value=0.85, step=0.01, label="Match threshold")

      with gr.Row():
          btn_check = gr.Button("✅ Transcribe & Check", variant="primary")

      with gr.Row():
-         hyp_out = gr.Textbox(label="Transcription", interactive=False)
      with gr.Row():
-         score_out = gr.Label(label="Score")
-         summary_out = gr.Label(label="Result")
-         diff_out = gr.HTML(label="Word-level diff (red = expected but missing / green = extra or replacement)")

-     # Events
-     btn_gen.click(fn=gen_sentence, outputs=target)
-     btn_clear.click(fn=clear_all, outputs=[target, hyp_out, score_out, diff_out, summary_out])
      btn_check.click(
-         fn=check_pronunciation,
          inputs=[audio, target, model_id, device_pref, pass_threshold],
-         outputs=[hyp_out, score_out, diff_out, summary_out]
      )

  if __name__ == "__main__":
-     demo.launch()
 
  import gradio as gr

+ import src.generate as generate
+ import src.process as process
+ import src.tts as tts
+
+
+ # ------------------- UI printing functions -------------------
+ def clear_all():
+     # target, user_transcript, score_html, diff_html, result_html,
+     # tts_text, clone_status, tts_audio
+     return "", "", "", "", "", "", "", None
+
+
+ def make_result_html(pass_threshold, passed, ratio):
+     """Returns summary and score label."""
+     summary = (
+         f"✅ Correct (≥ {int(pass_threshold * 100)}%)"
+         if passed else
+         f"❌ Not a match (need ≥ {int(pass_threshold * 100)}%)"
+     )
+     score = f"Similarity: {ratio * 100:.1f}%"
+     return summary, score
+
+
+ def make_alignment_html(ref_tokens, hyp_tokens, alignments):
+     """Returns HTML showing alignment between target and recognized user audio."""
      out = []
+     no_match_html = ' <span style="background:#ffe0e0;text-decoration:line-through;">'
+     match_html = ' <span style="background:#e0ffe0;">'
+     for span in alignments:
+         op, i1, i2, j1, j2 = span
+         ref_string = " ".join(ref_tokens[i1:i2])
+         hyp_string = " ".join(hyp_tokens[j1:j2])
          if op == "equal":
+             out.append(" " + ref_string)
          elif op == "delete":
+             out.append(no_match_html + ref_string + "</span>")
          elif op == "insert":
+             out.append(match_html + hyp_string + "</span>")
          elif op == "replace":
+             out.append(no_match_html + ref_string + "</span>")
+             out.append(match_html + hyp_string + "</span>")
      html = '<div style="line-height:1.6;font-size:1rem;">' + "".join(out).strip() + "</div>"
+     return html
+
+
+ def make_html(sentence_match):
+     """Build diff + results HTML."""
+     diff_html = make_alignment_html(sentence_match.target_tokens,
+                                     sentence_match.user_tokens,
+                                     sentence_match.alignments)
+     result_html, score_html = make_result_html(sentence_match.pass_threshold,
+                                                sentence_match.passed,
+                                                sentence_match.ratio)
+     return score_html, result_html, diff_html
+

  # ------------------- Core Check (English-only) -------------------
+ def get_user_transcript(audio_path: str | None, target_sentence: str, model_id: str, device_pref: str) -> tuple[str, str]:
+     """ASR for the input audio and basic validation."""
      if not target_sentence:
+         return "Please generate a sentence first.", ""
+     if audio_path is None:
+         return "Please start, record, then stop the audio recording before trying to transcribe.", ""
+
+     user_transcript = process.run_asr(audio_path, model_id, device_pref)
+     if isinstance(user_transcript, Exception):
+         return f"Transcription failed: {user_transcript}", ""
+     return "", user_transcript
+
+
+ def transcribe_check(audio_path, target_sentence, model_id, device_pref, pass_threshold):
+     """Transcribe user audio, compute match, and render results."""
+     error_msg, user_transcript = get_user_transcript(audio_path, target_sentence, model_id, device_pref)
+     if error_msg:
+         score_html = ""
+         diff_html = ""
+         result_html = error_msg
+     else:
+         sentence_match = process.SentenceMatcher(target_sentence, user_transcript, pass_threshold)
+         score_html, result_html, diff_html = make_html(sentence_match)
+     return user_transcript, score_html, result_html, diff_html
+
+
+ # ------------------- Voice cloning gate -------------------
+ def clone_if_pass(
+     audio_path,        # ref voice (the same recorded clip)
+     target_sentence,   # sentence the user was supposed to say
+     user_transcript,   # what ASR heard
+     tts_text,          # what we want to synthesize (in the cloned voice)
+     pass_threshold,    # must meet or exceed this
+     tts_model_id,      # e.g., "coqui/XTTS-v2"
+     tts_language,      # e.g., "en"
+ ):
+     """
+     If the user correctly read the target (>= threshold), clone their voice from the
+     recorded audio and speak 'tts_text'. Otherwise, refuse.
+     """
+     # Basic validations
+     if audio_path is None:
+         return None, "Record audio first (reference voice is required)."
+     if not target_sentence:
+         return None, "Generate a target sentence first."
+     if not user_transcript:
+         return None, "Transcribe first to verify the sentence."
+     if not tts_text:
+         return None, "Enter the sentence to synthesize."
+
+     # Recompute pass/fail to avoid relying on UI state
+     sm = process.SentenceMatcher(target_sentence, user_transcript, pass_threshold)
+     if not sm.passed:
+         return None, (
+             f"❌ Cloning blocked: your reading did not reach the threshold "
+             f"({sm.ratio*100:.1f}% < {int(pass_threshold*100)}%)."
+         )

+     # Run zero-shot cloning
+     out = tts.run_tts_clone(audio_path, tts_text, model_id=tts_model_id, language=tts_language)
+     if isinstance(out, Exception):
+         return None, f"Voice cloning failed: {out}"
+     sr, wav = out
+     # Gradio Audio can take a tuple (sr, np.array)
+     return (sr, wav), f"✅ Cloned and synthesized with {tts_model_id} ({tts_language})."


  # ------------------- UI -------------------
  with gr.Blocks(title="Say the Sentence (English)") as demo:
          1) Generate a sentence.
          2) Record yourself reading it.
          3) Transcribe & check your accuracy.
+         4) If matched, clone your voice to speak any sentence you enter.
          """
      )

      with gr.Row():
+         target = gr.Textbox(label="Target sentence", interactive=False,
+                             placeholder="Click 'Generate sentence'")

      with gr.Row():
          btn_gen = gr.Button("🎲 Generate sentence", variant="primary")
          btn_clear = gr.Button("🧹 Clear")

      with gr.Row():
+         audio = gr.Audio(sources=["microphone"], type="filepath",
+                          label="Record your voice")

      with gr.Accordion("Advanced settings", open=False):
          model_id = gr.Dropdown(
              choices=[
+                 "openai/whisper-tiny.en",
+                 "openai/whisper-base.en",
+                 "distil-whisper/distil-small.en",
              ],
              value="openai/whisper-tiny.en",
              label="ASR model (English only)",

              value="auto",
              label="Device preference"
          )
+         pass_threshold = gr.Slider(0.50, 1.00, value=0.85, step=0.01,
+                                    label="Match threshold")

      with gr.Row():
          btn_check = gr.Button("✅ Transcribe & Check", variant="primary")
+     with gr.Row():
+         user_transcript = gr.Textbox(label="Transcription", interactive=False)
+     with gr.Row():
+         score_html = gr.Label(label="Score")
+         result_html = gr.Label(label="Result")
+         diff_html = gr.HTML(
+             label="Word-level diff (red = expected but missing / green = extra or replacement)")
+
+     gr.Markdown("## 🔁 Voice cloning (gated)")
+     with gr.Row():
+         tts_text = gr.Textbox(
+             label="Text to synthesize (voice clone)",
+             placeholder="Type the sentence you want the cloned voice to say",
+         )
+     with gr.Row():
+         tts_model_id = gr.Dropdown(
+             choices=[
+                 "coqui/XTTS-v2",
+                 # add others if you like, e.g. "myshell-ai/MeloTTS"
+             ],
+             value="coqui/XTTS-v2",
+             label="TTS (voice cloning) model",
+         )
+         tts_language = gr.Dropdown(
+             choices=["en", "de", "fr", "es", "it", "pt", "pl", "tr", "ru", "nl", "cs", "ar", "zh"],
+             value="en",
+             label="Language",
+         )

      with gr.Row():
+         btn_clone = gr.Button("🔁 Clone voice (if passed)", variant="secondary")
      with gr.Row():
+         tts_audio = gr.Audio(label="Cloned speech output", interactive=False)
+         clone_status = gr.Label(label="Cloning status")
+
+     # -------- Events --------
+     # Use the pre-specified sentence bank by default
+     btn_gen.click(fn=generate.gen_sentence_set, outputs=target)
+     # Or use LLM generation:
+     # btn_gen.click(fn=generate.gen_sentence_llm, outputs=target)
+
+     btn_clear.click(
+         fn=clear_all,
+         outputs=[target, user_transcript, score_html, result_html, diff_html, tts_text, clone_status, tts_audio]
+     )

      btn_check.click(
+         fn=transcribe_check,
          inputs=[audio, target, model_id, device_pref, pass_threshold],
+         outputs=[user_transcript, score_html, result_html, diff_html]
+     )
+
+     btn_clone.click(
+         fn=clone_if_pass,
+         inputs=[audio, target, user_transcript, tts_text, pass_threshold, tts_model_id, tts_language],
+         outputs=[tts_audio, clone_status],
      )

  if __name__ == "__main__":
+     demo.launch()
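
For reference, a minimal sketch of how the new gated flow fits together outside the Gradio UI, assuming the src/ modules above are importable; the recording path, synthesized text, and threshold are placeholder values:

    import src.generate as generate
    import src.process as process
    import src.tts as tts

    target = generate.gen_sentence_set()  # pick a target sentence from the bank
    heard = process.run_asr("my_recording.wav", "openai/whisper-tiny.en", "auto")  # placeholder path
    if not isinstance(heard, Exception):
        match = process.SentenceMatcher(target, heard, 0.85)
        if match.passed:  # cloning is only attempted when the reading matches the target
            out = tts.run_tts_clone("my_recording.wav", "Hello from my cloned voice.")
            if not isinstance(out, Exception):
                sr, wav = out  # (sampling_rate, np.ndarray), the tuple form gr.Audio accepts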
requirements.txt CHANGED
@@ -3,3 +3,5 @@ transformers>=4.44.0
  torch>=2.2.0
  accelerate>=0.33.0
  sentencepiece>=0.2.0
+ numpy
+
src/generate.py ADDED
@@ -0,0 +1,46 @@
+ import random
+
+ from transformers import pipeline, AutoTokenizer
+
+ import src.process as process
+
+ # You can choose to use either:
+ # (1) a list of pre-specified sentences, in SENTENCE_BANK
+ # (2) an LLM-generated sentence.
+ # SENTENCE_BANK is used in the `gen_sentence_set` function.
+ # LLM generation is used in the `gen_sentence_llm` function.
+
+ # ------------------- Sentence Bank (customize freely) -------------------
+ SENTENCE_BANK = [
+     "The quick brown fox jumps over the lazy dog.",
+     "I promise to speak clearly and at a steady pace.",
+     "Open source makes AI more transparent and inclusive.",
+     "Hugging Face Spaces make demos easy to share.",
+     "Today the weather in Berlin is pleasantly cool.",
+     "Privacy and transparency should go hand in hand.",
+     "Please generate a new sentence for me to read.",
+     "Machine learning can amplify or reduce inequality.",
+     "Responsible AI requires participation from everyone.",
+     "This microphone test checks my pronunciation accuracy.",
+ ]
+
+
+ def gen_sentence_llm():
+     """Generates a sentence using an LLM.
+     Returns:
+         Normalized text string to display in the UI.
+     """
+     prompt = ""
+     tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+     generator = pipeline('text-generation', model='gpt2')
+     result = generator(prompt, stop_strings=[".", ], num_return_sequences=1,
+                        tokenizer=tokenizer, pad_token_id=tokenizer.eos_token_id)
+     display_text = process.normalize_text(result[0]["generated_text"],
+                                           lower=False)
+     return display_text
+
+
+ def gen_sentence_set():
+     """Returns a sentence for the user to say, drawn from a prespecified set of options."""
+     return random.choice(SENTENCE_BANK)
+
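
A small usage sketch for the two generation paths, assuming the Space's module layout; note that in this commit gen_sentence_llm generates from an empty prompt:

    import src.generate as generate

    sentence = generate.gen_sentence_set()      # random pick from SENTENCE_BANK (default wired into the UI)
    # sentence = generate.gen_sentence_llm()    # optional: GPT-2 continuation, normalized for display
    print(sentence)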
src/process.py ADDED
@@ -0,0 +1,88 @@
+ import difflib
+ import re
+ from functools import lru_cache
+
+ import torch
+ from transformers import Pipeline, pipeline
+
+
+ # ------------------- Utilities -------------------
+ def normalize_text(t: str, lower: bool = True) -> str:
+     """For normalizing LLM-generated and human-generated strings.
+     For LLMs, this removes extraneous quote marks and spaces."""
+     # English-only normalization: optionally lowercase; keep letters/digits/'-.,
+     if lower:
+         t = t.lower()
+     # TODO: Previously was re.sub(r"[^a-z0-9'\-]+", " ", t); discuss normalizing for LLMs too.
+     t = re.sub(r"[^a-zA-Z0-9'\-.,]+", " ", t)
+     t = re.sub(r"\s+", " ", t).strip()
+     return t
+
+
+ @lru_cache(maxsize=2)
+ def get_asr_pipeline(model_id: str, device_preference: str) -> Pipeline:
+     """Cache an ASR pipeline.
+     Parameters:
+         model_id: String of the desired ASR model.
+         device_preference: String of the desired device for ASR processing: "cuda", "cpu", or "auto".
+     Returns:
+         transformers.pipeline ASR component.
+     """
+     if device_preference == "cuda" and torch.cuda.is_available():
+         device = 0
+     elif device_preference == "auto":
+         device = 0 if torch.cuda.is_available() else -1
+     else:
+         device = -1
+     return pipeline(
+         "automatic-speech-recognition",
+         model=model_id,  # use English-only Whisper models (.en)
+         device=device,
+         chunk_length_s=30,
+         return_timestamps=False,
+     )
+
+
+ def run_asr(audio_path: str, model_id: str, device_pref: str) -> str | Exception:
+     """Returns the recognized user utterance from the input audio file.
+     Parameters:
+         audio_path: Filepath to the recording made with the gradio.Audio component.
+         model_id: String of the desired ASR model.
+         device_pref: String of the desired device for ASR processing: "cuda", "cpu", or "auto".
+     Returns:
+         hyp_raw: Recognized user utterance, or the Exception raised during transcription.
+     """
+     asr = get_asr_pipeline(model_id, device_pref)
+     try:
+         # IMPORTANT: For English-only Whisper (.en), do NOT pass language/task args.
+         result = asr(audio_path)
+         hyp_raw = result["text"].strip()
+     except Exception as e:
+         return e
+     return hyp_raw
+
+
+ def similarity_and_diff(ref_tokens: list, hyp_tokens: list) -> tuple[float, list[tuple[str, int, int, int, int]]]:
+     """
+     Returns:
+         ratio: Similarity ratio (0..1).
+         opcodes: List of differences between the target and the recognized user utterance.
+     """
+     sm = difflib.SequenceMatcher(a=ref_tokens, b=hyp_tokens)
+     ratio = sm.ratio()
+     opcodes = sm.get_opcodes()
+     return ratio, opcodes
+
+
+ class SentenceMatcher:
+     """Class for keeping track of (target sentence, user utterance) match features."""
+     def __init__(self, target_sentence, user_transcript, pass_threshold):
+         self.target_sentence: str = target_sentence
+         self.user_transcript: str = user_transcript
+         self.pass_threshold: float = pass_threshold
+         self.target_tokens: list = normalize_text(target_sentence).split()
+         self.user_tokens: list = normalize_text(user_transcript).split()
+         self.ratio: float
+         self.alignments: list
+         self.ratio, self.alignments = similarity_and_diff(self.target_tokens,
+                                                           self.user_tokens)
+         self.passed: bool = self.ratio >= self.pass_threshold
+
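
A quick sketch of how SentenceMatcher is consumed by app.py; the strings and threshold here are made-up examples:

    from src.process import SentenceMatcher

    m = SentenceMatcher("Open source makes AI more transparent and inclusive.",
                        "open source makes ai transparent and inclusive",
                        0.85)
    print(m.ratio, m.passed)                   # word-level similarity and pass/fail
    for op, i1, i2, j1, j2 in m.alignments:    # difflib opcodes over target vs. user tokens
        print(op, m.target_tokens[i1:i2], m.user_tokens[j1:j2])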
src/prompts.py ADDED
@@ -0,0 +1,47 @@
+ # src/prompts.py
+
+ def get_consent_generation_prompt(audio_model_name: str, short_prompt: bool = False) -> str:
+     """
+     Returns a text prompt instructing the model to generate a natural-sounding
+     consent sentence for voice cloning with the specified model.
+
+     Args:
+         audio_model_name (str): Name of the audio model to mention in the prompt.
+         short_prompt (bool): If True, returns a concise one-line prompt suitable
+             for direct model input. If False (default), returns the full detailed prompt.
+
+     Returns:
+         str: The prompt text.
+     """
+
+     if short_prompt:
+         return (
+             f"Generate one natural, spoken-style English sentence (10–20 words) in which a person "
+             f"clearly gives informed consent to use their voice for generating synthetic audio "
+             f"with the model {audio_model_name}. The sentence should sound conversational, include "
+             f"a clear consent phrase like 'I give my consent' or 'I agree', mention {audio_model_name} "
+             f"by name, and be phonetically varied but neutral in tone. Output only the final sentence."
+         )
+
+     return f"""
+     Generate a short, natural-sounding English sentence (10–20 words) that a person could say aloud
+     to clearly state their informed consent to use their voice for generating synthetic audio with
+     an AI model called {audio_model_name}.
+
+     The sentence should:
+     - Sound natural and conversational, not like legal text.
+     - Explicitly include a consent phrase, such as “I give my consent,” “I agree,” or “I allow.”
+     - Mention the model name ({audio_model_name}) clearly in the sentence.
+     - Include a neutral descriptive clause before or after the consent phrase to add phonetic variety
+       (e.g., “The weather today is bright and calm” or “This recording is made clearly and freely.”)
+     - Have a neutral or polite tone (no emotional extremes).
+     - Be comfortable to read aloud and phonetically rich, covering diverse vowels and consonants naturally.
+     - Be self-contained, so the full sentence can serve as an independent audio clip.
+
+     Examples of structure to follow:
+     - “The weather is clear and warm today. I give my consent to use my voice for generating audio with the model {audio_model_name}.”
+     - “I give my consent to use my voice for generating audio with the model {audio_model_name}. This statement is made freely and clearly.”
+     - “Good afternoon. I agree to the use of my recorded voice for audio generation with the model {audio_model_name}.”
+
+     The output should be a single, natural sentence ready to be spoken aloud for recording purposes.
+     """
src/tts.py ADDED
@@ -0,0 +1,43 @@
+ # src/tts.py
+ from __future__ import annotations
+ from typing import Tuple, Union
+
+ import numpy as np
+ from transformers import pipeline
+
+ # We use the text-to-speech pipeline with XTTS v2 (zero-shot cloning)
+ # Example forward params: {"speaker_wav": "/path/to/ref.wav", "language": "en"}
+
+
+ def get_tts_pipeline(model_id: str):
+     """
+     Create a TTS pipeline for the given model.
+     XTTS v2 works well for zero-shot cloning and is available on the Hub.
+     """
+     # NOTE: Add device selection similar to ASR if needed
+     return pipeline("text-to-speech", model=model_id)
+
+
+ def run_tts_clone(
+     ref_audio_path: str,
+     text_to_speak: str,
+     model_id: str = "coqui/XTTS-v2",
+     language: str = "en",
+ ) -> Union[Tuple[int, np.ndarray], Exception]:
+     """
+     Synthesize 'text_to_speak' in the cloned voice from 'ref_audio_path'.
+
+     Returns:
+         (sampling_rate, waveform) on success, or Exception on failure.
+     """
+     try:
+         tts = get_tts_pipeline(model_id)
+         result = tts(
+             text_to_speak,
+             forward_params={"speaker_wav": ref_audio_path, "language": language},
+         )
+         # transformers TTS returns dict like: {"audio": {"array": np.ndarray, "sampling_rate": 24000}}
+         audio = result["audio"]
+         sr = int(audio["sampling_rate"])
+         wav = audio["array"].astype(np.float32)
+         return sr, wav
+     except Exception as e:
+         return e
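
A usage sketch for run_tts_clone; the reference path and text are placeholders, and because the function returns the raised Exception instead of raising it, callers check with isinstance, as app.py does. Note that model_id is passed straight to transformers.pipeline, so it must be a Hub checkpoint the text-to-speech pipeline can actually load:

    from src.tts import run_tts_clone

    out = run_tts_clone("reference.wav", "I agree to the use of my voice.", model_id="coqui/XTTS-v2")
    if isinstance(out, Exception):
        print(f"Voice cloning failed: {out}")
    else:
        sr, wav = out  # sampling rate and float32 waveform, ready for gr.Audio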