import logging
import torch
import torchaudio
from transformers import pipeline


class WhisperWrapper:
    """Simplified Whisper ASR wrapper"""

    def __init__(self, model_id="openai/whisper-large-v3"):
        """
        Initialize WhisperWrapper

        Args:
            model_id: Hugging Face Whisper model ID (defaults to openai/whisper-large-v3)
        """
        self.logger = logging.getLogger(__name__)
        self.model = None

        try:
            self.model = pipeline("automatic-speech-recognition", model=model_id)
            self.logger.info(f"βœ“ Whisper model loaded successfully: {model_id}")
        except Exception as e:
            self.logger.error(f"❌ Failed to load Whisper model: {e}")
            raise

    def __call__(self, audio_input, sample_rate=16000):
        """
        Transcribe audio to text.

        Args:
            audio_input: Audio file path or audio tensor
            sample_rate: Sample rate of a tensor input, in Hz (ignored for
                file paths, whose rate is read from the file)

        Returns:
            Transcribed text, or an empty string on failure
        """
        if self.model is None:
            raise RuntimeError("Whisper model not loaded")

        try:
            # Load audio
            if isinstance(audio_input, str):
                # Audio file path
                audio, audio_sr = torchaudio.load(audio_input)
                audio = torchaudio.functional.resample(audio, audio_sr, 16000)
                # Downmix stereo to mono; the pipeline expects a mono array
                if audio.shape[0] > 1:
                    audio = audio.mean(dim=0, keepdim=True)  # Convert stereo to mono by averaging
                # Convert to numpy and squeeze
                audio = audio.squeeze(0).numpy()
            elif isinstance(audio_input, torch.Tensor):
                # Tensor input: resample from the caller-supplied sample rate
                audio = audio_input.cpu()
                audio = torchaudio.functional.resample(audio, sample_rate, 16000)
                # Handle stereo to mono conversion
                if audio.ndim > 1 and audio.shape[0] > 1:
                    audio = audio.mean(dim=0, keepdim=True)
                audio = audio.squeeze().numpy()
            else:
                raise ValueError(f"Unsupported audio input type: {type(audio_input)}")

            # Transcribe
            result = self.model(audio)
            text = result.get("text", "").strip() if isinstance(result, dict) else str(result).strip()

            self.logger.debug(f"Transcription result: {text}")
            return text

        except Exception as e:
            self.logger.error(f"Audio transcription failed: {e}")
            return ""

    def is_available(self):
        """Check if whisper model is available"""
        return self.model is not None
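

if __name__ == "__main__":
    # Minimal usage sketch. Assumptions: "sample.wav" is a hypothetical local
    # audio file, and openai/whisper-tiny is used only to keep the demo light.
    logging.basicConfig(level=logging.INFO)

    asr = WhisperWrapper(model_id="openai/whisper-tiny")
    if asr.is_available():
        # File path input: the sample rate is read from the file and
        # resampled to 16 kHz internally.
        print(asr("sample.wav"))

        # Tensor input: pass the tensor's sample rate explicitly so the
        # wrapper can resample correctly.
        waveform, sr = torchaudio.load("sample.wav")
        print(asr(waveform, sample_rate=sr))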