# NOTE(review): removed a "Spaces: Sleeping" HuggingFace status banner that was
# captured during page extraction — it is not part of the source file.
#!/usr/bin/env python3
"""
Example usage of Wave2Vec2Inference with dynamic model switching
"""
# Project-local helpers: model factory plus the registry of selectable models.
from src.AI_Models.wave2vec_inference import (
    create_inference,
    get_available_models,
    get_model_name,
    DEFAULT_MODEL,
)
def main():
    """Demonstrate the model-selection API of Wave2Vec2Inference.

    Walks through five usage patterns: the default model, selection by
    registry key, selection by full HuggingFace model name, dynamic
    switching across several keys, and ONNX-backed inference. Finishes by
    printing a copy-pasteable usage cheat sheet. Purely illustrative —
    prints to stdout and returns None.
    """
    print("=== Wave2Vec2 Model Selection Example ===\n")

    # Show available models from the project registry (key -> full model name).
    print("Available models:")
    models = get_available_models()
    for key, model_name in models.items():
        print(f" {key}: {model_name}")
    print(f"\nDefault model: {DEFAULT_MODEL}\n")

    # Example 1: Using default model
    print("1. Creating inference with default model:")
    asr_default = create_inference()
    print(f" Loaded: {asr_default.model_name}\n")

    # Example 2: Using model key
    print("2. Creating inference with model key 'english_large':")
    asr_key = create_inference("english_large")
    print(f" Loaded: {asr_key.model_name}\n")

    # Example 3: Using full model name
    print("3. Creating inference with full model name:")
    asr_full = create_inference("facebook/wav2vec2-base-960h")
    print(f" Loaded: {asr_full.model_name}\n")

    # Example 4: Dynamic model switching — each call builds a fresh inference
    # object for the requested key.
    print("4. Dynamic model switching:")
    model_keys = ["english_large", "multilingual", "base_english"]
    for model_key in model_keys:
        print(f" Switching to: {model_key}")
        asr = create_inference(model_key)
        print(f" Active model: {asr.model_name}")
        # Example transcription (if you have an audio file)
        # result = asr.file_to_text("your_audio_file.wav")
        # print(f" Result: {result}")
        print()

    # Example 5: Using with ONNX. Conversion may not have been run yet, so a
    # failure here is expected and reported rather than raised.
    print("5. Creating ONNX inference with model selection:")
    try:
        asr_onnx = create_inference("english_large", use_onnx=True)
        print(f" ONNX model loaded: {asr_onnx.model_name}")
    except Exception as e:
        print(f" ONNX conversion needed: {e}")

    # Copy-pasteable usage summary for readers of the console output.
    print("\n=== Usage Examples ===")
    print("# Use default model")
    print("asr = create_inference()")
    print()
    print("# Use model key")
    print("asr = create_inference('english_large')")
    print()
    print("# Use full model name")
    print("asr = create_inference('facebook/wav2vec2-base-960h')")
    print()
    print("# Use with ONNX")
    print("asr = create_inference('english_large', use_onnx=True)")
    print()
    print("# Transcribe audio")
    print("result = asr.file_to_text('audio.wav')")
    print("# or")
    print("result = asr.buffer_to_text(audio_array)")
# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()