File size: 4,316 Bytes
3282984
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
import whisper
import os
import streamlit as st
import time

@st.cache_resource
def load_whisper_model(model_name="base"):
    """Load and cache an OpenAI Whisper ASR model, with UI status notices.

    Decorated with ``st.cache_resource`` so the expensive model load runs
    only once per Streamlit server process; subsequent reruns reuse the
    cached instance (and skip the notification flow entirely).

    Args:
        model_name: Whisper model name/size to load (e.g. "base").

    Returns:
        The loaded Whisper model object.

    Raises:
        Exception: re-raised from ``whisper.load_model`` after surfacing
        the failure in the UI, so callers/Streamlit still see the error.
    """
    # Transient placeholder so the status messages can be cleared later.
    notify = st.empty()
    notify.info(f"Loading Whisper '{model_name}' model for transcription... This may take a moment.", icon="⏳")
    try:
        whisper_model = whisper.load_model(model_name)
    except Exception as e:
        # Replace the stale "loading" notice with an error instead of
        # leaving it on screen while the exception propagates.
        notify.error(f"Error loading Whisper '{model_name}' model: {e}")
        raise
    notify.success(f"Whisper '{model_name}' model loaded successfully!")
    time.sleep(2)  # Show success for 2 seconds
    notify.empty() # Clear the notification
    return whisper_model

whisper_model_instance = load_whisper_model("base")

def transcribe_audio(audio_path):
    """Transcribe the audio file at *audio_path* with the shared Whisper model.

    Returns the transcribed text, or an empty string when transcription
    fails (the failure is reported in the Streamlit UI rather than raised).
    """
    try:
        transcription = whisper_model_instance.transcribe(audio_path)
    except Exception as e:
        st.error(f"Error during Whisper transcription: {e}")
        return "" # Return empty string if transcription fails
    return transcription["text"]
    

# @st.cache_resource
# def load_whisper_model(model_name="base"):
    
#     status_placeholder = st.empty()

#     with status_placeholder.status(f"Loading Whisper '{model_name}' model...", expanded=True) as status:
#         st.write("Initializing model download and setup...")
#         try:
#             whisper_model = whisper.load_model(model_name)
#             status.update(label=f"Whisper '{model_name}' model loaded successfully!", state="complete", expanded=False)
#             # st.session_state is available even in cached functions if they are part of the app's scope
#             st.session_state['whisper_model_loaded'] = True
#             return whisper_model
#         except Exception as e:
#             status.update(label=f"Error loading Whisper '{model_name}' model: {e}", state="error")
#             st.error("Model loading failed! Please check your internet connection or try again.")
#             st.session_state['whisper_model_loaded'] = False # Mark as failed
#             raise # Re-raise the exception to propagate it

# # Load the desired Whisper model globally within this module
# whisper_model_instance = load_whisper_model("base")

# def transcribe_audio(audio_path):
#     """
#     Transcribes the audio file.
#     """
#     try:
#         result = whisper_model_instance.transcribe(audio_path)
#         return result["text"]
#     except Exception as e:
#         st.error(f"Error during Whisper transcription: {e}")
#         return ""



# @st.cache_resource
# def load_whisper_model(model_name="base"):
    
#     # st.info(f"Loading Whisper '{model_name}' model for transcription... This may take a moment.", icon="⏳")
   
#     whisper_model = whisper.load_model(model_name)
#     # st.success(f"Whisper '{model_name}' model loaded successfully!")
#     return whisper_model

# whisper_model_instance = load_whisper_model("base")

# def transcribe_audio(audio_path):
    
#     try:
#         result = whisper_model_instance.transcribe(audio_path)
#         return result["text"]
#     except Exception as e:
#         st.error(f"Error during Whisper transcription: {e}")
#         return "" # Return empty string if transcription fails


# @st.cache_resource
# def load_whisper_model(model_name="base"):

#     with st.status(f"Loading Whisper '{model_name}' model...", expanded=True) as status:
#         st.write("Initializing model download and setup...")
#         try:
#             whisper_model = whisper.load_model(model_name)
#             # Update the status once complete
#             status.update(label=f"Whisper '{model_name}' model loaded successfully!", state="complete", expanded=False)
#             return whisper_model
#         except Exception as e:
#             # Update the status on error
#             status.update(label=f"Error loading Whisper '{model_name}' model: {e}", state="error")
#             st.error("Model loading failed! Please check your internet connection or try again.")
#             raise # Re-raise the exception to propagate it
            
# # Load the desired Whisper model globally within this module
# whisper_model_instance = load_whisper_model("base")

# def transcribe_audio(audio_path):

#     try:
#         result = whisper_model_instance.transcribe(audio_path)
#         return result["text"]
#     except Exception as e:
#         st.error(f"Error during Whisper transcription: {e}")
#         return ""