Reyall committed on
Commit 6ab3905 · verified
1 Parent(s): 96ca1f2

Update src/streamlit_app.py

Files changed (1):
  src/streamlit_app.py  +24 -53
src/streamlit_app.py CHANGED
@@ -1,73 +1,44 @@
- import os
  import streamlit as st
- import soundfile as sf
- from openai import OpenAI
+ from pydub import AudioSegment
  from transformers import pipeline

- # -----------------------------
- # HF Token
- # -----------------------------
- HF_TOKEN = st.secrets["HF_TOKEN"]
-
- client = OpenAI(base_url="https://router.huggingface.co/v1", api_key=HF_TOKEN)
-
- # -----------------------------
- # ASR Model (Whisper)
- # -----------------------------
- @st.cache_resource
- def load_asr():
-     return pipeline("automatic-speech-recognition", model="openai/whisper-large-v3")
-
- asr = load_asr()
-
  # -----------------------------
  # Streamlit UI
  # -----------------------------
  st.title("🏥 AZ Medical Speech → Insight")
- st.write("Audio yükləyin və tibbi insight çıxarın.")
+ st.write("Audio faylı yükləyin və tibbi insight çıxarın.")

- uploaded_file = st.file_uploader("Audio seçin (.wav, .mp3, .ogg, .m4a)", type=["wav", "mp3", "ogg", "m4a"])
+ uploaded_file = st.file_uploader("Audio seçin (.wav, .mp3, .ogg, .m4a)", type=["wav","mp3","ogg","m4a"])

  if uploaded_file is not None:
-     # Create a .wav file with soundfile
+     # 1️⃣ Convert the uploaded audio to .wav
      wav_path = "temp.wav"
-     data, samplerate = sf.read(uploaded_file)
-     sf.write(wav_path, data, samplerate)
+     audio = AudioSegment.from_file(uploaded_file)
+     audio.export(wav_path, format="wav")

      st.audio(wav_path, format="audio/wav")

-     # ASR for AZ and EN
-     az_text = asr(wav_path, generate_kwargs={"task": "transcribe", "language": "az"})["text"].strip()
-     en_text = asr(wav_path, generate_kwargs={"task": "translate", "language": "az"})["text"].strip()
+     # 2️⃣ ASR Model (Whisper, public)
+     with st.spinner("Audio tanınır..."):
+         asr = pipeline(
+             "automatic-speech-recognition",
+             model="openai/whisper-large-v2"
+         )
+         az_text = asr(wav_path, generate_kwargs={"task":"transcribe", "language":"az"})["text"].strip()
+         en_text = asr(wav_path, generate_kwargs={"task":"translate", "language":"az"})["text"].strip()

      st.subheader("🎧 Transcripts")
      st.write("AZ:", az_text)
      st.write("EN:", en_text)

-     # LLM Insight
-     messages = [
-         {
-             "role": "system",
-             "content": (
-                 "Sən tibbi köməkçi modelsən. Məqsədin xəstənin danışığından simptomları, "
-                 "həyati əlamətləri təcili prioriteti müəyyən etməkdir. "
-                 "Qısa və analitik cavab ver, tibbi anlayışlara əsaslan."
-             )
-         },
-         {
-             "role": "user",
-             "content": f"Mətn: {az_text}\n\nXəstənin vəziyyəti barədə tibbi təhlil ver:"
-         }
-     ]
-
-     completion = client.chat.completions.create(
-         model="Intelligent-Internet/II-Medical-8B-1706:featherless-ai",
-         messages=messages,
-         max_tokens=400,
-         temperature=0.4
-     )
-
-     llm_response = completion.choices[0].message.content.strip()
+     # 3️⃣ LLM Model (Public, instruct)
+     with st.spinner("Tibbi insight hazırlanır..."):
+         llm = pipeline(
+             "text-generation",
+             model="tiiuae/falcon-7b-instruct"
+         )
+         insight_output = llm(f"Xəstənin vəziyyəti barədə tibbi təhlil ver:\n{az_text}",
+                              max_new_tokens=200)[0]["generated_text"].strip()

      st.subheader("💡 MODEL INSIGHT")
-     st.write(llm_response)
+     st.write(insight_output)
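The updated script constructs both Hugging Face pipelines inside the upload handler, so the Whisper and Falcon checkpoints are reloaded on every uploaded file. Below is a minimal sketch, not part of this commit, of how the same two pipelines could instead be loaded once per process with Streamlit's st.cache_resource, mirroring the pattern the removed version used for the ASR model; the model IDs are taken from the diff above.

import streamlit as st
from transformers import pipeline

# Hypothetical helpers (not in the commit): cache each pipeline so it is
# created once per process and reused across reruns and uploads.
@st.cache_resource
def load_asr():
    # Whisper checkpoint used by the new version of the app
    return pipeline("automatic-speech-recognition", model="openai/whisper-large-v2")

@st.cache_resource
def load_llm():
    # Instruct checkpoint used by the new version of the app
    return pipeline("text-generation", model="tiiuae/falcon-7b-instruct")

asr = load_asr()
llm = load_llm()

The cached loaders would replace the pipeline(...) calls inside the two st.spinner blocks, while the asr(...) and llm(...) calls stay unchanged.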