Files changed (1)
  1. app.py +139 -200
app.py CHANGED
@@ -1,208 +1,147 @@
-
- import streamlit
- as st
  import os
- from openai
- import OpenAI
  import json
-
- defclear_chat():
- st.session_state.messages = []
-
- definitialize_provider_settings(provider_choice):
- """Configure API settings based on provider selection"""
- provider_configs = {
- "Denvr Dataworks": {
- "api_key_source": st.secrets.get("openai_apikey",
- ""),
- "base_url_source": os.environ.get("base_url",
- ""),
- "fallback_model":
- "meta-llama/Llama-3.3-70B-Instruct"
- },
- "IBM": {
- "api_key_source": os.environ.get("ibm_openai_apikey",
- ""),
- "base_url_source": os.environ.get("ibm_base_url",
- ""),
- "fallback_model": None
- }
- }
-
- return provider_configs.get(provider_choice, {})
-
- st.title("Intel® AI for Enterprise Inference")
- st.header("LLM chatbot")
-
- with st.sidebar:
-
- # Provider selection dropdown
-
- available_providers = ["Denvr Dataworks",
- "IBM"]
-
- if"current_provider_choice"notin st.session_state:
- st.session_state.current_provider_choice = available_providers[0]
-
- provider_selection = st.selectbox(
- "Choose AI Provider:",
- available_providers,
- key="current_provider_choice"
- )
-
-
- # Get provider-specific settings
-
- provider_settings = initialize_provider_settings(provider_selection)
-
-
- # Validate required credentials
-
- ifnot provider_settings.get("api_key_source")
- ornot provider_settings.get("base_url_source"):
- st.error(f"Configuration missing for
- {provider_selection}. Check environment variables.")
- st.stop()
-
-
- # Setup OpenAI client
-
- try:
- api_client = OpenAI(
- api_key=provider_settings["api_key_source"],
- base_url=provider_settings["base_url_source"]
- )
- available_models = api_client.models.list()
- model_list = sorted([m.idfor m
- in available_models])
-
-
- # Handle model selection with provider switching

- session_key = f"model_for_{provider_selection}"
- if session_key
- notin st.session_state
- or st.session_state.get("last_provider") != provider_selection:
- preferred_model = provider_settings.get("fallback_model")
- if preferred_model
- and preferred_model
- in model_list:
- st.session_state[session_key] = preferred_model
- elif model_list:
- st.session_state[session_key] = model_list[0]
- st.session_state.last_provider = provider_selection
-
- ifnot model_list:
- st.error(f"No models found for
- {provider_selection}")
- st.stop()
-

- # Model selection interface
-
- chosen_model = st.selectbox(
- f"Available models from {provider_selection}:",
- model_list,
- key=session_key,
- )
- st.info(f"Active model:
- {chosen_model}")
-
- except Exception
- as connection_error:
- st.error(f"Connection failed for
- {provider_selection}:
- {connection_error}")
- st.stop()
-
- st.button("Reset Conversation", on_click=clear_chat)
-
- st.markdown("---")
-
-
- # Display provider-specific information

- if provider_selection ==
- "Denvr Dataworks":
- st.markdown(
- """
- **Denvr Dataworks Integration**
-
- Visit [Denvr Dataworks](https://www.denvrdata.com/intel)
- for model information and API access.
-
- Join the community: [Intel's DevHub Discord](https://discord.gg/kfJ3NKEw5t)
- """
- )
- elif provider_selection ==
- "IBM":
- st.markdown(
- """
- **IBM AI Services**
-
- Connected to IBM's AI infrastructure. Ensure your credentials are properly configured.
- """
- )
-

  # Main chat interface
-
- try:
- if"messages"notin st.session_state:
- st.session_state.messages = []
-
-
- # Display conversation history
-
- for msg
- in st.session_state.messages:
- with st.chat_message(msg["role"]):
- st.markdown(msg["content"])
-
-
- # Handle new user input
-
- if user_input := st.chat_input("Enter your message..."):
- st.session_state.messages.append({"role":
- "user",
- "content": user_input})
- with st.chat_message("user"):
- st.markdown(user_input)
-
-
- # Generate AI response
-
- with st.chat_message("assistant"):
  try:
- response_stream = api_client.chat.completions.create(
- model=chosen_model,
- messages=[
- {"role": msg["role"],
- "content": msg["content"]}
- for msg
- in st.session_state.messages
- ],
- max_tokens=4096,
- stream=True,
- )
- ai_response = st.write_stream(response_stream)
- except Exception
- as generation_error:
- st.error(f"Response generation failed:
- {generation_error}")
- ai_response = "Unable to generate response due to an error."
-
- st.session_state.messages.append({"role":
- "assistant",
- "content": ai_response})
-
- except KeyError
- as key_err:
- st.error(f"Configuration key error:
- {key_err}")
- except Exception
- as general_err:
- st.error(f"Unexpected error occurred:
- {general_err}")
-
-
+ import streamlit as st
  import os
+ from openai import OpenAI
  import json

+ def clear_chat():
+     st.session_state.messages = []
+
+ def initialize_provider_settings(provider_choice):
+     """Configure API settings based on provider selection"""
+     provider_configs = {
+         "Denvr Dataworks": {
+             "api_key_source": st.secrets.get("openai_apikey", ""),
+             "base_url_source": os.environ.get("base_url", ""),
+             "fallback_model": "meta-llama/Llama-3.3-70B-Instruct"
+         },
+         "IBM": {
+             "api_key_source": os.environ.get("ibm_openai_apikey", ""),
+             "base_url_source": os.environ.get("ibm_base_url", ""),
+             "fallback_model": None
+         }
+     }
+
+     return provider_configs.get(provider_choice, {})

+ st.title("Intel® AI for Enterprise Inference")
+ st.header("LLM chatbot")

+ with st.sidebar:
+     # Provider selection dropdown
+     available_providers = ["Denvr Dataworks", "IBM"]
+
+     if "current_provider_choice" not in st.session_state:
+         st.session_state.current_provider_choice = available_providers[0]
+
+     provider_selection = st.selectbox(
+         "Choose AI Provider:",
+         available_providers,
+         key="current_provider_choice"
+     )
+
+     # Get provider-specific settings
+     provider_settings = initialize_provider_settings(provider_selection)
+
+     # Validate required credentials
+     if not provider_settings.get("api_key_source") or not provider_settings.get("base_url_source"):
+         st.error(f"Configuration missing for {provider_selection}. Check environment variables.")
+         st.stop()
+
+     # Setup OpenAI client
+     try:
+         api_client = OpenAI(
+             api_key=provider_settings["api_key_source"],
+             base_url=provider_settings["base_url_source"]
+         )
+         available_models = api_client.models.list()
+         model_list = sorted([m.id for m in available_models])
+
+         # Handle model selection with provider switching
+         session_key = f"model_for_{provider_selection}"
+         if session_key not in st.session_state or st.session_state.get("last_provider") != provider_selection:
+             preferred_model = provider_settings.get("fallback_model")
+             if preferred_model and preferred_model in model_list:
+                 st.session_state[session_key] = preferred_model
+             elif model_list:
+                 st.session_state[session_key] = model_list[0]
+             st.session_state.last_provider = provider_selection
+
+         if not model_list:
+             st.error(f"No models found for {provider_selection}")
+             st.stop()
+
+         # Model selection interface
+         chosen_model = st.selectbox(
+             f"Available models from {provider_selection}:",
+             model_list,
+             key=session_key,
+         )
+         st.info(f"Active model: {chosen_model}")
+
+     except Exception as connection_error:
+         st.error(f"Connection failed for {provider_selection}: {connection_error}")
+         st.stop()
+
+     st.button("Reset Conversation", on_click=clear_chat)
+
+     st.markdown("---")
+
+     # Display provider-specific information
+     if provider_selection == "Denvr Dataworks":
+         st.markdown(
+             """
+ **Denvr Dataworks Integration**
+
+ Visit [Denvr Dataworks](https://www.denvrdata.com/intel) for model information and API access.
+
+ Join the community: [Intel's DevHub Discord](https://discord.gg/kfJ3NKEw5t)
+ """
+         )
+     elif provider_selection == "IBM":
+         st.markdown(
+             """
+ **IBM AI Services**
+
+ Connected to IBM's AI infrastructure. Ensure your credentials are properly configured.
+ """
+         )

  # Main chat interface
  try:
+     if "messages" not in st.session_state:
+         st.session_state.messages = []
+
+     # Display conversation history
+     for msg in st.session_state.messages:
+         with st.chat_message(msg["role"]):
+             st.markdown(msg["content"])
+
+     # Handle new user input
+     if user_input := st.chat_input("Enter your message..."):
+         st.session_state.messages.append({"role": "user", "content": user_input})
+         with st.chat_message("user"):
+             st.markdown(user_input)
+
+         # Generate AI response
+         with st.chat_message("assistant"):
+             try:
+                 response_stream = api_client.chat.completions.create(
+                     model=chosen_model,
+                     messages=[
+                         {"role": msg["role"], "content": msg["content"]}
+                         for msg in st.session_state.messages
+                     ],
+                     max_tokens=4096,
+                     stream=True,
+                 )
+                 ai_response = st.write_stream(response_stream)
+             except Exception as generation_error:
+                 st.error(f"Response generation failed: {generation_error}")
+                 ai_response = "Unable to generate response due to an error."
+
+             st.session_state.messages.append({"role": "assistant", "content": ai_response})
+
+ except KeyError as key_err:
+     st.error(f"Configuration key error: {key_err}")
+ except Exception as general_err:
+     st.error(f"Unexpected error occurred: {general_err}")