import os
from pathlib import Path

import streamlit as st
from dotenv import load_dotenv


def setup_dotenv():
    """Create a default config/.env on first run, then load it into the environment."""
    env_dir = Path("config")
    env_dir.mkdir(exist_ok=True)
    env_path = env_dir / ".env"
    if not env_path.exists():
        # Write a template .env with the expected keys left blank.
        with open(env_path, 'w') as f:
            f.write("# LLM server and API key configuration\n")
            f.write("LLM_SERVER_URL=http://localhost:8080/completion\n")
            f.write("GITHUB_API_TOKEN=\n")
            f.write("HUGGINGFACE_API_TOKEN=\n")
            f.write("NVD_API_KEY=\n")
            f.write("STACK_EXCHANGE_API_KEY=\n")
    load_dotenv(dotenv_path=env_path)


setup_dotenv()

# Completion endpoint of the LLM server; falls back to the local default written above.
LLM_SERVER_URL = os.getenv('LLM_SERVER_URL', 'http://localhost:8080/completion')

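
# Illustrative sketch, not part of the original module: one way the /completion
# endpoint at LLM_SERVER_URL could be queried. The request/response shape
# (JSON body with "prompt", "temperature" and "n_predict"; generated text under
# "content") is an assumption based on the llama.cpp-style default URL and the
# session-state defaults below; adjust it to the real server if it differs.
def _example_completion_request(prompt, temperature=0.7, n_predict=512):
    import requests  # local import so the sketch stays self-contained

    payload = {"prompt": prompt, "temperature": temperature, "n_predict": n_predict}
    response = requests.post(LLM_SERVER_URL, json=payload, timeout=120)
    response.raise_for_status()
    return response.json().get("content", "")
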
def init_session_state():
    """Populate st.session_state with default values on first render."""
    if 'bot_status' not in st.session_state:
        st.session_state.bot_status = "Arrêté"  # French for "Stopped"
    if 'server_status' not in st.session_state:
        st.session_state.server_status = "Inactif"  # French for "Inactive"
    if 'total_qa_pairs' not in st.session_state:
        st.session_state.total_qa_pairs = 0
    if 'logs' not in st.session_state:
        st.session_state.logs = []
    if 'qa_data' not in st.session_state:
        st.session_state.qa_data = []
    if 'enable_enrichment' not in st.session_state:
        st.session_state.enable_enrichment = True
    if 'min_relevance' not in st.session_state:
        st.session_state.min_relevance = 70
    if 'num_queries' not in st.session_state:
        st.session_state.num_queries = 5
    if 'temperature' not in st.session_state:
        st.session_state.temperature = 0.7
    if 'n_predict' not in st.session_state:
        st.session_state.n_predict = 512
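
# Typical usage (assumption: the main Streamlit script calls this once near the
# top of each rerun, before any widget reads these keys):
#
#     init_session_state()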


# Request throttling and API key usage settings.
REQUEST_COUNT = 0
MAX_REQUESTS_BEFORE_PAUSE = 15
MIN_PAUSE = 2
MAX_PAUSE = 5
USE_API_KEYS = True
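
# Illustrative sketch, not part of the original module: one way the throttling
# constants above could be applied, pausing for a random MIN_PAUSE..MAX_PAUSE
# interval after every MAX_REQUESTS_BEFORE_PAUSE requests. The wrapper name and
# the seconds unit for the pauses are assumptions.
def _example_throttled_call(send_request):
    import random
    import time

    global REQUEST_COUNT
    REQUEST_COUNT += 1
    if REQUEST_COUNT % MAX_REQUESTS_BEFORE_PAUSE == 0:
        time.sleep(random.uniform(MIN_PAUSE, MAX_PAUSE))
    return send_request()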