Update app.py
app.py CHANGED
@@ -48,49 +48,6 @@ def setup_logging():
 
 logger = setup_logging()
 
-# Create the required directories and scripts
-def create_initial_setup():
-    dirs = [
-        "data/devsecops/qa", "data/security/qa", "data/development/qa",
-        "data/data_analysis/qa", "logs", "config", "server", "scripts",
-        "models", "llama.cpp", ".kaggle"
-    ]
-    for dir_path in dirs:
-        Path(dir_path).mkdir(parents=True, exist_ok=True)
-
-    download_script = Path("scripts/download_with_aria2c.sh")
-    if not download_script.exists():
-        with open(download_script, 'w') as f:
-            f.write("""#!/bin/bash
-URL=$1
-OUTPUT=$2
-MAX_RETRIES=5
-for i in $(seq 1 $MAX_RETRIES); do
-    echo "Attempt $i/$MAX_RETRIES: $URL"
-    aria2c -x 16 -s 16 -d "$(dirname "$OUTPUT")" -o "$(basename "$OUTPUT")" "$URL" && break
-    sleep 10
-done
-""")
-        os.chmod(download_script, 0o755)
-
-    llama_dir = Path("llama.cpp")
-    if not llama_dir.exists():
-        st.info("Installing llama.cpp...")
-        subprocess.run(["git", "clone", "https://github.com/ggerganov/llama.cpp.git", str(llama_dir)])
-        os.chdir(str(llama_dir))
-        subprocess.run(["mkdir", "-p", "build"])
-        os.chdir("build")
-        subprocess.run(["cmake", "..", "-DLLAMA_CURL=1"])
-        subprocess.run(["cmake", "--build", ".", "--config", "Release"])
-        os.chdir(Path(__file__).parent)
-
-# HTML-to-text converter
-h = html2text.HTML2Text()
-h.ignore_links = False
-h.ignore_images = True
-h.ignore_emphasis = False
-h.body_width = 0
-
 # Functions for the LLM server (llama.cpp)
 def check_server_status():
     try:
@@ -807,5 +764,4 @@ def main():
     st.session_state.n_predict = st.slider("Number of tokens", 128, 1024, st.session_state.n_predict, help="Maximum number of tokens for the AI to generate.")
 
 if __name__ == "__main__":
-    create_initial_setup()
     main()