Update app.py
app.py CHANGED
@@ -106,7 +106,8 @@ echo $! > "server/server.pid"
         subprocess.Popen(["bash", str(start_script)])
         st.success("Le serveur llama.cpp est en cours de démarrage...")
         time.sleep(5)
-
+        check_server_status()
+        if st.session_state.server_status == "Actif":
             st.success("Serveur llama.cpp démarré avec succès!")
         else:
             st.error("Le serveur n'a pas pu démarrer. Vérifiez les logs dans le dossier logs/.")
@@ -134,7 +135,8 @@ fi
         subprocess.run(["bash", str(stop_script)])
         st.success("Le serveur llama.cpp est en cours d'arrêt...")
         time.sleep(2)
-
+        check_server_status()
+        if st.session_state.server_status == "Inactif":
             st.success("Serveur llama.cpp arrêté avec succès!")
         else:
             st.warning("Le serveur n'a pas pu être arrêté correctement.")
@@ -281,15 +283,17 @@ def check_api_keys():
         'GITHUB_API_TOKEN': os.getenv('GITHUB_API_TOKEN'),
         'HUGGINGFACE_API_TOKEN': os.getenv('HUGGINGFACE_API_TOKEN'),
         'NVD_API_KEY': os.getenv('NVD_API_KEY'),
-        'STACK_EXCHANGE_API_KEY': os.getenv('STACK_EXCHANGE_API_KEY')
+        'STACK_EXCHANGE_API_KEY': os.getenv('STACK_EXCHANGE_API_KEY'),
     }
 
     valid_keys = {k: v for k, v in keys.items() if v and v != f'your_{k.lower()}_here'}
 
-
-    if not config.USE_API_KEYS:
+    if len(valid_keys) == 0:
         logger.warning("Aucune clé d'API valide trouvée. Le bot fonctionnera en mode dégradé avec des pauses plus longues.")
     else:
+        missing = set(keys.keys()) - set(valid_keys.keys())
+        if missing:
+            logger.warning(f"Clés d'API manquantes ou non configurées: {', '.join(missing)}")
         logger.info(f"Clés d'API valides trouvées pour: {', '.join(valid_keys.keys())}.")
 
     return valid_keys
@@ -297,7 +301,7 @@ def check_api_keys():
 def make_request(url, headers=None, params=None, is_api_call=True):
     config.REQUEST_COUNT += 1
 
-    pause_factor = 1 if
+    pause_factor = 1 if len(check_api_keys()) > 0 else 2
 
     if config.REQUEST_COUNT >= config.MAX_REQUESTS_BEFORE_PAUSE:
         pause_time = random.uniform(config.MIN_PAUSE * pause_factor, config.MAX_PAUSE * pause_factor)
@@ -368,70 +372,71 @@ def save_qa_pair(question, answer, category, subcategory, source, attack_signatu
             st.session_state.total_qa_pairs += 1
             st.session_state.qa_data.append(qa_data)
 
-
-
+            log_message = f"Paire Q/R sauvegardée: {filename} (Total: {st.session_state.total_qa_pairs})"
+            logger.info(log_message)
+            st.session_state.logs.append(log_message)
     except Exception as e:
         logger.error(f"Erreur lors de la sauvegarde du fichier {filename}: {str(e)}")
+        st.session_state.logs.append(f"Erreur: Impossible de sauvegarder {filename}")
 
-
+
+def collect_kaggle_data(queries):
     logger.info("Début de la collecte des données Kaggle...")
 
-
-
-
-
-
-    os.environ['KAGGLE_USERNAME'] = os.getenv('KAGGLE_USERNAME')
-    os.environ['KAGGLE_KEY'] = os.getenv('KAGGLE_KEY')
-    import kaggle
-    kaggle.api.authenticate()
-
-    search_queries = queries.split('\n') if queries else ["cybersecurity", "vulnerability"]
-
-    if ia_enricher.available and st.session_state.enable_enrichment:
-        adaptive_queries = ia_enricher.generate_adaptive_queries("Initial data keywords: " + ", ".join(search_queries))
-        search_queries.extend(adaptive_queries)
-
-    for query in list(set(search_queries)):
-        logger.info(f"Recherche de datasets Kaggle pour: {query}")
-        try:
-            # Kaggle API ne supporte pas la pagination et "results_per_page"
-            datasets = kaggle.api.dataset_list(search=query, max_results=results_per_page)
-            if not datasets:
-                logger.info(f"Aucun dataset trouvé pour la requête '{query}'.")
-                continue
+    try:
+        if not os.getenv('KAGGLE_USERNAME') or not os.getenv('KAGGLE_KEY'):
+            logger.warning("Clés Kaggle non configurées. La collecte Kaggle est ignorée.")
+            st.session_state.logs.append("ATTENTION: Clés Kaggle non configurées. Collecte Kaggle ignorée.")
+            return
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        os.environ['KAGGLE_USERNAME'] = os.getenv('KAGGLE_USERNAME')
+        os.environ['KAGGLE_KEY'] = os.getenv('KAGGLE_KEY')
+        import kaggle
+        kaggle.api.authenticate()
+
+        search_queries = queries.split('\n') if queries else ["cybersecurity", "vulnerability"]
+
+        if ia_enricher.available and st.session_state.enable_enrichment:
+            adaptive_queries = ia_enricher.generate_adaptive_queries("Initial data keywords: " + ", ".join(search_queries))
+            search_queries.extend(adaptive_queries)
+
+        for query in list(set(search_queries)):
+            logger.info(f"Recherche de datasets Kaggle pour: {query}")
+            try:
+                datasets = kaggle.api.dataset_list(search=query, max_results=5)
+                for dataset in datasets:
+                    dataset_ref = dataset.ref
+                    if ia_enricher.available and st.session_state.enable_enrichment:
+                        is_relevant, _, _, relevance_score = ia_enricher.analyze_content_relevance(dataset.title + " " + dataset.subtitle)
+                        if not is_relevant or relevance_score < st.session_state.min_relevance:
+                            logger.info(f"Dataset non pertinent ({relevance_score}%): {dataset_ref}. Ignoré.")
+                            continue
+
+                    logger.info(f"Traitement du dataset: {dataset_ref}")
+                    download_dir = Path("data") / "security" / "kaggle" / dataset_ref.replace('/', '_')
+                    shutil.rmtree(download_dir, ignore_errors=True)
+                    download_dir.mkdir(parents=True, exist_ok=True)
+                    kaggle.api.dataset_download_files(dataset_ref, path=download_dir, unzip=True)
+
+                    for file_path in download_dir.glob('*'):
+                        if file_path.is_file() and file_path.suffix.lower() in ['.json', '.csv', '.txt']:
+                            try:
+                                with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
+                                    file_content = f.read()[:5000]
+                                is_relevant, signatures, security_tags, _ = ia_enricher.analyze_content_relevance(file_content)
+                                if is_relevant:
+                                    save_qa_pair(
+                                        question=f"Quelles informations de sécurité contient le fichier {file_path.name} du dataset '{dataset.title}'?",
+                                        answer=file_content, category="security", subcategory="vulnerability",
+                                        source=f"kaggle_{dataset_ref}", attack_signatures=signatures, tags=security_tags
+                                    )
+                            except Exception as e:
+                                logger.error(f"Erreur lors du traitement du fichier {file_path}: {str(e)}")
+                    time.sleep(random.uniform(2, 4))
+            except Exception as e:
+                logger.error(f"Erreur lors de la collecte des données Kaggle pour {query}: {str(e)}")
+    except Exception as e:
+        logger.error(f"Erreur inattendue dans collect_kaggle_data: {str(e)}")
     logger.info("Collecte des données Kaggle terminée.")
 
 def collect_github_data(queries, num_pages, results_per_page):
@@ -444,7 +449,7 @@ def collect_github_data(queries, num_pages, results_per_page):
         headers["Authorization"] = f"token {github_token}"
     else:
         logger.warning("Clé GitHub non configurée. La collecte GitHub est ignorée.")
-        st.session_state.logs.append("ATTENTION: Clé GitHub non configurée. Collecte ignorée.")
+        st.session_state.logs.append("ATTENTION: Clé GitHub non configurée. Collecte GitHub ignorée.")
         return
 
     search_queries = queries.split('\n') if queries else ["topic:devsecops", "topic:security"]
@@ -453,59 +458,64 @@ def collect_github_data(queries, num_pages, results_per_page):
         logger.info(f"Recherche de repositories pour: '{query}' sur {num_pages} page(s)")
 
         for page_number in range(1, num_pages + 1):
-
-
-
-
-                "q": query,
-                "sort": "stars",
-                "per_page": results_per_page,
-                "page": page_number
-            }
-
-            response = make_request(search_url, headers=headers, params=params)
-            if not response:
-                break
-
-            data = response.json()
-            items = data.get("items", [])
-
-            if not items:
-                logger.info(f"Fin des résultats pour cette requête (page {page_number}).")
-                break
-
-            for repo in items:
-                repo_name = repo["full_name"].replace("/", "_")
-                logger.info(f"Traitement du repository: {repo['full_name']}")
+            try:
+                logger.info(f"Consultation de la page {page_number}...")
+                st.session_state.logs.append(f"GitHub: page {page_number} pour '{query}'")
+                search_url = f"{base_url}/search/repositories"
 
-
-
-
+                params = {
+                    "q": query,
+                    "sort": "stars",
+                    "per_page": results_per_page,
+                    "page": page_number
+                }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                response = make_request(search_url, headers=headers, params=params)
+                if not response:
+                    break
+
+                data = response.json()
+                items = data.get("items", [])
+
+                if not items:
+                    logger.info(f"Fin des résultats pour cette requête (page {page_number}).")
+                    break
+
+                for repo in items:
+                    repo_name = repo["full_name"].replace("/", "_")
+                    logger.info(f"Traitement du repository: {repo['full_name']}")
+
+                    issues_url = f"{base_url}/repos/{repo['full_name']}/issues"
+                    issues_params = {"state": "closed", "labels": "security,bug,vulnerability", "per_page": 10}
+                    issues_response = make_request(issues_url, headers=headers, params=issues_params)
+
+                    if issues_response:
+                        issues_data = issues_response.json()
+                        for issue in issues_data:
+                            if "pull_request" in issue: continue
+                            question = issue.get("title", "")
+                            body = clean_html(issue.get("body", ""))
+                            if not question or not body or len(body) < 50: continue
+
+                            comments_url = issue.get("comments_url")
+                            comments_response = make_request(comments_url, headers=headers)
+                            answer_parts = []
+                            if comments_response:
+                                comments_data = comments_response.json()
+                                for comment in comments_data:
+                                    comment_body = clean_html(comment.get("body", ""))
+                                    if comment_body: answer_parts.append(comment_body)
+
+                            if answer_parts:
+                                answer = "\n\n".join(answer_parts)
+                                save_qa_pair(
+                                    question=f"{question}: {body}", answer=answer, category="devsecops",
+                                    subcategory="github", source=f"github_{repo_name}"
+                                )
+                    time.sleep(random.uniform(1, 3))
+            except Exception as e:
+                logger.error(f"Erreur lors de la collecte GitHub pour la page {page_number}: {str(e)}")
+                st.session_state.logs.append(f"Erreur GitHub: {str(e)}")
     logger.info("Collecte des données GitHub terminée.")
 
 def collect_huggingface_data(queries, num_pages, results_per_page):
@@ -518,50 +528,52 @@ def collect_huggingface_data(queries, num_pages, results_per_page):
         headers["Authorization"] = f"Bearer {hf_token}"
     else:
         logger.warning("Clé Hugging Face non configurée. La collecte Hugging Face est ignorée.")
-        st.session_state.logs.append("ATTENTION: Clé Hugging Face non configurée. Collecte ignorée.")
+        st.session_state.logs.append("ATTENTION: Clé Hugging Face non configurée. Collecte Hugging Face ignorée.")
         return
 
     search_queries = queries.split('\n') if queries else ["security", "devsecops"]
     for query in search_queries:
         logger.info(f"Recherche de datasets pour: {query}")
 
-        # Hugging Face API ne supporte pas la pagination par page_number, mais par 'limit' et 'offset'
-        # On va simuler la pagination en ajustant l'offset
         for page_number in range(num_pages):
-
-
-
-
-            response = make_request(search_url, headers=headers, params=params)
-            if not response: continue
-
-            data = response.json()
-            if not data:
-                logger.info(f"Fin des résultats pour la requête '{query}'.")
-                break
+            try:
+                offset = page_number * results_per_page
+                search_url = f"{base_url}/datasets"
+                params = {"search": query, "limit": results_per_page, "offset": offset}
 
-
-
-            logger.info(f"Traitement du dataset: {dataset['id']}")
-            dataset_url = f"{base_url}/datasets/{dataset['id']}"
-            dataset_response = make_request(dataset_url, headers=headers)
+                response = make_request(search_url, headers=headers, params=params)
+                if not response: continue
 
-
-
-
-
-            tags = dataset_data.get("tags", [])
-            tags_text = ", ".join(tags) if tags else "No tags"
-            answer = f"Dataset: {dataset_data.get('id', '')}\nDownloads: {dataset_data.get('downloads', 0)}\nTags: {tags_text}\n\n{description}"
+                data = response.json()
+                if not data:
+                    logger.info(f"Fin des résultats pour la requête '{query}'.")
+                    break
 
-
-
-
-
-
+                for dataset in data:
+                    dataset_id = dataset["id"].replace("/", "_")
+                    logger.info(f"Traitement du dataset: {dataset['id']}")
+                    dataset_url = f"{base_url}/datasets/{dataset['id']}"
+                    dataset_response = make_request(dataset_url, headers=headers)
+
+                    if dataset_response:
+                        dataset_data = dataset_response.json()
+                        description = clean_html(dataset_data.get("description", ""))
+                        if not description or len(description) < 100: continue
+                        tags = dataset_data.get("tags", [])
+                        tags_text = ", ".join(tags) if tags else "No tags"
+                        answer = f"Dataset: {dataset_data.get('id', '')}\nDownloads: {dataset_data.get('downloads', 0)}\nTags: {tags_text}\n\n{description}"
+
+                        save_qa_pair(
+                            question=f"What is the {dataset_data.get('id', '')} dataset about?", answer=answer,
+                            category="security", subcategory="dataset", source=f"huggingface_{dataset_id}", tags=tags
+                        )
+                        time.sleep(random.uniform(1, 3))
+            except Exception as e:
+                logger.error(f"Erreur lors de la collecte Hugging Face: {str(e)}")
+                st.session_state.logs.append(f"Erreur Hugging Face: {str(e)}")
     logger.info("Collecte des données Hugging Face terminée.")
 
-def collect_nvd_data(queries, num_pages, results_per_page):
+def collect_nvd_data(num_pages, results_per_page):
     logger.info("Début de la collecte des données NVD...")
     base_url = "https://services.nvd.nist.gov/rest/json/cves/2.0"
     headers = {"Accept": "application/json"}
@@ -571,46 +583,51 @@ def collect_nvd_data(queries, num_pages, results_per_page):
         headers["apiKey"] = nvd_key
     else:
         logger.warning("Clé NVD non configurée. La collecte NVD est ignorée.")
-        st.session_state.logs.append("ATTENTION: Clé NVD non configurée. Collecte ignorée.")
+        st.session_state.logs.append("ATTENTION: Clé NVD non configurée. Collecte NVD ignorée.")
         return
 
     for page in range(num_pages):
-
-
-
-
-
-
-            logger.warning("Impossible de récupérer les données du NVD. Arrêt de la collecte NVD.")
-            break
-
-        data = response.json()
-        vulnerabilities = data.get("vulnerabilities", [])
-        if not vulnerabilities:
-            logger.info("Fin des résultats pour la collecte NVD.")
-            break
-
-        logger.info(f"Traitement de {len(vulnerabilities)} vulnérabilités...")
-
-        for vuln in vulnerabilities:
-            cve_data = vuln.get("cve", {})
-            cve_id = cve_data.get("id", "")
-            descriptions = cve_data.get("descriptions", [])
-            description = next((desc.get("value", "") for desc in descriptions if desc.get("lang") == "en"), "")
-            if not description or len(description) < 50: continue
+        try:
+            start_index = page * results_per_page
+            logger.info(f"Consultation de la page NVD, index de départ: {start_index}")
+            st.session_state.logs.append(f"NVD: page {page + 1}")
+            params = {"resultsPerPage": results_per_page, "startIndex": start_index}
+            response = make_request(base_url, headers=headers, params=params)
 
-
-
-
-
+            if not response:
+                logger.warning("Impossible de récupérer les données du NVD. Arrêt de la collecte NVD.")
+                break
+
+            data = response.json()
+            vulnerabilities = data.get("vulnerabilities", [])
+            if not vulnerabilities:
+                logger.info("Fin des résultats pour la collecte NVD.")
+                break
 
-
+            logger.info(f"Traitement de {len(vulnerabilities)} vulnérabilités...")
 
-
-
-
-
-
+            for vuln in vulnerabilities:
+                cve_data = vuln.get("cve", {})
+                cve_id = cve_data.get("id", "")
+                descriptions = cve_data.get("descriptions", [])
+                description = next((desc.get("value", "") for desc in descriptions if desc.get("lang") == "en"), "")
+                if not description or len(description) < 50: continue
+
+                cvss_v3 = cve_data.get("metrics", {}).get("cvssMetricV31", [{}])[0].get("cvssData", {})
+                severity = cvss_v3.get("baseSeverity", "UNKNOWN")
+                score = cvss_v3.get("baseScore", 0)
+                references = [ref.get("url", "") for ref in cve_data.get("references", [])]
+
+                answer = f"CVE ID: {cve_id}\nSeverity: {severity}\nCVSS Score: {score}\nReferences: {', '.join(references[:5])}\n\nDescription: {description}"
+
+                save_qa_pair(
+                    question=f"What is the vulnerability {cve_id}?", answer=answer,
+                    category="security", subcategory="vulnerability", source=f"nvd_{cve_id}"
+                )
+                time.sleep(random.uniform(1, 3))
+        except Exception as e:
+            logger.error(f"Erreur lors de la collecte NVD: {str(e)}")
+            st.session_state.logs.append(f"Erreur NVD: {str(e)}")
    logger.info("Collecte des données NVD terminée.")
 
 def collect_stack_exchange_data(queries, num_pages, results_per_page):
@@ -623,7 +640,7 @@ def collect_stack_exchange_data(queries, num_pages, results_per_page):
         params_base["key"] = se_key
     else:
         logger.warning("Clé Stack Exchange non configurée. La collecte est ignorée.")
-        st.session_state.logs.append("ATTENTION: Clé Stack Exchange non configurée. Collecte ignorée.")
+        st.session_state.logs.append("ATTENTION: Clé Stack Exchange non configurée. Collecte Stack Exchange ignorée.")
         return
 
     sites = [
@@ -645,43 +662,97 @@ def collect_stack_exchange_data(queries, num_pages, results_per_page):
 
     for tag in list(set(tags)):
         logger.info(f"Recherche de questions avec le tag: '{tag}'")
-        questions_url = f"{base_url}/questions"
 
         for page_number in range(1, num_pages + 1):
-
-
-
-            if not response: continue
-
-            questions_data = response.json()
-            items = questions_data.get("items", [])
-
-            if not items:
-                logger.info(f"Fin des résultats pour le tag '{tag}' à la page {page_number}.")
-                break
+            try:
+                questions_url = f"{base_url}/questions"
+                params = {**params_base, "site": site, "tagged": tag, "page": page_number}
 
-
-
-            title = question.get("title", "")
-            body = clean_html(question.get("body", ""))
-            if not body or len(body) < 50: continue
+                response = make_request(questions_url, params=params)
+                if not response: continue
 
-
-
-            answers_response = make_request(answers_url, params=answers_params)
-            answer_body = ""
-            if answers_response and answers_response.json().get("items"):
-                answer_body = clean_html(answers_response.json()["items"][0].get("body", ""))
+                questions_data = response.json()
+                items = questions_data.get("items", [])
 
-            if
-
-
-
-
-
+                if not items:
+                    logger.info(f"Fin des résultats pour le tag '{tag}' à la page {page_number}.")
+                    break
+
+                for question in items:
+                    question_id = question.get("question_id")
+                    title = question.get("title", "")
+                    body = clean_html(question.get("body", ""))
+                    if not body or len(body) < 50: continue
+
+                    answers_url = f"{base_url}/questions/{question_id}/answers"
+                    answers_params = {**params_base, "site": site}
+                    answers_response = make_request(answers_url, params=answers_params)
+                    answer_body = ""
+                    if answers_response and answers_response.json().get("items"):
+                        answer_body = clean_html(answers_response.json()["items"][0].get("body", ""))
+
+                    if answer_body:
+                        save_qa_pair(
+                            question=title, answer=answer_body, category=category,
+                            subcategory=subcategory, source=f"{site}_{question_id}", tags=question.get("tags", [])
+                        )
+                    time.sleep(random.uniform(1, 3))
+            except Exception as e:
+                logger.error(f"Erreur lors de la collecte Stack Exchange: {str(e)}")
+                st.session_state.logs.append(f"Erreur Stack Exchange: {str(e)}")
     logger.info("Collecte des données Stack Exchange terminée.")
 
-def run_data_collection(sources, queries, num_pages, results_per_page):
+def collect_web_data(url):
+    logger.info(f"Début de la collecte des données pour l'URL: {url}")
+
+    try:
+        st.session_state.logs.append(f"Début de la collecte pour {url}...")
+        response = make_request(url)
+        if not response or response.status_code != 200:
+            logger.error(f"Impossible de récupérer le contenu de l'URL: {url}")
+            st.session_state.logs.append(f"Erreur: Impossible de récupérer l'URL {url}")
+            return
+
+        soup = BeautifulSoup(response.text, 'html.parser')
+
+        for script in soup(["script", "style", "header", "footer", "nav"]):
+            script.extract()
+
+        raw_text = soup.get_text()
+        clean_text = clean_html(raw_text)
+
+        if not clean_text or len(clean_text) < 100:
+            logger.warning(f"Contenu de l'URL trop court ou vide: {url}")
+            st.session_state.logs.append("Avertissement: Contenu de l'URL trop court ou vide.")
+            return
+
+        is_relevant, signatures, security_tags, _ = ia_enricher.analyze_content_relevance(clean_text)
+
+        if is_relevant:
+            title = soup.title.string if soup.title else os.path.basename(url)
+            question = f"What security information is on the page '{title}'?"
+            answer = clean_text
+
+            save_qa_pair(
+                question=question,
+                answer=answer,
+                category="security",
+                subcategory="web-scraping",
+                source=f"web_{re.sub(r'[^a-zA-Z0-9]+', '', url)[:30]}",
+                attack_signatures=signatures,
+                tags=security_tags
+            )
+        else:
+            logger.info(f"Contenu de l'URL non pertinent pour DevSecOps: {url}")
+            st.session_state.logs.append(f"Contenu de l'URL non pertinent pour DevSecOps.")
+
+    except Exception as e:
+        logger.error(f"Erreur lors du scraping de l'URL {url}: {str(e)}")
+        st.session_state.logs.append(f"Erreur lors du scraping de l'URL {url}")
+
+    logger.info("Collecte des données web terminée.")
+
+def run_data_collection(sources, queries, web_url, num_pages, results_per_page):
     st.session_state.bot_status = "En cours d'exécution"
     st.session_state.logs = []
 
@@ -689,51 +760,63 @@ def run_data_collection(sources, queries, num_pages, results_per_page):
 
     progress_bar = st.progress(0)
     status_text = st.empty()
+    log_container = st.empty()
 
     enabled_sources = [s for s, enabled in sources.items() if enabled]
+
+    if "Web Scraping" in enabled_sources and web_url:
+        enabled_sources.remove("Web Scraping")
+        enabled_sources.insert(0, "Web Scraping")
+
     total_sources = len(enabled_sources)
    completed_sources = 0
 
     for source_name in enabled_sources:
-        if source_name == "Kaggle" and 'KAGGLE_USERNAME' not in os.environ:
-            logger.warning("Clés Kaggle non définies dans les variables d'environnement. Saut de la collecte Kaggle.")
-            continue
-        if source_name == "GitHub" and not valid_keys.get('GITHUB_API_TOKEN'):
-            logger.warning("Clé GitHub non définie. Saut de la collecte GitHub.")
-            continue
-        if source_name == "Hugging Face" and not valid_keys.get('HUGGINGFACE_API_TOKEN'):
-            logger.warning("Clé Hugging Face non définie. Saut de la collecte Hugging Face.")
-            continue
-        if source_name == "NVD" and not valid_keys.get('NVD_API_KEY'):
-            logger.warning("Clé NVD non définie. Saut de la collecte NVD.")
-            continue
-        if source_name == "Stack Exchange" and not valid_keys.get('STACK_EXCHANGE_API_KEY'):
-            logger.warning("Clé Stack Exchange non définie. Saut de la collecte Stack Exchange.")
-            continue
-
         status_text.text(f"Collecte des données de {source_name}...")
-
-
-
-
+        log_container.text("Logs en temps réel:\n" + "\n".join(st.session_state.logs))
+
+        if source_name == "Kaggle":
+            collect_kaggle_data(queries.get("Kaggle", ""))
+        elif source_name == "GitHub":
+            if 'GITHUB_API_TOKEN' in valid_keys:
                 collect_github_data(queries.get("GitHub", ""), num_pages, results_per_page)
-
+            else:
+                logger.warning("Clé GitHub non définie. Saut de la collecte GitHub.")
+                st.session_state.logs.append("ATTENTION: Clé GitHub non définie. Collecte GitHub ignorée.")
+        elif source_name == "Hugging Face":
+            if 'HUGGINGFACE_API_TOKEN' in valid_keys:
                 collect_huggingface_data(queries.get("Hugging Face", ""), num_pages, results_per_page)
-
-
-
+            else:
+                logger.warning("Clé Hugging Face non définie. Saut de la collecte Hugging Face.")
+                st.session_state.logs.append("ATTENTION: Clé Hugging Face non définie. Collecte Hugging Face ignorée.")
+        elif source_name == "NVD":
+            if 'NVD_API_KEY' in valid_keys:
+                collect_nvd_data(num_pages, results_per_page)
+            else:
+                logger.warning("Clé NVD non définie. Saut de la collecte NVD.")
+                st.session_state.logs.append("ATTENTION: Clé NVD non définie. Collecte NVD ignorée.")
+        elif source_name == "Stack Exchange":
+            if 'STACK_EXCHANGE_API_KEY' in valid_keys:
                 collect_stack_exchange_data(queries.get("Stack Exchange", ""), num_pages, results_per_page)
-
-
-
+            else:
+                logger.warning("Clé Stack Exchange non définie. Saut de la collecte Stack Exchange.")
+                st.session_state.logs.append("ATTENTION: Clé Stack Exchange non définie. Collecte Stack Exchange ignorée.")
+        elif source_name == "Web Scraping":
+            if web_url:
+                collect_web_data(web_url)
+            else:
+                st.session_state.logs.append("ATTENTION: URL de scraping non fournie. Collecte ignorée.")
+
         completed_sources += 1
         progress_bar.progress(completed_sources / total_sources)
-
+        log_container.text("Logs en temps réel:\n" + "\n".join(st.session_state.logs))
+        time.sleep(1)  # Pause pour la mise à jour visuelle
+
     st.session_state.bot_status = "Arrêté"
     st.info("Collecte des données terminée!")
     progress_bar.empty()
     status_text.empty()
-
+    log_container.text("Logs en temps réel:\n" + "\n".join(st.session_state.logs))
     st.rerun()
 
 def main():
@@ -773,22 +856,27 @@ def main():
     st.header("Lancer la collecte")
 
     st.subheader("Sources de données")
-    sources_columns = st.columns(
+    sources_columns = st.columns(6)
     sources = {
         "GitHub": sources_columns[0].checkbox("GitHub", value=True),
-        "
-        "
-        "
-        "
+        "Hugging Face": sources_columns[1].checkbox("Hugging Face", value=True),
+        "NVD": sources_columns[2].checkbox("NVD", value=True),
+        "Stack Exchange": sources_columns[3].checkbox("Stack Exchange", value=True),
+        "Kaggle": sources_columns[4].checkbox("Kaggle", value=True),
+        "Web Scraping": sources_columns[5].checkbox("Web Scraping", value=True)
     }
+
+    web_url = st.text_input("URL à scraper (optionnel)", help="Entrez une URL pour extraire les données de sécurité.")
+
+    st.info("En cliquant sur 'Lancer la collecte', vous reconnaissez que vous disposez des droits légaux de scraping et de manipulation des données du site fourni, et nous déclinons toute responsabilité.")
 
     st.subheader("Requêtes de recherche")
     queries = {}
     queries["GitHub"] = st.text_area("Requêtes GitHub (une par ligne)", "topic:devsecops\ntopic:security\nvulnerability")
     queries["Kaggle"] = st.text_area("Requêtes Kaggle (une par ligne)", "cybersecurity\nvulnerability dataset\npenetration testing")
     queries["Hugging Face"] = st.text_area("Requêtes Hugging Face (une par ligne)", "security dataset\nvulnerability\nlanguage model security")
-    queries["NVD"] = ""
     queries["Stack Exchange"] = st.text_area("Tags Stack Exchange (un par ligne)", "devsecops\nsecurity\nvulnerability")
+    queries["NVD"] = ""
 
     st.markdown("---")
 
@@ -797,7 +885,7 @@ def main():
             st.session_state.logs = []
             st.session_state.qa_data = []
             st.session_state.total_qa_pairs = 0
-            run_data_collection(sources, queries, num_pages, results_per_page)
+            run_data_collection(sources, queries, web_url, num_pages, results_per_page)
         else:
             st.warning("La collecte est en cours. Veuillez attendre qu'elle se termine.")
             if st.button("Forcer l'arrêt", use_container_width=True, type="secondary"):
@@ -862,12 +950,14 @@ def main():
         with llm_col1:
             if st.button("Démarrer le serveur LLM", type="primary", use_container_width=True):
                 start_llm_server()
+                st.rerun()
             if st.button("Vérifier le statut du serveur", use_container_width=True):
                 check_server_status()
                 st.rerun()
         with llm_col2:
             if st.button("Arrêter le serveur LLM", type="secondary", use_container_width=True):
                 stop_llm_server()
+                st.rerun()
 
         st.markdown("---")
 