import gradio as gr
from transformers import pipeline

# Load an open-source QA model (DistilBERT fine-tuned on SQuAD, distilled for
# speed); weights are fetched from the Hugging Face Hub on first run.
qa_model = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")

# Simulated DevOps context (can be replaced with real logs or docs)
devops_context = """
Kubernetes pods are running across three nodes. Jenkins is configured with two pipelines: build and deploy.
Disk usage on node-1 is 72%. Recent logs show no errors. Deployment is triggered via GitHub Actions.
"""

def devops_qa_agent(query):
    q = query.lower()
    # Rule-based fast path: common operational queries get canned responses.
    if "restart jenkins" in q:
        return "🔄 Jenkins restarted successfully."
    elif "pod status" in q:
        return "✅ All Kubernetes pods are running."
    elif "disk usage" in q:
        return "💽 Disk usage: 72% used on node-1."
    elif "deploy" in q:
        return "🚀 Deployment triggered via CI/CD pipeline."
    else:
        # Fallback: let the QA model extract an answer span from the context
        result = qa_model(question=query, context=devops_context)
        return f"🤖 LLM Answer: {result['answer']}"
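
# Illustrative routing, given the rules and context above (the extractive
# answer is what the model would likely return, not a guaranteed output):
#   devops_qa_agent("What is the pod status?")   -> "✅ All Kubernetes pods are running."
#   devops_qa_agent("Trigger a deploy")          -> "🚀 Deployment triggered via CI/CD pipeline."
#   devops_qa_agent("How many nodes are there?") -> extractive answer, e.g. "three"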

gr.Interface(
    fn=devops_qa_agent,
    inputs=gr.Textbox(lines=2, placeholder="Ask a DevOps question..."),
    outputs="text",
    title="🛠️ DevOps AI Agent with Open-Source LLM",
    description="Hybrid rule-based + LLM-powered Q&A agent. No API keys needed; runs entirely on local or Hugging Face Spaces hardware."
).launch()
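
# Usage note: on Hugging Face Spaces, the launch() call above is all that is
# needed. To try it locally (assuming the file is saved as app.py), install
# the dependencies and run the script:
#   pip install gradio transformers torch
#   python app.py
# then open the local URL that Gradio prints.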