import gradio as gr
from transformers import pipeline

# Load an open-source question-answering model (DistilBERT fine-tuned on SQuAD, distilled for speed)
qa_model = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")

# Simulated DevOps context (can be replaced with real logs or docs)
devops_context = """
Kubernetes pods are running across three nodes. Jenkins is configured with two pipelines: build and deploy.
Disk usage on node-1 is 72%. Recent logs show no errors. Deployment is triggered via GitHub Actions.
"""

def devops_qa_agent(query):
    # Rule-based fallback for common operational requests
    if "restart jenkins" in query.lower():
        return "🔄 Jenkins restarted successfully."
    elif "pod status" in query.lower():
        return "✅ All Kubernetes pods are running."
    elif "disk usage" in query.lower():
        return "💽 Disk usage: 72% used on node-1."
    elif "deploy" in query.lower():
        return "🚀 Deployment triggered via CI/CD pipeline."
    else:
        # LLM-powered Q&A over the simulated context
        result = qa_model(question=query, context=devops_context)
        return f"🤖 LLM Answer: {result['answer']}"

gr.Interface(
    fn=devops_qa_agent,
    inputs=gr.Textbox(lines=2, placeholder="Ask a DevOps question..."),
    outputs="text",
    title="🛠️ DevOps AI Agent with Open-Source LLM",
    description="Hybrid rule-based + LLM-powered Q&A agent. No API keys needed. Runs entirely on Hugging Face Spaces."
).launch()
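Before pushing the app to a Space, it can help to exercise devops_qa_agent directly, for example in a notebook with the launch() call temporarily commented out. The queries below are purely illustrative: keyword matches return a canned reply instantly, while anything else falls through to the DistilBERT QA pipeline over devops_context.

# Quick sanity check of the hybrid routing (illustrative queries)
print(devops_qa_agent("Please restart Jenkins"))       # keyword branch -> canned reply
print(devops_qa_agent("What is the disk usage?"))      # keyword branch -> "72% used on node-1"
print(devops_qa_agent("How many nodes are the pods running on?"))
# No keyword matches, so the QA model extracts an answer (likely "three") from devops_context.

If you deploy this as a Gradio Space, note that transformers (and a backend such as torch) typically has to be listed in the Space's requirements.txt, while gradio itself is provided by the Gradio SDK.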