import gradio as gr
from transformers import pipeline

# Load an open-source extractive QA model (DistilBERT, distilled for speed)
qa_model = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")

# Simulated DevOps context (can be replaced with real logs or docs)
devops_context = """
Kubernetes pods are running across three nodes.
Jenkins is configured with two pipelines: build and deploy.
Disk usage on node-1 is 72%.
Recent logs show no errors.
Deployment is triggered via GitHub Actions.
"""

def devops_qa_agent(query):
    # Rule-based answers for common operational commands, checked first
    if "restart jenkins" in query.lower():
        return "🔄 Jenkins restarted successfully."
    elif "pod status" in query.lower():
        return "✅ All Kubernetes pods are running."
    elif "disk usage" in query.lower():
        return "💽 Disk usage: 72% used on node-1."
    elif "deploy" in query.lower():
        return "🚀 Deployment triggered via CI/CD pipeline."
    else:
        # Fallback: model-powered Q&A over the DevOps context
        result = qa_model(question=query, context=devops_context)
        return f"🤖 LLM Answer: {result['answer']}"

demo = gr.Interface(
    fn=devops_qa_agent,
    inputs=gr.Textbox(lines=2, placeholder="Ask a DevOps question..."),
    outputs="text",
    title="🛠️ DevOps AI Agent with Open-Source LLM",
    description="Hybrid rule-based + LLM-powered Q&A agent. No API keys needed. Runs locally or on Hugging Face Spaces.",
)
demo.launch()
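
# Optional addendum: feed the agent real data.
# The devops_context above is simulated; as its comment notes, it can be
# replaced with real logs or docs. Below is a minimal sketch of that swap,
# assuming a hypothetical log path ("/var/log/deploy.log" is a placeholder)
# and a rough character cap, since DistilBERT's input window is limited
# (~512 tokens). Note that demo.launch() above blocks, so this helper is
# shown here purely as an addendum; in practice, define it before the
# Interface is built.
from pathlib import Path

def load_context(log_path="/var/log/deploy.log", max_chars=2000):
    # Read real log/doc text into the QA context; fall back to the simulated
    # context if the file is missing. Character truncation is a crude proxy
    # for the model's token limit, keeping the most recent log entries.
    path = Path(log_path)
    if path.exists():
        return path.read_text(errors="ignore")[-max_chars:]
    return devops_context

# To use it: set devops_context = load_context() before constructing demo.
# To run the app: pip install gradio transformers torch, then python app.py
# (app.py is a placeholder filename; on Hugging Face Spaces the same file
# serves as the Space's entry point).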