# -*- coding: utf-8 -*-
"""app.py

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1zVrT1lKxARrm_wPS9sX-L8z5K4cxaAXm
"""

import gradio as gr
import yaml, json, requests
import os

# Call Mistral-7B via OpenRouter
def call_mistral_openrouter(prompt):
    headers = {
        "Authorization": f"Bearer {os.getenv('OpenRouter_Secret_Key')}",
        "Content-Type": "application/json"
    }
    data = {
        "model": "mistralai/mistral-7b-instruct:free",
        "messages": [
            {"role": "system", "content": "You are a helpful technical writer."},
            {"role": "user", "content": prompt}
        ]
    }
    # A timeout keeps a slow or unreachable API from hanging the Gradio request
    res = requests.post(
        "https://openrouter.ai/api/v1/chat/completions",
        headers=headers,
        json=data,
        timeout=120
    )
    try:
        return res.json()["choices"][0]["message"]["content"].strip()
    except Exception as e:
        return f"⚠️ LLM Error: {e}\nFull response: {res.text}"

# Prompt builder
def format_prompt(spec: dict, format_type: str, style: str) -> str:
    prompt_lines = [style.strip() + "\n\n"]

    if format_type.lower() == "openapi":
        seen = set()
        paths = spec.get("paths", {})
        for path, methods in paths.items():
            for method, meta in methods.items():
                key = (path, method)
                if key in seen:
                    continue
                seen.add(key)
                line = f"Endpoint `{path}` supports `{method.upper()}` requests.\n"
                summary = meta.get("summary", "")
                if summary:
                    line += f"Purpose: {summary}\n"
                for p in meta.get("parameters", []):
                    pname = p.get("name", "unknown")
                    ploc = p.get("in", "unknown")
                    ptype = p.get("schema", {}).get("type", "unknown")
                    line += f"- Parameter `{pname}` in `{ploc}` of type `{ptype}`\n"
                for code, resp in meta.get("responses", {}).items():
                    desc = resp.get("description", "")
                    line += f"- Returns `{code}`: {desc}\n"
                prompt_lines.append(line + "\n")

    elif format_type.lower() == "terraform":
        resources = spec.get("resource", {})
        for rtype, rblocks in resources.items():
            for name, config in rblocks.items():
                line = f"Terraform resource `{rtype}.{name}` has configuration:\n"
                for k, v in config.items():
                    line += f"- {k}: {v}\n"
                prompt_lines.append(line + "\n")

    elif format_type.lower() == "kubernetes":
        kind = spec.get("kind", "Unknown")
        metadata = spec.get("metadata", {})
        name = metadata.get("name", "Unnamed")
        spec_section = spec.get("spec", {})
        line = f"Kubernetes `{kind}` named `{name}` includes:\n"
        for k, v in spec_section.items():
            v_fmt = json.dumps(v) if isinstance(v, (list, dict)) else str(v)
            line += f"- {k}: {v_fmt}\n"
        prompt_lines.append(line + "\n")

    elif format_type.lower() in {"cicd", "ci/cd", "github-actions", "github"}:
        jobs = spec.get("jobs", {})
        for job_name, job_config in jobs.items():
            line = f"CI/CD Job `{job_name}` consists of:\n"
            steps = job_config.get("steps", [])
            for step in steps:
                if "name" in step:
                    line += f"- Step: {step['name']}\n"
                if "uses" in step:
                    line += f" Uses: {step['uses']}\n"
                if "run" in step:
                    line += f" Run: {step['run']}\n"
            prompt_lines.append(line + "\n")

    return "".join(prompt_lines)

# End-to-end summarization
def summarize_spec(format_type, raw_text, url, file, eli5):
    style = (
        "You are a technical writer. Explain like I'm 5. "
        if eli5 else "You are a technical writer. "
    )
    style += f"Summarize the following {format_type.upper()} spec in clear, developer-friendly language. Use full sentences. Combine related endpoints where appropriate."

    try:
        if file:
            # gr.File may hand back a temp-file object or a plain filepath string
            # depending on the Gradio version, so read via the path in both cases
            path = file if isinstance(file, str) else file.name
            with open(path, "r", encoding="utf-8") as f:
                content = f.read()
            spec = yaml.safe_load(content)
        elif url:
            response = requests.get(url, timeout=30)
            spec = response.json() if url.endswith(".json") else yaml.safe_load(response.text)
        elif raw_text:
            spec = yaml.safe_load(raw_text)
        else:
            return "⚠️ No input provided."
    except Exception as e:
        return f"⚠️ Parsing error: {e}"

    try:
        prompt = format_prompt(spec, format_type, style)
        result = call_mistral_openrouter(prompt)
        return result
    except Exception as e:
        return f"⚠️ Inference error: {e}"

# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## 📄 YAML/JSON Explainer — Mistral-7B via OpenRouter")

    format_dropdown = gr.Dropdown(
        choices=["openapi", "terraform", "kubernetes", "cicd"],
        value="openapi",
        label="Select Spec Format"
    )
    raw_input = gr.Textbox(lines=10, placeholder="Paste YAML/JSON here...", label="Raw Spec")
    eli5_toggle = gr.Checkbox(label="Explain like I'm 5", value=False)
    file_input = gr.File(file_types=[".yaml", ".yml", ".json"], label="Upload File")
    url_input = gr.Textbox(placeholder="https://example.com/spec.yaml", label="OR enter a URL")
    submit_btn = gr.Button("Generate Summary")
    output = gr.TextArea(label="Natural Language Explanation", lines=12)

    submit_btn.click(
        summarize_spec,
        inputs=[format_dropdown, raw_input, url_input, file_input, eli5_toggle],
        outputs=output
    )

demo.launch(share=True)