Spaces:
Running
Running
milwright
update ui: change button emojis, reorganize docs with accordions, fix broken images
663172e
| """ | |
| HuggingFace Space Generator - Refactored with Gradio 5.x Best Practices | |
| Creates customizable AI chat interfaces for deployment on HuggingFace Spaces | |
| """ | |
| import gradio as gr | |
| import json | |
| import zipfile | |
| import io | |
| import os | |
| import requests | |
| from datetime import datetime | |
| from dotenv import load_dotenv | |
| from pathlib import Path | |
| # Import our shared utilities | |
| from utils import ( | |
| get_theme, fetch_url_content, create_safe_filename, | |
| export_conversation_to_markdown, process_file_upload, | |
| ConfigurationManager, get_model_choices, AVAILABLE_THEMES, | |
| extract_urls_from_text | |
| ) | |
| # Load environment variables | |
| load_dotenv() | |
| # Load templates | |
| try: | |
| from space_template import get_template, validate_template | |
| print("Loaded space template") | |
| except Exception as e: | |
| print(f"Could not load space_template.py: {e}") | |
| # Fallback template will be defined if needed | |
| # Load academic templates if available | |
| try: | |
| with open('academic_templates.json', 'r') as f: | |
| ACADEMIC_TEMPLATES = json.load(f) | |
| print(f"Loaded {len(ACADEMIC_TEMPLATES)} academic templates") | |
| except Exception as e: | |
| print(f"Could not load academic templates: {e}") | |
| ACADEMIC_TEMPLATES = {} | |
| class SpaceGenerator: | |
| """Main application class for generating HuggingFace Spaces""" | |
| def __init__(self): | |
| self.default_config = { | |
| 'name': 'AI Assistant', | |
| 'tagline': 'A customizable AI assistant', | |
| 'description': 'A versatile AI assistant powered by advanced language models. Configure it to meet your specific needs with custom prompts, examples, and grounding URLs.', | |
| 'system_prompt': 'You are a helpful AI assistant.', | |
| 'model': 'google/gemini-2.0-flash-001', | |
| 'api_key_var': 'API_KEY', | |
| 'temperature': 0.7, | |
| 'max_tokens': 750, | |
| 'theme': 'Default', | |
| 'grounding_urls': [], | |
| 'enable_dynamic_urls': True, | |
| 'enable_file_upload': True, | |
| 'examples': [ | |
| "Hello! How can you help me?", | |
| "Tell me something interesting", | |
| "What can you do?" | |
| ] | |
| } | |
| self.config_manager = ConfigurationManager(self.default_config) | |
| self.current_config = {} | |
| self.url_content_cache = {} | |
| # Cache for custom values when switching templates | |
| self.custom_values_cache = {} | |
| def create_interface(self) -> gr.Blocks: | |
| """Create the main Gradio interface""" | |
| theme = get_theme("Default") # Using Default theme for the generator | |
| with gr.Blocks(title="ChatUI Helper", theme=theme) as demo: | |
| # Header | |
| gr.Markdown("# ChatUI Helper") | |
| gr.Markdown("Create customizable AI chat interfaces for deployment on HuggingFace Spaces") | |
| # Shared state - create these first so they can be referenced in tabs | |
| self.config_state = gr.State({}) | |
| self.preview_chat_history = gr.State([]) | |
| self.previous_template = gr.State("None (Custom)") | |
| self.template_cache = gr.State({}) | |
| # Main tabs | |
| with gr.Tabs() as main_tabs: | |
| self.main_tabs = main_tabs # Store reference for tab switching | |
| # Configuration Tab | |
| with gr.Tab("Configuration"): | |
| self._create_configuration_tab() | |
| # Preview Tab | |
| with gr.Tab("Preview"): | |
| self._create_preview_tab() | |
| # Documentation Tab | |
| with gr.Tab("Documentation"): | |
| self._create_documentation_tab() | |
| return demo | |
| def _create_configuration_tab(self): | |
| """Create the configuration tab with modern Gradio patterns""" | |
| with gr.Column(): | |
| # Template Selection | |
| with gr.Group(): | |
| gr.Markdown("### π Quick Start Templates") | |
| template_selector = gr.Dropdown( | |
| label="Select Template", | |
| choices=["None (Custom)"] + list(ACADEMIC_TEMPLATES.keys()), | |
| value="None (Custom)", | |
| interactive=True | |
| ) | |
| # Space Identity | |
| with gr.Group(): | |
| gr.Markdown("### π― Space Identity") | |
| with gr.Row(): | |
| self.name_input = gr.Textbox( | |
| label="Assistant Name", | |
| placeholder="My AI Assistant", | |
| value="AI Assistant" | |
| ) | |
| self.theme_input = gr.Dropdown( | |
| label="Theme", | |
| choices=list(AVAILABLE_THEMES.keys()), | |
| value="Default" | |
| ) | |
| self.tagline_input = gr.Textbox( | |
| label="Tagline", | |
| placeholder="Brief tagline for HuggingFace...", | |
| max_length=60, | |
| info="Maximum 60 characters (for YAML frontmatter)" | |
| ) | |
| self.description_input = gr.Textbox( | |
| label="Description", | |
| placeholder="A detailed description of your AI assistant. You can use markdown formatting here...", | |
| lines=4, | |
| info="Full markdown description for the README" | |
| ) | |
| # System Configuration | |
| with gr.Group(): | |
| gr.Markdown("### βοΈ System Configuration") | |
| self.system_prompt_input = gr.Textbox( | |
| label="System Prompt", | |
| placeholder="You are a helpful AI assistant...", | |
| lines=5 | |
| ) | |
| self.model_input = gr.Dropdown( | |
| label="Model", | |
| choices=get_model_choices(), | |
| value="google/gemini-2.0-flash-001" | |
| ) | |
| with gr.Row(): | |
| self.temperature_input = gr.Slider( | |
| label="Temperature", | |
| minimum=0, | |
| maximum=2, | |
| value=0.7, | |
| step=0.1 | |
| ) | |
| self.max_tokens_input = gr.Slider( | |
| label="Max Tokens", | |
| minimum=50, | |
| maximum=4096, | |
| value=750, | |
| step=50 | |
| ) | |
| # Example Prompts | |
| with gr.Group(): | |
| gr.Markdown("### π‘ Example Prompts") | |
| gr.Markdown("Provide 3-5 sample prompts that showcase your assistant's capabilities") | |
| # Create individual example input fields | |
| self.example_inputs = [] | |
| for i in range(5): | |
| example_input = gr.Textbox( | |
| label=f"Example {i+1}", | |
| placeholder=f"Sample prompt {i+1}...", | |
| visible=(i < 3) # Show first 3 by default | |
| ) | |
| self.example_inputs.append(example_input) | |
| with gr.Row(): | |
| add_example_btn = gr.Button("β Add Example", size="sm") | |
| remove_example_btn = gr.Button("β Remove Example", size="sm", visible=False) | |
| self.example_count = gr.State(3) | |
| # URL Grounding | |
| with gr.Group(): | |
| gr.Markdown("### π URL Grounding") | |
| gr.Markdown("Add URLs to provide context to your assistant") | |
| # Dynamic URL fields using gr.render | |
| self.url_count = gr.State(2) | |
| self.url_inputs = [] | |
| # Create initial URL inputs | |
| for i in range(10): | |
| url_input = gr.Textbox( | |
| label=f"URL {i+1}" + (" (Primary)" if i < 2 else " (Secondary)"), | |
| placeholder="https://...", | |
| visible=(i < 2) | |
| ) | |
| self.url_inputs.append(url_input) | |
| with gr.Row(): | |
| add_url_btn = gr.Button("β Add URL", size="sm") | |
| remove_url_btn = gr.Button("β Remove URL", size="sm", visible=False) | |
| # API Configuration | |
| with gr.Group(): | |
| gr.Markdown("### π API Configuration") | |
| gr.Markdown( | |
| "Configure the required secrets in your HuggingFace Space settings." | |
| ) | |
| # Required API Key on its own row | |
| self.api_key_var_input = gr.Textbox( | |
| label="API Key Variable Name (Required)", | |
| value="API_KEY", | |
| info="Environment variable for OpenRouter API key", | |
| interactive=False # Make non-editable | |
| ) | |
| # Optional variables on the same row | |
| gr.Markdown("**Optional Environment Variables:**") | |
| with gr.Row(): | |
| self.hf_token_input = gr.Textbox( | |
| label="HF Token Variable Name", | |
| value="HF_TOKEN", | |
| info="Environment variable for HuggingFace token", | |
| interactive=False # Make non-editable | |
| ) | |
| self.access_code_input = gr.Textbox( | |
| label="Access Code Variable", | |
| value="ACCESS_CODE", | |
| info="Environment variable for password protection", | |
| interactive=False # Make non-editable | |
| ) | |
| # Instructions without broken images | |
| with gr.Accordion("π Step-by-Step Instructions", open=False): | |
| gr.Markdown( | |
| """### How to Configure Secrets in HuggingFace Spaces | |
| **Step 1: Navigate to Settings** | |
| - Go to your HuggingFace Space | |
| - Click on the βοΈ **Settings** tab at the top | |
| **Step 2: Access Variables and Secrets** | |
| - Scroll down to find **Variables and secrets** section | |
| - Click on **New secret** button | |
| **Step 3: Add Required Secrets** | |
| Add the following secrets to your Space: | |
| 1. **API_KEY** - Your OpenRouter API key (required) | |
| - Get your key at: https://openrouter.ai/keys | |
| - Value should start with `sk-or-` | |
| - This enables AI responses | |
| 2. **HF_TOKEN** - Your HuggingFace token (optional) | |
| - Enables automatic configuration updates | |
| - Get your token at: https://huggingface.co/settings/tokens | |
| - Select "write" permission when creating token | |
| 3. **ACCESS_CODE** - Password protection (optional) | |
| - Set any password to restrict access | |
| - Users will need this code to access your Space | |
| - Leave empty for public access | |
| """ | |
| ) | |
| # Configuration Upload | |
| with gr.Accordion("π€ Upload Configuration", open=False): | |
| config_upload = gr.File( | |
| label="Upload config.json", | |
| file_types=[".json"], | |
| type="filepath" | |
| ) | |
| upload_status = gr.Markdown(visible=False) | |
| # Action Buttons | |
| with gr.Row(): | |
| preview_btn = gr.Button("π¬ Preview Configuration", variant="secondary") | |
| generate_btn = gr.Button("π³οΈ Generate Deployment Package", variant="primary") | |
| # Output Section | |
| with gr.Column(visible=False) as output_section: | |
| output_message = gr.Markdown() | |
| download_file = gr.File(label="π¦ Download Package", visible=False) | |
| deployment_details = gr.Markdown(visible=False) | |
| # Event Handlers | |
| template_selector.change( | |
| self._apply_template, | |
| inputs=[ | |
| template_selector, self.previous_template, self.template_cache, | |
| self.name_input, self.tagline_input, self.description_input, self.system_prompt_input, | |
| self.model_input, self.temperature_input, self.max_tokens_input | |
| ] + self.example_inputs + self.url_inputs, | |
| outputs=[ | |
| self.name_input, self.tagline_input, self.description_input, self.system_prompt_input, | |
| self.model_input, self.temperature_input, self.max_tokens_input | |
| ] + self.example_inputs + self.url_inputs + [self.previous_template, self.template_cache] | |
| ) | |
| config_upload.upload( | |
| self._apply_uploaded_config, | |
| inputs=[config_upload], | |
| outputs=[ | |
| self.name_input, self.tagline_input, self.description_input, self.system_prompt_input, | |
| self.model_input, self.theme_input, self.temperature_input, | |
| self.max_tokens_input, upload_status | |
| ] + self.example_inputs + self.url_inputs | |
| ) | |
| # URL management | |
| def update_url_visibility(count): | |
| new_count = min(count + 1, 10) | |
| updates = [] | |
| for i in range(10): | |
| updates.append(gr.update(visible=(i < new_count))) | |
| updates.append(gr.update(visible=(new_count > 2))) # Remove button | |
| updates.append(new_count) | |
| return updates | |
| def remove_url(count): | |
| new_count = max(count - 1, 2) | |
| updates = [] | |
| for i in range(10): | |
| updates.append(gr.update(visible=(i < new_count))) | |
| updates.append(gr.update(visible=(new_count > 2))) # Remove button | |
| updates.append(new_count) | |
| return updates | |
| add_url_btn.click( | |
| update_url_visibility, | |
| inputs=[self.url_count], | |
| outputs=self.url_inputs + [remove_url_btn, self.url_count] | |
| ) | |
| remove_url_btn.click( | |
| remove_url, | |
| inputs=[self.url_count], | |
| outputs=self.url_inputs + [remove_url_btn, self.url_count] | |
| ) | |
| # Example management | |
| def update_example_visibility(count): | |
| new_count = min(count + 1, 5) | |
| updates = [] | |
| for i in range(5): | |
| updates.append(gr.update(visible=(i < new_count))) | |
| updates.append(gr.update(visible=(new_count > 3))) # Remove button | |
| updates.append(new_count) | |
| return updates | |
| def remove_example(count): | |
| new_count = max(count - 1, 3) | |
| updates = [] | |
| for i in range(5): | |
| updates.append(gr.update(visible=(i < new_count))) | |
| updates.append(gr.update(visible=(new_count > 3))) # Remove button | |
| updates.append(new_count) | |
| return updates | |
| add_example_btn.click( | |
| update_example_visibility, | |
| inputs=[self.example_count], | |
| outputs=self.example_inputs + [remove_example_btn, self.example_count] | |
| ) | |
| remove_example_btn.click( | |
| remove_example, | |
| inputs=[self.example_count], | |
| outputs=self.example_inputs + [remove_example_btn, self.example_count] | |
| ) | |
| # Preview and Generate handlers | |
| preview_btn.click( | |
| self._preview_configuration, | |
| inputs=[ | |
| self.name_input, self.tagline_input, self.description_input, self.system_prompt_input, | |
| self.model_input, self.theme_input, self.api_key_var_input, | |
| self.temperature_input, self.max_tokens_input, self.access_code_input | |
| ] + self.example_inputs + self.url_inputs, | |
| outputs=[self.config_state] | |
| ).then( | |
| lambda: gr.Tabs(selected=1), # Switch to preview tab | |
| outputs=[self.main_tabs] | |
| ) | |
| generate_btn.click( | |
| self._generate_package, | |
| inputs=[ | |
| self.name_input, self.tagline_input, self.description_input, self.system_prompt_input, | |
| self.model_input, self.theme_input, self.api_key_var_input, | |
| self.temperature_input, self.max_tokens_input, self.access_code_input | |
| ] + self.example_inputs + self.url_inputs, | |
| outputs=[ | |
| output_section, output_message, download_file, | |
| deployment_details, self.config_state | |
| ] | |
| ) | |
| def _create_preview_tab(self): | |
| """Create the preview tab with modern patterns""" | |
| with gr.Column(): | |
| # Use gr.render for dynamic preview based on config | |
| def render_preview(config): | |
| if not config or not config.get('preview_ready'): | |
| gr.Markdown( | |
| "### β οΈ Preview Not Ready\n\n" | |
| "Configure your assistant in the Configuration tab and click 'Preview Configuration' to test it here." | |
| ) | |
| return | |
| # Header | |
| gr.Markdown(f"# {config.get('name', 'AI Assistant')}") | |
| if config.get('tagline'): | |
| gr.Markdown(f"*{config.get('tagline')}*") | |
| # Chat interface | |
| preview_chatbot = gr.Chatbot( | |
| type="messages", | |
| height=400, | |
| show_copy_button=True | |
| ) | |
| # Message input | |
| msg = gr.Textbox( | |
| label="Message", | |
| placeholder="Type your message here...", | |
| lines=2 | |
| ) | |
| # Buttons | |
| with gr.Row(): | |
| submit_btn = gr.Button("Send", variant="primary") | |
| clear_btn = gr.Button("Clear") | |
| # Export functionality | |
| with gr.Row(): | |
| export_btn = gr.DownloadButton( | |
| "π₯ Export Conversation", | |
| variant="secondary", | |
| size="sm" | |
| ) | |
| # Examples section | |
| examples = config.get('examples_list', []) | |
| if examples: | |
| gr.Examples(examples=examples, inputs=msg) | |
| # File upload accordion (if enabled) | |
| if config.get('enable_file_upload'): | |
| with gr.Accordion("π Upload Files", open=False): | |
| file_upload = gr.File( | |
| label="Upload Files", | |
| file_types=None, | |
| file_count="multiple", | |
| visible=True, | |
| interactive=True | |
| ) | |
| uploaded_files_display = gr.Markdown("", visible=False) | |
| # Configuration accordion | |
| with gr.Accordion("βΉοΈ Configuration", open=False): | |
| config_display = {k: v for k, v in config.items() if k != 'preview_ready'} | |
| gr.JSON( | |
| value=config_display, | |
| label="Active Configuration", | |
| show_label=True | |
| ) | |
| # Chat functionality | |
| def respond(message, chat_history): | |
| if not message: | |
| return chat_history, "" | |
| # Simulate response for preview | |
| api_key = os.environ.get(config.get('api_key_var', 'API_KEY')) | |
| if not api_key: | |
| response = ( | |
| f"π **API Key Required**\n\n" | |
| f"Please configure your OpenRouter API key:\n" | |
| f"1. Go to Settings (βοΈ) in your HuggingFace Space\n" | |
| f"2. Click 'Variables and secrets'\n" | |
| f"3. Add secret: **{config.get('api_key_var', 'API_KEY')}**\n" | |
| f"4. Value: Your OpenRouter API key (starts with `sk-or-`)\n\n" | |
| f"Get your API key at: https://openrouter.ai/keys" | |
| ) | |
| else: | |
| # Make actual API call to OpenRouter | |
| try: | |
| headers = { | |
| "Authorization": f"Bearer {api_key}", | |
| "Content-Type": "application/json" | |
| } | |
| # Build messages for API | |
| messages = [{"role": "system", "content": config.get('system_prompt', 'You are a helpful AI assistant.')}] | |
| # Add conversation history | |
| for msg in chat_history: | |
| if isinstance(msg, dict) and 'role' in msg and 'content' in msg: | |
| messages.append({ | |
| "role": msg['role'], | |
| "content": msg['content'] | |
| }) | |
| # Add current message | |
| messages.append({ | |
| "role": "user", | |
| "content": message | |
| }) | |
| # Get grounding context from URLs if configured | |
| grounding_context = "" | |
| urls = config.get('grounding_urls', []) | |
| if urls and len(urls) > 0: | |
| grounding_context = "\nπ **Reference Context:**\n" | |
| for i, url in enumerate(urls[:2], 1): # Primary URLs only | |
| try: | |
| content = fetch_url_content(url) | |
| if not content.startswith("β") and not content.startswith("β±οΈ"): | |
| grounding_context += f"\n**Source {i}:** {content}\n" | |
| except: | |
| pass | |
| # Add grounding context to the user message if available | |
| if grounding_context: | |
| messages[-1]["content"] = f"{grounding_context}\n\n{message}" | |
| data = { | |
| "model": config.get('model', 'openai/gpt-3.5-turbo'), | |
| "messages": messages, | |
| "temperature": config.get('temperature', 0.7), | |
| "max_tokens": config.get('max_tokens', 750), | |
| "stream": False | |
| } | |
| response = requests.post( | |
| "https://openrouter.ai/api/v1/chat/completions", | |
| headers=headers, | |
| json=data, | |
| timeout=30 | |
| ) | |
| if response.status_code == 200: | |
| result = response.json() | |
| response = result['choices'][0]['message']['content'] | |
| else: | |
| error_data = response.json() | |
| error_message = error_data.get('error', {}).get('message', 'Unknown error') | |
| response = f"β API Error ({response.status_code}): {error_message}" | |
| except requests.exceptions.Timeout: | |
| response = "β° Request timeout (30s limit). Try a shorter message or different model." | |
| except requests.exceptions.ConnectionError: | |
| response = "π Connection error. Check your internet connection and try again." | |
| except Exception as e: | |
| response = f"β Error: {str(e)}" | |
| # Update chat history | |
| chat_history = chat_history + [ | |
| {"role": "user", "content": message}, | |
| {"role": "assistant", "content": response} | |
| ] | |
| return chat_history, "" | |
| # Wire up the interface | |
| msg.submit(respond, [msg, preview_chatbot], [preview_chatbot, msg]) | |
| submit_btn.click(respond, [msg, preview_chatbot], [preview_chatbot, msg]) | |
| clear_btn.click(lambda: ([], ""), outputs=[preview_chatbot, msg]) | |
| # Export handler | |
| def prepare_export(): | |
| return None # Simplified for preview | |
| export_btn.click( | |
| prepare_export, | |
| outputs=[export_btn] | |
| ) | |
| def _create_documentation_tab(self): | |
| """Create the documentation tab""" | |
| with gr.Column(): | |
| gr.Markdown(self._get_support_docs()) | |
| def _apply_template(self, template_name, prev_template, cache, | |
| name, tagline, desc, prompt, model, temp, tokens, *args): | |
| """Apply selected template to form fields with caching""" | |
| # Split args into examples and URLs | |
| example_values = args[:5] # First 5 are examples | |
| url_values = args[5:] # Rest are URLs | |
| # First, cache the current values if switching from custom | |
| if prev_template == "None (Custom)" and template_name != "None (Custom)": | |
| # Cache custom values - collect non-empty examples and URLs | |
| examples_list = [ex for ex in example_values if ex and ex.strip()] | |
| urls_list = [url for url in url_values if url and url.strip()] | |
| cache["custom"] = { | |
| 'name': name, | |
| 'tagline': tagline, | |
| 'description': desc, | |
| 'system_prompt': prompt, | |
| 'model': model, | |
| 'temperature': temp, | |
| 'max_tokens': tokens, | |
| 'examples': examples_list, | |
| 'grounding_urls': urls_list | |
| } | |
| # Apply new template values | |
| if template_name == "None (Custom)": | |
| # Restore custom values if they exist | |
| if "custom" in cache: | |
| custom = cache["custom"] | |
| cached_examples = custom.get('examples', []) | |
| cached_urls = custom.get('grounding_urls', []) | |
| # Prepare example updates - fill first 5 fields | |
| example_updates = [] | |
| for i in range(5): | |
| if i < len(cached_examples): | |
| example_updates.append(gr.update(value=cached_examples[i])) | |
| else: | |
| example_updates.append(gr.update(value="")) | |
| # Prepare URL updates - fill first 10 fields | |
| url_updates = [] | |
| for i in range(10): | |
| if i < len(cached_urls): | |
| url_updates.append(gr.update(value=cached_urls[i])) | |
| else: | |
| url_updates.append(gr.update(value="")) | |
| return [ | |
| gr.update(value=custom.get('name', '')), | |
| gr.update(value=custom.get('tagline', '')), | |
| gr.update(value=custom.get('description', '')), | |
| gr.update(value=custom.get('system_prompt', '')), | |
| gr.update(value=custom.get('model', 'google/gemini-2.0-flash-001')), | |
| gr.update(value=custom.get('temperature', 0.7)), | |
| gr.update(value=custom.get('max_tokens', 750)) | |
| ] + example_updates + url_updates + [template_name, cache] | |
| else: | |
| # No cached values, return defaults | |
| default_examples = [ | |
| "Hello! How can you help me?", | |
| "Tell me something interesting", | |
| "What can you do?", | |
| "", | |
| "" | |
| ] | |
| example_updates = [gr.update(value=ex) for ex in default_examples] | |
| url_updates = [gr.update(value="") for _ in range(10)] | |
| return [ | |
| gr.update(value='AI Assistant'), | |
| gr.update(value='A customizable AI assistant'), | |
| gr.update(value='A versatile AI assistant powered by advanced language models.'), | |
| gr.update(value='You are a helpful AI assistant.'), | |
| gr.update(value='google/gemini-2.0-flash-001'), | |
| gr.update(value=0.7), | |
| gr.update(value=750) | |
| ] + example_updates + url_updates + [template_name, cache] | |
| elif template_name in ACADEMIC_TEMPLATES: | |
| template = ACADEMIC_TEMPLATES[template_name] | |
| template_examples = template.get('examples', []) | |
| template_urls = template.get('grounding_urls', []) | |
| # Prepare example updates - fill available examples, empty the rest | |
| example_updates = [] | |
| for i in range(5): | |
| if i < len(template_examples): | |
| example_updates.append(gr.update(value=template_examples[i])) | |
| else: | |
| example_updates.append(gr.update(value="")) | |
| # Prepare URL updates - fill available URLs, empty the rest | |
| url_updates = [] | |
| for i in range(10): | |
| if i < len(template_urls): | |
| url_updates.append(gr.update(value=template_urls[i])) | |
| else: | |
| url_updates.append(gr.update(value="")) | |
| return [ | |
| gr.update(value=template.get('name', '')), | |
| gr.update(value=template.get('tagline', template.get('description', '')[:60])), | |
| gr.update(value=template.get('description', '')), | |
| gr.update(value=template.get('system_prompt', '')), | |
| gr.update(value=template.get('model', 'google/gemini-2.0-flash-001')), | |
| gr.update(value=template.get('temperature', 0.7)), | |
| gr.update(value=template.get('max_tokens', 750)) | |
| ] + example_updates + url_updates + [template_name, cache] | |
| else: | |
| # Invalid template, no updates | |
| # 7 basic fields + 5 examples + 10 URLs = 22, plus prev_template and cache = 24 total | |
| return [gr.update() for _ in range(22)] + [prev_template, cache] | |
| def _apply_uploaded_config(self, config_file): | |
| """Apply uploaded configuration file""" | |
| if not config_file: | |
| # 8 basic + 1 status + 5 examples + 10 URLs = 24 total | |
| return [gr.update() for _ in range(24)] | |
| try: | |
| with open(config_file, 'r') as f: | |
| config = json.load(f) | |
| # Extract values | |
| updates = [ | |
| gr.update(value=config.get('name', '')), | |
| gr.update(value=config.get('tagline', config.get('description', '')[:60])), | |
| gr.update(value=config.get('description', '')), | |
| gr.update(value=config.get('system_prompt', '')), | |
| gr.update(value=config.get('model', 'google/gemini-2.0-flash-001')), | |
| gr.update(value=config.get('theme', 'Default')), | |
| gr.update(value=config.get('temperature', 0.7)), | |
| gr.update(value=config.get('max_tokens', 750)) | |
| ] | |
| # Status message | |
| updates.append(gr.update( | |
| value=f"Configuration loaded successfully", | |
| visible=True | |
| )) | |
| # Example updates | |
| examples = config.get('examples', []) | |
| for i in range(5): | |
| if i < len(examples): | |
| updates.append(gr.update(value=examples[i])) | |
| else: | |
| updates.append(gr.update(value="")) | |
| # URL updates | |
| urls = config.get('grounding_urls', []) | |
| for i in range(10): | |
| if i < len(urls): | |
| updates.append(gr.update(value=urls[i])) | |
| else: | |
| updates.append(gr.update(value="")) | |
| return updates | |
| except Exception as e: | |
| error_updates = [gr.update() for _ in range(8)] # Basic fields | |
| error_updates.append(gr.update( | |
| value=f"Error loading configuration: {str(e)}", | |
| visible=True | |
| )) | |
| error_updates.extend([gr.update() for _ in range(5)]) # Examples | |
| error_updates.extend([gr.update() for _ in range(10)]) # URLs | |
| return error_updates | |
| def _preview_configuration(self, name, tagline, description, system_prompt, model, | |
| theme, api_key_var, temperature, max_tokens, | |
| access_code, *args): | |
| """Preview the configuration""" | |
| # Split args into examples and URLs | |
| example_values = args[:5] # First 5 are examples | |
| urls = args[5:] # Rest are URLs | |
| # Build configuration | |
| config = { | |
| 'name': name or 'AI Assistant', | |
| 'tagline': tagline or 'A customizable AI assistant', | |
| 'description': description or 'A versatile AI assistant powered by advanced language models.', | |
| 'system_prompt': system_prompt or 'You are a helpful AI assistant.', | |
| 'model': model, | |
| 'theme': theme, | |
| 'api_key_var': api_key_var, | |
| 'temperature': temperature, | |
| 'max_tokens': int(max_tokens), | |
| 'access_code': access_code, | |
| 'grounding_urls': [url for url in urls if url and url.strip()], | |
| 'examples_list': [ex.strip() for ex in example_values if ex and hasattr(ex, 'strip') and ex.strip()], | |
| 'preview_ready': True | |
| } | |
| gr.Info("β Preview updated! β¬οΈ Switch to the Preview tab to test your assistant.") | |
| return config | |
| def _generate_package(self, name, tagline, description, system_prompt, model, | |
| theme, api_key_var, temperature, max_tokens, | |
| access_code, *args): | |
| """Generate the deployment package""" | |
| try: | |
| # Validate inputs | |
| if not system_prompt: | |
| gr.Error("Please provide a system prompt") | |
| return gr.update(), gr.update(), gr.update(), gr.update(), {} | |
| # Split args into examples and URLs | |
| example_values = args[:5] # First 5 are examples | |
| urls = args[5:] # Rest are URLs | |
| # Process examples | |
| examples_list = [ex.strip() for ex in example_values if ex and hasattr(ex, 'strip') and ex.strip()] | |
| examples_python = repr(examples_list) | |
| # Process URLs | |
| grounding_urls = [url.strip() for url in urls if url and hasattr(url, 'strip') and url.strip()] | |
| # Create configuration | |
| config = { | |
| 'name': repr(name or 'AI Assistant'), | |
| 'description': repr(tagline or 'A customizable AI assistant'), | |
| 'system_prompt': repr(system_prompt), | |
| 'model': repr(model), | |
| 'api_key_var': repr(api_key_var), | |
| 'temperature': temperature, | |
| 'max_tokens': int(max_tokens), | |
| 'examples': examples_python, | |
| 'grounding_urls': json.dumps(grounding_urls), | |
| 'enable_dynamic_urls': True, | |
| 'enable_file_upload': True, | |
| 'theme': repr(theme) | |
| } | |
| # Generate files | |
| template = get_template() | |
| app_content = template.format(**config) | |
| requirements_content = """gradio>=5.39.0 | |
| requests>=2.32.3 | |
| beautifulsoup4>=4.12.3 | |
| python-dotenv>=1.0.0 | |
| huggingface-hub>=0.20.0""" | |
| config_json = { | |
| 'name': name or 'AI Assistant', | |
| 'tagline': tagline or 'A customizable AI assistant', | |
| 'description': description or 'A versatile AI assistant powered by advanced language models.', | |
| 'system_prompt': system_prompt, | |
| 'model': model, | |
| 'api_key_var': api_key_var, | |
| 'temperature': temperature, | |
| 'max_tokens': int(max_tokens), | |
| 'examples': examples_list, | |
| 'grounding_urls': grounding_urls, | |
| 'enable_dynamic_urls': True, | |
| 'enable_file_upload': True, | |
| 'theme': theme | |
| } | |
| # Create README | |
| readme_content = self._create_readme( | |
| name or 'AI Assistant', | |
| tagline or 'A customizable AI assistant', | |
| description or 'A versatile AI assistant powered by advanced language models. Configure it to meet your specific needs with custom prompts, examples, and grounding URLs.', | |
| model, | |
| api_key_var, | |
| access_code | |
| ) | |
| # Create zip file | |
| filename = create_safe_filename(name or 'ai_assistant', suffix='.zip') | |
| zip_buffer = io.BytesIO() | |
| with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file: | |
| zip_file.writestr('app.py', app_content) | |
| zip_file.writestr('requirements.txt', requirements_content) | |
| zip_file.writestr('config.json', json.dumps(config_json, indent=2)) | |
| zip_file.writestr('README.md', readme_content) | |
| # Save zip file | |
| zip_buffer.seek(0) | |
| with open(filename, 'wb') as f: | |
| f.write(zip_buffer.getvalue()) | |
| # Success message | |
| title_msg = f"**π Deployment package ready!**\n\n**File**: `{filename}`" | |
| details_msg = f"""**Package Contents:** | |
| - `app.py` - Ready-to-deploy Gradio application | |
| - `requirements.txt` - Python dependencies | |
| - `config.json` - Configuration backup | |
| - `README.md` - Deployment instructions | |
| **Next Steps:** | |
| 1. Download the package below | |
| 2. Create a new HuggingFace Space | |
| 3. Upload all files from the package | |
| 4. Set your `{api_key_var}` secret in Space settings""" | |
| if access_code: | |
| details_msg += f"\n5. Set your `ACCESS_CODE` secret for access control" | |
| return ( | |
| gr.update(visible=True), | |
| gr.update(value=title_msg, visible=True), | |
| gr.update(value=filename, visible=True), | |
| gr.update(value=details_msg, visible=True), | |
| config_json | |
| ) | |
| except Exception as e: | |
| return ( | |
| gr.update(visible=True), | |
| gr.update(value=f"Error: {str(e)}", visible=True), | |
| gr.update(visible=False), | |
| gr.update(visible=False), | |
| {} | |
| ) | |
| def _format_config_display(self, config): | |
| """Format configuration for display""" | |
| return f"""**Model:** {config.get('model', 'Not set')} | |
| **Temperature:** {config.get('temperature', 0.7)} | |
| **Max Tokens:** {config.get('max_tokens', 750)} | |
| **Theme:** {config.get('theme', 'Default')} | |
| **System Prompt:** | |
| {config.get('system_prompt', 'Not set')}""" | |
| def _get_support_docs(self): | |
| """Get support documentation content""" | |
| return """# ChatUI Helper Documentation | |
| Welcome to ChatUI Helper! This tool helps you create customizable AI chat interfaces for deployment on HuggingFace Spaces. | |
| <details open> | |
| <summary><h2>π Quick Start Guide</h2></summary> | |
| 1. **Configure** your assistant using templates or custom settings | |
| 2. **Preview** your configuration to test it works | |
| 3. **Generate** your deployment package | |
| 4. **Deploy** to HuggingFace Spaces | |
| </details> | |
| <details> | |
| <summary><h2>π Step 1: Configure Your Space</h2></summary> | |
| The **Configuration Tab** provides these sections: | |
| ### 1. Quick Start Templates | |
| - Choose from pre-configured academic templates | |
| - Or start with "None (Custom)" for full control | |
| - Your custom values are preserved when switching templates | |
| ### 2. Space Identity | |
| - **Assistant Name**: Your AI assistant's display name | |
| - **Theme**: Choose from Default, Soft, Glass, Monochrome, or Base | |
| - **Tagline**: Brief description (60 chars max) for the Space header | |
| - **Description**: Full markdown description for the README | |
| ### 3. System Configuration | |
| - **System Prompt**: Define your assistant's behavior and knowledge | |
| - **Model**: Select from available OpenRouter models | |
| - **Temperature**: Control response creativity (0-2) | |
| - **Max Tokens**: Set response length limit (50-4096) | |
| ### 4. Example Prompts | |
| - Add 3-5 sample prompts that showcase capabilities | |
| - These appear as quick-start buttons for users | |
| - Use the β/β buttons to add/remove examples | |
| ### 5. URL Grounding | |
| - Add reference URLs for context (up to 10) | |
| - First 2 URLs are primary sources (always loaded) | |
| - Additional URLs are secondary references | |
| - Content is fetched and provided as context | |
| ### 6. API Configuration | |
| - **API_KEY**: Required OpenRouter API key variable | |
| - **HF_TOKEN**: Optional for auto-configuration updates | |
| - **ACCESS_CODE**: Optional password protection | |
| ### 7. Upload Configuration | |
| - Import existing config.json files | |
| - Quickly restore previous configurations | |
| </details> | |
| <details> | |
| <summary><h2>π¬ Step 2: Preview Your Assistant</h2></summary> | |
| The **Preview Tab** allows you to: | |
| ### Test Configuration | |
| - Sends real API requests using your local API key | |
| - Uses the exact configuration from Step 1 | |
| - Displays responses in the chat interface | |
| ### Features Available | |
| - Try example prompts | |
| - Upload files (if enabled) | |
| - Export conversation history | |
| - View active configuration | |
| ### API Key Setup | |
| - Set your OpenRouter API key as environment variable | |
| - Default variable name: `API_KEY` | |
| - Get your key at: https://openrouter.ai/keys | |
| </details> | |
| <details> | |
| <summary><h2>π³οΈ Step 3: Generate Deployment Package</h2></summary> | |
| Click **Generate Deployment Package** to create: | |
| ### Package Contents | |
| - **app.py**: Complete Gradio application | |
| - **requirements.txt**: Python dependencies | |
| - **config.json**: Configuration backup | |
| - **README.md**: Deployment instructions | |
| ### Deployment Steps | |
| 1. Download the generated ZIP file | |
| 2. Create a new HuggingFace Space | |
| 3. Upload all files from the package | |
| 4. Configure secrets in Space settings | |
| </details> | |
| <details> | |
| <summary><h2>π HuggingFace Secrets Setup</h2></summary> | |
| ### Required Secret | |
| **API_KEY** (or your configured variable name) | |
| - Get from: https://openrouter.ai/keys | |
| - Must start with: `sk-or-` | |
| - Add in: Settings β Variables and secrets | |
| ### Optional Secrets | |
| **HF_TOKEN** | |
| - Enables automatic configuration updates | |
| - Get from: https://huggingface.co/settings/tokens | |
| - Needs write permissions | |
| **ACCESS_CODE** | |
| - Password protects your Space | |
| - Set any password value | |
| - Share with authorized users only | |
| ### How to Add Secrets | |
| 1. Go to your Space Settings (βοΈ icon) | |
| 2. Find "Variables and secrets" section | |
| 3. Click "New secret" | |
| 4. Enter the variable name and value | |
| 5. Save your changes | |
| </details> | |
| <details> | |
| <summary><h2>π¨ Template System</h2></summary> | |
| ### Available Templates | |
| - **STEM Adventure Games**: Interactive STEM learning | |
| - **Socratic Dialogue Partner**: Philosophical discussions | |
| - **Business Strategy Advisor**: Strategic planning assistant | |
| - **Creative Writing Coach**: Writing improvement helper | |
| - **Research Assistant**: Academic research support | |
| ### Template Features | |
| - Pre-configured prompts and examples | |
| - Optimized model settings | |
| - Relevant grounding URLs | |
| - Domain-specific system prompts | |
| ### Custom Values | |
| - Your custom settings are preserved | |
| - Switch templates without losing work | |
| - Return to "None (Custom)" to restore | |
| </details> | |
| <details> | |
| <summary><h2>π§ Troubleshooting</h2></summary> | |
| ### Common Issues | |
| **Build Errors** | |
| - Check requirements.txt compatibility | |
| - Ensure Gradio version β₯ 5.39.0 | |
| - Verify all dependencies are available | |
| **API Errors** | |
| - Verify API_KEY is set correctly | |
| - Check API key starts with 'sk-or-' | |
| - Ensure you have API credits | |
| **Access Issues** | |
| - ACCESS_CODE must match exactly | |
| - Check for extra spaces in password | |
| - Verify secret is properly saved | |
| **Preview Not Working** | |
| - Set API_KEY in local environment | |
| - Check browser console for errors | |
| - Ensure configuration is saved | |
| </details> | |
| <details> | |
| <summary><h2>π‘ Tips & Best Practices</h2></summary> | |
| ### System Prompts | |
| - Be specific about capabilities | |
| - Include examples of desired behavior | |
| - Set clear boundaries and limitations | |
| ### Model Selection | |
| - Gemini Flash: Fast, cost-effective | |
| - Claude Sonnet: High quality reasoning | |
| - GPT-4: Broad knowledge base | |
| - Llama: Open source alternative | |
| ### URL Grounding | |
| - Use authoritative sources | |
| - Keep URLs up to date | |
| - Primary URLs load on every request | |
| - Secondary URLs provide additional context | |
| ### Security | |
| - Never commit API keys to code | |
| - Use environment variables only | |
| - Enable ACCESS_CODE for sensitive Spaces | |
| - Regularly rotate API keys | |
| </details>""" | |
| def _create_readme(self, title, tagline, description, model, api_key_var, access_code): | |
| """Create README.md content""" | |
| emoji = "π¬" | |
| access_section = "" | |
| if access_code: | |
| access_section = f""" | |
| ### Step 3: Set Access Code | |
| 1. In Settings β Variables and secrets | |
| 2. Add secret: `ACCESS_CODE` | |
| 3. Set your chosen password | |
| 4. Share with authorized users | |
| """ | |
| return f"""--- | |
| title: {title} | |
| emoji: {emoji} | |
| colorFrom: blue | |
| colorTo: green | |
| sdk: gradio | |
| sdk_version: 5.39.0 | |
| app_file: app.py | |
| pinned: false | |
| license: mit | |
| short_description: {tagline} | |
| --- | |
| # {title} | |
| {description} | |
| ## Quick Setup | |
| ### Step 1: Configure API Key (Required) | |
| 1. Get your API key from https://openrouter.ai/keys | |
| 2. In Settings β Variables and secrets | |
| 3. Add secret: `{api_key_var}` | |
| 4. Paste your OpenRouter API key | |
| ### Step 2: Configure HuggingFace Token (Optional) | |
| 1. Get your token from https://huggingface.co/settings/tokens | |
| 2. In Settings β Variables and secrets | |
| 3. Add secret: `HF_TOKEN` | |
| 4. Paste your HuggingFace token (needs write permissions) | |
| 5. This enables automatic configuration updates | |
| {access_section} | |
| ### Step 3: Test Your Space | |
| Your Space should now be running! Try the example prompts or ask your own questions. | |
| ## Configuration | |
| - **Model**: {model} | |
| - **API Key Variable**: {api_key_var} | |
| - **HF Token Variable**: HF_TOKEN (for auto-updates) | |
| {f"- **Access Control**: Enabled (ACCESS_CODE)" if access_code else "- **Access**: Public"} | |
| ## Support | |
| For help, visit the HuggingFace documentation or community forums.""" | |
| def main(): | |
| """Main entry point""" | |
| generator = SpaceGenerator() | |
| demo = generator.create_interface() | |
| demo.launch(share=True) | |
| if __name__ == "__main__": | |
| main() |