import gradio as gr
import os
from typing import List, Dict, Any, Optional, Tuple
import hashlib
from datetime import datetime
import numpy as np
from gradio_client import Client

# PDF processing library
try:
    import fitz  # PyMuPDF
    PDF_AVAILABLE = True
except ImportError:
    PDF_AVAILABLE = False
    print("⚠️ PyMuPDF not installed. Install with: pip install pymupdf")

# Embedding model library
try:
    from sentence_transformers import SentenceTransformer
    ST_AVAILABLE = True
except ImportError:
    ST_AVAILABLE = False
    print("⚠️ Sentence Transformers not installed. Install with: pip install sentence-transformers")
# Soft and bright custom CSS
custom_css = """
.gradio-container {
    background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
    min-height: 100vh;
    font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif;
}
.main-container {
    background: rgba(255, 255, 255, 0.98);
    border-radius: 16px;
    padding: 24px;
    box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06);
    border: 1px solid rgba(0, 0, 0, 0.05);
    margin: 12px;
}
.main-container:hover {
    box-shadow: 0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05);
    transition: all 0.3s ease;
}
/* Status messages styling */
.pdf-status {
    padding: 12px 16px;
    border-radius: 12px;
    margin: 12px 0;
    font-size: 0.95rem;
    font-weight: 500;
    backdrop-filter: blur(10px);
}
.pdf-success {
    background: linear-gradient(135deg, #d4edda 0%, #c3e6cb 100%);
    border: 1px solid #b1dfbb;
    color: #155724;
}
.pdf-error {
    background: linear-gradient(135deg, #f8d7da 0%, #f5c6cb 100%);
    border: 1px solid #f1aeb5;
    color: #721c24;
}
.pdf-info {
    background: linear-gradient(135deg, #d1ecf1 0%, #bee5eb 100%);
    border: 1px solid #9ec5d8;
    color: #0c5460;
}
.pdf-warning {
    background: linear-gradient(135deg, #fff3cd 0%, #ffeeba 100%);
    border: 1px solid #ffeaa7;
    color: #856404;
}
/* RAG context display */
.rag-context {
    background: linear-gradient(135deg, #fef3c7 0%, #fde68a 100%);
    border-left: 4px solid #f59e0b;
    padding: 16px;
    margin: 16px 0;
    border-radius: 8px;
    font-size: 0.9rem;
}
/* Chat message styling */
.message {
    padding: 12px 16px;
    margin: 8px 4px;
    border-radius: 12px;
    max-width: 80%;
}
.user-message {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    margin-left: auto;
}
.bot-message {
    background: #f3f4f6;
    color: #1f2937;
}
"""
class SimpleTextSplitter:
    """Sentence-based text splitter."""

    def __init__(self, chunk_size=800, chunk_overlap=100):
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap  # reserved; overlap is not applied by this simple splitter

    def split_text(self, text: str) -> List[str]:
        """Split text into chunks of roughly chunk_size characters."""
        chunks = []
        sentences = text.split('. ')
        current_chunk = ""
        for sentence in sentences:
            if len(current_chunk) + len(sentence) < self.chunk_size:
                current_chunk += sentence + ". "
            else:
                if current_chunk:
                    chunks.append(current_chunk.strip())
                current_chunk = sentence + ". "
        if current_chunk:
            chunks.append(current_chunk.strip())
        return chunks
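
# Illustrative usage of the splitter (a minimal sketch; the sample text is made up):
#   splitter = SimpleTextSplitter(chunk_size=800, chunk_overlap=100)
#   chunks = splitter.split_text("First sentence. Second sentence. Third sentence.")
#   # -> list of chunks, each roughly chunk_size characters, split on ". " boundaries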

class PDFRAGSystem:
    """PDF-based RAG (retrieval-augmented generation) system."""

    def __init__(self):
        self.documents = {}
        self.document_chunks = {}
        self.embeddings_store = {}
        self.text_splitter = SimpleTextSplitter(chunk_size=800, chunk_overlap=100)
        # Initialize the embedding model
        self.embedder = None
        if ST_AVAILABLE:
            try:
                self.embedder = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
                print("✅ Embedding model loaded")
            except Exception as e:
                print(f"⚠️ Failed to load embedding model: {e}")

    def extract_text_from_pdf(self, pdf_path: str) -> Dict[str, Any]:
        """Extract text from a PDF file."""
        if not PDF_AVAILABLE:
            return {
                "metadata": {
                    "title": "PDF Reader Not Available",
                    "file_name": os.path.basename(pdf_path),
                    "pages": 0
                },
                "full_text": "Run 'pip install pymupdf' to enable PDF processing."
            }
        try:
            doc = fitz.open(pdf_path)
            text_content = []
            metadata = {
                # PyMuPDF often returns an empty string for a missing title,
                # so fall back to the file name in that case as well
                "title": doc.metadata.get("title") or os.path.basename(pdf_path),
                "pages": len(doc),
                "file_name": os.path.basename(pdf_path)
            }
            for page_num, page in enumerate(doc):
                text = page.get_text()
                if text.strip():
                    text_content.append(text)
            doc.close()
            return {
                "metadata": metadata,
                "full_text": "\n\n".join(text_content)
            }
        except Exception as e:
            raise Exception(f"PDF processing error: {str(e)}") from e
    def process_and_store_pdf(self, pdf_path: str, doc_id: str) -> Dict[str, Any]:
        """Process a PDF and store its chunks, embeddings, and metadata."""
        try:
            # Extract text from the PDF
            pdf_data = self.extract_text_from_pdf(pdf_path)
            # Split the text into chunks
            chunks = self.text_splitter.split_text(pdf_data["full_text"])
            # Store the chunks
            self.document_chunks[doc_id] = chunks
            # Generate embeddings
            if self.embedder:
                embeddings = self.embedder.encode(chunks)
                self.embeddings_store[doc_id] = embeddings
            # Store document metadata
            self.documents[doc_id] = {
                "metadata": pdf_data["metadata"],
                "chunk_count": len(chunks),
                "upload_time": datetime.now().isoformat()
            }
            return {
                "success": True,
                "doc_id": doc_id,
                "chunks": len(chunks),
                "pages": pdf_data["metadata"]["pages"],
                "title": pdf_data["metadata"]["title"]
            }
        except Exception as e:
            return {"success": False, "error": str(e)}
    def search_relevant_chunks(self, query: str, doc_ids: List[str], top_k: int = 3) -> List[Dict]:
        """Search the selected documents for chunks relevant to the query."""
        all_relevant_chunks = []
        if self.embedder and self.embeddings_store:
            # Embedding-based search
            query_embedding = self.embedder.encode([query])[0]
            for doc_id in doc_ids:
                if doc_id in self.embeddings_store and doc_id in self.document_chunks:
                    doc_embeddings = self.embeddings_store[doc_id]
                    chunks = self.document_chunks[doc_id]
                    # Compute cosine similarity between the query and each chunk
                    similarities = []
                    for emb in doc_embeddings:
                        sim = np.dot(query_embedding, emb) / (np.linalg.norm(query_embedding) * np.linalg.norm(emb))
                        similarities.append(sim)
                    # Select the top-k chunks above a minimal similarity threshold
                    top_indices = np.argsort(similarities)[-top_k:][::-1]
                    for idx in top_indices:
                        if similarities[idx] > 0.2:
                            all_relevant_chunks.append({
                                "content": chunks[idx],
                                "doc_name": self.documents[doc_id]["metadata"]["file_name"],
                                "similarity": similarities[idx]
                            })
        else:
            # Keyword-based fallback search
            query_keywords = set(query.lower().split())
            for doc_id in doc_ids:
                if doc_id in self.document_chunks:
                    chunks = self.document_chunks[doc_id]
                    for chunk in chunks[:top_k]:
                        chunk_lower = chunk.lower()
                        score = sum(1 for keyword in query_keywords if keyword in chunk_lower)
                        if score > 0:
                            all_relevant_chunks.append({
                                "content": chunk[:500],
                                "doc_name": self.documents[doc_id]["metadata"]["file_name"],
                                "similarity": score / len(query_keywords) if query_keywords else 0
                            })
        # Sort by similarity and return the top-k overall
        all_relevant_chunks.sort(key=lambda x: x.get('similarity', 0), reverse=True)
        return all_relevant_chunks[:top_k]
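
    # A vectorized alternative to the similarity loop above (a sketch, assuming
    # doc_embeddings is the 2-D numpy array returned by SentenceTransformer.encode):
    #   sims = doc_embeddings @ query_embedding / (
    #       np.linalg.norm(doc_embeddings, axis=1) * np.linalg.norm(query_embedding))
    #   top_indices = np.argsort(sims)[-top_k:][::-1]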
    def create_rag_prompt(self, query: str, doc_ids: List[str], top_k: int = 3) -> str:
        """Build a RAG prompt that embeds the most relevant chunks above the question."""
        relevant_chunks = self.search_relevant_chunks(query, doc_ids, top_k)
        if not relevant_chunks:
            return query
        # Assemble the prompt
        prompt_parts = []
        prompt_parts.append("Please answer the question based on the reference documents below.\n")
        prompt_parts.append("=" * 50)
        for i, chunk in enumerate(relevant_chunks, 1):
            prompt_parts.append(f"\n[Reference document {i} - {chunk['doc_name']}]")
            content = chunk['content'][:400] if len(chunk['content']) > 400 else chunk['content']
            prompt_parts.append(content)
            prompt_parts.append("")
        prompt_parts.append("=" * 50)
        prompt_parts.append(f"\nQuestion: {query}")
        prompt_parts.append("\nPlease answer accurately and in detail, based on the reference documents above:")
        return "\n".join(prompt_parts)

# Create the RAG system instance
rag_system = PDFRAGSystem()

# Note: gr.State components must be created inside a Blocks context,
# so the model/conversation states are declared inside `demo` below.

def upload_pdf(file):
    """Handle a PDF file upload."""
    if file is None:
        return (
            gr.update(value="<div class='pdf-status pdf-warning'>📁 Please select a file</div>"),
            gr.update(choices=[]),
            gr.update(value=False)
        )
    try:
        # gr.File(type="filepath") passes a plain path string; older Gradio
        # versions pass a tempfile wrapper with a .name attribute
        pdf_path = file if isinstance(file, str) else file.name
        # Use a hash of the file contents as its document ID
        with open(pdf_path, 'rb') as f:
            file_hash = hashlib.md5(f.read()).hexdigest()[:8]
        doc_id = f"doc_{file_hash}"
        # Process and store the PDF
        result = rag_system.process_and_store_pdf(pdf_path, doc_id)
        if result["success"]:
            status_html = f"""
            <div class="pdf-status pdf-success">
                ✅ PDF uploaded successfully<br>
                📄 File: {result['title']}<br>
                📃 Pages: {result['pages']}<br>
                📝 Chunks: {result['chunks']} created
            </div>
            """
            # Update the document list (all documents selected by default)
            doc_choices = [f"{doc_id}: {rag_system.documents[doc_id]['metadata']['file_name']}"
                           for doc_id in rag_system.documents.keys()]
            return (
                status_html,
                gr.update(choices=doc_choices, value=doc_choices),
                gr.update(value=True)
            )
        else:
            status_html = f"""
            <div class="pdf-status pdf-error">
                ❌ Upload failed: {result['error']}
            </div>
            """
            return status_html, gr.update(), gr.update(value=False)
    except Exception as e:
        return (
            f"<div class='pdf-status pdf-error'>❌ Error: {str(e)}</div>",
            gr.update(),
            gr.update(value=False)
        )

def clear_documents():
    """Delete all stored documents."""
    rag_system.documents = {}
    rag_system.document_chunks = {}
    rag_system.embeddings_store = {}
    return (
        gr.update(value="<div class='pdf-status pdf-info'>🗑️ All documents have been deleted</div>"),
        gr.update(choices=[], value=[]),
        gr.update(value=False)
    )

def switch_model(model_choice):
    """Function to switch between models"""
    if model_choice == "openai/gpt-oss-120b":
        return gr.update(visible=True), gr.update(visible=False), model_choice
    else:
        return gr.update(visible=False), gr.update(visible=True), model_choice

def chat_with_rag(message, history, model_name, enable_rag, selected_docs, top_k, temperature, max_tokens):
    """Chat function with optional RAG augmentation."""
    if not message:
        return history
    # Apply RAG if enabled and documents are selected
    if enable_rag and selected_docs:
        doc_ids = [doc.split(":")[0] for doc in selected_docs]
        enhanced_message = rag_system.create_rag_prompt(message, doc_ids, top_k)
        # Debug: confirm RAG was applied
        print(f"RAG applied - original: {len(message)} chars, enhanced: {len(enhanced_message)} chars")
    else:
        enhanced_message = message
    try:
        # The real model API should be called here (see connect_to_model_api below);
        # for now, generate a mock response.
        if enable_rag and selected_docs:
            response = f"""📄 [RAG-based answer]
Answering with reference to your documents:
{enhanced_message[:500]}...
[Note: a real model API connection is still required]
"""
        else:
            response = f"""💬 [General answer]
Question: {message}
[Note: a real model API connection is still required]
"""
        # Append to the conversation history
        history.append([message, response])
    except Exception as e:
        response = f"❌ Error: {str(e)}"
        history.append([message, response])
    return history

# Main interface with soft theme
with gr.Blocks(fill_height=True, theme=gr.themes.Soft(), css=custom_css) as demo:
    # State variables
    current_model = gr.State("openai/gpt-oss-120b")
    conversation_history = gr.State([])
    with gr.Row():
        # Sidebar
        with gr.Column(scale=1):
            with gr.Group(elem_classes="main-container"):
                gr.Markdown("# 🤖 AI Chat + RAG")
                gr.Markdown(
                    "A document-grounded answering system built on the OpenAI GPT-OSS models and your PDFs."
                )
                # Login button
                login_button = gr.LoginButton("🔐 Sign in with Hugging Face", size="lg")
                # Model selection
                model_dropdown = gr.Dropdown(
                    choices=["openai/gpt-oss-120b", "openai/gpt-oss-20b"],
                    value="openai/gpt-oss-120b",
                    label="🎯 Model",
                    info="Choose the model size to use"
                )
                # Reload button to apply model change
                reload_btn = gr.Button("🔄 Apply Model Change", variant="primary", size="lg")
                # RAG settings
                with gr.Accordion("📚 PDF RAG Settings", open=True):
                    pdf_upload = gr.File(
                        label="📤 Upload PDF",
                        file_types=[".pdf"],
                        type="filepath"
                    )
                    upload_status = gr.HTML(
                        value="<div class='pdf-status pdf-info'>📄 Upload a PDF to get document-grounded answers</div>"
                    )
                    document_list = gr.CheckboxGroup(
                        choices=[],
                        label="📋 Uploaded Documents",
                        info="Select the documents to reference"
                    )
                    with gr.Row():
                        clear_btn = gr.Button("🗑️ Delete All Documents", size="sm", variant="secondary")
                    enable_rag = gr.Checkbox(
                        label="✨ Enable RAG",
                        value=False,
                        info="Generate answers using the selected documents"
                    )
                    top_k_chunks = gr.Slider(
                        minimum=1,
                        maximum=5,
                        value=3,
                        step=1,
                        label="Reference Chunks",
                        info="Number of document chunks to use per answer"
                    )
                # Additional options
                with gr.Accordion("🎛️ Model Options", open=False):
                    temperature = gr.Slider(
                        minimum=0,
                        maximum=2,
                        value=0.7,
                        step=0.1,
                        label="Temperature",
                        info="Lower values are more consistent, higher values more creative"
                    )
                    max_tokens = gr.Slider(
                        minimum=1,
                        maximum=4096,
                        value=512,
                        step=1,
                        label="Max Tokens",
                        info="Maximum number of tokens to generate"
                    )
        # Main chat area
        with gr.Column(scale=3):
            with gr.Group(elem_classes="main-container"):
                gr.Markdown("## 💬 Chat Interface")
                # RAG status display
                rag_status = gr.HTML(
                    value="<div class='pdf-status pdf-info'>📊 RAG: <strong>Disabled</strong></div>"
                )
                # Unified chat interface (one per model)
                with gr.Column(visible=True) as model_120b_container:
                    gr.Markdown("### 🚀 Model: openai/gpt-oss-120b")
                    chatbot_120b = gr.Chatbot(
                        height=400,
                        show_label=False,
                        elem_classes="chatbot"
                    )
                    with gr.Row():
                        msg_120b = gr.Textbox(
                            placeholder="Type a message... (press Enter to send)",
                            show_label=False,
                            scale=4,
                            container=False
                        )
                        send_btn_120b = gr.Button("📤 Send", variant="primary", scale=1)
                    with gr.Row():
                        clear_btn_120b = gr.Button("🗑️ Clear Chat", variant="secondary", size="sm")
                    # Example questions
                    gr.Examples(
                        examples=[
                            "Summarize the main points of the document",
                            "What are the most important points in this document?",
                            "List the dates and schedules mentioned in the document"
                        ],
                        inputs=msg_120b
                    )
                with gr.Column(visible=False) as model_20b_container:
                    gr.Markdown("### 🚀 Model: openai/gpt-oss-20b")
                    chatbot_20b = gr.Chatbot(
                        height=400,
                        show_label=False,
                        elem_classes="chatbot"
                    )
                    with gr.Row():
                        msg_20b = gr.Textbox(
                            placeholder="Type a message... (press Enter to send)",
                            show_label=False,
                            scale=4,
                            container=False
                        )
                        send_btn_20b = gr.Button("📤 Send", variant="primary", scale=1)
                    with gr.Row():
                        clear_btn_20b = gr.Button("🗑️ Clear Chat", variant="secondary", size="sm")
                    # Example questions
                    gr.Examples(
                        examples=[
                            "Summarize the main points of the document",
                            "What are the most important points in this document?",
                            "List the dates and schedules mentioned in the document"
                        ],
                        inputs=msg_20b
                    )
    # Event handlers
    # PDF upload
    pdf_upload.upload(
        fn=upload_pdf,
        inputs=[pdf_upload],
        outputs=[upload_status, document_list, enable_rag]
    )
    # Document deletion
    clear_btn.click(
        fn=clear_documents,
        outputs=[upload_status, document_list, enable_rag]
    )
    # RAG status update
    enable_rag.change(
        fn=lambda x: gr.update(
            value=f"<div class='pdf-status pdf-info'>📊 RAG: <strong>{'✅ Enabled' if x else '⭕ Disabled'}</strong></div>"
        ),
        inputs=[enable_rag],
        outputs=[rag_status]
    )
    # Model switching
    reload_btn.click(
        fn=switch_model,
        inputs=[model_dropdown],
        outputs=[model_120b_container, model_20b_container, current_model]
    ).then(
        fn=lambda: gr.Info("✅ Model switched successfully!"),
        inputs=[],
        outputs=[]
    )
    # Chat events for the 120b model. Component values are passed via `inputs`
    # rather than read as `.value`, which would only ever see the initial values.
    msg_120b.submit(
        fn=lambda msg, hist, rag, docs, k, temp, tokens: chat_with_rag(
            msg, hist, "openai/gpt-oss-120b", rag, docs, k, temp, tokens
        ),
        inputs=[msg_120b, chatbot_120b, enable_rag, document_list,
                top_k_chunks, temperature, max_tokens],
        outputs=[chatbot_120b]
    ).then(
        fn=lambda: "",
        outputs=[msg_120b]
    )
    send_btn_120b.click(
        fn=lambda msg, hist, rag, docs, k, temp, tokens: chat_with_rag(
            msg, hist, "openai/gpt-oss-120b", rag, docs, k, temp, tokens
        ),
        inputs=[msg_120b, chatbot_120b, enable_rag, document_list,
                top_k_chunks, temperature, max_tokens],
        outputs=[chatbot_120b]
    ).then(
        fn=lambda: "",
        outputs=[msg_120b]
    )
    clear_btn_120b.click(
        fn=lambda: [],
        outputs=[chatbot_120b]
    )
    # Chat events for the 20b model
    msg_20b.submit(
        fn=lambda msg, hist, rag, docs, k, temp, tokens: chat_with_rag(
            msg, hist, "openai/gpt-oss-20b", rag, docs, k, temp, tokens
        ),
        inputs=[msg_20b, chatbot_20b, enable_rag, document_list,
                top_k_chunks, temperature, max_tokens],
        outputs=[chatbot_20b]
    ).then(
        fn=lambda: "",
        outputs=[msg_20b]
    )
    send_btn_20b.click(
        fn=lambda msg, hist, rag, docs, k, temp, tokens: chat_with_rag(
            msg, hist, "openai/gpt-oss-20b", rag, docs, k, temp, tokens
        ),
        inputs=[msg_20b, chatbot_20b, enable_rag, document_list,
                top_k_chunks, temperature, max_tokens],
        outputs=[chatbot_20b]
    ).then(
        fn=lambda: "",
        outputs=[msg_20b]
    )
    clear_btn_20b.click(
        fn=lambda: [],
        outputs=[chatbot_20b]
    )

# Function for connecting to a real model API (still to be implemented)
def connect_to_model_api(model_name, message, temperature, max_tokens):
    """
    Connect to the actual model API.
    TODO: implement the real API call here.
    Candidates:
    - OpenAI API
    - Hugging Face Inference API
    - Custom model endpoint
    """
    # client = Client(f"models/{model_name}")
    # response = client.predict(message, temperature=temperature, max_tokens=max_tokens)
    # return response
    pass
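
# A minimal sketch of one way to fill in connect_to_model_api, using
# huggingface_hub.InferenceClient against the Hugging Face Inference API.
# The client choice, the HF_TOKEN environment variable, and the function name
# are assumptions rather than part of the original app; it is not wired into the UI.
def connect_to_model_api_sketch(model_name, message, temperature, max_tokens):
    from huggingface_hub import InferenceClient  # lazy import so the app loads without it

    client = InferenceClient(model=model_name, token=os.environ.get("HF_TOKEN"))  # HF_TOKEN is assumed
    completion = client.chat_completion(
        messages=[{"role": "user", "content": message}],
        temperature=temperature,
        max_tokens=max_tokens,
    )
    return completion.choices[0].message.content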

demo.launch()