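// pages/index.js — Next.js chat page that streams assistant replies from
// /api/chat and renders them with the ChatMessage and ChatInput components.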
import { useState, useEffect, useRef } from 'react'
import ChatMessage from '../components/ChatMessage'
import ChatInput from '../components/ChatInput'

export default function Home() {
  const [messages, setMessages] = useState([
    {
      id: 1,
      role: 'assistant',
      content: 'Hello! I\'m an AI assistant powered by the Qwen vision-language model. I can understand both text and images. You can upload images and ask me questions about them, or just chat with me! How can I help you today?',
      timestamp: new Date().toISOString()
    }
  ])
  const [isLoading, setIsLoading] = useState(false)
  const [isTyping, setIsTyping] = useState(false)
  const messagesEndRef = useRef(null)

  // Keep the newest message in view whenever the list changes.
  const scrollToBottom = () => {
    messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' })
  }

  useEffect(() => {
    scrollToBottom()
  }, [messages])
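
  // Send the full conversation (including the new user message) to /api/chat
  // and stream the assistant's reply into the message list as it arrives.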
  const handleSendMessage = async (messageData) => {
    const userMessage = {
      id: Date.now(),
      role: 'user',
      content: messageData.content,
      image: messageData.image,
      timestamp: new Date().toISOString()
    }

    setMessages(prev => [...prev, userMessage])
    setIsLoading(true)
    setIsTyping(true)

    try {
      const response = await fetch('/api/chat', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          messages: [...messages, userMessage]
        }),
      })

      if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`)
      }

      // Read the server-sent event stream and append tokens to a placeholder
      // assistant message as they arrive.
      const reader = response.body.getReader()
      const decoder = new TextDecoder()
      const assistantMessage = {
        id: Date.now() + 1,
        role: 'assistant',
        content: '',
        timestamp: new Date().toISOString()
      }
      setMessages(prev => [...prev, assistantMessage])

      while (true) {
        const { done, value } = await reader.read()
        if (done) break

        // stream: true keeps multi-byte characters split across chunks intact.
        const chunk = decoder.decode(value, { stream: true })
        const lines = chunk.split('\n')

        for (const line of lines) {
          if (!line.startsWith('data: ')) continue

          const data = line.slice(6)
          if (data === '[DONE]') {
            setIsTyping(false)
            break
          }

          let parsed
          try {
            parsed = JSON.parse(data)
          } catch (e) {
            console.warn('Failed to parse chunk:', data)
            continue
          }

          if (parsed.error) {
            // Surface server-side errors instead of silently skipping them.
            throw new Error(parsed.error)
          }

          if (parsed.content) {
            setMessages(prev => {
              const updated = [...prev]
              const last = updated[updated.length - 1]
              updated[updated.length - 1] = {
                ...last,
                content: last.content + parsed.content
              }
              return updated
            })
          }
        }
      }
    } catch (error) {
      console.error('Chat error:', error)
      setMessages(prev => [...prev, {
        id: Date.now() + 1,
        role: 'assistant',
        content: `Sorry, I encountered an error: ${error.message}. Please make sure your Hugging Face token is properly configured and try again.`,
        timestamp: new Date().toISOString()
      }])
    } finally {
      setIsLoading(false)
      setIsTyping(false)
    }
  }
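
  // Reset the conversation to the initial greeting.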
  const clearChat = () => {
    setMessages([
      {
        id: 1,
        role: 'assistant',
        content: 'Hello! I\'m an AI assistant powered by the Qwen vision-language model. I can understand both text and images. You can upload images and ask me questions about them, or just chat with me! How can I help you today?',
        timestamp: new Date().toISOString()
      }
    ])
  }

  return (
    <div className="min-h-screen flex flex-col bg-black">
      {/* Header */}
      <header className="apple-glass border-b border-white border-opacity-10 p-6">
        <div className="max-w-4xl mx-auto flex items-center justify-between">
          <div>
            <h1 className="text-2xl font-semibold text-white tracking-tight">
              AI Vision Chat
            </h1>
            <p className="text-sm text-gray-400 mt-1 font-medium">
              Powered by Qwen3-VL-8B-Instruct
            </p>
          </div>
          <div className="flex items-center gap-3">
            <button
              onClick={clearChat}
              className="px-4 py-2 text-sm font-medium text-gray-300 hover:text-white apple-button disabled:opacity-50 disabled:cursor-not-allowed transition-all duration-200"
              disabled={isLoading}
            >
              Clear Chat
            </button>
            <a
              href="https://huggingface.co/spaces/akhaliq/anycoder"
              target="_blank"
              rel="noopener noreferrer"
              className="text-sm text-blue-400 hover:text-blue-300 transition-colors font-medium"
            >
              Built with anycoder
            </a>
          </div>
        </div>
      </header>

      {/* Chat Messages */}
      <div className="flex-1 overflow-hidden">
        <div className="max-w-4xl mx-auto h-full px-6 py-8">
          <div className="h-full overflow-y-auto pr-2">
            <div className="space-y-8">
              {messages.map((message, index) => (
                <ChatMessage key={message.id} message={message} index={index} />
              ))}
              {isTyping && (
                <div className="flex gap-4 mb-8 animate-fade-in">
                  <div className="flex-shrink-0">
                    <div
                      className="w-10 h-10 rounded-full flex items-center justify-center text-white font-medium text-sm bg-gradient-to-r from-gray-600 to-gray-700 shadow-lg"
                    >
                      AI
                    </div>
                  </div>
                  <div className="chat-message assistant">
                    <div className="flex items-center gap-3">
                      <div className="flex gap-1">
                        <div className="w-1.5 h-1.5 bg-gray-400 rounded-full animate-pulse"></div>
                        <div className="w-1.5 h-1.5 bg-gray-400 rounded-full animate-pulse" style={{ animationDelay: '0.2s' }}></div>
                        <div className="w-1.5 h-1.5 bg-gray-400 rounded-full animate-pulse" style={{ animationDelay: '0.4s' }}></div>
                      </div>
                      <span className="text-xs text-gray-400 font-medium">AI is typing...</span>
                    </div>
                  </div>
                </div>
              )}
              <div ref={messagesEndRef} />
            </div>
          </div>
        </div>
      </div>

      {/* Input Area */}
      <div className="max-w-4xl mx-auto w-full px-6 pb-8">
        <ChatInput
          onSendMessage={handleSendMessage}
          disabled={isLoading || isTyping}
        />
        <div className="mt-4 text-center">
          <p className="text-xs text-gray-500 font-medium">
            Upload images to ask questions about them, or just chat!
            <span className="text-blue-400 ml-1">Powered by Hugging Face</span>
          </p>
        </div>
      </div>
    </div>
  )
}