'use client'

import React, { useState, useRef, useEffect } from 'react'
import { Upload, Mic, MicOff, Send, Image as ImageIcon, FileText, Loader2, X } from 'lucide-react'

interface Message {
  id: string
  role: 'user' | 'assistant'
  content: string
  type: 'text' | 'image' | 'audio'
  imageUrl?: string
  timestamp: Date
}

export default function GeminiAIApp() {
  const [messages, setMessages] = useState<Message[]>([])
  const [inputText, setInputText] = useState('')
  const [isLoading, setIsLoading] = useState(false)
  const [isRecording, setIsRecording] = useState(false)
  const [selectedImage, setSelectedImage] = useState<string | null>(null)
  const [activeTab, setActiveTab] = useState<'chat' | 'transcribe' | 'image'>('chat')

  const fileInputRef = useRef<HTMLInputElement>(null)
  const audioInputRef = useRef<HTMLInputElement>(null)
  const messagesEndRef = useRef<HTMLDivElement>(null)
  const mediaRecorderRef = useRef<MediaRecorder | null>(null)
  const audioChunksRef = useRef<Blob[]>([])

  // Keep the newest message scrolled into view.
  useEffect(() => {
    messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' })
  }, [messages])

  const handleSendMessage = async () => {
    if (!inputText.trim() && !selectedImage) return

    const newMessage: Message = {
      id: Date.now().toString(),
      role: 'user',
      content: inputText || 'Image uploaded',
      type: selectedImage ? 'image' : 'text',
      imageUrl: selectedImage || undefined,
      timestamp: new Date()
    }

    setMessages(prev => [...prev, newMessage])
    setInputText('')
    setSelectedImage(null)
    setIsLoading(true)

    try {
      const response = await fetch('/api/gemini/chat', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          message: inputText,
          imageUrl: selectedImage,
          history: messages.slice(-10) // Send the last 10 messages for context
        })
      })

      const data = await response.json()
      if (data.error) {
        throw new Error(data.error)
      }

      const aiMessage: Message = {
        id: (Date.now() + 1).toString(),
        role: 'assistant',
        content: data.response,
        type: 'text',
        timestamp: new Date()
      }
      setMessages(prev => [...prev, aiMessage])
    } catch (error) {
      console.error('Error:', error)
      const errorMessage: Message = {
        id: (Date.now() + 1).toString(),
        role: 'assistant',
        content: `Error: ${error instanceof Error ? error.message : 'Failed to get response'}`,
        type: 'text',
        timestamp: new Date()
      }
      setMessages(prev => [...prev, errorMessage])
    } finally {
      setIsLoading(false)
    }
  }

  const handleImageUpload = (e: React.ChangeEvent<HTMLInputElement>) => {
    const file = e.target.files?.[0]
    if (file) {
      // Read the image as a base64 data URL so it can be previewed and sent as JSON.
      const reader = new FileReader()
      reader.onloadend = () => {
        setSelectedImage(reader.result as string)
      }
      reader.readAsDataURL(file)
    }
  }

  const startRecording = async () => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
      const mediaRecorder = new MediaRecorder(stream)
      mediaRecorderRef.current = mediaRecorder
      audioChunksRef.current = []

      mediaRecorder.ondataavailable = (event) => {
        audioChunksRef.current.push(event.data)
      }

      mediaRecorder.onstop = async () => {
        // MediaRecorder typically produces webm/ogg rather than WAV, so label
        // the blob with the recorder's actual mimeType instead of 'audio/wav'.
        const audioBlob = new Blob(audioChunksRef.current, {
          type: mediaRecorder.mimeType || 'audio/webm'
        })
        await transcribeAudio(audioBlob)
        stream.getTracks().forEach(track => track.stop())
      }

      mediaRecorder.start()
      setIsRecording(true)
    } catch (error) {
      console.error('Error accessing microphone:', error)
      alert('Error accessing microphone. Please check permissions.')
    }
  }

  const stopRecording = () => {
    if (mediaRecorderRef.current && isRecording) {
      mediaRecorderRef.current.stop()
      setIsRecording(false)
    }
  }

  const transcribeAudio = async (audioBlob: Blob) => {
    setIsLoading(true)
    const formData = new FormData()
    formData.append('audio', audioBlob, 'recording.webm')

    try {
      const response = await fetch('/api/gemini/transcribe', {
        method: 'POST',
        body: formData
      })

      const data = await response.json()
      if (data.error) {
        throw new Error(data.error)
      }
      setInputText(data.transcription)
    } catch (error) {
      console.error('Transcription error:', error)
      alert('Failed to transcribe audio')
    } finally {
      setIsLoading(false)
    }
  }

  const handleAudioFileUpload = async (e: React.ChangeEvent<HTMLInputElement>) => {
    const file = e.target.files?.[0]
    if (file) {
      await transcribeAudio(file)
    }
  }

  return (
    <div className="flex flex-col h-screen max-w-4xl mx-auto bg-gray-50">
      {/* Header */}
      <header className="bg-white border-b border-gray-200 px-6 py-4">
        <h1 className="text-2xl font-bold text-gray-900">Gemini AI Assistant</h1>
        <p className="text-sm text-gray-500">Chat, Transcribe, and Analyze Images with AI</p>
      </header>

      {/* Tabs */}
      <nav className="flex border-b border-gray-200 bg-white">
        {(['chat', 'transcribe', 'image'] as const).map((tab) => (
          <button
            key={tab}
            onClick={() => setActiveTab(tab)}
            className={`px-6 py-3 text-sm font-medium capitalize ${
              activeTab === tab
                ? 'border-b-2 border-blue-500 text-blue-600'
                : 'text-gray-500 hover:text-gray-700'
            }`}
          >
            {tab}
          </button>
        ))}
      </nav>

      {/* Chat Messages */}
      <div className="flex-1 overflow-y-auto p-6 space-y-4">
        {messages.length === 0 && (
          <div className="flex flex-col items-center justify-center h-full text-gray-400">
            {activeTab === 'chat' && <FileText className="w-12 h-12 mb-3" />}
            {activeTab === 'transcribe' && <Mic className="w-12 h-12 mb-3" />}
            {activeTab === 'image' && <ImageIcon className="w-12 h-12 mb-3" />}
            <p>
              {activeTab === 'chat' && 'Start a conversation with Gemini AI'}
              {activeTab === 'transcribe' && 'Record or upload audio to transcribe'}
              {activeTab === 'image' && 'Upload an image for AI analysis'}
            </p>
          </div>
        )}

        {messages.map((message) => (
          <div
            key={message.id}
            className={`flex ${message.role === 'user' ? 'justify-end' : 'justify-start'}`}
          >
            <div
              className={`max-w-[75%] rounded-lg px-4 py-3 ${
                message.role === 'user'
                  ? 'bg-blue-500 text-white'
                  : 'bg-white border border-gray-200'
              }`}
            >
              {message.imageUrl && (
                <img src={message.imageUrl} alt="Uploaded" className="max-w-full rounded mb-2" />
              )}
              <p className="whitespace-pre-wrap">{message.content}</p>
              <p className="text-xs opacity-60 mt-1">
                {new Date(message.timestamp).toLocaleTimeString()}
              </p>
            </div>
          </div>
        ))}

        {isLoading && (
          <div className="flex justify-start">
            <Loader2 className="w-6 h-6 animate-spin text-gray-400" />
          </div>
        )}
        <div ref={messagesEndRef} />
      </div>

      {/* Selected Image Preview */}
      {selectedImage && (
        <div className="relative inline-block px-6 pt-2">
          <img src={selectedImage} alt="Selected" className="h-20 rounded" />
          <button
            onClick={() => setSelectedImage(null)}
            className="absolute top-0 right-4 bg-gray-800 text-white rounded-full p-1"
            aria-label="Remove selected image"
          >
            <X className="w-4 h-4" />
          </button>
        </div>
      )}

      {/* Input Area */}
      <div className="bg-white border-t border-gray-200 p-4">
        {/* File Inputs (hidden; triggered by the buttons below) */}
        <input
          ref={fileInputRef}
          type="file"
          accept="image/*"
          onChange={handleImageUpload}
          className="hidden"
        />
        <input
          ref={audioInputRef}
          type="file"
          accept="audio/*"
          onChange={handleAudioFileUpload}
          className="hidden"
        />

        <div className="flex items-center gap-2">
          {/* Action Buttons */}
          {activeTab === 'image' && (
            <button
              onClick={() => fileInputRef.current?.click()}
              className="p-3 text-gray-500 hover:text-blue-500"
              aria-label="Upload image"
            >
              <ImageIcon className="w-5 h-5" />
            </button>
          )}
          {activeTab === 'transcribe' && (
            <>
              <button
                onClick={isRecording ? stopRecording : startRecording}
                className={`p-3 ${isRecording ? 'text-red-500' : 'text-gray-500 hover:text-blue-500'}`}
                aria-label={isRecording ? 'Stop recording' : 'Start recording'}
              >
                {isRecording ? <MicOff className="w-5 h-5" /> : <Mic className="w-5 h-5" />}
              </button>
              <button
                onClick={() => audioInputRef.current?.click()}
                className="p-3 text-gray-500 hover:text-blue-500"
                aria-label="Upload audio file"
              >
                <Upload className="w-5 h-5" />
              </button>
            </>
          )}

          {/* Text Input */}
          <input
            type="text"
            value={inputText}
            onChange={(e) => setInputText(e.target.value)}
            onKeyDown={(e) => {
              if (e.key === 'Enter' && !e.shiftKey) handleSendMessage()
            }}
            placeholder={
              activeTab === 'chat'
                ? 'Type your message...'
                : activeTab === 'transcribe'
                  ? 'Transcribed text will appear here...'
                  : 'Describe what you want to analyze...'
            }
            className="flex-1 px-4 py-3 border border-gray-300 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500"
            disabled={isLoading}
          />

          {/* Send Button */}
          <button
            onClick={handleSendMessage}
            disabled={isLoading || (!inputText.trim() && !selectedImage)}
            className="p-3 bg-blue-500 text-white rounded-lg hover:bg-blue-600 disabled:opacity-50"
            aria-label="Send"
          >
            <Send className="w-5 h-5" />
          </button>
        </div>

        {/* Feature Instructions */}
        <div className="mt-3 text-xs text-gray-500">
          {activeTab === 'chat' && (
            <p>Chat with Gemini AI. Ask questions, get creative responses, or have a conversation.</p>
          )}
          {activeTab === 'transcribe' && (
            <p>Record audio or upload an audio file to transcribe it to text using AI.</p>
          )}
          {activeTab === 'image' && (
            <p>Upload an image to get AI-powered analysis, descriptions, or answers about the image content.</p>
          )}
        </div>
      </div>
    </div>
  )
}
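
/*
 * Note: this component assumes two Next.js route handlers that are not shown
 * here: POST /api/gemini/chat (expects { message, imageUrl, history } and
 * returns { response } or { error }) and POST /api/gemini/transcribe (expects
 * multipart form data with an `audio` field and returns { transcription } or
 * { error }). The sketch below is a minimal, hypothetical implementation of
 * the chat route using the @google/generative-ai SDK; the model name and
 * error handling are assumptions, not the original author's code.
 *
 * // app/api/gemini/chat/route.ts (hypothetical sketch)
 * import { NextResponse } from 'next/server'
 * import { GoogleGenerativeAI } from '@google/generative-ai'
 *
 * const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY ?? '')
 *
 * export async function POST(req: Request) {
 *   try {
 *     const { message } = await req.json()
 *     const model = genAI.getGenerativeModel({ model: 'gemini-1.5-flash' })
 *     const result = await model.generateContent(message)
 *     // The client reads `data.response` on success and `data.error` on failure.
 *     return NextResponse.json({ response: result.response.text() })
 *   } catch (error) {
 *     return NextResponse.json(
 *       { error: error instanceof Error ? error.message : 'Unknown error' },
 *       { status: 500 }
 *     )
 *   }
 * }
 */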