// Reuben_OS / app/gemini/page.tsx
'use client'
import React, { useState, useRef, useEffect } from 'react'
import { Upload, Mic, MicOff, Send, Image as ImageIcon, FileText, Loader2, X } from 'lucide-react'
interface Message {
id: string
role: 'user' | 'assistant'
content: string
type: 'text' | 'image' | 'audio'
imageUrl?: string
timestamp: Date
}
export default function GeminiAIApp() {
const [messages, setMessages] = useState<Message[]>([])
const [inputText, setInputText] = useState('')
const [isLoading, setIsLoading] = useState(false)
const [isRecording, setIsRecording] = useState(false)
const [selectedImage, setSelectedImage] = useState<string | null>(null)
const [activeTab, setActiveTab] = useState<'chat' | 'transcribe' | 'image'>('chat')
const fileInputRef = useRef<HTMLInputElement>(null)
const audioInputRef = useRef<HTMLInputElement>(null)
const messagesEndRef = useRef<HTMLDivElement>(null)
const mediaRecorderRef = useRef<MediaRecorder | null>(null)
const audioChunksRef = useRef<Blob[]>([])
// Keep the newest message scrolled into view as the list grows
useEffect(() => {
messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' })
}, [messages])
// Post the current message (and optional image) to the server-side Gemini route
const handleSendMessage = async () => {
if (!inputText.trim() && !selectedImage) return
const newMessage: Message = {
id: Date.now().toString(),
role: 'user',
content: inputText || 'Image uploaded',
type: selectedImage ? 'image' : 'text',
imageUrl: selectedImage || undefined,
timestamp: new Date()
}
setMessages(prev => [...prev, newMessage])
setInputText('')
setSelectedImage(null)
setIsLoading(true)
try {
const response = await fetch('/api/gemini/chat', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
message: inputText,
imageUrl: selectedImage,
history: messages.slice(-10) // Send last 10 messages for context
})
})
const data = await response.json()
if (!response.ok || data.error) {
throw new Error(data.error || `Request failed with status ${response.status}`)
}
const aiMessage: Message = {
id: (Date.now() + 1).toString(),
role: 'assistant',
content: data.response,
type: 'text',
timestamp: new Date()
}
setMessages(prev => [...prev, aiMessage])
} catch (error) {
console.error('Error:', error)
const errorMessage: Message = {
id: (Date.now() + 1).toString(),
role: 'assistant',
content: `Error: ${error instanceof Error ? error.message : 'Failed to get response'}`,
type: 'text',
timestamp: new Date()
}
setMessages(prev => [...prev, errorMessage])
} finally {
setIsLoading(false)
}
}
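// What the matching route handler might look like (a hypothetical sketch, not
// part of this file): app/api/gemini/chat/route.ts using the
// @google/generative-ai SDK with a server-side GEMINI_API_KEY. The component
// above only depends on the { response } / { error } JSON shape it returns.
//
// import { NextResponse } from 'next/server'
// import { GoogleGenerativeAI } from '@google/generative-ai'
//
// export async function POST(req: Request) {
//   try {
//     const { message, history } = await req.json()
//     const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY!)
//     const model = genAI.getGenerativeModel({ model: 'gemini-flash-latest' })
//     // Map this component's Message shape onto the SDK's chat history roles
//     const chat = model.startChat({
//       history: (history ?? []).map((m: { role: string; content: string }) => ({
//         role: m.role === 'assistant' ? 'model' : 'user',
//         parts: [{ text: m.content }],
//       })),
//     })
//     const result = await chat.sendMessage(message)
//     return NextResponse.json({ response: result.response.text() })
//   } catch (error) {
//     return NextResponse.json(
//       { error: error instanceof Error ? error.message : 'Generation failed' },
//       { status: 500 }
//     )
//   }
// }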
// Read the chosen image as a data URL for inline preview and upload
const handleImageUpload = (e: React.ChangeEvent<HTMLInputElement>) => {
const file = e.target.files?.[0]
if (file) {
const reader = new FileReader()
reader.onloadend = () => {
setSelectedImage(reader.result as string)
}
reader.readAsDataURL(file)
}
}
// Capture microphone audio with MediaRecorder; transcription kicks off on stop
const startRecording = async () => {
try {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
const mediaRecorder = new MediaRecorder(stream)
mediaRecorderRef.current = mediaRecorder
audioChunksRef.current = []
mediaRecorder.ondataavailable = (event) => {
audioChunksRef.current.push(event.data)
}
mediaRecorder.onstop = async () => {
// Most browsers record webm/ogg rather than wav, so label the blob with the
// recorder's actual mime type instead of hardcoding 'audio/wav'
const audioBlob = new Blob(audioChunksRef.current, { type: mediaRecorder.mimeType || 'audio/webm' })
await transcribeAudio(audioBlob)
stream.getTracks().forEach(track => track.stop())
}
mediaRecorder.start()
setIsRecording(true)
} catch (error) {
console.error('Error accessing microphone:', error)
alert('Error accessing microphone. Please check permissions.')
}
}
const stopRecording = () => {
if (mediaRecorderRef.current && isRecording) {
mediaRecorderRef.current.stop()
setIsRecording(false)
}
}
const transcribeAudio = async (audioBlob: Blob) => {
setIsLoading(true)
const formData = new FormData()
// Keep the original filename for uploaded files; mic recordings are webm, not wav
const fileName = audioBlob instanceof File ? audioBlob.name : 'recording.webm'
formData.append('audio', audioBlob, fileName)
try {
const response = await fetch('/api/gemini/transcribe', {
method: 'POST',
body: formData
})
const data = await response.json()
if (data.error) {
throw new Error(data.error)
}
setInputText(data.transcription)
} catch (error) {
console.error('Transcription error:', error)
alert(`Failed to transcribe audio: ${error instanceof Error ? error.message : 'unknown error'}`)
} finally {
setIsLoading(false)
}
}
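// The transcription endpoint is likewise server-side; a minimal sketch of
// app/api/gemini/transcribe/route.ts (hypothetical, same assumptions as the
// chat sketch above). Gemini accepts audio as an inlineData part alongside a
// text instruction, and the client only reads { transcription } / { error }.
//
// export async function POST(req: Request) {
//   try {
//     const audio = (await req.formData()).get('audio') as File
//     const base64 = Buffer.from(await audio.arrayBuffer()).toString('base64')
//     const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY!)
//     const model = genAI.getGenerativeModel({ model: 'gemini-flash-latest' })
//     const result = await model.generateContent([
//       { inlineData: { mimeType: audio.type || 'audio/webm', data: base64 } },
//       { text: 'Transcribe this audio verbatim.' },
//     ])
//     return NextResponse.json({ transcription: result.response.text() })
//   } catch (error) {
//     return NextResponse.json(
//       { error: error instanceof Error ? error.message : 'Transcription failed' },
//       { status: 500 }
//     )
//   }
// }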
const handleAudioFileUpload = async (e: React.ChangeEvent<HTMLInputElement>) => {
const file = e.target.files?.[0]
if (file) {
await transcribeAudio(file)
}
}
return (
<div className="min-h-screen bg-gradient-to-br from-purple-50 to-blue-50 p-4">
<div className="max-w-6xl mx-auto">
<div className="bg-white rounded-2xl shadow-xl overflow-hidden">
{/* Header */}
<div className="bg-gradient-to-r from-blue-600 to-purple-600 p-6 text-white">
<h1 className="text-3xl font-bold mb-2">Gemini AI Assistant</h1>
<p className="text-blue-100">Chat, Transcribe, and Analyze Images with AI</p>
</div>
{/* Tabs */}
<div className="flex border-b border-gray-200">
<button
onClick={() => setActiveTab('chat')}
className={`flex-1 py-3 px-4 font-medium transition-colors ${
activeTab === 'chat'
? 'bg-blue-50 text-blue-600 border-b-2 border-blue-600'
: 'text-gray-600 hover:bg-gray-50'
}`}
>
Chat
</button>
<button
onClick={() => setActiveTab('transcribe')}
className={`flex-1 py-3 px-4 font-medium transition-colors ${
activeTab === 'transcribe'
? 'bg-blue-50 text-blue-600 border-b-2 border-blue-600'
: 'text-gray-600 hover:bg-gray-50'
}`}
>
Transcribe
</button>
<button
onClick={() => setActiveTab('image')}
className={`flex-1 py-3 px-4 font-medium transition-colors ${
activeTab === 'image'
? 'bg-blue-50 text-blue-600 border-b-2 border-blue-600'
: 'text-gray-600 hover:bg-gray-50'
}`}
>
Image Analysis
</button>
</div>
{/* Chat Messages */}
<div className="h-[500px] overflow-y-auto p-4 space-y-4">
{messages.length === 0 && (
<div className="text-center text-gray-500 mt-20">
<div className="mb-4">
{activeTab === 'chat' && <FileText size={48} className="mx-auto text-gray-300" />}
{activeTab === 'transcribe' && <Mic size={48} className="mx-auto text-gray-300" />}
{activeTab === 'image' && <ImageIcon size={48} className="mx-auto text-gray-300" />}
</div>
<p className="text-lg font-medium">
{activeTab === 'chat' && 'Start a conversation with Gemini AI'}
{activeTab === 'transcribe' && 'Record or upload audio to transcribe'}
{activeTab === 'image' && 'Upload an image for AI analysis'}
</p>
</div>
)}
{messages.map((message) => (
<div
key={message.id}
className={`flex ${message.role === 'user' ? 'justify-end' : 'justify-start'}`}
>
<div
className={`max-w-[70%] p-4 rounded-2xl ${
message.role === 'user'
? 'bg-blue-600 text-white'
: 'bg-gray-100 text-gray-800'
}`}
>
{message.imageUrl && (
<img
src={message.imageUrl}
alt="Uploaded"
className="mb-2 rounded-lg max-h-64 object-contain"
/>
)}
<p className="whitespace-pre-wrap">{message.content}</p>
<p className={`text-xs mt-2 ${
message.role === 'user' ? 'text-blue-100' : 'text-gray-500'
}`}>
{new Date(message.timestamp).toLocaleTimeString()}
</p>
</div>
</div>
))}
{isLoading && (
<div className="flex justify-start">
<div className="bg-gray-100 p-4 rounded-2xl">
<Loader2 className="animate-spin h-5 w-5 text-gray-600" />
</div>
</div>
)}
<div ref={messagesEndRef} />
</div>
{/* Selected Image Preview */}
{selectedImage && (
<div className="px-4 pb-2">
<div className="relative inline-block">
<img
src={selectedImage}
alt="Selected"
className="h-20 rounded-lg border-2 border-blue-500"
/>
<button
onClick={() => setSelectedImage(null)}
className="absolute -top-2 -right-2 bg-red-500 text-white rounded-full p-1 hover:bg-red-600"
>
<X size={16} />
</button>
</div>
</div>
)}
{/* Input Area */}
<div className="border-t border-gray-200 p-4">
<div className="flex gap-2">
{/* File Inputs */}
<input
ref={fileInputRef}
type="file"
accept="image/*"
onChange={handleImageUpload}
className="hidden"
/>
<input
ref={audioInputRef}
type="file"
accept="audio/*"
onChange={handleAudioFileUpload}
className="hidden"
/>
{/* Action Buttons */}
{activeTab === 'image' && (
<button
onClick={() => fileInputRef.current?.click()}
className="p-3 bg-purple-100 text-purple-600 rounded-lg hover:bg-purple-200 transition-colors"
title="Upload Image"
>
<ImageIcon size={20} />
</button>
)}
{activeTab === 'transcribe' && (
<>
<button
onClick={isRecording ? stopRecording : startRecording}
className={`p-3 rounded-lg transition-colors ${
isRecording
? 'bg-red-100 text-red-600 hover:bg-red-200 animate-pulse'
: 'bg-green-100 text-green-600 hover:bg-green-200'
}`}
title={isRecording ? 'Stop Recording' : 'Start Recording'}
>
{isRecording ? <MicOff size={20} /> : <Mic size={20} />}
</button>
<button
onClick={() => audioInputRef.current?.click()}
className="p-3 bg-blue-100 text-blue-600 rounded-lg hover:bg-blue-200 transition-colors"
title="Upload Audio File"
>
<Upload size={20} />
</button>
</>
)}
{/* Text Input */}
<input
type="text"
value={inputText}
onChange={(e) => setInputText(e.target.value)}
onKeyDown={(e) => e.key === 'Enter' && !e.shiftKey && handleSendMessage()}
placeholder={
activeTab === 'chat' ? 'Type your message...' :
activeTab === 'transcribe' ? 'Transcribed text will appear here...' :
'Describe what you want to analyze...'
}
className="flex-1 px-4 py-3 border border-gray-300 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500"
disabled={isLoading}
/>
{/* Send Button */}
<button
onClick={handleSendMessage}
disabled={isLoading || (!inputText.trim() && !selectedImage)}
className="px-6 py-3 bg-blue-600 text-white rounded-lg hover:bg-blue-700 transition-colors disabled:bg-gray-300 disabled:cursor-not-allowed"
>
{isLoading ? <Loader2 className="animate-spin h-5 w-5" /> : <Send size={20} />}
</button>
</div>
{/* Feature Instructions */}
<div className="mt-4 text-sm text-gray-600">
{activeTab === 'chat' && (
<p>Chat with Gemini AI. Ask questions, get creative responses, or have a conversation.</p>
)}
{activeTab === 'transcribe' && (
<p>Record audio or upload an audio file to transcribe it to text using AI.</p>
)}
{activeTab === 'image' && (
<p>Upload an image to get AI-powered analysis, descriptions, or answers about the image content.</p>
)}
</div>
</div>
</div>
</div>
</div>
)
}