import React, { useState, useEffect, useRef } from 'react';
import {
  Send, User, Volume2, VolumeX, ShieldCheck, Palette, PlayCircle,
  MessageCircle, Waves, BrainCircuit, Info,
} from 'lucide-react';

const apiKey = ""; // Provided by the environment

const App = () => {
  const [messages, setMessages] = useState([]);
  const [input, setInput] = useState("");
  const [isTyping, setIsTyping] = useState(false);
  const [isMuted, setIsMuted] = useState(false);
  const [hasStarted, setHasStarted] = useState(false);
  const [isSpeaking, setIsSpeaking] = useState(false);
  const [sessionID] = useState('session-' + Math.random().toString(36).slice(2, 11));
  const scrollRef = useRef(null);

  // GROUNDING DATA FROM YOUR UPLOADED RESEARCH
  const RESEARCH_GUIDE = {
    title: "Chromatic Perception in Audiovisual Performances",
    framework: "CAPRF (Chromatic Audiovisual Perceptual Response Framework)",
    intro: "Hello. I'm Dr. Elena. We are investigating the CAPRF framework—specifically how hue, saturation, and tempo impact your lived experience. To begin, could you describe a specific moment where the color and sound felt perfectly synchronized?"
  };

  const SYSTEM_PROMPT = `
You are Dr. Elena, a professional qualitative researcher. You are interviewing a respondent about their experience with an audiovisual performance, based on the CAPRF framework.

RESEARCH TOPICS TO COVER:
1. Hue, saturation, and brightness (how they affect mood).
2. Transition tempo (the speed of color changes).
3. Color-sound synchrony (how color matched the music).
4. Perceptual responses: affective (emotion), cognitive (attention), and embodied (physical sensations).

BEHAVIOR:
- You MUST talk to the user. Keep responses under 40 words so they are easy to listen to.
- Ask ONE question at a time.
- If the user is vague, probe deeper: "Where in your body did you feel that?" or "How did that specific hue of red change your focus?"
- Stay in character as a warm, academic researcher.
`;

  // Starts the verbal interview
  const startInterview = () => {
    setHasStarted(true);
    const initialMsg = {
      role: 'assistant',
      content: RESEARCH_GUIDE.intro,
      timestamp: new Date().toLocaleTimeString(),
    };
    setMessages([initialMsg]);
    // Small delay to ensure state updates before speech
    setTimeout(() => speak(initialMsg.content), 500);
  };

  // VERBAL OUTPUT: Text-to-Speech
  const speak = async (text) => {
    if (isMuted) return;
    setIsSpeaking(true);
    try {
      const response = await fetch(
        `https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-preview-tts:generateContent?key=${apiKey}`,
        {
          method: 'POST',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify({
            contents: [{ parts: [{ text: `In a calm, professional, academic female voice: ${text}` }] }],
            generationConfig: {
              responseModalities: ["AUDIO"],
              speechConfig: { voiceConfig: { prebuiltVoiceConfig: { voiceName: "Leda" } } },
            },
          }),
        }
      );
      const result = await response.json();
      // The TTS endpoint returns raw 16-bit PCM in inlineData, not a playable
      // WAV container, so wrap it in a WAV header first (see pcmToWavUrl below).
      const pcmData = result.candidates[0].content.parts[0].inlineData.data;
      const audio = new Audio(pcmToWavUrl(pcmData));
      audio.onended = () => {
        setIsSpeaking(false);
        URL.revokeObjectURL(audio.src);
      };
      await audio.play();
    } catch (e) {
      console.error("TTS Error", e);
      setIsSpeaking(false);
    }
  };

  // LLM Logic: send the transcript so far plus the new user turn
  const handleSend = async () => {
    if (!input.trim() || isTyping) return;
    const userMsg = { role: 'user', content: input, timestamp: new Date().toLocaleTimeString() };
    setMessages(prev => [...prev, userMsg]);
    setInput("");
    setIsTyping(true);
    try {
      const response = await fetch(
        `https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-preview-09-2025:generateContent?key=${apiKey}`,
        {
          method: 'POST',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify({
            contents: [
              ...messages.map(m => ({
                role: m.role === 'assistant' ? 'model' : 'user',
                parts: [{ text: m.content }],
              })),
              { role: "user", parts: [{ text: input }] },
            ],
            systemInstruction: { parts: [{ text: SYSTEM_PROMPT }] },
          }),
        }
      );
      const data = await response.json();
      const aiText = data.candidates?.[0]?.content?.parts?.[0]?.text;
      if (!aiText) throw new Error("Empty model response");
      const aiMsg = { role: 'assistant', content: aiText, timestamp: new Date().toLocaleTimeString() };
      setMessages(prev => [...prev, aiMsg]);
      setIsTyping(false);
      speak(aiText);
    } catch (error) {
      console.error("LLM Error", error);
      setIsTyping(false);
    }
  };

  // Keep the transcript pinned to the newest message
  useEffect(() => {
    if (scrollRef.current) {
      scrollRef.current.scrollTop = scrollRef.current.scrollHeight;
    }
  }, [messages, isTyping]);

  if (!hasStarted) {
    return (

      <div className="min-h-screen flex items-center justify-center bg-slate-50">
        <div className="max-w-md p-10 text-center space-y-6 bg-white border border-slate-200 rounded-[1.5rem] shadow-sm">
          <BrainCircuit className="w-12 h-12 mx-auto text-indigo-500" />
          <h1 className="text-3xl font-bold text-slate-800">Dr. Elena AI</h1>
          <p className="text-slate-500">
            Welcome to the CAPRF Research Portal. Dr. Elena will conduct a
            semi-structured interview regarding your chromatic perceptions.
            Please ensure your audio is enabled.
          </p>
          <button
            onClick={startInterview}
            className="inline-flex items-center gap-2 px-6 py-3 bg-indigo-600 hover:bg-indigo-500 text-white rounded-full font-medium transition-colors"
          >
            <PlayCircle className="w-5 h-5" /> Begin Interview
          </button>
        </div>
      </div>
    );
  }

  return (
    <div className="min-h-screen flex flex-col bg-slate-50 text-slate-800">
      {/* Header */}
      <header className="flex items-center justify-between px-6 py-4 bg-white border-b border-slate-200">
        <div className="flex items-center gap-2">
          <Palette className="w-5 h-5 text-indigo-500" />
          <span className="font-semibold">CAPRF Terminal</span>
        </div>
        <div className="flex items-center gap-4">
          <button onClick={() => setIsMuted(!isMuted)} aria-label={isMuted ? "Unmute" : "Mute"}>
            {isMuted
              ? <VolumeX className="w-5 h-5 text-slate-400" />
              : <Volume2 className="w-5 h-5 text-indigo-500" />}
          </button>
          <span className="text-xs font-mono text-slate-400">{sessionID}</span>
        </div>
      </header>

      <main className="flex flex-1 overflow-hidden">
        {/* Left: Fixed Avatar */}
        <aside className="hidden md:flex w-72 flex-col items-center gap-5 p-8 bg-white border-r border-slate-200">
          <div className="relative">
            {isSpeaking && (
              <span className="absolute inset-0 rounded-full ring-4 ring-indigo-400/50 animate-ping" />
            )}
            <div className="w-24 h-24 rounded-full bg-indigo-100 flex items-center justify-center">
              <User className="w-10 h-10 text-indigo-500" aria-label="Dr. Elena" />
            </div>
            {isSpeaking && (
              <span className="absolute -bottom-2 left-1/2 -translate-x-1/2 flex items-center gap-1 px-2 py-0.5 bg-indigo-600 text-white text-xs rounded-full">
                <Waves className="w-3 h-3" /> Speaking
              </span>
            )}
          </div>
          <div className="text-center">
            <p className="font-semibold">Dr. Elena</p>
            <p className="text-xs text-slate-400">CAPRF Qualitative Lead</p>
          </div>
          <blockquote className="flex gap-2 text-sm text-slate-500 italic">
            <Info className="w-4 h-4 shrink-0 text-indigo-400" />
            "I am listening for specific descriptions of your emotional and bodily responses to color transitions."
          </blockquote>
        </aside>

        {/* Right: Interaction */}
        <section className="flex flex-1 flex-col">
          <div ref={scrollRef} className="flex-1 overflow-y-auto p-6 space-y-4">
            {messages.map((msg, idx) => (
              <div
                key={idx}
                className={`max-w-[75%] px-5 py-3 rounded-2xl text-sm ${
                  msg.role === 'user'
                    ? 'ml-auto bg-indigo-600 text-white'
                    : 'mr-auto bg-white border border-slate-200'
                }`}
              >
                {msg.content}
              </div>
            ))}
            {isTyping && (
              <div className="mr-auto flex items-center gap-1 px-5 py-4 rounded-2xl bg-white border border-slate-200">
                <span className="w-2 h-2 bg-slate-300 rounded-full animate-bounce" />
                <span className="w-2 h-2 bg-slate-300 rounded-full animate-bounce [animation-delay:150ms]" />
                <span className="w-2 h-2 bg-slate-300 rounded-full animate-bounce [animation-delay:300ms]" />
              </div>
            )}
          </div>

          <div className="flex items-center gap-3 p-4 bg-white border-t border-slate-200">
            <input
              value={input}
              onChange={(e) => setInput(e.target.value)}
              onKeyDown={(e) => e.key === 'Enter' && handleSend()}
              placeholder="Talk to Dr. Elena..."
              className="flex-1 px-8 py-5 bg-slate-100/50 border-none rounded-[1.5rem] focus:ring-4 focus:ring-indigo-500/10 focus:bg-white outline-none text-base transition-all"
              disabled={isSpeaking || isTyping}
            />
            <button
              onClick={handleSend}
              disabled={isSpeaking || isTyping}
              aria-label="Send"
              className="p-4 bg-indigo-600 hover:bg-indigo-500 disabled:opacity-50 text-white rounded-full transition-colors"
            >
              <Send className="w-5 h-5" />
            </button>
          </div>

          <p className="flex items-center justify-center gap-1 pb-3 text-xs text-slate-400">
            <ShieldCheck className="w-3 h-3" /> Secure Academic Portal • All data anonymized
          </p>
        </section>
      </main>
    </div>
  );
};

export default App;
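
// Helper used by speak(): the Gemini TTS endpoint returns raw 16-bit PCM in
// inlineData (mime type like "audio/L16;rate=24000"), which an <audio> element
// cannot play directly. This is a minimal sketch that prepends a 44-byte
// RIFF/WAV header, assuming 24 kHz mono 16-bit output; pcmToWavUrl is our own
// helper name, not part of the API. Declared at module scope, so the hoisted
// function is visible inside App even though it is defined after it.
function pcmToWavUrl(base64Pcm, sampleRate = 24000) {
  // Decode base64 into raw PCM bytes.
  const binary = atob(base64Pcm);
  const pcmBytes = new Uint8Array(binary.length);
  for (let i = 0; i < binary.length; i++) pcmBytes[i] = binary.charCodeAt(i);

  // Build the 44-byte WAV header for 16-bit mono PCM.
  const header = new ArrayBuffer(44);
  const view = new DataView(header);
  const writeString = (offset, str) => {
    for (let i = 0; i < str.length; i++) view.setUint8(offset + i, str.charCodeAt(i));
  };
  writeString(0, 'RIFF');
  view.setUint32(4, 36 + pcmBytes.length, true); // total file size minus 8
  writeString(8, 'WAVE');
  writeString(12, 'fmt ');
  view.setUint32(16, 16, true);                  // fmt chunk size
  view.setUint16(20, 1, true);                   // audio format: PCM
  view.setUint16(22, 1, true);                   // channels: mono
  view.setUint32(24, sampleRate, true);          // sample rate
  view.setUint32(28, sampleRate * 2, true);      // byte rate (mono, 2 bytes/sample)
  view.setUint16(32, 2, true);                   // block align
  view.setUint16(34, 16, true);                  // bits per sample
  writeString(36, 'data');
  view.setUint32(40, pcmBytes.length, true);     // data chunk size

  // Hand back an object URL; speak() revokes it when playback ends.
  const blob = new Blob([header, pcmBytes], { type: 'audio/wav' });
  return URL.createObjectURL(blob);
}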