// Voice-led qualitative interview app: "Dr. Elena" (a Gemini-driven persona) conducts a
// spoken CAPRF interview about chromatic perception in audiovisual performances.
import React, { useState, useEffect, useRef } from 'react';
import { Send, User, Volume2, VolumeX, ShieldCheck, Palette, PlayCircle, MessageCircle, Waves } from 'lucide-react';

const apiKey = ""; // Gemini API Key provided at runtime

const App = () => {
  const [messages, setMessages] = useState([]);
  const [input, setInput] = useState("");
  const [isTyping, setIsTyping] = useState(false);
  const [isMuted, setIsMuted] = useState(false);
  const [hasStarted, setHasStarted] = useState(false);
  const [isSpeaking, setIsSpeaking] = useState(false);
  const scrollRef = useRef(null);
  const audioContextRef = useRef(null); // currently unused

  const researchContext = {
    title: "Chromatic Perception in Audiovisual Performances",
    framework: "CAPRF (Chromatic Audiovisual Perceptual Response Framework)",
    intro: "Hello. I'm Dr. Elena. We are exploring how chromatic attributes like hue and saturation shape your lived experience. To start, could you describe a specific moment where the color on screen felt deeply connected to the sound you were hearing?"
  };

  const SYSTEM_PROMPT = `
You are Dr. Elena, a senior Qualitative Researcher. Your voice is being synthesized to talk to the user.
INTERVIEW RULES:
1. Grounded in CAPRF: Focus on hue, saturation, brightness, transition tempo, and color-sound synchrony.
2. Phenomenological Approach: Ask about "lived experiences", "embodied sensations", and "mood shifts".
3. Probing: If they give a short answer, verbally nudge them. Example: "That's fascinating. Where in your body did you feel that intensity when the red hues appeared?"
4. Keep responses concise (2-3 sentences) so the verbal interaction feels natural and not like a long lecture.
5. Always be professional, inquisitive, and empathetic.
`;

  // Start the interview and trigger the first verbal question
  const startInterview = async () => {
    setHasStarted(true);
    const introMsg = { role: 'assistant', content: researchContext.intro, timestamp: new Date().toLocaleTimeString() };
    setMessages([introMsg]);
    await speak(introMsg.content);
  };

  // Convert text to speech using the Gemini TTS preview model and play it aloud
  const speak = async (text) => {
    if (isMuted) return;
    setIsSpeaking(true);
    try {
      const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-preview-tts:generateContent?key=${apiKey}`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          contents: [{ parts: [{ text: `Speak as an empathetic, professional female academic researcher: ${text}` }] }],
          generationConfig: {
            responseModalities: ["AUDIO"],
            speechConfig: { voiceConfig: { prebuiltVoiceConfig: { voiceName: "Leda" } } }
          },
          model: "gemini-2.5-flash-preview-tts"
        })
      });
      const result = await response.json();
      const inlineData = result.candidates?.[0]?.content?.parts?.[0]?.inlineData;
      if (!inlineData?.data) throw new Error("No audio data returned");

      // The endpoint returns raw 16-bit PCM (mimeType like "audio/L16;codec=pcm;rate=24000"),
      // not a playable WAV container, so wrap it in a WAV header before playback
      // (see pcmBase64ToWavUrl at the bottom of this file).
      const rateMatch = /rate=(\d+)/.exec(inlineData.mimeType || "");
      const sampleRate = rateMatch ? parseInt(rateMatch[1], 10) : 24000;
      const audio = new Audio(pcmBase64ToWavUrl(inlineData.data, sampleRate));
      audio.onended = () => {
        setIsSpeaking(false);
        URL.revokeObjectURL(audio.src);
      };
      audio.onerror = () => setIsSpeaking(false);
      await audio.play();
    } catch (error) {
      console.error("Speech Synthesis Error:", error);
      setIsSpeaking(false);
    }
  };

  // Send the participant's answer to the interviewer model, then speak the follow-up question
  const handleSend = async () => {
    if (!input.trim() || isTyping) return;
    const userMsg = { role: 'user', content: input, timestamp: new Date().toLocaleTimeString() };
    setMessages(prev => [...prev, userMsg]);
    setInput("");
    setIsTyping(true);
    try {
      const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-preview-09-2025:generateContent?key=${apiKey}`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          contents: [
            // `messages` does not yet include the message added above, so append it explicitly
            ...messages.map(m => ({ role: m.role === 'assistant' ? 'model' : 'user', parts: [{ text: m.content }] })),
            { role: "user", parts: [{ text: input }] }
          ],
          systemInstruction: { parts: [{ text: SYSTEM_PROMPT }] }
        })
      });
      const data = await response.json();
      const aiText = data.candidates?.[0]?.content?.parts?.[0]?.text || "Could you expand on that for me?";
      const aiMsg = { role: 'assistant', content: aiText, timestamp: new Date().toLocaleTimeString() };
      setMessages(prev => [...prev, aiMsg]);
      setIsTyping(false);
      // Verbally ask the next question
      await speak(aiText);
    } catch (error) {
      console.error("Interview Turn Error:", error);
      setIsTyping(false);
    }
  };

  // Keep the transcript scrolled to the latest message
  useEffect(() => {
    if (scrollRef.current) {
      scrollRef.current.scrollTop = scrollRef.current.scrollHeight;
    }
  }, [messages, isTyping]);
  // NOTE: the original JSX markup and styling were lost in extraction; the two return blocks
  // below are a minimal reconstruction that preserves the source's text, state, and handlers.
  // Class names and layout are placeholders.
  if (!hasStarted) {
    return (
      <div className="start-screen">
        <Palette />
        <h1>{researchContext.title}</h1>
        <p>You are participating in a study on Chromatic Perception. Dr. Elena will guide you through a verbal interview. Please ensure your volume is up.</p>
        <button onClick={startInterview}>
          <PlayCircle /> Begin Interview
        </button>
      </div>
    );
  }

  return (
    <div className="interview-screen">
      <header>
        <div>
          <h2>Dr. Elena</h2>
          <p>Senior Research Fellow</p>
        </div>
        <div>
          {isSpeaking ? <Waves /> : <MessageCircle />}
          <span>Interviewing</span>
          <button onClick={() => setIsMuted(!isMuted)} title={isMuted ? "Unmute" : "Mute"}>
            {isMuted ? <VolumeX /> : <Volume2 />}
          </button>
        </div>
      </header>

      <div ref={scrollRef} className="transcript">
        {messages.map((m, i) => (
          <div key={i} className={`message ${m.role}`}>
            {m.role === 'user' && <User />}
            <p>{m.content}</p>
            <span>{m.timestamp}</span>
          </div>
        ))}
        {isTyping && <p className="typing-indicator">Dr. Elena is thinking…</p>}
      </div>

      <footer>
        <input
          value={input}
          onChange={(e) => setInput(e.target.value)}
          onKeyDown={(e) => e.key === 'Enter' && handleSend()}
          placeholder="Describe your experience..."
          disabled={isTyping}
        />
        <button onClick={handleSend} disabled={isTyping || !input.trim()}>
          <Send />
        </button>
        <p className="session-note">
          <ShieldCheck /> Session is recorded for CAPRF analysis • Please respond when Dr. Elena finishes speaking
        </p>
      </footer>
    </div>
  );
};

export default App;
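// Editor's addition (not in the original file): the TTS response's inlineData is raw
// little-endian 16-bit PCM (mimeType such as "audio/L16;codec=pcm;rate=24000"), which the
// browser's <audio> element cannot play directly. This helper is a minimal sketch that
// assumes mono, 16-bit samples and wraps the bytes in a 44-byte WAV header so speak() can
// hand the result to `new Audio(...)`. The name pcmBase64ToWavUrl is introduced here; it is
// declared with `function` so it is hoisted and may also be moved above the component.
function pcmBase64ToWavUrl(pcmBase64, sampleRate = 24000) {
  const pcmBytes = Uint8Array.from(atob(pcmBase64), (c) => c.charCodeAt(0));
  const numChannels = 1;
  const bytesPerSample = 2;

  const header = new ArrayBuffer(44);
  const view = new DataView(header);
  const writeString = (offset, str) => {
    for (let i = 0; i < str.length; i++) view.setUint8(offset + i, str.charCodeAt(i));
  };

  writeString(0, 'RIFF');
  view.setUint32(4, 36 + pcmBytes.length, true);                       // RIFF chunk size
  writeString(8, 'WAVE');
  writeString(12, 'fmt ');
  view.setUint32(16, 16, true);                                        // fmt chunk size (PCM)
  view.setUint16(20, 1, true);                                         // audio format: linear PCM
  view.setUint16(22, numChannels, true);
  view.setUint32(24, sampleRate, true);
  view.setUint32(28, sampleRate * numChannels * bytesPerSample, true); // byte rate
  view.setUint16(32, numChannels * bytesPerSample, true);              // block align
  view.setUint16(34, 8 * bytesPerSample, true);                        // bits per sample
  writeString(36, 'data');
  view.setUint32(40, pcmBytes.length, true);                           // data chunk size

  const wavBlob = new Blob([header, pcmBytes], { type: 'audio/wav' });
  return URL.createObjectURL(wavBlob);
}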