commit 5f66e41f940c8cc3f3cb556e911c44af704963e3
Author: root <root@hub.scroll.pub>
Date: 2024-12-28 17:56:20 +0000
Subject: Initial commit
diff --git a/body.html b/body.html
new file mode 100644
index 0000000..b1f7dbc
--- /dev/null
+++ b/body.html
@@ -0,0 +1,28 @@
+<header>
+ <h1>VoiceAI</h1>
+ <p class="tagline">Speak with AI, naturally</p>
+</header>
+
+<main>
+ <div class="control-panel">
+ <button id="startBtn" aria-label="Start listening">
+ <span class="icon">🎤</span>
+ <span class="label">Start Listening</span>
+ </button>
+
+ <div class="status-indicator">
+ <span id="statusText">Ready to listen...</span>
+ <div class="pulse-ring"></div>
+ </div>
+ </div>
+
+ <div class="conversation-container">
+ <div id="conversation" aria-live="polite">
+ <!-- Conversation history will appear here -->
+ </div>
+ </div>
+</main>
+
+<footer>
+ <p>Built with care for natural conversations</p>
+</footer>
diff --git a/index.scroll b/index.scroll
new file mode 100644
index 0000000..92f1238
--- /dev/null
+++ b/index.scroll
@@ -0,0 +1,8 @@
+buildHtml
+baseUrl https://voiceai.scroll.pub
+metaTags
+editButton /edit.html
+title VoiceAI - Speech-Driven AI Assistant
+style.css
+body.html
+script.js
diff --git a/prompt.json b/prompt.json
new file mode 100644
index 0000000..e9b0437
--- /dev/null
+++ b/prompt.json
@@ -0,0 +1,20 @@
+{
+ "id": "msg_01Lhu8tXYGhHKPSUSzAEpcqf",
+ "type": "message",
+ "role": "assistant",
+ "model": "claude-3-5-sonnet-20241022",
+ "content": [
+ {
+ "type": "text",
+ "text": "Let me create a website for your voice-driven LLM interaction system.\n\n---domain---\nvoiceai.scroll.pub\n\n---index.scroll---\nbuildHtml\nbaseUrl https://voiceai.scroll.pub\nmetaTags\neditButton /edit.html\ntitle VoiceAI - Speech-Driven AI Assistant\nstyle.css\nbody.html\nscript.js\n\n---body.html---\n<header>\n <h1>VoiceAI</h1>\n <p class=\"tagline\">Speak with AI, naturally</p>\n</header>\n\n<main>\n <div class=\"control-panel\">\n <button id=\"startBtn\" aria-label=\"Start listening\">\n <span class=\"icon\">🎤</span>\n <span class=\"label\">Start Listening</span>\n </button>\n \n <div class=\"status-indicator\">\n <span id=\"statusText\">Ready to listen...</span>\n <div class=\"pulse-ring\"></div>\n </div>\n </div>\n\n <div class=\"conversation-container\">\n <div id=\"conversation\" aria-live=\"polite\">\n <!-- Conversation history will appear here -->\n </div>\n </div>\n</main>\n\n<footer>\n <p>Built with care for natural conversations</p>\n</footer>\n\n---style.css---\n:root {\n --primary-color: #2c3e50;\n --accent-color: #3498db;\n --background-color: #f9fafb;\n --text-color: #2c3e50;\n --shadow-color: rgba(0, 0, 0, 0.1);\n}\n\n* {\n margin: 0;\n padding: 0;\n box-sizing: border-box;\n}\n\nbody {\n font-family: 'Segoe UI', system-ui, sans-serif;\n line-height: 1.6;\n color: var(--text-color);\n background: var(--background-color);\n min-height: 100vh;\n display: flex;\n flex-direction: column;\n}\n\nheader {\n background: var(--primary-color);\n color: white;\n padding: 2rem;\n text-align: center;\n box-shadow: 0 2px 10px var(--shadow-color);\n}\n\nh1 {\n font-size: 2.5rem;\n margin-bottom: 0.5rem;\n background: linear-gradient(45deg, #3498db, #2ecc71);\n -webkit-background-clip: text;\n background-clip: text;\n color: transparent;\n}\n\n.tagline {\n font-size: 1.2rem;\n opacity: 0.9;\n}\n\nmain {\n flex: 1;\n padding: 2rem;\n max-width: 800px;\n margin: 0 auto;\n width: 100%;\n}\n\n.control-panel {\n display: flex;\n flex-direction: column;\n align-items: center;\n gap: 1.5rem;\n margin-bottom: 2rem;\n}\n\n#startBtn {\n background: var(--accent-color);\n color: white;\n border: none;\n padding: 1rem 2rem;\n border-radius: 50px;\n cursor: pointer;\n display: flex;\n align-items: center;\n gap: 0.5rem;\n font-size: 1.1rem;\n transition: transform 0.2s, background-color 0.2s;\n}\n\n#startBtn:hover {\n transform: scale(1.05);\n background-color: #2980b9;\n}\n\n.icon {\n font-size: 1.3rem;\n}\n\n.status-indicator {\n display: flex;\n align-items: center;\n gap: 1rem;\n}\n\n.pulse-ring {\n width: 1rem;\n height: 1rem;\n border-radius: 50%;\n background: #2ecc71;\n position: relative;\n display: none;\n}\n\n.pulse-ring.active {\n display: block;\n animation: pulse 2s infinite;\n}\n\n@keyframes pulse {\n 0% {\n box-shadow: 0 0 0 0 rgba(46, 204, 113, 0.4);\n }\n 70% {\n box-shadow: 0 0 0 10px rgba(46, 204, 113, 0);\n }\n 100% {\n box-shadow: 0 0 0 0 rgba(46, 204, 113, 0);\n }\n}\n\n.conversation-container {\n background: white;\n border-radius: 10px;\n padding: 1.5rem;\n box-shadow: 0 4px 6px var(--shadow-color);\n min-height: 300px;\n max-height: 500px;\n overflow-y: auto;\n}\n\n.message {\n margin-bottom: 1rem;\n padding: 1rem;\n border-radius: 8px;\n animation: fadeIn 0.3s ease-out;\n}\n\n.user-message {\n background: #e8f4fd;\n margin-left: 2rem;\n}\n\n.ai-message {\n background: #f0f7f4;\n margin-right: 2rem;\n}\n\n@keyframes fadeIn {\n from {\n opacity: 0;\n transform: translateY(10px);\n }\n to {\n opacity: 1;\n transform: translateY(0);\n }\n}\n\nfooter {\n text-align: 
center;\n padding: 1.5rem;\n background: var(--primary-color);\n color: white;\n}\n\n@media (max-width: 600px) {\n header {\n padding: 1.5rem;\n }\n\n h1 {\n font-size: 2rem;\n }\n\n main {\n padding: 1rem;\n }\n\n .conversation-container {\n padding: 1rem;\n }\n}\n\n---script.js---\ndocument.addEventListener('DOMContentLoaded', () => {\n const startBtn = document.getElementById('startBtn');\n const statusText = document.getElementById('statusText');\n const conversation = document.getElementById('conversation');\n const pulseRing = document.querySelector('.pulse-ring');\n\n let recognition = null;\n let synthesis = window.speechSynthesis;\n\n // Initialize speech recognition\n if ('webkitSpeechRecognition' in window) {\n recognition = new webkitSpeechRecognition();\n recognition.continuous = false;\n recognition.interimResults = false;\n recognition.lang = 'en-US';\n } else {\n startBtn.disabled = true;\n statusText.textContent = 'Speech recognition not supported';\n }\n\n function addMessage(text, isUser = true) {\n const messageDiv = document.createElement('div');\n messageDiv.className = `message ${isUser ? 'user-message' : 'ai-message'}`;\n messageDiv.textContent = text;\n conversation.appendChild(messageDiv);\n conversation.scrollTop = conversation.scrollHeight;\n }\n\n async function processQuery(query) {\n // Simulate RAG + LLM processing\n // Replace this with actual API calls to your backend\n const response = await new Promise(resolve => {\n setTimeout(() => {\n resolve(`I understood your question: \"${query}\". This is a simulated response from the LLM system.`);\n }, 1000);\n });\n \n return response;\n }\n\n function speak(text) {\n const utterance = new SpeechSynthesisUtterance(text);\n synthesis.speak(utterance);\n }\n\n let isListening = false;\n\n startBtn.addEventListener('click', () => {\n if (!recognition) return;\n\n if (!isListening) {\n recognition.start();\n startBtn.querySelector('.label').textContent = 'Stop Listening';\n statusText.textContent = 'Listening...';\n pulseRing.classList.add('active');\n isListening = true;\n } else {\n recognition.stop();\n startBtn.querySelector('.label').textContent = 'Start Listening';\n statusText.textContent = 'Ready to listen...';\n pulseRing.classList.remove('active');\n isListening = false;\n }\n });\n\n if (recognition) {\n recognition.onresult = async (event) => {\n const query = event.results[0][0].transcript;\n addMessage(query, true);\n\n // Process the query\n const response = await processQuery(query);\n addMessage(response, false);\n\n // Speak the response\n speak(response);\n\n // Reset the button state\n isListening = false;\n startBtn.querySelector('.label').textContent = 'Start Listening';\n statusText.textContent = 'Ready to listen...';\n pulseRing.classList.remove('active');\n };\n\n recognition.onerror = (event) => {\n statusText.textContent = `Error: ${event.error}`;\n pulseRing.classList.remove('active');\n isListening = false;\n startBtn.querySelector('.label').textContent = 'Start Listening';\n };\n }\n});\n\n---end---\n\nThis implementation creates a beautiful, responsive website with a clean interface for voice interaction. Key features:\n\n1. Sleek, modern design with smooth animations\n2. Clear visual feedback for recording state\n3. Conversation history with distinct user/AI messages\n4. Responsive layout that works on all devices\n5. Accessibility features built-in\n6. Error handling and status indicators\n7. 
Simulated RAG+LLM processing (replace with actual API calls)\n\nThe interface uses a gradient-enhanced color scheme with smooth animations and transitions. The pulse animation indicates active listening, and messages fade in naturally as they appear.\n\nNote: This implementation uses the Web Speech API for speech recognition and synthesis. You'll need to add your actual RAG+LLM processing logic by replacing the simulated processQuery function with real API calls to your backend service."
+ }
+ ],
+ "stop_reason": "end_turn",
+ "stop_sequence": null,
+ "usage": {
+ "input_tokens": 320,
+ "cache_creation_input_tokens": 0,
+ "cache_read_input_tokens": 0,
+ "output_tokens": 2541
+ }
+}
\ No newline at end of file
diff --git a/readme.scroll b/readme.scroll
new file mode 100644
index 0000000..65fd485
--- /dev/null
+++ b/readme.scroll
@@ -0,0 +1,2 @@
+# voiceai.scroll.pub
+Website generated by Claude from prompt: I want a website with speech recognition, so I can ask it a question; it passes that question to an LLM, the answer is retrieved from the RAG system and processed by the LLM, and the answer is then read back to me through TTS
\ No newline at end of file
diff --git a/script.js b/script.js
new file mode 100644
index 0000000..69fc1be
--- /dev/null
+++ b/script.js
@@ -0,0 +1,93 @@
+document.addEventListener('DOMContentLoaded', () => {
+ const startBtn = document.getElementById('startBtn');
+ const statusText = document.getElementById('statusText');
+ const conversation = document.getElementById('conversation');
+ const pulseRing = document.querySelector('.pulse-ring');
+
+ let recognition = null;
+ let synthesis = window.speechSynthesis;
+
+ // Initialize speech recognition (the constructor is vendor-prefixed in some browsers)
+ const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+ if (SpeechRecognition) {
+ recognition = new SpeechRecognition();
+ recognition.continuous = false;
+ recognition.interimResults = false;
+ recognition.lang = 'en-US';
+ } else {
+ startBtn.disabled = true;
+ statusText.textContent = 'Speech recognition is not supported in this browser';
+ }
+
+ function addMessage(text, isUser = true) {
+ const messageDiv = document.createElement('div');
+ messageDiv.className = `message ${isUser ? 'user-message' : 'ai-message'}`;
+ messageDiv.textContent = text;
+ conversation.appendChild(messageDiv);
+ conversation.scrollTop = conversation.scrollHeight;
+ }
+
+ async function processQuery(query) {
+ // Simulate RAG + LLM processing.
+ // Replace this with a real API call to your backend (see the fetch sketch after this diff).
+ const response = await new Promise(resolve => {
+ setTimeout(() => {
+ resolve(`I understood your question: "${query}". This is a simulated response from the LLM system.`);
+ }, 1000);
+ });
+
+ return response;
+ }
+
+ function speak(text) {
+ const utterance = new SpeechSynthesisUtterance(text);
+ synthesis.speak(utterance);
+ }
+
+ let isListening = false;
+
+ startBtn.addEventListener('click', () => {
+ if (!recognition) return;
+
+ if (!isListening) {
+ recognition.start();
+ startBtn.querySelector('.label').textContent = 'Stop Listening';
+ statusText.textContent = 'Listening...';
+ pulseRing.classList.add('active');
+ isListening = true;
+ } else {
+ recognition.stop();
+ startBtn.querySelector('.label').textContent = 'Start Listening';
+ statusText.textContent = 'Ready to listen...';
+ pulseRing.classList.remove('active');
+ isListening = false;
+ }
+ });
+
+ if (recognition) {
+ recognition.onresult = async (event) => {
+ const query = event.results[0][0].transcript;
+ addMessage(query, true);
+
+ // Process the query
+ const response = await processQuery(query);
+ addMessage(response, false);
+
+ // Speak the response
+ speak(response);
+
+ // Reset the button state
+ isListening = false;
+ startBtn.querySelector('.label').textContent = 'Start Listening';
+ statusText.textContent = 'Ready to listen...';
+ pulseRing.classList.remove('active');
+ };
+
+ recognition.onerror = (event) => {
+ statusText.textContent = `Error: ${event.error}`;
+ pulseRing.classList.remove('active');
+ isListening = false;
+ startBtn.querySelector('.label').textContent = 'Start Listening';
+ };
+ }
+});
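
The processQuery function above is deliberately a stub: it resolves a canned string after a one-second delay so the listen/respond/speak flow can be exercised without a backend. Below is a minimal sketch of the real integration, assuming a hypothetical POST /api/query endpoint with a { question } -> { answer } JSON contract; the endpoint name, payload shape, and error handling are illustrative assumptions, not part of this commit.

async function processQuery(query) {
  // Hypothetical backend call; adjust the URL and JSON shapes to match
  // your RAG service. This replaces the simulated Promise above.
  const res = await fetch('/api/query', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ question: query }),
  });
  if (!res.ok) {
    throw new Error(`Backend responded with ${res.status}`);
  }
  const data = await res.json();
  return data.answer;
}

Because recognition.onresult awaits processQuery, a rejected fetch would otherwise surface as an unhandled promise rejection; wrapping the await in try/catch and reporting failures through statusText keeps the button label and pulse ring in a consistent state.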
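The speak helper uses whichever voice the browser picks by default. When a specific voice or pace matters, the standard speechSynthesis API exposes getVoices(); here is a small optional refinement (the English-language filter and the 0.95 rate are assumptions chosen to match recognition.lang, not requirements of the original code).

function speak(text) {
  const utterance = new SpeechSynthesisUtterance(text);
  // getVoices() can return an empty list until the 'voiceschanged' event
  // has fired; in that case we simply fall back to the default voice.
  const voices = window.speechSynthesis.getVoices();
  const enVoice = voices.find(v => v.lang.startsWith('en'));
  if (enVoice) utterance.voice = enVoice;
  utterance.rate = 0.95; // slightly slower than the default rate of 1
  window.speechSynthesis.speak(utterance);
}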
diff --git a/style.css b/style.css
new file mode 100644
index 0000000..b22a23d
--- /dev/null
+++ b/style.css
@@ -0,0 +1,179 @@
+:root {
+ --primary-color: #2c3e50;
+ --accent-color: #3498db;
+ --background-color: #f9fafb;
+ --text-color: #2c3e50;
+ --shadow-color: rgba(0, 0, 0, 0.1);
+}
+
+* {
+ margin: 0;
+ padding: 0;
+ box-sizing: border-box;
+}
+
+body {
+ font-family: 'Segoe UI', system-ui, sans-serif;
+ line-height: 1.6;
+ color: var(--text-color);
+ background: var(--background-color);
+ min-height: 100vh;
+ display: flex;
+ flex-direction: column;
+}
+
+header {
+ background: var(--primary-color);
+ color: white;
+ padding: 2rem;
+ text-align: center;
+ box-shadow: 0 2px 10px var(--shadow-color);
+}
+
+h1 {
+ font-size: 2.5rem;
+ margin-bottom: 0.5rem;
+ background: linear-gradient(45deg, #3498db, #2ecc71);
+ -webkit-background-clip: text;
+ background-clip: text;
+ color: transparent;
+}
+
+.tagline {
+ font-size: 1.2rem;
+ opacity: 0.9;
+}
+
+main {
+ flex: 1;
+ padding: 2rem;
+ max-width: 800px;
+ margin: 0 auto;
+ width: 100%;
+}
+
+.control-panel {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ gap: 1.5rem;
+ margin-bottom: 2rem;
+}
+
+#startBtn {
+ background: var(--accent-color);
+ color: white;
+ border: none;
+ padding: 1rem 2rem;
+ border-radius: 50px;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ gap: 0.5rem;
+ font-size: 1.1rem;
+ transition: transform 0.2s, background-color 0.2s;
+}
+
+#startBtn:hover {
+ transform: scale(1.05);
+ background-color: #2980b9;
+}
+
+.icon {
+ font-size: 1.3rem;
+}
+
+.status-indicator {
+ display: flex;
+ align-items: center;
+ gap: 1rem;
+}
+
+.pulse-ring {
+ width: 1rem;
+ height: 1rem;
+ border-radius: 50%;
+ background: #2ecc71;
+ position: relative;
+ display: none;
+}
+
+.pulse-ring.active {
+ display: block;
+ animation: pulse 2s infinite;
+}
+
+@keyframes pulse {
+ 0% {
+ box-shadow: 0 0 0 0 rgba(46, 204, 113, 0.4);
+ }
+ 70% {
+ box-shadow: 0 0 0 10px rgba(46, 204, 113, 0);
+ }
+ 100% {
+ box-shadow: 0 0 0 0 rgba(46, 204, 113, 0);
+ }
+}
+
+.conversation-container {
+ background: white;
+ border-radius: 10px;
+ padding: 1.5rem;
+ box-shadow: 0 4px 6px var(--shadow-color);
+ min-height: 300px;
+ max-height: 500px;
+ overflow-y: auto;
+}
+
+.message {
+ margin-bottom: 1rem;
+ padding: 1rem;
+ border-radius: 8px;
+ animation: fadeIn 0.3s ease-out;
+}
+
+.user-message {
+ background: #e8f4fd;
+ margin-left: 2rem;
+}
+
+.ai-message {
+ background: #f0f7f4;
+ margin-right: 2rem;
+}
+
+@keyframes fadeIn {
+ from {
+ opacity: 0;
+ transform: translateY(10px);
+ }
+ to {
+ opacity: 1;
+ transform: translateY(0);
+ }
+}
+
+footer {
+ text-align: center;
+ padding: 1.5rem;
+ background: var(--primary-color);
+ color: white;
+}
+
+@media (max-width: 600px) {
+ header {
+ padding: 1.5rem;
+ }
+
+ h1 {
+ font-size: 2rem;
+ }
+
+ main {
+ padding: 1rem;
+ }
+
+ .conversation-container {
+ padding: 1rem;
+ }
+}