commit b6c9d35046e2b69e8a7549390e31246e78cdbb9b
Author: root <root@hub.scroll.pub>
Date: 2025-01-12 09:44:11 +0000
Subject: Initial commit
diff --git a/body.html b/body.html
new file mode 100644
index 0000000..bf1931c
--- /dev/null
+++ b/body.html
@@ -0,0 +1,22 @@
+<header>
+ <h1>Sonic Visions</h1>
+ <p>Transform music into images and images into music.</p>
+</header>
+
+<main>
+ <section id="music-to-image">
+ <h2>Music to Image</h2>
+ <input type="file" id="music-upload" accept="audio/*" />
+ <canvas id="music-canvas"></canvas>
+ </section>
+
+ <section id="image-to-music">
+ <h2>Image to Music</h2>
+ <input type="file" id="image-upload" accept="image/*" />
+ <audio id="audio-output" controls></audio>
+ </section>
+</main>
+
+<footer>
+ <p>Experience the harmony of sound and vision.</p>
+</footer>
\ No newline at end of file
diff --git a/index.scroll b/index.scroll
new file mode 100644
index 0000000..fd20772
--- /dev/null
+++ b/index.scroll
@@ -0,0 +1,8 @@
+buildHtml
+baseUrl https://sonicvisions.scroll.pub
+metaTags
+editButton /edit.html
+title Sonic Visions - Transform Music into Images and Images into Music
+style.css
+body.html
+script.js
\ No newline at end of file
diff --git a/prompt.json b/prompt.json
new file mode 100644
index 0000000..7808c3e
--- /dev/null
+++ b/prompt.json
@@ -0,0 +1,25 @@
+{
+ "id": "f0a20f05-daec-4fe3-88d0-ab7affb5f1ad",
+ "object": "chat.completion",
+ "created": 1736675012,
+ "model": "deepseek-chat",
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": "---domain---\nsonicvisions.scroll.pub\n---index.scroll---\nbuildHtml\nbaseUrl https://sonicvisions.scroll.pub\nmetaTags\neditButton /edit.html\ntitle Sonic Visions - Transform Music into Images and Images into Music\nstyle.css\nbody.html\nscript.js\n---body.html---\n<header>\n <h1>Sonic Visions</h1>\n <p>Transform music into images and images into music.</p>\n</header>\n\n<main>\n <section id=\"music-to-image\">\n <h2>Music to Image</h2>\n <input type=\"file\" id=\"music-upload\" accept=\"audio/*\" />\n <canvas id=\"music-canvas\"></canvas>\n </section>\n\n <section id=\"image-to-music\">\n <h2>Image to Music</h2>\n <input type=\"file\" id=\"image-upload\" accept=\"image/*\" />\n <audio id=\"audio-output\" controls></audio>\n </section>\n</main>\n\n<footer>\n <p>Experience the harmony of sound and vision.</p>\n</footer>\n---style.css---\n:root {\n --primary-color: #1a1a1a;\n --secondary-color: #333;\n --accent-color: #ff6f61;\n --text-color: #f5f5f5;\n --font-family: 'Segoe UI', sans-serif;\n}\n\n* {\n margin: 0;\n padding: 0;\n box-sizing: border-box;\n}\n\nbody {\n font-family: var(--font-family);\n color: var(--text-color);\n background-color: var(--primary-color);\n line-height: 1.6;\n padding: 20px;\n}\n\nheader {\n text-align: center;\n padding: 40px 0;\n}\n\nh1 {\n font-size: 2.5rem;\n color: var(--accent-color);\n}\n\nh2 {\n font-size: 2rem;\n margin-bottom: 20px;\n}\n\nmain {\n display: grid;\n grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));\n gap: 30px;\n margin-top: 40px;\n}\n\nsection {\n background: var(--secondary-color);\n padding: 20px;\n border-radius: 10px;\n box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);\n}\n\ninput[type=\"file\"] {\n width: 100%;\n margin-bottom: 20px;\n padding: 10px;\n border: 2px dashed var(--accent-color);\n border-radius: 5px;\n background: transparent;\n color: var(--text-color);\n}\n\ncanvas {\n width: 100%;\n height: 300px;\n background: var(--primary-color);\n border-radius: 5px;\n}\n\naudio {\n width: 100%;\n margin-top: 20px;\n}\n\nfooter {\n text-align: center;\n margin-top: 40px;\n padding: 20px;\n background: var(--secondary-color);\n border-radius: 10px;\n}\n\n@media (max-width: 768px) {\n h1 {\n font-size: 2rem;\n }\n\n h2 {\n font-size: 1.5rem;\n }\n}\n---script.js---\ndocument.getElementById('music-upload').addEventListener('change', function(event) {\n const file = event.target.files[0];\n if (file) {\n const reader = new FileReader();\n reader.onload = function(e) {\n const audioContext = new (window.AudioContext || window.webkitAudioContext)();\n const audioData = e.target.result;\n\n audioContext.decodeAudioData(audioData, function(buffer) {\n visualizeAudio(buffer);\n });\n };\n reader.readAsArrayBuffer(file);\n }\n});\n\nfunction visualizeAudio(buffer) {\n const canvas = document.getElementById('music-canvas');\n const ctx = canvas.getContext('2d');\n const width = canvas.width;\n const height = canvas.height;\n\n const audioData = buffer.getChannelData(0);\n const step = Math.ceil(audioData.length / width);\n const amp = height / 2;\n\n ctx.clearRect(0, 0, width, height);\n ctx.beginPath();\n ctx.moveTo(0, amp);\n\n for (let i = 0; i < width; i++) {\n let min = 1.0;\n let max = -1.0;\n for (let j = 0; j < step; j++) {\n const datum = audioData[(i * step) + j];\n if (datum < min) min = datum;\n if (datum > max) max = datum;\n }\n ctx.lineTo(i, (1 + min) * amp);\n ctx.lineTo(i, (1 + max) * amp);\n }\n\n ctx.strokeStyle = '#ff6f61';\n ctx.lineWidth = 2;\n 
ctx.stroke();\n}\n\ndocument.getElementById('image-upload').addEventListener('change', function(event) {\n const file = event.target.files[0];\n if (file) {\n const reader = new FileReader();\n reader.onload = function(e) {\n const img = new Image();\n img.src = e.target.result;\n img.onload = function() {\n const audio = imageToSound(img);\n const audioOutput = document.getElementById('audio-output');\n audioOutput.src = URL.createObjectURL(audio);\n };\n };\n reader.readAsDataURL(file);\n }\n});\n\nfunction imageToSound(img) {\n const canvas = document.createElement('canvas');\n const ctx = canvas.getContext('2d');\n canvas.width = img.width;\n canvas.height = img.height;\n ctx.drawImage(img, 0, 0);\n\n const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);\n const data = imageData.data;\n\n const audioContext = new (window.AudioContext || window.webkitAudioContext)();\n const buffer = audioContext.createBuffer(1, data.length, audioContext.sampleRate);\n const output = buffer.getChannelData(0);\n\n for (let i = 0; i < data.length; i++) {\n output[i] = (data[i] - 128) / 128;\n }\n\n const source = audioContext.createBufferSource();\n source.buffer = buffer;\n source.connect(audioContext.destination);\n\n const blob = new Blob([bufferToWav(buffer)], { type: 'audio/wav' });\n return blob;\n}\n\nfunction bufferToWav(buffer) {\n const numOfChan = buffer.numberOfChannels;\n const length = buffer.length * numOfChan * 2 + 44;\n const bufferArray = new ArrayBuffer(length);\n const view = new DataView(bufferArray);\n\n writeString(view, 0, 'RIFF');\n view.setUint32(4, 36 + buffer.length * numOfChan * 2, true);\n writeString(view, 8, 'WAVE');\n writeString(view, 12, 'fmt ');\n view.setUint32(16, 16, true);\n view.setUint16(20, 1, true);\n view.setUint16(22, numOfChan, true);\n view.setUint32(24, buffer.sampleRate, true);\n view.setUint32(28, buffer.sampleRate * 2 * numOfChan, true);\n view.setUint16(32, numOfChan * 2, true);\n view.setUint16(34, 16, true);\n writeString(view, 36, 'data');\n view.setUint32(40, buffer.length * numOfChan * 2, true);\n\n let offset = 44;\n for (let i = 0; i < buffer.length; i++) {\n for (let channel = 0; channel < numOfChan; channel++) {\n const sample = Math.max(-1, Math.min(1, buffer.getChannelData(channel)[i]));\n view.setInt16(offset, sample < 0 ? sample * 0x8000 : sample * 0x7FFF, true);\n offset += 2;\n }\n }\n\n return bufferArray;\n}\n\nfunction writeString(view, offset, string) {\n for (let i = 0; i < string.length; i++) {\n view.setUint8(offset + i, string.charCodeAt(i));\n }\n}\n---end---"
+ },
+ "logprobs": null,
+ "finish_reason": "stop"
+ }
+ ],
+ "usage": {
+ "prompt_tokens": 248,
+ "completion_tokens": 1853,
+ "total_tokens": 2101,
+ "prompt_cache_hit_tokens": 0,
+ "prompt_cache_miss_tokens": 248
+ },
+ "system_fingerprint": "fp_3a5770e1b4"
+}
\ No newline at end of file
diff --git a/readme.scroll b/readme.scroll
new file mode 100644
index 0000000..a133b4e
--- /dev/null
+++ b/readme.scroll
@@ -0,0 +1,3 @@
+# sonicvisions.scroll.pub
+
+website generated by DeepSeek from prompt: Upload a piece of music to turn it into an image; upload an image to turn it into music
\ No newline at end of file
diff --git a/script.js b/script.js
new file mode 100644
index 0000000..b24282e
--- /dev/null
+++ b/script.js
@@ -0,0 +1,145 @@
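+// Sonic Visions client script: converts an uploaded audio file into a
+// waveform image, and an uploaded image into playable audio.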
+document.getElementById('music-upload').addEventListener('change', function(event) {
+ const file = event.target.files[0];
+ if (file) {
+ const reader = new FileReader();
+ reader.onload = function(e) {
+ const audioContext = new (window.AudioContext || window.webkitAudioContext)();
+ const audioData = e.target.result;
+
+ audioContext.decodeAudioData(audioData, function(buffer) {
+ visualizeAudio(buffer);
+ });
+ };
+ reader.readAsArrayBuffer(file);
+ }
+});
+
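+// Draw a min/max "peak" waveform of the decoded audio onto the canvas.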
+function visualizeAudio(buffer) {
+ const canvas = document.getElementById('music-canvas');
+ // Size the drawing buffer to the rendered element; without this the canvas
+ // keeps its default 300x150 buffer and the waveform is drawn stretched.
+ canvas.width = canvas.clientWidth;
+ canvas.height = canvas.clientHeight;
+ const ctx = canvas.getContext('2d');
+ const width = canvas.width;
+ const height = canvas.height;
+
+ const audioData = buffer.getChannelData(0);
+ const step = Math.ceil(audioData.length / width);
+ const amp = height / 2;
+
+ ctx.clearRect(0, 0, width, height);
+ ctx.beginPath();
+ ctx.moveTo(0, amp);
+
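+ // For each pixel column, find the minimum and maximum sample in its slice
+ // and draw a vertical line between them (a classic peak waveform).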
+ for (let i = 0; i < width; i++) {
+ let min = 1.0;
+ let max = -1.0;
+ for (let j = 0; j < step; j++) {
+ const datum = audioData[(i * step) + j];
+ if (datum < min) min = datum;
+ if (datum > max) max = datum;
+ }
+ ctx.lineTo(i, (1 + min) * amp);
+ ctx.lineTo(i, (1 + max) * amp);
+ }
+
+ ctx.strokeStyle = '#ff6f61';
+ ctx.lineWidth = 2;
+ ctx.stroke();
+}
+
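+// Image -> Music: read the uploaded image and turn its pixels into audio.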
+document.getElementById('image-upload').addEventListener('change', function(event) {
+ const file = event.target.files[0];
+ if (file) {
+ const reader = new FileReader();
+ reader.onload = function(e) {
+ const img = new Image();
+ img.src = e.target.result;
+ img.onload = function() {
+ const audio = imageToSound(img);
+ const audioOutput = document.getElementById('audio-output');
+ // Revoke any previous object URL to avoid leaking blobs across uploads.
+ if (audioOutput.src) URL.revokeObjectURL(audioOutput.src);
+ audioOutput.src = URL.createObjectURL(audio);
+ };
+ };
+ reader.readAsDataURL(file);
+ }
+});
+
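+// Convert an image's RGBA pixel bytes into a mono WAV audio blob.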
+function imageToSound(img) {
+ const canvas = document.createElement('canvas');
+ const ctx = canvas.getContext('2d');
+ canvas.width = img.width;
+ canvas.height = img.height;
+ ctx.drawImage(img, 0, 0);
+
+ const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
+ const data = imageData.data;
+
+ const audioContext = new (window.AudioContext || window.webkitAudioContext)();
+ const buffer = audioContext.createBuffer(1, data.length, audioContext.sampleRate);
+ const output = buffer.getChannelData(0);
+
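+ // Map each RGBA byte (0-255) to a sample in [-1, 1). Every byte becomes one
+ // sample, so a 1000x1000 image yields ~4M samples (about 90 s at 44.1 kHz).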
+ for (let i = 0; i < data.length; i++) {
+ output[i] = (data[i] - 128) / 128;
+ }
+
+ // No live playback node is needed: the buffer is serialized to a WAV blob
+ // and played back through the page's <audio> element.
+ const blob = new Blob([bufferToWav(buffer)], { type: 'audio/wav' });
+ return blob;
+}
+
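+// Serialize an AudioBuffer into a 16-bit PCM WAV file (44-byte RIFF header).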
+function bufferToWav(buffer) {
+ const numOfChan = buffer.numberOfChannels;
+ const length = buffer.length * numOfChan * 2 + 44;
+ const bufferArray = new ArrayBuffer(length);
+ const view = new DataView(bufferArray);
+
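+ // RIFF/WAVE header: chunk sizes, audio format (1 = PCM), channel count,
+ // sample rate, byte rate, block align, and bits per sample.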
+ writeString(view, 0, 'RIFF');
+ view.setUint32(4, 36 + buffer.length * numOfChan * 2, true);
+ writeString(view, 8, 'WAVE');
+ writeString(view, 12, 'fmt ');
+ view.setUint32(16, 16, true);
+ view.setUint16(20, 1, true);
+ view.setUint16(22, numOfChan, true);
+ view.setUint32(24, buffer.sampleRate, true);
+ view.setUint32(28, buffer.sampleRate * 2 * numOfChan, true);
+ view.setUint16(32, numOfChan * 2, true);
+ view.setUint16(34, 16, true);
+ writeString(view, 36, 'data');
+ view.setUint32(40, buffer.length * numOfChan * 2, true);
+
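+ // Interleave channels, clamping each float sample to [-1, 1] and scaling it to signed 16-bit.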
+ let offset = 44;
+ for (let i = 0; i < buffer.length; i++) {
+ for (let channel = 0; channel < numOfChan; channel++) {
+ const sample = Math.max(-1, Math.min(1, buffer.getChannelData(channel)[i]));
+ view.setInt16(offset, sample < 0 ? sample * 0x8000 : sample * 0x7FFF, true);
+ offset += 2;
+ }
+ }
+
+ return bufferArray;
+}
+
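+// Write an ASCII tag such as 'RIFF' or 'data' into the view, byte by byte.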
+function writeString(view, offset, string) {
+ for (let i = 0; i < string.length; i++) {
+ view.setUint8(offset + i, string.charCodeAt(i));
+ }
+}
\ No newline at end of file
diff --git a/style.css b/style.css
new file mode 100644
index 0000000..54f38aa
--- /dev/null
+++ b/style.css
@@ -0,0 +1,90 @@
+:root {
+ --primary-color: #1a1a1a;
+ --secondary-color: #333;
+ --accent-color: #ff6f61;
+ --text-color: #f5f5f5;
+ --font-family: 'Segoe UI', sans-serif;
+}
+
+* {
+ margin: 0;
+ padding: 0;
+ box-sizing: border-box;
+}
+
+body {
+ font-family: var(--font-family);
+ color: var(--text-color);
+ background-color: var(--primary-color);
+ line-height: 1.6;
+ padding: 20px;
+}
+
+header {
+ text-align: center;
+ padding: 40px 0;
+}
+
+h1 {
+ font-size: 2.5rem;
+ color: var(--accent-color);
+}
+
+h2 {
+ font-size: 2rem;
+ margin-bottom: 20px;
+}
+
+main {
+ display: grid;
+ grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
+ gap: 30px;
+ margin-top: 40px;
+}
+
+section {
+ background: var(--secondary-color);
+ padding: 20px;
+ border-radius: 10px;
+ box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+}
+
+input[type="file"] {
+ width: 100%;
+ margin-bottom: 20px;
+ padding: 10px;
+ border: 2px dashed var(--accent-color);
+ border-radius: 5px;
+ background: transparent;
+ color: var(--text-color);
+}
+
+canvas {
+ width: 100%;
+ height: 300px;
+ background: var(--primary-color);
+ border-radius: 5px;
+}
+
+audio {
+ width: 100%;
+ margin-top: 20px;
+}
+
+footer {
+ text-align: center;
+ margin-top: 40px;
+ padding: 20px;
+ background: var(--secondary-color);
+ border-radius: 10px;
+}
+
+@media (max-width: 768px) {
+ h1 {
+ font-size: 2rem;
+ }
+
+ h2 {
+ font-size: 1.5rem;
+ }
+}
\ No newline at end of file