` } } class scrollButtonParser extends scrollParagraphParser { defaultClassName = "scrollButton" tag = "button" get htmlAttributes() { const link = this.getFromParser("scrollLinkParser") const post = this.getParticle("post") if (post) { const method = "post" const action = link?.link || "" const formData = new URLSearchParams({particle: post.subparticlesToString()}).toString() return ` onclick="fetch('${action}', {method: '${method}', body: '${formData}', headers: {'Content-Type': 'application/x-www-form-urlencoded'}}).then(async (message) => {const el = document.createElement('div'); el.textContent = await message.text(); this.insertAdjacentElement('afterend', el);}); return false;" ` } return super.htmlAttributes + (link ? `onclick="window.location='${link.link}'"` : "") } getFromParser(parserId) { return this.find(particle => particle.doesExtend(parserId)) } } class catchAllParagraphParser extends scrollParagraphParser { get stringAtom() { return this.getAtom(0) } get isArticleContent() { return true } get isPopular() { return true } get suggestInAutocomplete() { return false } getErrors() { const errors = super.getErrors() || [] return this.parent.has("testStrict") ? errors.concat(this.makeError(`catchAllParagraphParser should not have any matches when testing with testStrict.`)) : errors } get originalText() { return this.getLine() || "" } } class scrollCenterParser extends scrollParagraphParser { buildHtml() { this.parent.sectionStack.push("") return `
${super.buildHtml()}` } buildTxt() { return this.content } } class abstractIndentableParagraphParser extends scrollParagraphParser { createParserCombinator() { return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"[]" : checklistTodoParser, "[x]" : checklistDoneParser, "-" : listAftertextParser, ">" : quickQuoteParser, "id" : aftertextIdParser, "style" : aftertextStyleParser, "font" : aftertextFontParser, "color" : aftertextColorParser, "onclick" : aftertextOnclickParser, "hidden" : aftertextHiddenParser, "bold" : boldParser, "italics" : italicsParser, "underline" : underlineParser, "center" : afterTextCenterParser, "code" : aftertextCodeParser, "strike" : aftertextStrikeParser, "class" : classMarkupParser, "classes" : classesMarkupParser, "hoverNote" : hoverNoteParser, "link" : scrollLinkParser, "click" : scrollClickParser, "email" : emailLinkParser, "dateline" : datelineParser, "dayjs" : dayjsParser, "inlineMarkupsOn" : inlineMarkupsOnParser, "inlineMarkup" : inlineMarkupParser, "linkify" : linkifyParser}), [{regex: /^\d+\. /, parser: orderedListAftertextParser},{regex: /^https?\:/, parser: quickLinkParser},{regex: /^[^\s]+\.(html|htm)/, parser: quickRelativeLinkParser}]) } compileSubparticles() { return this.map(particle => particle.buildHtml()) .join("\n") .trim() } buildHtml() { return super.buildHtml() + this.compileSubparticles() } buildTxt() { return this.getAtom(0) + " " + super.buildTxt() } } class checklistTodoParser extends abstractIndentableParagraphParser { get checked() { return `` } get text() { return `
` } get id() { return this.get("id") || "item" + this._getUid() } } class checklistDoneParser extends checklistTodoParser { get checked() { return `checked` } } class listAftertextParser extends abstractIndentableParagraphParser { defaultClassName = "" buildHtml() { const {index, parent} = this const particleClass = this.constructor const isStartOfList = index === 0 || !(parent.particleAt(index - 1) instanceof particleClass) const isEndOfList = parent.length === index + 1 || !(parent.particleAt(index + 1) instanceof particleClass) const { listType } = this return (isStartOfList ? `<${listType} ${this.attributes}>` : "") + `${super.buildHtml()}` + (isEndOfList ? `` : "") } get attributes() { return "" } tag = "li" listType = "ul" } class abstractCustomListItemParser extends listAftertextParser { get requireOnce() { return `` } get attributes() { return `class="${this.constructor.name}"` } } class orderedListAftertextParser extends listAftertextParser { listType = "ol" get attributes() { return ` start="${this.getAtom(0)}"`} } class quickQuoteParser extends abstractIndentableParagraphParser { get isPopular() { return true } defaultClassName = "scrollQuote" tag = "blockquote" } class scrollCounterParser extends scrollParagraphParser { get cueAtom() { return this.getAtom(0) } get numberAtom() { return parseFloat(this.getAtom(1)) } buildHtml() { const line = this.getLine() const atoms = line.split(" ") atoms.shift() // drop the counter atom const perSecond = parseFloat(atoms.shift()) // get number const increment = perSecond/10 const id = this._getUid() this.setLine(`* 0 ` + atoms.join(" ")) const html = super.buildHtml() this.setLine(line) return html } } class expanderParser extends scrollParagraphParser { buildHtml() { this.parent.sectionStack.push("") return `
${super.buildHtml()}` } buildTxt() { return this.content } tag = "summary" defaultClassName = "" } class footnoteDefinitionParser extends scrollParagraphParser { createParserCombinator() {class labelParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get stringAtom() { return this.getAtomsFrom(1) } } return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"label" : labelParser}), [{regex: /^https?\:/, parser: quickLinkParser}]) } get isFootnote() { return true } get htmlId() { return `note${this.noteDefinitionIndex}` } get label() { // In the future we could allow common practices like author name return this.get("label") || `[${this.noteDefinitionIndex}]` } get linkBack() { return `noteUsage${this.noteDefinitionIndex}` } get text() { return `${this.label} ${super.text}` } get noteDefinitionIndex() { return this.parent.footnotes.indexOf(this) + 1 } buildTxt() { return this.getAtom(0) + ": " + super.buildTxt() } } class abstractHeaderParser extends scrollParagraphParser { buildHtml(buildSettings) { if (this.isHidden) return "" if (this.parent.sectionStack) this.parent.sectionStack.push("") return `
` + super.buildHtml(buildSettings) } buildTxt() { const line = super.buildTxt() return line + "\n" + "=".repeat(line.length) } isHeader = true } class h1Parser extends abstractHeaderParser { get isPopular() { return true } get isArticleContent() { return true } tag = "h1" } class h2Parser extends abstractHeaderParser { get isPopular() { return true } get isArticleContent() { return true } tag = "h2" } class h3Parser extends abstractHeaderParser { get isArticleContent() { return true } tag = "h3" } class h4Parser extends abstractHeaderParser { tag = "h4" } class scrollQuestionParser extends h4Parser { defaultClassName = "scrollQuestion" } class h5Parser extends abstractHeaderParser { tag = "h5" } class printTitleParser extends abstractHeaderParser { get isPopular() { return true } buildHtml(buildSettings) { // Hacky, I know. const {content} = this if (content === undefined) this.setContent(this.root.title) const { permalink } = this.root if (!permalink) { this.setContent(content) // Restore it as it was. return super.buildHtml(buildSettings) } const newLine = this.appendLine(`link ${permalink}`) const compiled = super.buildHtml(buildSettings) newLine.destroy() this.setContent(content) // Restore it as it was. return compiled } get originalText() { return this.content ?? this.root.title ?? "" } defaultClassName = "printTitleParser" tag = "h1" } class captionAftertextParser extends scrollParagraphParser { get isPopular() { return true } } class abstractMediaParser extends scrollParagraphParser { createParserCombinator() { return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"loop" : scrollMediaLoopParser, "autoplay" : scrollAutoplayParser}), undefined) } get atomIndex() { return 1 } buildTxt() { return "" } get filename() { return this.getAtom(this.atomIndex) } getAsHtmlAttribute(attr) { if (!this.has(attr)) return "" const value = this.get(attr) return value ? 
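// A present value renders as name="value"; a cue with no value falls through to the bare attribute name (boolean attributes such as loop or autoplay).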
`${attr}="${value}"` : attr } getAsHtmlAttributes(list) { return list.map(atom => this.getAsHtmlAttribute(atom)).filter(i => i).join(" ") } buildHtml() { return `<${this.tag} src="${this.filename}" controls ${this.getAsHtmlAttributes("width height loop autoplay".split(" "))}>` } } class scrollMusicParser extends abstractMediaParser { buildHtml() { return `` } } class quickSoundParser extends scrollMusicParser { get urlAtom() { return this.getAtom(0) } get atomIndex() { return 0 } } class scrollVideoParser extends abstractMediaParser { createParserCombinator() {class widthParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get integerAtom() { return parseInt(this.getAtom(1)) } } class heightParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get integerAtom() { return parseInt(this.getAtom(1)) } } return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"width" : widthParser, "height" : heightParser}), undefined) } tag = "video" } class quickVideoParser extends scrollVideoParser { get urlAtom() { return this.getAtom(0) } get atomIndex() { return 0 } } class quickParagraphParser extends scrollParagraphParser { get isArticleContent() { return true } } class scrollStopwatchParser extends scrollParagraphParser { get cueAtom() { return this.getAtom(0) } get numberAtom() { return this.getAtomsFrom(1).map(val => parseFloat(val)) } buildHtml() { const line = this.getLine() const id = this._getUid() this.setLine(`* 0.0 `) const html = super.buildHtml() this.setLine(line) return html } } class thinColumnsParser extends abstractAftertextParser { get integerAtom() { return this.getAtomsFrom(0).map(val => parseInt(val)) } buildHtmlSnippet() { return "" } columnWidth = 35 columnGap = 20 buildHtml() { const {columnWidth, columnGap, maxColumns} = this const maxTotalWidth = maxColumns * columnWidth + (maxColumns - 1) * columnGap const stackContents = this.parent.clearSectionStack() // Starting columns always first clears the section stack. if (this.singleColumn) this.parent.sectionStack.push("
") // Single columns are self-closing after section break. return stackContents + `
` }
  get maxColumns() { return this.singleColumn ? 1 : parseInt(this.getAtom(1) ?? 10) }
}
class wideColumnsParser extends thinColumnsParser { columnWidth = 90 }
class wideColumnParser extends wideColumnsParser { get singleColumn() { return true } }
class mediumColumnsParser extends thinColumnsParser { columnWidth = 65 }
class mediumColumnParser extends mediumColumnsParser { get singleColumn() { return true } }
class thinColumnParser extends thinColumnsParser { get singleColumn() { return true } }
class endColumnsParser extends abstractAftertextParser {
  buildHtml() { return "" }
  buildHtmlSnippet() { return "" }
}
class scrollContainerParser extends abstractAftertextParser {
  get cssLengthAtom() { return this.getAtomsFrom(0) }
  get isHtml() { return true }
  get maxWidth() { return this.atoms[1] || "1200px" }
  buildHtmlSnippet() { return "" }
  tag = "div"
  defaultClassName = "scrollContainerParser"
  buildHtml() {
    this.parent.bodyStack.push("")
    return `` + super.buildHtml()
  }
  get text() { return "" }
  get closingTag() { return "" }
}
class debugSourceStackParser extends abstractAftertextParser {
  get sources() {
    const { file } = this.root
    const passNames = ["codeAtStart", "fusedCode", "codeAfterMacroPass"]
    let lastCode = ""
    return passNames.map(name => {
      let code = file[name]
      if (lastCode === code) code = "[Unchanged]"
      lastCode = file[name]
      return { name, code }
    })
  }
  buildHtml() { return `${this.buildTxt().replace(/\</g, "&lt;")}` }
  buildTxt() { return this.sources.map((pass, index) => `Pass ${index + 1} - ${pass.name}\n========\n${pass.code}`).join("\n\n\n") }
}
class abstractDinkusParser extends abstractAftertextParser {
  get isDinkus() { return true }
  buildHtml() { return `
${this.dinkus}
` }
  defaultClass = "abstractDinkusParser"
  buildTxt() { return this.dinkus }
  get dinkus() { return this.content || this.getLine() }
}
class horizontalRuleParser extends abstractDinkusParser {
  buildHtml() { return `<hr>
` } } class scrollDinkusParser extends abstractDinkusParser { get isPopular() { return true } dinkus = "*" } class customDinkusParser extends abstractDinkusParser { } class endOfPostDinkusParser extends abstractDinkusParser { get isPopular() { return true } dinkus = "⁂" } class abstractIconButtonParser extends abstractAftertextParser { buildHtmlSnippet() { return "" } buildHtml() { return `${this.svg}` } } class downloadButtonParser extends abstractIconButtonParser { get urlAtom() { return this.getAtomsFrom(0) } get svg() { return `` } get style() { return `position:relative;` } get link() { return this.content } } class editButtonParser extends abstractIconButtonParser { get urlAtom() { return this.getAtomsFrom(0) } get svg() { return `` } get link() { return this.content || this.root.editUrl || "" } get style() { return this.parent.findParticles("editButton")[0] === this ? "right:2rem;": "position:relative;" } } class emailButtonParser extends abstractIconButtonParser { get emailAddressAtom() { return this.getAtomsFrom(0) } get svg() { return `` } get style() { return `position:relative;` } get link() { const email = this.content || this.parent.get("email") return email ? `mailto:${email}` : "" } } class homeButtonParser extends abstractIconButtonParser { get urlAtom() { return this.getAtomsFrom(0) } get svg() { return `` } get style() { return `left:2rem;` } get link() { return this.content || this.get("link") || "index.html" } } class theScrollButtonParser extends abstractIconButtonParser { get svg() { return `` } get style() { return `position:relative;` } get link() { return "https://wws.scroll.pub" } } class abstractTextLinkParser extends abstractAftertextParser { buildHtmlSnippet() { return "" } buildTxt() { return this.text } buildHtml() { return `` } } class editLinkParser extends abstractTextLinkParser { get text() { return `Edit` } get link() { return this.root.editUrl || "" } } class scrollVersionLinkParser extends abstractTextLinkParser { get link() { return `https://scroll.pub` } get text() { return `Built with Scroll v${this.root.scrollVersion}` } } class classicFormParser extends abstractAftertextParser { get cueAtom() { return this.getAtom(0) } get stringAtom() { return this.getAtomsFrom(1) } get style() { return `` } get script() { return `` } get inputs() { return this.root.measures.filter(measure => !measure.IsComputed).map((measure, index) => { const {Name, Question, IsRequired, Type} = measure const type = Type || "text" const placeholder = Question const ucFirst = Name.substr(0, 1).toUpperCase() + Name.substr(1) // ${index ? "" : "autofocus"} let tag = "" if (Type === "textarea") tag = `` else tag = `` return `
${tag}
` }).join("\n") } buildHtml() { const {isEmail, formDestination, callToAction, subject} = this return `${this.script}${this.style}
${this.inputs}${this.footer}
` } get callToAction() { return (this.isEmail ? "Submit via email" : (this.subject || "Post")) } get isEmail() { return this.formDestination.includes("@") } get formDestination() { return this.getAtom(1) || "" } get subject() { return this.getAtomsFrom(2)?.join(" ") || "" } get footer() { return "" } } class scrollFormParser extends classicFormParser { createParserCombinator() {class placeholderParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(this._getBlobParserCatchAllParser())} getErrors() { return [] } get cueAtom() { return this.getAtom(0) } } class valueParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(this._getBlobParserCatchAllParser())} getErrors() { return [] } get cueAtom() { return this.getAtom(0) } } class nameParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get stringAtom() { return this.getAtom(1) } } return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"placeholder" : placeholderParser, "value" : valueParser, "name" : nameParser}), undefined) } get requireOnce() { return ` ` } get copyFromExternal() { return `.codeMirror.css .scrollLibs.js .constants.js` } get placeholder() { return this.getParticle("placeholder")?.subparticlesToString() || "" } get value() { return this.getParticle("value")?.subparticlesToString() || "" } get footer() { return "" } get name() { return this.get("name") || "particles" } get parsersBundle() { const parserRegex = /^[a-zA-Z0-9_]+Parser$/gm const clone = this.root.clone() const parsers = clone.filter(line => parserRegex.test(line.getLine())) return "\n" + parsers.map(particle => { particle.prependLine("boolean suggestInAutocomplete true") return particle.toString() }).join("\n") } get inputs() { const Name = this.name return ` ` } buildHtml(buildSettings) { return this.getHtmlRequirements(buildSettings) + super.buildHtml() } } class loremIpsumParser extends abstractAftertextParser { get integerAtom() { return this.getAtomsFrom(0).map(val => parseInt(val)) } get placeholder() { return `Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.` } get originalText() { return this.placeholder.repeat(this.howMany) } get howMany() { return this.getAtom(1) ? parseInt(this.getAtom(1)) : 1 } } class nickelbackIpsumParser extends loremIpsumParser { get placeholder() { return `And one day, I’ll be at the door. And lose your wings to fall in love? To the bottom of every bottle. I’m on the ledge of the eighteenth story. Why must the blind always lead the blind?` } } class scrollModalParser extends abstractAftertextParser { get requireOnce() { return ` ` } get isHtml() { return true } buildHtml(buildSettings) { this.parent.sectionStack.push("") return this.getHtmlRequirements(buildSettings) + `
` } } class printSnippetsParser extends abstractAftertextParser { get cueAtom() { return this.getAtom(0) } get tagWithOptionalFolderAtom() { return this.getAtomsFrom(1) } makeSnippet(scrollProgram, buildSettings) { const {endSnippetIndex} = scrollProgram if (endSnippetIndex === -1) return scrollProgram.buildHtmlSnippet(buildSettings) + scrollProgram.editHtml const linkRelativeToCompileTarget = buildSettings.relativePath + scrollProgram.permalink const joinChar = "\n" const html = scrollProgram .map((subparticle, index) => (index >= endSnippetIndex ? "" : subparticle.buildHtmlSnippet ? subparticle.buildHtmlSnippet(buildSettings) : subparticle.buildHtml(buildSettings))) .filter(i => i) .join(joinChar) .trim() + `Continue reading...` return html } get files() { const thisFile = this.parent.file const files = this.root.getFilesByTags(this.content, this.has("limit") ? parseInt(this.get("limit")) : undefined).filter(file => file.file !== thisFile) // allow sortBy lastCommit Time if (this.get("sortBy") === "commitTime") { return this.root.sortBy(files, file => file.scrollProgram.lastCommitTime).reverse() } return files } buildHtml() { const alreadyRequired = this.root.alreadyRequired const snippets = this.files.map(file => { const buildSettings = {relativePath: file.relativePath, alreadyRequired } return `
${this.makeSnippet(file.file.scrollProgram, buildSettings)}
` }).join("\n\n") return `
${snippets}
` } buildTxt() { return this.files.map(file => { const {scrollProgram} = file.file const {title, date, absoluteLink} = scrollProgram const ruler = "=".repeat(title.length) // Note: I tried to print the description here but the description generating code needs work. return `${title}\n${ruler}\n${date}\n${absoluteLink}` }).join("\n\n") } } class scrollNavParser extends printSnippetsParser { createParserCombinator() {class joinParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get stringAtom() { return this.getAtomsFrom(1) } get allowTrailingWhitespace() { return true } } return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"join" : joinParser}), undefined) } buildHtml() { return `` } } class printFullSnippetsParser extends printSnippetsParser { makeSnippet(scrollProgram, buildSettings) { return scrollProgram.buildHtmlSnippet(buildSettings) + scrollProgram.editHtml } } class printShortSnippetsParser extends printSnippetsParser { makeSnippet(scrollProgram, buildSettings) { const { title, permalink, description, timestamp } = scrollProgram return `
${title}
${description}...
${this.root.dayjs(timestamp * 1000).format(`MMMM D, YYYY`)}
` } } class printRelatedParser extends printSnippetsParser { buildHtml() { const alreadyRequired = this.root.alreadyRequired const list = this.files.map(fileWrapper => { const {relativePath, file} = fileWrapper const {title, permalink, year} = file.scrollProgram return `- ${title}${year ? " (" + year + ")" : ""}\n link ${relativePath + permalink}` }).join("\n") const items = this.parent.concat(list) const html = items.map(item => item.buildHtml()).join("\n") items.forEach(item => item.destroy()) return html } } class scrollNoticesParser extends abstractAftertextParser { buildHtml() { const id = this.htmlId return `` } } class abstractAssertionParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(htmlLineParser, undefined, undefined) } get codeAtom() { return this.getAtomsFrom(0) } get bindTo() { return `previous` } buildHtml() { return `` } get particleToTest() { // If the previous particle is also an assertion particle, use the one before that. return this.previous.particleToTest ? this.previous.particleToTest : this.previous } get actual() {return this.particleToTest.buildHtml()} getErrors() { const {actual, expected} = this const errors = super.getErrors() if (this.areEqual(actual, expected)) return errors return errors.concat(this.makeError(`'${actual}' did not ${this.kind} '${expected}'`)) } get expected() { return this.length ? this.subparticlesToString() : (this.content ? this.content : "") } } class assertHtmlEqualsParser extends abstractAssertionParser { get kind() { return `equal` } areEqual(actual, expected) { return actual === expected } // todo: why are we having to super here? getErrors() { return super.getErrors()} } class assertBuildIncludesParser extends abstractAssertionParser { get kind() { return `include` } areEqual(actual, expected) { return actual.includes(expected) } get actual() { return this.particleToTest.buildOutput()} getErrors() { return super.getErrors()} } class assertHtmlIncludesParser extends abstractAssertionParser { get kind() { return `include` } areEqual(actual, expected) { return actual.includes(expected) } getErrors() { return super.getErrors()} } class assertHtmlExcludesParser extends abstractAssertionParser { get kind() { return `exclude` } areEqual(actual, expected) { return !actual.includes(expected) } getErrors() { return super.getErrors()} } class assertIgnoreBelowErrorsParser extends abstractScrollParser { get bindTo() { return `next` } } class abstractPrintMetaParser extends abstractScrollParser { } class printAuthorsParser extends abstractPrintMetaParser { get stringAtom() { return this.getAtomsFrom(0) } get isPopular() { return true } buildHtml() { return this.parent.getParticle("authors")?.buildHtmlForPrint() } buildTxt() { return this.parent.getParticle("authors")?.buildTxtForPrint() } } class printDateParser extends abstractPrintMetaParser { get isPopular() { return true } buildHtml() { return `
${this.day}
` } get day() { let day = this.content || this.root.date if (!day) return "" return this.root.dayjs(day).format(`MMMM D, YYYY`) } buildTxt() { return this.day } } class printFormatLinksParser extends abstractPrintMetaParser { buildHtml() { const permalink = this.root.permalink.replace(".html", "") // hacky const particle = this.appendSibling(`HTML | TXT`, `class printDateParser\nlink ${permalink}.html HTML\nlink ${permalink}.txt TXT\nstyle text-align:center;`) const html = particle.buildHtml() particle.destroy() return html } buildTxt() { const permalink = this.root.permalink.replace(".html", "") return `HTML | TXT\n link ${permalink}.html HTML\n link ${permalink}.txt TXT` } } class abstractBuildCommandParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"//" : slashCommentParser}), undefined) } get buildCommandAtom() { return this.getAtom(0) } get filePathAtom() { return this.getAtomsFrom(1) } isTopMatter = true buildHtml() { return "" } get extension() { return this.cue.replace("build", "") } buildOutput() { return this.root.compileTo(this.extension) } get outputFileNames() { return this.content?.split(" ") || [this.root.permalink.replace(".html", "." + this.extension.toLowerCase())] } async _buildFileType(extension, options) { const {root} = this const { fileSystem, folderPath, filename, filePath, path, lodash } = root const capitalized = lodash.capitalize(extension) const buildKeyword = "build" + capitalized const {outputFileNames} = this for (let name of outputFileNames) { try { await fileSystem.writeProduct(path.join(folderPath, name), root.compileTo(capitalized)) root.log(`💾 Built ${name} from ${filename}`) } catch (err) { console.error(`Error while building '${filePath}' with extension '${extension}'`) throw err } } } } class abstractBuildOneCommandParser extends abstractBuildCommandParser { async buildOne(options) { await this._buildFileType(this.extension, options) } } class buildParsersParser extends abstractBuildOneCommandParser { } class buildCsvParser extends abstractBuildOneCommandParser { } class buildTsvParser extends abstractBuildOneCommandParser { } class buildJsonParser extends abstractBuildOneCommandParser { } class abstractBuildTwoCommandParser extends abstractBuildCommandParser { async buildTwo(options) { await this._buildFileType(this.extension, options) } } class buildCssParser extends abstractBuildTwoCommandParser { } class buildHtmlParser extends abstractBuildTwoCommandParser { get isPopular() { return true } async buildTwo(options) { await this._copyExternalFiles(options) await super.buildTwo(options) } async _copyExternalFiles(options) { if (!this.isNodeJs()) return const {root} = this const externalFilesCopied = options.externalFilesCopied || {} // If this file uses a parser that has external requirements, // copy those from external folder into the destination folder. 
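// External assets are copied once per folder and parser: the externalFilesCopied cache short-circuits repeat copies, except for scrollThemeParser, which is intentionally never cached (see the todo below).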
const { parsersRequiringExternals, folderPath, fileSystem, filename, parserIdIndex, path, Disk, externalsPath } = root if (!externalFilesCopied[folderPath]) externalFilesCopied[folderPath] = {} parsersRequiringExternals.forEach(parserId => { if (externalFilesCopied[folderPath][parserId]) return if (!parserIdIndex[parserId]) return parserIdIndex[parserId].map(particle => { const externalFiles = particle.copyFromExternal.split(" ") externalFiles.forEach(name => { const newPath = path.join(folderPath, name) fileSystem.writeProduct(newPath, Disk.read(path.join(externalsPath, name))) root.log(`💾 Copied external file needed by ${filename} to ${name}`) }) }) if (parserId !== "scrollThemeParser") // todo: generalize when not to cache externalFilesCopied[folderPath][parserId] = true }) } } class buildJsParser extends abstractBuildTwoCommandParser { } class buildRssParser extends abstractBuildTwoCommandParser { } class buildTxtParser extends abstractBuildTwoCommandParser { get isPopular() { return true } } class loadConceptsParser extends abstractBuildCommandParser { get preBuildCommandAtom() { return this.getAtom(0) } get filePathAtom() { return this.getAtom(1) } async load() { const { Disk, path, importRegex } = this.root const folder = path.join(this.root.folderPath, this.getAtom(1)) const ONE_BIG_FILE = Disk.getFiles(folder).filter(file => file.endsWith(".scroll")).map(Disk.read).join("\n\n").replace(importRegex, "") this.parent.concat(ONE_BIG_FILE) //console.log(ONE_BIG_FILE) } buildHtml() { return "" } } class buildConceptsParser extends abstractBuildCommandParser { createParserCombinator() {class sortByParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get columnNameAtom() { return this.getAtom(1) } } return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"sortBy" : sortByParser}), undefined) } async buildOne() { const {root} = this const { fileSystem, folderPath, filename, path, permalink } = root const files = this.getAtomsFrom(1) if (!files.length) files.push(permalink.replace(".html", ".csv")) const sortBy = this.get("sortBy") for (let link of files) { await fileSystem.writeProduct(path.join(folderPath, link), root.compileConcepts(link, sortBy)) root.log(`💾 Built concepts in ${filename} to ${link}`) } } } class fetchParser extends abstractBuildCommandParser { get preBuildCommandAtom() { return this.getAtom(0) } get urlAtom() { return this.getAtom(1) } get url() { return this.getAtom(1) } get filename() { return this.getAtom(2) } async load() { await this.root.fetch(this.url, this.filename) } buildHtml() { return "" } } class buildMeasuresParser extends abstractBuildCommandParser { createParserCombinator() {class sortByParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get columnNameAtom() { return this.getAtom(1) } } return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"sortBy" : sortByParser}), undefined) } async buildOne() { const {root} = this const { fileSystem, folderPath, filename, path, permalink } = root const files = this.getAtomsFrom(1) if (!files.length) files.push(permalink.replace(".html", ".csv")) const sortBy = this.get("sortBy") for (let link of files) { await fileSystem.writeProduct(path.join(folderPath, link), root.compileMeasures(link, sortBy)) root.log(`💾 Built measures in ${filename} to ${link}`) } } } class buildPdfParser extends 
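// buildPdf prints the compiled HTML to a PDF by shelling out to headless Chrome (macOS binary path hard-coded below); outside Node.js it only returns a notice string.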
abstractBuildCommandParser { async buildTwo(options) { if (!this.isNodeJs()) return "Only works in Node currently." const {root} = this const { filename } = root const outputFile = root.filenameNoExtension + ".pdf" // relevant source code for chrome: https://github.com/chromium/chromium/blob/a56ef4a02086c6c09770446733700312c86f7623/components/headless/command_handler/headless_command_switches.cc#L22 const command = `/Applications/Google\\ Chrome.app/Contents/MacOS/Google\\ Chrome --headless --disable-gpu --no-pdf-header-footer --default-background-color=00000000 --no-pdf-background --print-to-pdf="${outputFile}" "${this.permalink}"` // console.log(`Node.js is running on architecture: ${process.arch}`) try { const output = require("child_process").execSync(command, { stdio: "ignore" }) root.log(`💾 Built ${outputFile} from ${filename}`) } catch (error) { console.error(error) } } } class abstractInlineFileParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(scrollFileAddressParser, undefined, undefined) } get filePathAtom() { return this.getAtomsFrom(0) } get htmlTag() { return `script` } get joinChar() { return `;\n\n` } get files() { const inline = this.atoms.slice(1) const children = this.map(particle => particle.cue) return inline.concat(children) } get contents() { return this.files.map(filename => this.root.readFile(filename)).join(this.joinChar) } buildHtml() { return `<${this.htmlTag}>/* ${this.files.join(" ")} */\n${this.contents}` } } class scrollInlineCssParser extends abstractInlineFileParser { get htmlTag() { return `style` } get joinChar() { return `\n\n` } buildCss() { return this.contents } } class scrollInlineJsParser extends abstractInlineFileParser { buildJs() { return this.contents } } class abstractTopLevelSingleMetaParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"//" : slashCommentParser}), undefined) } get metaCommandAtom() { return this.getAtom(0) } isTopMatter = true isSetterParser = true buildHtml() { return "" } } class testStrictParser extends abstractTopLevelSingleMetaParser { } class scrollDateParser extends abstractTopLevelSingleMetaParser { get dateAtom() { return this.getAtomsFrom(0) } get isPopular() { return true } } class abstractUrlSettingParser extends abstractTopLevelSingleMetaParser { get metaCommandAtom() { return this.getAtom(0) } get urlAtom() { return this.getAtom(1) } } class editBaseUrlParser extends abstractUrlSettingParser { } class canonicalUrlParser extends abstractUrlSettingParser { } class openGraphImageParser extends abstractUrlSettingParser { } class baseUrlParser extends abstractUrlSettingParser { } class rssFeedUrlParser extends abstractUrlSettingParser { } class editUrlParser extends abstractTopLevelSingleMetaParser { get urlAtom() { return this.getAtomsFrom(0) } } class siteOwnerEmailParser extends abstractTopLevelSingleMetaParser { get metaCommandAtom() { return this.getAtom(0) } get emailAddressAtom() { return this.getAtom(1) } } class faviconParser extends abstractTopLevelSingleMetaParser { get stringAtom() { return this.getAtomsFrom(0) } } class importOnlyParser extends abstractTopLevelSingleMetaParser { get preBuildCommandAtom() { return this.getAtom(0) } buildHtml() { return "" } } class inlineMarkupsParser extends abstractTopLevelSingleMetaParser { } class htmlLangParser extends abstractTopLevelSingleMetaParser { get 
metaCommandAtom() { return this.getAtom(0) } get stringAtom() { return this.getAtom(1) } } class openGraphDescriptionParser extends abstractTopLevelSingleMetaParser { get stringAtom() { return this.getAtomsFrom(0) } } class permalinkParser extends abstractTopLevelSingleMetaParser { get metaCommandAtom() { return this.getAtom(0) } get permalinkAtom() { return this.getAtom(1) } } class scrollTagsParser extends abstractTopLevelSingleMetaParser { get tagAtom() { return this.getAtomsFrom(0) } } class scrollTitleParser extends abstractTopLevelSingleMetaParser { get stringAtom() { return this.getAtomsFrom(0) } get isPopular() { return true } } class scrollLinkTitleParser extends abstractTopLevelSingleMetaParser { get stringAtom() { return this.getAtomsFrom(0) } } class scrollChatParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(chatLineParser, undefined, undefined) } buildHtml() { return this.map((line, index) => line.asString ? `
${line.asString}
` : "").join("") } buildTxt() { return this.subparticlesToString() } } class abstractDatatableProviderParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"#" : h1Parser, "##" : h2Parser, "?" : scrollQuestionParser, "heatrix" : heatrixParser, "heatrixAdvanced" : heatrixAdvancedParser, "map" : mapParser, "scatterplot" : plotScatterplotParser, "barchart" : plotBarchartParser, "linechart" : plotLineChartParser, "sparkline" : sparklineParser, "printColumn" : printColumnParser, "printTable" : printTableParser, "//" : slashCommentParser, "br" : scrollBrParser, "splitYear" : scrollSplitYearParser, "splitDayName" : scrollSplitDayNameParser, "splitMonthName" : scrollSplitMonthNameParser, "splitMonth" : scrollSplitMonthParser, "splitDayOfMonth" : scrollSplitDayOfMonthParser, "splitDay" : scrollSplitDayOfWeekParser, "parseDate" : scrollParseDateParser, "groupBy" : scrollGroupByParser, "where" : scrollWhereParser, "select" : scrollSelectParser, "reverse" : scrollReverseParser, "compose" : scrollComposeParser, "compute" : scrollComputeParser, "eval" : scrollEvalParser, "rank" : scrollRankParser, "links" : scrollLinksParser, "limit" : scrollLimitParser, "shuffle" : scrollShuffleParser, "transpose" : scrollTransposeParser, "impute" : scrollImputeParser, "orderBy" : scrollOrderByParser, "assertRowCount" : assertRowCountParser, "rename" : scrollRenameParser, "summarize" : scrollSummarizeParser, "data" : scrollTableDataParser, "delimiter" : scrollTableDelimiterParser}), [{regex: /^ particle.isTableVisualization || particle.isHeader || particle.isHtml) } buildHtml(buildSettings) { return this.visualizations.map(particle => particle.buildHtml(buildSettings)) .join("\n") .trim() } buildTxt() { return this.visualizations.map(particle => particle.buildTxt()) .join("\n") .trim() } _coreTable get coreTable() { if (this._coreTable) return this._coreTable const {delimiter, delimitedData} = this return [] } get columnNames() { return [] } } class scrollTableParser extends abstractDatatableProviderParser { get filePathAtom() { return this.getAtomsFrom(0) } get atomIndex() { return 1 } get delimiter() { const {filename} = this let delimiter = "" if (filename) { const extension = filename.split('?')[0].split(".").pop() if (extension === "json") delimiter = "json" if (extension === "particles") delimiter = "particles" if (extension === "csv") delimiter = "," if (extension === "tsv") delimiter = "\t" if (extension === "ssv") delimiter = " " if (extension === "psv") delimiter = "|" } if (this.get("delimiter")) delimiter = this.get("delimiter") else if (!delimiter) { const header = this.delimitedData.split("\n")[0] if (header.includes("\t")) delimiter = "\t" else if (header.includes(",")) delimiter = "," else delimiter = " " } return delimiter } get filename() { return this.getAtom(this.atomIndex) } get coreTable() { if (this._coreTable) return this._coreTable const {delimiter, delimitedData} = this if (delimiter === "json") { const obj = JSON.parse(delimitedData) let rows = [] // Optimal case: Array of objects if (Array.isArray(obj)) { rows = obj} else if (!Array.isArray(obj) && typeof obj === "object") { // Case 2: Nested array under a key const arrayKey = Object.keys(obj).find(key => Array.isArray(obj[key])) if (arrayKey) rows = obj[arrayKey] } // Case 3: Array of primitive values else if (Array.isArray(obj) && obj.length && typeof obj[0] !== "object") { rows = obj.map(value 
=> ({ value })) } this._columnNames = rows.length ? Object.keys(rows[0]) : [] this._coreTable = rows return rows } else if (delimiter === "particles") { const d3lib = this.root.d3 this._coreTable = d3lib.dsvFormat(",").parse(new Particle(delimitedData).asCsv, d3lib.autoType) } else { const d3lib = this.root.d3 this._coreTable = d3lib.dsvFormat(delimiter).parse(delimitedData, d3lib.autoType) } this._columnNames = this._coreTable.columns delete this._coreTable.columns return this._coreTable } get columnNames() { // init coreTable to set columns const coreTable = this.coreTable return this._columnNames } async load() { if (this.filename) await this.root.fetch(this.filename) } get fileContent() { return this.root.readSyncFromFileOrUrl(this.filename) } get delimitedData() { // json csv tsv if (this.filename) return this.fileContent const dataParticle = this.getParticle("data") if (dataParticle) return dataParticle.subparticlesToString() // if not dataparticle and no filename, check [permalink].csv if (this.isNodeJs()) return this.root.readFile(this.root.permalink.replace(".html", "") + ".csv") return "" } } class clocParser extends scrollTableParser { get copyFromExternal() { return `.clocLangs.txt` } delimiter = "," get delimitedData() { const { execSync } = require("child_process") const results = execSync(this.command).toString().trim() const csv = results.split("\n\n").pop().replace(/,\"github\.com\/AlDanial.+/, "") // cleanup output return csv } get command(){ return `cloc --vcs git . --csv --read-lang-def=.clocLangs.txt ${this.content || ""}` } } class scrollDependenciesParser extends scrollTableParser { delimiter = "," get delimitedData() { return `file\n` + this.root.dependencies.join("\n") } } class scrollDiskParser extends scrollTableParser { delimiter = "json" get delimitedData() { return this.isNodeJs() ? this.delimitedDataNodeJs : "" } get delimitedDataNodeJs() { const fs = require('fs'); const path = require('path'); const {folderPath} = this.root const folder = this.content ? path.join(folderPath, this.content) : folderPath function getDirectoryContents(dirPath) { const directoryContents = []; const items = fs.readdirSync(dirPath); items.forEach((item) => { const itemPath = path.join(dirPath, item); const stats = fs.statSync(itemPath); directoryContents.push({ name: item, type: stats.isDirectory() ? 
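// Each directory entry records its name, whether it is a directory or a file, its size in bytes, and its last-modified time.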
'directory' : 'file', size: stats.size, lastModified: stats.mtime }); }); return directoryContents; } return JSON.stringify(getDirectoryContents(folder)) } } class scrollIrisParser extends scrollTableParser { delimitedData = this.constructor.iris } class vegaSampleDataParser extends scrollTableParser { get cueAtom() { return this.getAtom(0) } get vegaDataSetAtom() { return this.getAtom(1) } get filename() { return "https://ohayo.scroll.pub/ohayo/packages/vega/datasets/" + this.content } } class quickTableParser extends scrollTableParser { get urlAtom() { return this.getAtom(0) } get atomIndex() { return 0 } get dependencies() { return [this.cue]} } class scrollConceptsParser extends abstractDatatableProviderParser { get cueAtom() { return this.getAtom(0) } get coreTable() { return this.root.concepts } get columnNames() { return this.root.measures.map(col => col.Name) } } class abstractPostsParser extends abstractDatatableProviderParser { get cueAtom() { return this.getAtom(0) } get tagWithOptionalFolderAtom() { return this.getAtomsFrom(1) } async load() { const dependsOn = this.tags.map(tag => this.root.parseNestedTag(tag)).filter(i => i).map(i => i.folderPath) const {fileSystem} = this.root for (let folderPath of dependsOn) { // console.log(`${this.root.filePath} is loading: ${folderPath} in id '${fileSystem.fusionId}'`) await fileSystem.getLoadedFilesInFolder(folderPath, ".scroll") } } get tags() { return this.content?.split(" ") || [] } get files() { const thisFile = this.root.file // todo: we can include this file, but just not run asTxt const files = this.root.getFilesByTags(this.tags).filter(file => file.file !== thisFile) return files } get coreTable() { if (this._coreTable) return this._coreTable this._coreTable = this.files.map(file => this.postToRow(file)) return this._coreTable } postToRow(file) { const {relativePath} = file const {scrollProgram} = file.file const {title, permalink, asTxt, date, wordCount, minutes} = scrollProgram const text = asTxt.replace(/(\t|\n)/g, " ").replace(/ file.file.scrollProgram) const { title, baseUrl, description } = this.root return ` ${title} ${baseUrl} ${description} ${dayjs().format("ddd, DD MMM YYYY HH:mm:ss ZZ")} en-us ${scrollPrograms.map(program => program.toRss()).join("\n")} ` } buildTxt() { return this.buildRss() } } class printSourceParser extends printFeedParser { buildHtml() { const files = this.root.getFilesByTags(this.content).map(file => file.file) return `${files.map(file => file.filePath + "\n " + file.codeAtStart.replace(/\n/g, "\n ") ).join("\n")}` } } class printSiteMapParser extends abstractPostsParser { buildHtml() { const { baseUrl } = this.root return this.files.map(file => baseUrl + file.relativePath + file.file.scrollProgram.permalink).join("\n") } buildTxt() { return this.buildHtml() } get dependencies() { return this.files} } class codeParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(lineOfCodeParser, undefined, undefined) } get isPopular() { return true } buildHtml() { return `${this.code.replace(/\` } buildTxt() { return "```\n" + this.code + "\n```" } get code() { return this.subparticlesToString() } } class codeWithHeaderParser extends codeParser { get stringAtom() { return this.getAtomsFrom(0) } buildHtml() { return `
${this.content}
${super.buildHtml()}
` } buildTxt() { return "```" + this.content + "\n" + this.code + "\n```" } } class codeFromFileParser extends codeWithHeaderParser { get cueAtom() { return this.getAtom(0) } get urlAtom() { return this.getAtom(1) } get code() { return this.root.readSyncFromFileOrUrl(this.content) } } class codeWithLanguageParser extends codeParser { } class debugParsersParser extends codeParser { buildParsers() { return this.code} get code() { let code = new Particle(this.root.definition.toString()) // Remove comments code.filter((line) => line.getLine().startsWith("//")).forEach((particle) => particle.destroy()) // Remove blank lines code = code.toString().replace(/^\n/gm, "") return code } } class abstractScrollWithRequirementsParser extends abstractScrollParser { buildHtml(buildSettings) { return this.getHtmlRequirements(buildSettings) + this.buildInstance() } } class copyButtonsParser extends abstractScrollWithRequirementsParser { get requireOnce() { return `` } buildInstance() { return "" } } class abstractTableVisualizationParser extends abstractScrollWithRequirementsParser { get isTableVisualization() { return true } get columnNames() { return this.parent.columnNames } } class heatrixParser extends abstractTableVisualizationParser { createParserCombinator() { return new Particle.ParserCombinator(heatrixCatchAllParser, undefined, undefined) } buildHtml() { // A hacky but simple way to do this for now. const advanced = new Particle("heatrixAdvanced") advanced.appendLineAndSubparticles("table", "\n " + this.tableData.replace(/\n/g, "\n ")) const particle = this.appendSibling("heatrixAdvanced", advanced.subparticlesToString()) const html = particle.buildHtml() particle.destroy() return html } get tableData() { const {coreTable} = this.parent if (!coreTable) return this.subparticlesToString() let table = new Particle(coreTable).asSsv if (this.parent.cue === "transpose") { // drop first line after transpose const lines = table.split("\n") lines.shift() table = lines.join("\n") } // detect years and make strings const lines = table.split("\n") const yearLine = / \d{4}(\s+\d{4})+$/ if (yearLine.test(lines[0])) { lines[0] = lines[0].replace(/ /g, " '") table = lines.join("\n") } return table } } class heatrixAdvancedParser extends abstractTableVisualizationParser { createParserCombinator() { return new Particle.ParserCombinator(heatrixCatchAllParser, undefined, undefined) } buildHtml() { class Heatrix { static HeatrixId = 0 uid = Heatrix.HeatrixId++ constructor(program) { const isDirective = atom => /^(f|l|w|h)\d+$/.test(atom) || atom === "right" || atom === "left" || atom.startsWith("http://") || atom.startsWith("https://") || atom.endsWith(".html") const particle = new Particle(program) this.program = particle const generateColorBinningString = (data, colors) => { const sortedData = [...data].sort((a, b) => a - b); const n = sortedData.length; const numBins = colors.length; // Calculate the indices for each quantile const indices = []; for (let i = 1; i < numBins; i++) { indices.push(Math.floor((i / numBins) * n)); } // Get the quantile values and round them const thresholds = indices.map(index => Math.round(sortedData[index])); // Generate the string let result = ''; colors.forEach((color, index) => { const threshold = index === colors.length - 1 ? 
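// The last color has no quantile of its own, so its cutoff is extrapolated to double the previous threshold; every other color uses its computed quantile.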
thresholds[index - 1] * 2 : thresholds[index]; result += `${color} ${threshold}\n`; }); return result.trim(); } const buildScale = (table) => { const numbers = table.split("\n").map(line => line.split(" ")).flat().filter(atom => !isDirective(atom)).map(atom => parseFloat(atom)).filter(number => !isNaN(number)) const colors = ['#ebedf0', '#c7e9c0', '#a1d99b', '#74c476', '#41ab5d', '#238b45', '#005a32']; numbers.unshift(0) return generateColorBinningString(numbers, colors); } const table = particle.getParticle("table").subparticlesToString() const scale = particle.getParticle("scale")?.subparticlesToString() || buildScale(table) const thresholds = [] const colors = [] scale.split("\n").map((line) => { const parts = line.split(" ") thresholds.push(parseFloat(parts[1])) colors.push(parts[0]) }) const colorCount = colors.length const colorFunction = (value) => { if (isNaN(value)) return "" // #ebedf0 for (let index = 0; index < colorCount; index++) { const threshold = thresholds[index] if (value <= threshold) return colors[index] } return colors[colorCount - 1] } const directiveDelimiter = ";" const getSize = (directives, letter) => directives .filter((directive) => directive.startsWith(letter)) .map((dir) => dir.replace(letter, "") + "px")[0] ?? "" this.table = table.split("\n").map((line) => line .trimEnd() .split(" ") .map((atom) => { const atoms = atom.split(directiveDelimiter).filter((atom) => !isDirective(atom)).join("") const directivesInThisAtom = atom .split(directiveDelimiter) .filter(isDirective) const value = parseFloat(atoms) const label = atoms.includes("'") ? atoms.split("'")[1] : atoms const alignment = directivesInThisAtom.includes("right") ? "right" : directivesInThisAtom.includes("left") ? "left" : "" const color = colorFunction(value) const width = getSize(directivesInThisAtom, "w") const height = getSize(directivesInThisAtom, "h") const fontSize = getSize(directivesInThisAtom, "f") const lineHeight = getSize(directivesInThisAtom, "l") || height const link = directivesInThisAtom.filter(i => i.startsWith("http") || i.endsWith(".html"))[0] const style = { "background-color": color, width, height, "font-size": fontSize, "line-height": lineHeight, "text-align": alignment, } Object.keys(style).filter(key => !style[key]).forEach((key) => delete style[key]) return { value, label, style, link, } }) ) } get html() { const { program } = this const cssId = `#heatrix${this.uid}` const defaultWidth = "40px" const defaultHeight = "40px" const fontSize = "10px" const lineHeight = defaultHeight const style = `` const firstRow = this.table[0] return ( `
${style}` + this.table .map((row, rowIndex) => { if (!rowIndex) return "" const rowStyle = row[0].style return `
${row .map((atom, columnIndex) => { if (!columnIndex) return "" const columnStyle = firstRow[columnIndex]?.style || {} let { value, label, style, link } = atom const extendedStyle = Object.assign( {}, rowStyle, columnStyle, style ) const inlineStyle = Object.keys(extendedStyle) .map((key) => `${key}:${extendedStyle[key]};`) .join("") let valueClass = value ? " valueAtom" : "" const href = link ? ` href="${link}"` : "" return `` }) .join("")}
` }) .join("\n") + "
" ).replace(/\n/g, "") } } return new Heatrix(this.subparticlesToString().trim()).html } } class mapParser extends abstractTableVisualizationParser { createParserCombinator() {class latParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get floatAtom() { return parseFloat(this.getAtom(1)) } } class longParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get floatAtom() { return parseFloat(this.getAtom(1)) } } class tilesParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get tileOptionAtom() { return this.getAtom(1) } } class zoomParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get integerAtom() { return parseInt(this.getAtom(1)) } } class geolocateParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } } class radiusParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get floatAtom() { return parseFloat(this.getAtom(1)) } } class fillOpacityParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get floatAtom() { return parseFloat(this.getAtom(1)) } } class fillColorParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get colorAtom() { return this.getAtom(1) } } class colorParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get colorAtom() { return this.getAtom(1) } } class heightParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get floatAtom() { return parseFloat(this.getAtom(1)) } } class hoverParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get colorAtom() { return this.getAtomsFrom(1) } } return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"lat" : latParser, "long" : longParser, "tiles" : tilesParser, "zoom" : zoomParser, "geolocate" : geolocateParser, "radius" : radiusParser, "fillOpacity" : fillOpacityParser, "fillColor" : fillColorParser, "color" : colorParser, "height" : heightParser, "hover" : hoverParser}), undefined) } get requireOnce() { return ` ` } get copyFromExternal() { return `.leaflet.css .leaflet.js .scrollLibs.js` } buildInstance() { const height = this.get("height") || 500 const id = this._getUid() const obj = this.toObject() const template = {} const style = height !== "full" ? `height: ${height}px;` : `height: 100%; position: fixed; z-index: -1; left: 0; top: 0; width: 100%;` const strs = ["color", "fillColor"] const nums = ["radius", "fillOpacity"] strs.filter(i => obj[i]).forEach(i => template[i] = obj[i]) nums.filter(i => obj[i]).forEach(i => template[i] = parseFloat(obj[i])) const mapId = `map${id}` return `
` } } class abstractPlotParser extends abstractTableVisualizationParser { createParserCombinator() {class widthParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get integerAtom() { return parseInt(this.getAtom(1)) } } class heightParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get integerAtom() { return parseInt(this.getAtom(1)) } } class titleParser extends abstractPlotLabelParser { } class subtitleParser extends abstractPlotLabelParser { } class captionParser extends abstractPlotLabelParser { } return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"radius" : scrollRadiusParser, "symbol" : scrollSymbolParser, "fill" : scrollFillParser, "stroke" : scrollStrokeParser, "label" : scrollLabelParser, "sort" : scrollSortParser, "x" : scrollXParser, "y" : scrollYParser, "width" : widthParser, "height" : heightParser, "title" : titleParser, "subtitle" : subtitleParser, "caption" : captionParser}), undefined) } get requireOnce() { return ` ` } get copyFromExternal() { return `.d3.js .plot.js` } buildInstance() { const id = "plot" + this._getUid() return `
` } get sortExpression() { const sort = this.get("sort") if (!sort) return "" let sort_expr = "" if (sort.startsWith("-")) { // Sort by a value descending const sortCol = sort.slice(1) sort_expr = `, sort: {x: "y", reverse: true}` } else if (sort.includes(" ")) { // Fixed order specified const order = sort.split(" ") sort_expr = `, sort: {x: (a,b) => { const order = ${JSON.stringify(order)}; return order.indexOf(a) - order.indexOf(b) }}` } else if (sort === "asc") { sort_expr = `, sort: {x: "x"}` } else if (sort === "desc") { sort_expr = `, sort: {x: "x", reverse: true}` } return sort_expr } get marks() { // just for testing purposes return `Plot.rectY({length: 10000}, Plot.binX({y: "count"}, {x: d3.randomNormal()}))` } get dataCode() { const {coreTable} = this.parent return `d3.csvParse(\`${new Particle(coreTable).asCsv}\`, d3.autoType)` } get plotOptions() { return `{ title: "${this.get("title") || ""}", subtitle: "${this.get("subtitle") || ""}", caption: "${this.get("caption") || ""}", symbol: {legend: ${this.has("symbol")}}, color: {legend: ${this.has("fill") || this.has("stroke")}}, grid: ${this.get("grid") !== "false"}, marks: [${this.marks}], width: ${this.get("width") || 640}, height: ${this.get("height") || 400}, }` } } class plotScatterplotParser extends abstractPlotParser { get marks() { const x = this.get("x") const y = this.get("y") const text = this.get("label") return `Plot.dot(data, { x: get("${x}", 0), y: get("${y}", 1), r: get("${this.get("radius")}"), fill: get("${this.get("fill")}"), tip: true${this.sortExpression}, symbol: get("${this.get("symbol")}")} ), Plot.text(data, {x: get("${x}",0), y: get("${y}", 1), text: "${text}", dy: -6, lineAnchor: "bottom"})` } } class plotBarchartParser extends abstractPlotParser { get marks() { const x = this.get("x") const y = this.get("y") const text = this.get("label") const fill = this.get("fill") return `Plot.barY(data, { x: get("${x}", 0), y: get("${y}", 1), fill: get("${fill}"), tip: true${this.sortExpression} }), Plot.ruleY([0])` } } class plotLineChartParser extends abstractPlotParser { get marks() { const x = this.get("x") const y = this.get("y") const stroke = this.get("stroke") || "steelblue" const strokeWidth = this.get("strokeWidth") || 2 const strokeLinecap = this.get("strokeLinecap") || "round" const fill = this.get("fill") return `Plot.line(data, { x: get("${x}", 0), y: get("${y}", 1), stroke: "${stroke}", fill: get("${fill}"), strokeWidth: ${strokeWidth}, strokeLinecap: "${strokeLinecap}"${this.sortExpression} })` } } class sparklineParser extends abstractTableVisualizationParser { createParserCombinator() { return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"y" : scrollYParser}), undefined) } get numberAtom() { return this.getAtomsFrom(0).map(val => parseFloat(val)) } get requireOnce() { return `` } get copyFromExternal() { return `.sparkline.js` } buildInstance() { const id = "spark" + this._getUid() const {columnValues} = this const start = this.has("start") ? 
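// "start" is an optional integer (default 0); width, height, and line color default to 100, 30, and "black".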
parseInt(this.get("start")) : 0 const width = this.get("width") || 100 const height = this.get("height") || 30 const lineColor = this.get("color") || "black" return `` } get columnValues() { if (this.content) return this.content.split(" ").map(str => parseFloat(str)) const {coreTable} = this.parent if (coreTable) { const columnName = this.get("y") || Object.keys(coreTable[0]).find(key => typeof coreTable[0][key] === 'number') return coreTable.map(row => row[columnName]) } } } class printColumnParser extends abstractTableVisualizationParser { createParserCombinator() {class joinParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get stringAtom() { return this.getAtomsFrom(1) } get allowTrailingWhitespace() { return true } } return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"join" : joinParser}), undefined) } get columnNameAtom() { return this.getAtomsFrom(0) } buildHtml() { return this.columnValues.join(this.join) } buildTxt() { return this.columnValues.join(this.join) } get join() { return this.get("join") || "\n" } get columnName() { return this.atoms[1] } get columnValues() { return this.parent.coreTable.map(row => row[this.columnName]) } } class printTableParser extends abstractTableVisualizationParser { get tableHeader() { return this.columns.filter(col => !col.isLink).map(column => `${column.name}\n`) } get columnNames() { return this.parent.columnNames } buildJson() { return JSON.stringify(this.coreTable, undefined, 2) } buildCsv() { return new Particle(this.coreTable).asCsv } buildTsv() { return new Particle(this.coreTable).asTsv } get columns() { const {columnNames} = this return columnNames.map((name, index) => { const isLink = name.endsWith("Link") const linkIndex = columnNames.indexOf(name + "Link") return { name, isLink, linkIndex } }) } toRow(row) { const {columns} = this const atoms = columns.map(col => row[col.name]) let str = "" let column = 0 const columnCount = columns.length while (column < columnCount) { const col = columns[column] column++ const content = ((columnCount === column ? atoms.slice(columnCount - 1).join(" ") : atoms[column - 1]) ?? "").toString() if (col.isLink) continue const isTimestamp = col.name.toLowerCase().includes("time") && /^\d{10}(\d{3})?$/.test(content) const text = isTimestamp ? new Date(parseInt(content.length === 10 ? content * 1000 : content)).toLocaleString() : content let tagged = text const link = atoms[col.linkIndex] const isUrl = content.match(/^https?\:[^ ]+$/) if (col.linkIndex > -1 && link) tagged = `${text}` else if (col.name.endsWith("Url")) tagged = `${col.name.replace("Url", "")}` else if (isUrl) tagged = `${text}` str += `${tagged}\n` } return str } get coreTable() { return this.parent.coreTable } get tableBody() { return this.coreTable .map(row => `${this.toRow(row)}`) .join("\n") } buildHtml() { return `${this.tableHeader.join("\n")}${this.tableBody}
` } buildTxt() { return this.parent.delimitedData || new Particle(this.coreTable).asCsv } } class katexParser extends abstractScrollWithRequirementsParser { createParserCombinator() { return new Particle.ParserCombinator(lineOfCodeParser, undefined, undefined) } get codeAtom() { return this.getAtomsFrom(0) } get requireOnce() { return ` ` } get copyFromExternal() { return `.katex.min.css .katex.min.js` } buildInstance() { const id = this._getUid() const content = this.content === undefined ? "" : this.content return `
${content + this.subparticlesToString()}
` } buildTxt() { return ( this.content ? this.content : "" )+ this.subparticlesToString() } } class helpfulNotFoundParser extends abstractScrollWithRequirementsParser { get filePathAtom() { return this.getAtomsFrom(0) } get copyFromExternal() { return `.helpfulNotFound.js` } buildInstance() { return `

` } } class slideshowParser extends abstractScrollWithRequirementsParser { get copyFromExternal() { return `.jquery-3.7.1.min.js .slideshow.js` } buildHtml() { return `
` } } class tableSearchParser extends abstractScrollWithRequirementsParser { get requireOnce() { return ` ` } get copyFromExternal() { return `.jquery-3.7.1.min.js .datatables.css .dayjs.min.js .datatables.js .tableSearch.js` } buildInstance() { return "" } } class abstractCommentParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(this._getBlobParserCatchAllParser())} getErrors() { return [] } get commentAtom() { return this.getAtom(0) } get commentAtom() { return this.getAtomsFrom(1) } get bindTo() { return `next` } buildHtml() { return `` } } class commentParser extends abstractCommentParser { createParserCombinator() { return new Particle.ParserCombinator(this._getBlobParserCatchAllParser())} getErrors() { return [] } } class counterpointParser extends commentParser { createParserCombinator() { return new Particle.ParserCombinator(this._getBlobParserCatchAllParser())} getErrors() { return [] } } class slashCommentParser extends abstractCommentParser { createParserCombinator() { return new Particle.ParserCombinator(this._getBlobParserCatchAllParser())} getErrors() { return [] } get isPopular() { return true } } class thanksToParser extends abstractCommentParser { createParserCombinator() { return new Particle.ParserCombinator(this._getBlobParserCatchAllParser())} getErrors() { return [] } } class scrollClearStackParser extends abstractScrollParser { get isHtml() { return true } buildHtmlSnippet() { return "" } buildHtml() { return this.root.clearBodyStack().trim() } } class cssParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(cssLineParser, undefined, undefined) } get cssAnyAtom() { return this.getAtomsFrom(0) } buildHtml() { return `` } get css() { return this.content ?? this.subparticlesToString() } buildCss() { return this.css } } class scrollBackgroundColorParser extends abstractScrollParser { get cssAnyAtom() { return this.getAtomsFrom(0) } buildHtml() { return `` } } class scrollFontColorParser extends abstractScrollParser { get cssAnyAtom() { return this.getAtomsFrom(0) } buildHtml() { return `` } } class scrollFontParser extends abstractScrollParser { get cueAtom() { return this.getAtom(0) } get fontFamilyAtom() { return this.getAtom(1) } get cssAnyAtom() { return this.getAtomsFrom(2) } buildHtml() { const font = this.content === "Slim" ? 
"Helvetica Neue; font-weight:100;" : this.content return `` } } class abstractQuickIncludeParser extends abstractScrollParser { get urlAtom() { return this.getAtom(0) } get dependencies() { return [this.filename]} get filename() { return this.getAtom(0) } } class quickCssParser extends abstractQuickIncludeParser { get urlAtom() { return this.getAtom(0) } buildHtml() { return `` } buildHtmlSnippet() { return "" } } class quickIncludeHtmlParser extends abstractQuickIncludeParser { get urlAtom() { return this.getAtom(0) } buildHtml() { return this.root.readFile(this.filename) } } class quickScriptParser extends abstractQuickIncludeParser { get urlAtom() { return this.getAtom(0) } buildHtml() { return `` } buildHtmlSnippet() { return "" } } class scrollDashboardParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(lineOfCodeParser, undefined, undefined) } get tableBody() { const items = this.topDownArray let str = "" for (let i = 0; i < items.length; i = i + 3) { str += this.makeRow(items.slice(i, i + 3)) } return str } makeRow(items) { return `` + items.map(particle => `${particle.cue}${particle.content}`).join("\n") + `\n` } buildHtml() { return `${this.tableBody}
` } buildTxt() { return this.subparticlesToString() } } class belowAsCodeParser extends abstractScrollParser { get integerAtom() { return this.getAtomsFrom(0).map(val => parseInt(val)) } get bindTo() { return `next` } method = "next" get selectedParticles() { const { method } = this let code = "" let particles = [] let next = this[method] let {howMany} = this while (howMany) { particles.push(next) next = next[method] howMany-- } if (this.reverse) particles.reverse() return particles } get code() { return this.selectedParticles.map(particle => particle.asString).join("\n") } reverse = false buildHtml() { return `${this.code.replace(/\` } get howMany() { let howMany = parseInt(this.getAtom(1)) if (!howMany || isNaN(howMany)) howMany = 1 return howMany } } class debugBelowParser extends belowAsCodeParser { get copyFromExternal() { return `.debug.css` } get code() { const mapFn = particle => { const atomTypes = particle.lineAtomTypes.split(" ") return `
${particle.constructor.name}${particle.atoms.map((atom, index) => `${atom}${atomTypes[index]}`).join(" ")}${(particle.length ? `
` + particle.map(mapFn).join("
") + `
` : "")}
`} return this.selectedParticles.map(mapFn).join("
") } buildHtml() { return `` + this.code } buildTxt() { const mapFn = particle => { const atomTypes = particle.lineAtomTypes.split(" ") return `${particle.constructor.name} ${particle.atoms.map((atom, index) => `${atomTypes[index]}:${atom}`).join(" ")}${(particle.length ? `\n ` + particle.map(mapFn).join("\n") + `` : "")}`} return this.selectedParticles.map(mapFn).join("\n") } buildParsers() {return this.buildTxt()} } class debugAboveParser extends debugBelowParser { get bindTo() { return `previous` } method = "previous" reverse = true } class debugAllParser extends debugBelowParser { get selectedParticles() { return this.root.getSubparticles()} } class belowAsCodeUntilParser extends belowAsCodeParser { get codeAtom() { return this.getAtomsFrom(0) } get howMany() { let howMany = 1 const query = this.content let particle = this.next while (particle !== this) { if (particle.getLine().startsWith(query)) return howMany particle = particle.next howMany++ } return howMany } } class aboveAsCodeParser extends belowAsCodeParser { get bindTo() { return `previous` } method = "previous" reverse = true } class belowAsHtmlParser extends belowAsCodeParser { get code() { return this.selectedParticles.filter(p => p.buildHtml).map(p => p.buildHtml()).join("\n") } } class aboveAsHtmlParser extends belowAsHtmlParser { method = "previous" reverse = true } class scrollDefParser extends abstractScrollParser { get stringAtom() { return this.getAtomsFrom(0) } buildParsers(index) { const idStuff = index ? "" : `boolean isMeasure true boolean isMeasureRequired true boolean isConceptDelimiter true` const description = this.content const cue = this.cue.replace("Def", "") const sortIndex = 1 + index/10 return `${cue}DefParser cue ${cue} extends abstractStringMeasureParser description ${description} float sortIndex ${sortIndex} ${idStuff}`.trim() } } class hakonParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(hakonContentParser, undefined, undefined) } buildHtml() { return `` } get css() { const {hakonParser} = this.root return new hakonParser(this.subparticlesToString()).compile() } buildCss() { return this.css } } class hamlParser extends abstractScrollParser { get urlAtom() { return this.getAtom(0) } get stringAtom() { return this.getAtomsFrom(1) } get tag() { return this.atoms[0].split(/[#\.]/).shift().replace("%", "") } get htmlId() { const idMatch = this.atoms[0].match(/#([\w-]+)/) return idMatch ? idMatch[1] : "" } get htmlClasses() { return this.atoms[0].match(/\.([\w-]+)/g)?.map(cls => cls.slice(1)) || []; } buildHtml() { const {htmlId, htmlClasses, content, tag} = this this.parent.sectionStack.unshift(``) const attrs = [htmlId ? ' id="' + htmlId + '"' : "", htmlClasses.length ? ' class="' + htmlClasses.join(" ") + '"' : ""].join(" ").trim() return `<${tag}${attrs ? " " + attrs : ""}>${content || ""}` } buildTxt() { return this.content } } class hamlTagParser extends hamlParser { } class abstractHtmlParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(htmlLineParser, undefined, undefined) } get htmlAnyAtom() { return this.getAtomsFrom(0) } buildHtml() { return `${this.content ?? ""}${this.subparticlesToString()}` } buildTxt() { return "" } } class htmlParser extends abstractHtmlParser { } class htmlInlineParser extends abstractHtmlParser { get htmlAnyAtom() { return this.getAtom(0) } get isPopular() { return true } get isHtml() { return true } buildHtml() { return `${this.getLine() ?? 
""}${this.subparticlesToString()}` } } class scrollBrParser extends abstractScrollParser { get integerAtom() { return this.getAtomsFrom(0).map(val => parseInt(val)) } get isHtml() { return true } buildHtml() { return `
`.repeat(parseInt(this.getAtom(1) || 1)) } } class iframesParser extends abstractScrollParser { get urlAtom() { return this.getAtomsFrom(0) } buildHtml() { return this.atoms.slice(1).map(url => ``).join("\n") } } class abstractCaptionedParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"caption" : captionAftertextParser, "//" : slashCommentParser}), undefined) } get cueAtom() { return this.getAtom(0) } get urlAtom() { return this.getAtom(1) } buildHtml(buildSettings) { const caption = this.getParticle("caption") const captionFig = caption ? `
${caption.buildHtml()}
` : "" const {figureWidth} = this const widthStyle = figureWidth ? `width:${figureWidth}px; margin: auto;` : "" const float = this.has("float") ? `margin: 20px; float: ${this.get("float")};` : "" return `
${this.getFigureContent(buildSettings)}${captionFig}
` } get figureWidth() { return this.get("width") } } class scrollImageParser extends abstractCaptionedParser { createParserCombinator() { return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"id" : aftertextIdParser, "class" : classMarkupParser, "classes" : classesMarkupParser, "hoverNote" : hoverNoteParser, "link" : scrollLinkParser, "click" : scrollClickParser, "email" : emailLinkParser, "target" : linkTargetParser, "openGraph" : openGraphParser}), [{regex: /^https?\:/, parser: quickLinkParser},{regex: /^[^\s]+\.(html|htm)/, parser: quickRelativeLinkParser}]) } get atomIndex() { return 1 } get isPopular() { return true } get dimensions() { const width = this.get("width") const height = this.get("height") if (width || height) return {width, height} if (!this.isNodeJs()) return {} const src = this.filename // If its a local image, get the dimensions and put them in the HTML // to avoid flicker if (src.startsWith("http:") || src.startsWith("https:")) return {} if (this._dimensions) return this._dimensions try { const sizeOf = require("image-size") const path = require("path") const fullImagePath = path.join(this.root.folderPath, src) this._dimensions = sizeOf(fullImagePath) return this._dimensions } catch (err) { console.error(err) } return {} } get figureWidth() { return this.dimensions.width } get filename() { return this.getAtom(this.atomIndex) } get dependencies() { return [this.filename]} getFigureContent(buildSettings) { const linkRelativeToCompileTarget = (buildSettings ? (buildSettings.relativePath ?? "") : "") + this.filename const {width, height} = this.dimensions let dimensionAttributes = width || height ? `width="${width}" height="${height}" ` : "" // Todo: can we reuse more code from aftertext? const className = this.has("class") ? ` class="${this.get("class")}" ` : "" const id = this.has("id") ? ` id="${this.get("id")}" ` : "" const clickLink = this.find(particle => particle.definition.isOrExtendsAParserInScope(["scrollLinkParser"])) || linkRelativeToCompileTarget const target = this.has("target") ? this.get("target") : (this.has("link") ? "" : "_blank") return `` } buildTxt() { const subparticles = this.filter(particle => particle.buildTxt).map(particle => particle.buildTxt()).filter(i => i).join("\n") return "[Image Omitted]" + (subparticles ? "\n " + subparticles.replace(/\n/g, "\n ") : "") } } class quickImageParser extends scrollImageParser { get urlAtom() { return this.getAtom(0) } get atomIndex() { return 0 } } class qrcodeParser extends abstractCaptionedParser { getFigureContent() { const url = this.atoms[1] const isNode = this.isNodeJs() if (isNode) { const {externalsPath} = this.root const path = require("path") const {qrcodegen, toSvgString} = require(path.join(externalsPath, ".qrcodegen.js")) const QRC = qrcodegen.QrCode; const qr0 = QRC.encodeText(url, QRC.Ecc.MEDIUM); const svg = toSvgString(qr0, 4); // See qrcodegen-input-demo return svg } return `Not yet supported in browser.` } } class youtubeParser extends abstractCaptionedParser { getFigureContent() { const url = this.getAtom(1).replace("youtube.com/watch?v=", "youtube.com/embed/") return `
` } } class youTubeParser extends youtubeParser { } class importParser extends abstractScrollParser { get preBuildCommandAtom() { return this.getAtom(0) } get filePathAtom() { return this.getAtomsFrom(1) } buildHtml() { return "" } } class scrollImportedParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(this._getBlobParserCatchAllParser())} getErrors() { return [] } get preBuildCommandAtom() { return this.getAtom(0) } get filePathAtom() { return this.getAtomsFrom(1) } get suggestInAutocomplete() { return false } buildHtml() { return "" } getErrors() { if (this.get("exists") === "false" && this.previous.getLine() !== "// optional") return [this.makeError(`File '${this.atoms[1]}' does not exist.`)] return [] } } class quickImportParser extends abstractScrollParser { get urlAtom() { return this.getAtom(0) } get isPopular() { return true } buildHtml() { return "" } } class scriptParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(scriptLineParser, undefined, undefined) } get javascriptAnyAtom() { return this.getAtomsFrom(0) } buildHtml() { return `` } get scriptContent() { return this.content ?? this.subparticlesToString() } buildJs() { return this.scriptContent } } class jsonScriptParser extends abstractScrollParser { get cueAtom() { return this.getAtom(0) } get urlAtom() { return this.getAtom(1) } buildHtml() { const varName = this.filename.split("/").pop().replace(".json", "") return `` } get filename() { return this.getAtom(1) } } class scrollLeftRightButtonsParser extends abstractScrollParser { buildHtmlSnippet() { return "" } buildHtml() { const { linkToPrevious, linkToNext } = this.root if (!linkToPrevious) return "" const style = `a.keyboardNav {display:block;position:absolute;top:0.25rem; color: rgba(204,204,204,.8); font-size: 1.875rem; line-height: 1.7rem;}a.keyboardNav:hover{color: #333;text-decoration: none;}` return `<>` } } class keyboardNavParser extends abstractScrollParser { get urlAtom() { return this.getAtomsFrom(0) } buildHtmlSnippet() { return "" } buildHtml() { const {root} = this const linkToPrevious = this.getAtom(1) ?? root.linkToPrevious const linkToNext = this.getAtom(2) ?? root.linkToNext const script = `` return `` } } class printUsageStatsParser extends abstractScrollParser { get stats() { const input = this.root.allScrollFiles.map(file => file.scrollProgram).map(program => program.parserIds.join("\n")).join("\n") const result = input.split('\n').reduce((acc, atom) => (acc[atom] = (acc[atom] || 0) + 1, acc), {}) const rows = Object.entries(result).map(([atom, count]) => { return {atom, count}}) const sorted = this.root.lodash.sortBy(rows, "count").reverse() return "parserId uses\n" + sorted.map(row => `${row.atom} ${row.count}`).join('\n') } buildHtml() { // A hacky but simple way to do this for now. 
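// Sketch of the idea (assumed particle semantics, with appendLine adding child lines; indentation and the sample stats row are illustrative): the code below appends a throwaway sibling roughly equivalent to the Scroll snippet
//   table
//    delimiter 
//    printTable
//    data
//     parserId uses
//     someParser 12
// builds its HTML, then destroys the sibling so it never persists in the document.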
const particle = this.appendSibling("table") particle.appendLine("delimiter ") particle.appendLine("printTable") const dataParticle = particle.appendLine("data") dataParticle.setSubparticles(this.stats) const html = particle.buildHtml() particle.destroy() return html } buildTxt() { return this.stats } buildCsv() { return this.stats.replace(/ /g, ",") } } class printScrollLeetSheetParser extends abstractScrollParser { get parsersToDocument() { const clone = this.root.clone() clone.setSubparticles("") const atoms = clone.getAutocompleteResultsAt(0,0).matches.map(a => a.text) atoms.push("blankline") // manually add blank line atoms.push("Catch All Paragraph.") // manually add catch all paragraph atoms.push("") // manually add html atoms.sort() clone.setSubparticles(atoms.join("\n").replace(/blankline/, "")) // insert blank line in right spot return clone } sortDocs(docs) { return docs.map(particle => { const {definition} = particle const {id, description, isPopular, examples, popularity} = definition const tags = definition.get("tags") || "" if (tags.includes("deprecate") || tags.includes("experimental")) return null const category = this.getCategory(tags) const note = this.getNote(category) return {id: definition.cueIfAny || id, description, isPopular, examples, note, popularity: Math.ceil(parseFloat(popularity) * 100000)} }).filter(i => i).sort((a, b) => a.id.localeCompare(b.id)) } makeLink(examples, cue) { // if (!examples.length) console.log(cue) // find particles that need docs const example = examples.length ? examples[0].subparticlesToString() : cue const base = `https://try.scroll.pub/` const particle = new Particle() particle.appendLineAndSubparticles("scroll", "theme gazette\n" + example) return base + "#" + encodeURIComponent(particle.asString) } docToHtml(doc) { const css = `#scrollLeetSheet {color: grey;} #scrollLeetSheet a {color: #3498db; }` return `
` + doc.map(obj => `
${obj.isPopular ? "" : ""}${obj.id} ${obj.description}${obj.isPopular ? "" : ""}${obj.note}
`).join("\n") + "
" } buildHtml() { return this.docToHtml(this.sortDocs(this.parsersToDocument)) } buildTxt() { return this.sortDocs(this.parsersToDocument).map(obj => `${obj.id} - ${obj.description}`).join("\n") } getCategory(input) { return "" } getNote() { return "" } buildCsv() { const rows = this.sortDocs(this.parsersToDocument).map(obj => { const {id, isPopular, description, popularity, category} = obj return { id, isPopular, description, popularity, category } }) return new Particle(this.root.lodash.sortBy(rows, "isPopular")).asCsv } } class printparsersLeetSheetParser extends printScrollLeetSheetParser { buildHtml() { return "

Parser Definition Parsers define parsers that acquire, analyze and act on code.

" + this.docToHtml(this.sortDocs(this.parsersToDocument)) + "

Atom Definition Parsers analyze the atoms in a line.

" + this.docToHtml(this.sortDocs(this.atomParsersToDocument)) } makeLink() { return "" } categories = "assemblePhase acquirePhase analyzePhase actPhase".split(" ") getCategory(tags) { return tags.split(" ").filter(w => w.endsWith("Phase"))[0] } getNote(category) { return ` A${category.replace("Phase", "").substr(1)}Time.` } get atomParsersToDocument() { const parsersParser = require("scrollsdk/products/parsers.nodejs.js") const clone = new parsersParser("anyAtom\n ").clone() const parserParticle = clone.getParticle("anyAtom") const atoms = clone.getAutocompleteResultsAt(1,1).matches.map(a => a.text) atoms.sort() parserParticle.setSubparticles(atoms.join("\n")) return parserParticle } get parsersToDocument() { const parsersParser = require("scrollsdk/products/parsers.nodejs.js") const clone = new parsersParser("latinParser\n ").clone() const parserParticle = clone.getParticle("latinParser") const atoms = clone.getAutocompleteResultsAt(1,1).matches.map(a => a.text) atoms.sort() parserParticle.setSubparticles(atoms.join("\n")) clone.appendLine("myParser") clone.appendLine("myAtom") return parserParticle } } class abstractMeasureParser extends abstractScrollParser { get measureNameAtom() { return this.getAtom(0) } get typeForWebForms() { return `text` } get isComputed() { return false } get sortIndex() { return 1.9 } get isMeasure() { return true } buildHtmlSnippet() { return "" } buildHtml() { return "" } get measureValue() { return this.content ?? "" } get measureName() { return this.getCuePath().replace(/ /g, "_") } } class abstractAtomMeasureParser extends abstractMeasureParser { get measureNameAtom() { return this.getAtom(0) } get atomAtom() { return this.getAtom(1) } } class abstractEmailMeasureParser extends abstractAtomMeasureParser { get measureNameAtom() { return this.getAtom(0) } get emailAddressAtom() { return this.getAtom(1) } get typeForWebForms() { return `email` } } class abstractUrlMeasureParser extends abstractAtomMeasureParser { get measureNameAtom() { return this.getAtom(0) } get urlAtom() { return this.getAtom(1) } get typeForWebForms() { return `url` } } class abstractStringMeasureParser extends abstractMeasureParser { get stringAtom() { return this.getAtomsFrom(0) } } class abstractIdParser extends abstractStringMeasureParser { get isConceptDelimiter() { return true } get isMeasureRequired() { return true } get sortIndex() { return 1 } getErrors() { const errors = super.getErrors() let requiredMeasureNames = this.root.measures.filter(measure => measure.isMeasureRequired).map(measure => measure.Name).filter(name => name !== "id") if (!requiredMeasureNames.length) return errors let next = this.next while (requiredMeasureNames.length && next.cue !== "id" && next.index !== 0) { requiredMeasureNames = requiredMeasureNames.filter(i => i !== next.cue) next = next.next } requiredMeasureNames.forEach(name => errors.push(this.makeError(`Concept "${this.content}" is missing required measure "${name}".`)) ) return errors } } class abstractTextareaMeasureParser extends abstractMeasureParser { createParserCombinator() { return new Particle.ParserCombinator(this._getBlobParserCatchAllParser())} getErrors() { return [] } get typeForWebForms() { return `textarea` } get measureValue() { return this.subparticlesToString().replace(/\n/g, "\\n") } } class abstractNumericMeasureParser extends abstractMeasureParser { get typeForWebForms() { return `number` } get measureValue() { const {content} = this return content === undefined ? 
"" : parseFloat(content) } } class abstractIntegerMeasureParser extends abstractNumericMeasureParser { get measureNameAtom() { return this.getAtom(0) } get integerAtom() { return parseInt(this.getAtom(1)) } } class abstractFloatMeasureParser extends abstractNumericMeasureParser { get measureNameAtom() { return this.getAtom(0) } get floatAtom() { return parseFloat(this.getAtom(1)) } } class abstractPercentageMeasureParser extends abstractNumericMeasureParser { get measureNameAtom() { return this.getAtom(0) } get percentAtom() { return this.getAtom(1) } get measureValue() { const {content} = this return content === undefined ? "" : parseFloat(content) } } class abstractEnumMeasureParser extends abstractMeasureParser { get measureNameAtom() { return this.getAtom(0) } get enumAtom() { return this.getAtom(1) } } class abstractBooleanMeasureParser extends abstractMeasureParser { get measureNameAtom() { return this.getAtom(0) } get booleanAtom() { return this.getAtom(1) } get measureValue() { const {content} = this return content === undefined ? "" : content == "true" } } class metaTagsParser extends abstractScrollParser { buildHtmlSnippet() { return "" } buildHtml() { const {root} = this const { title, description, canonicalUrl, gitRepo, scrollVersion, openGraphImage } = root const rssFeedUrl = root.get("rssFeedUrl") const favicon = root.get("favicon") const faviconTag = favicon ? `` : "" const rssTag = rssFeedUrl ? `` : "" const gitTag = gitRepo ? `` : "" return ` ${title} ${faviconTag} ${gitTag} ${rssTag} ` } } class quoteParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(quoteLineParser, undefined, undefined) } buildHtml() { return `
${this.subparticlesToString()}
` } buildTxt() { return this.subparticlesToString() } } class redirectToParser extends abstractScrollParser { get cueAtom() { return this.getAtom(0) } get urlAtom() { return this.getAtom(1) } buildHtml() { return `` } } class abstractVariableParser extends abstractScrollParser { get preBuildCommandAtom() { return this.getAtom(0) } get stringAtom() { return this.getAtomsFrom(1) } isTopMatter = true buildHtml() { return "" } } class replaceParser extends abstractVariableParser { createParserCombinator() { return new Particle.ParserCombinator(this._getBlobParserCatchAllParser())} getErrors() { return [] } } class replaceJsParser extends replaceParser { createParserCombinator() { return new Particle.ParserCombinator(this._getBlobParserCatchAllParser())} getErrors() { return [] } get javascriptAnyAtom() { return this.getAtomsFrom(0) } } class replaceNodejsParser extends abstractVariableParser { createParserCombinator() { return new Particle.ParserCombinator(this._getBlobParserCatchAllParser())} getErrors() { return [] } get javascriptAnyAtom() { return this.getAtomsFrom(0) } } class runScriptParser extends abstractScrollParser { get cueAtom() { return this.getAtom(0) } get urlAtom() { return this.getAtom(1) } get filenameIndex() { return 1 } get dependencies() { return [this.filename]} results = "Not yet run" async execute() { if (!this.filename) return await this.root.fetch(this.filename) // todo: make async const { execSync } = require("child_process") this.results = execSync(this.command) } get command() { const path = this.root.path const {filename }= this const fullPath = this.root.makeFullPath(filename) const ext = path.extname(filename).slice(1) const interpreterMap = { php: "php", py: "python3", rb: "ruby", pl: "perl", sh: "sh" } return [interpreterMap[ext], fullPath].join(" ") } buildHtml() { return this.buildTxt() } get filename() { return this.getAtom(this.filenameIndex) } buildTxt() { return this.results.toString().trim() } } class quickRunScriptParser extends runScriptParser { get urlAtom() { return this.getAtom(0) } get filenameIndex() { return 0 } } class endSnippetParser extends abstractScrollParser { buildHtml() { return "" } } class toStampParser extends abstractScrollParser { get filePathAtom() { return this.getAtomsFrom(0) } buildTxt() { return this.makeStamp(this.content) } buildHtml() { return `
${this.buildTxt()}
` } makeStamp(dir) { const fs = require('fs'); const path = require('path'); const { execSync } = require('child_process'); let stamp = 'stamp\n'; const handleFile = (indentation, relativePath, itemPath, ) => { stamp += `${indentation}${relativePath}\n`; const content = fs.readFileSync(itemPath, 'utf8'); stamp += `${indentation} ${content.replace(/\n/g, `\n${indentation} `)}\n`; } let gitTrackedFiles function processDirectory(currentPath, depth) { const items = fs.readdirSync(currentPath); items.forEach(item => { const itemPath = path.join(currentPath, item); const relativePath = path.relative(dir, itemPath); //if (!gitTrackedFiles.has(item)) return const stats = fs.statSync(itemPath); const indentation = ' '.repeat(depth); if (stats.isDirectory()) { stamp += `${indentation}${relativePath}/\n`; processDirectory(itemPath, depth + 1); } else if (stats.isFile()) handleFile(indentation, relativePath, itemPath) }); } const stats = fs.statSync(dir); if (stats.isDirectory()) { // Get list of git-tracked files gitTrackedFiles = new Set(execSync('git ls-files', { cwd: dir, encoding: 'utf-8' }) .split('\n') .filter(Boolean)) processDirectory(dir, 1) } else handleFile(" ", dir, dir) return stamp.trim(); } } class stampParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(stampFileParser, undefined, [{regex: /\/$/, parser: stampFolderParser}]) } get preBuildCommandAtom() { return this.getAtom(0) } execute() { const dir = this.root.folderPath this.forEach(particle => particle.execute(dir)) } } class scrollStumpParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(stumpContentParser, undefined, undefined) } buildHtml() { const {stumpParser} = this return new stumpParser(this.subparticlesToString()).compile() } get stumpParser() { return this.isNodeJs() ? require("scrollsdk/products/stump.nodejs.js") : stumpParser } } class stumpNoSnippetParser extends scrollStumpParser { buildHtmlSnippet() { return "" } } class plainTextParser extends abstractScrollParser { createParserCombinator() { return new Particle.ParserCombinator(plainTextLineParser, undefined, undefined) } get stringAtom() { return this.getAtomsFrom(0) } buildHtml() { return this.buildTxt() } buildTxt() { return `${this.content ?? 
""}${this.subparticlesToString()}` } } class plainTextOnlyParser extends plainTextParser { buildHtml() { return "" } } class scrollThemeParser extends abstractScrollParser { get scrollThemeAtom() { return this.getAtomsFrom(0) } get copyFromExternal() { return `// Note this will be replaced at runtime` } get isPopular() { return true } get copyFromExternal() { return this.files.join(" ") } get files() { return this.atoms.slice(1).map(name => `.${name}.css`).concat([".scroll.css"]) } buildHtml() { return this.files.map(name => ``).join("\n") } } class abstractAftertextAttributeParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get isAttribute() { return true } get htmlAttributes() { return `${this.cue}="${this.content}"` } buildHtml() { return "" } } class aftertextIdParser extends abstractAftertextAttributeParser { get cueAtom() { return this.getAtom(0) } get htmlIdAtom() { return this.getAtom(1) } } class aftertextStyleParser extends abstractAftertextAttributeParser { get cssAnyAtom() { return this.getAtomsFrom(0) } htmlAttributes = "" // special case this one get css() { return `${this.property}:${this.content};` } } class aftertextFontParser extends aftertextStyleParser { get cueAtom() { return this.getAtom(0) } get fontFamilyAtom() { return this.getAtom(1) } get cssAnyAtom() { return this.getAtomsFrom(2) } get property() { return `font-family` } get css() { if (this.content === "Slim") return "font-family:Helvetica Neue; font-weight:100;" return super.css } } class aftertextColorParser extends aftertextStyleParser { get cssAnyAtom() { return this.getAtomsFrom(0) } get property() { return `color` } } class aftertextOnclickParser extends abstractAftertextAttributeParser { get javascriptAnyAtom() { return this.getAtomsFrom(0) } } class aftertextHiddenParser extends abstractAftertextAttributeParser { get cueAtom() { return this.getAtom(0) } } class aftertextTagParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get htmlTagAtom() { return this.getAtom(1) } buildHtml() { return "" } } class abstractAftertextDirectiveParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get stringAtom() { return this.getAtomsFrom(1) } isMarkup = true buildHtml() { return "" } getErrors() { const errors = super.getErrors() if (!this.isMarkup || this.matchWholeLine) return errors const inserts = this.getInserts(this.parent.originalTextPostLinkify) // todo: make AbstractParticleError class exported by sdk to allow Parsers to define their own error types. 
// todo: also need to be able to map lines back to their line in source (pre-imports)
if (!inserts.length) errors.push(this.makeError(`No match found for "${this.getLine()}".`)) return errors } get pattern() { return this.getAtomsFrom(1).join(" ") } get shouldMatchAll() { return this.has("matchAll") } getMatches(text) { const { pattern } = this const escapedPattern = pattern.replace(/[-\/\\^$*+?.()|[\]{}]/g, "\\$&") return [...text.matchAll(new RegExp(escapedPattern, "g"))].map(match => { const { index } = match const endIndex = index + pattern.length return [ { index, string: `<${this.openTag}${this.allAttributes}>`, endIndex }, { index: endIndex, endIndex, string: `</${this.closeTag}>` } ] }) } getInserts(text) { const matches = this.getMatches(text) if (!matches.length) return false if (this.shouldMatchAll) return matches.flat() const match = this.getParticle("match") if (match) return match.indexes .map(index => matches[index]) .filter(i => i) .flat() return matches[0] } get allAttributes() { const attr = this.attributes.join(" ") return attr ? " " + attr : "" } get attributes() { return [] } get openTag() { return this.tag } get closeTag() { return this.tag } } class abstractMarkupParser extends abstractAftertextDirectiveParser { createParserCombinator() { return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"matchAll" : matchAllParser, "match" : matchParser}), undefined) } get matchWholeLine() { return this.getAtomsFrom(this.patternStartsAtAtom).length === 0 } get pattern() { return this.matchWholeLine ? this.parent.originalText : this.getAtomsFrom(this.patternStartsAtAtom).join(" ") } patternStartsAtAtom = 1 } class boldParser extends abstractMarkupParser { tag = "b" } class italicsParser extends abstractMarkupParser { tag = "i" } class underlineParser extends abstractMarkupParser { tag = "u" } class afterTextCenterParser extends abstractMarkupParser { tag = "center" } class aftertextCodeParser extends abstractMarkupParser { tag = "code" } class aftertextStrikeParser extends abstractMarkupParser { tag = "s" } class classMarkupParser extends abstractMarkupParser { get cueAtom() { return this.getAtom(0) } get classNameAtom() { return this.getAtom(1) } tag = "span" get applyToParentElement() { return this.atoms.length === 2 } getInserts(text) { // If no select text is added, set the class on the parent element.
if (this.applyToParentElement) return [] return super.getInserts(text) } get className() { return this.getAtom(1) } get attributes() { return [`class="${this.className}"`] } get matchWholeLine() { return this.applyToParentElement } get pattern() { return this.matchWholeLine ?
this.parent.content : this.getAtomsFrom(2).join(" ") } } class classesMarkupParser extends classMarkupParser { applyToParentElement = true get className() { return this.content } } class hoverNoteParser extends classMarkupParser { createParserCombinator() { return new Particle.ParserCombinator(lineOfTextParser, undefined, undefined) } get cueAtom() { return this.getAtom(0) } get pattern() { return this.getAtomsFrom(1).join(" ") } get attributes() { return [`class="scrollHoverNote"`, `title="${this.hoverNoteText}"`] } get hoverNoteText() { return this.subparticlesToString().replace(/\n/g, " ") } } class scrollLinkParser extends abstractMarkupParser { createParserCombinator() {class programParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(programLinkParser, undefined, undefined) } get cueAtom() { return this.getAtom(0) } get encoded() { return encodeURIComponent(this.subparticlesToString()) } } return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"comment" : commentParser, "!" : counterpointParser, "//" : slashCommentParser, "thanksTo" : thanksToParser, "target" : linkTargetParser, "title" : linkTitleParser, "program" : programParser}), undefined) } get cueAtom() { return this.getAtom(0) } get urlAtom() { return this.getAtom(1) } tag = "a" buildTxt() { return this.root.ensureAbsoluteLink(this.link) + " " + this.pattern } get link() { const {baseLink} = this if (this.has("program")) return baseLink + this.getParticle("program").encoded return baseLink } get baseLink() { const link = this.getAtom(1) const isAbsoluteLink = link.includes("://") if (isAbsoluteLink) return link const relativePath = this.parent.buildSettings?.relativePath || "" return relativePath + link } get linkAttribute() { return "href" } get attributes() { const attrs = [`${this.linkAttribute}="${this.link}"`] const options = ["title", "target"] options.forEach(option => { const particle = this.getParticle(option) if (particle) attrs.push(`${option}="${particle.content}"`) }) return attrs } patternStartsAtAtom = 2 } class scrollClickParser extends scrollLinkParser { get linkAttribute() { return "onclick" } } class emailLinkParser extends scrollLinkParser { get attributes() { return [`href="mailto:${this.link}"`] } } class quickLinkParser extends scrollLinkParser { get urlAtom() { return this.getAtom(0) } get link() { return this.cue } patternStartsAtAtom = 1 } class quickRelativeLinkParser extends scrollLinkParser { get urlAtom() { return this.getAtom(0) } get link() { return this.cue } patternStartsAtAtom = 1 } class datelineParser extends abstractAftertextDirectiveParser { getInserts() { const {day} = this if (!day) return false return [{ index: 0, string: `${day} — ` }] } matchWholeLine = true get day() { let day = this.content || this.root.date if (!day) return "" return this.root.dayjs(day).format(`MMMM D, YYYY`) } } class dayjsParser extends abstractAftertextDirectiveParser { getInserts() { const dayjs = this.root.dayjs const days = eval(this.content) const index = this.parent.originalTextPostLinkify.indexOf("days") return [{ index, string: `${days} ` }] } } class inlineMarkupsOnParser extends abstractAftertextDirectiveParser { get inlineMarkupNameAtom() { return this.getAtomsFrom(0) } get shouldMatchAll() { return true } get markups() { const {root} = this let markups = [{delimiter: "`", tag: "code", exclusive: true, name: "code"},{delimiter: "*", tag: "strong", name: "bold"}, 
{delimiter: "_", tag: "em", name: "italics"}] // only add katex markup if the root doc has katex. if (root.has("katex")) markups.unshift({delimiter: "$", tag: "span", attributes: ' class="scrollKatex"', exclusive: true, name: "katex"}) if (this.content) return markups.filter(markup => this.content.includes(markup.name)) if (root.has("inlineMarkups")) { root.getParticle("inlineMarkups").forEach(markup => { const delimiter = markup.getAtom(0) const tag = markup.getAtom(1) // todo: add support for providing custom functions for inline markups? // for example, !2+2! could run eval, or :about: could search a link map. const attributes = markup.getAtomsFrom(2).join(" ") markups = markups.filter(mu => mu.delimiter !== delimiter) // Remove any overridden markups if (tag) markups.push({delimiter, tag, attributes}) }) } return markups } matchWholeLine = true getMatches(text) { const exclusives = [] return this.markups.map(markup => this.applyMarkup(text, markup, exclusives)).filter(i => i).flat() } applyMarkup(text, markup, exclusives = []) { const {delimiter, tag, attributes} = markup const escapedDelimiter = delimiter.replace(/[-\/\\^$*+?.()|[\]{}]/g, "\\$&") const pattern = new RegExp(`${escapedDelimiter}[^${escapedDelimiter}]+${escapedDelimiter}`, "g") const delimiterLength = delimiter.length return [...text.matchAll(pattern)].map(match => { const { index } = match const endIndex = index + match[0].length // I'm too lazy to clean up sdk to write a proper inline markup parser so doing this for now. // The exclusive idea is to not try and apply bold or italic styles inside a TeX or code inline style. // Note that the way this is currently implemented any TeX in an inline code will get rendered, but code // inline of TeX will not. Seems like an okay tradeoff until a proper refactor and cleanup can be done. if (exclusives.some(exclusive => index >= exclusive[0] && index <= exclusive[1])) return undefined if (markup.exclusive) exclusives.push([index, endIndex]) return [ { index, string: `<${tag + (attributes ? " " + attributes : "")}>`, endIndex, consumeStartCharacters: delimiterLength }, { index: endIndex, endIndex, string: ``, consumeEndCharacters: delimiterLength } ] }).filter(i => i) } } class inlineMarkupParser extends inlineMarkupsOnParser { get cueAtom() { return this.getAtom(0) } get delimiterAtom() { return this.getAtom(1) } get tagOrUrlAtom() { return this.getAtom(2) } get htmlAttributesAtom() { return this.getAtomsFrom(3) } getMatches(text) { try { const delimiter = this.getAtom(1) const tag = this.getAtom(2) const attributes = this.getAtomsFrom(3).join(" ") return this.applyMarkup(text, {delimiter, tag, attributes}) } catch (err) { console.error(err) return [] } // Note: doubling up doesn't work because of the consumption characters. 
} } class linkifyParser extends abstractAftertextDirectiveParser { get cueAtom() { return this.getAtom(0) } get booleanAtom() { return this.getAtom(1) } } class abstractMarkupParameterParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } } class matchAllParser extends abstractMarkupParameterParser { } class matchParser extends abstractMarkupParameterParser { get integerAtom() { return this.getAtomsFrom(0).map(val => parseInt(val)) } get indexes() { return this.getAtomsFrom(1).map(num => parseInt(num)) } } class abstractHtmlAttributeParser extends ParserBackedParticle { buildHtml() { return "" } } class linkTargetParser extends abstractHtmlAttributeParser { get cueAtom() { return this.getAtom(0) } get codeAtom() { return this.getAtom(1) } } class blankLineParser extends ParserBackedParticle { get blankAtom() { return this.getAtom(0) } get isPopular() { return true } buildHtml() { return this.parent.clearSectionStack() } } class scrollFileAddressParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(scrollFileAddressParser, undefined, undefined) } get filePathAtom() { return this.getAtomsFrom(0) } } class chatLineParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(chatLineParser, undefined, undefined) } get stringAtom() { return this.getAtomsFrom(0) } } class lineOfCodeParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(lineOfCodeParser, undefined, undefined) } get codeAtom() { return this.getAtomsFrom(0) } } class commentLineParser extends ParserBackedParticle { get commentAtom() { return this.getAtomsFrom(0) } } class cssLineParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(cssLineParser, undefined, undefined) } get cssAnyAtom() { return this.getAtomsFrom(0) } } class abstractTableTransformParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"#" : h1Parser, "##" : h2Parser, "?" 
: scrollQuestionParser, "heatrix" : heatrixParser, "heatrixAdvanced" : heatrixAdvancedParser, "map" : mapParser, "scatterplot" : plotScatterplotParser, "barchart" : plotBarchartParser, "linechart" : plotLineChartParser, "sparkline" : sparklineParser, "printColumn" : printColumnParser, "printTable" : printTableParser, "//" : slashCommentParser, "br" : scrollBrParser, "splitYear" : scrollSplitYearParser, "splitDayName" : scrollSplitDayNameParser, "splitMonthName" : scrollSplitMonthNameParser, "splitMonth" : scrollSplitMonthParser, "splitDayOfMonth" : scrollSplitDayOfMonthParser, "splitDay" : scrollSplitDayOfWeekParser, "parseDate" : scrollParseDateParser, "groupBy" : scrollGroupByParser, "where" : scrollWhereParser, "select" : scrollSelectParser, "reverse" : scrollReverseParser, "compose" : scrollComposeParser, "compute" : scrollComputeParser, "eval" : scrollEvalParser, "rank" : scrollRankParser, "links" : scrollLinksParser, "limit" : scrollLimitParser, "shuffle" : scrollShuffleParser, "transpose" : scrollTransposeParser, "impute" : scrollImputeParser, "orderBy" : scrollOrderByParser, "assertRowCount" : assertRowCountParser, "rename" : scrollRenameParser, "summarize" : scrollSummarizeParser}), [{regex: /^ str.toLowerCase().trim() userColumnNames.forEach(userColumn => { // Strategy 1: Exact match
const exactMatch = availableColumnNames.find(col => col === userColumn) if (exactMatch) { result[userColumn] = exactMatch return } // Strategy 2: Case-insensitive match
const normalizedUserColumn = normalize(userColumn) const caseInsensitiveMatch = availableColumnNames.find(col => normalize(col) === normalizedUserColumn) if (caseInsensitiveMatch) { result[userColumn] = caseInsensitiveMatch return } // Strategy 3: Levenshtein distance match
const THRESHOLD = 2 // Consider matches with distance <= 2 as "very close"
let bestMatch = null let bestDistance = Infinity availableColumnNames.forEach(col => { const distance = this.root.levenshteinDistance(userColumn, col) if (distance < bestDistance) { bestDistance = distance bestMatch = col } }) // Only use Levenshtein match if it's very close
if (bestDistance <= THRESHOLD) { result[userColumn] = bestMatch return } // Strategy 4: Fallback - use original unmatched name
result[userColumn] = userColumn }) return result } connectColumnName(name) { return this.connectColumnNames([name])[name] } getErrors() { const errors = super.getErrors() if (errors.length && this.previous.cue !== "assertIgnoreBelowErrors") return errors return [] } getRunTimeEnumOptions(atom) { if (atom.atomTypeId === "columnNameAtom") return this.parent.columnNames return super.getRunTimeEnumOptions(atom) } getRunTimeEnumOptionsForValidation(atom) { // Note: this will fail if the CSV file hasnt been built yet.
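// Worked example of the connectColumnNames cascade above (illustrative column names; levenshteinDistance is supplied elsewhere by the root): with available columns ["Year", "sales_total"],
// the reference "year" misses Strategy 1 (exact) but hits Strategy 2 (case-insensitive) and maps to "Year"; "salestotal" misses both, and with an edit distance of 1 (<= THRESHOLD) Strategy 3 maps it to "sales_total";
// "profit" is nowhere within the threshold, so Strategy 4 returns it unchanged.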
if (atom.atomTypeId === "columnNameAtom") return this.parent.columnNames.concat(this.parent.columnNames.map(c => "-" + c)) // Add reverse names
return super.getRunTimeEnumOptions(atom) } } class abstractDateSplitTransformParser extends abstractTableTransformParser { get cueAtom() { return this.getAtom(0) } get columnNameAtom() { return this.getAtomsFrom(1) } get coreTable() { const columnName = this.getAtom(1) || this.detectDateColumn() if (!columnName) return this.parent.coreTable return this.parent.coreTable.map(row => { const newRow = {...row} try { const date = this.root.dayjs(row[columnName]) if (date.isValid()) newRow[this.newColumnName] = this.transformDate(date) } catch (err) {} return newRow }) } detectDateColumn() { const columns = this.parent.columnNames const dateColumns = ['date', 'created', 'published', 'timestamp'] for (const col of dateColumns) { if (columns.includes(col)) return col } for (const col of columns) { const sample = this.parent.coreTable[0][col] if (sample && this.root.dayjs(sample).isValid()) return col } return null } get columnNames() { return [...this.parent.columnNames, this.newColumnName] } transformDate(date) { const formatted = date.format(this.dateFormat) const isInt = !this.cue.includes("Name") return isInt ? parseInt(formatted) : formatted } } class scrollSplitYearParser extends abstractDateSplitTransformParser { get dateFormat() { return `YYYY` } get newColumnName() { return `year` } } class scrollSplitDayNameParser extends abstractDateSplitTransformParser { get dateFormat() { return `dddd` } get newColumnName() { return `dayName` } } class scrollSplitMonthNameParser extends abstractDateSplitTransformParser { get dateFormat() { return `MMMM` } get newColumnName() { return `monthName` } } class scrollSplitMonthParser extends abstractDateSplitTransformParser { get dateFormat() { return `M` } get newColumnName() { return `month` } } class scrollSplitDayOfMonthParser extends abstractDateSplitTransformParser { get dateFormat() { return `D` } get newColumnName() { return `dayOfMonth` } } class scrollSplitDayOfWeekParser extends abstractDateSplitTransformParser { get dateFormat() { return `d` } get newColumnName() { return `day` } } class scrollParseDateParser extends abstractTableTransformParser { createParserCombinator() {class formatParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get stringAtom() { return this.getAtom(1) } } return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"format" : formatParser}), undefined) } get cueAtom() { return this.getAtom(0) } get columnNameAtom() { return this.getAtom(1) } get coreTable() { const columnName = this.connectColumnName(this.getAtom(1)) const formatOut = this.get("format") || "YYYY-MM-DD" const {dayjs} = this.root return this.parent.coreTable.map(row => { const newRow = {...row} try { const value = row[columnName] if (value) { const date = dayjs(value) if (date.isValid()) newRow[columnName] = date.format(formatOut) } } catch (err) { console.error(`Error parsing date in column ${columnName}:`, err) } return newRow }) } } class scrollGroupByParser extends abstractTableTransformParser { createParserCombinator() {class reduceParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get columnNameAtom() { return this.getAtom(1) } get reductionTypeAtom() { return this.getAtom(2) } get newColumnNameAtom() { return this.getAtom(3) } get reduction() { return { source: this.getAtom(1),
reduction: this.getAtom(2), name: this.getAtom(3) || this.getAtomsFrom(1).join("_") } } } return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"reduce" : reduceParser}), undefined) } get columnNameAtom() { return this.getAtomsFrom(0) } get coreTable() { if (this._coreTable) return this._coreTable const groupByColNames = this.getAtomsFrom(1) const {coreTable} = this.parent if (!groupByColNames.length) return coreTable const newCols = this.findParticles("reduce").map(particle => particle.reduction) // Pivot is shorthand for group and reduce? const makePivotTable = (rows, groupByColumnNames, inputColumnNames, newCols) => { const colMap = {} inputColumnNames.forEach((col) => (colMap[col] = true)) const groupByCols = groupByColumnNames.filter((col) => colMap[col]) return new PivotTable(rows, inputColumnNames.map(c => {return {name: c}}), newCols).getNewRows(groupByCols) } class PivotTable { constructor(rows, inputColumns, outputColumns) { this._columns = {} this._rows = rows inputColumns.forEach((col) => (this._columns[col.name] = col)) outputColumns.forEach((col) => (this._columns[col.name] = col)) } _getGroups(allRows, groupByColNames) { const rowsInGroups = new Map() allRows.forEach((row) => { const groupKey = groupByColNames.map((col) => row[col]?.toString().replace(/ /g, "") || "").join(" ") if (!rowsInGroups.has(groupKey)) rowsInGroups.set(groupKey, []) rowsInGroups.get(groupKey).push(row) }) return rowsInGroups } getNewRows(groupByCols) { // make new particles const rowsInGroups = this._getGroups(this._rows, groupByCols) // Any column in the group should be reused by the children const columns = [ { name: "count", type: "number", min: 0, }, ] groupByCols.forEach((colName) => columns.push(this._columns[colName])) const colsToReduce = Object.values(this._columns).filter((col) => !!col.reduction) colsToReduce.forEach((col) => columns.push(col)) // for each group const rows = [] const totalGroups = rowsInGroups.size for (let [groupId, group] of rowsInGroups) { const firstRow = group[0] const newRow = {} groupByCols.forEach((col) => newRow[col] = firstRow ? firstRow[col] : 0 ) newRow.count = group.length // todo: add more reductions? count, stddev, median, variance. colsToReduce.forEach((col) => { const sourceColName = col.source const reduction = col.reduction const newColName = col.name if (reduction === "concat") { newRow[newColName] = group.map((row) => row[sourceColName]).join(" ") return } if (reduction === "first") { newRow[newColName] = group.find((row) => row[sourceColName] !== "")?.[sourceColName] return } const values = group.map((row) => row[sourceColName]).filter((val) => typeof val === "number" && !isNaN(val)) let reducedValue = firstRow[sourceColName] if (reduction === "sum") reducedValue = values.reduce((prev, current) => prev + current, 0) if (reduction === "max") reducedValue = Math.max(...values) if (reduction === "min") reducedValue = Math.min(...values) if (reduction === "mean") reducedValue = values.reduce((prev, current) => prev + current, 0) / values.length newRow[newColName] = reducedValue }) rows.push(newRow) } // todo: add tests. figure out this api better. Object.values(columns).forEach((col) => { // For pivot columns, remove the source and reduction info for now. Treat things as immutable. 
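// Worked example of the group/reduce pivot above (illustrative rows): given
//   {year: 2020, sales: 10}, {year: 2020, sales: 5}, {year: 2021, sales: 7}
// a "groupBy year" holding "reduce sales sum" yields
//   {year: 2020, count: 2, sales_sum: 15} and {year: 2021, count: 1, sales_sum: 7},
// the reduced column name defaulting to source_reduction ("sales_sum") unless a third atom supplies one.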
delete col.source delete col.reduction }) return { rows, columns, } } } const pivotTable = makePivotTable(coreTable, groupByColNames, this.parent.columnNames, newCols) this._coreTable = pivotTable.rows this._columnNames = pivotTable.columns.map(col => col.name) return pivotTable.rows } get columnNames() { const {coreTable} = this return this._columnNames || this.parent.columnNames } } class scrollWhereParser extends abstractTableTransformParser { get cueAtom() { return this.getAtom(0) } get columnNameAtom() { return this.getAtom(1) } get comparisonAtom() { return this.getAtom(2) } get constantAtom() { return this.getAtomsFrom(3) } get coreTable() { // todo: use atoms here. const columnName = this.connectColumnName(this.getAtom(1)) const operator = this.getAtom(2) let untypedScalarValue = this.getAtom(3) const typedValue = isNaN(parseFloat(untypedScalarValue)) ? untypedScalarValue : parseFloat(untypedScalarValue) const coreTable = this.parent.coreTable if (!columnName || !operator || (untypedScalarValue === undefined && !operator.includes("mpty"))) return coreTable const filterFn = row => { const atom = row[columnName] const typedAtom = atom === null ? undefined : atom // convert nulls to undefined if (operator === "=") return typedValue === typedAtom else if (operator === "!=") return typedValue !== typedAtom else if (operator === "includes") return typedAtom !== undefined && typedAtom.includes(typedValue) else if (operator === "startsWith") return typedAtom !== undefined && typedAtom.toString().startsWith(typedValue) else if (operator === "endsWith") return typedAtom !== undefined && typedAtom.toString().endsWith(typedValue) else if (operator === "doesNotInclude") return typedAtom === undefined || !typedAtom.includes(typedValue) else if (operator === ">") return typedAtom > typedValue else if (operator === "<") return typedAtom < typedValue else if (operator === ">=") return typedAtom >= typedValue else if (operator === "<=") return typedAtom <= typedValue else if (operator === "empty") return typedAtom === "" || typedAtom === undefined else if (operator === "notEmpty") return typedAtom !== "" && typedAtom !== undefined } return coreTable.filter(filterFn) } } class scrollSelectParser extends abstractTableTransformParser { get columnNameAtom() { return this.getAtomsFrom(0) } get coreTable() { const {coreTable} = this.parent const {columnNames} = this if (!columnNames.length) return coreTable return coreTable.map(row => Object.fromEntries(columnNames.map(colName => [colName, row[colName]]))) } get columnNames() { if (!this._columnNames) { const names = this.getAtomsFrom(1) this._columnNamesMap = this.connectColumnNames(names) this._columnNames = names.map(name => this._columnNamesMap[name]) } return this._columnNames } } class scrollReverseParser extends abstractTableTransformParser { get coreTable() { return this.parent.coreTable.slice().reverse() } } class scrollComposeParser extends abstractTableTransformParser { get cueAtom() { return this.getAtom(0) } get newColumnNameAtom() { return this.getAtom(1) } get codeAtom() { return this.getAtomsFrom(2) } get coreTable() { const {newColumnName} = this const formatString = this.getAtomsFrom(2).join(" ") return this.parent.coreTable.map((row, index) => { const newRow = Object.assign({}, row) newRow[newColumnName] = this.evaluate(new Particle(row).evalTemplateString(formatString), index) return newRow }) } evaluate(str) { return str } get newColumnName() { return this.atoms[1] } get columnNames() { return 
this.parent.columnNames.concat(this.newColumnName) } } class scrollComputeParser extends scrollComposeParser { evaluate(str) { return parseFloat(eval(str)) } } class scrollEvalParser extends scrollComputeParser { evaluate(str) { return eval(str) } } class scrollRankParser extends scrollComposeParser { get cueAtom() { return this.getAtom(0) } get newColumnName() { return `rank` } evaluate(str, index) { return index + 1 } } class scrollLinksParser extends abstractTableTransformParser { get columnNameAtom() { return this.getAtomsFrom(0) } get coreTable() { const {newColumnName, linkColumns} = this return this.parent.coreTable.map(row => { const newRow = Object.assign({}, row) let newValue = [] linkColumns.forEach(name => { const value = newRow[name] delete newRow[name] if (value) newValue.push(`${name}`) }) newRow[newColumnName] = newValue.join(" ") return newRow }) } get newColumnName() { return "links" } get linkColumns() { return this.getAtomsFrom(1) } get columnNames() { const {linkColumns} = this return this.parent.columnNames.filter(name => !linkColumns.includes(name)).concat(this.newColumnName) } } class scrollLimitParser extends abstractTableTransformParser { get cueAtom() { return this.getAtom(0) } get integerAtom() { return parseInt(this.getAtom(1)) } get integerAtom() { return parseInt(this.getAtom(2)) } get coreTable() { let start = this.getAtom(1) let end = this.getAtom(2) if (end === undefined) { end = start start = 0 } return this.parent.coreTable.slice(parseInt(start), parseInt(end)) } } class scrollShuffleParser extends abstractTableTransformParser { get coreTable() { // Create a copy of the table to avoid modifying original const rows = this.parent.coreTable.slice() // Fisher-Yates shuffle algorithm for (let i = rows.length - 1; i > 0; i--) { const j = Math.floor(Math.random() * (i + 1)) ;[rows[i], rows[j]] = [rows[j], rows[i]] } return rows } } class scrollTransposeParser extends abstractTableTransformParser { get coreTable() { // todo: we need to switch to column based coreTable, instead of row based const transpose = arr => Object.keys(arr[0]).map(key => [key, ...arr.map(row => row[key])]); return transpose(this.parent.coreTable) } } class scrollImputeParser extends abstractTableTransformParser { get cueAtom() { return this.getAtom(0) } get columnNameAtom() { return this.getAtom(1) } get coreTable() { const {columnName} = this const sorted = this.root.lodash.orderBy(this.parent.coreTable.slice(), columnName) // ascending const imputed = [] let lastInserted = sorted[0][columnName] sorted.forEach(row => { const measuredTime = row[columnName] while (measuredTime > lastInserted + 1) { lastInserted++ // synthesize rows const imputedRow = {} imputedRow[columnName] = lastInserted imputedRow.count = 0 imputed.push(imputedRow) } lastInserted = measuredTime imputed.push(row) }) return imputed } get columnName() { return this.connectColumnName(this.getAtom(1)) } } class scrollOrderByParser extends abstractTableTransformParser { get columnNameAtom() { return this.getAtomsFrom(0) } get coreTable() { const makeLodashOrderByParams = str => { const part1 = str.split(" ") const part2 = part1.map(col => (col.startsWith("-") ? 
"desc" : "asc")) return [part1.map(col => this.connectColumnName(col.replace(/^\-/, ""))), part2] } const orderBy = makeLodashOrderByParams(this.content) return this.root.lodash.orderBy(this.parent.coreTable.slice(), orderBy[0], orderBy[1]) } } class assertRowCountParser extends abstractTableTransformParser { get cueAtom() { return this.getAtom(0) } get integerAtom() { return parseInt(this.getAtom(1)) } getErrors() { const errors = super.getErrors() const actualRows = this.coreTable.length const expectedRows = parseInt(this.content) if (actualRows !== expectedRows) return errors.concat(this.makeError(`Expected '${expectedRows}' rows but got '${actualRows}'.`)) return errors } } class scrollRenameParser extends abstractTableTransformParser { get newColumnNameAtom() { return this.getAtomsFrom(0) } get coreTable() { const {coreTable} = this.parent const {renameMap} = this if (!Object.keys(renameMap).length) return coreTable return coreTable.map(row => { const newRow = {} Object.keys(row).forEach(key => { const name = renameMap[key] || key newRow[name] = row[key] }) return newRow }) } get renameMap() { const map = {} const pairs = this.getAtomsFrom(1) let oldName while (oldName = pairs.shift()) { map[oldName] = pairs.shift() } return map } _renamed get columnNames() { if (this._renamed) return this._renamed const {renameMap} = this this._renamed = this.parent.columnNames.map(name => renameMap[name] || name ) return this._renamed } } class scrollSummarizeParser extends abstractTableTransformParser { get coreTable() { const {lodash} = this.root const sourceData = this.parent.coreTable if (!sourceData.length) return [] return this.parent.columnNames.map(colName => { const values = sourceData.map(row => row[colName]).filter(val => val !== undefined && val !== null) const numericValues = values.filter(val => typeof val === "number" && !isNaN(val)) const sorted = [...numericValues].sort((a, b) => a - b) // Calculate mode const frequency = {} values.forEach(val => { frequency[val] = (frequency[val] || 0) + 1 }) const mode = Object.entries(frequency) .sort((a, b) => b[1] - a[1]) .map(entry => entry[0])[0] // Calculate median for numeric values const median = sorted.length ? sorted.length % 2 === 0 ? (sorted[sorted.length/2 - 1] + sorted[sorted.length/2]) / 2 : sorted[Math.floor(sorted.length/2)] : null const sum = numericValues.length ? numericValues.reduce((a, b) => a + b, 0) : null const theType = typeof values[0] const count = values.length const mean = theType === "number" ? sum/count : "" return { name: colName, type: theType, incompleteCount: sourceData.length - values.length, uniqueCount: new Set(values).size, count, sum, median, mean, min: sorted.length ? sorted[0] : null, max: sorted.length ? 
sorted[sorted.length - 1] : null, mode } }) } get columnNames() { return ["name", "type", "incompleteCount", "uniqueCount", "count", "sum", "median", "mean", "min", "max", "mode"] } } class errorParser extends ParserBackedParticle { getErrors() { return this._getErrorParserErrors() } } class hakonContentParser extends ParserBackedParticle { get codeAtom() { return this.getAtomsFrom(0) } } class heatrixCatchAllParser extends ParserBackedParticle { get stringAtom() { return this.getAtomsFrom(0) } } class lineOfTextParser extends ParserBackedParticle { get stringAtom() { return this.getAtomsFrom(0) } get isTextParser() { return true } } class htmlLineParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(htmlLineParser, undefined, undefined) } get htmlAnyAtom() { return this.getAtomsFrom(0) } } class openGraphParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } } class scrollFooterParser extends ParserBackedParticle { get preBuildCommandAtom() { return this.getAtom(0) } } class scriptLineParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(scriptLineParser, undefined, undefined) } get javascriptAnyAtom() { return this.getAtomsFrom(0) } } class linkTitleParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get stringAtom() { return this.getAtomsFrom(1) } } class programLinkParser extends ParserBackedParticle { get codeAtom() { return this.getAtomsFrom(0) } } class scrollMediaLoopParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } } class scrollAutoplayParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } } class abstractCompilerRuleParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get anyAtom() { return this.getAtomsFrom(1) } } class closeSubparticlesParser extends abstractCompilerRuleParser { } class indentCharacterParser extends abstractCompilerRuleParser { } class catchAllAtomDelimiterParser extends abstractCompilerRuleParser { } class openSubparticlesParser extends abstractCompilerRuleParser { } class stringTemplateParser extends abstractCompilerRuleParser { } class joinSubparticlesWithParser extends abstractCompilerRuleParser { } class abstractConstantParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } } class parsersBooleanParser extends abstractConstantParser { get cueAtom() { return this.getAtom(0) } get constantIdentifierAtom() { return this.getAtom(1) } get booleanAtom() { return this.getAtomsFrom(2) } } class parsersFloatParser extends abstractConstantParser { get cueAtom() { return this.getAtom(0) } get constantIdentifierAtom() { return this.getAtom(1) } get floatAtom() { return this.getAtomsFrom(2).map(val => parseFloat(val)) } } class parsersIntParser extends abstractConstantParser { get cueAtom() { return this.getAtom(0) } get constantIdentifierAtom() { return this.getAtom(1) } get integerAtom() { return this.getAtomsFrom(2).map(val => parseInt(val)) } } class parsersStringParser extends abstractConstantParser { createParserCombinator() { return new Particle.ParserCombinator(catchAllMultilineStringConstantParser, undefined, undefined) } get cueAtom() { return this.getAtom(0) } get constantIdentifierAtom() { return this.getAtom(1) } get stringAtom() { return this.getAtomsFrom(2) } } class abstractParserRuleParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } } class abstractNonTerminalParserRuleParser 
extends abstractParserRuleParser { } class parsersBaseParserParser extends abstractParserRuleParser { get cueAtom() { return this.getAtom(0) } get baseParsersAtom() { return this.getAtom(1) } } class catchAllAtomTypeParser extends abstractParserRuleParser { get cueAtom() { return this.getAtom(0) } get atomTypeIdAtom() { return this.getAtom(1) } } class atomParserParser extends abstractParserRuleParser { get cueAtom() { return this.getAtom(0) } get atomParserAtom() { return this.getAtom(1) } } class catchAllParserParser extends abstractParserRuleParser { get cueAtom() { return this.getAtom(0) } get parserIdAtom() { return this.getAtom(1) } } class parsersAtomsParser extends abstractParserRuleParser { get atomTypeIdAtom() { return this.getAtomsFrom(0) } } class parsersCompilerParser extends abstractParserRuleParser { createParserCombinator() { return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"closeSubparticles" : closeSubparticlesParser, "indentCharacter" : indentCharacterParser, "catchAllAtomDelimiter" : catchAllAtomDelimiterParser, "openSubparticles" : openSubparticlesParser, "stringTemplate" : stringTemplateParser, "joinSubparticlesWith" : joinSubparticlesWithParser}), undefined) } get suggestInAutocomplete() { return false } } class parserDescriptionParser extends abstractParserRuleParser { get stringAtom() { return this.getAtomsFrom(0) } } class parsersExampleParser extends abstractParserRuleParser { createParserCombinator() { return new Particle.ParserCombinator(catchAllExampleLineParser, undefined, undefined) } get exampleAnyAtom() { return this.getAtomsFrom(0) } } class extendsParserParser extends abstractParserRuleParser { get cueAtom() { return this.getAtom(0) } get parserIdAtom() { return this.getAtom(1) } } class parsersPopularityParser extends abstractParserRuleParser { get cueAtom() { return this.getAtom(0) } get floatAtom() { return parseFloat(this.getAtom(1)) } } class inScopeParser extends abstractParserRuleParser { get parserIdAtom() { return this.getAtomsFrom(0) } } class parsersJavascriptParser extends abstractParserRuleParser { createParserCombinator() { return new Particle.ParserCombinator(catchAllJavascriptCodeLineParser, undefined, undefined) } format() { if (this.isNodeJs()) { const template = `class FOO{ ${this.subparticlesToString()}}` this.setSubparticles( require("prettier") .format(template, { semi: false, useTabs: true, parser: "babel", printWidth: 240 }) .replace(/class FOO \{\s+/, "") .replace(/\s+\}\s+$/, "") .replace(/\n\t/g, "\n") // drop one level of indent .replace(/\t/g, " ") // we used tabs instead of spaces to be able to dedent without breaking literals. 
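// Sketch of the wrap-format-unwrap idea used above, kept as a comment (assumes prettier is installed
// and that prettier.format returns a string synchronously, as in the version this code targets):
/*
const formatMethodBodies = code => {
  const prettier = require("prettier")
  const wrapped = `class FOO{ ${code}}` // prettier needs a syntactically complete class to parse method bodies
  return prettier
    .format(wrapped, { semi: false, useTabs: true, parser: "babel", printWidth: 240 })
    .replace(/class FOO \{\s+/, "") // strip the temporary wrapper again
    .replace(/\s+\}\s+$/, "")
    .replace(/\n\t/g, "\n") // drop the one indent level the wrapper introduced
    .replace(/\t/g, " ")
}
*/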
) } return this } } class abstractParseRuleParser extends abstractParserRuleParser { } class parsersCueParser extends abstractParseRuleParser { get cueAtom() { return this.getAtom(0) } get stringAtom() { return this.getAtom(1) } } class cueFromIdParser extends abstractParseRuleParser { get cueAtom() { return this.getAtom(0) } } class parsersPatternParser extends abstractParseRuleParser { get regexAtom() { return this.getAtomsFrom(0) } } class parsersRequiredParser extends abstractParserRuleParser { } class abstractValidationRuleParser extends abstractParserRuleParser { get booleanAtom() { return this.getAtomsFrom(0) } } class parsersSingleParser extends abstractValidationRuleParser { } class uniqueLineParser extends abstractValidationRuleParser { } class uniqueCueParser extends abstractValidationRuleParser { } class listDelimiterParser extends abstractParserRuleParser { get stringAtom() { return this.getAtomsFrom(0) } } class contentKeyParser extends abstractParserRuleParser { get stringAtom() { return this.getAtomsFrom(0) } get suggestInAutocomplete() { return false } } class subparticlesKeyParser extends abstractParserRuleParser { get stringAtom() { return this.getAtomsFrom(0) } get suggestInAutocomplete() { return false } } class parsersTagsParser extends abstractParserRuleParser { get stringAtom() { return this.getAtomsFrom(0) } } class atomTypeDescriptionParser extends ParserBackedParticle { get stringAtom() { return this.getAtomsFrom(0) } } class catchAllErrorParser extends ParserBackedParticle { getErrors() { return this._getErrorParserErrors() } } class catchAllExampleLineParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(catchAllExampleLineParser, undefined, undefined) } get exampleAnyAtom() { return this.getAtom(0) } get exampleAnyAtom() { return this.getAtomsFrom(1) } } class catchAllJavascriptCodeLineParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(catchAllJavascriptCodeLineParser, undefined, undefined) } get javascriptCodeAtom() { return this.getAtomsFrom(0) } } class catchAllMultilineStringConstantParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(catchAllMultilineStringConstantParser, undefined, undefined) } get stringAtom() { return this.getAtom(0) } get stringAtom() { return this.getAtomsFrom(1) } } class atomTypeDefinitionParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(undefined, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"//" : slashCommentParser, "description" : atomTypeDescriptionParser, "enumFromAtomTypes" : enumFromAtomTypesParser, "enum" : parsersEnumParser, "examples" : parsersExamplesParser, "min" : atomMinParser, "max" : atomMaxParser, "paint" : parsersPaintParser, "regex" : parsersRegexParser, "reservedAtoms" : reservedAtomsParser, "extends" : extendsAtomTypeParser}), undefined) } get atomTypeIdAtom() { return this.getAtom(0) } buildHtml() {return ""} } class enumFromAtomTypesParser extends ParserBackedParticle { get atomPropertyNameAtom() { return this.getAtom(0) } get atomTypeIdAtom() { return this.getAtomsFrom(1) } } class parsersEnumParser extends ParserBackedParticle { get atomPropertyNameAtom() { return this.getAtom(0) } get enumOptionAtom() { return this.getAtomsFrom(1) } } class parsersExamplesParser extends ParserBackedParticle { get atomPropertyNameAtom() { return this.getAtom(0) } get atomExampleAtom() 
{ return this.getAtomsFrom(1) } } class atomMinParser extends ParserBackedParticle { get atomPropertyNameAtom() { return this.getAtom(0) } get numberAtom() { return parseFloat(this.getAtom(1)) } } class atomMaxParser extends ParserBackedParticle { get atomPropertyNameAtom() { return this.getAtom(0) } get numberAtom() { return parseFloat(this.getAtom(1)) } } class parsersPaintParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get paintTypeAtom() { return this.getAtom(1) } } class parserDefinitionParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(catchAllErrorParser, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"//" : slashCommentParser, "boolean" : parsersBooleanParser, "float" : parsersFloatParser, "int" : parsersIntParser, "string" : parsersStringParser, "baseParser" : parsersBaseParserParser, "catchAllAtomType" : catchAllAtomTypeParser, "atomParser" : atomParserParser, "catchAllParser" : catchAllParserParser, "atoms" : parsersAtomsParser, "compiler" : parsersCompilerParser, "description" : parserDescriptionParser, "example" : parsersExampleParser, "extends" : extendsParserParser, "popularity" : parsersPopularityParser, "inScope" : inScopeParser, "javascript" : parsersJavascriptParser, "cue" : parsersCueParser, "cueFromId" : cueFromIdParser, "pattern" : parsersPatternParser, "required" : parsersRequiredParser, "single" : parsersSingleParser, "uniqueLine" : uniqueLineParser, "uniqueCue" : uniqueCueParser, "listDelimiter" : listDelimiterParser, "contentKey" : contentKeyParser, "subparticlesKey" : subparticlesKeyParser, "tags" : parsersTagsParser}), [{regex: /^[a-zA-Z0-9_]+Parser$/, parser: parserDefinitionParser}]) } get parserIdAtom() { return this.getAtom(0) } buildHtml() { return ""} } class parsersRegexParser extends ParserBackedParticle { get atomPropertyNameAtom() { return this.getAtom(0) } get regexAtom() { return this.getAtomsFrom(1) } } class reservedAtomsParser extends ParserBackedParticle { get atomPropertyNameAtom() { return this.getAtom(0) } get reservedAtomAtom() { return this.getAtomsFrom(1) } } class extendsAtomTypeParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get atomTypeIdAtom() { return this.getAtom(1) } } class abstractColumnNameParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get columnNameAtom() { return this.getAtom(1) } getRunTimeEnumOptions(atom) { if (atom.atomTypeId === "columnNameAtom") return this.parent.columnNames return super.getRunTimeEnumOptions(atom) } } class scrollRadiusParser extends abstractColumnNameParser { } class scrollSymbolParser extends abstractColumnNameParser { } class scrollFillParser extends abstractColumnNameParser { } class scrollStrokeParser extends abstractColumnNameParser { } class scrollLabelParser extends abstractColumnNameParser { } class scrollSortParser extends abstractColumnNameParser { } class scrollXParser extends abstractColumnNameParser { } class scrollYParser extends abstractColumnNameParser { } class abstractPlotLabelParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get stringAtom() { return this.getAtomsFrom(1) } } class quoteLineParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(quoteLineParser, undefined, undefined) } get stringAtom() { return this.getAtomsFrom(0) } } class scrollParser extends ParserBackedParticle { createParserCombinator() { return new 
Particle.ParserCombinator(catchAllParagraphParser, Object.assign(Object.assign({}, super.createParserCombinator()._getCueMapAsObject()), {"scrollParagraph" : scrollParagraphParser, "authors" : authorsParser, "blink" : blinkParser, "button" : scrollButtonParser, "catchAllParagraph" : catchAllParagraphParser, "center" : scrollCenterParser, "[]" : checklistTodoParser, "[x]" : checklistDoneParser, "-" : listAftertextParser, ">" : quickQuoteParser, "counter" : scrollCounterParser, "expander" : expanderParser, "#" : h1Parser, "##" : h2Parser, "###" : h3Parser, "####" : h4Parser, "?" : scrollQuestionParser, "#####" : h5Parser, "printTitle" : printTitleParser, "caption" : captionAftertextParser, "music" : scrollMusicParser, "video" : scrollVideoParser, "*" : quickParagraphParser, "stopwatch" : scrollStopwatchParser, "thinColumns" : thinColumnsParser, "wideColumns" : wideColumnsParser, "wideColumn" : wideColumnParser, "mediumColumns" : mediumColumnsParser, "mediumColumn" : mediumColumnParser, "thinColumn" : thinColumnParser, "endColumns" : endColumnsParser, "container" : scrollContainerParser, "debugSourceStack" : debugSourceStackParser, "---" : horizontalRuleParser, "***" : scrollDinkusParser, "dinkus" : customDinkusParser, "****" : endOfPostDinkusParser, "downloadButton" : downloadButtonParser, "editButton" : editButtonParser, "emailButton" : emailButtonParser, "homeButton" : homeButtonParser, "theScrollButton" : theScrollButtonParser, "editLink" : editLinkParser, "scrollVersionLink" : scrollVersionLinkParser, "classicForm" : classicFormParser, "scrollForm" : scrollFormParser, "loremIpsum" : loremIpsumParser, "nickelbackIpsum" : nickelbackIpsumParser, "modal" : scrollModalParser, "printSnippets" : printSnippetsParser, "nav" : scrollNavParser, "printFullSnippets" : printFullSnippetsParser, "printShortSnippets" : printShortSnippetsParser, "printRelated" : printRelatedParser, "notices" : scrollNoticesParser, "assertHtmlEquals" : assertHtmlEqualsParser, "assertBuildIncludes" : assertBuildIncludesParser, "assertHtmlIncludes" : assertHtmlIncludesParser, "assertHtmlExcludes" : assertHtmlExcludesParser, "assertIgnoreBelowErrors" : assertIgnoreBelowErrorsParser, "printAuthors" : printAuthorsParser, "printDate" : printDateParser, "printFormatLinks" : printFormatLinksParser, "buildParsers" : buildParsersParser, "buildCsv" : buildCsvParser, "buildTsv" : buildTsvParser, "buildJson" : buildJsonParser, "buildCss" : buildCssParser, "buildHtml" : buildHtmlParser, "buildJs" : buildJsParser, "buildRss" : buildRssParser, "buildTxt" : buildTxtParser, "loadConcepts" : loadConceptsParser, "buildConcepts" : buildConceptsParser, "fetch" : fetchParser, "buildMeasures" : buildMeasuresParser, "buildPdf" : buildPdfParser, "inlineCss" : scrollInlineCssParser, "inlineJs" : scrollInlineJsParser, "testStrict" : testStrictParser, "date" : scrollDateParser, "editBaseUrl" : editBaseUrlParser, "canonicalUrl" : canonicalUrlParser, "openGraphImage" : openGraphImageParser, "baseUrl" : baseUrlParser, "rssFeedUrl" : rssFeedUrlParser, "editUrl" : editUrlParser, "email" : siteOwnerEmailParser, "favicon" : faviconParser, "importOnly" : importOnlyParser, "inlineMarkups" : inlineMarkupsParser, "htmlLang" : htmlLangParser, "description" : openGraphDescriptionParser, "permalink" : permalinkParser, "tags" : scrollTagsParser, "title" : scrollTitleParser, "linkTitle" : scrollLinkTitleParser, "chat" : scrollChatParser, "table" : scrollTableParser, "cloc" : clocParser, "dependencies" : scrollDependenciesParser, "disk" : scrollDiskParser, "iris" : 
scrollIrisParser, "sampleData" : vegaSampleDataParser, "concepts" : scrollConceptsParser, "posts" : scrollPostsParser, "postsMeta" : scrollPostsMetaParser, "printFeed" : printFeedParser, "printSource" : printSourceParser, "printSiteMap" : printSiteMapParser, "code" : codeParser, "codeWithHeader" : codeWithHeaderParser, "codeFromFile" : codeFromFileParser, "debugParsers" : debugParsersParser, "copyButtons" : copyButtonsParser, "heatrix" : heatrixParser, "heatrixAdvanced" : heatrixAdvancedParser, "map" : mapParser, "scatterplot" : plotScatterplotParser, "barchart" : plotBarchartParser, "linechart" : plotLineChartParser, "sparkline" : sparklineParser, "printColumn" : printColumnParser, "printTable" : printTableParser, "katex" : katexParser, "helpfulNotFound" : helpfulNotFoundParser, "slideshow" : slideshowParser, "tableSearch" : tableSearchParser, "comment" : commentParser, "!" : counterpointParser, "//" : slashCommentParser, "thanksTo" : thanksToParser, "clearStack" : scrollClearStackParser, "css" : cssParser, "background" : scrollBackgroundColorParser, "color" : scrollFontColorParser, "font" : scrollFontParser, "dashboard" : scrollDashboardParser, "belowAsCode" : belowAsCodeParser, "debugBelow" : debugBelowParser, "debugAbove" : debugAboveParser, "debugAll" : debugAllParser, "belowAsCodeUntil" : belowAsCodeUntilParser, "aboveAsCode" : aboveAsCodeParser, "belowAsHtml" : belowAsHtmlParser, "aboveAsHtml" : aboveAsHtmlParser, "hakon" : hakonParser, "html" : htmlParser, "br" : scrollBrParser, "iframes" : iframesParser, "image" : scrollImageParser, "qrcode" : qrcodeParser, "youtube" : youtubeParser, "youTube" : youTubeParser, "import" : importParser, "imported" : scrollImportedParser, "script" : scriptParser, "jsonScript" : jsonScriptParser, "leftRightButtons" : scrollLeftRightButtonsParser, "keyboardNav" : keyboardNavParser, "printUsageStats" : printUsageStatsParser, "printScrollLeetSheet" : printScrollLeetSheetParser, "printparsersLeetSheet" : printparsersLeetSheetParser, "metaTags" : metaTagsParser, "quote" : quoteParser, "redirectTo" : redirectToParser, "replace" : replaceParser, "replaceJs" : replaceJsParser, "replaceNodejs" : replaceNodejsParser, "run" : runScriptParser, "endSnippet" : endSnippetParser, "toStamp" : toStampParser, "stamp" : stampParser, "stump" : scrollStumpParser, "stumpNoSnippet" : stumpNoSnippetParser, "plainText" : plainTextParser, "plainTextOnly" : plainTextOnlyParser, "theme" : scrollThemeParser}), [{regex: /^\d+\. 
/, parser: orderedListAftertextParser},{regex: /^\^.+$/, parser: footnoteDefinitionParser},{regex: /^[^\s]+\.(mp3|wav|ogg|aac|m4a|flac)/, parser: quickSoundParser},{regex: /^[^\s]+\.(mp4|webm|avi|mov)/, parser: quickVideoParser},{regex: /^[^\s]+\.(tsv|csv|ssv|psv|json)[^\s]*$/, parser: quickTableParser},{regex: /^[a-zA-Z0-9_]+Code$/, parser: codeWithLanguageParser},{regex: /^[^\s]+\.(css)$/, parser: quickCssParser},{regex: /^[^\s]+\.(html|htm)$/, parser: quickIncludeHtmlParser},{regex: /^[^\s]+\.(js)$/, parser: quickScriptParser},{regex: /^[a-zA-Z0-9_]+Def/, parser: scrollDefParser},{regex: /^%?[\w\.]+#[\w\.]+ */, parser: hamlParser},{regex: /^%[^#]+$/, parser: hamlTagParser},{regex: /^ subparticle.buildHtml).map(subparticle => { try {return subparticle.buildHtml(buildSettings)} catch (err) {console.error(err); return ""} }).filter(i => i).join("\n") + this.clearSectionStack() } sectionStack = [] clearSectionStack() { const result = this.sectionStack.join("\n") this.sectionStack = [] return result } bodyStack = [] clearBodyStack() { const result = this.bodyStack.join("") this.bodyStack = [] return result } get hakonParser() { if (this.isNodeJs()) return require("scrollsdk/products/hakon.nodejs.js") return hakonParser } readSyncFromFileOrUrl(fileOrUrl) { if (!this.isNodeJs()) return localStorage.getItem(fileOrUrl) || "" const isUrl = fileOrUrl.match(/^https?\:[^ ]+$/) if (!isUrl) return this.root.readFile(fileOrUrl) return this.readFile(this.makeFullPath(new URL(fileOrUrl).pathname.split('/').pop())) } async fetch(url, filename) { const isUrl = url.match(/^https?\:[^ ]+$/) if (!isUrl) return return this.isNodeJs() ? this.fetchNode(url, filename) : this.fetchBrowser(url) } get path() { return require("path") } makeFullPath(filename) { return this.path.join(this.folderPath, filename) } _nextAndPrevious(arr, index) { const nextIndex = index + 1 const previousIndex = index - 1 return { previous: arr[previousIndex] ?? arr[arr.length - 1], next: arr[nextIndex] ?? arr[0] } } // keyboard nav is always in the same folder. 
does not currently support cross folder includeFileInKeyboardNav(file) { const { scrollProgram } = file return scrollProgram.buildsHtml && scrollProgram.hasKeyboardNav && scrollProgram.tags.includes(this.primaryTag) } get timeIndex() { return this.file.timeIndex || 0 } get linkToPrevious() { if (!this.hasKeyboardNav) // Dont provide link to next unless keyboard nav is on return undefined const {allScrollFiles} = this let file = this._nextAndPrevious(allScrollFiles, this.timeIndex).previous while (!this.includeFileInKeyboardNav(file)) { file = this._nextAndPrevious(allScrollFiles, file.timeIndex).previous } return file.scrollProgram.permalink } importRegex = /^(import |[a-zA-Z\_\-\.0-9\/]+\.(scroll|parsers)$|https?:\/\/.+\.(scroll|parsers)$)/gm get linkToNext() { if (!this.hasKeyboardNav) // Dont provide link to next unless keyboard nav is on return undefined const {allScrollFiles} = this let file = this._nextAndPrevious(allScrollFiles, this.timeIndex).next while (!this.includeFileInKeyboardNav(file)) { file = this._nextAndPrevious(allScrollFiles, file.timeIndex).next } return file.scrollProgram.permalink } // todo: clean up this naming pattern and add a parser instead of special casing 404.html get allHtmlFiles() { return this.allScrollFiles.filter(file => file.scrollProgram.buildsHtml && file.scrollProgram.permalink !== "404.html") } parseNestedTag(tag) { if (!tag.includes("/")) return; const {path} = this const parts = tag.split("/") const group = parts.pop() const relativePath = parts.join("/") return { group, relativePath, folderPath: path.join(this.folderPath, path.normalize(relativePath)) } } getFilesByTags(tags, limit) { // todo: tags is currently matching partial substrings const getFilesWithTag = (tag, files) => files.filter(file => file.scrollProgram.buildsHtml && file.scrollProgram.tags.includes(tag)) if (typeof tags === "string") tags = tags.split(" ") if (!tags || !tags.length) return this.allHtmlFiles .filter(file => file !== this) // avoid infinite loops. todo: think this through better. 
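// Illustrative note on nested tags (hypothetical values), handled in the tags.forEach below using
// parseNestedTag above: parseNestedTag("essays/tech") returns { group: "tech", relativePath: "essays",
// folderPath: path.join(this.folderPath, "essays") }, so a tag containing "/" pulls in files tagged
// "tech" from the sibling "essays" folder, while a tag without "/" returns undefined and is matched
// against the current folder's files only.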
.map(file => { return { file, relativePath: "" } }) .slice(0, limit) let arr = [] tags.forEach(tag => { if (!tag.includes("/")) return (arr = arr.concat( getFilesWithTag(tag, this.allScrollFiles) .map(file => { return { file, relativePath: "" } }) .slice(0, limit) )) const {folderPath, group, relativePath} = this.parseNestedTag(tag) let files = [] try { files = this.fileSystem.getCachedLoadedFilesInFolder(folderPath, this) } catch (err) { console.error(err) } const filtered = getFilesWithTag(group, files).map(file => { return { file, relativePath: relativePath + "/" } }) arr = arr.concat(filtered.slice(0, limit)) }) return this.lodash.sortBy(arr, file => file.file.timestamp).reverse() } async fetchNode(url, filename) { filename = filename || new URL(url).pathname.split('/').pop() const fullpath = this.makeFullPath(filename) if (require("fs").existsSync(fullpath)) return this.readFile(fullpath) this.log(`🛜 fetching ${url} to ${fullpath} `) await this.downloadToDisk(url, fullpath) return this.readFile(fullpath) } log(message) { if (this.logger) this.logger.log(message) } async fetchBrowser(url) { const content = localStorage.getItem(url) if (content) return content return this.downloadToLocalStorage(url) } async downloadToDisk(url, destination) { const { writeFile } = require('fs').promises const response = await fetch(url) const fileBuffer = await response.arrayBuffer() await writeFile(destination, Buffer.from(fileBuffer)) return this.readFile(destination) } async downloadToLocalStorage(url) { const response = await fetch(url) const blob = await response.blob() localStorage.setItem(url, await blob.text()) return localStorage.getItem(url) } readFile(filename) { const {path} = this const fs = require("fs") const fullPath = path.join(this.folderPath, filename.replace(this.folderPath, "")) try { if (fs.existsSync(fullPath)) return fs.readFileSync(fullPath, "utf8") console.error(`File '${filename}' not found`) return "" } catch (err) { console.error(`Error in '${this.filePath}' reading file: '${fullPath}'`) console.error(err) return "" } } alreadyRequired = new Set() buildHtmlSnippet(buildSettings) { this.sectionStack = [] return this.map(subparticle => (subparticle.buildHtmlSnippet ? subparticle.buildHtmlSnippet(buildSettings) : subparticle.buildHtml(buildSettings))) .filter(i => i) .join("\n") .trim() + this.clearSectionStack() } get footnotes() { if (this._footnotes === undefined) this._footnotes = this.filter(particle => particle.isFootnote) return this._footnotes } get authors() { return this.get("authors") } get allScrollFiles() { try { return this.fileSystem.getCachedLoadedFilesInFolder(this.folderPath, this) } catch (err) { console.error(err) return [] } } async doThing(thing) { await Promise.all(this.filter(particle => particle[thing]).map(async particle => particle[thing]())) } async load() { await this.doThing("load") } async execute() { await this.doThing("execute") } file = {} getFromParserId(parserId) { return this.parserIdIndex[parserId]?.[0].content } get fileSystem() { return this.file.fileSystem } get filePath() { return this.file.filePath } get folderPath() { return this.file.folderPath } get filename() { return this.file.filename || "" } get hasKeyboardNav() { return this.has("keyboardNav") } get editHtml() { return `Edit` } get externalsPath() { return this.file.EXTERNALS_PATH } get endSnippetIndex() { // Get the line number that the snippet should stop at. 
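// Worked examples for the logic below (hypothetical posts): a file containing an endSnippet particle at
// index 12 returns 12; a file with no endSnippet whose first dinkus (e.g. "***") sits at index 8 returns 8;
// a file with neither returns -1 (no snippet cutoff).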
// First if its hard coded, use that if (this.has("endSnippet")) return this.getParticle("endSnippet").index // Next look for a dinkus const snippetBreak = this.find(particle => particle.isDinkus) if (snippetBreak) return snippetBreak.index return -1 } get parserIds() { return this.topDownArray.map(particle => particle.definition.id) } get tags() { return this.get("tags") || "" } get primaryTag() { return this.tags.split(" ")[0] } get filenameNoExtension() { return this.filename.replace(".scroll", "") } // todo: rename publishedUrl? Or something to indicate that this is only for stuff on the web (not localhost) // BaseUrl must be provided for RSS Feeds and OpenGraph tags to work get baseUrl() { const baseUrl = (this.get("baseUrl") || "").replace(/\/$/, "") return baseUrl + "/" } get canonicalUrl() { return this.get("canonicalUrl") || this.baseUrl + this.permalink } get openGraphImage() { const openGraphImage = this.get("openGraphImage") if (openGraphImage !== undefined) return this.ensureAbsoluteLink(openGraphImage) const images = this.filter(particle => particle.doesExtend("scrollImageParser")) const hit = images.find(particle => particle.has("openGraph")) || images[0] if (!hit) return "" return this.ensureAbsoluteLink(hit.filename) } get absoluteLink() { return this.ensureAbsoluteLink(this.permalink) } ensureAbsoluteLink(link) { if (link.includes("://")) return link return this.baseUrl + link.replace(/^\//, "") } get editUrl() { const editUrl = this.get("editUrl") if (editUrl) return editUrl const editBaseUrl = this.get("editBaseUrl") return (editBaseUrl ? editBaseUrl.replace(/\/$/, "") + "/" : "") + this.filename } get gitRepo() { // given https://github.com/breck7/breckyunits.com/blob/main/four-tips-to-improve-communication.scroll // return https://github.com/breck7/breckyunits.com return this.editUrl.split("/").slice(0, 5).join("/") } get scrollVersion() { // currently manually updated return "164.12.0" } // Use the first paragraph for the description // todo: add a particle method version of get that gets you the first particle. (actulaly make get return array?) // would speed up a lot. get description() { const description = this.getFromParserId("openGraphDescriptionParser") if (description) return description return this.generatedDescription } get generatedDescription() { const firstParagraph = this.find(particle => particle.isArticleContent) return firstParagraph ? firstParagraph.originalText.substr(0, 100).replace(/[&"<>']/g, "") : "" } get titleFromFilename() { const unCamelCase = str => str.replace(/([a-z])([A-Z])/g, "$1 $2").replace(/^./, match => match.toUpperCase()) return unCamelCase(this.filenameNoExtension) } get title() { return this.getFromParserId("scrollTitleParser") || this.titleFromFilename } get linkTitle() { return this.getFromParserId("scrollLinkTitleParser") || this.title } get permalink() { return this.get("permalink") || (this.filename ? this.filenameNoExtension + ".html" : "") } compileTo(extensionCapitalized) { if (extensionCapitalized === "Txt") return this.asTxt if (extensionCapitalized === "Html") return this.asHtml const methodName = "build" + extensionCapitalized return this.topDownArray .filter(particle => particle[methodName]) .map((particle, index) => particle[methodName](index)) .join("\n") .trim() } get asTxt() { return ( this.map(particle => { const text = particle.buildTxt ? 
particle.buildTxt() : "" if (text) return text + "\n" if (!particle.getLine().length) return "\n" return "" }) .join("") .replace(/<[^>]*>/g, "") .replace(/\n\n\n+/g, "\n\n") // Maximum 2 newlines in a row .trim() + "\n" // Always end in a newline, Posix style ) } get dependencies() { const dependencies = this.file.dependencies?.slice() || [] const files = this.topDownArray.filter(particle => particle.dependencies).map(particle => particle.dependencies).flat() return dependencies.concat(files) } get buildsHtml() { const { permalink } = this return !this.file.importOnly && (permalink.endsWith(".html") || permalink.endsWith(".htm")) } // Without specifying the language hyphenation will not work. get lang() { return this.get("htmlLang") || "en" } _compiledHtml = "" get asHtml() { if (!this._compiledHtml) { const { permalink, buildsHtml } = this const content = (this.buildHtml() + this.clearBodyStack()).trim() // Don't add html tags to CSV feeds. A little hacky as calling a getter named _html_ to get _xml_ is not ideal. But // <1% of use case so might be good enough. const wrapWithHtmlTags = buildsHtml const bodyTag = this.has("metaTags") ? "" : "\n" this._compiledHtml = wrapWithHtmlTags ? `\n\n${bodyTag}${content}\n\n` : content } return this._compiledHtml } get wordCount() { return this.asTxt.match(/\b\w+\b/g)?.length || 0 } get minutes() { return parseFloat((this.wordCount / 200).toFixed(1)) } get date() { const date = this.get("date") || (this.file.timestamp ? this.file.timestamp : 0) return this.dayjs(date).format(`MM/DD/YYYY`) } get year() { return parseInt(this.dayjs(this.date).format(`YYYY`)) } get dayjs() { if (!this.isNodeJs()) return dayjs const lib = require("dayjs") const relativeTime = require("dayjs/plugin/relativeTime") lib.extend(relativeTime) return lib } get lodash() { return this.isNodeJs() ? require("lodash") : lodash } get d3() { return this.isNodeJs() ? require('d3') : d3 } getConcepts(parsed) { const concepts = [] let currentConcept parsed.forEach(particle => { if (particle.isConceptDelimiter) { if (currentConcept) concepts.push(currentConcept) currentConcept = [] } if (currentConcept && particle.isMeasure) currentConcept.push(particle) }) if (currentConcept) concepts.push(currentConcept) return concepts } _formatConcepts(parsed) { const concepts = this.getConcepts(parsed) if (!concepts.length) return false const {lodash} = this // does a destructive sort in place on the parsed program concepts.forEach(concept => { let currentSection const newCode = lodash .sortBy(concept, ["sortIndex"]) .map(particle => { let newLines = "" const section = particle.sortIndex.toString().split(".")[0] if (section !== currentSection) { currentSection = section newLines = "\n" } return newLines + particle.toString() }) .join("\n") concept.forEach((particle, index) => (index ? particle.destroy() : "")) concept[0].replaceParticle(() => newCode) }) } get formatted() { return this.getFormatted(this.file.codeAtStart) } get lastCommitTime() { // todo: speed this up and do a proper release. also could add more metrics like this. 
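// A minimal standalone sketch of the git lookup performed just below, kept as a comment (assumes git is
// on the PATH and the file is tracked; "%at" is the author timestamp in epoch seconds):
/*
const lastCommitEpochSeconds = filePath => {
  try {
    return parseInt(require("child_process").execSync(`git log -1 --format="%at" -- "${filePath}"`).toString().trim(), 10) || 0
  } catch (err) {
    return 0 // not a git repo, or git not available
  }
}
// Example: new Date(lastCommitEpochSeconds("index.scroll") * 1000) gives the file's last commit date.
*/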
if (this._lastCommitTime === undefined) { try { this._lastCommitTime = require("child_process").execSync(`git log -1 --format="%at" -- "${this.filePath}"`).toString().trim() } catch (err) { this._lastCommitTime = 0 } } return this._lastCommitTime } getFormatted(codeAtStart = this.toString()) { let formatted = codeAtStart.replace(/\r/g, "") // remove all carriage returns if there are any const parsed = new this.constructor(formatted) parsed.topDownArray.forEach(subparticle => { subparticle.format() const original = subparticle.getLine() const trimmed = original.replace(/(\S.*?)[ \t]*$/gm, "$1") // Trim trailing whitespace unless parser allows it if (original !== trimmed && !subparticle.allowTrailingWhitespace) subparticle.setLine(trimmed) }) this._formatConcepts(parsed) let importOnlys = [] let topMatter = [] let allElse = [] // Create any bindings parsed.forEach(particle => { if (particle.bindTo === "next") particle.binding = particle.next if (particle.bindTo === "previous") particle.binding = particle.previous }) parsed.forEach(particle => { if (particle.getLine() === "importOnly") importOnlys.push(particle) else if (particle.isTopMatter) topMatter.push(particle) else allElse.push(particle) }) const combined = importOnlys.concat(topMatter, allElse) // Move any bound particles combined .filter(particle => particle.bindTo) .forEach(particle => { // First remove the particle from its current position const originalIndex = combined.indexOf(particle) combined.splice(originalIndex, 1) // Then insert it at the new position // We need to find the binding index again after removal const bindingIndex = combined.indexOf(particle.binding) if (particle.bindTo === "next") combined.splice(bindingIndex, 0, particle) else combined.splice(bindingIndex + 1, 0, particle) }) const trimmed = combined .map(particle => particle.toString()) .join("\n") .replace(/^\n*/, "") // Remove leading newlines .replace(/\n\n\n+/g, "\n\n") // Maximum 2 newlines in a row .replace(/\n+$/, "") return trimmed === "" ? trimmed : trimmed + "\n" // End non blank Scroll files in a newline character POSIX style for better working with tools like git } get parser() { return this.constructor } get parsersRequiringExternals() { const { parser } = this // todo: could be cleaned up a bit if (!parser.parsersRequiringExternals) parser.parsersRequiringExternals = parser.cachedHandParsersProgramRoot.filter(particle => particle.copyFromExternal).map(particle => particle.atoms[0]) return parser.parsersRequiringExternals } get Disk() { return this.isNodeJs() ? 
require("scrollsdk/products/Disk.node.js").Disk : {}} async buildAll(options = {}) { await this.load() await this.buildOne(options) await this.buildTwo(options) } async buildOne(options) { await this.execute() const toBuild = this.filter(particle => particle.buildOne) for (let particle of toBuild) { await particle.buildOne(options) } } async buildTwo(options) { const toBuild = this.filter(particle => particle.buildTwo) for (let particle of toBuild) { await particle.buildTwo(options) } } get outputFileNames() { return this.filter(p => p.outputFileNames).map(p => p.outputFileNames).flat() } _compileArray(filename, arr) { const removeBlanks = data => data.map(obj => Object.fromEntries(Object.entries(obj).filter(([_, value]) => value !== ""))) const parts = filename.split(".") const format = parts.pop() if (format === "json") return JSON.stringify(removeBlanks(arr), null, 2) if (format === "js") return `const ${parts[0]} = ` + JSON.stringify(removeBlanks(arr), null, 2) if (format === "csv") return this.arrayToCSV(arr) if (format === "tsv") return this.arrayToCSV(arr, "\t") if (format === "particles") return particles.toString() return particles.toString() } levenshteinDistance(a, b) { const m = a.length const n = b.length const dp = Array.from({ length: m + 1 }, () => Array(n + 1).fill(0)) for (let i = 0; i <= m; i++) { dp[i][0] = i } for (let j = 0; j <= n; j++) { dp[0][j] = j } for (let i = 1; i <= m; i++) { for (let j = 1; j <= n; j++) { const cost = a[i - 1] === b[j - 1] ? 0 : 1 dp[i][j] = Math.min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost) } } return dp[m][n] } makeLodashOrderByParams(str) { const part1 = str.split(" ") const part2 = part1.map(col => (col.startsWith("-") ? "desc" : "asc")) return [part1.map(col => col.replace(/^\-/, "")), part2] } arrayToCSV(data, delimiter = ",") { if (!data.length) return "" // Extract headers const headers = Object.keys(data[0]) const csv = data.map(row => headers .map(fieldName => { const fieldValue = row[fieldName] // Escape commas if the value is a string if (typeof fieldValue === "string" && fieldValue.includes(delimiter)) { return `"${fieldValue.replace(/"/g, '""')}"` // Escape double quotes and wrap in double quotes } return fieldValue }) .join(delimiter) ) csv.unshift(headers.join(delimiter)) // Add header row at the top return csv.join("\n") } compileConcepts(filename = "csv", sortBy = "") { const {lodash} = this if (!sortBy) return this._compileArray(filename, this.concepts) const orderBy = this.makeLodashOrderByParams(sortBy) return this._compileArray(filename, lodash.orderBy(this.concepts, orderBy[0], orderBy[1])) } _withStats get measuresWithStats() { if (!this._withStats) this._withStats = this.addMeasureStats(this.concepts, this.measures) return this._withStats } addMeasureStats(concepts, measures){ return measures.map(measure => { let Type = false concepts.forEach(concept => { const value = concept[measure.Name] if (value === undefined || value === "") return measure.Values++ if (!Type) { measure.Example = value.toString().replace(/\n/g, " ") measure.Type = typeof value Type = true } }) measure.Coverage = Math.floor((100 * measure.Values) / concepts.length) + "%" return measure }) } parseMeasures(parser) { if (!Particle.measureCache) Particle.measureCache = new Map() const measureCache = Particle.measureCache if (measureCache.get(parser)) return measureCache.get(parser) const {lodash} = this // todo: clean this up const getCueAtoms = rootParserProgram => rootParserProgram .filter(particle => 
particle.getLine().endsWith("Parser") && !particle.getLine().startsWith("abstract")) .map(particle => particle.get("cue") || particle.getLine()) .map(line => line.replace(/Parser$/, "")) // Generate a fake program with one of every of the available parsers. Then parse it. Then we can easily access the meta data on the parsers const dummyProgram = new parser( Array.from( new Set( getCueAtoms(parser.cachedHandParsersProgramRoot) // is there a better method name than this? ) ).join("\n") ) // Delete any particles that are not measures dummyProgram.filter(particle => !particle.isMeasure).forEach(particle => particle.destroy()) dummyProgram.forEach(particle => { // add nested measures Object.keys(particle.definition.cueMapWithDefinitions).forEach(key => particle.appendLine(key)) }) // Delete any nested particles that are not measures dummyProgram.topDownArray.filter(particle => !particle.isMeasure).forEach(particle => particle.destroy()) const measures = dummyProgram.topDownArray.map(particle => { return { Name: particle.measureName, Values: 0, Coverage: 0, Question: particle.definition.description, Example: particle.definition.getParticle("example")?.subparticlesToString() || "", Type: particle.typeForWebForms, Source: particle.sourceDomain, //Definition: parsedProgram.root.filename + ":" + particle.lineNumber SortIndex: particle.sortIndex, IsComputed: particle.isComputed, IsRequired: particle.isMeasureRequired, IsConceptDelimiter: particle.isConceptDelimiter, Cue: particle.definition.get("cue") } }) measureCache.set(parser, lodash.sortBy(measures, "SortIndex")) return measureCache.get(parser) } _concepts get concepts() { if (this._concepts) return this._concepts this._concepts = this.parseConcepts(this, this.measures) return this._concepts } _measures get measures() { if (this._measures) return this._measures this._measures = this.parseMeasures(this.parser) return this._measures } parseConcepts(parsedProgram, measures){ // Todo: might be a perf/memory/simplicity win to have a "segment" method in ScrollSDK, where you could // virtually split a Particle into multiple segments, and then query on those segments. // So we would "segment" on "id ", and then not need to create a bunch of new objects, and the original // already parsed lines could then learn about/access to their respective segments. const conceptDelimiter = measures.filter(measure => measure.IsConceptDelimiter)[0] if (!conceptDelimiter) return [] const concepts = parsedProgram.split(conceptDelimiter.Cue || conceptDelimiter.Name) concepts.shift() // Remove the part before "id" return concepts.map(concept => { const row = {} measures.forEach(measure => { const measureName = measure.Name const measureKey = measure.Cue || measureName.replace(/_/g, " ") if (!measure.IsComputed) row[measureName] = concept.getParticle(measureKey)?.measureValue ?? "" else row[measureName] = this.computeMeasure(parsedProgram, measureName, concept, concepts) }) return row }) } computeMeasure(parsedProgram, measureName, concept, concepts){ // note that this is currently global, assuming there wont be. name conflicts in computed measures in a single scroll if (!Particle.measureFnCache) Particle.measureFnCache = {} const measureFnCache = Particle.measureFnCache if (!measureFnCache[measureName]) { // a bit hacky but works?? 
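// In other words: the parser registered for this measure name carries the compute logic, so the lines
// below append a throwaway particle once just to read its computeValue function, cache that function
// under the measure name, then destroy the particle. As noted above, the cache is global, so computed
// measure names are assumed to be unique within a single Scroll run.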
const particle = parsedProgram.appendLine(measureName) measureFnCache[measureName] = particle.computeValue particle.destroy() } return measureFnCache[measureName](concept, measureName, parsedProgram, concepts) } compileMeasures(filename = "csv", sortBy = "") { const withStats = this.measuresWithStats if (!sortBy) return this._compileArray(filename, withStats) const orderBy = this.makeLodashOrderByParams(sortBy) return this._compileArray(filename, this.lodash.orderBy(withStats, orderBy[0], orderBy[1])) } evalNodeJsMacros(value, macroMap, filePath) { const tempPath = filePath + ".js" const {Disk} = this if (Disk.exists(tempPath)) throw new Error(`Failed to write/require replaceNodejs snippet since '${tempPath}' already exists.`) try { Disk.write(tempPath, value) const results = require(tempPath) Object.keys(results).forEach(key => (macroMap[key] = results[key])) } catch (err) { console.error(`Error in evalMacros in file '${filePath}'`) console.error(err) } finally { Disk.rm(tempPath) } } evalMacros(fusedFile) { const {fusedCode, codeAtStart, filePath} = fusedFile let code = fusedCode const absolutePath = filePath // note: the 2 params above are not used in this method, but may be used in user eval code. (todo: cleanup) const regex = /^(replace|footer$)/gm if (!regex.test(code)) return code const particle = new Particle(code) // todo: this can be faster. a more lightweight particle class? // Process macros const macroMap = {} particle .filter(particle => { const parserAtom = particle.cue return parserAtom === "replace" || parserAtom === "replaceJs" || parserAtom === "replaceNodejs" }) .forEach(particle => { let value = particle.length ? particle.subparticlesToString() : particle.getAtomsFrom(2).join(" ") const kind = particle.cue try { if (kind === "replaceJs") value = eval(value) if (this.isNodeJs() && kind === "replaceNodejs") this.evalNodeJsMacros(value, macroMap, absolutePath) else macroMap[particle.getAtom(1)] = value } catch (err) { console.error(err) } particle.destroy() // Destroy definitions after eval }) if (particle.has("footer")) { const pushes = particle.getParticles("footer") const append = pushes.map(push => push.section.join("\n")).join("\n") pushes.forEach(push => { push.section.forEach(particle => particle.destroy()) push.destroy() }) code = particle.asString + append } const keys = Object.keys(macroMap) if (!keys.length) return code let codeAfterMacroSubstitution = particle.asString // Todo: speed up. build a template? Object.keys(macroMap).forEach(key => (codeAfterMacroSubstitution = codeAfterMacroSubstitution.replace(new RegExp(key, "g"), macroMap[key]))) return codeAfterMacroSubstitution } toRss() { const { title, canonicalUrl } = this return ` ${title} ${canonicalUrl} ${this.dayjs(this.timestamp * 1000).format("ddd, DD MMM YYYY HH:mm:ss ZZ")} ` } static cachedHandParsersProgramRoot = new HandParsersProgram(`columnNameAtom paint constant newColumnNameAtom description Name a derived column. paint variable constantAtom paint constant percentAtom paint constant.numeric.float extends stringAtom // todo: this currently extends from stringAtom b/c scrollsdk needs to be fixed. seems like if extending from number then the hard coded number typescript regex takes precedence over a custom regex countAtom extends integerAtom yearAtom extends integerAtom preBuildCommandAtom extends cueAtom description Give build command atoms their own color. paint constant.character.escape delimiterAtom description String to use as a delimiter. 
paint string bulletPointAtom description Any token used as a bullet point such as "-" or "1." or ">" paint keyword comparisonAtom enum < > <= >= = != includes doesNotInclude empty notEmpty startsWith endsWith paint constant personNameAtom extends stringAtom urlAtom paint constant.language absoluteUrlAtom paint constant.language regex (ftp|https?)://.+ emailAddressAtom extends stringAtom paint constant.language permalinkAtom paint string description A string that doesn't contain characters that might interfere with most filesystems. No slashes, for instance. filePathAtom paint constant.language tagOrUrlAtom description An HTML tag or a url. paint constant.language htmlAttributesAtom paint constant htmlTagAtom paint constant.language enum div span p a img ul ol li h1 h2 h3 h4 h5 h6 header nav section article aside main footer input button form label select option textarea table tr td th tbody thead tfoot br hr meta link script style title code classNameAtom paint constant htmlIdAtom paint constant fontFamilyAtom enum Arial Helvetica Verdana Georgia Impact Tahoma Slim paint constant javascriptAnyAtom extends codeAtom htmlAnyAtom extends codeAtom colorAtom extends codeAtom buildCommandAtom extends cueAtom description Give build command atoms their own color. paint constant cssAnyAtom extends codeAtom cssLengthAtom extends codeAtom reductionTypeAtom enum sum mean max min concat first paint keyword inlineMarkupNameAtom description Options to turn on some inline markups. enum bold italics code katex none tileOptionAtom enum default light measureNameAtom extends cueAtom // A regex for column names for max compatibility with a broad range of data science tools: regex [a-zA-Z][a-zA-Z0-9]* abstractConstantAtom paint entity.name.tag javascriptSafeAlphaNumericIdentifierAtom regex [a-zA-Z0-9_]+ reservedAtoms enum extends function static if while export return class for default require var let const new anyAtom baseParsersAtom description There are a few classes of special parsers. BlobParsers don't have their subparticles parsed. Error particles always report an error. // todo Remove? enum blobParser errorParser paint variable.parameter enumAtom paint constant.language booleanAtom enum true false extends enumAtom atomParserAtom enum prefix postfix omnifix paint constant.numeric atomPropertyNameAtom paint variable.parameter atomTypeIdAtom examples integerAtom cueAtom someCustomAtom extends javascriptSafeAlphaNumericIdentifierAtom enumFromAtomTypes atomTypeIdAtom paint storage constantIdentifierAtom examples someId myVar // todo Extend javascriptSafeAlphaNumericIdentifier regex [a-zA-Z]\\w+ paint constant.other description A atom that can be assigned to the parser in the target language. constructorFilePathAtom enumOptionAtom // todo Add an enumOption top level type, so we can add data to an enum option such as a description. paint string atomExampleAtom description Holds an example for a atom with a wide range of options. paint string extraAtom paint invalid fileExtensionAtom examples js txt doc exe regex [a-zA-Z0-9]+ paint string numberAtom paint constant.numeric floatAtom extends numberAtom regex \\-?[0-9]*\\.?[0-9]* paint constant.numeric.float integerAtom regex \\-?[0-9]+ extends numberAtom paint constant.numeric.integer cueAtom description A atom that indicates a certain parser to use. paint keyword javascriptCodeAtom lowercaseAtom regex [a-z]+ parserIdAtom examples commentParser addParser description This doubles as the class name in Javascript. 
If this begins with \`abstract\`, then the parser will be considered an abstract parser, which cannot be used by itself but provides common functionality to parsers that extend it. paint variable.parameter extends javascriptSafeAlphaNumericIdentifierAtom enumFromAtomTypes parserIdAtom cueAtom paint constant.language regexAtom paint string.regexp reservedAtomAtom description A atom that a atom cannot contain. paint string paintTypeAtom enum comment comment.block comment.block.documentation comment.line constant constant.character.escape constant.language constant.numeric constant.numeric.complex constant.numeric.complex.imaginary constant.numeric.complex.real constant.numeric.float constant.numeric.float.binary constant.numeric.float.decimal constant.numeric.float.hexadecimal constant.numeric.float.octal constant.numeric.float.other constant.numeric.integer constant.numeric.integer.binary constant.numeric.integer.decimal constant.numeric.integer.hexadecimal constant.numeric.integer.octal constant.numeric.integer.other constant.other constant.other.placeholder entity entity.name entity.name.class entity.name.class.forward-decl entity.name.constant entity.name.enum entity.name.function entity.name.function.constructor entity.name.function.destructor entity.name.impl entity.name.interface entity.name.label entity.name.namespace entity.name.section entity.name.struct entity.name.tag entity.name.trait entity.name.type entity.name.union entity.other.attribute-name entity.other.inherited-class invalid invalid.deprecated invalid.illegal keyword keyword.control keyword.control.conditional keyword.control.import keyword.declaration keyword.operator keyword.operator.arithmetic keyword.operator.assignment keyword.operator.bitwise keyword.operator.logical keyword.operator.atom keyword.other markup markup.bold markup.deleted markup.heading markup.inserted markup.italic markup.list.numbered markup.list.unnumbered markup.other markup.quote markup.raw.block markup.raw.inline markup.underline markup.underline.link meta meta.annotation meta.annotation.identifier meta.annotation.parameters meta.block meta.braces meta.brackets meta.class meta.enum meta.function meta.function-call meta.function.parameters meta.function.return-type meta.generic meta.group meta.impl meta.interface meta.interpolation meta.namespace meta.paragraph meta.parens meta.path meta.preprocessor meta.string meta.struct meta.tag meta.toc-list meta.trait meta.type meta.union punctuation punctuation.accessor punctuation.definition.annotation punctuation.definition.comment punctuation.definition.generic.begin punctuation.definition.generic.end punctuation.definition.keyword punctuation.definition.string.begin punctuation.definition.string.end punctuation.definition.variable punctuation.section.block.begin punctuation.section.block.end punctuation.section.braces.begin punctuation.section.braces.end punctuation.section.brackets.begin punctuation.section.brackets.end punctuation.section.group.begin punctuation.section.group.end punctuation.section.interpolation.begin punctuation.section.interpolation.end punctuation.section.parens.begin punctuation.section.parens.end punctuation.separator punctuation.separator.continuation punctuation.terminator source source.language-suffix.embedded storage storage.modifier storage.type storage.type keyword.declaration.type storage.type.class keyword.declaration.class storage.type.enum keyword.declaration.enum storage.type.function keyword.declaration.function storage.type.impl keyword.declaration.impl 
storage.type.interface keyword.declaration.interface storage.type.struct keyword.declaration.struct storage.type.trait keyword.declaration.trait storage.type.union keyword.declaration.union string string.quoted.double string.quoted.other string.quoted.single string.quoted.triple string.regexp string.unquoted support support.class support.constant support.function support.module support.type text text.html text.xml variable variable.annotation variable.function variable.language variable.other variable.other.constant variable.other.member variable.other.readwrite variable.parameter paint string scriptUrlAtom semanticVersionAtom examples 1.0.0 2.2.1 regex [0-9]+\\.[0-9]+\\.[0-9]+ paint constant.numeric dateAtom paint string stringAtom paint string atomAtom paint constant description A non-empty single atom string. regex .+ exampleAnyAtom examples lorem ipsem // todo Eventually we want to be able to parse correctly the examples. paint comment extends stringAtom blankAtom commentAtom paint comment codeAtom paint comment metaCommandAtom extends cueAtom description Give meta command atoms their own color. paint constant.numeric // Obviously this is not numeric. But I like the green color for now. We need a better design to replace this "paint" concept https://github.com/breck7/scrollsdk/issues/186 vegaDataSetAtom paint constant.numeric enum airports.csv anscombe.json barley.json birdstrikes.json budget.json budgets.json burtin.json cars.json climate.json co2-concentration.csv countries.json crimea.json descriptions.json disasters.csv driving.json earthquakes.json flare-dependencies.json flare.json flights-10k.json flights-200k.json flights-20k.json flights-2k.json flights-3m.csv flights-5k.json flights-airport.csv gapminder-health-income.csv gapminder.json github.csv graticule.json income.json iowa-electricity.csv iris.json jobs.json la-riots.csv londonBoroughs.json londonCentroids.json londonTubeLines.json lookup_groups.csv lookup_people.csv miserables.json monarchs.json movies.json normal-2d.json obesity.json points.json population.json population_engineers_hurricanes.csv seattle-temps.csv seattle-weather.csv sf-temps.csv sp500.csv stocks.csv udistrict.json unemployment-across-industries.json unemployment.tsv us-10m.json us-employment.csv us-state-capitals.json weather.csv weather.json weball26.json wheat.json windvectors.csv world-110m.json zipcodes.csv tagAtom extends permalinkAtom tagWithOptionalFolderAtom description A group name optionally combined with a folder path. Only used when referencing tags, not in posts. extends stringAtom scrollThemeAtom enum roboto gazette dark tufte prestige paint constant abstractScrollParser atoms cueAtom javascript buildHtmlSnippet(buildSettings) { return this.buildHtml(buildSettings) } buildTxt() { return "" } getHtmlRequirements(buildSettings) { const {requireOnce} = this if (!requireOnce) return "" const set = buildSettings?.alreadyRequired || this.root.alreadyRequired if (set.has(requireOnce)) return "" set.add(requireOnce) return requireOnce + "\\n\\n" } abstractAftertextParser description Text followed by markup commands. extends abstractScrollParser inScope abstractAftertextDirectiveParser abstractAftertextAttributeParser aftertextTagParser abstractCommentParser javascript get markupInserts() { const { originalTextPostLinkify } = this return this.filter(particle => particle.isMarkup) .map(particle => particle.getInserts(originalTextPostLinkify)) .filter(i => i) .flat() } get originalText() { return this.content ?? 
"" } get originalTextPostLinkify() { const { originalText } = this const shouldLinkify = this.get("linkify") === "false" || originalText.includes(" { const needle = note.cue const {linkBack} = note if (originalText.includes(needle)) originalText = originalText.replace(new RegExp("\\\\" + needle + "\\\\b"), \`\${note.label}\`) }) return originalText } get text() { const { originalTextPostLinkify, markupInserts } = this let adjustment = 0 let newText = originalTextPostLinkify markupInserts.sort((a, b) => { if (a.index !== b.index) return a.index - b.index // If multiple tags start at same index, the tag that closes first should start last. Otherwise HTML breaks. if (b.index === b.endIndex) // unless the endIndex is the same as index return a.endIndex - b.endIndex return b.endIndex - a.endIndex }) markupInserts.forEach(insertion => { insertion.index += adjustment const consumeStartCharacters = insertion.consumeStartCharacters ?? 0 const consumeEndCharacters = insertion.consumeEndCharacters ?? 0 newText = newText.slice(0, insertion.index - consumeEndCharacters) + insertion.string + newText.slice(insertion.index + consumeStartCharacters) adjustment += insertion.string.length - consumeEndCharacters - consumeStartCharacters }) return newText } tag = "p" get className() { if (this.get("classes")) return this.get("classes") const classLine = this.getParticle("class") if (classLine && classLine.applyToParentElement) return classLine.content return this.defaultClassName } defaultClassName = "scrollParagraph" get isHidden() { return this.has("hidden") } buildHtml(buildSettings) { if (this.isHidden) return "" this.buildSettings = buildSettings const { className, styles } = this const classAttr = className ? \`class="\${this.className}"\` : "" const tag = this.get("tag") || this.tag if (tag === "none") // Allow no tag for aftertext in tables return this.text const id = this.has("id") ? "" : \`id="\${this.htmlId}" \` // always add an html id return this.getHtmlRequirements(buildSettings) + \`<\${tag} \${id}\${this.htmlAttributes}\${classAttr}\${styles}>\${this.text}\${this.closingTag}\` } get closingTag() { const tag = this.get("tag") || this.tag return \`\` } get htmlAttributes() { const attrs = this.filter(particle => particle.isAttribute) return attrs.length ? attrs.map(particle => particle.htmlAttributes).join(" ") + " " : "" } get styles() { const style = this.getParticle("style") const fontFamily = this.getParticle("font") const color = this.getParticle("color") if (!style && !fontFamily && !color) return "" return \` style="\${style?.content};\${fontFamily?.css};\${color?.css}"\` } get htmlId() { return this.get("id") || "particle" + this.index } scrollParagraphParser // todo Perhaps rewrite this from scratch and move out of aftertext. extends abstractAftertextParser catchAllAtomType stringAtom description A paragraph. boolean suggestInAutocomplete false cueFromId javascript buildHtml(buildSettings) { if (this.isHidden) return "" // Hacky, I know. const newLine = this.has("inlineMarkupsOn") ? undefined : this.appendLine("inlineMarkupsOn") const compiled = super.buildHtml(buildSettings) if (newLine) newLine.destroy() return compiled } buildTxt() { const subparticles = this.filter(particle => particle.buildTxt).map(particle => particle.buildTxt()).filter(i => i).join("\\n") const dateline = this.getParticle("dateline") return (dateline ? dateline.day + "\\n\\n" : "") + (this.originalText || "") + (subparticles ? 
"\\n " + subparticles.replace(/\\n/g, "\\n ") : "") } authorsParser popularity 0.007379 // multiple authors delimited by " and " boolean isPopular true extends scrollParagraphParser description Set author(s) name(s). example authors Breck Yunits https://breckyunits.com Breck Yunits // note: once we have mixins in Parsers, lets mixin the below from abstractTopLevelSingleMetaParser atoms metaCommandAtom javascript isTopMatter = true isSetterParser = true buildHtmlForPrint() { // hacky. todo: cleanup const originalContent = this.content this.setContent(\`by \${originalContent}\`) const html = super.buildHtml() this.setContent(originalContent) return html } buildTxtForPrint() { return 'by ' + super.buildTxt() } buildHtml() { return "" } buildTxt() { return "" } defaultClassName = "printAuthorsParser" blinkParser description Just for fun. extends scrollParagraphParser example blink Carpe diem! cue blink javascript buildHtml() { return \`\${super.buildHtml()} \` } scrollButtonParser extends scrollParagraphParser cue button description A button. postParser description Post a particle. example button Click me javascript defaultClassName = "scrollButton" tag = "button" get htmlAttributes() { const link = this.getFromParser("scrollLinkParser") const post = this.getParticle("post") if (post) { const method = "post" const action = link?.link || "" const formData = new URLSearchParams({particle: post.subparticlesToString()}).toString() return \` onclick="fetch('\${action}', {method: '\${method}', body: '\${formData}', headers: {'Content-Type': 'application/x-www-form-urlencoded'}}).then(async (message) => {const el = document.createElement('div'); el.textContent = await message.text(); this.insertAdjacentElement('afterend', el);}); return false;" \` } return super.htmlAttributes + (link ? \`onclick="window.location='\${link.link}'"\` : "") } getFromParser(parserId) { return this.find(particle => particle.doesExtend(parserId)) } catchAllParagraphParser popularity 0.115562 description A paragraph. extends scrollParagraphParser boolean suggestInAutocomplete false boolean isPopular true boolean isArticleContent true atoms stringAtom javascript getErrors() { const errors = super.getErrors() || [] return this.parent.has("testStrict") ? errors.concat(this.makeError(\`catchAllParagraphParser should not have any matches when testing with testStrict.\`)) : errors } get originalText() { return this.getLine() || "" } scrollCenterParser popularity 0.006415 cue center description A centered section. extends scrollParagraphParser example center This paragraph is centered. javascript buildHtml() { this.parent.sectionStack.push("
") return \`
\${super.buildHtml()}\` } buildTxt() { return this.content } abstractIndentableParagraphParser extends scrollParagraphParser inScope abstractAftertextDirectiveParser abstractAftertextAttributeParser abstractIndentableParagraphParser javascript compileSubparticles() { return this.map(particle => particle.buildHtml()) .join("\\n") .trim() } buildHtml() { return super.buildHtml() + this.compileSubparticles() } buildTxt() { return this.getAtom(0) + " " + super.buildTxt() } checklistTodoParser popularity 0.000193 extends abstractIndentableParagraphParser example [] Get milk description A task todo. cue [] string checked javascript get text() { return \`
\` } get id() { return this.get("id") || "item" + this._getUid() } checklistDoneParser popularity 0.000072 extends checklistTodoParser description A completed task. string checked checked cue [x] example [x] get milk listAftertextParser popularity 0.014325 extends abstractIndentableParagraphParser example - I had a _new_ thought. description A list item. cue - javascript defaultClassName = "" buildHtml() { const {index, parent} = this const particleClass = this.constructor const isStartOfList = index === 0 || !(parent.particleAt(index - 1) instanceof particleClass) const isEndOfList = parent.length === index + 1 || !(parent.particleAt(index + 1) instanceof particleClass) const { listType } = this return (isStartOfList ? \`<\${listType} \${this.attributes}>\` : "") + \`\${super.buildHtml()}\` + (isEndOfList ? \`\` : "") } get attributes() { return "" } tag = "li" listType = "ul" abstractCustomListItemParser extends listAftertextParser javascript get requireOnce() { return \`\` } get attributes() { return \`class="\${this.constructor.name}"\` } orderedListAftertextParser popularity 0.004485 extends listAftertextParser description A list item. example 1. Hello world pattern ^\\d+\\. javascript listType = "ol" get attributes() { return \` start="\${this.getAtom(0)}"\`} quickQuoteParser popularity 0.000482 cue > example > The only thing we have to fear is fear itself. - FDR boolean isPopular true extends abstractIndentableParagraphParser description A quote. javascript defaultClassName = "scrollQuote" tag = "blockquote" scrollCounterParser description Visualize the speed of something. extends scrollParagraphParser cue counter example counter 4.5 Babies Born atoms cueAtom numberAtom javascript buildHtml() { const line = this.getLine() const atoms = line.split(" ") atoms.shift() // drop the counter atom const perSecond = parseFloat(atoms.shift()) // get number const increment = perSecond/10 const id = this._getUid() this.setLine(\`* 0 \` + atoms.join(" ")) const html = super.buildHtml() this.setLine(line) return html } expanderParser popularity 0.000072 cueFromId description An collapsible HTML details tag. extends scrollParagraphParser example expander Knock Knock Who's there? javascript buildHtml() { this.parent.sectionStack.push("") return \`
\${super.buildHtml()}\` } buildTxt() { return this.content } tag = "summary" defaultClassName = "" footnoteDefinitionParser popularity 0.001953 description A footnote. Can also be used as section notes. extends scrollParagraphParser boolean isFootnote true pattern ^\\^.+$ // We need to quickLinks back in scope because there is currently a bug in ScrollSDK/parsers where if a parser extending a parent class has a child parser defined, then any regex parsers in the parent class will not be tested unless explicitly included in scope again. inScope quickLinkParser labelParser description If you want to show a custom label for a footnote. Default label is the note definition index. cueFromId atoms cueAtom catchAllAtomType stringAtom javascript get htmlId() { return \`note\${this.noteDefinitionIndex}\` } get label() { // In the future we could allow common practices like author name return this.get("label") || \`[\${this.noteDefinitionIndex}]\` } get linkBack() { return \`noteUsage\${this.noteDefinitionIndex}\` } get text() { return \`\${this.label} \${super.text}\` } get noteDefinitionIndex() { return this.parent.footnotes.indexOf(this) + 1 } buildTxt() { return this.getAtom(0) + ": " + super.buildTxt() } abstractHeaderParser extends scrollParagraphParser example # Hello world javascript buildHtml(buildSettings) { if (this.isHidden) return "" if (this.parent.sectionStack) this.parent.sectionStack.push("") return \`
\` + super.buildHtml(buildSettings) } buildTxt() { const line = super.buildTxt() return line + "\\n" + "=".repeat(line.length) } isHeader = true h1Parser popularity 0.017918 description An html h1 tag. extends abstractHeaderParser boolean isArticleContent true cue # boolean isPopular true javascript tag = "h1" h2Parser popularity 0.005257 description An html h2 tag. extends abstractHeaderParser boolean isArticleContent true cue ## boolean isPopular true javascript tag = "h2" h3Parser popularity 0.001085 description An html h3 tag. extends abstractHeaderParser boolean isArticleContent true cue ### javascript tag = "h3" h4Parser popularity 0.000289 description An html h4 tag. extends abstractHeaderParser cue #### javascript tag = "h4" scrollQuestionParser popularity 0.004244 description A question. extends h4Parser cue ? example ? Why is the sky blue? javascript defaultClassName = "scrollQuestion" h5Parser description An html h5 tag. extends abstractHeaderParser cue ##### javascript tag = "h5" printTitleParser popularity 0.007572 description Print title. extends abstractHeaderParser boolean isPopular true example title Eureka printTitle cueFromId javascript buildHtml(buildSettings) { // Hacky, I know. const {content} = this if (content === undefined) this.setContent(this.root.title) const { permalink } = this.root if (!permalink) { this.setContent(content) // Restore it as it was. return super.buildHtml(buildSettings) } const newLine = this.appendLine(\`link \${permalink}\`) const compiled = super.buildHtml(buildSettings) newLine.destroy() this.setContent(content) // Restore it as it was. return compiled } get originalText() { return this.content ?? this.root.title ?? "" } defaultClassName = "printTitleParser" tag = "h1" captionAftertextParser popularity 0.003207 description An image caption. cue caption extends scrollParagraphParser boolean isPopular true abstractMediaParser extends scrollParagraphParser inScope scrollMediaLoopParser scrollAutoplayParser int atomIndex 1 javascript buildTxt() { return "" } get filename() { return this.getAtom(this.atomIndex) } getAsHtmlAttribute(attr) { if (!this.has(attr)) return "" const value = this.get(attr) return value ? \`\${attr}="\${value}"\` : attr } getAsHtmlAttributes(list) { return list.map(atom => this.getAsHtmlAttribute(atom)).filter(i => i).join(" ") } buildHtml() { return \`<\${this.tag} src="\${this.filename}" controls \${this.getAsHtmlAttributes("width height loop autoplay".split(" "))}>\` } scrollMusicParser popularity 0.000024 extends abstractMediaParser cue music description Play sound files. example music sipOfCoffee.m4a javascript buildHtml() { return \`\` } quickSoundParser popularity 0.000024 extends scrollMusicParser atoms urlAtom pattern ^[^\\s]+\\.(mp3|wav|ogg|aac|m4a|flac) int atomIndex 0 scrollVideoParser popularity 0.000024 extends abstractMediaParser cue video example video spirit.mp4 description Play video files. widthParser cueFromId atoms cueAtom integerAtom heightParser cueFromId atoms cueAtom integerAtom javascript tag = "video" quickVideoParser popularity 0.000024 extends scrollVideoParser atoms urlAtom pattern ^[^\\s]+\\.(mp4|webm|avi|mov) int atomIndex 0 quickParagraphParser popularity 0.001881 cue * extends scrollParagraphParser description A paragraph. boolean isArticleContent true example * I had a _new_ idea. scrollStopwatchParser description A stopwatch. 
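// Illustrative sketch, not part of the grammar itself: the attribute-building
// pattern of abstractMediaParser's getAsHtmlAttribute above, restated over a plain
// object. The sample settings values are ours.
const settings = { width: "480", loop: "" } // "loop" is present but has no value
const getAsHtmlAttribute = attr => {
  if (!(attr in settings)) return "" // attribute was never set
  const value = settings[attr]
  return value ? attr + '="' + value + '"' : attr // bare attribute when valueless
}
console.log(["width", "height", "loop", "autoplay"].map(getAsHtmlAttribute).filter(i => i).join(" "))
// -> width="480" loop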
extends scrollParagraphParser cue stopwatch example stopwatch atoms cueAtom catchAllAtomType numberAtom javascript buildHtml() { const line = this.getLine() const id = this._getUid() this.setLine(\`* 0.0 \`) const html = super.buildHtml() this.setLine(line) return html } thinColumnsParser popularity 0.003690 extends abstractAftertextParser cueFromId catchAllAtomType integerAtom description Thin columns. javascript buildHtmlSnippet() { return "" } columnWidth = 35 columnGap = 20 buildHtml() { const {columnWidth, columnGap, maxColumns} = this const maxTotalWidth = maxColumns * columnWidth + (maxColumns - 1) * columnGap const stackContents = this.parent.clearSectionStack() // Starting columns always first clears the section stack. if (this.singleColumn) this.parent.sectionStack.push("
") // Single columns are self-closing after section break. return stackContents + \`
\` } get maxColumns() { return this.singleColumn ? 1 : parseInt(this.getAtom(1) ?? 10) } wideColumnsParser popularity 0.000386 extends thinColumnsParser description Wide columns. javascript columnWidth = 90 wideColumnParser popularity 0.003376 extends wideColumnsParser description A wide column section. boolean singleColumn true mediumColumnsParser popularity 0.003376 extends thinColumnsParser description Medium width columns. javascript columnWidth = 65 mediumColumnParser popularity 0.003376 extends mediumColumnsParser description A medium column section. boolean singleColumn true thinColumnParser popularity 0.003376 extends thinColumnsParser description A thin column section. boolean singleColumn true endColumnsParser popularity 0.007789 extends abstractAftertextParser cueFromId description End columns. javascript buildHtml() { return "
" } buildHtmlSnippet() { return "" } scrollContainerParser popularity 0.000096 cue container description A centered HTML div. catchAllAtomType cssLengthAtom extends abstractAftertextParser boolean isHtml true javascript get maxWidth() { return this.atoms[1] || "1200px" } buildHtmlSnippet() { return "" } tag = "div" defaultClassName = "scrollContainerParser" buildHtml() { this.parent.bodyStack.push("") return \`\` + super.buildHtml() } get text() { return ""} get closingTag() { return ""} debugSourceStackParser // useful for debugging description Print compilation steps. extends abstractAftertextParser cueFromId example printOriginalSource javascript get sources() { const {file} = this.root const passNames = ["codeAtStart", "fusedCode", "codeAfterMacroPass"] let lastCode = "" return passNames.map(name => { let code = file[name] if (lastCode === code) code = "[Unchanged]" lastCode = file[name] return { name, code }}) } buildHtml() { return \`\${this.buildTxt().replace(/\\\` } buildTxt() { return this.sources.map((pass, index) => \`Pass \${index + 1} - \${pass.name}\\n========\\n\${pass.code}\`).join("\\n\\n\\n") } abstractDinkusParser extends abstractAftertextParser boolean isDinkus true javascript buildHtml() { return \`
\${this.dinkus}
\` } defaultClass = "abstractDinkusParser" buildTxt() { return this.dinkus } get dinkus() { return this.content || this.getLine() } horizontalRuleParser popularity 0.000362 cue --- description A horizontal rule. extends abstractDinkusParser javascript buildHtml() { return \`
\` } scrollDinkusParser popularity 0.010828 cue *** description A dinkus. Breaks section. boolean isPopular true extends abstractDinkusParser javascript dinkus = "*" customDinkusParser cue dinkus description A custom dinkus. extends abstractDinkusParser endOfPostDinkusParser popularity 0.005740 extends abstractDinkusParser description End of post dinkus. boolean isPopular true cue **** javascript dinkus = "⁂" abstractIconButtonParser extends abstractAftertextParser cueFromId javascript buildHtmlSnippet() { return "" } buildHtml() { return \`\${this.svg}\` } downloadButtonParser popularity 0.006294 description Link to download/WWS page. extends abstractIconButtonParser catchAllAtomType urlAtom string style position:relative; string svg javascript get link() { return this.content } editButtonParser popularity 0.013963 description Print badge top right. extends abstractIconButtonParser catchAllAtomType urlAtom // SVG from https://github.com/32pixelsCo/zest-icons string svg javascript get link() { return this.content || this.root.editUrl || "" } get style() { return this.parent.findParticles("editButton")[0] === this ? "right:2rem;": "position:relative;" } emailButtonParser popularity 0.006294 description Email button. extends abstractIconButtonParser catchAllAtomType emailAddressAtom // todo: should just be "optionalAtomType" string style position:relative; string svg javascript get link() { const email = this.content || this.parent.get("email") return email ? \`mailto:\${email}\` : "" } homeButtonParser popularity 0.006391 description Home button. extends abstractIconButtonParser catchAllAtomType urlAtom string style left:2rem; string svg javascript get link() { return this.content || this.get("link") || "index.html" } theScrollButtonParser popularity 0.006294 description WWS button. extends abstractIconButtonParser string style position:relative; string svg javascript get link() { return "https://wws.scroll.pub" } abstractTextLinkParser extends abstractAftertextParser cueFromId javascript buildHtmlSnippet() { return "" } buildTxt() { return this.text } buildHtml() { return \`\` } editLinkParser popularity 0.001206 extends abstractTextLinkParser description Print "Edit" link. string text Edit javascript get link() { return this.root.editUrl || "" } scrollVersionLinkParser popularity 0.006294 extends abstractTextLinkParser string link https://scroll.pub description Print Scroll version. javascript get text() { return \`Built with Scroll v\${this.root.scrollVersion}\` } classicFormParser cue classicForm popularity 0.006391 description Generate input form for ScrollSet. extends abstractAftertextParser atoms cueAtom catchAllAtomType stringAtom string script string style javascript get inputs() { return this.root.measures.filter(measure => !measure.IsComputed).map((measure, index) => { const {Name, Question, IsRequired, Type} = measure const type = Type || "text" const placeholder = Question const ucFirst = Name.substr(0, 1).toUpperCase() + Name.substr(1) // \${index ? "" : "autofocus"} let tag = "" if (Type === "textarea") tag = \`\` else tag = \`\` return \`
\${tag}
\` }).join("\\n") } buildHtml() { const {isEmail, formDestination, callToAction, subject} = this return \`\${this.script}\${this.style}
\${this.inputs}\${this.footer}
\` } get callToAction() { return (this.isEmail ? "Submit via email" : (this.subject || "Post")) } get isEmail() { return this.formDestination.includes("@") } get formDestination() { return this.getAtom(1) || "" } get subject() { return this.getAtomsFrom(2)?.join(" ") || "" } get footer() { return "" } scrollFormParser extends classicFormParser cue scrollForm placeholderParser atoms cueAtom baseParser blobParser cueFromId single valueParser atoms cueAtom baseParser blobParser cueFromId single nameParser description Name for the post submission. atoms cueAtom stringAtom cueFromId single description Generate a Scroll Form. string copyFromExternal .codeMirror.css .scrollLibs.js .constants.js string requireOnce javascript get placeholder() { return this.getParticle("placeholder")?.subparticlesToString() || "" } get value() { return this.getParticle("value")?.subparticlesToString() || "" } get footer() { return "" } get name() { return this.get("name") || "particles" } get parsersBundle() { const parserRegex = /^[a-zA-Z0-9_]+Parser$/gm const clone = this.root.clone() const parsers = clone.filter(line => parserRegex.test(line.getLine())) return "\\n" + parsers.map(particle => { particle.prependLine("boolean suggestInAutocomplete true") return particle.toString() }).join("\\n") } get inputs() { const Name = this.name return \` \` } buildHtml(buildSettings) { return this.getHtmlRequirements(buildSettings) + super.buildHtml() } loremIpsumParser extends abstractAftertextParser cueFromId description Generate dummy text. catchAllAtomType integerAtom string placeholder Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. javascript get originalText() { return this.placeholder.repeat(this.howMany) } get howMany() { return this.getAtom(1) ? parseInt(this.getAtom(1)) : 1 } nickelbackIpsumParser extends loremIpsumParser string placeholder And one day, I’ll be at the door. And lose your wings to fall in love? To the bottom of every bottle. I’m on the ledge of the eighteenth story. Why must the blind always lead the blind? scrollModalParser description A modal dialog overlay. extends abstractAftertextParser boolean isHtml true cue modal string requireOnce javascript buildHtml(buildSettings) { this.parent.sectionStack.push("") return this.getHtmlRequirements(buildSettings) + \`
\` } printSnippetsParser popularity 0.000338 // todo: why are we extending AT here and not loops? Is it for class/id etc? extends abstractAftertextParser cueFromId atoms cueAtom catchAllAtomType tagWithOptionalFolderAtom description Prints snippets matching tag(s). example printSnippets index javascript makeSnippet(scrollProgram, buildSettings) { const {endSnippetIndex} = scrollProgram if (endSnippetIndex === -1) return scrollProgram.buildHtmlSnippet(buildSettings) + scrollProgram.editHtml const linkRelativeToCompileTarget = buildSettings.relativePath + scrollProgram.permalink const joinChar = "\\n" const html = scrollProgram .map((subparticle, index) => (index >= endSnippetIndex ? "" : subparticle.buildHtmlSnippet ? subparticle.buildHtmlSnippet(buildSettings) : subparticle.buildHtml(buildSettings))) .filter(i => i) .join(joinChar) .trim() + \`Continue reading...\` return html } get files() { const thisFile = this.parent.file const files = this.root.getFilesByTags(this.content, this.has("limit") ? parseInt(this.get("limit")) : undefined).filter(file => file.file !== thisFile) // allow sortBy lastCommit Time if (this.get("sortBy") === "commitTime") { return this.root.sortBy(files, file => file.scrollProgram.lastCommitTime).reverse() } return files } buildHtml() { const alreadyRequired = this.root.alreadyRequired const snippets = this.files.map(file => { const buildSettings = {relativePath: file.relativePath, alreadyRequired } return \`
\${this.makeSnippet(file.file.scrollProgram, buildSettings)}
\` }).join("\\n\\n") return \`
\${snippets}
\` } buildTxt() { return this.files.map(file => { const {scrollProgram} = file.file const {title, date, absoluteLink} = scrollProgram const ruler = "=".repeat(title.length) // Note: I tried to print the description here but the description generating code needs work. return \`\${title}\\n\${ruler}\\n\${date}\\n\${absoluteLink}\` }).join("\\n\\n") } scrollNavParser popularity 0.000048 extends printSnippetsParser cue nav description Titles and links in group(s). joinParser boolean allowTrailingWhitespace true cueFromId atoms cueAtom catchAllAtomType stringAtom javascript buildHtml() { return \`\` } printFullSnippetsParser popularity 0.000048 extends printSnippetsParser cueFromId description Print full pages in group(s). javascript makeSnippet(scrollProgram, buildSettings) { return scrollProgram.buildHtmlSnippet(buildSettings) + scrollProgram.editHtml } printShortSnippetsParser popularity 0.000048 extends printSnippetsParser cueFromId description Titles and descriptions in group(s). javascript makeSnippet(scrollProgram, buildSettings) { const { title, permalink, description, timestamp } = scrollProgram return \`
\${title}
\${description}...
\${this.root.dayjs(timestamp * 1000).format(\`MMMM D, YYYY\`)}
\` } printRelatedParser popularity 0.001182 description Print links to related posts. extends printSnippetsParser cueFromId javascript buildHtml() { const alreadyRequired = this.root.alreadyRequired const list = this.files.map(fileWrapper => { const {relativePath, file} = fileWrapper const {title, permalink, year} = file.scrollProgram return \`- \${title}\${year ? " (" + year + ")" : ""}\\n link \${relativePath + permalink}\` }).join("\\n") const items = this.parent.concat(list) const html = items.map(item => item.buildHtml()).join("\\n") items.forEach(item => item.destroy()) return html } scrollNoticesParser extends abstractAftertextParser description Display messages in URL query parameters. cue notices javascript buildHtml() { const id = this.htmlId return \`\` } abstractAssertionParser description Test above particle's output. extends abstractScrollParser string bindTo previous catchAllAtomType codeAtom cueFromId javascript buildHtml() { return \`\` } get particleToTest() { // If the previous particle is also an assertion particle, use the one before that. return this.previous.particleToTest ? this.previous.particleToTest : this.previous } get actual() {return this.particleToTest.buildHtml()} getErrors() { const {actual, expected} = this const errors = super.getErrors() if (this.areEqual(actual, expected)) return errors return errors.concat(this.makeError(\`'\${actual}' did not \${this.kind} '\${expected}'\`)) } get expected() { return this.length ? this.subparticlesToString() : (this.content ? this.content : "") } catchAllParser htmlLineParser assertHtmlEqualsParser extends abstractAssertionParser string kind equal javascript areEqual(actual, expected) { return actual === expected } // todo: why are we having to super here? getErrors() { return super.getErrors()} assertBuildIncludesParser extends abstractAssertionParser string kind include javascript areEqual(actual, expected) { return actual.includes(expected) } get actual() { return this.particleToTest.buildOutput()} getErrors() { return super.getErrors()} assertHtmlIncludesParser extends abstractAssertionParser string kind include javascript areEqual(actual, expected) { return actual.includes(expected) } getErrors() { return super.getErrors()} assertHtmlExcludesParser extends abstractAssertionParser string kind exclude javascript areEqual(actual, expected) { return !actual.includes(expected) } getErrors() { return super.getErrors()} assertIgnoreBelowErrorsParser description If you want to ignore any errors in the below particle in automated tests. extends abstractScrollParser string bindTo next cueFromId abstractPrintMetaParser extends abstractScrollParser cueFromId printAuthorsParser popularity 0.001664 description Prints author(s) byline. boolean isPopular true extends abstractPrintMetaParser // todo: we need pattern matching added to sdk to support having no params or a url and personNameAtom catchAllAtomType stringAtom example authors Breck Yunits https://breckyunits.com printAuthors javascript buildHtml() { return this.parent.getParticle("authors")?.buildHtmlForPrint() } buildTxt() { return this.parent.getParticle("authors")?.buildTxtForPrint() } printDateParser popularity 0.000434 extends abstractPrintMetaParser // If not present computes the date from the file's ctime. description Print published date. boolean isPopular true javascript buildHtml() { return \`
\${this.day}
\` } get day() { let day = this.content || this.root.date if (!day) return "" return this.root.dayjs(day).format(\`MMMM D, YYYY\`) } buildTxt() { return this.day } printFormatLinksParser description Prints links to other formats. extends abstractPrintMetaParser example printFormatLinks javascript buildHtml() { const permalink = this.root.permalink.replace(".html", "") // hacky const particle = this.appendSibling(\`HTML | TXT\`, \`class printDateParser\\nlink \${permalink}.html HTML\\nlink \${permalink}.txt TXT\\nstyle text-align:center;\`) const html = particle.buildHtml() particle.destroy() return html } buildTxt() { const permalink = this.root.permalink.replace(".html", "") return \`HTML | TXT\\n link \${permalink}.html HTML\\n link \${permalink}.txt TXT\` } abstractBuildCommandParser extends abstractScrollParser cueFromId atoms buildCommandAtom catchAllAtomType filePathAtom inScope slashCommentParser javascript isTopMatter = true buildHtml() { return "" } get extension() { return this.cue.replace("build", "") } buildOutput() { return this.root.compileTo(this.extension) } get outputFileNames() { return this.content?.split(" ") || [this.root.permalink.replace(".html", "." + this.extension.toLowerCase())] } async _buildFileType(extension, options) { const {root} = this const { fileSystem, folderPath, filename, filePath, path, lodash } = root const capitalized = lodash.capitalize(extension) const buildKeyword = "build" + capitalized const {outputFileNames} = this for (let name of outputFileNames) { try { await fileSystem.writeProduct(path.join(folderPath, name), root.compileTo(capitalized)) root.log(\`💾 Built \${name} from \${filename}\`) } catch (err) { console.error(\`Error while building '\${filePath}' with extension '\${extension}'\`) throw err } } } abstractBuildOneCommandParser // buildOne and buildTwo are just a dumb/temporary way to have CSVs/JSONs/TSVs build first. Will be merged at some point. extends abstractBuildCommandParser javascript async buildOne(options) { await this._buildFileType(this.extension, options) } buildParsersParser popularity 0.000096 description Compile to Parsers file. extends abstractBuildOneCommandParser buildCsvParser popularity 0.000096 description Compile to CSV file. extends abstractBuildOneCommandParser buildTsvParser popularity 0.000096 description Compile to TSV file. extends abstractBuildOneCommandParser buildJsonParser popularity 0.000096 description Compile to JSON file. extends abstractBuildOneCommandParser abstractBuildTwoCommandParser extends abstractBuildCommandParser javascript async buildTwo(options) { await this._buildFileType(this.extension, options) } buildCssParser popularity 0.000048 description Compile to CSS file. extends abstractBuildTwoCommandParser buildHtmlParser popularity 0.007645 description Compile to HTML file. extends abstractBuildTwoCommandParser boolean isPopular true javascript async buildTwo(options) { await this._copyExternalFiles(options) await super.buildTwo(options) } async _copyExternalFiles(options) { if (!this.isNodeJs()) return const {root} = this const externalFilesCopied = options.externalFilesCopied || {} // If this file uses a parser that has external requirements, // copy those from external folder into the destination folder. 
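// Note: externalFilesCopied is a per-build cache keyed first by folderPath and then
// by parserId, so each parser's external assets are written at most once per
// destination folder; scrollThemeParser is intentionally left uncached and re-copied.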
const { parsersRequiringExternals, folderPath, fileSystem, filename, parserIdIndex, path, Disk, externalsPath } = root if (!externalFilesCopied[folderPath]) externalFilesCopied[folderPath] = {} parsersRequiringExternals.forEach(parserId => { if (externalFilesCopied[folderPath][parserId]) return if (!parserIdIndex[parserId]) return parserIdIndex[parserId].map(particle => { const externalFiles = particle.copyFromExternal.split(" ") externalFiles.forEach(name => { const newPath = path.join(folderPath, name) fileSystem.writeProduct(newPath, Disk.read(path.join(externalsPath, name))) root.log(\`💾 Copied external file needed by \${filename} to \${name}\`) }) }) if (parserId !== "scrollThemeParser") // todo: generalize when not to cache externalFilesCopied[folderPath][parserId] = true }) } buildJsParser description Compile to JS file. extends abstractBuildTwoCommandParser buildRssParser popularity 0.000048 description Write RSS file. extends abstractBuildTwoCommandParser buildTxtParser popularity 0.007596 description Compile to TXT file. extends abstractBuildTwoCommandParser boolean isPopular true loadConceptsParser // todo: clean this up. just add smarter imports with globs? // this currently removes any "import" statements. description Import all concepts in a folder. extends abstractBuildCommandParser cueFromId atoms preBuildCommandAtom filePathAtom javascript async load() { const { Disk, path, importRegex } = this.root const folder = path.join(this.root.folderPath, this.getAtom(1)) const ONE_BIG_FILE = Disk.getFiles(folder).filter(file => file.endsWith(".scroll")).map(Disk.read).join("\\n\\n").replace(importRegex, "") this.parent.concat(ONE_BIG_FILE) //console.log(ONE_BIG_FILE) } buildHtml() { return "" } buildConceptsParser popularity 0.000024 cueFromId description Write concepts to csv+ files. extends abstractBuildCommandParser sortByParser cueFromId atoms cueAtom columnNameAtom javascript async buildOne() { const {root} = this const { fileSystem, folderPath, filename, path, permalink } = root const files = this.getAtomsFrom(1) if (!files.length) files.push(permalink.replace(".html", ".csv")) const sortBy = this.get("sortBy") for (let link of files) { await fileSystem.writeProduct(path.join(folderPath, link), root.compileConcepts(link, sortBy)) root.log(\`💾 Built concepts in \${filename} to \${link}\`) } } fetchParser description Download URL to disk. extends abstractBuildCommandParser cueFromId atoms preBuildCommandAtom urlAtom example fetch https://breckyunits.com/posts.csv fetch https://breckyunits.com/posts.csv renamed.csv javascript get url() { return this.getAtom(1) } get filename() { return this.getAtom(2) } async load() { await this.root.fetch(this.url, this.filename) } buildHtml() { return "" } buildMeasuresParser popularity 0.000024 cueFromId description Write measures to csv+ files. extends abstractBuildCommandParser sortByParser cueFromId atoms cueAtom columnNameAtom javascript async buildOne() { const {root} = this const { fileSystem, folderPath, filename, path, permalink } = root const files = this.getAtomsFrom(1) if (!files.length) files.push(permalink.replace(".html", ".csv")) const sortBy = this.get("sortBy") for (let link of files) { await fileSystem.writeProduct(path.join(folderPath, link), root.compileMeasures(link, sortBy)) root.log(\`💾 Built measures in \${filename} to \${link}\`) } } buildPdfParser popularity 0.000096 description Compile to PDF file. 
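// Illustrative sketch, not part of the grammar itself: how a build command's cue maps
// to a default output filename, mirroring the extension and outputFileNames getters of
// abstractBuildCommandParser above. The sample cue and permalink are ours.
const cue = "buildCsv"
const permalink = "posts.html"
const extension = cue.replace("build", "") // -> "Csv"
const defaultOutput = permalink.replace(".html", "." + extension.toLowerCase())
console.log(defaultOutput) // -> posts.csv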
extends abstractBuildCommandParser javascript async buildTwo(options) { if (!this.isNodeJs()) return "Only works in Node currently." const {root} = this const { filename } = root const outputFile = root.filenameNoExtension + ".pdf" // relevant source code for chrome: https://github.com/chromium/chromium/blob/a56ef4a02086c6c09770446733700312c86f7623/components/headless/command_handler/headless_command_switches.cc#L22 const command = \`/Applications/Google\\\\ Chrome.app/Contents/MacOS/Google\\\\ Chrome --headless --disable-gpu --no-pdf-header-footer --default-background-color=00000000 --no-pdf-background --print-to-pdf="\${outputFile}" "\${this.permalink}"\` // console.log(\`Node.js is running on architecture: \${process.arch}\`) try { const output = require("child_process").execSync(command, { stdio: "ignore" }) root.log(\`💾 Built \${outputFile} from \${filename}\`) } catch (error) { console.error(error) } } abstractInlineFileParser extends abstractScrollParser catchAllAtomType filePathAtom catchAllParser scrollFileAddressParser string joinChar ;\\n\\n string htmlTag script javascript get files() { const inline = this.atoms.slice(1) const children = this.map(particle => particle.cue) return inline.concat(children) } get contents() { return this.files.map(filename => this.root.readFile(filename)).join(this.joinChar) } buildHtml() { return \`<\${this.htmlTag}>/* \${this.files.join(" ")} */\\n\${this.contents}\` } scrollInlineCssParser description Inline CSS from files. popularity 0.007211 extends abstractInlineFileParser cue inlineCss string joinChar \\n\\n string htmlTag style javascript buildCss() { return this.contents } scrollInlineJsParser description Inline JS from files. popularity 0.007211 extends abstractInlineFileParser cue inlineJs javascript buildJs() { return this.contents } abstractTopLevelSingleMetaParser description Use these parsers once per file. extends abstractScrollParser inScope slashCommentParser cueFromId atoms metaCommandAtom javascript isTopMatter = true isSetterParser = true buildHtml() { return "" } testStrictParser description Make catchAllParagraphParser = error. extends abstractTopLevelSingleMetaParser scrollDateParser cue date popularity 0.006680 catchAllAtomType dateAtom description Set published date. extends abstractTopLevelSingleMetaParser boolean isPopular true example date 1/11/2019 printDate Hello world dateline abstractUrlSettingParser extends abstractTopLevelSingleMetaParser atoms metaCommandAtom urlAtom cueFromId editBaseUrlParser popularity 0.007838 description Override edit link baseUrl. extends abstractUrlSettingParser canonicalUrlParser description Override canonical URL. extends abstractUrlSettingParser openGraphImageParser popularity 0.000796 // https://ogp.me/ // If not defined, Scroll will try to generate it's own using the first image tag on your page. description Override Open Graph Image. extends abstractUrlSettingParser baseUrlParser popularity 0.009188 description Required for RSS and OpenGraph. extends abstractUrlSettingParser rssFeedUrlParser popularity 0.008850 description Set RSS feed URL. extends abstractUrlSettingParser editUrlParser catchAllAtomType urlAtom description Override edit link. extends abstractTopLevelSingleMetaParser siteOwnerEmailParser popularity 0.001302 description Set email address for site contact. extends abstractTopLevelSingleMetaParser cue email atoms metaCommandAtom emailAddressAtom faviconParser popularity 0.001688 catchAllAtomType stringAtom cue favicon description Favicon file. 
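// Illustrative sketch, not part of the grammar itself: how abstractInlineFileParser
// above assembles its file list. Atoms after the cue on the parser's own line are
// merged with one filename per child line. The sample filenames are ours.
const atoms = ["inlineJs", "a.js", "b.js"] // the line's atoms; atom 0 is the cue
const childCues = ["c.js"] // the cue of each child particle
const files = atoms.slice(1).concat(childCues)
console.log(files) // -> [ 'a.js', 'b.js', 'c.js' ]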
example favicon logo.png metatags buildHtml extends abstractTopLevelSingleMetaParser importOnlyParser popularity 0.033569 // This line will be not be imported into the importing file. description Don't build this file. cueFromId atoms preBuildCommandAtom extends abstractTopLevelSingleMetaParser javascript buildHtml() { return "" } inlineMarkupsParser popularity 0.000024 description Set global inline markups. extends abstractTopLevelSingleMetaParser cueFromId example inlineMarkups * // Disable * for bold _ u // Make _ underline htmlLangParser atoms metaCommandAtom stringAtom // for the tag. If not specified will be "en". See https://developer.mozilla.org/en-US/docs/Web/HTML/Global_attributes/lang description Override HTML lang attribute. extends abstractTopLevelSingleMetaParser openGraphDescriptionParser popularity 0.001688 catchAllAtomType stringAtom cue description description Meta tag description. extends abstractTopLevelSingleMetaParser permalinkParser popularity 0.000265 description Override output filename. extends abstractTopLevelSingleMetaParser atoms metaCommandAtom permalinkAtom scrollTagsParser popularity 0.006801 cue tags description Set tags. example tags All extends abstractTopLevelSingleMetaParser catchAllAtomType tagAtom scrollTitleParser popularity 0.007524 catchAllAtomType stringAtom cue title description Set title. example title Eureka printTitle extends abstractTopLevelSingleMetaParser boolean isPopular true scrollLinkTitleParser popularity 0.007524 catchAllAtomType stringAtom cue linkTitle description Text for links. example title My blog - Eureka linkTitle Eureka extends abstractTopLevelSingleMetaParser scrollChatParser popularity 0.000362 description A faux text chat conversation. catchAllParser chatLineParser cue chat extends abstractScrollParser example chat Hi 👋 javascript buildHtml() { return this.map((line, index) => line.asString ? \`
\${line.asString}
\` : "").join("") } buildTxt() { return this.subparticlesToString() } abstractDatatableProviderParser description A datatable. extends abstractScrollParser inScope scrollTableDataParser scrollTableDelimiterParser abstractTableVisualizationParser abstractTableTransformParser h1Parser h2Parser scrollQuestionParser htmlInlineParser scrollBrParser slashCommentParser javascript get visualizations() { return this.topDownArray.filter(particle => particle.isTableVisualization || particle.isHeader || particle.isHtml) } buildHtml(buildSettings) { return this.visualizations.map(particle => particle.buildHtml(buildSettings)) .join("\\n") .trim() } buildTxt() { return this.visualizations.map(particle => particle.buildTxt()) .join("\\n") .trim() } _coreTable get coreTable() { if (this._coreTable) return this._coreTable const {delimiter, delimitedData} = this return [] } get columnNames() { return [] } scrollTableParser extends abstractDatatableProviderParser popularity 0.002133 cue table example table printTable data year,count 1900,10 2000,122 2020,23 catchAllAtomType filePathAtom int atomIndex 1 javascript get delimiter() { const {filename} = this let delimiter = "" if (filename) { const extension = filename.split('?')[0].split(".").pop() if (extension === "json") delimiter = "json" if (extension === "particles") delimiter = "particles" if (extension === "csv") delimiter = "," if (extension === "tsv") delimiter = "\\t" if (extension === "ssv") delimiter = " " if (extension === "psv") delimiter = "|" } if (this.get("delimiter")) delimiter = this.get("delimiter") else if (!delimiter) { const header = this.delimitedData.split("\\n")[0] if (header.includes("\\t")) delimiter = "\\t" else if (header.includes(",")) delimiter = "," else delimiter = " " } return delimiter } get filename() { return this.getAtom(this.atomIndex) } get coreTable() { if (this._coreTable) return this._coreTable const {delimiter, delimitedData} = this if (delimiter === "json") { const obj = JSON.parse(delimitedData) let rows = [] // Optimal case: Array of objects if (Array.isArray(obj)) { rows = obj} else if (!Array.isArray(obj) && typeof obj === "object") { // Case 2: Nested array under a key const arrayKey = Object.keys(obj).find(key => Array.isArray(obj[key])) if (arrayKey) rows = obj[arrayKey] } // Case 3: Array of primitive values else if (Array.isArray(obj) && obj.length && typeof obj[0] !== "object") { rows = obj.map(value => ({ value })) } this._columnNames = rows.length ? 
Object.keys(rows[0]) : [] this._coreTable = rows return rows } else if (delimiter === "particles") { const d3lib = this.root.d3 this._coreTable = d3lib.dsvFormat(",").parse(new Particle(delimitedData).asCsv, d3lib.autoType) } else { const d3lib = this.root.d3 this._coreTable = d3lib.dsvFormat(delimiter).parse(delimitedData, d3lib.autoType) } this._columnNames = this._coreTable.columns delete this._coreTable.columns return this._coreTable } get columnNames() { // init coreTable to set columns const coreTable = this.coreTable return this._columnNames } async load() { if (this.filename) await this.root.fetch(this.filename) } get fileContent() { return this.root.readSyncFromFileOrUrl(this.filename) } get delimitedData() { // json csv tsv if (this.filename) return this.fileContent const dataParticle = this.getParticle("data") if (dataParticle) return dataParticle.subparticlesToString() // if not dataparticle and no filename, check [permalink].csv if (this.isNodeJs()) return this.root.readFile(this.root.permalink.replace(".html", "") + ".csv") return "" } clocParser extends scrollTableParser description Output results of cloc as table. cue cloc string copyFromExternal .clocLangs.txt javascript delimiter = "," get delimitedData() { const { execSync } = require("child_process") const results = execSync(this.command).toString().trim() const csv = results.split("\\n\\n").pop().replace(/,\\"github\\.com\\/AlDanial.+/, "") // cleanup output return csv } get command(){ return \`cloc --vcs git . --csv --read-lang-def=.clocLangs.txt \${this.content || ""}\` } scrollDependenciesParser extends scrollTableParser description Get files this file depends on. cue dependencies javascript delimiter = "," get delimitedData() { return \`file\\n\` + this.root.dependencies.join("\\n") } scrollDiskParser extends scrollTableParser description Output file into as table. cue disk javascript delimiter = "json" get delimitedData() { return this.isNodeJs() ? this.delimitedDataNodeJs : "" } get delimitedDataNodeJs() { const fs = require('fs'); const path = require('path'); const {folderPath} = this.root const folder = this.content ? path.join(folderPath, this.content) : folderPath function getDirectoryContents(dirPath) { const directoryContents = []; const items = fs.readdirSync(dirPath); items.forEach((item) => { const itemPath = path.join(dirPath, item); const stats = fs.statSync(itemPath); directoryContents.push({ name: item, type: stats.isDirectory() ? 'directory' : 'file', size: stats.size, lastModified: stats.mtime }); }); return directoryContents; } return JSON.stringify(getDirectoryContents(folder)) } scrollIrisParser extends scrollTableParser description Iris dataset from R.A. Fisher. cue iris example iris printTable scatter x SepalLength y SepalWidth javascript delimitedData = this.constructor.iris vegaSampleDataParser extends scrollTableParser description Sample dataset from Vega. cue sampleData atoms cueAtom vegaDataSetAtom example sampleData zipcodes.csv printTable javascript get filename() { return "https://ohayo.scroll.pub/ohayo/packages/vega/datasets/" + this.content } quickTableParser popularity 0.000024 extends scrollTableParser atoms urlAtom pattern ^[^\\s]+\\.(tsv|csv|ssv|psv|json)[^\\s]*$ int atomIndex 0 javascript get dependencies() { return [this.cue]} scrollConceptsParser description Load concepts as table. 
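// Illustrative sketch, not part of the grammar itself: the delimiter heuristic of
// scrollTableParser above as a standalone function. An explicit "delimiter" setting
// (omitted here) takes precedence over both branches.
const detectDelimiter = (filename, delimitedData) => {
  const byExtension = { json: "json", particles: "particles", csv: ",", tsv: "\t", ssv: " ", psv: "|" }
  if (filename) {
    const extension = filename.split("?")[0].split(".").pop()
    if (byExtension[extension]) return byExtension[extension]
  }
  // No filename hint: sniff the header row, preferring tabs, then commas, then spaces.
  const header = delimitedData.split("\n")[0]
  if (header.includes("\t")) return "\t"
  if (header.includes(",")) return ","
  return " "
}
console.log(detectDelimiter("data.tsv", "") === "\t") // true
console.log(detectDelimiter("", "year,count\n1900,10")) // ","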
extends abstractDatatableProviderParser cue concepts atoms cueAtom example concepts printTable javascript get coreTable() { return this.root.concepts } get columnNames() { return this.root.measures.map(col => col.Name) } abstractPostsParser description Load posts as table. extends abstractDatatableProviderParser cueFromId atoms cueAtom catchAllAtomType tagWithOptionalFolderAtom javascript async load() { const dependsOn = this.tags.map(tag => this.root.parseNestedTag(tag)).filter(i => i).map(i => i.folderPath) const {fileSystem} = this.root for (let folderPath of dependsOn) { // console.log(\`\${this.root.filePath} is loading: \${folderPath} in id '\${fileSystem.fusionId}'\`) await fileSystem.getLoadedFilesInFolder(folderPath, ".scroll") } } get tags() { return this.content?.split(" ") || [] } get files() { const thisFile = this.root.file // todo: we can include this file, but just not run asTxt const files = this.root.getFilesByTags(this.tags).filter(file => file.file !== thisFile) return files } get coreTable() { if (this._coreTable) return this._coreTable this._coreTable = this.files.map(file => this.postToRow(file)) return this._coreTable } postToRow(file) { const {relativePath} = file const {scrollProgram} = file.file const {title, permalink, asTxt, date, wordCount, minutes} = scrollProgram const text = asTxt.replace(/(\\t|\\n)/g, " ").replace(/ file.file.scrollProgram) const { title, baseUrl, description } = this.root return \` \${title} \${baseUrl} \${description} \${dayjs().format("ddd, DD MMM YYYY HH:mm:ss ZZ")} en-us \${scrollPrograms.map(program => program.toRss()).join("\\n")} \` } buildTxt() { return this.buildRss() } printSourceParser popularity 0.000024 description Print source for files in group(s). extends printFeedParser example printSource index buildTxt source.txt javascript buildHtml() { const files = this.root.getFilesByTags(this.content).map(file => file.file) return \`\${files.map(file => file.filePath + "\\n " + file.codeAtStart.replace(/\\n/g, "\\n ") ).join("\\n")}\` } printSiteMapParser popularity 0.000072 extends abstractPostsParser description Print text sitemap. example baseUrl http://test.com printSiteMap javascript buildHtml() { const { baseUrl } = this.root return this.files.map(file => baseUrl + file.relativePath + file.file.scrollProgram.permalink).join("\\n") } buildTxt() { return this.buildHtml() } get dependencies() { return this.files} codeParser popularity 0.001929 description A code block. catchAllParser lineOfCodeParser extends abstractScrollParser boolean isPopular true example code two = 1 + 1 javascript buildHtml() { return \`\${this.code.replace(/\\\` } buildTxt() { return "\`\`\`\\n" + this.code + "\\n\`\`\`" } get code() { return this.subparticlesToString() } cueFromId codeWithHeaderParser popularity 0.000169 cueFromId catchAllAtomType stringAtom extends codeParser example codeWithHeader math.py two = 1 + 1 javascript buildHtml() { return \`
\${this.content}
\${super.buildHtml()}
\` } buildTxt() { return "\`\`\`" + this.content + "\\n" + this.code + "\\n\`\`\`" } codeFromFileParser popularity 0.000169 cueFromId atoms cueAtom urlAtom extends codeWithHeaderParser example codeFromFile math.py javascript get code() { return this.root.readSyncFromFileOrUrl(this.content) } codeWithLanguageParser popularity 0.000458 description Use this to specify the language of the code block, such as csvCode or rustCode. extends codeParser pattern ^[a-zA-Z0-9_]+Code$ debugParsersParser description Print the parsers used. extends codeParser cueFromId javascript buildParsers() { return this.code} get code() { let code = new Particle(this.root.definition.toString()) // Remove comments code.filter((line) => line.getLine().startsWith("//")).forEach((particle) => particle.destroy()) // Remove blank lines code = code.toString().replace(/^\\n/gm, "") return code } abstractScrollWithRequirementsParser extends abstractScrollParser cueFromId javascript buildHtml(buildSettings) { return this.getHtmlRequirements(buildSettings) + this.buildInstance() } copyButtonsParser popularity 0.001471 extends abstractScrollWithRequirementsParser description Copy code widget. javascript buildInstance() { return "" } string requireOnce abstractTableVisualizationParser extends abstractScrollWithRequirementsParser boolean isTableVisualization true javascript get columnNames() { return this.parent.columnNames } heatrixParser cueFromId example heatrix '2007 '2008 '2009 '2010 '2011 '2012 '2013 '2014 '2015 '2016 '2017 '2018 '2019 '2020 '2021 '2022 '2023 '2024 4 11 23 37 3 14 12 0 0 0 5 1 2 11 15 10 12 56 description A heatmap matrix data visualization. catchAllParser heatrixCatchAllParser extends abstractTableVisualizationParser javascript buildHtml() { // A hacky but simple way to do this for now. const advanced = new Particle("heatrixAdvanced") advanced.appendLineAndSubparticles("table", "\\n " + this.tableData.replace(/\\n/g, "\\n ")) const particle = this.appendSibling("heatrixAdvanced", advanced.subparticlesToString()) const html = particle.buildHtml() particle.destroy() return html } get tableData() { const {coreTable} = this.parent if (!coreTable) return this.subparticlesToString() let table = new Particle(coreTable).asSsv if (this.parent.cue === "transpose") { // drop first line after transpose const lines = table.split("\\n") lines.shift() table = lines.join("\\n") } // detect years and make strings const lines = table.split("\\n") const yearLine = / \\d{4}(\\s+\\d{4})+$/ if (yearLine.test(lines[0])) { lines[0] = lines[0].replace(/ /g, " '") table = lines.join("\\n") } return table } heatrixAdvancedParser popularity 0.000048 cueFromId catchAllParser heatrixCatchAllParser extends abstractTableVisualizationParser description Advanced heatrix. 
example heatrix table %h10; '2007 '2008 '2009 12 4 323 scale #ebedf0 0 #c7e9c0 100 #a1d99b 400 #74c476 1600 javascript buildHtml() { class Heatrix { static HeatrixId = 0 uid = Heatrix.HeatrixId++ constructor(program) { const isDirective = atom => /^(f|l|w|h)\\d+$/.test(atom) || atom === "right" || atom === "left" || atom.startsWith("http://") || atom.startsWith("https://") || atom.endsWith(".html") const particle = new Particle(program) this.program = particle const generateColorBinningString = (data, colors) => { const sortedData = [...data].sort((a, b) => a - b); const n = sortedData.length; const numBins = colors.length; // Calculate the indices for each quantile const indices = []; for (let i = 1; i < numBins; i++) { indices.push(Math.floor((i / numBins) * n)); } // Get the quantile values and round them const thresholds = indices.map(index => Math.round(sortedData[index])); // Generate the string let result = ''; colors.forEach((color, index) => { const threshold = index === colors.length - 1 ? thresholds[index - 1] * 2 : thresholds[index]; result += \`\${color} \${threshold}\\n\`; }); return result.trim(); } const buildScale = (table) => { const numbers = table.split("\\n").map(line => line.split(" ")).flat().filter(atom => !isDirective(atom)).map(atom => parseFloat(atom)).filter(number => !isNaN(number)) const colors = ['#ebedf0', '#c7e9c0', '#a1d99b', '#74c476', '#41ab5d', '#238b45', '#005a32']; numbers.unshift(0) return generateColorBinningString(numbers, colors); } const table = particle.getParticle("table").subparticlesToString() const scale = particle.getParticle("scale")?.subparticlesToString() || buildScale(table) const thresholds = [] const colors = [] scale.split("\\n").map((line) => { const parts = line.split(" ") thresholds.push(parseFloat(parts[1])) colors.push(parts[0]) }) const colorCount = colors.length const colorFunction = (value) => { if (isNaN(value)) return "" // #ebedf0 for (let index = 0; index < colorCount; index++) { const threshold = thresholds[index] if (value <= threshold) return colors[index] } return colors[colorCount - 1] } const directiveDelimiter = ";" const getSize = (directives, letter) => directives .filter((directive) => directive.startsWith(letter)) .map((dir) => dir.replace(letter, "") + "px")[0] ?? "" this.table = table.split("\\n").map((line) => line .trimEnd() .split(" ") .map((atom) => { const atoms = atom.split(directiveDelimiter).filter((atom) => !isDirective(atom)).join("") const directivesInThisAtom = atom .split(directiveDelimiter) .filter(isDirective) const value = parseFloat(atoms) const label = atoms.includes("'") ? atoms.split("'")[1] : atoms const alignment = directivesInThisAtom.includes("right") ? "right" : directivesInThisAtom.includes("left") ? 
"left" : "" const color = colorFunction(value) const width = getSize(directivesInThisAtom, "w") const height = getSize(directivesInThisAtom, "h") const fontSize = getSize(directivesInThisAtom, "f") const lineHeight = getSize(directivesInThisAtom, "l") || height const link = directivesInThisAtom.filter(i => i.startsWith("http") || i.endsWith(".html"))[0] const style = { "background-color": color, width, height, "font-size": fontSize, "line-height": lineHeight, "text-align": alignment, } Object.keys(style).filter(key => !style[key]).forEach((key) => delete style[key]) return { value, label, style, link, } }) ) } get html() { const { program } = this const cssId = \`#heatrix\${this.uid}\` const defaultWidth = "40px" const defaultHeight = "40px" const fontSize = "10px" const lineHeight = defaultHeight const style = \`\` const firstRow = this.table[0] return ( \`
\${style}\` + this.table .map((row, rowIndex) => { if (!rowIndex) return "" const rowStyle = row[0].style return \`
\${row .map((atom, columnIndex) => { if (!columnIndex) return "" const columnStyle = firstRow[columnIndex]?.style || {} let { value, label, style, link } = atom const extendedStyle = Object.assign( {}, rowStyle, columnStyle, style ) const inlineStyle = Object.keys(extendedStyle) .map((key) => \`\${key}:\${extendedStyle[key]};\`) .join("") let valueClass = value ? " valueAtom" : "" const href = link ? \` href="\${link}"\` : "" return \`\` }) .join("")}
\` }) .join("\\n") + "
" ).replace(/\\n/g, "") } } return new Heatrix(this.subparticlesToString().trim()).html } mapParser latParser atoms cueAtom floatAtom cueFromId single longParser atoms cueAtom floatAtom cueFromId single tilesParser atoms cueAtom tileOptionAtom cueFromId single zoomParser atoms cueAtom integerAtom cueFromId single geolocateParser description Geolocate user. atoms cueAtom cueFromId single radiusParser atoms cueAtom floatAtom cueFromId single fillOpacityParser atoms cueAtom floatAtom cueFromId single fillColorParser atoms cueAtom colorAtom cueFromId single colorParser atoms cueAtom colorAtom cueFromId single heightParser atoms cueAtom floatAtom cueFromId single hoverParser atoms cueAtom catchAllAtomType colorAtom cueFromId single extends abstractTableVisualizationParser description Map widget. string copyFromExternal .leaflet.css .leaflet.js .scrollLibs.js string requireOnce javascript buildInstance() { const height = this.get("height") || 500 const id = this._getUid() const obj = this.toObject() const template = {} const style = height !== "full" ? \`height: \${height}px;\` : \`height: 100%; position: fixed; z-index: -1; left: 0; top: 0; width: 100%;\` const strs = ["color", "fillColor"] const nums = ["radius", "fillOpacity"] strs.filter(i => obj[i]).forEach(i => template[i] = obj[i]) nums.filter(i => obj[i]).forEach(i => template[i] = parseFloat(obj[i])) const mapId = \`map\${id}\` return \`
\` } abstractPlotParser // Observablehq extends abstractTableVisualizationParser string copyFromExternal .d3.js .plot.js string requireOnce example plot inScope abstractColumnNameParser widthParser cueFromId atoms cueAtom integerAtom heightParser cueFromId atoms cueAtom integerAtom titleParser extends abstractPlotLabelParser subtitleParser extends abstractPlotLabelParser captionParser extends abstractPlotLabelParser javascript buildInstance() { const id = "plot" + this._getUid() return \`
\` } get sortExpression() { const sort = this.get("sort") if (!sort) return "" let sort_expr = "" if (sort.startsWith("-")) { // Sort by a value descending const sortCol = sort.slice(1) sort_expr = \`, sort: {x: "y", reverse: true}\` } else if (sort.includes(" ")) { // Fixed order specified const order = sort.split(" ") sort_expr = \`, sort: {x: (a,b) => { const order = \${JSON.stringify(order)}; return order.indexOf(a) - order.indexOf(b) }}\` } else if (sort === "asc") { sort_expr = \`, sort: {x: "x"}\` } else if (sort === "desc") { sort_expr = \`, sort: {x: "x", reverse: true}\` } return sort_expr } get marks() { // just for testing purposes return \`Plot.rectY({length: 10000}, Plot.binX({y: "count"}, {x: d3.randomNormal()}))\` } get dataCode() { const {coreTable} = this.parent return \`d3.csvParse(\\\`\${new Particle(coreTable).asCsv}\\\`, d3.autoType)\` } get plotOptions() { return \`{ title: "\${this.get("title") || ""}", subtitle: "\${this.get("subtitle") || ""}", caption: "\${this.get("caption") || ""}", symbol: {legend: \${this.has("symbol")}}, color: {legend: \${this.has("fill") || this.has("stroke")}}, grid: \${this.get("grid") !== "false"}, marks: [\${this.marks}], width: \${this.get("width") || 640}, height: \${this.get("height") || 400}, }\` } plotScatterplotParser cue scatterplot extends abstractPlotParser description Scatterplot Widget. example iris scatterplot javascript get marks() { const x = this.get("x") const y = this.get("y") const text = this.get("label") return \`Plot.dot(data, { x: get("\${x}", 0), y: get("\${y}", 1), r: get("\${this.get("radius")}"), fill: get("\${this.get("fill")}"), tip: true\${this.sortExpression}, symbol: get("\${this.get("symbol")}")} ), Plot.text(data, {x: get("\${x}",0), y: get("\${y}", 1), text: "\${text}", dy: -6, lineAnchor: "bottom"})\` } plotBarchartParser cue barchart extends abstractPlotParser description Bar chart widget. example iris barchart javascript get marks() { const x = this.get("x") const y = this.get("y") const text = this.get("label") const fill = this.get("fill") return \`Plot.barY(data, { x: get("\${x}", 0), y: get("\${y}", 1), fill: get("\${fill}"), tip: true\${this.sortExpression} }), Plot.ruleY([0])\` } plotLineChartParser cue linechart extends abstractPlotParser description Line chart widget. example iris linechart x SepalLength y SepalWidth javascript get marks() { const x = this.get("x") const y = this.get("y") const stroke = this.get("stroke") || "steelblue" const strokeWidth = this.get("strokeWidth") || 2 const strokeLinecap = this.get("strokeLinecap") || "round" const fill = this.get("fill") return \`Plot.line(data, { x: get("\${x}", 0), y: get("\${y}", 1), stroke: "\${stroke}", fill: get("\${fill}"), strokeWidth: \${strokeWidth}, strokeLinecap: "\${strokeLinecap}"\${this.sortExpression} })\` } sparklineParser popularity 0.000024 description Sparkline widget. extends abstractTableVisualizationParser example sparkline 1 2 3 4 5 string copyFromExternal .sparkline.js string requireOnce catchAllAtomType numberAtom // we need pattern matching inScope scrollYParser javascript buildInstance() { const id = "spark" + this._getUid() const {columnValues} = this const start = this.has("start") ? 
parseInt(this.get("start")) : 0 const width = this.get("width") || 100 const height = this.get("height") || 30 const lineColor = this.get("color") || "black" return \`\` } get columnValues() { if (this.content) return this.content.split(" ").map(str => parseFloat(str)) const {coreTable} = this.parent if (coreTable) { const columnName = this.get("y") || Object.keys(coreTable[0]).find(key => typeof coreTable[0][key] === 'number') return coreTable.map(row => row[columnName]) } } printColumnParser popularity 0.000024 description Print one column extends abstractTableVisualizationParser example printColumn tags catchAllAtomType columnNameAtom joinParser boolean allowTrailingWhitespace true cueFromId atoms cueAtom catchAllAtomType stringAtom javascript buildHtml() { return this.columnValues.join(this.join) } buildTxt() { return this.columnValues.join(this.join) } get join() { return this.get("join") || "\\n" } get columnName() { return this.atoms[1] } get columnValues() { return this.parent.coreTable.map(row => row[this.columnName]) } printTableParser popularity 0.001085 cueFromId description Print table. extends abstractTableVisualizationParser javascript get tableHeader() { return this.columns.filter(col => !col.isLink).map(column => \`\${column.name}\\n\`) } get columnNames() { return this.parent.columnNames } buildJson() { return JSON.stringify(this.coreTable, undefined, 2) } buildCsv() { return new Particle(this.coreTable).asCsv } buildTsv() { return new Particle(this.coreTable).asTsv } get columns() { const {columnNames} = this return columnNames.map((name, index) => { const isLink = name.endsWith("Link") const linkIndex = columnNames.indexOf(name + "Link") return { name, isLink, linkIndex } }) } toRow(row) { const {columns} = this const atoms = columns.map(col => row[col.name]) let str = "" let column = 0 const columnCount = columns.length while (column < columnCount) { const col = columns[column] column++ const content = ((columnCount === column ? atoms.slice(columnCount - 1).join(" ") : atoms[column - 1]) ?? "").toString() if (col.isLink) continue const isTimestamp = col.name.toLowerCase().includes("time") && /^\\d{10}(\\d{3})?$/.test(content) const text = isTimestamp ? new Date(parseInt(content.length === 10 ? content * 1000 : content)).toLocaleString() : content let tagged = text const link = atoms[col.linkIndex] const isUrl = content.match(/^https?\\:[^ ]+$/) if (col.linkIndex > -1 && link) tagged = \`\${text}\` else if (col.name.endsWith("Url")) tagged = \`\${col.name.replace("Url", "")}\` else if (isUrl) tagged = \`\${text}\` str += \`\${tagged}\\n\` } return str } get coreTable() { return this.parent.coreTable } get tableBody() { return this.coreTable .map(row => \`\${this.toRow(row)}\`) .join("\\n") } buildHtml() { return \`\${this.tableHeader.join("\\n")}\${this.tableBody}
\` } buildTxt() { return this.parent.delimitedData || new Particle(this.coreTable).asCsv } katexParser popularity 0.001592 extends abstractScrollWithRequirementsParser catchAllAtomType codeAtom catchAllParser lineOfCodeParser example katex \\text{E} = \\text{T} / \\text{A}! description KaTex widget for typeset math. string copyFromExternal .katex.min.css .katex.min.js string requireOnce javascript buildInstance() { const id = this._getUid() const content = this.content === undefined ? "" : this.content return \`
\${content + this.subparticlesToString()}
\` } buildTxt() { return (this.content ? this.content : "") + this.subparticlesToString() } helpfulNotFoundParser popularity 0.000048 extends abstractScrollWithRequirementsParser catchAllAtomType filePathAtom string copyFromExternal .helpfulNotFound.js description Helpful not found widget. javascript buildInstance() { return \`

\` } slideshowParser // Left and right arrows navigate. description Slideshow widget. *** delimits slides. extends abstractScrollWithRequirementsParser string copyFromExternal .jquery-3.7.1.min.js .slideshow.js example slideshow Why did the cow cross the road? *** Because it wanted to go to the MOOOO-vies. *** THE END **** javascript buildHtml() { return \`
\` } tableSearchParser popularity 0.000072 extends abstractScrollWithRequirementsParser string copyFromExternal .jquery-3.7.1.min.js .datatables.css .dayjs.min.js .datatables.js .tableSearch.js string requireOnce // adds to all tables on page description Table search and sort widget. javascript buildInstance() { return "" } abstractCommentParser description Prints nothing. catchAllAtomType commentAtom atoms commentAtom extends abstractScrollParser baseParser blobParser string bindTo next javascript buildHtml() { return \`\` } catchAllParser commentLineParser commentParser popularity 0.000193 extends abstractCommentParser cueFromId counterpointParser description Counterpoint comment. Prints nothing. extends commentParser cue ! slashCommentParser popularity 0.005643 extends abstractCommentParser cue // boolean isPopular true description A comment. Prints nothing. thanksToParser description Acknowledgements comment. Prints nothing. extends abstractCommentParser cueFromId scrollClearStackParser popularity 0.000096 cue clearStack description Clear body stack. extends abstractScrollParser boolean isHtml true javascript buildHtmlSnippet() { return "" } buildHtml() { return this.root.clearBodyStack().trim() } cssParser popularity 0.007211 extends abstractScrollParser description A style tag. cueFromId catchAllParser cssLineParser catchAllAtomType cssAnyAtom javascript buildHtml() { return \`\` } get css() { return this.content ?? this.subparticlesToString() } buildCss() { return this.css } scrollBackgroundColorParser description Quickly set CSS background. popularity 0.007211 extends abstractScrollParser cue background catchAllAtomType cssAnyAtom javascript buildHtml() { return \`\` } scrollFontColorParser description Quickly set CSS font-color. popularity 0.007211 extends abstractScrollParser cue color catchAllAtomType cssAnyAtom javascript buildHtml() { return \`\` } scrollFontParser description Quickly set font family. popularity 0.007211 extends abstractScrollParser cue font atoms cueAtom fontFamilyAtom catchAllAtomType cssAnyAtom javascript buildHtml() { const font = this.content === "Slim" ? "Helvetica Neue; font-weight:100;" : this.content return \`\` } abstractQuickIncludeParser popularity 0.007524 extends abstractScrollParser atoms urlAtom javascript get dependencies() { return [this.filename]} get filename() { return this.getAtom(0) } quickCssParser popularity 0.007524 description Make a CSS tag. extends abstractQuickIncludeParser atoms urlAtom pattern ^[^\\s]+\\.(css)$ javascript buildHtml() { return \`\` } buildHtmlSnippet() { return "" } quickIncludeHtmlParser popularity 0.007524 description Include an HTML file. extends abstractQuickIncludeParser atoms urlAtom pattern ^[^\\s]+\\.(html|htm)$ javascript buildHtml() { return this.root.readFile(this.filename) } quickScriptParser popularity 0.007524 description Make a Javascript tag. extends abstractQuickIncludeParser atoms urlAtom pattern ^[^\\s]+\\.(js)$ javascript buildHtml() { return \`\` } buildHtmlSnippet() { return "" } scrollDashboardParser popularity 0.000145 description Key stats in large font. 
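// Dashboard lines are consumed three at a time; each group of three becomes one row of stat cells (see makeRow below).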
catchAllParser lineOfCodeParser cue dashboard extends abstractScrollParser example dashboard #2 Popularity 30 Years Old $456 Revenue javascript get tableBody() { const items = this.topDownArray let str = "" for (let i = 0; i < items.length; i = i + 3) { str += this.makeRow(items.slice(i, i + 3)) } return str } makeRow(items) { return \`\` + items.map(particle => \`\${particle.cue}\${particle.content}\`).join("\\n") + \`\\n\` } buildHtml() { return \`\${this.tableBody}
\` } buildTxt() { return this.subparticlesToString() } belowAsCodeParser popularity 0.000651 description Print code below. string bindTo next extends abstractScrollParser catchAllAtomType integerAtom cueFromId javascript method = "next" get selectedParticles() { const { method } = this let code = "" let particles = [] let next = this[method] let {howMany} = this while (howMany) { particles.push(next) next = next[method] howMany-- } if (this.reverse) particles.reverse() return particles } get code() { return this.selectedParticles.map(particle => particle.asString).join("\\n") } reverse = false buildHtml() { return \`\${this.code.replace(/\\\` } get howMany() { let howMany = parseInt(this.getAtom(1)) if (!howMany || isNaN(howMany)) howMany = 1 return howMany } debugBelowParser description Inspect particle below. extends belowAsCodeParser string copyFromExternal .debug.css javascript get code() { const mapFn = particle => { const atomTypes = particle.lineAtomTypes.split(" ") return \`
\${particle.constructor.name}\${particle.atoms.map((atom, index) => \`\${atom}\${atomTypes[index]}\`).join(" ")}\${(particle.length ? \`
\` + particle.map(mapFn).join("
") + \`
\` : "")}
\`} return this.selectedParticles.map(mapFn).join("
") } buildHtml() { return \`\` + this.code } buildTxt() { const mapFn = particle => { const atomTypes = particle.lineAtomTypes.split(" ") return \`\${particle.constructor.name} \${particle.atoms.map((atom, index) => \`\${atomTypes[index]}:\${atom}\`).join(" ")}\${(particle.length ? \`\\n \` + particle.map(mapFn).join("\\n") + \`\` : "")}\`} return this.selectedParticles.map(mapFn).join("\\n") } buildParsers() {return this.buildTxt()} debugAboveParser description Inspect particle above. extends debugBelowParser string bindTo previous javascript method = "previous" reverse = true debugAllParser description Inspect entire document. extends debugBelowParser javascript get selectedParticles() { return this.root.getSubparticles()} belowAsCodeUntilParser description Print code above until match. extends belowAsCodeParser catchAllAtomType codeAtom example belowAsCode counter 1 second javascript get howMany() { let howMany = 1 const query = this.content let particle = this.next while (particle !== this) { if (particle.getLine().startsWith(query)) return howMany particle = particle.next howMany++ } return howMany } aboveAsCodeParser popularity 0.000482 string bindTo previous description Print code above. example counter 1 second aboveAsCode extends belowAsCodeParser javascript method = "previous" reverse = true belowAsHtmlParser extends belowAsCodeParser description Displays html output of next particle in a code block. cueFromId javascript get code() { return this.selectedParticles.filter(p => p.buildHtml).map(p => p.buildHtml()).join("\\n") } aboveAsHtmlParser description Displays html output of previous particle in a code block. extends belowAsHtmlParser javascript method = "previous" reverse = true scrollDefParser popularity 0.004244 description Parser short form. pattern ^[a-zA-Z0-9_]+Def extends abstractScrollParser catchAllAtomType stringAtom example urlDef What is the URL? javascript buildParsers(index) { const idStuff = index ? "" : \`boolean isMeasure true boolean isMeasureRequired true boolean isConceptDelimiter true\` const description = this.content const cue = this.cue.replace("Def", "") const sortIndex = 1 + index/10 return \`\${cue}DefParser cue \${cue} extends abstractStringMeasureParser description \${description} float sortIndex \${sortIndex} \${idStuff}\`.trim() } hakonParser cueFromId extends abstractScrollParser description Compile Hakon to CSS. catchAllParser hakonContentParser javascript buildHtml() { return \`\` } get css() { const {hakonParser} = this.root return new hakonParser(this.subparticlesToString()).compile() } buildCss() { return this.css } hamlParser popularity 0.007524 description HTML tag via HAML syntax. extends abstractScrollParser atoms urlAtom catchAllAtomType stringAtom pattern ^%?[\\w\\.]+#[\\w\\.]+ * javascript get tag() { return this.atoms[0].split(/[#\\.]/).shift().replace("%", "") } get htmlId() { const idMatch = this.atoms[0].match(/#([\\w-]+)/) return idMatch ? idMatch[1] : "" } get htmlClasses() { return this.atoms[0].match(/\\.([\\w-]+)/g)?.map(cls => cls.slice(1)) || []; } buildHtml() { const {htmlId, htmlClasses, content, tag} = this this.parent.sectionStack.unshift(\`\`) const attrs = [htmlId ? ' id="' + htmlId + '"' : "", htmlClasses.length ? ' class="' + htmlClasses.join(" ") + '"' : ""].join(" ").trim() return \`<\${tag}\${attrs ? 
" " + attrs : ""}>\${content || ""}\` } buildTxt() { return this.content } hamlTagParser // Match plain tags like %h1 extends hamlParser pattern ^%[^#]+$ abstractHtmlParser extends abstractScrollParser catchAllParser htmlLineParser catchAllAtomType htmlAnyAtom javascript buildHtml() { return \`\${this.content ?? ""}\${this.subparticlesToString()}\` } buildTxt() { return "" } htmlParser popularity 0.000048 extends abstractHtmlParser description HTML one liners or blocks. cueFromId htmlInlineParser popularity 0.005788 extends abstractHtmlParser atoms htmlAnyAtom boolean isHtml true pattern ^< description Inline HTML. boolean isPopular true javascript buildHtml() { return \`\${this.getLine() ?? ""}\${this.subparticlesToString()}\` } scrollBrParser popularity 0.000096 cue br description A break. extends abstractScrollParser catchAllAtomType integerAtom boolean isHtml true javascript buildHtml() { return \`
\`.repeat(parseInt(this.getAtom(1) || 1)) } iframesParser popularity 0.000121 cueFromId catchAllAtomType urlAtom extends abstractScrollParser description One or more iframes. example iframes frame.html javascript buildHtml() { return this.atoms.slice(1).map(url => \`\`).join("\\n") } abstractCaptionedParser extends abstractScrollParser atoms cueAtom urlAtom inScope captionAftertextParser slashCommentParser cueFromId javascript buildHtml(buildSettings) { const caption = this.getParticle("caption") const captionFig = caption ? \`
\${caption.buildHtml()}
\` : "" const {figureWidth} = this const widthStyle = figureWidth ? \`width:\${figureWidth}px; margin: auto;\` : "" const float = this.has("float") ? \`margin: 20px; float: \${this.get("float")};\` : "" return \`
\${this.getFigureContent(buildSettings)}\${captionFig}
\` } get figureWidth() { return this.get("width") } scrollImageParser cue image popularity 0.005908 description An img tag. boolean isPopular true extends abstractCaptionedParser int atomIndex 1 example image screenshot.png caption A caption. inScope classMarkupParser aftertextIdParser scrollLinkParser linkTargetParser openGraphParser javascript get dimensions() { const width = this.get("width") const height = this.get("height") if (width || height) return {width, height} if (!this.isNodeJs()) return {} const src = this.filename // If its a local image, get the dimensions and put them in the HTML // to avoid flicker if (src.startsWith("http:") || src.startsWith("https:")) return {} if (this._dimensions) return this._dimensions try { const sizeOf = require("image-size") const path = require("path") const fullImagePath = path.join(this.root.folderPath, src) this._dimensions = sizeOf(fullImagePath) return this._dimensions } catch (err) { console.error(err) } return {} } get figureWidth() { return this.dimensions.width } get filename() { return this.getAtom(this.atomIndex) } get dependencies() { return [this.filename]} getFigureContent(buildSettings) { const linkRelativeToCompileTarget = (buildSettings ? (buildSettings.relativePath ?? "") : "") + this.filename const {width, height} = this.dimensions let dimensionAttributes = width || height ? \`width="\${width}" height="\${height}" \` : "" // Todo: can we reuse more code from aftertext? const className = this.has("class") ? \` class="\${this.get("class")}" \` : "" const id = this.has("id") ? \` id="\${this.get("id")}" \` : "" const clickLink = this.find(particle => particle.definition.isOrExtendsAParserInScope(["scrollLinkParser"])) || linkRelativeToCompileTarget const target = this.has("target") ? this.get("target") : (this.has("link") ? "" : "_blank") return \`\` } buildTxt() { const subparticles = this.filter(particle => particle.buildTxt).map(particle => particle.buildTxt()).filter(i => i).join("\\n") return "[Image Omitted]" + (subparticles ? "\\n " + subparticles.replace(/\\n/g, "\\n ") : "") } quickImageParser popularity 0.005788 extends scrollImageParser atoms urlAtom pattern ^[^\\s]+\\.(jpg|jpeg|png|gif|webp|svg|bmp) int atomIndex 0 qrcodeParser extends abstractCaptionedParser description Make a QR code from a link. example qrcode https://scroll.pub javascript getFigureContent() { const url = this.atoms[1] const isNode = this.isNodeJs() if (isNode) { const {externalsPath} = this.root const path = require("path") const {qrcodegen, toSvgString} = require(path.join(externalsPath, ".qrcodegen.js")) const QRC = qrcodegen.QrCode; const qr0 = QRC.encodeText(url, QRC.Ecc.MEDIUM); const svg = toSvgString(qr0, 4); // See qrcodegen-input-demo return svg } return \`Not yet supported in browser.\` } youtubeParser popularity 0.000121 extends abstractCaptionedParser // Include the YouTube embed URL such as https://www.youtube.com/embed/CYPYZnVQoLg description A YouTube video widget. example youtube https://www.youtube.com/watch?v=lO8blNtYYBA javascript getFigureContent() { const url = this.getAtom(1).replace("youtube.com/watch?v=", "youtube.com/embed/") return \`
\` } youTubeParser extends youtubeParser tags deprecate // Deprecated. You youtube all lowercase. importParser description Import a file. popularity 0.007524 cueFromId atoms preBuildCommandAtom extends abstractScrollParser catchAllAtomType filePathAtom javascript buildHtml() { return "" } example import header.scroll scrollImportedParser description Inserted at import pass. boolean suggestInAutocomplete false cue imported atoms preBuildCommandAtom extends abstractScrollParser baseParser blobParser catchAllAtomType filePathAtom javascript buildHtml() { return "" } getErrors() { if (this.get("exists") === "false" && this.previous.getLine() !== "// optional") return [this.makeError(\`File '\${this.atoms[1]}' does not exist.\`)] return [] } quickImportParser popularity 0.007524 description Import a Scroll or Parsers file. extends abstractScrollParser boolean isPopular true atoms urlAtom pattern ^[^\\s]+\\.(scroll|parsers)$ javascript buildHtml() { return "" } example header.scroll scriptParser extends abstractScrollParser description Print script tag. cueFromId catchAllParser scriptLineParser catchAllAtomType javascriptAnyAtom javascript buildHtml() { return \`\` } get scriptContent() { return this.content ?? this.subparticlesToString() } buildJs() { return this.scriptContent } jsonScriptParser popularity 0.007524 cueFromId description Include JSON and assign to window. extends abstractScrollParser atoms cueAtom urlAtom javascript buildHtml() { const varName = this.filename.split("/").pop().replace(".json", "") return \`\` } get filename() { return this.getAtom(1) } scrollLeftRightButtonsParser popularity 0.006342 cue leftRightButtons description Previous and next nav buttons. extends abstractScrollParser javascript buildHtmlSnippet() { return "" } buildHtml() { const { linkToPrevious, linkToNext } = this.root if (!linkToPrevious) return "" const style = \`a.keyboardNav {display:block;position:absolute;top:0.25rem; color: rgba(204,204,204,.8); font-size: 1.875rem; line-height: 1.7rem;}a.keyboardNav:hover{color: #333;text-decoration: none;}\` return \`<>\` } keyboardNavParser popularity 0.007476 description Make left and right navigate files. extends abstractScrollParser cueFromId catchAllAtomType urlAtom javascript buildHtmlSnippet() { return "" } buildHtml() { const {root} = this const linkToPrevious = this.getAtom(1) ?? root.linkToPrevious const linkToNext = this.getAtom(2) ?? root.linkToNext const script = \`\` return \`\` } printUsageStatsParser popularity 0.000096 // todo: if we include the atom "Parser" in a cue, bad things seem to happen. description Parser usage stats for folder. extends abstractScrollParser cueFromId javascript get stats() { const input = this.root.allScrollFiles.map(file => file.scrollProgram).map(program => program.parserIds.join("\\n")).join("\\n") const result = input.split('\\n').reduce((acc, atom) => (acc[atom] = (acc[atom] || 0) + 1, acc), {}) const rows = Object.entries(result).map(([atom, count]) => { return {atom, count}}) const sorted = this.root.lodash.sortBy(rows, "count").reverse() return "parserId uses\\n" + sorted.map(row => \`\${row.atom} \${row.count}\`).join('\\n') } buildHtml() { // A hacky but simple way to do this for now. 
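// Append a temporary sibling "table" particle (delimiter + printTable + data fed from this.stats), build its HTML, then destroy it.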
const particle = this.appendSibling("table") particle.appendLine("delimiter ") particle.appendLine("printTable") const dataParticle = particle.appendLine("data") dataParticle.setSubparticles(this.stats) const html = particle.buildHtml() particle.destroy() return html } buildTxt() { return this.stats } buildCsv() { return this.stats.replace(/ /g, ",") } printScrollLeetSheetParser popularity 0.000024 description Print Scroll parser leet sheet. extends abstractScrollParser tags experimental cueFromId javascript get parsersToDocument() { const clone = this.root.clone() clone.setSubparticles("") const atoms = clone.getAutocompleteResultsAt(0,0).matches.map(a => a.text) atoms.push("blankline") // manually add blank line atoms.push("Catch All Paragraph.") // manually add catch all paragraph atoms.push("") // manually add html atoms.sort() clone.setSubparticles(atoms.join("\\n").replace(/blankline/, "")) // insert blank line in right spot return clone } sortDocs(docs) { return docs.map(particle => { const {definition} = particle const {id, description, isPopular, examples, popularity} = definition const tags = definition.get("tags") || "" if (tags.includes("deprecate") || tags.includes("experimental")) return null const category = this.getCategory(tags) const note = this.getNote(category) return {id: definition.cueIfAny || id, description, isPopular, examples, note, popularity: Math.ceil(parseFloat(popularity) * 100000)} }).filter(i => i).sort((a, b) => a.id.localeCompare(b.id)) } makeLink(examples, cue) { // if (!examples.length) console.log(cue) // find particles that need docs const example = examples.length ? examples[0].subparticlesToString() : cue const base = \`https://try.scroll.pub/\` const particle = new Particle() particle.appendLineAndSubparticles("scroll", "theme gazette\\n" + example) return base + "#" + encodeURIComponent(particle.asString) } docToHtml(doc) { const css = \`#scrollLeetSheet {color: grey;} #scrollLeetSheet a {color: #3498db; }\` return \`
\` + doc.map(obj => \`
\${obj.isPopular ? "" : ""}\${obj.id} \${obj.description}\${obj.isPopular ? "" : ""}\${obj.note}
\`).join("\\n") + "
" } buildHtml() { return this.docToHtml(this.sortDocs(this.parsersToDocument)) } buildTxt() { return this.sortDocs(this.parsersToDocument).map(obj => \`\${obj.id} - \${obj.description}\`).join("\\n") } getCategory(input) { return "" } getNote() { return "" } buildCsv() { const rows = this.sortDocs(this.parsersToDocument).map(obj => { const {id, isPopular, description, popularity, category} = obj return { id, isPopular, description, popularity, category } }) return new Particle(this.root.lodash.sortBy(rows, "isPopular")).asCsv } printparsersLeetSheetParser popularity 0.000024 // todo: fix parse bug when atom Parser appears in parserId extends printScrollLeetSheetParser tags experimental description Parsers leetsheet. javascript buildHtml() { return "

Parser Definition Parsers define parsers that acquire, analyze and act on code.

" + this.docToHtml(this.sortDocs(this.parsersToDocument)) + "

Atom Definition Parsers analyze the atoms in a line.

" + this.docToHtml(this.sortDocs(this.atomParsersToDocument)) } makeLink() { return "" } categories = "assemblePhase acquirePhase analyzePhase actPhase".split(" ") getCategory(tags) { return tags.split(" ").filter(w => w.endsWith("Phase"))[0] } getNote(category) { return \` A\${category.replace("Phase", "").substr(1)}Time.\` } get atomParsersToDocument() { const parsersParser = require("scrollsdk/products/parsers.nodejs.js") const clone = new parsersParser("anyAtom\\n ").clone() const parserParticle = clone.getParticle("anyAtom") const atoms = clone.getAutocompleteResultsAt(1,1).matches.map(a => a.text) atoms.sort() parserParticle.setSubparticles(atoms.join("\\n")) return parserParticle } get parsersToDocument() { const parsersParser = require("scrollsdk/products/parsers.nodejs.js") const clone = new parsersParser("latinParser\\n ").clone() const parserParticle = clone.getParticle("latinParser") const atoms = clone.getAutocompleteResultsAt(1,1).matches.map(a => a.text) atoms.sort() parserParticle.setSubparticles(atoms.join("\\n")) clone.appendLine("myParser") clone.appendLine("myAtom") return parserParticle } abstractMeasureParser atoms measureNameAtom cueFromId boolean isMeasure true float sortIndex 1.9 boolean isComputed false string typeForWebForms text extends abstractScrollParser javascript buildHtmlSnippet() { return "" } buildHtml() { return "" } get measureValue() { return this.content ?? "" } get measureName() { return this.getCuePath().replace(/ /g, "_") } abstractAtomMeasureParser description A measure that contains a single atom. atoms measureNameAtom atomAtom extends abstractMeasureParser abstractEmailMeasureParser string typeForWebForms email atoms measureNameAtom emailAddressAtom extends abstractAtomMeasureParser abstractUrlMeasureParser string typeForWebForms url atoms measureNameAtom urlAtom extends abstractAtomMeasureParser abstractStringMeasureParser catchAllAtomType stringAtom extends abstractMeasureParser abstractIdParser cue id description What is the ID of this concept? extends abstractStringMeasureParser float sortIndex 1 boolean isMeasureRequired true boolean isConceptDelimiter true javascript getErrors() { const errors = super.getErrors() let requiredMeasureNames = this.root.measures.filter(measure => measure.isMeasureRequired).map(measure => measure.Name).filter(name => name !== "id") if (!requiredMeasureNames.length) return errors let next = this.next while (requiredMeasureNames.length && next.cue !== "id" && next.index !== 0) { requiredMeasureNames = requiredMeasureNames.filter(i => i !== next.cue) next = next.next } requiredMeasureNames.forEach(name => errors.push(this.makeError(\`Concept "\${this.content}" is missing required measure "\${name}".\`)) ) return errors } abstractTextareaMeasureParser string typeForWebForms textarea extends abstractMeasureParser baseParser blobParser javascript get measureValue() { return this.subparticlesToString().replace(/\\n/g, "\\\\n") } abstractNumericMeasureParser string typeForWebForms number extends abstractMeasureParser javascript get measureValue() { const {content} = this return content === undefined ? "" : parseFloat(content) } abstractIntegerMeasureParser atoms measureNameAtom integerAtom extends abstractNumericMeasureParser abstractFloatMeasureParser atoms measureNameAtom floatAtom extends abstractNumericMeasureParser abstractPercentageMeasureParser atoms measureNameAtom percentAtom extends abstractNumericMeasureParser javascript get measureValue() { const {content} = this return content === undefined ? 
"" : parseFloat(content) } abstractEnumMeasureParser atoms measureNameAtom enumAtom extends abstractMeasureParser abstractBooleanMeasureParser atoms measureNameAtom booleanAtom extends abstractMeasureParser javascript get measureValue() { const {content} = this return content === undefined ? "" : content == "true" } metaTagsParser popularity 0.007693 cueFromId extends abstractScrollParser description Print meta tags including title. javascript buildHtmlSnippet() { return "" } buildHtml() { const {root} = this const { title, description, canonicalUrl, gitRepo, scrollVersion, openGraphImage } = root const rssFeedUrl = root.get("rssFeedUrl") const favicon = root.get("favicon") const faviconTag = favicon ? \`\` : "" const rssTag = rssFeedUrl ? \`\` : "" const gitTag = gitRepo ? \`\` : "" return \` \${title} \${faviconTag} \${gitTag} \${rssTag} \` } quoteParser popularity 0.001471 cueFromId description A quote. catchAllParser quoteLineParser extends abstractScrollParser javascript buildHtml() { return \`
\${this.subparticlesToString()}
\` } buildTxt() { return this.subparticlesToString() } redirectToParser popularity 0.000072 description HTML redirect tag. extends abstractScrollParser atoms cueAtom urlAtom cueFromId example redirectTo https://scroll.pub/releaseNotes.html javascript buildHtml() { return \`\` } abstractVariableParser extends abstractScrollParser catchAllAtomType stringAtom atoms preBuildCommandAtom cueFromId javascript isTopMatter = true buildHtml() { return "" } replaceParser description Replace this with that. extends abstractVariableParser baseParser blobParser example replace YEAR 2022 replaceJsParser description Replace this with evaled JS. extends replaceParser catchAllAtomType javascriptAnyAtom example replaceJs SUM 1+1 * 1+1 = SUM replaceNodejsParser description Replace with evaled Node.JS. extends abstractVariableParser catchAllAtomType javascriptAnyAtom baseParser blobParser example replaceNodejs module.exports = {SCORE : 1 + 2} * The score is SCORE runScriptParser popularity 0.000024 description Run script and dump stdout. extends abstractScrollParser atoms cueAtom urlAtom cue run int filenameIndex 1 javascript get dependencies() { return [this.filename]} results = "Not yet run" async execute() { if (!this.filename) return await this.root.fetch(this.filename) // todo: make async const { execSync } = require("child_process") this.results = execSync(this.command) } get command() { const path = this.root.path const {filename }= this const fullPath = this.root.makeFullPath(filename) const ext = path.extname(filename).slice(1) const interpreterMap = { php: "php", py: "python3", rb: "ruby", pl: "perl", sh: "sh" } return [interpreterMap[ext], fullPath].join(" ") } buildHtml() { return this.buildTxt() } get filename() { return this.getAtom(this.filenameIndex) } buildTxt() { return this.results.toString().trim() } quickRunScriptParser extends runScriptParser atoms urlAtom pattern ^[^\\s]+\\.(py|pl|sh|rb|php)[^\\s]*$ int filenameIndex 0 endSnippetParser popularity 0.004293 description Cut for snippet here. extends abstractScrollParser cueFromId javascript buildHtml() { return "" } toStampParser description Print a directory to stamp. extends abstractScrollParser catchAllAtomType filePathAtom cueFromId javascript buildTxt() { return this.makeStamp(this.content) } buildHtml() { return \`
\${this.buildTxt()}
\` } makeStamp(dir) { const fs = require('fs'); const path = require('path'); const { execSync } = require('child_process'); let stamp = 'stamp\\n'; const handleFile = (indentation, relativePath, itemPath, ) => { stamp += \`\${indentation}\${relativePath}\\n\`; const content = fs.readFileSync(itemPath, 'utf8'); stamp += \`\${indentation} \${content.replace(/\\n/g, \`\\n\${indentation} \`)}\\n\`; } let gitTrackedFiles function processDirectory(currentPath, depth) { const items = fs.readdirSync(currentPath); items.forEach(item => { const itemPath = path.join(currentPath, item); const relativePath = path.relative(dir, itemPath); //if (!gitTrackedFiles.has(item)) return const stats = fs.statSync(itemPath); const indentation = ' '.repeat(depth); if (stats.isDirectory()) { stamp += \`\${indentation}\${relativePath}/\\n\`; processDirectory(itemPath, depth + 1); } else if (stats.isFile()) handleFile(indentation, relativePath, itemPath) }); } const stats = fs.statSync(dir); if (stats.isDirectory()) { // Get list of git-tracked files gitTrackedFiles = new Set(execSync('git ls-files', { cwd: dir, encoding: 'utf-8' }) .split('\\n') .filter(Boolean)) processDirectory(dir, 1) } else handleFile(" ", dir, dir) return stamp.trim(); } stampParser description Expand project template to disk. extends abstractScrollParser inScope stampFolderParser catchAllParser stampFileParser example stamp .gitignore *.html readme.scroll # Hello world scripts/ nested/ hello.js console.log("Hello world") cueFromId atoms preBuildCommandAtom javascript execute() { const dir = this.root.folderPath this.forEach(particle => particle.execute(dir)) } scrollStumpParser cue stump extends abstractScrollParser description Compile Stump to HTML. catchAllParser stumpContentParser javascript buildHtml() { const {stumpParser} = this return new stumpParser(this.subparticlesToString()).compile() } get stumpParser() { return this.isNodeJs() ? require("scrollsdk/products/stump.nodejs.js") : stumpParser } stumpNoSnippetParser popularity 0.010177 // todo: make noSnippets an aftertext directive? extends scrollStumpParser description Compile Stump unless snippet. cueFromId javascript buildHtmlSnippet() { return "" } plainTextParser description Plain text oneliner or block. cueFromId extends abstractScrollParser catchAllParser plainTextLineParser catchAllAtomType stringAtom javascript buildHtml() { return this.buildTxt() } buildTxt() { return \`\${this.content ?? ""}\${this.subparticlesToString()}\` } plainTextOnlyParser popularity 0.000072 extends plainTextParser description Only print for buildTxt. javascript buildHtml() { return "" } scrollThemeParser popularity 0.007524 boolean isPopular true cue theme extends abstractScrollParser catchAllAtomType scrollThemeAtom description A collection of simple themes. string copyFromExternal .gazette.css // Note this will be replaced at runtime javascript get copyFromExternal() { return this.files.join(" ") } get files() { return this.atoms.slice(1).map(name => \`.\${name}.css\`).concat([".scroll.css"]) } buildHtml() { return this.files.map(name => \`\`).join("\\n") } abstractAftertextAttributeParser atoms cueAtom boolean isAttribute true javascript get htmlAttributes() { return \`\${this.cue}="\${this.content}"\` } buildHtml() { return "" } aftertextIdParser popularity 0.000145 cue id description Provide an ID to be output in the generated HTML tag. 
extends abstractAftertextAttributeParser atoms cueAtom htmlIdAtom single aftertextStyleParser popularity 0.000217 cue style description Set HTML style attribute. extends abstractAftertextAttributeParser catchAllAtomType cssAnyAtom javascript htmlAttributes = "" // special case this one get css() { return \`\${this.property}:\${this.content};\` } aftertextFontParser popularity 0.000217 cue font description Set font. extends aftertextStyleParser atoms cueAtom fontFamilyAtom catchAllAtomType cssAnyAtom string property font-family javascript get css() { if (this.content === "Slim") return "font-family:Helvetica Neue; font-weight:100;" return super.css } aftertextColorParser popularity 0.000217 cue color description Set font color. extends aftertextStyleParser catchAllAtomType cssAnyAtom string property color aftertextOnclickParser popularity 0.000217 cue onclick description Set HTML onclick attribute. extends abstractAftertextAttributeParser catchAllAtomType javascriptAnyAtom aftertextHiddenParser cue hidden atoms cueAtom description Do not compile this particle to HTML. extends abstractAftertextAttributeParser single aftertextTagParser atoms cueAtom htmlTagAtom description Override the HTML tag that the compiled particle will use. cue tag javascript buildHtml() { return "" } abstractAftertextDirectiveParser atoms cueAtom catchAllAtomType stringAtom javascript isMarkup = true buildHtml() { return "" } getErrors() { const errors = super.getErrors() if (!this.isMarkup || this.matchWholeLine) return errors const inserts = this.getInserts(this.parent.originalTextPostLinkify) // todo: make AbstractParticleError class exported by sdk to allow Parsers to define their own error types. // todo: also need to be able to map lines back to their line in source (pre-imports) if (!inserts.length) errors.push(this.makeError(\`No match found for "\${this.getLine()}".\`)) return errors } get pattern() { return this.getAtomsFrom(1).join(" ") } get shouldMatchAll() { return this.has("matchAll") } getMatches(text) { const { pattern } = this const escapedPattern = pattern.replace(/[-\\/\\\\^$*+?.()|[\\]{}]/g, "\\\\$&") return [...text.matchAll(new RegExp(escapedPattern, "g"))].map(match => { const { index } = match const endIndex = index + pattern.length return [ { index, string: \`<\${this.openTag}\${this.allAttributes}>\`, endIndex }, { index: endIndex, endIndex, string: \`\` } ] }) } getInserts(text) { const matches = this.getMatches(text) if (!matches.length) return false if (this.shouldMatchAll) return matches.flat() const match = this.getParticle("match") if (match) return match.indexes .map(index => matches[index]) .filter(i => i) .flat() return matches[0] } get allAttributes() { const attr = this.attributes.join(" ") return attr ? " " + attr : "" } get attributes() { return [] } get openTag() { return this.tag } get closeTag() { return this.tag } abstractMarkupParser extends abstractAftertextDirectiveParser inScope abstractMarkupParameterParser javascript get matchWholeLine() { return this.getAtomsFrom(this.patternStartsAtAtom).length === 0 } get pattern() { return this.matchWholeLine ? this.parent.originalText : this.getAtomsFrom(this.patternStartsAtAtom).join(" ") } patternStartsAtAtom = 1 boldParser popularity 0.000096 cueFromId description Bold matching text. extends abstractMarkupParser javascript tag = "b" italicsParser popularity 0.000241 cueFromId description Italicize matching text. 
extends abstractMarkupParser javascript tag = "i" underlineParser popularity 0.000024 description Underline matching text. cueFromId extends abstractMarkupParser javascript tag = "u" afterTextCenterParser popularity 0.000193 description Center paragraph. cue center extends abstractMarkupParser javascript tag = "center" aftertextCodeParser popularity 0.000145 description Wrap matching text in code span. cue code extends abstractMarkupParser javascript tag = "code" aftertextStrikeParser popularity 0.000048 description Wrap matching text in s span. cue strike extends abstractMarkupParser javascript tag = "s" classMarkupParser popularity 0.000772 description Add a custom class to the parent element instead. If matching text provided, a span with the class will be added around the matching text. extends abstractMarkupParser atoms cueAtom classNameAtom cue class javascript tag = "span" get applyToParentElement() { return this.atoms.length === 2 } getInserts(text) { // If no select text is added, set the class on the parent element. if (this.applyToParentElement) return [] return super.getInserts(text) } get className() { return this.getAtom(1) } get attributes() { return [\`class="\${this.className}"\`] } get matchWholeLine() { return this.applyToParentElement } get pattern() { return this.matchWholeLine ? this.parent.content : this.getAtomsFrom(2).join(" ") } classesMarkupParser extends classMarkupParser cue classes javascript applyToParentElement = true get className() { return this.content } hoverNoteParser popularity 0.000265 description Add a caveat viewable on hover on matching text. When you want to be sure you've thoroughly addressed obvious concerns but ones that don't warrant to distract from the main argument of the text. cueFromId extends classMarkupParser catchAllParser lineOfTextParser atoms cueAtom javascript get pattern() { return this.getAtomsFrom(1).join(" ") } get attributes() { return [\`class="scrollHoverNote"\`, \`title="\${this.hoverNoteText}"\`] } get hoverNoteText() { return this.subparticlesToString().replace(/\\n/g, " ") } scrollLinkParser popularity 0.008706 extends abstractMarkupParser description Put the matching text in an tag. atoms cueAtom urlAtom inScope linkTitleParser linkTargetParser abstractCommentParser programParser description Anything here will be URI encoded and then appended to the link. cueFromId atoms cueAtom catchAllParser programLinkParser javascript get encoded() { return encodeURIComponent(this.subparticlesToString()) } cue link javascript tag = "a" buildTxt() { return this.root.ensureAbsoluteLink(this.link) + " " + this.pattern } get link() { const {baseLink} = this if (this.has("program")) return baseLink + this.getParticle("program").encoded return baseLink } get baseLink() { const link = this.getAtom(1) const isAbsoluteLink = link.includes("://") if (isAbsoluteLink) return link const relativePath = this.parent.buildSettings?.relativePath || "" return relativePath + link } get linkAttribute() { return "href" } get attributes() { const attrs = [\`\${this.linkAttribute}="\${this.link}"\`] const options = ["title", "target"] options.forEach(option => { const particle = this.getParticle(option) if (particle) attrs.push(\`\${option}="\${particle.content}"\`) }) return attrs } patternStartsAtAtom = 2 scrollClickParser extends scrollLinkParser description An a tag with an onclick. 
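// Same markup as a link, but the value is emitted as an onclick attribute instead of href (see linkAttribute below).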
cue click javascript get linkAttribute() { return "onclick" } emailLinkParser popularity 0.000048 description A mailto link cue email extends scrollLinkParser javascript get attributes() { return [\`href="mailto:\${this.link}"\`] } quickLinkParser popularity 0.029228 pattern ^https?\\: extends scrollLinkParser atoms urlAtom javascript get link() { return this.cue } patternStartsAtAtom = 1 quickRelativeLinkParser popularity 0.029228 description Relative links. // note: only works if relative link ends in .html pattern ^[^\\s]+\\.(html|htm) extends scrollLinkParser atoms urlAtom javascript get link() { return this.cue } patternStartsAtAtom = 1 datelineParser popularity 0.006005 cueFromId description Gives your paragraph a dateline like "December 15, 2021 — The..." extends abstractAftertextDirectiveParser javascript getInserts() { const {day} = this if (!day) return false return [{ index: 0, string: \`\${day} — \` }] } matchWholeLine = true get day() { let day = this.content || this.root.date if (!day) return "" return this.root.dayjs(day).format(\`MMMM D, YYYY\`) } dayjsParser description Advanced directive that evals some Javascript code in an environment including "dayjs". cueFromId extends abstractAftertextDirectiveParser javascript getInserts() { const dayjs = this.root.dayjs const days = eval(this.content) const index = this.parent.originalTextPostLinkify.indexOf("days") return [{ index, string: \`\${days} \` }] } inlineMarkupsOnParser popularity 0.000024 cueFromId description Enable these inline markups only. example Hello *world*! inlineMarkupsOn bold extends abstractAftertextDirectiveParser catchAllAtomType inlineMarkupNameAtom javascript get shouldMatchAll() { return true } get markups() { const {root} = this let markups = [{delimiter: "\`", tag: "code", exclusive: true, name: "code"},{delimiter: "*", tag: "strong", name: "bold"}, {delimiter: "_", tag: "em", name: "italics"}] // only add katex markup if the root doc has katex. if (root.has("katex")) markups.unshift({delimiter: "$", tag: "span", attributes: ' class="scrollKatex"', exclusive: true, name: "katex"}) if (this.content) return markups.filter(markup => this.content.includes(markup.name)) if (root.has("inlineMarkups")) { root.getParticle("inlineMarkups").forEach(markup => { const delimiter = markup.getAtom(0) const tag = markup.getAtom(1) // todo: add support for providing custom functions for inline markups? // for example, !2+2! could run eval, or :about: could search a link map. const attributes = markup.getAtomsFrom(2).join(" ") markups = markups.filter(mu => mu.delimiter !== delimiter) // Remove any overridden markups if (tag) markups.push({delimiter, tag, attributes}) }) } return markups } matchWholeLine = true getMatches(text) { const exclusives = [] return this.markups.map(markup => this.applyMarkup(text, markup, exclusives)).filter(i => i).flat() } applyMarkup(text, markup, exclusives = []) { const {delimiter, tag, attributes} = markup const escapedDelimiter = delimiter.replace(/[-\\/\\\\^$*+?.()|[\\]{}]/g, "\\\\$&") const pattern = new RegExp(\`\${escapedDelimiter}[^\${escapedDelimiter}]+\${escapedDelimiter}\`, "g") const delimiterLength = delimiter.length return [...text.matchAll(pattern)].map(match => { const { index } = match const endIndex = index + match[0].length // I'm too lazy to clean up sdk to write a proper inline markup parser so doing this for now. // The exclusive idea is to not try and apply bold or italic styles inside a TeX or code inline style. 
// Note that the way this is currently implemented any TeX in an inline code will get rendered, but code // inline of TeX will not. Seems like an okay tradeoff until a proper refactor and cleanup can be done. if (exclusives.some(exclusive => index >= exclusive[0] && index <= exclusive[1])) return undefined if (markup.exclusive) exclusives.push([index, endIndex]) return [ { index, string: \`<\${tag + (attributes ? " " + attributes : "")}>\`, endIndex, consumeStartCharacters: delimiterLength }, { index: endIndex, endIndex, string: \`\`, consumeEndCharacters: delimiterLength } ] }).filter(i => i) } inlineMarkupParser popularity 0.000169 cueFromId atoms cueAtom delimiterAtom tagOrUrlAtom catchAllAtomType htmlAttributesAtom extends inlineMarkupsOnParser description Custom inline markup. for example @This@ will be in italics. inlineMarkup @ em javascript getMatches(text) { try { const delimiter = this.getAtom(1) const tag = this.getAtom(2) const attributes = this.getAtomsFrom(3).join(" ") return this.applyMarkup(text, {delimiter, tag, attributes}) } catch (err) { console.error(err) return [] } // Note: doubling up doesn't work because of the consumption characters. } linkifyParser description Use this to disable linkify on the text. extends abstractAftertextDirectiveParser cueFromId atoms cueAtom booleanAtom abstractMarkupParameterParser atoms cueAtom cueFromId matchAllParser popularity 0.000024 description Use this to match all occurrences of the text. extends abstractMarkupParameterParser matchParser popularity 0.000048 catchAllAtomType integerAtom description Use this to specify which index(es) to match. javascript get indexes() { return this.getAtomsFrom(1).map(num => parseInt(num)) } example aftertext hello ello ello bold ello match 0 2 extends abstractMarkupParameterParser abstractHtmlAttributeParser javascript buildHtml() { return "" } linkTargetParser popularity 0.000024 extends abstractHtmlAttributeParser description If you want to set the target of the link. To "_blank", for example. cue target atoms cueAtom codeAtom blankLineParser popularity 0.308149 description Print nothing. Break section. 
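// Matches empty lines (pattern ^$) and closes any open sections by clearing the parent's section stack.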
atoms blankAtom boolean isPopular true javascript buildHtml() { return this.parent.clearSectionStack() } pattern ^$ tags doNotSynthesize scrollFileAddressParser catchAllAtomType filePathAtom catchAllParser scrollFileAddressParser chatLineParser popularity 0.009887 catchAllAtomType stringAtom catchAllParser chatLineParser lineOfCodeParser popularity 0.018665 catchAllAtomType codeAtom catchAllParser lineOfCodeParser commentLineParser catchAllAtomType commentAtom cssLineParser popularity 0.002870 catchAllAtomType cssAnyAtom catchAllParser cssLineParser abstractTableTransformParser atoms cueAtom inScope abstractTableVisualizationParser abstractTableTransformParser h1Parser h2Parser scrollQuestionParser htmlInlineParser scrollBrParser slashCommentParser javascript get coreTable() { return this.parent.coreTable } get columnNames() { return this.parent.columnNames } connectColumnNames(userColumnNames, availableColumnNames = this.parent.columnNames) { const result = {} const normalize = str => str.toLowerCase().trim() userColumnNames.forEach(userColumn => { // Strategy 1: Exact match const exactMatch = availableColumnNames.find(col => col === userColumn) if (exactMatch) { result[userColumn] = exactMatch return } // Strategy 2: Case-insensitive match const normalizedUserColumn = normalize(userColumn) const caseInsensitiveMatch = availableColumnNames.find(col => normalize(col) === normalizedUserColumn) if (caseInsensitiveMatch) { result[userColumn] = caseInsensitiveMatch return } // Strategy 3: Levenshtein distance match const THRESHOLD = 2 // Consider matches with distance <= 2 as "very close" let bestMatch = null let bestDistance = Infinity availableColumnNames.forEach(col => { const distance = this.root.levenshteinDistance(userColumn, col) if (distance < bestDistance) { bestDistance = distance bestMatch = col } }) // Only use Levenshtein match if it's very close if (bestDistance <= THRESHOLD) { result[userColumn] = bestMatch return } // Strategy 4: Fallback - use original unmatched name result[userColumn] = userColumn }) return result } connectColumnName(name) { return this.connectColumnNames([name])[name] } getErrors() { const errors = super.getErrors() if (errors.length && this.previous.cue !== "assertIgnoreBelowErrors") return errors return [] } getRunTimeEnumOptions(atom) { if (atom.atomTypeId === "columnNameAtom") return this.parent.columnNames return super.getRunTimeEnumOptions(atom) } getRunTimeEnumOptionsForValidation(atom) { // Note: this will fail if the CSV file hasnt been built yet. 
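// Both plain and "-" prefixed column names are treated as valid here; the "-" form covers the reverse column references added below.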
if (atom.atomTypeId === "columnNameAtom") return this.parent.columnNames.concat(this.parent.columnNames.map(c => "-" + c)) // Add reverse names return super.getRunTimeEnumOptions(atom) } abstractDateSplitTransformParser extends abstractTableTransformParser atoms cueAtom catchAllAtomType columnNameAtom javascript get coreTable() { const columnName = this.getAtom(1) || this.detectDateColumn() if (!columnName) return this.parent.coreTable return this.parent.coreTable.map(row => { const newRow = {...row} try { const date = this.root.dayjs(row[columnName]) if (date.isValid()) newRow[this.newColumnName] = this.transformDate(date) } catch (err) {} return newRow }) } detectDateColumn() { const columns = this.parent.columnNames const dateColumns = ['date', 'created', 'published', 'timestamp'] for (const col of dateColumns) { if (columns.includes(col)) return col } for (const col of columns) { const sample = this.parent.coreTable[0][col] if (sample && this.root.dayjs(sample).isValid()) return col } return null } get columnNames() { return [...this.parent.columnNames, this.newColumnName] } transformDate(date) { const formatted = date.format(this.dateFormat) const isInt = !this.cue.includes("Name") return isInt ? parseInt(formatted) : formatted } scrollSplitYearParser extends abstractDateSplitTransformParser description Extract year into new column. cue splitYear string newColumnName year string dateFormat YYYY scrollSplitDayNameParser extends abstractDateSplitTransformParser description Extract day name into new column. cue splitDayName string newColumnName dayName string dateFormat dddd scrollSplitMonthNameParser extends abstractDateSplitTransformParser description Extract month name into new column. cue splitMonthName string newColumnName monthName string dateFormat MMMM scrollSplitMonthParser extends abstractDateSplitTransformParser description Extract month number (1-12) into new column. cue splitMonth string newColumnName month string dateFormat M scrollSplitDayOfMonthParser extends abstractDateSplitTransformParser description Extract day of month (1-31) into new column. cue splitDayOfMonth string newColumnName dayOfMonth string dateFormat D scrollSplitDayOfWeekParser extends abstractDateSplitTransformParser description Extract day of week (0-6) into new column. cue splitDay string newColumnName day string dateFormat d scrollParseDateParser extends abstractTableTransformParser description Parse dates in a column into standard format. 
cue parseDate atoms cueAtom columnNameAtom example sampleData stocks.csv parseDate date linechart x date y price javascript get coreTable() { const columnName = this.connectColumnName(this.getAtom(1)) const formatOut = this.get("format") || "YYYY-MM-DD" const {dayjs} = this.root return this.parent.coreTable.map(row => { const newRow = {...row} try { const value = row[columnName] if (value) { const date = dayjs(value) if (date.isValid()) newRow[columnName] = date.format(formatOut) } } catch (err) { console.error(\`Error parsing date in column \${columnName}:\`, err) } return newRow }) } formatParser description Specify output date format atoms cueAtom stringAtom cueFromId single scrollGroupByParser catchAllAtomType columnNameAtom extends abstractTableTransformParser reduceParser description Specify how to aggregate a column when grouping data atoms cueAtom columnNameAtom reductionTypeAtom newColumnNameAtom cue reduce example data.csv groupBy year reduce score sum totalScore reduce name concat names printTable javascript get reduction() { return { source: this.getAtom(1), reduction: this.getAtom(2), name: this.getAtom(3) || this.getAtomsFrom(1).join("_") } } description Combine rows with matching values into groups. example tables posts.csv groupBy year printTable cue groupBy javascript get coreTable() { if (this._coreTable) return this._coreTable const groupByColNames = this.getAtomsFrom(1) const {coreTable} = this.parent if (!groupByColNames.length) return coreTable const newCols = this.findParticles("reduce").map(particle => particle.reduction) // Pivot is shorthand for group and reduce? const makePivotTable = (rows, groupByColumnNames, inputColumnNames, newCols) => { const colMap = {} inputColumnNames.forEach((col) => (colMap[col] = true)) const groupByCols = groupByColumnNames.filter((col) => colMap[col]) return new PivotTable(rows, inputColumnNames.map(c => {return {name: c}}), newCols).getNewRows(groupByCols) } class PivotTable { constructor(rows, inputColumns, outputColumns) { this._columns = {} this._rows = rows inputColumns.forEach((col) => (this._columns[col.name] = col)) outputColumns.forEach((col) => (this._columns[col.name] = col)) } _getGroups(allRows, groupByColNames) { const rowsInGroups = new Map() allRows.forEach((row) => { const groupKey = groupByColNames.map((col) => row[col]?.toString().replace(/ /g, "") || "").join(" ") if (!rowsInGroups.has(groupKey)) rowsInGroups.set(groupKey, []) rowsInGroups.get(groupKey).push(row) }) return rowsInGroups } getNewRows(groupByCols) { // make new particles const rowsInGroups = this._getGroups(this._rows, groupByCols) // Any column in the group should be reused by the children const columns = [ { name: "count", type: "number", min: 0, }, ] groupByCols.forEach((colName) => columns.push(this._columns[colName])) const colsToReduce = Object.values(this._columns).filter((col) => !!col.reduction) colsToReduce.forEach((col) => columns.push(col)) // for each group const rows = [] const totalGroups = rowsInGroups.size for (let [groupId, group] of rowsInGroups) { const firstRow = group[0] const newRow = {} groupByCols.forEach((col) => newRow[col] = firstRow ? firstRow[col] : 0 ) newRow.count = group.length // todo: add more reductions? count, stddev, median, variance. 
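// --- Illustrative sketch: a simplified group-and-reduce, mirroring the groupBy/reduce pivot
// described above (group rows on one column, add a count, then aggregate other columns).
// Function and variable names here are assumptions, not part of the Scroll API.
const groupAndReduce = (rows, groupBy, reductions) => {
  const groups = new Map()
  rows.forEach(row => {
    const key = String(row[groupBy])
    if (!groups.has(key)) groups.set(key, [])
    groups.get(key).push(row)
  })
  return [...groups.values()].map(group => {
    const newRow = { [groupBy]: group[0][groupBy], count: group.length }
    reductions.forEach(({ source, reduction, name }) => {
      const values = group.map(row => row[source]).filter(v => typeof v === "number" && !isNaN(v))
      if (reduction === "sum") newRow[name] = values.reduce((a, b) => a + b, 0)
      if (reduction === "mean") newRow[name] = values.reduce((a, b) => a + b, 0) / values.length
      if (reduction === "concat") newRow[name] = group.map(row => row[source]).join(" ")
    })
    return newRow
  })
}
// groupAndReduce(
//   [{ year: 2022, score: 2 }, { year: 2022, score: 4 }, { year: 2023, score: 5 }],
//   "year",
//   [{ source: "score", reduction: "sum", name: "totalScore" }]
// ) // => [{ year: 2022, count: 2, totalScore: 6 }, { year: 2023, count: 1, totalScore: 5 }]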
colsToReduce.forEach((col) => { const sourceColName = col.source const reduction = col.reduction const newColName = col.name if (reduction === "concat") { newRow[newColName] = group.map((row) => row[sourceColName]).join(" ") return } if (reduction === "first") { newRow[newColName] = group.find((row) => row[sourceColName] !== "")?.[sourceColName] return } const values = group.map((row) => row[sourceColName]).filter((val) => typeof val === "number" && !isNaN(val)) let reducedValue = firstRow[sourceColName] if (reduction === "sum") reducedValue = values.reduce((prev, current) => prev + current, 0) if (reduction === "max") reducedValue = Math.max(...values) if (reduction === "min") reducedValue = Math.min(...values) if (reduction === "mean") reducedValue = values.reduce((prev, current) => prev + current, 0) / values.length newRow[newColName] = reducedValue }) rows.push(newRow) } // todo: add tests. figure out this api better. Object.values(columns).forEach((col) => { // For pivot columns, remove the source and reduction info for now. Treat things as immutable. delete col.source delete col.reduction }) return { rows, columns, } } } const pivotTable = makePivotTable(coreTable, groupByColNames, this.parent.columnNames, newCols) this._coreTable = pivotTable.rows this._columnNames = pivotTable.columns.map(col => col.name) return pivotTable.rows } get columnNames() { const {coreTable} = this return this._columnNames || this.parent.columnNames } scrollWhereParser extends abstractTableTransformParser description Filter rows by condition. cue where atoms cueAtom columnNameAtom comparisonAtom catchAllAtomType constantAtom example table iris.csv where Species = setosa javascript get coreTable() { // todo: use atoms here. const columnName = this.connectColumnName(this.getAtom(1)) const operator = this.getAtom(2) let untypedScalarValue = this.getAtom(3) const typedValue = isNaN(parseFloat(untypedScalarValue)) ? untypedScalarValue : parseFloat(untypedScalarValue) const coreTable = this.parent.coreTable if (!columnName || !operator || (untypedScalarValue === undefined && !operator.includes("mpty"))) return coreTable const filterFn = row => { const atom = row[columnName] const typedAtom = atom === null ? undefined : atom // convert nulls to undefined if (operator === "=") return typedValue === typedAtom else if (operator === "!=") return typedValue !== typedAtom else if (operator === "includes") return typedAtom !== undefined && typedAtom.includes(typedValue) else if (operator === "startsWith") return typedAtom !== undefined && typedAtom.toString().startsWith(typedValue) else if (operator === "endsWith") return typedAtom !== undefined && typedAtom.toString().endsWith(typedValue) else if (operator === "doesNotInclude") return typedAtom === undefined || !typedAtom.includes(typedValue) else if (operator === ">") return typedAtom > typedValue else if (operator === "<") return typedAtom < typedValue else if (operator === ">=") return typedAtom >= typedValue else if (operator === "<=") return typedAtom <= typedValue else if (operator === "empty") return typedAtom === "" || typedAtom === undefined else if (operator === "notEmpty") return typedAtom !== "" && typedAtom !== undefined } return coreTable.filter(filterFn) } scrollSelectParser catchAllAtomType columnNameAtom extends abstractTableTransformParser description Drop all columns except these. 
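// --- Illustrative sketch: the typed comparison used by the where transform above. Constants that
// parse as numbers are compared numerically; everything else as strings. A condensed standalone
// version; the operator table below only covers the most common operators shown above.
const whereFilter = (rows, columnName, operator, rawValue) => {
  const value = isNaN(parseFloat(rawValue)) ? rawValue : parseFloat(rawValue)
  const tests = {
    "=": atom => atom === value,
    "!=": atom => atom !== value,
    ">": atom => atom > value,
    "<": atom => atom < value,
    ">=": atom => atom >= value,
    "<=": atom => atom <= value,
    includes: atom => atom !== undefined && String(atom).includes(value),
    startsWith: atom => atom !== undefined && String(atom).startsWith(value),
    empty: atom => atom === "" || atom === undefined,
    notEmpty: atom => atom !== "" && atom !== undefined
  }
  const test = tests[operator]
  return test ? rows.filter(row => test(row[columnName] ?? undefined)) : rows
}
// whereFilter([{ Species: "setosa" }, { Species: "virginica" }], "Species", "=", "setosa")
// => [{ Species: "setosa" }]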
example tables data name,year,count index,2022,2 about,2023,4 select name year printTable cue select javascript get coreTable() { const {coreTable} = this.parent const {columnNames} = this if (!columnNames.length) return coreTable return coreTable.map(row => Object.fromEntries(columnNames.map(colName => [colName, row[colName]]))) } get columnNames() { if (!this._columnNames) { const names = this.getAtomsFrom(1) this._columnNamesMap = this.connectColumnNames(names) this._columnNames = names.map(name => this._columnNamesMap[name]) } return this._columnNames } scrollReverseParser extends abstractTableTransformParser description Reverse rows. cue reverse javascript get coreTable() { return this.parent.coreTable.slice().reverse() } scrollComposeParser extends abstractTableTransformParser description Add column using format string. catchAllAtomType codeAtom cue compose atoms cueAtom newColumnNameAtom example table compose sentence My name is {name} printTable javascript get coreTable() { const {newColumnName} = this const formatString = this.getAtomsFrom(2).join(" ") return this.parent.coreTable.map((row, index) => { const newRow = Object.assign({}, row) newRow[newColumnName] = this.evaluate(new Particle(row).evalTemplateString(formatString), index) return newRow }) } evaluate(str) { return str } get newColumnName() { return this.atoms[1] } get columnNames() { return this.parent.columnNames.concat(this.newColumnName) } scrollComputeParser extends scrollComposeParser description Add column by evaling format string. cue compute javascript evaluate(str) { return parseFloat(eval(str)) } scrollEvalParser extends scrollComputeParser description Add column by evaling format string. cue eval javascript evaluate(str) { return eval(str) } scrollRankParser extends scrollComposeParser description Add rank column. atoms cueAtom string newColumnName rank cue rank javascript evaluate(str, index) { return index + 1 } scrollLinksParser extends abstractTableTransformParser description Add column with links. cue links catchAllAtomType columnNameAtom javascript get coreTable() { const {newColumnName, linkColumns} = this return this.parent.coreTable.map(row => { const newRow = Object.assign({}, row) let newValue = [] linkColumns.forEach(name => { const value = newRow[name] delete newRow[name] if (value) newValue.push(\`\${name}\`) }) newRow[newColumnName] = newValue.join(" ") return newRow }) } get newColumnName() { return "links" } get linkColumns() { return this.getAtomsFrom(1) } get columnNames() { const {linkColumns} = this return this.parent.columnNames.filter(name => !linkColumns.includes(name)).concat(this.newColumnName) } scrollLimitParser extends abstractTableTransformParser description Select a subset. cue limit atoms cueAtom integerAtom integerAtom javascript get coreTable() { let start = this.getAtom(1) let end = this.getAtom(2) if (end === undefined) { end = start start = 0 } return this.parent.coreTable.slice(parseInt(start), parseInt(end)) } scrollShuffleParser extends abstractTableTransformParser description Randomly reorder rows. cue shuffle example table data.csv shuffle printTable javascript get coreTable() { // Create a copy of the table to avoid modifying original const rows = this.parent.coreTable.slice() // Fisher-Yates shuffle algorithm for (let i = rows.length - 1; i > 0; i--) { const j = Math.floor(Math.random() * (i + 1)) ;[rows[i], rows[j]] = [rows[j], rows[i]] } return rows } scrollTransposeParser extends abstractTableTransformParser description Tranpose table. 
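// --- Illustrative sketch: the compose/compute/rank family above adds a column by evaluating a
// format string per row. A minimal standalone version using {name}-style placeholders; the
// placeholder syntax here is an assumption for illustration.
const composeColumn = (rows, newColumnName, formatString) =>
  rows.map(row => ({
    ...row,
    [newColumnName]: formatString.replace(/\{(\w+)\}/g, (_, key) => row[key] ?? "")
  }))
// composeColumn([{ name: "index", year: 2022 }], "sentence", "My name is {name}")
// => [{ name: "index", year: 2022, sentence: "My name is index" }]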
cue transpose javascript get coreTable() { // todo: we need to switch to column based coreTable, instead of row based const transpose = arr => Object.keys(arr[0]).map(key => [key, ...arr.map(row => row[key])]); return transpose(this.parent.coreTable) } scrollImputeParser extends abstractTableTransformParser description Impute missing values of a columm. atoms cueAtom columnNameAtom cue impute javascript get coreTable() { const {columnName} = this const sorted = this.root.lodash.orderBy(this.parent.coreTable.slice(), columnName) // ascending const imputed = [] let lastInserted = sorted[0][columnName] sorted.forEach(row => { const measuredTime = row[columnName] while (measuredTime > lastInserted + 1) { lastInserted++ // synthesize rows const imputedRow = {} imputedRow[columnName] = lastInserted imputedRow.count = 0 imputed.push(imputedRow) } lastInserted = measuredTime imputed.push(row) }) return imputed } get columnName() { return this.connectColumnName(this.getAtom(1)) } scrollOrderByParser extends abstractTableTransformParser description Sort rows by column(s). catchAllAtomType columnNameAtom cue orderBy javascript get coreTable() { const makeLodashOrderByParams = str => { const part1 = str.split(" ") const part2 = part1.map(col => (col.startsWith("-") ? "desc" : "asc")) return [part1.map(col => this.connectColumnName(col.replace(/^\\-/, ""))), part2] } const orderBy = makeLodashOrderByParams(this.content) return this.root.lodash.orderBy(this.parent.coreTable.slice(), orderBy[0], orderBy[1]) } assertRowCountParser extends abstractTableTransformParser description Test row count is expected value. atoms cueAtom integerAtom cueFromId javascript getErrors() { const errors = super.getErrors() const actualRows = this.coreTable.length const expectedRows = parseInt(this.content) if (actualRows !== expectedRows) return errors.concat(this.makeError(\`Expected '\${expectedRows}' rows but got '\${actualRows}'.\`)) return errors } scrollRenameParser // todo: add support in Parsers for tuple catch alls catchAllAtomType columnNameAtom newColumnNameAtom catchAllAtomType newColumnNameAtom extends abstractTableTransformParser description Rename columns. example tables data name,year,count index,2022,2 rename name Name year Year printTable cue rename javascript get coreTable() { const {coreTable} = this.parent const {renameMap} = this if (!Object.keys(renameMap).length) return coreTable return coreTable.map(row => { const newRow = {} Object.keys(row).forEach(key => { const name = renameMap[key] || key newRow[name] = row[key] }) return newRow }) } get renameMap() { const map = {} const pairs = this.getAtomsFrom(1) let oldName while (oldName = pairs.shift()) { map[oldName] = pairs.shift() } return map } _renamed get columnNames() { if (this._renamed) return this._renamed const {renameMap} = this this._renamed = this.parent.columnNames.map(name => renameMap[name] || name ) return this._renamed } scrollSummarizeParser extends abstractTableTransformParser description Generate summary statistics for each column. 
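// --- Illustrative sketch: the orderBy transform above treats a leading "-" as descending and
// plain column names as ascending, then delegates to lodash.orderBy. Assumes lodash is installed.
const lodash = require("lodash")
const orderRows = (rows, spec) => {
  const columns = spec.split(" ")
  const directions = columns.map(col => (col.startsWith("-") ? "desc" : "asc"))
  return lodash.orderBy(rows.slice(), columns.map(col => col.replace(/^-/, "")), directions)
}
// orderRows([{ year: 2022 }, { year: 2023 }], "-year") // => [{ year: 2023 }, { year: 2022 }]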
cue summarize example table data.csv summarize printTable javascript get coreTable() { const {lodash} = this.root const sourceData = this.parent.coreTable if (!sourceData.length) return [] return this.parent.columnNames.map(colName => { const values = sourceData.map(row => row[colName]).filter(val => val !== undefined && val !== null) const numericValues = values.filter(val => typeof val === "number" && !isNaN(val)) const sorted = [...numericValues].sort((a, b) => a - b) // Calculate mode const frequency = {} values.forEach(val => { frequency[val] = (frequency[val] || 0) + 1 }) const mode = Object.entries(frequency) .sort((a, b) => b[1] - a[1]) .map(entry => entry[0])[0] // Calculate median for numeric values const median = sorted.length ? sorted.length % 2 === 0 ? (sorted[sorted.length/2 - 1] + sorted[sorted.length/2]) / 2 : sorted[Math.floor(sorted.length/2)] : null const sum = numericValues.length ? numericValues.reduce((a, b) => a + b, 0) : null const theType = typeof values[0] const count = values.length const mean = theType === "number" ? sum/count : "" return { name: colName, type: theType, incompleteCount: sourceData.length - values.length, uniqueCount: new Set(values).size, count, sum, median, mean, min: sorted.length ? sorted[0] : null, max: sorted.length ? sorted[sorted.length - 1] : null, mode } }) } get columnNames() { return ["name", "type", "incompleteCount", "uniqueCount", "count", "sum", "median", "mean", "min", "max", "mode"] } errorParser baseParser errorParser hakonContentParser popularity 0.102322 catchAllAtomType codeAtom heatrixCatchAllParser popularity 0.000193 // todo Fill this out catchAllAtomType stringAtom lineOfTextParser popularity 0.000289 catchAllAtomType stringAtom boolean isTextParser true htmlLineParser popularity 0.005209 catchAllAtomType htmlAnyAtom catchAllParser htmlLineParser openGraphParser // todo: fix Parsers scope issue so we can move this parser def under scrollImageParser description Add this line to make this the open graph image. cueFromId atoms cueAtom scrollFooterParser description Import to bottom of file. atoms preBuildCommandAtom cue footer scriptLineParser catchAllAtomType javascriptAnyAtom catchAllParser scriptLineParser linkTitleParser popularity 0.000048 description If you want to set the title of the link. cue title atoms cueAtom catchAllAtomType stringAtom example * This report showed the treatment had a big impact. https://example.com/report This report. title The average growth in the treatment group was 14.2x higher than the control group. programLinkParser popularity 0.000531 catchAllAtomType codeAtom scrollMediaLoopParser popularity 0.000048 cue loop atoms cueAtom scrollAutoplayParser cue autoplay atoms cueAtom abstractCompilerRuleParser catchAllAtomType anyAtom atoms cueAtom closeSubparticlesParser extends abstractCompilerRuleParser description When compiling a parent particle to a string, this string is appended to the compiled and joined subparticles. Default is blank. cueFromId indentCharacterParser extends abstractCompilerRuleParser description You can change the indent character for compiled subparticles. Default is a space. cueFromId catchAllAtomDelimiterParser description If a particle has a catchAllAtom, this is the string delimiter that will be used to join those atoms. Default is comma. extends abstractCompilerRuleParser cueFromId openSubparticlesParser extends abstractCompilerRuleParser description When compiling a parent particle to a string, this string is prepended to the compiled and joined subparticles. 
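// --- Illustrative sketch: the median and mode computations used by the summarize transform above,
// pulled out as standalone helpers.
const median = numbers => {
  const sorted = [...numbers].sort((a, b) => a - b)
  if (!sorted.length) return null
  const mid = Math.floor(sorted.length / 2)
  return sorted.length % 2 === 0 ? (sorted[mid - 1] + sorted[mid]) / 2 : sorted[mid]
}
const mode = values => {
  const frequency = {}
  values.forEach(val => (frequency[val] = (frequency[val] || 0) + 1))
  return Object.entries(frequency).sort((a, b) => b[1] - a[1]).map(entry => entry[0])[0]
}
// median([1, 2, 3, 4]) === 2.5; mode(["a", "b", "a"]) === "a"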
Default is blank. cueFromId stringTemplateParser extends abstractCompilerRuleParser description This template string is used to compile this line, and accepts strings of the format: const var = {someAtomId} cueFromId joinSubparticlesWithParser description When compiling a parent particle to a string, subparticles are compiled to strings and joined by this character. Default is a newline. extends abstractCompilerRuleParser cueFromId abstractConstantParser description A constant. atoms cueAtom cueFromId // todo: make tags inherit tags actPhase parsersBooleanParser cue boolean atoms cueAtom constantIdentifierAtom catchAllAtomType booleanAtom extends abstractConstantParser tags actPhase parsersFloatParser cue float atoms cueAtom constantIdentifierAtom catchAllAtomType floatAtom extends abstractConstantParser tags actPhase parsersIntParser cue int atoms cueAtom constantIdentifierAtom catchAllAtomType integerAtom tags actPhase extends abstractConstantParser parsersStringParser cue string atoms cueAtom constantIdentifierAtom catchAllAtomType stringAtom catchAllParser catchAllMultilineStringConstantParser extends abstractConstantParser tags actPhase abstractParserRuleParser single atoms cueAtom abstractNonTerminalParserRuleParser extends abstractParserRuleParser parsersBaseParserParser atoms cueAtom baseParsersAtom description Set for blobs or errors. // In rare cases with untyped content you can use a blobParser, for now, to skip parsing for performance gains. The base errorParser will report errors when parsed. Use that if you don't want to implement your own error parser. extends abstractParserRuleParser cue baseParser tags analyzePhase catchAllAtomTypeParser atoms cueAtom atomTypeIdAtom description Use for lists. // Aka 'listAtomType'. Use this when the value in a key/value pair is a list. If there are extra atoms in the particle's line, parse these atoms as this type. Often used with \`listDelimiterParser\`. extends abstractParserRuleParser cueFromId tags analyzePhase atomParserParser atoms cueAtom atomParserAtom description Set parsing strategy. // prefix/postfix/omnifix parsing strategy. If missing, defaults to prefix. extends abstractParserRuleParser cueFromId tags experimental analyzePhase catchAllParserParser description Attach this to unmatched lines. // If a parser is not found in the inScope list, instantiate this type of particle instead. atoms cueAtom parserIdAtom extends abstractParserRuleParser cueFromId tags acquirePhase parsersAtomsParser catchAllAtomType atomTypeIdAtom description Set required atomTypes. extends abstractParserRuleParser cue atoms tags analyzePhase parsersCompilerParser // todo Remove this and its subparticles? description Deprecated. For simple compilers. inScope stringTemplateParser catchAllAtomDelimiterParser openSubparticlesParser closeSubparticlesParser indentCharacterParser joinSubparticlesWithParser extends abstractParserRuleParser cue compiler tags deprecate boolean suggestInAutocomplete false parserDescriptionParser description Parser description. catchAllAtomType stringAtom extends abstractParserRuleParser cue description tags assemblePhase parsersExampleParser // todo Should this just be a "string" constant on particles? description Set example for docs and tests. catchAllAtomType exampleAnyAtom catchAllParser catchAllExampleLineParser extends abstractParserRuleParser cue example tags assemblePhase extendsParserParser cue extends tags assemblePhase description Extend another parser. 
// todo: add a catchall that is used for mixins atoms cueAtom parserIdAtom extends abstractParserRuleParser parsersPopularityParser // todo Remove this parser. Switch to conditional frequencies. description Parser popularity. atoms cueAtom floatAtom extends abstractParserRuleParser cue popularity tags assemblePhase inScopeParser description Parsers in scope. catchAllAtomType parserIdAtom extends abstractParserRuleParser cueFromId tags acquirePhase parsersJavascriptParser // todo Urgently need to get submode syntax highlighting running! (And eventually LSP) description Javascript code for Parser Actions. catchAllParser catchAllJavascriptCodeLineParser extends abstractParserRuleParser tags actPhase javascript format() { if (this.isNodeJs()) { const template = \`class FOO{ \${this.subparticlesToString()}}\` this.setSubparticles( require("prettier") .format(template, { semi: false, useTabs: true, parser: "babel", printWidth: 240 }) .replace(/class FOO \\{\\s+/, "") .replace(/\\s+\\}\\s+$/, "") .replace(/\\n\\t/g, "\\n") // drop one level of indent .replace(/\\t/g, " ") // we used tabs instead of spaces to be able to dedent without breaking literals. ) } return this } cue javascript abstractParseRuleParser // Each particle should have a pattern that it matches on unless it's a catch all particle. extends abstractParserRuleParser cueFromId parsersCueParser atoms cueAtom stringAtom description Attach by matching first atom. extends abstractParseRuleParser tags acquirePhase cue cue cueFromIdParser atoms cueAtom description Derive cue from parserId. // for example 'fooParser' would have cue of 'foo'. extends abstractParseRuleParser tags acquirePhase parsersPatternParser catchAllAtomType regexAtom description Attach via regex. extends abstractParseRuleParser tags acquirePhase cue pattern parsersRequiredParser description Assert is present at least once. extends abstractParserRuleParser cue required tags analyzePhase abstractValidationRuleParser extends abstractParserRuleParser cueFromId catchAllAtomType booleanAtom parsersSingleParser description Assert used once. // Can be overridden by a child class by setting to false. extends abstractValidationRuleParser tags analyzePhase cue single uniqueLineParser description Assert unique lines. For pattern parsers. // Can be overridden by a child class by setting to false. extends abstractValidationRuleParser tags analyzePhase uniqueCueParser description Assert unique first atoms. For pattern parsers. // For catch all parsers or pattern particles, use this to indicate the extends abstractValidationRuleParser tags analyzePhase listDelimiterParser description Split content by this delimiter. extends abstractParserRuleParser cueFromId catchAllAtomType stringAtom tags analyzePhase contentKeyParser description Deprecated. For to/from JSON. // Advanced keyword to help with isomorphic JSON serialization/deserialization. If present will serialize the particle to an object and set a property with this key and the value set to the particle's content. extends abstractParserRuleParser cueFromId catchAllAtomType stringAtom tags deprecate boolean suggestInAutocomplete false subparticlesKeyParser // todo: deprecate? description Deprecated. For to/from JSON. // Advanced keyword to help with serialization/deserialization of blobs. If present will serialize the particle to an object and set a property with this key and the value set to the particle's subparticles. 
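// --- Illustrative sketch: cueFromId above derives a parser's cue (the first atom it matches on)
// from its parser id by dropping the Parser suffix, e.g. 'fooParser' matches lines cued 'foo'.
// A parser can still set an explicit cue (as many definitions above do) instead of using cueFromId.
const cueFromParserId = parserId => parserId.replace(/Parser$/, "")
// cueFromParserId("fooParser") === "foo"
// cueFromParserId("scrollBrParser") === "scrollBr"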
extends abstractParserRuleParser cueFromId catchAllAtomType stringAtom tags deprecate boolean suggestInAutocomplete false parsersTagsParser catchAllAtomType stringAtom extends abstractParserRuleParser description Custom metadata. cue tags tags assemblePhase atomTypeDescriptionParser description Atom Type description. catchAllAtomType stringAtom cue description tags assemblePhase catchAllErrorParser baseParser errorParser catchAllExampleLineParser catchAllAtomType exampleAnyAtom catchAllParser catchAllExampleLineParser atoms exampleAnyAtom catchAllJavascriptCodeLineParser catchAllAtomType javascriptCodeAtom catchAllParser catchAllJavascriptCodeLineParser catchAllMultilineStringConstantParser description String constants can span multiple lines. catchAllAtomType stringAtom catchAllParser catchAllMultilineStringConstantParser atoms stringAtom atomTypeDefinitionParser // todo Generate a class for each atom type? // todo Allow abstract atom types? // todo Change pattern to postfix. pattern ^[a-zA-Z0-9_]+Atom$ inScope parsersPaintParser parsersRegexParser reservedAtomsParser enumFromAtomTypesParser atomTypeDescriptionParser parsersEnumParser slashCommentParser extendsAtomTypeParser parsersExamplesParser atomMinParser atomMaxParser atoms atomTypeIdAtom tags assemblePhase javascript buildHtml() {return ""} enumFromAtomTypesParser description Runtime enum options. catchAllAtomType atomTypeIdAtom atoms atomPropertyNameAtom cueFromId tags analyzePhase parsersEnumParser description Set enum options. cue enum catchAllAtomType enumOptionAtom atoms atomPropertyNameAtom tags analyzePhase parsersExamplesParser description Examples for documentation and tests. // If the domain of possible atom values is large, such as a string type, it can help certain methods—such as program synthesis—to provide a few examples. cue examples catchAllAtomType atomExampleAtom atoms atomPropertyNameAtom tags assemblePhase atomMinParser description Specify a min if numeric. cue min atoms atomPropertyNameAtom numberAtom tags analyzePhase atomMaxParser description Specify a max if numeric. cue max atoms atomPropertyNameAtom numberAtom tags analyzePhase parsersPaintParser atoms cueAtom paintTypeAtom description Instructor editor how to color these. single cue paint tags analyzePhase parserDefinitionParser // todo Add multiple dispatch? pattern ^[a-zA-Z0-9_]+Parser$ description Parser types are a core unit of your language. They translate to 1 class per parser. Examples of parser would be "header", "person", "if", "+", "define", etc. catchAllParser catchAllErrorParser inScope abstractParserRuleParser abstractConstantParser slashCommentParser parserDefinitionParser atoms parserIdAtom tags assemblePhase javascript buildHtml() { return ""} parsersRegexParser catchAllAtomType regexAtom description Atoms must match this. single atoms atomPropertyNameAtom cue regex tags analyzePhase reservedAtomsParser single description Atoms can't be any of these. catchAllAtomType reservedAtomAtom atoms atomPropertyNameAtom cueFromId tags analyzePhase extendsAtomTypeParser cue extends description Extend another atomType. // todo Add mixin support in addition to extends? 
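// --- Illustrative sketch: the definition patterns above route ids ending in "Atom" to atom type
// definitions and ids ending in "Parser" to parser definitions.
const isAtomTypeId = id => /^[a-zA-Z0-9_]+Atom$/.test(id)
const isParserId = id => /^[a-zA-Z0-9_]+Parser$/.test(id)
// isAtomTypeId("columnNameAtom") === true; isParserId("scrollWhereParser") === true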
atoms cueAtom atomTypeIdAtom tags assemblePhase single abstractColumnNameParser atoms cueAtom columnNameAtom javascript getRunTimeEnumOptions(atom) { if (atom.atomTypeId === "columnNameAtom") return this.parent.columnNames return super.getRunTimeEnumOptions(atom) } scrollRadiusParser cue radius extends abstractColumnNameParser scrollSymbolParser cue symbol extends abstractColumnNameParser scrollFillParser cue fill extends abstractColumnNameParser scrollStrokeParser cue stroke extends abstractColumnNameParser scrollLabelParser cue label extends abstractColumnNameParser scrollSortParser cue sort extends abstractColumnNameParser scrollXParser cue x extends abstractColumnNameParser scrollYParser cue y extends abstractColumnNameParser abstractPlotLabelParser cueFromId atoms cueAtom catchAllAtomType stringAtom quoteLineParser popularity 0.004172 catchAllAtomType stringAtom catchAllParser quoteLineParser scrollParser description Scroll is a language for scientists of all ages. Refine, share and collaborate on ideas. root inScope abstractScrollParser blankLineParser atomTypeDefinitionParser parserDefinitionParser catchAllParser catchAllParagraphParser javascript setFile(file) { this.file = file const date = this.get("date") if (date) this.file.timestamp = this.dayjs(this.get("date")).unix() return this } buildHtml(buildSettings) { this.sectionStack = [] return this.filter(subparticle => subparticle.buildHtml).map(subparticle => { try {return subparticle.buildHtml(buildSettings)} catch (err) {console.error(err); return ""} }).filter(i => i).join("\\n") + this.clearSectionStack() } sectionStack = [] clearSectionStack() { const result = this.sectionStack.join("\\n") this.sectionStack = [] return result } bodyStack = [] clearBodyStack() { const result = this.bodyStack.join("") this.bodyStack = [] return result } get hakonParser() { if (this.isNodeJs()) return require("scrollsdk/products/hakon.nodejs.js") return hakonParser } readSyncFromFileOrUrl(fileOrUrl) { if (!this.isNodeJs()) return localStorage.getItem(fileOrUrl) || "" const isUrl = fileOrUrl.match(/^https?\\:[^ ]+$/) if (!isUrl) return this.root.readFile(fileOrUrl) return this.readFile(this.makeFullPath(new URL(fileOrUrl).pathname.split('/').pop())) } async fetch(url, filename) { const isUrl = url.match(/^https?\\:[^ ]+$/) if (!isUrl) return return this.isNodeJs() ? this.fetchNode(url, filename) : this.fetchBrowser(url) } get path() { return require("path") } makeFullPath(filename) { return this.path.join(this.folderPath, filename) } _nextAndPrevious(arr, index) { const nextIndex = index + 1 const previousIndex = index - 1 return { previous: arr[previousIndex] ?? arr[arr.length - 1], next: arr[nextIndex] ?? arr[0] } } // keyboard nav is always in the same folder. 
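// --- Illustrative sketch: the wrap-around neighbor lookup used for keyboard navigation above.
// The first item's "previous" is the last item and the last item's "next" is the first.
const nextAndPrevious = (arr, index) => ({
  previous: arr[index - 1] ?? arr[arr.length - 1],
  next: arr[index + 1] ?? arr[0]
})
// nextAndPrevious(["a", "b", "c"], 0) // => { previous: "c", next: "b" }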
does not currently support cross folder includeFileInKeyboardNav(file) { const { scrollProgram } = file return scrollProgram.buildsHtml && scrollProgram.hasKeyboardNav && scrollProgram.tags.includes(this.primaryTag) } get timeIndex() { return this.file.timeIndex || 0 } get linkToPrevious() { if (!this.hasKeyboardNav) // Dont provide link to next unless keyboard nav is on return undefined const {allScrollFiles} = this let file = this._nextAndPrevious(allScrollFiles, this.timeIndex).previous while (!this.includeFileInKeyboardNav(file)) { file = this._nextAndPrevious(allScrollFiles, file.timeIndex).previous } return file.scrollProgram.permalink } importRegex = /^(import |[a-zA-Z\\_\\-\\.0-9\\/]+\\.(scroll|parsers)$|https?:\\/\\/.+\\.(scroll|parsers)$)/gm get linkToNext() { if (!this.hasKeyboardNav) // Dont provide link to next unless keyboard nav is on return undefined const {allScrollFiles} = this let file = this._nextAndPrevious(allScrollFiles, this.timeIndex).next while (!this.includeFileInKeyboardNav(file)) { file = this._nextAndPrevious(allScrollFiles, file.timeIndex).next } return file.scrollProgram.permalink } // todo: clean up this naming pattern and add a parser instead of special casing 404.html get allHtmlFiles() { return this.allScrollFiles.filter(file => file.scrollProgram.buildsHtml && file.scrollProgram.permalink !== "404.html") } parseNestedTag(tag) { if (!tag.includes("/")) return; const {path} = this const parts = tag.split("/") const group = parts.pop() const relativePath = parts.join("/") return { group, relativePath, folderPath: path.join(this.folderPath, path.normalize(relativePath)) } } getFilesByTags(tags, limit) { // todo: tags is currently matching partial substrings const getFilesWithTag = (tag, files) => files.filter(file => file.scrollProgram.buildsHtml && file.scrollProgram.tags.includes(tag)) if (typeof tags === "string") tags = tags.split(" ") if (!tags || !tags.length) return this.allHtmlFiles .filter(file => file !== this) // avoid infinite loops. todo: think this through better. 
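// --- Illustrative sketch: nested tags of the form "path/to/group" are split above into a relative
// folder path plus the tag to match inside that folder. Standalone version, assuming Node's path
// module; argument names are illustrative.
const path = require("path")
const parseNestedTag = (tag, folderPath) => {
  if (!tag.includes("/")) return undefined
  const parts = tag.split("/")
  const group = parts.pop()
  const relativePath = parts.join("/")
  return { group, relativePath, folderPath: path.join(folderPath, path.normalize(relativePath)) }
}
// parseNestedTag("blog/tech", "/site") // => { group: "tech", relativePath: "blog", folderPath: "/site/blog" }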
.map(file => { return { file, relativePath: "" } }) .slice(0, limit) let arr = [] tags.forEach(tag => { if (!tag.includes("/")) return (arr = arr.concat( getFilesWithTag(tag, this.allScrollFiles) .map(file => { return { file, relativePath: "" } }) .slice(0, limit) )) const {folderPath, group, relativePath} = this.parseNestedTag(tag) let files = [] try { files = this.fileSystem.getCachedLoadedFilesInFolder(folderPath, this) } catch (err) { console.error(err) } const filtered = getFilesWithTag(group, files).map(file => { return { file, relativePath: relativePath + "/" } }) arr = arr.concat(filtered.slice(0, limit)) }) return this.lodash.sortBy(arr, file => file.file.timestamp).reverse() } async fetchNode(url, filename) { filename = filename || new URL(url).pathname.split('/').pop() const fullpath = this.makeFullPath(filename) if (require("fs").existsSync(fullpath)) return this.readFile(fullpath) this.log(\`🛜 fetching \${url} to \${fullpath} \`) await this.downloadToDisk(url, fullpath) return this.readFile(fullpath) } log(message) { if (this.logger) this.logger.log(message) } async fetchBrowser(url) { const content = localStorage.getItem(url) if (content) return content return this.downloadToLocalStorage(url) } async downloadToDisk(url, destination) { const { writeFile } = require('fs').promises const response = await fetch(url) const fileBuffer = await response.arrayBuffer() await writeFile(destination, Buffer.from(fileBuffer)) return this.readFile(destination) } async downloadToLocalStorage(url) { const response = await fetch(url) const blob = await response.blob() localStorage.setItem(url, await blob.text()) return localStorage.getItem(url) } readFile(filename) { const {path} = this const fs = require("fs") const fullPath = path.join(this.folderPath, filename.replace(this.folderPath, "")) try { if (fs.existsSync(fullPath)) return fs.readFileSync(fullPath, "utf8") console.error(\`File '\${filename}' not found\`) return "" } catch (err) { console.error(\`Error in '\${this.filePath}' reading file: '\${fullPath}'\`) console.error(err) return "" } } alreadyRequired = new Set() buildHtmlSnippet(buildSettings) { this.sectionStack = [] return this.map(subparticle => (subparticle.buildHtmlSnippet ? subparticle.buildHtmlSnippet(buildSettings) : subparticle.buildHtml(buildSettings))) .filter(i => i) .join("\\n") .trim() + this.clearSectionStack() } get footnotes() { if (this._footnotes === undefined) this._footnotes = this.filter(particle => particle.isFootnote) return this._footnotes } get authors() { return this.get("authors") } get allScrollFiles() { try { return this.fileSystem.getCachedLoadedFilesInFolder(this.folderPath, this) } catch (err) { console.error(err) return [] } } async doThing(thing) { await Promise.all(this.filter(particle => particle[thing]).map(async particle => particle[thing]())) } async load() { await this.doThing("load") } async execute() { await this.doThing("execute") } file = {} getFromParserId(parserId) { return this.parserIdIndex[parserId]?.[0].content } get fileSystem() { return this.file.fileSystem } get filePath() { return this.file.filePath } get folderPath() { return this.file.folderPath } get filename() { return this.file.filename || "" } get hasKeyboardNav() { return this.has("keyboardNav") } get editHtml() { return \`Edit\` } get externalsPath() { return this.file.EXTERNALS_PATH } get endSnippetIndex() { // Get the line number that the snippet should stop at. 
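// --- Illustrative sketch: the fetchNode flow above caches downloads on disk beside the Scroll
// folder and only hits the network when the file is missing. A condensed standalone version,
// assuming Node 18+ (global fetch) and the fs/path modules.
const fs = require("fs")
const fsp = require("fs").promises
const pathLib = require("path")
const fetchCached = async (url, folderPath, filename) => {
  filename = filename || new URL(url).pathname.split("/").pop()
  const fullPath = pathLib.join(folderPath, filename)
  if (fs.existsSync(fullPath)) return fs.readFileSync(fullPath, "utf8")
  const response = await fetch(url)
  await fsp.writeFile(fullPath, Buffer.from(await response.arrayBuffer()))
  return fs.readFileSync(fullPath, "utf8")
}
// await fetchCached("https://example.com/data.csv", "/tmp") // downloads once, then reads from disk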
// First if its hard coded, use that if (this.has("endSnippet")) return this.getParticle("endSnippet").index // Next look for a dinkus const snippetBreak = this.find(particle => particle.isDinkus) if (snippetBreak) return snippetBreak.index return -1 } get parserIds() { return this.topDownArray.map(particle => particle.definition.id) } get tags() { return this.get("tags") || "" } get primaryTag() { return this.tags.split(" ")[0] } get filenameNoExtension() { return this.filename.replace(".scroll", "") } // todo: rename publishedUrl? Or something to indicate that this is only for stuff on the web (not localhost) // BaseUrl must be provided for RSS Feeds and OpenGraph tags to work get baseUrl() { const baseUrl = (this.get("baseUrl") || "").replace(/\\/$/, "") return baseUrl + "/" } get canonicalUrl() { return this.get("canonicalUrl") || this.baseUrl + this.permalink } get openGraphImage() { const openGraphImage = this.get("openGraphImage") if (openGraphImage !== undefined) return this.ensureAbsoluteLink(openGraphImage) const images = this.filter(particle => particle.doesExtend("scrollImageParser")) const hit = images.find(particle => particle.has("openGraph")) || images[0] if (!hit) return "" return this.ensureAbsoluteLink(hit.filename) } get absoluteLink() { return this.ensureAbsoluteLink(this.permalink) } ensureAbsoluteLink(link) { if (link.includes("://")) return link return this.baseUrl + link.replace(/^\\//, "") } get editUrl() { const editUrl = this.get("editUrl") if (editUrl) return editUrl const editBaseUrl = this.get("editBaseUrl") return (editBaseUrl ? editBaseUrl.replace(/\\/$/, "") + "/" : "") + this.filename } get gitRepo() { // given https://github.com/breck7/breckyunits.com/blob/main/four-tips-to-improve-communication.scroll // return https://github.com/breck7/breckyunits.com return this.editUrl.split("/").slice(0, 5).join("/") } get scrollVersion() { // currently manually updated return "164.12.0" } // Use the first paragraph for the description // todo: add a particle method version of get that gets you the first particle. (actulaly make get return array?) // would speed up a lot. get description() { const description = this.getFromParserId("openGraphDescriptionParser") if (description) return description return this.generatedDescription } get generatedDescription() { const firstParagraph = this.find(particle => particle.isArticleContent) return firstParagraph ? firstParagraph.originalText.substr(0, 100).replace(/[&"<>']/g, "") : "" } get titleFromFilename() { const unCamelCase = str => str.replace(/([a-z])([A-Z])/g, "$1 $2").replace(/^./, match => match.toUpperCase()) return unCamelCase(this.filenameNoExtension) } get title() { return this.getFromParserId("scrollTitleParser") || this.titleFromFilename } get linkTitle() { return this.getFromParserId("scrollLinkTitleParser") || this.title } get permalink() { return this.get("permalink") || (this.filename ? this.filenameNoExtension + ".html" : "") } compileTo(extensionCapitalized) { if (extensionCapitalized === "Txt") return this.asTxt if (extensionCapitalized === "Html") return this.asHtml const methodName = "build" + extensionCapitalized return this.topDownArray .filter(particle => particle[methodName]) .map((particle, index) => particle[methodName](index)) .join("\\n") .trim() } get asTxt() { return ( this.map(particle => { const text = particle.buildTxt ? 
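// --- Illustrative sketch: when no title is set, the title above is derived from the filename by
// un-camel-casing it and capitalizing the first letter.
const titleFromFilename = filename => {
  const base = filename.replace(".scroll", "")
  return base.replace(/([a-z])([A-Z])/g, "$1 $2").replace(/^./, match => match.toUpperCase())
}
// titleFromFilename("fourTipsToImproveCommunication.scroll") === "Four Tips To Improve Communication"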
particle.buildTxt() : "" if (text) return text + "\\n" if (!particle.getLine().length) return "\\n" return "" }) .join("") .replace(/<[^>]*>/g, "") .replace(/\\n\\n\\n+/g, "\\n\\n") // Maximum 2 newlines in a row .trim() + "\\n" // Always end in a newline, Posix style ) } get dependencies() { const dependencies = this.file.dependencies?.slice() || [] const files = this.topDownArray.filter(particle => particle.dependencies).map(particle => particle.dependencies).flat() return dependencies.concat(files) } get buildsHtml() { const { permalink } = this return !this.file.importOnly && (permalink.endsWith(".html") || permalink.endsWith(".htm")) } // Without specifying the language hyphenation will not work. get lang() { return this.get("htmlLang") || "en" } _compiledHtml = "" get asHtml() { if (!this._compiledHtml) { const { permalink, buildsHtml } = this const content = (this.buildHtml() + this.clearBodyStack()).trim() // Don't add html tags to CSV feeds. A little hacky as calling a getter named _html_ to get _xml_ is not ideal. But // <1% of use case so might be good enough. const wrapWithHtmlTags = buildsHtml const bodyTag = this.has("metaTags") ? "" : "\\n" this._compiledHtml = wrapWithHtmlTags ? \`\\n\\n\${bodyTag}\${content}\\n\\n\` : content } return this._compiledHtml } get wordCount() { return this.asTxt.match(/\\b\\w+\\b/g)?.length || 0 } get minutes() { return parseFloat((this.wordCount / 200).toFixed(1)) } get date() { const date = this.get("date") || (this.file.timestamp ? this.file.timestamp : 0) return this.dayjs(date).format(\`MM/DD/YYYY\`) } get year() { return parseInt(this.dayjs(this.date).format(\`YYYY\`)) } get dayjs() { if (!this.isNodeJs()) return dayjs const lib = require("dayjs") const relativeTime = require("dayjs/plugin/relativeTime") lib.extend(relativeTime) return lib } get lodash() { return this.isNodeJs() ? require("lodash") : lodash } get d3() { return this.isNodeJs() ? require('d3') : d3 } getConcepts(parsed) { const concepts = [] let currentConcept parsed.forEach(particle => { if (particle.isConceptDelimiter) { if (currentConcept) concepts.push(currentConcept) currentConcept = [] } if (currentConcept && particle.isMeasure) currentConcept.push(particle) }) if (currentConcept) concepts.push(currentConcept) return concepts } _formatConcepts(parsed) { const concepts = this.getConcepts(parsed) if (!concepts.length) return false const {lodash} = this // does a destructive sort in place on the parsed program concepts.forEach(concept => { let currentSection const newCode = lodash .sortBy(concept, ["sortIndex"]) .map(particle => { let newLines = "" const section = particle.sortIndex.toString().split(".")[0] if (section !== currentSection) { currentSection = section newLines = "\\n" } return newLines + particle.toString() }) .join("\\n") concept.forEach((particle, index) => (index ? particle.destroy() : "")) concept[0].replaceParticle(() => newCode) }) } get formatted() { return this.getFormatted(this.file.codeAtStart) } get lastCommitTime() { // todo: speed this up and do a proper release. also could add more metrics like this. 
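// --- Illustrative sketch: word count and estimated reading time as computed above
// (words divided by 200 words per minute, rounded to one decimal place).
const wordCount = text => text.match(/\b\w+\b/g)?.length || 0
const minutes = text => parseFloat((wordCount(text) / 200).toFixed(1))
// wordCount("Hello brave new world") === 4; minutes("word ".repeat(300)) === 1.5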
if (this._lastCommitTime === undefined) { try { this._lastCommitTime = require("child_process").execSync(\`git log -1 --format="%at" -- "\${this.filePath}"\`).toString().trim() } catch (err) { this._lastCommitTime = 0 } } return this._lastCommitTime } getFormatted(codeAtStart = this.toString()) { let formatted = codeAtStart.replace(/\\r/g, "") // remove all carriage returns if there are any const parsed = new this.constructor(formatted) parsed.topDownArray.forEach(subparticle => { subparticle.format() const original = subparticle.getLine() const trimmed = original.replace(/(\\S.*?)[ \\t]*$/gm, "$1") // Trim trailing whitespace unless parser allows it if (original !== trimmed && !subparticle.allowTrailingWhitespace) subparticle.setLine(trimmed) }) this._formatConcepts(parsed) let importOnlys = [] let topMatter = [] let allElse = [] // Create any bindings parsed.forEach(particle => { if (particle.bindTo === "next") particle.binding = particle.next if (particle.bindTo === "previous") particle.binding = particle.previous }) parsed.forEach(particle => { if (particle.getLine() === "importOnly") importOnlys.push(particle) else if (particle.isTopMatter) topMatter.push(particle) else allElse.push(particle) }) const combined = importOnlys.concat(topMatter, allElse) // Move any bound particles combined .filter(particle => particle.bindTo) .forEach(particle => { // First remove the particle from its current position const originalIndex = combined.indexOf(particle) combined.splice(originalIndex, 1) // Then insert it at the new position // We need to find the binding index again after removal const bindingIndex = combined.indexOf(particle.binding) if (particle.bindTo === "next") combined.splice(bindingIndex, 0, particle) else combined.splice(bindingIndex + 1, 0, particle) }) const trimmed = combined .map(particle => particle.toString()) .join("\\n") .replace(/^\\n*/, "") // Remove leading newlines .replace(/\\n\\n\\n+/g, "\\n\\n") // Maximum 2 newlines in a row .replace(/\\n+$/, "") return trimmed === "" ? trimmed : trimmed + "\\n" // End non blank Scroll files in a newline character POSIX style for better working with tools like git } get parser() { return this.constructor } get parsersRequiringExternals() { const { parser } = this // todo: could be cleaned up a bit if (!parser.parsersRequiringExternals) parser.parsersRequiringExternals = parser.cachedHandParsersProgramRoot.filter(particle => particle.copyFromExternal).map(particle => particle.atoms[0]) return parser.parsersRequiringExternals } get Disk() { return this.isNodeJs() ? 
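// --- Illustrative sketch: the formatter above reorders particles (importOnly first, then top
// matter, then everything else) and collapses runs of blank lines to at most one blank line.
// The isImportOnly/isTopMatter property names below are placeholders for this sketch only.
const formatOrder = particles => {
  const importOnlys = particles.filter(p => p.isImportOnly)
  const topMatter = particles.filter(p => !p.isImportOnly && p.isTopMatter)
  const rest = particles.filter(p => !p.isImportOnly && !p.isTopMatter)
  return importOnlys.concat(topMatter, rest)
}
const collapseBlankLines = code => code.replace(/\n\n\n+/g, "\n\n").replace(/^\n*/, "")
// collapseBlankLines("a\n\n\n\nb") === "a\n\nb"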
require("scrollsdk/products/Disk.node.js").Disk : {}} async buildAll(options = {}) { await this.load() await this.buildOne(options) await this.buildTwo(options) } async buildOne(options) { await this.execute() const toBuild = this.filter(particle => particle.buildOne) for (let particle of toBuild) { await particle.buildOne(options) } } async buildTwo(options) { const toBuild = this.filter(particle => particle.buildTwo) for (let particle of toBuild) { await particle.buildTwo(options) } } get outputFileNames() { return this.filter(p => p.outputFileNames).map(p => p.outputFileNames).flat() } _compileArray(filename, arr) { const removeBlanks = data => data.map(obj => Object.fromEntries(Object.entries(obj).filter(([_, value]) => value !== ""))) const parts = filename.split(".") const format = parts.pop() if (format === "json") return JSON.stringify(removeBlanks(arr), null, 2) if (format === "js") return \`const \${parts[0]} = \` + JSON.stringify(removeBlanks(arr), null, 2) if (format === "csv") return this.arrayToCSV(arr) if (format === "tsv") return this.arrayToCSV(arr, "\\t") if (format === "particles") return particles.toString() return particles.toString() } levenshteinDistance(a, b) { const m = a.length const n = b.length const dp = Array.from({ length: m + 1 }, () => Array(n + 1).fill(0)) for (let i = 0; i <= m; i++) { dp[i][0] = i } for (let j = 0; j <= n; j++) { dp[0][j] = j } for (let i = 1; i <= m; i++) { for (let j = 1; j <= n; j++) { const cost = a[i - 1] === b[j - 1] ? 0 : 1 dp[i][j] = Math.min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost) } } return dp[m][n] } makeLodashOrderByParams(str) { const part1 = str.split(" ") const part2 = part1.map(col => (col.startsWith("-") ? "desc" : "asc")) return [part1.map(col => col.replace(/^\\-/, "")), part2] } arrayToCSV(data, delimiter = ",") { if (!data.length) return "" // Extract headers const headers = Object.keys(data[0]) const csv = data.map(row => headers .map(fieldName => { const fieldValue = row[fieldName] // Escape commas if the value is a string if (typeof fieldValue === "string" && fieldValue.includes(delimiter)) { return \`"\${fieldValue.replace(/"/g, '""')}"\` // Escape double quotes and wrap in double quotes } return fieldValue }) .join(delimiter) ) csv.unshift(headers.join(delimiter)) // Add header row at the top return csv.join("\\n") } compileConcepts(filename = "csv", sortBy = "") { const {lodash} = this if (!sortBy) return this._compileArray(filename, this.concepts) const orderBy = this.makeLodashOrderByParams(sortBy) return this._compileArray(filename, lodash.orderBy(this.concepts, orderBy[0], orderBy[1])) } _withStats get measuresWithStats() { if (!this._withStats) this._withStats = this.addMeasureStats(this.concepts, this.measures) return this._withStats } addMeasureStats(concepts, measures){ return measures.map(measure => { let Type = false concepts.forEach(concept => { const value = concept[measure.Name] if (value === undefined || value === "") return measure.Values++ if (!Type) { measure.Example = value.toString().replace(/\\n/g, " ") measure.Type = typeof value Type = true } }) measure.Coverage = Math.floor((100 * measure.Values) / concepts.length) + "%" return measure }) } parseMeasures(parser) { if (!Particle.measureCache) Particle.measureCache = new Map() const measureCache = Particle.measureCache if (measureCache.get(parser)) return measureCache.get(parser) const {lodash} = this // todo: clean this up const getCueAtoms = rootParserProgram => rootParserProgram .filter(particle => 
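// --- Illustrative sketch: CSV field escaping as in arrayToCSV above. Values containing the
// delimiter are wrapped in double quotes, with inner quotes doubled.
const toCsvField = (value, delimiter = ",") =>
  typeof value === "string" && value.includes(delimiter) ? '"' + value.replace(/"/g, '""') + '"' : value
// toCsvField('index, the "home" page') === '"index, the ""home"" page"'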
particle.getLine().endsWith("Parser") && !particle.getLine().startsWith("abstract")) .map(particle => particle.get("cue") || particle.getLine()) .map(line => line.replace(/Parser$/, "")) // Generate a fake program with one of every of the available parsers. Then parse it. Then we can easily access the meta data on the parsers const dummyProgram = new parser( Array.from( new Set( getCueAtoms(parser.cachedHandParsersProgramRoot) // is there a better method name than this? ) ).join("\\n") ) // Delete any particles that are not measures dummyProgram.filter(particle => !particle.isMeasure).forEach(particle => particle.destroy()) dummyProgram.forEach(particle => { // add nested measures Object.keys(particle.definition.cueMapWithDefinitions).forEach(key => particle.appendLine(key)) }) // Delete any nested particles that are not measures dummyProgram.topDownArray.filter(particle => !particle.isMeasure).forEach(particle => particle.destroy()) const measures = dummyProgram.topDownArray.map(particle => { return { Name: particle.measureName, Values: 0, Coverage: 0, Question: particle.definition.description, Example: particle.definition.getParticle("example")?.subparticlesToString() || "", Type: particle.typeForWebForms, Source: particle.sourceDomain, //Definition: parsedProgram.root.filename + ":" + particle.lineNumber SortIndex: particle.sortIndex, IsComputed: particle.isComputed, IsRequired: particle.isMeasureRequired, IsConceptDelimiter: particle.isConceptDelimiter, Cue: particle.definition.get("cue") } }) measureCache.set(parser, lodash.sortBy(measures, "SortIndex")) return measureCache.get(parser) } _concepts get concepts() { if (this._concepts) return this._concepts this._concepts = this.parseConcepts(this, this.measures) return this._concepts } _measures get measures() { if (this._measures) return this._measures this._measures = this.parseMeasures(this.parser) return this._measures } parseConcepts(parsedProgram, measures){ // Todo: might be a perf/memory/simplicity win to have a "segment" method in ScrollSDK, where you could // virtually split a Particle into multiple segments, and then query on those segments. // So we would "segment" on "id ", and then not need to create a bunch of new objects, and the original // already parsed lines could then learn about/access to their respective segments. const conceptDelimiter = measures.filter(measure => measure.IsConceptDelimiter)[0] if (!conceptDelimiter) return [] const concepts = parsedProgram.split(conceptDelimiter.Cue || conceptDelimiter.Name) concepts.shift() // Remove the part before "id" return concepts.map(concept => { const row = {} measures.forEach(measure => { const measureName = measure.Name const measureKey = measure.Cue || measureName.replace(/_/g, " ") if (!measure.IsComputed) row[measureName] = concept.getParticle(measureKey)?.measureValue ?? "" else row[measureName] = this.computeMeasure(parsedProgram, measureName, concept, concepts) }) return row }) } computeMeasure(parsedProgram, measureName, concept, concepts){ // note that this is currently global, assuming there wont be. name conflicts in computed measures in a single scroll if (!Particle.measureFnCache) Particle.measureFnCache = {} const measureFnCache = Particle.measureFnCache if (!measureFnCache[measureName]) { // a bit hacky but works?? 
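// --- Illustrative sketch: parseConcepts above splits a document into concepts at each occurrence
// of the concept-delimiter measure (e.g. "id"), then reads one value per measure per concept.
// This standalone version works on plain lines and drops anything before the first delimiter,
// mirroring the concepts.shift() call above; names are illustrative.
const splitIntoConcepts = (lines, delimiterCue) => {
  const concepts = []
  lines.forEach(line => {
    const cue = line.split(" ")[0]
    if (cue === delimiterCue) concepts.push([])
    if (concepts.length) concepts[concepts.length - 1].push(line)
  })
  return concepts
}
// splitIntoConcepts(["id alpha", "year 2022", "id beta", "year 2023"], "id")
// => [["id alpha", "year 2022"], ["id beta", "year 2023"]]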
const particle = parsedProgram.appendLine(measureName) measureFnCache[measureName] = particle.computeValue particle.destroy() } return measureFnCache[measureName](concept, measureName, parsedProgram, concepts) } compileMeasures(filename = "csv", sortBy = "") { const withStats = this.measuresWithStats if (!sortBy) return this._compileArray(filename, withStats) const orderBy = this.makeLodashOrderByParams(sortBy) return this._compileArray(filename, this.lodash.orderBy(withStats, orderBy[0], orderBy[1])) } evalNodeJsMacros(value, macroMap, filePath) { const tempPath = filePath + ".js" const {Disk} = this if (Disk.exists(tempPath)) throw new Error(\`Failed to write/require replaceNodejs snippet since '\${tempPath}' already exists.\`) try { Disk.write(tempPath, value) const results = require(tempPath) Object.keys(results).forEach(key => (macroMap[key] = results[key])) } catch (err) { console.error(\`Error in evalMacros in file '\${filePath}'\`) console.error(err) } finally { Disk.rm(tempPath) } } evalMacros(fusedFile) { const {fusedCode, codeAtStart, filePath} = fusedFile let code = fusedCode const absolutePath = filePath // note: the 2 params above are not used in this method, but may be used in user eval code. (todo: cleanup) const regex = /^(replace|footer$)/gm if (!regex.test(code)) return code const particle = new Particle(code) // todo: this can be faster. a more lightweight particle class? // Process macros const macroMap = {} particle .filter(particle => { const parserAtom = particle.cue return parserAtom === "replace" || parserAtom === "replaceJs" || parserAtom === "replaceNodejs" }) .forEach(particle => { let value = particle.length ? particle.subparticlesToString() : particle.getAtomsFrom(2).join(" ") const kind = particle.cue try { if (kind === "replaceJs") value = eval(value) if (this.isNodeJs() && kind === "replaceNodejs") this.evalNodeJsMacros(value, macroMap, absolutePath) else macroMap[particle.getAtom(1)] = value } catch (err) { console.error(err) } particle.destroy() // Destroy definitions after eval }) if (particle.has("footer")) { const pushes = particle.getParticles("footer") const append = pushes.map(push => push.section.join("\\n")).join("\\n") pushes.forEach(push => { push.section.forEach(particle => particle.destroy()) push.destroy() }) code = particle.asString + append } const keys = Object.keys(macroMap) if (!keys.length) return code let codeAfterMacroSubstitution = particle.asString // Todo: speed up. build a template? Object.keys(macroMap).forEach(key => (codeAfterMacroSubstitution = codeAfterMacroSubstitution.replace(new RegExp(key, "g"), macroMap[key]))) return codeAfterMacroSubstitution } toRss() { const { title, canonicalUrl } = this return \` \${title} \${canonicalUrl} \${this.dayjs(this.timestamp * 1000).format("ddd, DD MMM YYYY HH:mm:ss ZZ")} \` } example # Hello world ## This is Scroll * It compiles to HTML. code // You can add code as well. print("Hello world") stampFileParser catchAllAtomType stringAtom description Create a file. javascript execute(parentDir) { const fs = require("fs") const path = require("path") const fullPath = path.join(parentDir, this.getLine()) this.root.log(\`Creating file \${fullPath}\`) fs.mkdirSync(path.dirname(fullPath), {recursive: true}) const content = this.subparticlesToString() fs.writeFileSync(fullPath, content, "utf8") const isExecutable = content.startsWith("#!") if (isExecutable) fs.chmodSync(fullPath, "755") } stampFolderParser catchAllAtomType stringAtom description Create a folder. 
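// --- Illustrative sketch: evalMacros above collects replace/replaceJs definitions into a map and
// then substitutes every key throughout the remaining source. A condensed text-only version;
// plain string keys only (regex metacharacters in keys are not handled here).
const applyMacros = (code, macroMap) => {
  Object.keys(macroMap).forEach(key => (code = code.replace(new RegExp(key, "g"), macroMap[key])))
  return code
}
// applyMacros("Hello NAME, welcome to SITE.", { NAME: "Breck", SITE: "scroll.pub" })
// === "Hello Breck, welcome to scroll.pub."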
inScope stampFolderParser catchAllParser stampFileParser pattern \\/$ javascript execute(parentDir) { const fs = require("fs") const path = require("path") const newPath = path.join(parentDir, this.getLine()) this.root.log(\`Creating folder \${newPath}\`) fs.mkdirSync(newPath, {recursive: true}) this.forEach(particle => particle.execute(newPath)) } stumpContentParser popularity 0.102322 catchAllAtomType codeAtom scrollTableDataParser popularity 0.001061 cue data atoms cueAtom description Table from inline delimited data. baseParser blobParser scrollTableDelimiterParser popularity 0.001037 description Set the delimiter. cue delimiter atoms cueAtom stringAtom javascript buildHtml() { return "" } plainTextLineParser popularity 0.000121 catchAllAtomType stringAtom catchAllParser plainTextLineParser BlobParser baseParser blobParser`) get handParsersProgram() { return this.constructor.cachedHandParsersProgramRoot } static rootParser = scrollParser } class stampFileParser extends ParserBackedParticle { get stringAtom() { return this.getAtomsFrom(0) } execute(parentDir) { const fs = require("fs") const path = require("path") const fullPath = path.join(parentDir, this.getLine()) this.root.log(`Creating file ${fullPath}`) fs.mkdirSync(path.dirname(fullPath), {recursive: true}) const content = this.subparticlesToString() fs.writeFileSync(fullPath, content, "utf8") const isExecutable = content.startsWith("#!") if (isExecutable) fs.chmodSync(fullPath, "755") } } class stampFolderParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(stampFileParser, undefined, [{regex: /\/$/, parser: stampFolderParser}]) } get stringAtom() { return this.getAtomsFrom(0) } execute(parentDir) { const fs = require("fs") const path = require("path") const newPath = path.join(parentDir, this.getLine()) this.root.log(`Creating folder ${newPath}`) fs.mkdirSync(newPath, {recursive: true}) this.forEach(particle => particle.execute(newPath)) } } class stumpContentParser extends ParserBackedParticle { get codeAtom() { return this.getAtomsFrom(0) } } class scrollTableDataParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(this._getBlobParserCatchAllParser())} getErrors() { return [] } get cueAtom() { return this.getAtom(0) } } class scrollTableDelimiterParser extends ParserBackedParticle { get cueAtom() { return this.getAtom(0) } get stringAtom() { return this.getAtom(1) } buildHtml() { return "" } } class plainTextLineParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(plainTextLineParser, undefined, undefined) } get stringAtom() { return this.getAtomsFrom(0) } } class BlobParser extends ParserBackedParticle { createParserCombinator() { return new Particle.ParserCombinator(this._getBlobParserCatchAllParser())} getErrors() { return [] } } window.scrollParser = scrollParser }
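// --- Illustrative sketch: the stamp parsers above turn an indented tree into folders (lines
// ending in "/") and files (other lines, with their indented content as the file body, and a
// shebang marking the file executable). A condensed standalone walk over a plain object tree;
// the entry shape below is an assumption for this example, not the Scroll API.
const fsStamp = require("fs")
const pathStamp = require("path")
const stamp = (parentDir, entries) => {
  entries.forEach(entry => {
    const fullPath = pathStamp.join(parentDir, entry.name)
    if (entry.name.endsWith("/")) {
      fsStamp.mkdirSync(fullPath, { recursive: true })
      stamp(fullPath, entry.children || [])
    } else {
      fsStamp.mkdirSync(pathStamp.dirname(fullPath), { recursive: true })
      fsStamp.writeFileSync(fullPath, entry.content || "", "utf8")
      if ((entry.content || "").startsWith("#!")) fsStamp.chmodSync(fullPath, "755")
    }
  })
}
// stamp("/tmp/site", [{ name: "docs/", children: [{ name: "readme.scroll", content: "# Hello" }] }])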