fix(benchmark): adding benchmarks
johnlindquist committed Dec 9, 2024
1 parent 06364a0 commit 67149aa
Showing 5 changed files with 159 additions and 159 deletions.
59 changes: 59 additions & 0 deletions src/core/format.benchmark.test.ts
@@ -0,0 +1,59 @@
import ava from "ava"
import { loadPreviousResults, saveResults } from "./test-utils"
import tmp from 'tmp-promise'
import { formatChoices } from "./format"

ava.serial('benchmark - formatChoices', async (t) => {
const previousResults = await loadPreviousResults()

// Some sample data to benchmark
const largeInput = Array.from({ length: 1000 }, (_, i) => ({
name: `Item ${i}`,
value: `value${i}`,
description: `This is item number ${i}`
}))

// Run the benchmark multiple times to get stable measurements
const runs = 10
const times = []

// Warm-up run (not measured)
formatChoices(largeInput)

for (let i = 0; i < runs; i++) {
const start = performance.now()
formatChoices(largeInput)
const end = performance.now()
times.push(end - start)
}

const mean = times.reduce((a, b) => a + b, 0) / runs
const opsPerSecond = (1000 / mean) // If each run counts as 1 operation

// Compare to previous results if available
const testName = 'formatChoices'
const oldResult = previousResults[testName]
if (oldResult) {
const oldOps = oldResult.operationsPerSecond
const improvement = ((opsPerSecond - oldOps) / oldOps) * 100
t.log(`Previous OPS: ${oldOps.toFixed(2)}`)
t.log(`Current OPS: ${opsPerSecond.toFixed(2)}`)
const emoji = improvement > 0 ? "🚀" : "🐌"
t.log(`${emoji} Change: ${improvement.toFixed(2)}%`)
} else {
t.log('No previous benchmark to compare against.')
}

// Write new results
const newResults = {
...previousResults,
[testName]: {
timestamp: new Date().toISOString(),
operationsPerSecond: opsPerSecond,
meanDurationMs: mean
}
}
await saveResults(newResults)

t.pass()
})
85 changes: 9 additions & 76 deletions src/core/format.test.ts
@@ -2,6 +2,14 @@ import ava from "ava"
import { formatChoices, defaultGroupClassName, defaultGroupNameClassName } from "./format"
import { PROMPT } from "./enum.js"
import type { Choice } from "../types"
import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'node:fs'
import { performance } from 'node:perf_hooks'
import tmp from 'tmp-promise'
import { parseScript } from "./parser.js"
import { writeFile } from "node:fs/promises"
import path from "node:path"
import { loadPreviousResults, saveResults } from "./test-utils"


ava("formatChoices - basic primitive choices", (t) => {
const choices = ["option1", "option2", "option3"] as unknown as Choice[]
@@ -326,81 +334,6 @@ ava("formatChoices - index property determines position within groups", (t) => {
)
})


import test from 'ava'
import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'node:fs'
import { performance } from 'node:perf_hooks'
import { dirname } from 'node:path'

const DEFAULT_BENCHMARK_FILENAME = `${process.env.HOME}/.benchmarks/${
new URL(import.meta.url).pathname.split('/').pop()?.replace(/\.test\.ts$/, '-benchmark.json')
}`

function loadPreviousResults(filename = DEFAULT_BENCHMARK_FILENAME) {
if (!existsSync(filename)) return {}
return JSON.parse(readFileSync(filename, 'utf8'))
}

function saveResults(results, filename = DEFAULT_BENCHMARK_FILENAME) {
mkdirSync(dirname(filename), { recursive: true })
writeFileSync(filename, JSON.stringify(results, null, 2), 'utf8')
}

test.serial('benchmark - formatChoices', (t) => {
const previousResults = loadPreviousResults()

// Some sample data to benchmark
const largeInput = Array.from({ length: 1000 }, (_, i) => ({
name: `Item ${i}`,
value: `value${i}`,
description: `This is item number ${i}`
}))

// Run the benchmark multiple times to get stable measurements
const runs = 10
const times = []

// Warm-up run (not measured)
formatChoices(largeInput)

for (let i = 0; i < runs; i++) {
const start = performance.now()
formatChoices(largeInput)
const end = performance.now()
times.push(end - start)
}

const mean = times.reduce((a, b) => a + b, 0) / runs
const opsPerSecond = (1000 / mean) // If each run counts as 1 operation

// Compare to previous results if available
const testName = 'formatChoices'
const oldResult = previousResults[testName]
if (oldResult) {
const oldOps = oldResult.operationsPerSecond
const improvement = ((opsPerSecond - oldOps) / oldOps) * 100
t.log(`Previous OPS: ${oldOps.toFixed(2)}`)
t.log(`Current OPS: ${opsPerSecond.toFixed(2)}`)
const emoji = improvement > 0 ? "🚀" : "🐌"
t.log(`${emoji} Change: ${improvement.toFixed(2)}%`)
} else {
t.log('No previous benchmark to compare against.')
}

// Write new results
const newResults = {
...previousResults,
[testName]: {
timestamp: new Date().toISOString(),
operationsPerSecond: opsPerSecond,
meanDurationMs: mean
}
}
saveResults(newResults)

t.pass()
})

// Add these after the existing tests in src/core/format.test.ts

// --------------------------------------------
@@ -547,4 +480,4 @@ ava("formatChoices - group headers with an out-of-bound height are clamped", (t)
t.is(result[0].height, PROMPT.ITEM.HEIGHT.XXL, "Group header height is clamped to XXL")
t.is(result[1].name, "Inside Group")
t.is(result[1].height, PROMPT.ITEM.HEIGHT.XXXS, "Sub-choice height is clamped to XXXS")
})
})
154 changes: 73 additions & 81 deletions src/core/parser.benchmark.test.ts
@@ -1,94 +1,86 @@
// parser.benchmark.test.ts

import ava from "ava"
import { promises as fs } from "fs"
import { performance } from "node:perf_hooks"
import { dirname } from "node:path"
import { mkdirSync, writeFileSync, existsSync, readFileSync } from "node:fs"
import tmp from "tmp-promise"
import { parseScript } from "./parser.js" // Adjust this import path as necessary

const DEFAULT_BENCHMARK_FILENAME = `${process.env.HOME}/.benchmarks/${
new URL(import.meta.url).pathname.split('/').pop()?.replace(/\.test\.ts$/, '-benchmark.json')
}`
import { parseScript } from "./parser"
import { loadPreviousResults, saveResults } from "./test-utils"
import tmp from 'tmp-promise'

function loadPreviousResults(filename = DEFAULT_BENCHMARK_FILENAME) {
if (!existsSync(filename)) return {}
return JSON.parse(readFileSync(filename, 'utf8'))
}

function saveResults(results, filename = DEFAULT_BENCHMARK_FILENAME) {
mkdirSync(dirname(filename), { recursive: true })
writeFileSync(filename, JSON.stringify(results, null, 2), 'utf8')
}
ava.only('benchmark - parseScript', async (t) => {
await tmp.withDir(async (dir) => {
const previousResults = await loadPreviousResults()

ava.serial("benchmark - parseScript", async (t) => {
const previousResults = loadPreviousResults()
const scriptContents = `
// Name: Concat Kenv Examples into a Single File
// Description: Join all of Kenv Examples Scripts
// We'll create a temporary directory and write a sample script file to parse.
await tmp.withDir(async (dir) => {
const filePath = `${dir.path}/test-script.js`
import "@johnlindquist/kit"
import { globby } from "globby"
// Sample script content can be large or complex to simulate a realistic scenario.
// We'll include metadata-like comments and some code to simulate real parsing.
const scriptContent = `
#!/usr/bin/env node
// Name: Sample Test Script
// Description: A script used for benchmarking parseScript
// Shortcut: test-script
// Some code to parse:
function main() {
console.log("Hello World");
// preview: true
let all = await globby(home("dev", "kit-examples-ts", "scripts", "**", "*.ts"))
// split into 5 arrays of equal length
let arrayLength = Math.ceil(all.length / 5)
let arrays = []
for (let i = 0; i < all.length; i += arrayLength) {
arrays.push(all.slice(i, i + arrayLength))
}
main();
`.repeat(50) // Repeat multiple times to increase file size and complexity
await fs.writeFile(filePath, scriptContent, "utf8")
let allKitPath = kenvPath("all-kenv-examples.txt")
await writeFile(allKitPath, "")
// Run the benchmark multiple times to get stable measurements
const runs = 10
const times = []

// Warm-up run (not measured)
await parseScript(filePath)

for (let i = 0; i < runs; i++) {
const start = performance.now()
await parseScript(filePath)
const end = performance.now()
times.push(end - start)
}

const mean = times.reduce((a, b) => a + b, 0) / runs
const opsPerSecond = 1000 / mean
for await (let array of arrays) {
for await (let filePath of array) {
let contents = await readFile(filePath, "utf8")
await appendFile(allKitPath, contents)
}
}
// Compare to previous results if available
const testName = "parseScript"
const oldResult = previousResults[testName]
if (oldResult) {
const oldOps = oldResult.operationsPerSecond
const improvement = ((opsPerSecond - oldOps) / oldOps) * 100
t.log(`Previous OPS: ${oldOps.toFixed(2)}`)
t.log(`Current OPS: ${opsPerSecond.toFixed(2)}`)
t.log(`Change: ${improvement > 0 ? '⚡' : '🐌'} ${improvement.toFixed(2)}%`)
} else {
t.log("No previous benchmark to compare against.")
}
await revealFile(allKitPath)
`

// Write new results
const newResults = {
...previousResults,
[testName]: {
timestamp: new Date().toISOString(),
operationsPerSecond: opsPerSecond,
meanDurationMs: mean
}
}
saveResults(newResults)
const scriptPath = path.join(dir.path, "script.ts")
await writeFile(scriptPath, scriptContents)


// Run the benchmark multiple times to get stable measurements
const runs = 1000
const times = []

t.pass()

for (let i = 0; i < runs; i++) {
const start = performance.now()
await parseScript(scriptPath)
const end = performance.now()
times.push(end - start)
}

const mean = times.reduce((a, b) => a + b, 0) / runs
const opsPerSecond = (1000 / mean) // If each run counts as 1 operation

// Compare to previous results if available
const testName = 'parseScript'
const oldResult = previousResults[testName]
if (oldResult) {
const oldOps = oldResult.operationsPerSecond
const improvement = ((opsPerSecond - oldOps) / oldOps) * 100
t.log(`Previous OPS: ${oldOps.toFixed(2)}`)
t.log(`Current OPS: ${opsPerSecond.toFixed(2)}`)
const emoji = improvement > 0 ? "🚀" : "🐌"
t.log(`${emoji} Change: ${improvement.toFixed(2)}%`)
} else {
t.log('No previous benchmark to compare against.')
}

// Write new results
const newResults = {
...previousResults,
[testName]: {
timestamp: new Date().toISOString(),
operationsPerSecond: opsPerSecond,
meanDurationMs: mean
}
}
await saveResults(newResults)

t.pass()
}, {
unsafeCleanup: true
unsafeCleanup: true
})
})
})
2 changes: 0 additions & 2 deletions src/core/parser.test.ts
@@ -95,5 +95,3 @@ ava("postprocessMetadata - empty input", (t) => {

t.deepEqual(result, { type: ProcessType.Prompt })
})

// ... You can add more tests as needed ...
18 changes: 18 additions & 0 deletions src/core/test-utils.ts
@@ -0,0 +1,18 @@
import path from "node:path"
import { ensureDir, pathExists, readFile, writeFile } from "../../src/globals/index"

export async function loadPreviousResults(filename: string = getBenchmarkFilename()) {
if (!await pathExists(filename)) return {}
return JSON.parse(await readFile(filename, 'utf8'))
}

export async function saveResults(results: any, filename: string = getBenchmarkFilename()) {
await ensureDir(path.dirname(filename))
await writeFile(filename, JSON.stringify(results, null, 2), 'utf8')
}

export function getBenchmarkFilename() {
return path.join(process.env.HOME, ".benchmarks", `${
new URL(import.meta.url).pathname.split('/').pop()?.replace(/\.test\.ts$/, '-benchmark.json')
}`)
}
