Skip to content

Commit

Permalink
fix(benchmark): clear BenchmarkResult.samples array to reduce memory usage (#6541)
Browse files Browse the repository at this point in the history

Co-authored-by: Ari Perkkiö <[email protected]>
  • Loading branch information
hi-ogawa and AriPerkkio authored Sep 25, 2024
1 parent f131f93 commit a6407af
Show file tree
Hide file tree
Showing 9 changed files with 51 additions and 21 deletions.
1 change: 1 addition & 0 deletions packages/vitest/src/defaults.ts
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ export const benchmarkConfigDefaults: Required<
exclude: defaultExclude,
includeSource: [],
reporters: ['default'],
includeSamples: false,
}

const defaultCoverageExcludes = [
Expand Down
3 changes: 3 additions & 0 deletions packages/vitest/src/node/config/serializeConfig.ts
Original file line number Diff line number Diff line change
Expand Up @@ -160,5 +160,8 @@ export function serializeConfig(
standalone: config.standalone,
printConsoleTrace:
config.printConsoleTrace ?? coreConfig.printConsoleTrace,
benchmark: config.benchmark && {
includeSamples: config.benchmark.includeSamples,
},
}
}
17 changes: 2 additions & 15 deletions packages/vitest/src/node/reporters/benchmark/table/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -167,10 +167,8 @@ interface FormattedBenchmarkGroup {
benchmarks: FormattedBenchmarkResult[]
}

export type FormattedBenchmarkResult = Omit<BenchmarkResult, 'samples'> & {
export type FormattedBenchmarkResult = BenchmarkResult & {
id: string
sampleCount: number
median: number
}

function createFormattedBenchmarkReport(files: File[]) {
Expand All @@ -183,18 +181,7 @@ function createFormattedBenchmarkReport(files: File[]) {
for (const t of task.tasks) {
const benchmark = t.meta.benchmark && t.result?.benchmark
if (benchmark) {
const { samples, ...rest } = benchmark
benchmarks.push({
id: t.id,
sampleCount: samples.length,
median:
samples.length % 2
? samples[Math.floor(samples.length / 2)]
: (samples[samples.length / 2]
+ samples[samples.length / 2 - 1])
/ 2,
...rest,
})
benchmarks.push({ id: t.id, ...benchmark, samples: [] })
}
}
if (benchmarks.length) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ function renderBenchmarkItems(result: BenchmarkResult) {
formatNumber(result.p995 || 0),
formatNumber(result.p999 || 0),
${(result.rme || 0).toFixed(2)}%`,
result.samples.length.toString(),
(result.sampleCount || 0).toString(),
]
}

Expand Down Expand Up @@ -124,10 +124,7 @@ export function renderTree(
}
const baseline = options.compare?.[t.id]
if (baseline) {
benchMap[t.id].baseline = {
...baseline,
samples: Array.from({ length: baseline.sampleCount }),
}
benchMap[t.id].baseline = baseline
}
}
}
Expand Down
7 changes: 7 additions & 0 deletions packages/vitest/src/node/types/benchmark.ts
Original file line number Diff line number Diff line change
Expand Up @@ -50,4 +50,11 @@ export interface BenchmarkUserOptions {
* benchmark output file
*/
outputJson?: string

/**
* Include `samples` array of benchmark results for API or custom reporter usage.
* This is disabled by default to reduce memory usage.
* @default false
*/
includeSamples?: boolean
}
3 changes: 3 additions & 0 deletions packages/vitest/src/runtime/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,9 @@ export interface SerializedConfig {
standalone: boolean
logHeapUsage: boolean | undefined
coverage: SerializedCoverageConfig
benchmark?: {
includeSamples: boolean
}
}

export interface SerializedCoverageConfig {
Expand Down
9 changes: 9 additions & 0 deletions packages/vitest/src/runtime/runners/benchmark.ts
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,15 @@ async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) {
const taskRes = task.result!
const result = benchmark.result!.benchmark!
Object.assign(result, taskRes)
// compute extra stats and free raw samples as early as possible
const samples = result.samples
result.sampleCount = samples.length
result.median = samples.length % 2
? samples[Math.floor(samples.length / 2)]
: (samples[samples.length / 2] + samples[samples.length / 2 - 1]) / 2
if (!runner.config.benchmark?.includeSamples) {
result.samples.length = 0
}
updateTask(benchmark)
},
{
Expand Down
2 changes: 2 additions & 0 deletions packages/vitest/src/runtime/types/benchmark.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,8 @@ export interface Benchmark extends Custom {
export interface BenchmarkResult extends TinybenchResult {
name: string
rank: number
sampleCount: number
median: number
}

export type BenchFunction = (this: BenchFactory) => Promise<void> | void
Expand Down
23 changes: 22 additions & 1 deletion test/benchmark/test/reporter.test.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import { expect, it } from 'vitest'
import { assert, expect, it } from 'vitest'
import * as pathe from 'pathe'
import { runVitest } from '../../test-utils'

Expand Down Expand Up @@ -27,3 +27,24 @@ it('non-tty', async () => {
`
expect(lines).toMatchObject(expected.trim().split('\n').map(s => expect.stringContaining(s)))
})

it.for([true, false])('includeSamples %s', async (includeSamples) => {
const result = await runVitest(
{
root: pathe.join(import.meta.dirname, '../fixtures/reporter'),
benchmark: { includeSamples },
},
['summary.bench.ts'],
'benchmark',
)
assert(result.ctx)
const allSamples = [...result.ctx.state.idMap.values()]
.filter(t => t.meta.benchmark)
.map(t => t.result?.benchmark?.samples)
if (includeSamples) {
expect(allSamples[0]).not.toEqual([])
}
else {
expect(allSamples[0]).toEqual([])
}
})

0 comments on commit a6407af

Please sign in to comment.