feat(data): Added nats jetstream dependency
scripts/benchmark_scan_intake.ts (new file, 367 lines)
@@ -0,0 +1,367 @@
/**
 * Scan Intake Benchmark Script
 *
 * Measures TrackScan creation performance before and after each optimisation phase.
 * Run against a live dev server: npm run dev
 *
 * Usage:
 *   npx ts-node scripts/benchmark_scan_intake.ts
 *   npx ts-node scripts/benchmark_scan_intake.ts --base http://localhost:4010
 *
 * What it measures:
 *   1. Single sequential scans — baseline latency per request (p50/p95/p99/max)
 *   2. Parallel scans (10 stations) — simulates 10 concurrent stations each submitting
 *      one scan at a time at the expected event rate
 *      (~1 scan/3s per station = ~3.3 scans/s total)
 *
 * The script self-provisions all required data (org, runners, cards, track, stations)
 * and cleans up after itself. It authenticates via the station token, matching the
 * real production auth path exactly.
 *
 * Output is printed to stdout in a copy-paste-friendly table format so results can
 * be compared across phases.
 */

import axios, { AxiosInstance } from 'axios';
// `performance` is a global on Node 16+; the explicit import keeps the script
// working on older Node versions as well.
import { performance } from 'perf_hooks';

// ---------------------------------------------------------------------------
// Config
// ---------------------------------------------------------------------------

const BASE = (() => {
  const idx = process.argv.indexOf('--base');
  // Fall back to the default when --base is absent or given without a value.
  return idx !== -1 && process.argv[idx + 1] ? process.argv[idx + 1] : 'http://localhost:4010';
})();

const API = `${BASE}/api`;

// Number of simulated scan stations
const STATION_COUNT = 10;

// Sequential benchmark: total number of scans to send, one at a time
const SEQUENTIAL_SCAN_COUNT = 50;

// Parallel benchmark: number of rounds. Each round fires STATION_COUNT scans concurrently.
// 20 rounds × 10 stations = 200 total scans, matching the expected event throughput pattern.
const PARALLEL_ROUNDS = 20;

// Minimum lap time on the test track (seconds). Set low so most scans are valid.
// The benchmark measures submission speed, not business logic.
const TRACK_MINIMUM_LAP_TIME = 1;

// Track distance (metres)
const TRACK_DISTANCE = 400;
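
// Derived totals: benchmark 1 sends SEQUENTIAL_SCAN_COUNT = 50 scans and
// benchmark 2 sends PARALLEL_ROUNDS × STATION_COUNT = 200, so a full run
// submits 250 scan requests to the target server.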

// ---------------------------------------------------------------------------
// Types
// ---------------------------------------------------------------------------

interface StationHandle {
  id: number;
  key: string; // cleartext token, used as Bearer token
  cardCode: number; // EAN-13 barcode of the card assigned to this station's runner
  axiosInstance: AxiosInstance;
}

interface Percentiles {
  p50: number;
  p95: number;
  p99: number;
  max: number;
  min: number;
  mean: number;
}

interface BenchmarkResult {
  label: string;
  totalScans: number;
  totalTimeMs: number;
  scansPerSecond: number;
  latencies: Percentiles;
  errors: number;
}

// ---------------------------------------------------------------------------
// HTTP helpers
// ---------------------------------------------------------------------------

// validateStatus: () => true makes axios resolve on every HTTP status, so
// failures surface through explicit status checks instead of thrown errors.
const adminClient = axios.create({
  baseURL: API,
  validateStatus: () => true,
});

// Assumes the dev server's seed data includes a demo/demo admin account.
async function adminLogin(): Promise<string> {
  const res = await adminClient.post('/auth/login', { username: 'demo', password: 'demo' });
  if (res.status !== 200) {
    throw new Error(`Login failed: ${res.status} ${JSON.stringify(res.data)}`);
  }
  return res.data.access_token;
}

function authedClient(token: string): AxiosInstance {
  return axios.create({
    baseURL: API,
    validateStatus: () => true,
    headers: { authorization: `Bearer ${token}` },
  });
}
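
// authedClient() is used with the admin JWT during provisioning; the
// per-station axios instances built in provision() follow the same pattern,
// with the station key as the Bearer token.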

// ---------------------------------------------------------------------------
// Data provisioning
// ---------------------------------------------------------------------------

async function provision(adminToken: string): Promise<{
  stations: StationHandle[];
  trackId: number;
  orgId: number;
  cleanup: () => Promise<void>;
}> {
  const client = authedClient(adminToken);
  const createdIds: { type: string; id: number }[] = [];

  const create = async (path: string, body: object): Promise<any> => {
    const res = await client.post(path, body);
    if (res.status !== 200) {
      throw new Error(`POST ${path} failed: ${res.status} ${JSON.stringify(res.data)}`);
    }
    return res.data;
  };

  process.stdout.write('Provisioning test data... ');

  // Organisation
  const org = await create('/organizations', { name: 'benchmark-org' });
  createdIds.push({ type: 'organizations', id: org.id });

  // Track with a low minimumLapTime so re-scans within the benchmark are mostly valid
  const track = await create('/tracks', {
    name: 'benchmark-track',
    distance: TRACK_DISTANCE,
    minimumLapTime: TRACK_MINIMUM_LAP_TIME,
  });
  createdIds.push({ type: 'tracks', id: track.id });

  // One runner + card + station per simulated scan station
  const stations: StationHandle[] = [];

  for (let i = 0; i < STATION_COUNT; i++) {
    const runner = await create('/runners', {
      firstname: 'Bench',
      lastname: `Runner${i}`,
      group: org.id,
    });
    createdIds.push({ type: 'runners', id: runner.id });

    const card = await create('/cards', { runner: runner.id });
    createdIds.push({ type: 'cards', id: card.id });

    const station = await create('/stations', {
      track: track.id,
      description: `bench-station-${i}`,
    });
    createdIds.push({ type: 'stations', id: station.id });

    stations.push({
      id: station.id,
      key: station.key,
      cardCode: card.id, // the test spec uses card.id directly as the barcode value
      axiosInstance: axios.create({
        baseURL: API,
        validateStatus: () => true,
        headers: { authorization: `Bearer ${station.key}` },
      }),
    });
  }

  console.log(`done. (${STATION_COUNT} stations, ${STATION_COUNT} runners, ${STATION_COUNT} cards)`);

  const cleanup = async () => {
    process.stdout.write('Cleaning up test data... ');
    // Delete in reverse-dependency order
    for (const item of [...createdIds].reverse()) {
      await client.delete(`/${item.type}/${item.id}?force=true`);
    }
    console.log('done.');
  };

  return { stations, trackId: track.id, orgId: org.id, cleanup };
}

// ---------------------------------------------------------------------------
// Single scan submission (returns latency in ms)
// ---------------------------------------------------------------------------

async function submitScan(station: StationHandle): Promise<{ latencyMs: number; ok: boolean }> {
  const start = performance.now();
  const res = await station.axiosInstance.post('/scans/trackscans', {
    card: station.cardCode,
    station: station.id,
  });
  const latencyMs = performance.now() - start;
  const ok = res.status === 200;
  return { latencyMs, ok };
}
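
// Note: latencyMs is measured client-side, so it includes axios serialisation
// and local socket time on top of server processing; absolute values are best
// read as relative comparisons between optimisation phases.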

// ---------------------------------------------------------------------------
// Statistics
// ---------------------------------------------------------------------------

function percentiles(latencies: number[]): Percentiles {
  const sorted = [...latencies].sort((a, b) => a - b);
  const at = (pct: number) => sorted[Math.floor((pct / 100) * sorted.length)] ?? sorted[sorted.length - 1];
  const mean = sorted.reduce((s, v) => s + v, 0) / sorted.length;
  return {
    p50: Math.round(at(50)),
    p95: Math.round(at(95)),
    p99: Math.round(at(99)),
    max: Math.round(sorted[sorted.length - 1]),
    min: Math.round(sorted[0]),
    mean: Math.round(mean),
  };
}
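
// Nearest-rank lookup: with 200 samples, at(95) reads sorted[190] and at(50)
// reads sorted[100]; the ?? fallback guards the upper edge of the array.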

// ---------------------------------------------------------------------------
// Benchmark 1 — Sequential (single station, one scan at a time)
// ---------------------------------------------------------------------------

async function benchmarkSequential(station: StationHandle): Promise<BenchmarkResult> {
  const latencies: number[] = [];
  let errors = 0;

  process.stdout.write(`  Running ${SEQUENTIAL_SCAN_COUNT} sequential scans`);
  const wallStart = performance.now();

  for (let i = 0; i < SEQUENTIAL_SCAN_COUNT; i++) {
    const { latencyMs, ok } = await submitScan(station);
    latencies.push(latencyMs);
    if (!ok) errors++;
    if ((i + 1) % 10 === 0) process.stdout.write('.');
  }

  const totalTimeMs = performance.now() - wallStart;
  console.log(' done.');

  return {
    label: 'Sequential (1 station)',
    totalScans: SEQUENTIAL_SCAN_COUNT,
    totalTimeMs,
    scansPerSecond: (SEQUENTIAL_SCAN_COUNT / totalTimeMs) * 1000,
    latencies: percentiles(latencies),
    errors,
  };
}

// ---------------------------------------------------------------------------
// Benchmark 2 — Parallel (10 stations, concurrent rounds)
//
// Models the real event scenario: every ~3 seconds each station submits one scan.
// We don't actually sleep between rounds — we fire each round as fast as the
// previous one completes, which gives us the worst-case sustained throughput
// (all stations submitting at maximum rate simultaneously).
// ---------------------------------------------------------------------------

async function benchmarkParallel(stations: StationHandle[]): Promise<BenchmarkResult> {
  const latencies: number[] = [];
  let errors = 0;

  process.stdout.write(`  Running ${PARALLEL_ROUNDS} rounds × ${STATION_COUNT} concurrent stations`);
  const wallStart = performance.now();

  for (let round = 0; round < PARALLEL_ROUNDS; round++) {
    const results = await Promise.all(stations.map(s => submitScan(s)));
    for (const { latencyMs, ok } of results) {
      latencies.push(latencyMs);
      if (!ok) errors++;
    }
    if ((round + 1) % 4 === 0) process.stdout.write('.');
  }

  const totalTimeMs = performance.now() - wallStart;
  const totalScans = PARALLEL_ROUNDS * STATION_COUNT;
  console.log(' done.');

  return {
    label: `Parallel (${STATION_COUNT} stations concurrent)`,
    totalScans,
    totalTimeMs,
    scansPerSecond: (totalScans / totalTimeMs) * 1000,
    latencies: percentiles(latencies),
    errors,
  };
}

// ---------------------------------------------------------------------------
// Output formatting
// ---------------------------------------------------------------------------

function printResult(result: BenchmarkResult) {
  const { label, totalScans, totalTimeMs, scansPerSecond, latencies, errors } = result;
  console.log(`\n  ${label}`);
  console.log(`  ${'─'.repeat(52)}`);
  console.log(`  Total scans   : ${totalScans}`);
  console.log(`  Total time    : ${totalTimeMs.toFixed(0)} ms`);
  console.log(`  Throughput    : ${scansPerSecond.toFixed(2)} scans/sec`);
  console.log(`  Latency min   : ${latencies.min} ms`);
  console.log(`  Latency mean  : ${latencies.mean} ms`);
  console.log(`  Latency p50   : ${latencies.p50} ms`);
  console.log(`  Latency p95   : ${latencies.p95} ms`);
  console.log(`  Latency p99   : ${latencies.p99} ms`);
  console.log(`  Latency max   : ${latencies.max} ms`);
  console.log(`  Errors        : ${errors}`);
}

function printSummary(results: BenchmarkResult[]) {
  const now = new Date().toISOString();
  console.log('\n');
  console.log('═'.repeat(60));
  console.log(`  SCAN INTAKE BENCHMARK RESULTS — ${now}`);
  console.log(`  Server: ${BASE}`);
  console.log('═'.repeat(60));
  for (const r of results) {
    printResult(r);
  }
  console.log('\n' + '═'.repeat(60));
  console.log('  Copy the block above to compare across phases.');
  console.log('═'.repeat(60) + '\n');
}

// ---------------------------------------------------------------------------
// Entry point
// ---------------------------------------------------------------------------

async function main() {
  console.log(`\nScan Intake Benchmark — target: ${BASE}\n`);

  let adminToken: string;
  try {
    adminToken = await adminLogin();
  } catch (err) {
    console.error(`Could not authenticate. Is the server running at ${BASE}?\n`, (err as Error).message);
    process.exit(1);
  }

  const { stations, cleanup } = await provision(adminToken);

  const results: BenchmarkResult[] = [];

  try {
    console.log('\nBenchmark 1 — Sequential');
    results.push(await benchmarkSequential(stations[0]));

    // Brief pause between benchmarks so the sequential scans don't skew
    // the parallel benchmark's first-scan latency (minimumLapTime window)
    await new Promise(r => setTimeout(r, (TRACK_MINIMUM_LAP_TIME + 1) * 1000));

    console.log('\nBenchmark 2 — Parallel');
    results.push(await benchmarkParallel(stations));
  } finally {
    await cleanup();
  }

  printSummary(results);
}

main().catch(err => {
  console.error('Benchmark failed:', err);
  process.exit(1);
});
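
// When comparing phases, the parallel p95/p99 figures tend to be the most
// telling: sequential latency alone hides the queueing effects that only
// appear once all stations submit at the same time.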