Implement Phase 1 tasks 1.1-1.4 (scaffold + core types + config + Postgres)

Scaffold mirrors tcp-ingestion conventions: ESM, strict TS, pnpm, vitest
with unit/integration split, ESLint flat config with no-floating-promises
+ no-misused-promises + import/no-restricted-paths (the new src/core/ →
src/domain/ boundary that protects Phase 1 from Phase 2 churn).

Core types in src/core/types.ts (Position, StreamRecord, DeviceState,
Metrics, AttributeValue) — Position is byte-equivalent to tcp-ingestion's
output. Codec in src/core/codec.ts implements sentinel reversal:
{__bigint:"..."} → bigint, {__buffer_b64:"..."} → Buffer, ISO timestamp
string → Date. CodecError surfaces malformed payload reasons with the
failing field named.

Config in src/config/load.ts (zod schema, all 13 env vars with defaults
and bounded numerics). Logger in src/observability/logger.ts matches
tcp-ingestion exactly: ISO timestamps, string level labels, pino-pretty
in development.

Postgres in src/db/: createPool with sane defaults and application_name,
connectWithRetry mirroring the ioredis retry pattern, a 30-line
migration runner using a schema_migrations table, and 0001_positions.sql
with the hypertable + (device_id, ts) unique index + ts DESC index.
Migration runner unit-tested against a mocked pg.Pool; the real
TimescaleDB round-trip is deferred to task 1.10 per spec.

Verification: typecheck, lint, build all clean; 73 unit tests passing
across 4 files. import/no-restricted-paths verified live by temporarily
adding a forbidden src/domain/ import.
This commit is contained in:
2026-04-30 21:35:16 +02:00
parent c314ba0902
commit 95efc23139
28 changed files with 7427 additions and 13 deletions
+384
View File
@@ -0,0 +1,384 @@
/**
* Unit tests for src/core/codec.ts
*
* Covers:
* - Round-trip with bigint and Buffer attributes
* - u64-max bigint sentinel
* - Buffer with non-UTF-8 bytes
* - timestamp ISO string → Date round-trip (no millisecond loss)
* - All required fields present and correctly decoded
* - Reject malformed JSON
* - Reject missing required fields
* - Reject invalid sentinel shapes
* - Reject invalid priority values
*/
import { describe, it, expect } from 'vitest';
import { decodePosition, CodecError } from '../src/core/codec.js';
import type { Position } from '../src/core/types.js';
// ---------------------------------------------------------------------------
// Helpers — mirror tcp-ingestion's serializePosition / jsonReplacer inline
// so the test is self-contained and we can verify round-trip fidelity.
// ---------------------------------------------------------------------------
/**
* JSON replacer that mirrors tcp-ingestion's `jsonReplacer` exactly.
* bigint → { __bigint: "<digits>" }
* Buffer → { __buffer_b64: "<base64>" } (handles both direct instance and toJSON shape)
* Date → ISO string
*/
/**
 * JSON replacer mirroring tcp-ingestion's `jsonReplacer`.
 * - bigint            → { __bigint: "<digits>" }
 * - Buffer/Uint8Array → { __buffer_b64: "<base64>" } (raw instance or Buffer.toJSON() shape)
 * - Date              → ISO-8601 string
 * Everything else passes through untouched.
 */
function jsonReplacer(_key: string, value: unknown): unknown {
  if (typeof value === 'bigint') return { __bigint: String(value) };

  if (value instanceof Uint8Array) {
    return { __buffer_b64: Buffer.from(value).toString('base64') };
  }

  if (value instanceof Date) return value.toISOString();

  // Nested Buffers reach the replacer already converted by
  // Buffer.prototype.toJSON (toJSON runs before the replacer fires),
  // so detect that { type: 'Buffer', data: [...] } shape as well.
  if (typeof value === 'object' && value !== null) {
    const record = value as Record<string, unknown>;
    if (record['type'] === 'Buffer' && Array.isArray(record['data'])) {
      return { __buffer_b64: Buffer.from(record['data'] as number[]).toString('base64') };
    }
  }

  return value;
}
/**
 * Serializes a Position into the flat string record shape tcp-ingestion
 * writes to the Redis stream: ts, device_id, codec, and a JSON payload
 * produced with the sentinel-encoding replacer above.
 */
function serializePosition(position: Position, codec: string): Record<string, string> {
  const encodedPayload = JSON.stringify(position, jsonReplacer);
  return {
    ts: position.timestamp.toISOString(),
    device_id: position.device_id,
    codec,
    payload: encodedPayload,
  };
}
/** Builds a fully-populated Position fixture; `overrides` wins field-by-field. */
function makePosition(overrides: Partial<Position> = {}): Position {
  const base: Position = {
    device_id: 'TEST123456789',
    timestamp: new Date('2024-01-15T10:30:00.123Z'),
    latitude: 54.12345,
    longitude: 25.98765,
    altitude: 150,
    angle: 270,
    speed: 60,
    satellites: 8,
    priority: 1,
    attributes: {},
  };
  return { ...base, ...overrides };
}
// ---------------------------------------------------------------------------
// 1. Round-trip — basic position (no special attributes)
// ---------------------------------------------------------------------------
describe('decodePosition — basic round-trip', () => {
  it('decodes all scalar fields correctly', () => {
    const original = makePosition();
    const decoded = decodePosition(serializePosition(original, '8').payload);
    // Every non-Date scalar must survive the trip unchanged.
    const scalarKeys = [
      'device_id',
      'latitude',
      'longitude',
      'altitude',
      'angle',
      'speed',
      'satellites',
      'priority',
    ] as const;
    for (const key of scalarKeys) {
      expect(decoded[key]).toBe(original[key]);
    }
    expect(decoded.timestamp).toEqual(original.timestamp);
  });
  it('timestamp round-trips without millisecond loss', () => {
    // Non-zero milliseconds ensure sub-second precision is preserved.
    const ts = new Date('2024-06-15T13:45:30.987Z');
    const decoded = decodePosition(
      serializePosition(makePosition({ timestamp: ts }), '8').payload,
    );
    expect(decoded.timestamp.getTime()).toBe(ts.getTime());
    expect(decoded.timestamp.toISOString()).toBe('2024-06-15T13:45:30.987Z');
  });
  it('timestamp produces a Date instance (not a string)', () => {
    const decoded = decodePosition(serializePosition(makePosition(), '8').payload);
    expect(decoded.timestamp).toBeInstanceOf(Date);
  });
});
// ---------------------------------------------------------------------------
// 2. Round-trip — bigint attributes
// ---------------------------------------------------------------------------
describe('decodePosition — bigint attributes', () => {
it('round-trips a safe-integer bigint', () => {
const original = makePosition({ attributes: { io_21: BigInt('12345') } });
const { payload } = serializePosition(original, '8');
const decoded = decodePosition(payload);
expect(decoded.attributes['io_21']).toBe(BigInt('12345'));
});
it('round-trips a u64-max bigint (exceeds Number.MAX_SAFE_INTEGER)', () => {
const u64Max = BigInt('18446744073709551615');
const original = makePosition({ attributes: { io_240: u64Max } });
const { payload } = serializePosition(original, '8');
const decoded = decodePosition(payload);
expect(decoded.attributes['io_240']).toBe(u64Max);
});
it('round-trips zero bigint', () => {
const original = makePosition({ attributes: { io_1: 0n } });
const { payload } = serializePosition(original, '8');
const decoded = decodePosition(payload);
expect(decoded.attributes['io_1']).toBe(0n);
});
});
// ---------------------------------------------------------------------------
// 3. Round-trip — Buffer attributes
// ---------------------------------------------------------------------------
describe('decodePosition — Buffer attributes', () => {
  it('round-trips a Buffer with standard bytes', () => {
    const original = makePosition({ attributes: { io_nx: Buffer.from([0x01, 0x02, 0x03]) } });
    const { payload } = serializePosition(original, '8E');
    const decoded = decodePosition(payload);
    expect(Buffer.isBuffer(decoded.attributes['io_nx'])).toBe(true);
    expect(decoded.attributes['io_nx']).toEqual(Buffer.from([0x01, 0x02, 0x03]));
  });
  it('round-trips a Buffer with non-UTF-8 bytes (0xde 0xad 0xbe 0xef)', () => {
    const raw = Buffer.from([0xde, 0xad, 0xbe, 0xef]);
    const original = makePosition({ attributes: { nx_raw: raw } });
    const { payload } = serializePosition(original, '8E');
    const decoded = decodePosition(payload);
    const attr = decoded.attributes['nx_raw'];
    expect(Buffer.isBuffer(attr)).toBe(true);
    expect(attr as Buffer).toEqual(raw);
  });
  it('round-trips an empty Buffer', () => {
    const original = makePosition({ attributes: { empty: Buffer.alloc(0) } });
    const { payload } = serializePosition(original, '8E');
    const decoded = decodePosition(payload);
    const attr = decoded.attributes['empty'];
    expect(Buffer.isBuffer(attr)).toBe(true);
    expect((attr as Buffer).length).toBe(0);
  });
  it('Buffer content is byte-equal to original (not just same length)', () => {
    const raw = Buffer.from([0xca, 0xfe, 0xba, 0xbe]);
    const original = makePosition({ attributes: { sig: raw } });
    const { payload } = serializePosition(original, '8E');
    const decoded = decodePosition(payload);
    const attr = decoded.attributes['sig'] as Buffer;
    // Fix: assert the length first — the per-byte loop alone would pass if the
    // decoded buffer were LONGER than the original (extra trailing bytes would
    // never be compared).
    expect(attr.length).toBe(raw.length);
    for (let i = 0; i < raw.length; i++) {
      expect(attr[i]).toBe(raw[i]);
    }
  });
});
// ---------------------------------------------------------------------------
// 4. Round-trip — mixed attributes
// ---------------------------------------------------------------------------
describe('decodePosition — mixed attributes round-trip', () => {
  it('round-trips position with number, bigint, and Buffer attributes together', () => {
    const u64Max = BigInt('18446744073709551615');
    const nxBytes = Buffer.from([0xab, 0xcd]);
    const source = makePosition({
      attributes: { io_21: 42, io_240: u64Max, io_nx: nxBytes },
    });
    const decoded = decodePosition(serializePosition(source, '16').payload);
    // Plain number passes straight through; bigint and Buffer come back via sentinels.
    expect(decoded.attributes['io_21']).toBe(42);
    expect(decoded.attributes['io_240']).toBe(u64Max);
    const nxAttr = decoded.attributes['io_nx'] as Buffer;
    expect(Buffer.isBuffer(nxAttr)).toBe(true);
    expect(nxAttr).toEqual(nxBytes);
  });
});
// ---------------------------------------------------------------------------
// 5. Priority values
// ---------------------------------------------------------------------------
describe('decodePosition — priority', () => {
  it('accepts priority 0 (Low)', () => {
    const { payload } = serializePosition(makePosition({ priority: 0 }), '8');
    expect(() => decodePosition(payload)).not.toThrow();
    expect(decodePosition(payload).priority).toBe(0);
  });
  it('accepts priority 2 (Panic)', () => {
    const { payload } = serializePosition(makePosition({ priority: 2 }), '8');
    expect(decodePosition(payload).priority).toBe(2);
  });
});
// ---------------------------------------------------------------------------
// 6. Error cases
// ---------------------------------------------------------------------------
// Each case feeds a deliberately malformed payload to decodePosition and
// expects CodecError; together they pin down the codec's validation surface:
// JSON shape, required fields, timestamp parsing, priority range, sentinel
// shape, and attribute-value typing.
describe('decodePosition — error cases', () => {
  it('throws CodecError on non-JSON input', () => {
    expect(() => decodePosition('not json at all')).toThrow(CodecError);
  });
  it('throws CodecError on empty string', () => {
    expect(() => decodePosition('')).toThrow(CodecError);
  });
  it('throws CodecError when payload is a JSON array (not object)', () => {
    expect(() => decodePosition('[]')).toThrow(CodecError);
  });
  it('throws CodecError when payload is a JSON number', () => {
    expect(() => decodePosition('42')).toThrow(CodecError);
  });
  // Field-removal cases start from a known-valid serialized payload and
  // delete exactly one key, so only that field can be the failure cause.
  it('throws CodecError when device_id is missing', () => {
    const pos = makePosition();
    const { payload } = serializePosition(pos, '8');
    const obj = JSON.parse(payload) as Record<string, unknown>;
    delete obj['device_id'];
    expect(() => decodePosition(JSON.stringify(obj))).toThrow(CodecError);
  });
  it('throws CodecError when device_id is empty string', () => {
    const obj = {
      device_id: '',
      timestamp: new Date().toISOString(),
      latitude: 0,
      longitude: 0,
      altitude: 0,
      angle: 0,
      speed: 0,
      satellites: 0,
      priority: 0,
      attributes: {},
    };
    expect(() => decodePosition(JSON.stringify(obj))).toThrow(CodecError);
  });
  it('throws CodecError when timestamp is missing', () => {
    const pos = makePosition();
    const { payload } = serializePosition(pos, '8');
    const obj = JSON.parse(payload) as Record<string, unknown>;
    delete obj['timestamp'];
    expect(() => decodePosition(JSON.stringify(obj))).toThrow(CodecError);
  });
  it('throws CodecError when timestamp is an invalid date string', () => {
    const obj = {
      device_id: 'TEST123',
      timestamp: 'not-a-date',
      latitude: 0,
      longitude: 0,
      altitude: 0,
      angle: 0,
      speed: 0,
      satellites: 0,
      priority: 0,
      attributes: {},
    };
    expect(() => decodePosition(JSON.stringify(obj))).toThrow(CodecError);
  });
  it('throws CodecError when a required numeric field is missing', () => {
    const pos = makePosition();
    const { payload } = serializePosition(pos, '8');
    const obj = JSON.parse(payload) as Record<string, unknown>;
    // latitude stands in for all required numerics here; the named-field test
    // below covers the error-message contract.
    delete obj['latitude'];
    expect(() => decodePosition(JSON.stringify(obj))).toThrow(CodecError);
  });
  it('throws CodecError when priority is out of range (e.g. 3)', () => {
    // Valid priorities are 0/1/2 (see the priority describe above); 3 must fail.
    const obj = {
      device_id: 'TEST123',
      timestamp: new Date().toISOString(),
      latitude: 0,
      longitude: 0,
      altitude: 0,
      angle: 0,
      speed: 0,
      satellites: 0,
      priority: 3,
      attributes: {},
    };
    expect(() => decodePosition(JSON.stringify(obj))).toThrow(CodecError);
  });
  it('throws CodecError when __bigint value is not decimal digits', () => {
    // Sentinel shape is right but the payload is not parseable as a bigint.
    const obj = {
      device_id: 'TEST123',
      timestamp: new Date().toISOString(),
      latitude: 0,
      longitude: 0,
      altitude: 0,
      angle: 0,
      speed: 0,
      satellites: 0,
      priority: 0,
      attributes: { io_bad: { __bigint: 'not-a-number' } },
    };
    expect(() => decodePosition(JSON.stringify(obj))).toThrow(CodecError);
  });
  it('CodecError message names the failing field', () => {
    const obj = {
      device_id: 'TEST123',
      timestamp: new Date().toISOString(),
      latitude: 'oops', // wrong type
      longitude: 0,
      altitude: 0,
      angle: 0,
      speed: 0,
      satellites: 0,
      priority: 0,
      attributes: {},
    };
    // Regex (not CodecError) on purpose: this pins the message contract.
    expect(() => decodePosition(JSON.stringify(obj))).toThrow(/latitude/);
  });
  it('throws CodecError when attributes value is not a valid AttributeValue (e.g. nested object)', () => {
    const obj = {
      device_id: 'TEST123',
      timestamp: new Date().toISOString(),
      latitude: 0,
      longitude: 0,
      altitude: 0,
      angle: 0,
      speed: 0,
      satellites: 0,
      priority: 0,
      // A plain nested object (not a sentinel) should fail validation
      attributes: { io_bad: { nested: 'value' } },
    };
    expect(() => decodePosition(JSON.stringify(obj))).toThrow(CodecError);
  });
});
+247
View File
@@ -0,0 +1,247 @@
/**
* Unit tests for src/config/load.ts
*
* Covers:
* - Parses all defaults correctly when only required vars are provided
* - Missing required vars throw with the right message
* - Invalid URLs throw (wrong protocol, not a URL)
* - Bounded numerics throw on out-of-range values
* - REDIS_CONSUMER_NAME defaults to INSTANCE_ID
* - Explicit REDIS_CONSUMER_NAME overrides INSTANCE_ID
*/
import { describe, it, expect } from 'vitest';
import { loadConfig } from '../src/config/load.js';
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/** Minimal valid env — only required fields. */
/** Minimal valid env — only required fields; `overrides` may add or replace keys. */
function validEnv(overrides: Record<string, string> = {}): Record<string, string> {
  const required: Record<string, string> = {
    REDIS_URL: 'redis://localhost:6379',
    POSTGRES_URL: 'postgres://postgres:pass@localhost:5432/trm',
  };
  return Object.assign(required, overrides);
}
// ---------------------------------------------------------------------------
// 1. Happy path — defaults
// ---------------------------------------------------------------------------
describe('loadConfig — defaults', () => {
  it('parses successfully with only required vars', () => {
    const config = loadConfig(validEnv());
    expect(config.REDIS_URL).toBe('redis://localhost:6379');
    expect(config.POSTGRES_URL).toBe('postgres://postgres:pass@localhost:5432/trm');
  });
  // Each row asserts a single schema default: [test title, config key, expected].
  const stringDefaults: Array<[string, string, string]> = [
    ['applies default NODE_ENV=production', 'NODE_ENV', 'production'],
    ['applies default INSTANCE_ID=processor-1', 'INSTANCE_ID', 'processor-1'],
    ['applies default LOG_LEVEL=info', 'LOG_LEVEL', 'info'],
    ['applies default REDIS_TELEMETRY_STREAM=telemetry:t', 'REDIS_TELEMETRY_STREAM', 'telemetry:t'],
    ['applies default REDIS_CONSUMER_GROUP=processor', 'REDIS_CONSUMER_GROUP', 'processor'],
  ];
  for (const [title, key, expected] of stringDefaults) {
    it(title, () => {
      const config = loadConfig(validEnv()) as unknown as Record<string, unknown>;
      expect(config[key]).toBe(expected);
    });
  }
  it('defaults REDIS_CONSUMER_NAME to INSTANCE_ID', () => {
    const config = loadConfig(validEnv({ INSTANCE_ID: 'my-instance' }));
    expect(config.REDIS_CONSUMER_NAME).toBe('my-instance');
  });
  it('respects explicit REDIS_CONSUMER_NAME override', () => {
    const config = loadConfig(
      validEnv({ INSTANCE_ID: 'instance-a', REDIS_CONSUMER_NAME: 'consumer-override' }),
    );
    expect(config.REDIS_CONSUMER_NAME).toBe('consumer-override');
  });
  const numericDefaults: Array<[string, string, number]> = [
    ['applies default METRICS_PORT=9090', 'METRICS_PORT', 9090],
    ['applies default BATCH_SIZE=100', 'BATCH_SIZE', 100],
    ['applies default BATCH_BLOCK_MS=5000', 'BATCH_BLOCK_MS', 5_000],
    ['applies default WRITE_BATCH_SIZE=50', 'WRITE_BATCH_SIZE', 50],
    ['applies default DEVICE_STATE_LRU_CAP=10000', 'DEVICE_STATE_LRU_CAP', 10_000],
  ];
  for (const [title, key, expected] of numericDefaults) {
    it(title, () => {
      const config = loadConfig(validEnv()) as unknown as Record<string, unknown>;
      expect(config[key]).toBe(expected);
    });
  }
});
// ---------------------------------------------------------------------------
// 2. Missing required vars
// ---------------------------------------------------------------------------
describe('loadConfig — missing required vars', () => {
  it('throws when REDIS_URL is missing', () => {
    const envWithoutRedis = { POSTGRES_URL: 'postgres://localhost:5432/trm' };
    expect(() => loadConfig(envWithoutRedis)).toThrow(/REDIS_URL/);
  });
  it('throws when POSTGRES_URL is missing', () => {
    const envWithoutPostgres = { REDIS_URL: 'redis://localhost:6379' };
    expect(() => loadConfig(envWithoutPostgres)).toThrow(/POSTGRES_URL/);
  });
  it('throws when both required vars are missing', () => {
    expect(() => loadConfig({})).toThrow(/Configuration error/);
  });
  it('error message mentions every failing field', () => {
    // Capture the thrown message so we can assert on several fragments of it.
    const message = (() => {
      try {
        loadConfig({});
        return '';
      } catch (err) {
        return err instanceof Error ? err.message : '';
      }
    })();
    expect(message).toMatch(/REDIS_URL/);
    expect(message).toMatch(/POSTGRES_URL/);
  });
});
// ---------------------------------------------------------------------------
// 3. URL validation
// ---------------------------------------------------------------------------
describe('loadConfig — URL validation', () => {
  // One row per scenario; `rejects` absent means the env must parse cleanly.
  const cases: Array<{ title: string; env: Record<string, string>; rejects?: RegExp }> = [
    { title: 'accepts redis:// URLs', env: { REDIS_URL: 'redis://redis:6379' } },
    { title: 'accepts rediss:// (TLS) URLs', env: { REDIS_URL: 'rediss://redis:6380' } },
    {
      title: 'rejects REDIS_URL with wrong protocol (http)',
      env: { REDIS_URL: 'http://localhost:6379' },
      rejects: /REDIS_URL/,
    },
    {
      title: 'rejects REDIS_URL that is not a URL at all',
      env: { REDIS_URL: 'not-a-url' },
      rejects: /REDIS_URL/,
    },
    { title: 'accepts postgres:// URLs', env: { POSTGRES_URL: 'postgres://user:pass@db:5432/mydb' } },
    {
      title: 'accepts postgresql:// URLs',
      env: { POSTGRES_URL: 'postgresql://user:pass@db:5432/mydb' },
    },
    {
      title: 'rejects POSTGRES_URL with wrong protocol (mysql)',
      env: { POSTGRES_URL: 'mysql://localhost:3306/db' },
      rejects: /POSTGRES_URL/,
    },
    {
      title: 'rejects POSTGRES_URL that is not a URL at all',
      env: { POSTGRES_URL: 'localhost/db' },
      rejects: /POSTGRES_URL/,
    },
  ];
  for (const { title, env, rejects } of cases) {
    it(title, () => {
      const attempt = (): unknown => loadConfig(validEnv(env));
      if (rejects) {
        expect(attempt).toThrow(rejects);
      } else {
        expect(attempt).not.toThrow();
      }
    });
  }
});
// ---------------------------------------------------------------------------
// 4. Bounded numerics
// ---------------------------------------------------------------------------
describe('loadConfig — bounded numerics', () => {
  /** Thunk factory so assertions read as expect(load({...})).toThrow(...). */
  const load = (overrides: Record<string, string>) => () => loadConfig(validEnv(overrides));
  it('rejects BATCH_SIZE below minimum (0)', () => {
    expect(load({ BATCH_SIZE: '0' })).toThrow(/BATCH_SIZE/);
  });
  it('rejects BATCH_SIZE above maximum (10001)', () => {
    expect(load({ BATCH_SIZE: '10001' })).toThrow(/BATCH_SIZE/);
  });
  it('accepts BATCH_SIZE at boundary values (1, 10000)', () => {
    expect(load({ BATCH_SIZE: '1' })).not.toThrow();
    expect(load({ BATCH_SIZE: '10000' })).not.toThrow();
  });
  it('rejects BATCH_BLOCK_MS above maximum (60001)', () => {
    expect(load({ BATCH_BLOCK_MS: '60001' })).toThrow(/BATCH_BLOCK_MS/);
  });
  it('accepts BATCH_BLOCK_MS=0 (no blocking)', () => {
    expect(loadConfig(validEnv({ BATCH_BLOCK_MS: '0' })).BATCH_BLOCK_MS).toBe(0);
  });
  it('rejects WRITE_BATCH_SIZE below minimum (0)', () => {
    expect(load({ WRITE_BATCH_SIZE: '0' })).toThrow(/WRITE_BATCH_SIZE/);
  });
  it('rejects WRITE_BATCH_SIZE above maximum (1001)', () => {
    expect(load({ WRITE_BATCH_SIZE: '1001' })).toThrow(/WRITE_BATCH_SIZE/);
  });
  it('rejects DEVICE_STATE_LRU_CAP below minimum (99)', () => {
    expect(load({ DEVICE_STATE_LRU_CAP: '99' })).toThrow(/DEVICE_STATE_LRU_CAP/);
  });
  it('rejects DEVICE_STATE_LRU_CAP above maximum (1000001)', () => {
    expect(load({ DEVICE_STATE_LRU_CAP: '1000001' })).toThrow(/DEVICE_STATE_LRU_CAP/);
  });
  it('rejects non-numeric METRICS_PORT', () => {
    expect(load({ METRICS_PORT: 'abc' })).toThrow(/METRICS_PORT/);
  });
});
// ---------------------------------------------------------------------------
// 5. LOG_LEVEL validation
// ---------------------------------------------------------------------------
describe('loadConfig — LOG_LEVEL', () => {
  it('accepts all valid pino levels', () => {
    (['fatal', 'error', 'warn', 'info', 'debug', 'trace'] as const).forEach((level) => {
      expect(() => loadConfig(validEnv({ LOG_LEVEL: level }))).not.toThrow();
    });
  });
  it('rejects an invalid log level', () => {
    expect(() => loadConfig(validEnv({ LOG_LEVEL: 'verbose' }))).toThrow(/LOG_LEVEL/);
  });
});
+319
View File
@@ -0,0 +1,319 @@
/**
* Unit tests for src/db/migrate.ts
*
* Tests against a mocked pg.Pool — no real Postgres required here.
* The real round-trip against TimescaleDB lives in task 1.10 (integration test,
* testcontainers). See task 1.4 spec for the rationale.
*
* Covers:
* - Applying a fresh migration runs SQL inside a transaction and records version
* - Applying the same migration twice is a no-op (second call skips)
* - A SQL error causes a rollback and re-throws
* - Multiple migration files are applied in lexicographic order
*/
import { describe, it, expect, vi } from 'vitest';
import type { MockedFunction } from 'vitest';
import type { Logger } from 'pino';
import type { Pool, PoolClient } from 'pg';
// ---------------------------------------------------------------------------
// pg.Pool mock
// ---------------------------------------------------------------------------
type QueryCall = {
  sql: string;
  params?: unknown[];
};
type MockPoolOptions = {
  /**
   * Map from query SQL fragment to result or Error.
   * If a query SQL contains the key as a substring, that handler fires.
   * The first matching key wins. Unmatched queries return `{ rows: [] }`.
   */
  handlers?: Record<string, { rows?: unknown[] } | Error>;
};
type MockClient = {
  query: MockedFunction<(sql: string, params?: unknown[]) => Promise<{ rows: unknown[] }>>;
  release: MockedFunction<() => void>;
};
/**
 * Builds a pg.Pool double. Both pool-level `query` and the client returned by
 * `connect()` record every call in `calls` and answer via substring-matched
 * `handlers` (Error values are thrown; result objects are resolved).
 */
function makeMockPool(options: MockPoolOptions = {}): {
  pool: Pool;
  calls: QueryCall[];
} {
  const calls: QueryCall[] = [];
  const handlers = options.handlers ?? {};
  // Fix: normalize handler results so callers always receive `{ rows: [] }`
  // even when a handler omits `rows` — previously a handler of `{}` leaked
  // `{ rows: undefined }` through an unsafe cast.
  function resolveQuery(sql: string): { rows: unknown[] } | Error {
    for (const [fragment, result] of Object.entries(handlers)) {
      if (sql.includes(fragment)) {
        return result instanceof Error ? result : { rows: result.rows ?? [] };
      }
    }
    return { rows: [] };
  }
  // Shared record-then-resolve behavior; wrapped in two distinct vi.fn mocks
  // so pool-level and client-level call counts stay independent.
  const recordAndResolve = async (
    sql: string,
    params?: unknown[],
  ): Promise<{ rows: unknown[] }> => {
    calls.push({ sql, params });
    const result = resolveQuery(sql);
    if (result instanceof Error) throw result;
    return result;
  };
  // Pool-level query (used for CREATE TABLE IF NOT EXISTS schema_migrations)
  const poolQuery = vi.fn(recordAndResolve);
  // Client returned by pool.connect()
  const clientQuery = vi.fn(recordAndResolve);
  const mockClient: MockClient = {
    query: clientQuery,
    release: vi.fn(),
  };
  const poolConnect = vi.fn(async () => mockClient as unknown as PoolClient);
  return {
    pool: {
      query: poolQuery,
      connect: poolConnect,
    } as unknown as Pool,
    calls,
  };
}
/**
 * Pino-shaped stub with a distinct vi.fn per level so tests can assert on
 * individual levels (e.g. info called, warn not called) independently.
 */
function makeSilentLogger(): Logger {
  const stub: Record<string, unknown> = { level: 'silent' };
  for (const level of ['trace', 'debug', 'info', 'warn', 'error', 'fatal', 'silent'] as const) {
    stub[level] = vi.fn();
  }
  stub['child'] = vi.fn().mockReturnThis();
  return stub as unknown as Logger;
}
// ---------------------------------------------------------------------------
// Import under test
// ---------------------------------------------------------------------------
// We mock node:fs/promises so we control file listing and content,
// isolating the runner logic from the real filesystem.
// vitest hoists vi.mock above the static imports below, so the `readdir` /
// `readFile` bindings imported next resolve to these vi.fn stubs rather than
// the real node:fs/promises implementations.
vi.mock('node:fs/promises', () => ({
  readdir: vi.fn(),
  readFile: vi.fn(),
}));
import { readdir, readFile } from 'node:fs/promises';
import { runMigrations } from '../../src/db/migrate.js';
// Re-typed handles so tests can call mockResolvedValue & friends without
// per-call-site casts.
const mockReaddir = readdir as MockedFunction<typeof readdir>;
const mockReadFile = readFile as MockedFunction<typeof readFile>;
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
// Happy path against the mock pool: bootstrap table → EXISTS check → BEGIN →
// migration SQL → version INSERT → COMMIT, verified via the recorded calls.
describe('runMigrations — fresh database', () => {
  it('creates schema_migrations table, runs migration SQL, and records version', async () => {
    const fakeSql = 'CREATE TABLE IF NOT EXISTS positions (id serial);';
    const version = '0001_positions.sql';
    // readdir's return type is Dirent-flavoured; the double cast feeds plain
    // filename strings. NOTE(review): assumes the runner calls readdir without
    // `withFileTypes` — confirm against migrate.ts.
    mockReaddir.mockResolvedValue([version] as unknown as Awaited<ReturnType<typeof readdir>>);
    mockReadFile.mockResolvedValue(fakeSql as unknown as Buffer);
    const { pool, calls } = makeMockPool({
      handlers: {
        'SELECT EXISTS': { rows: [{ exists: false }] },
      },
    });
    await runMigrations(pool, makeSilentLogger());
    // 1. Schema migrations table bootstrapped
    expect(
      calls.find((c) => c.sql.includes('CREATE TABLE IF NOT EXISTS schema_migrations')),
    ).toBeDefined();
    // 2. EXISTS check ran with the correct version
    expect(
      calls.find((c) => c.sql.includes('SELECT EXISTS') && c.params?.[0] === version),
    ).toBeDefined();
    // 3. BEGIN transaction
    expect(calls.find((c) => c.sql === 'BEGIN')).toBeDefined();
    // 4. Migration SQL executed
    expect(calls.find((c) => c.sql === fakeSql)).toBeDefined();
    // 5. Version recorded
    expect(
      calls.find(
        (c) => c.sql.includes('INSERT INTO schema_migrations') && c.params?.[0] === version,
      ),
    ).toBeDefined();
    // 6. COMMIT
    expect(calls.find((c) => c.sql === 'COMMIT')).toBeDefined();
  });
  it('logs info after applying the migration', async () => {
    mockReaddir.mockResolvedValue(
      ['0001_positions.sql'] as unknown as Awaited<ReturnType<typeof readdir>>,
    );
    mockReadFile.mockResolvedValue('SELECT 1' as unknown as Buffer);
    const { pool } = makeMockPool({
      handlers: { 'SELECT EXISTS': { rows: [{ exists: false }] } },
    });
    const logger = makeSilentLogger();
    await runMigrations(pool, logger);
    // The structured log (version field + exact message) is part of the
    // runner's observable contract.
    expect(logger.info).toHaveBeenCalledWith(
      expect.objectContaining({ version: '0001_positions.sql' }),
      'migration applied',
    );
  });
});
// Idempotency: a version already present in schema_migrations must be skipped
// without opening a transaction, on the first run and on every run after.
describe('runMigrations — already applied (idempotency)', () => {
  it('skips migration when already recorded in schema_migrations', async () => {
    const version = '0001_positions.sql';
    mockReaddir.mockResolvedValue([version] as unknown as Awaited<ReturnType<typeof readdir>>);
    mockReadFile.mockResolvedValue('SELECT 1' as unknown as Buffer);
    const { pool, calls } = makeMockPool({
      handlers: {
        'SELECT EXISTS': { rows: [{ exists: true }] },
      },
    });
    const logger = makeSilentLogger();
    await runMigrations(pool, logger);
    // No transaction should have been started
    expect(calls.find((c) => c.sql === 'BEGIN')).toBeUndefined();
    expect(logger.info).toHaveBeenCalledWith(
      expect.objectContaining({ version }),
      'migration already applied; skipping',
    );
  });
  it('is a no-op when called twice with the same migrations', async () => {
    // EXISTS check runs through pool.query (not through a client), so we track
    // call count on the pool-level query mock. A hand-rolled pool (instead of
    // makeMockPool) is needed because the EXISTS answer must change between runs.
    let existsCallCount = 0;
    const version = '0001_positions.sql';
    mockReaddir.mockResolvedValue([version] as unknown as Awaited<ReturnType<typeof readdir>>);
    mockReadFile.mockResolvedValue('SELECT 1' as unknown as Buffer);
    const clientQuery = vi.fn(async (_sql: string, _params?: unknown[]) => {
      return { rows: [] as unknown[] };
    });
    const client = { query: clientQuery, release: vi.fn() };
    const poolQuery = vi.fn(async (sql: string, _params?: unknown[]) => {
      if (sql.includes('SELECT EXISTS')) {
        existsCallCount++;
        // First run: not yet applied; second run: already applied
        return { rows: [{ exists: existsCallCount > 1 }] };
      }
      return { rows: [] as unknown[] };
    });
    const pool = {
      query: poolQuery,
      connect: vi.fn(async () => client as unknown as PoolClient),
    } as unknown as Pool;
    const logger = makeSilentLogger();
    await runMigrations(pool, logger);
    await runMigrations(pool, logger);
    // BEGIN called exactly once (first run only; second run skips the migration)
    const beginCalls = (clientQuery.mock.calls as [string][]).filter(([sql]) => sql === 'BEGIN');
    expect(beginCalls).toHaveLength(1);
  });
});
describe('runMigrations — SQL error', () => {
  it('rolls back on SQL error and rethrows', async () => {
    mockReaddir.mockResolvedValue(
      ['0001_positions.sql'] as unknown as Awaited<ReturnType<typeof readdir>>,
    );
    mockReadFile.mockResolvedValue('BAD SQL;' as unknown as Buffer);
    // Client that blows up only on the migration SQL itself, recording
    // everything it is asked to run so we can check for ROLLBACK afterwards.
    const executedSql: string[] = [];
    const failingClient = {
      query: vi.fn(async (sql: string, _params?: unknown[]) => {
        executedSql.push(sql);
        if (sql === 'BAD SQL;') throw new Error('syntax error at or near "BAD"');
        return { rows: [] };
      }),
      release: vi.fn(),
    };
    const pool = {
      query: vi.fn(async (sql: string) =>
        sql.includes('SELECT EXISTS') ? { rows: [{ exists: false }] } : { rows: [] as unknown[] },
      ),
      connect: vi.fn(async () => failingClient as unknown as PoolClient),
    } as unknown as Pool;
    const logger = makeSilentLogger();
    await expect(runMigrations(pool, logger)).rejects.toThrow('syntax error');
    expect(executedSql).toContain('ROLLBACK');
    expect(logger.error).toHaveBeenCalledWith(
      expect.objectContaining({ version: '0001_positions.sql' }),
      'migration failed; rolled back',
    );
  });
});
describe('runMigrations — multiple migration files', () => {
  it('applies files in lexicographic order', async () => {
    // Directory listing is deliberately reversed to prove the runner sorts it.
    mockReaddir.mockResolvedValue(
      ['0002_second.sql', '0001_first.sql'] as unknown as Awaited<ReturnType<typeof readdir>>,
    );
    mockReadFile.mockResolvedValue('SELECT 1' as unknown as Buffer);
    const appliedOrder: string[] = [];
    const recordingClient = {
      query: vi.fn(async (sql: string, params?: unknown[]) => {
        if (sql.includes('INSERT INTO schema_migrations')) {
          appliedOrder.push(params?.[0] as string);
        }
        return { rows: [] };
      }),
      release: vi.fn(),
    };
    const pool = {
      query: vi.fn(async (sql: string, _params?: unknown[]) =>
        sql.includes('SELECT EXISTS') ? { rows: [{ exists: false }] } : { rows: [] as unknown[] },
      ),
      connect: vi.fn(async () => recordingClient as unknown as PoolClient),
    } as unknown as Pool;
    await runMigrations(pool, makeSilentLogger());
    expect(appliedOrder).toEqual(['0001_first.sql', '0002_second.sql']);
  });
});
+133
View File
@@ -0,0 +1,133 @@
/**
* Unit tests for src/db/pool.ts
*
* Covers:
* - connectWithRetry succeeds on first attempt
* - connectWithRetry retries on failure and succeeds on a later attempt
* - connectWithRetry calls process.exit(1) after exhausting all attempts
* - Warn is logged for each non-final failed attempt; fatal for the last
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import type { Logger } from 'pino';
import type { Pool, PoolClient } from 'pg';
import { connectWithRetry } from '../../src/db/pool.js';
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/**
 * Pino-shaped stub with a distinct vi.fn per level so tests can assert on
 * individual levels (e.g. warn called once, fatal never) independently.
 */
function makeSilentLogger(): Logger {
  const stub: Record<string, unknown> = { level: 'silent' };
  for (const level of ['trace', 'debug', 'info', 'warn', 'error', 'fatal', 'silent'] as const) {
    stub[level] = vi.fn();
  }
  stub['child'] = vi.fn().mockReturnThis();
  return stub as unknown as Logger;
}
/**
* Creates a mock pg.Pool whose connect() resolves or rejects according to
* the `connectResults` sequence. Each call consumes the next entry.
*/
/**
 * Creates a mock pg.Pool whose connect() resolves or rejects according to
 * the `connectResults` sequence. Each call consumes the next entry.
 *
 * Fix: if connect() is called more times than entries were scripted, throw a
 * descriptive Error — previously the mock did `throw result` with `result`
 * being `undefined`, producing an undiagnosable `undefined` rejection.
 */
function makeMockPool(connectResults: Array<'ok' | Error>): {
  pool: Pool;
  connectCallCount: () => number;
} {
  let callIndex = 0;
  const mockClient = {
    query: vi.fn().mockResolvedValue({ rows: [] }),
    release: vi.fn(),
  };
  const connect = vi.fn(async () => {
    if (callIndex >= connectResults.length) {
      throw new Error(
        `makeMockPool: connect() called ${callIndex + 1} times but only ` +
          `${connectResults.length} result(s) were scripted`,
      );
    }
    const result = connectResults[callIndex++];
    if (result === 'ok') {
      return mockClient as unknown as PoolClient;
    }
    throw result;
  });
  return {
    pool: { connect } as unknown as Pool,
    connectCallCount: () => callIndex,
  };
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
// Retry/backoff behavior is driven entirely by fake timers; each test advances
// them explicitly, so no real sleeping happens.
describe('connectWithRetry', () => {
  beforeEach(() => {
    // Fake timers let the backoff setTimeout calls be advanced synchronously.
    vi.useFakeTimers();
  });
  afterEach(() => {
    vi.useRealTimers();
    vi.restoreAllMocks(); // also undoes the process.exit spy below
  });
  it('succeeds on first attempt without retrying', async () => {
    const { pool, connectCallCount } = makeMockPool(['ok']);
    const logger = makeSilentLogger();
    await connectWithRetry(pool, logger, 3);
    expect(connectCallCount()).toBe(1);
    expect(logger.info).toHaveBeenCalledWith({ attempt: 1 }, 'Postgres connected');
    expect(logger.warn).not.toHaveBeenCalled();
  });
  it('retries on failure and succeeds on the second attempt', async () => {
    const { pool, connectCallCount } = makeMockPool([new Error('ECONNREFUSED'), 'ok']);
    const logger = makeSilentLogger();
    // Start the attempt first, THEN drive timers — awaiting directly would
    // deadlock because the backoff timer never fires under fake timers.
    const promise = connectWithRetry(pool, logger, 2);
    // Advance timers to fire the backoff setTimeout (200ms * 2^0 = 200ms)
    await vi.runAllTimersAsync();
    await promise;
    expect(connectCallCount()).toBe(2);
    expect(logger.warn).toHaveBeenCalledOnce();
    expect(logger.info).toHaveBeenCalledWith({ attempt: 2 }, 'Postgres connected');
  });
  it('calls process.exit(1) after exhausting all attempts — maxAttempts=1', async () => {
    // Use maxAttempts=1 to skip backoff timers entirely, avoiding timer-related
    // unhandled rejection noise in the test suite.
    // The spy throws because the real process.exit never returns; throwing lets
    // the async function unwind so we can assert on the rejection.
    const exitSpy = vi.spyOn(process, 'exit').mockImplementation((_code) => {
      throw new Error('process.exit called');
    });
    const { pool } = makeMockPool([new Error('ECONNREFUSED')]);
    const logger = makeSilentLogger();
    await expect(connectWithRetry(pool, logger, 1)).rejects.toThrow('process.exit called');
    expect(exitSpy).toHaveBeenCalledWith(1);
    expect(logger.fatal).toHaveBeenCalledOnce();
    // With maxAttempts=1, no retries → no warn
    expect(logger.warn).not.toHaveBeenCalled();
  });
  it('logs warn for non-final failed attempts', async () => {
    // maxAttempts=2: attempt 1 fails (warn), attempt 2 succeeds.
    // This avoids the unhandled-rejection noise that occurs when process.exit
    // throws inside an async function that has a pending backoff timer.
    const { pool } = makeMockPool([new Error('fail 1'), 'ok']);
    const logger = makeSilentLogger();
    const promise = connectWithRetry(pool, logger, 2);
    await vi.runAllTimersAsync();
    await promise;
    expect(logger.warn).toHaveBeenCalledTimes(1);
    expect(logger.fatal).not.toHaveBeenCalled();
    expect(logger.info).toHaveBeenCalledWith({ attempt: 2 }, 'Postgres connected');
  });
});
});