Implement Phase 1 tasks 1.1-1.4 (scaffold + core types + config + Postgres)

Scaffold mirrors tcp-ingestion conventions: ESM, strict TS, pnpm, vitest
with unit/integration split, ESLint flat config with no-floating-promises
+ no-misused-promises + import/no-restricted-paths (the new src/core/ →
src/domain/ boundary that protects Phase 1 from Phase 2 churn).

Core types in src/core/types.ts (Position, StreamRecord, DeviceState,
Metrics, AttributeValue) — Position is byte-equivalent to tcp-ingestion's
output. Codec in src/core/codec.ts implements sentinel reversal:
{__bigint:"..."} → bigint, {__buffer_b64:"..."} → Buffer, ISO timestamp
string → Date. CodecError surfaces malformed payload reasons with the
failing field named.

Config in src/config/load.ts (zod schema, all 13 env vars with defaults
and bounded numerics). Logger in src/observability/logger.ts matches
tcp-ingestion exactly: ISO timestamps, string level labels, pino-pretty
in development.

Postgres in src/db/: createPool with sane defaults and application_name,
connectWithRetry mirroring the ioredis retry pattern, a 30-line
migration runner using a schema_migrations table, and 0001_positions.sql
with the hypertable + (device_id, ts) unique index + ts DESC index.
Migration runner unit-tested against a mocked pg.Pool; the real
TimescaleDB round-trip is deferred to task 1.10 per spec.

Verification: typecheck, lint, build all clean; 73 unit tests passing
across 4 files. import/no-restricted-paths verified live by temporarily
adding a forbidden src/domain/ import.
This commit is contained in:
2026-04-30 21:35:16 +02:00
parent c314ba0902
commit 95efc23139
28 changed files with 7427 additions and 13 deletions
+319
View File
@@ -0,0 +1,319 @@
/**
* Unit tests for src/db/migrate.ts
*
* Tests against a mocked pg.Pool — no real Postgres required here.
* The real round-trip against TimescaleDB lives in task 1.10 (integration test,
* testcontainers). See task 1.4 spec for the rationale.
*
* Covers:
* - Applying a fresh migration runs SQL inside a transaction and records version
* - Applying the same migration twice is a no-op (second call skips)
* - A SQL error causes a rollback and re-throws
* - Multiple migration files are applied in lexicographic order
*/
import { beforeEach, describe, expect, it, vi } from 'vitest';
import type { MockedFunction } from 'vitest';
import type { Logger } from 'pino';
import type { Pool, PoolClient } from 'pg';
// ---------------------------------------------------------------------------
// pg.Pool mock
// ---------------------------------------------------------------------------
// One recorded query invocation (pool-level or client-level), in call order.
type QueryCall = {
  sql: string;
  params?: unknown[];
};
// Configuration for the mocked pg.Pool below.
type MockPoolOptions = {
  /**
   * Map from query SQL fragment to result or Error.
   * If a query SQL contains the key as a substring, that handler fires.
   * The first matching key wins. Unmatched queries return `{ rows: [] }`.
   */
  handlers?: Record<string, { rows?: unknown[] } | Error>;
};
// Minimal stand-in for pg.PoolClient: only the two members the runner touches.
type MockClient = {
  query: MockedFunction<(sql: string, params?: unknown[]) => Promise<{ rows: unknown[] }>>;
  release: MockedFunction<() => void>;
};
/**
 * Builds a mocked pg.Pool plus a flat log of every query issued through it,
 * whether via pool.query() or via a client obtained from pool.connect().
 *
 * Handler resolution: the first options.handlers key that appears as a
 * substring of the SQL wins; an Error value is thrown, anything else is
 * returned; queries with no matching handler resolve to `{ rows: [] }`.
 */
function makeMockPool(options: MockPoolOptions = {}): {
  pool: Pool;
  calls: QueryCall[];
} {
  const calls: QueryCall[] = [];
  const handlerEntries = Object.entries(options.handlers ?? {});

  // First matching fragment wins; default is an empty result set.
  const lookup = (sql: string): { rows?: unknown[] } | Error => {
    const hit = handlerEntries.find(([fragment]) => sql.includes(fragment));
    return hit ? hit[1] : { rows: [] };
  };

  // Pool-level and client-level query share identical record/resolve logic,
  // so both are produced by the same factory.
  const makeQueryFn = () =>
    vi.fn(async (sql: string, params?: unknown[]) => {
      calls.push({ sql, params });
      const outcome = lookup(sql);
      if (outcome instanceof Error) throw outcome;
      return outcome as { rows: unknown[] };
    });

  // Client handed out by pool.connect() (transactions run through this).
  const fakeClient: MockClient = {
    query: makeQueryFn(),
    release: vi.fn(),
  };

  return {
    pool: {
      // Used for e.g. CREATE TABLE IF NOT EXISTS schema_migrations.
      query: makeQueryFn(),
      connect: vi.fn(async () => fakeClient as unknown as PoolClient),
    } as unknown as Pool,
    calls,
  };
}
/**
 * A pino-compatible logger whose every level method is a vi.fn() spy.
 * child() returns the same stub so child loggers are observable too.
 */
function makeSilentLogger(): Logger {
  const stub = {
    level: 'silent',
    trace: vi.fn(),
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
    fatal: vi.fn(),
    silent: vi.fn(),
    child: vi.fn(),
  };
  // Equivalent to mockReturnThis(): child() yields the stub itself.
  stub.child.mockReturnValue(stub);
  return stub as unknown as Logger;
}
// ---------------------------------------------------------------------------
// Import under test
// ---------------------------------------------------------------------------
// We mock node:fs/promises so we control file listing and content,
// isolating the runner logic from the real filesystem.
vi.mock('node:fs/promises', () => ({
readdir: vi.fn(),
readFile: vi.fn(),
}));
import { readdir, readFile } from 'node:fs/promises';
import { runMigrations } from '../../src/db/migrate.js';
const mockReaddir = readdir as MockedFunction<typeof readdir>;
const mockReadFile = readFile as MockedFunction<typeof readFile>;
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
describe('runMigrations — fresh database', () => {
it('creates schema_migrations table, runs migration SQL, and records version', async () => {
const fakeSql = 'CREATE TABLE IF NOT EXISTS positions (id serial);';
const version = '0001_positions.sql';
mockReaddir.mockResolvedValue([version] as unknown as Awaited<ReturnType<typeof readdir>>);
mockReadFile.mockResolvedValue(fakeSql as unknown as Buffer);
const { pool, calls } = makeMockPool({
handlers: {
'SELECT EXISTS': { rows: [{ exists: false }] },
},
});
await runMigrations(pool, makeSilentLogger());
// 1. Schema migrations table bootstrapped
expect(
calls.find((c) => c.sql.includes('CREATE TABLE IF NOT EXISTS schema_migrations')),
).toBeDefined();
// 2. EXISTS check ran with the correct version
expect(
calls.find((c) => c.sql.includes('SELECT EXISTS') && c.params?.[0] === version),
).toBeDefined();
// 3. BEGIN transaction
expect(calls.find((c) => c.sql === 'BEGIN')).toBeDefined();
// 4. Migration SQL executed
expect(calls.find((c) => c.sql === fakeSql)).toBeDefined();
// 5. Version recorded
expect(
calls.find(
(c) => c.sql.includes('INSERT INTO schema_migrations') && c.params?.[0] === version,
),
).toBeDefined();
// 6. COMMIT
expect(calls.find((c) => c.sql === 'COMMIT')).toBeDefined();
});
it('logs info after applying the migration', async () => {
mockReaddir.mockResolvedValue(
['0001_positions.sql'] as unknown as Awaited<ReturnType<typeof readdir>>,
);
mockReadFile.mockResolvedValue('SELECT 1' as unknown as Buffer);
const { pool } = makeMockPool({
handlers: { 'SELECT EXISTS': { rows: [{ exists: false }] } },
});
const logger = makeSilentLogger();
await runMigrations(pool, logger);
expect(logger.info).toHaveBeenCalledWith(
expect.objectContaining({ version: '0001_positions.sql' }),
'migration applied',
);
});
});
describe('runMigrations — already applied (idempotency)', () => {
  it('skips migration when already recorded in schema_migrations', async () => {
    const file = '0001_positions.sql';
    mockReaddir.mockResolvedValue([file] as unknown as Awaited<ReturnType<typeof readdir>>);
    mockReadFile.mockResolvedValue('SELECT 1' as unknown as Buffer);
    const { pool, calls } = makeMockPool({
      handlers: { 'SELECT EXISTS': { rows: [{ exists: true }] } },
    });
    const logger = makeSilentLogger();

    await runMigrations(pool, logger);

    // An already-applied version must never open a transaction.
    expect(calls.some((c) => c.sql === 'BEGIN')).toBe(false);
    expect(logger.info).toHaveBeenCalledWith(
      expect.objectContaining({ version: file }),
      'migration already applied; skipping',
    );
  });

  it('is a no-op when called twice with the same migrations', async () => {
    const file = '0001_positions.sql';
    mockReaddir.mockResolvedValue([file] as unknown as Awaited<ReturnType<typeof readdir>>);
    mockReadFile.mockResolvedValue('SELECT 1' as unknown as Buffer);

    // The EXISTS check runs through pool.query (not through a client):
    // answer "not applied" on the first check, "applied" afterwards, to
    // simulate the version row written by the first run.
    let existsChecks = 0;
    const clientQuery = vi.fn(async (_sql: string, _params?: unknown[]) => ({
      rows: [] as unknown[],
    }));
    const client = { query: clientQuery, release: vi.fn() };
    const pool = {
      query: vi.fn(async (sql: string, _params?: unknown[]) => {
        if (sql.includes('SELECT EXISTS')) {
          existsChecks += 1;
          return { rows: [{ exists: existsChecks > 1 }] };
        }
        return { rows: [] as unknown[] };
      }),
      connect: vi.fn(async () => client as unknown as PoolClient),
    } as unknown as Pool;
    const logger = makeSilentLogger();

    await runMigrations(pool, logger);
    await runMigrations(pool, logger);

    // BEGIN called exactly once: first run applies, second run skips.
    const begins = (clientQuery.mock.calls as [string][]).filter(([sql]) => sql === 'BEGIN');
    expect(begins).toHaveLength(1);
  });
});
describe('runMigrations — SQL error', () => {
  it('rolls back on SQL error and rethrows', async () => {
    mockReaddir.mockResolvedValue(
      ['0001_positions.sql'] as unknown as Awaited<ReturnType<typeof readdir>>,
    );
    mockReadFile.mockResolvedValue('BAD SQL;' as unknown as Buffer);

    // Capture every client-side statement so we can assert ROLLBACK ran.
    const executed: string[] = [];
    const clientQuery = vi.fn(async (sql: string, _params?: unknown[]) => {
      executed.push(sql);
      if (sql === 'BAD SQL;') throw new Error('syntax error at or near "BAD"');
      return { rows: [] };
    });
    const client = { query: clientQuery, release: vi.fn() };
    const pool = {
      query: vi.fn(async (sql: string) =>
        sql.includes('SELECT EXISTS') ? { rows: [{ exists: false }] } : { rows: [] as unknown[] },
      ),
      connect: vi.fn(async () => client as unknown as PoolClient),
    } as unknown as Pool;
    const logger = makeSilentLogger();

    await expect(runMigrations(pool, logger)).rejects.toThrow('syntax error');
    expect(executed).toContain('ROLLBACK');
    expect(logger.error).toHaveBeenCalledWith(
      expect.objectContaining({ version: '0001_positions.sql' }),
      'migration failed; rolled back',
    );
  });
});
describe('runMigrations — multiple migration files', () => {
  it('applies files in lexicographic order', async () => {
    // Listed in reverse on purpose: the runner must sort before applying.
    mockReaddir.mockResolvedValue(
      ['0002_second.sql', '0001_first.sql'] as unknown as Awaited<ReturnType<typeof readdir>>,
    );
    mockReadFile.mockResolvedValue('SELECT 1' as unknown as Buffer);

    // Record the version param of every INSERT, in execution order.
    const recorded: string[] = [];
    const client = {
      query: vi.fn(async (sql: string, params?: unknown[]) => {
        if (sql.includes('INSERT INTO schema_migrations')) {
          recorded.push(params?.[0] as string);
        }
        return { rows: [] };
      }),
      release: vi.fn(),
    };
    const pool = {
      query: vi.fn(async (sql: string, _params?: unknown[]) =>
        sql.includes('SELECT EXISTS') ? { rows: [{ exists: false }] } : { rows: [] as unknown[] },
      ),
      connect: vi.fn(async () => client as unknown as PoolClient),
    } as unknown as Pool;

    await runMigrations(pool, makeSilentLogger());

    expect(recorded).toEqual(['0001_first.sql', '0002_second.sql']);
  });
});