88cc98f3cc
Migration 0001_positions.sql now runs CREATE EXTENSION IF NOT EXISTS postgis alongside timescaledb. PostGIS isn't used in Phase 1, but enabling it now means Phase 2's geofence engine won't need a separate migration step. The deploy stack uses the timescale/timescaledb-ha:*-all image, which ships both extensions. The integration test (pipeline.integration.test.ts) is updated to use the same timescale/timescaledb-ha:pg16.6-ts2.17.2-all image as the deploy stack. Stock POSTGRES_USER/PASSWORD/DB env vars are retained — if recent ha-image revisions don't accept them, the test container will fail loudly on the first Docker-enabled run, and we'll switch to the right env-var scheme.
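If there is any doubt that a given image variant ships both extensions, a catalog query after migrations settles it. A minimal sketch, assuming the `pg` pool from the test below; `pg_extension` is the standard Postgres catalog:

    const { rows } = await pgPool.query<{ extname: string }>(
      "SELECT extname FROM pg_extension WHERE extname IN ('timescaledb', 'postgis')",
    );
    if (rows.length !== 2) {
      throw new Error(`missing extension(s): found only [${rows.map((r) => r.extname).join(', ')}]`);
    }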
416 lines
15 KiB
TypeScript
/**
 * Integration test: end-to-end pipeline round-trip via testcontainers.
 *
 * Spins up Redis 7 and TimescaleDB-HA (timescale/timescaledb-ha:pg16.6-ts2.17.2-all,
 * matching the deploy stack image) containers, runs the Processor migration,
 * starts the consumer pipeline, publishes synthetic Position records, and asserts
 * the resulting rows in `positions`. The `-all` image variant ships PostGIS too,
 * so the migration's `CREATE EXTENSION IF NOT EXISTS postgis` succeeds.
 *
 * If Docker is unavailable (CI runner without Docker, local dev without Docker
 * Desktop), the suite skips — it does not fail the build. Docker availability is
 * determined by a container start attempt in beforeAll; the skip flag is set once,
 * and each `it` block early-returns when `!dockerAvailable`.
 *
 * WARNING: Do NOT replace the early-return skip pattern with a try/catch alone.
 * A hang does not throw; only an explicit `!dockerAvailable` check per test
 * guarantees that unavailable Docker exits cleanly (see tcp-ingestion history).
 */

import { describe, it, expect, beforeAll, afterAll, vi } from 'vitest';
import { GenericContainer, type StartedTestContainer, Wait } from 'testcontainers';
import type { Redis } from 'ioredis';
import type pg from 'pg';
import type { ConsumedRecord } from '../src/core/consumer.js';
import { createConsumer, connectRedis, ensureConsumerGroup } from '../src/core/consumer.js';
import { createWriter } from '../src/core/writer.js';
import { createDeviceStateStore } from '../src/core/state.js';
import { createPool, connectWithRetry } from '../src/db/pool.js';
import { runMigrations } from '../src/db/migrate.js';
import { createMetrics } from '../src/observability/metrics.js';
import type { Config } from '../src/config/load.js';
import type { Position } from '../src/core/types.js';
import type { Logger } from 'pino';

// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

function makeSilentLogger(): Logger {
  return {
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
    fatal: vi.fn(),
    trace: vi.fn(),
    child: vi.fn().mockReturnThis(),
    level: 'silent',
    silent: vi.fn(),
  } as unknown as Logger;
}

function makeConfig(overrides: Partial<Config> = {}): Config {
  return {
    NODE_ENV: 'test',
    INSTANCE_ID: 'test-integration',
    LOG_LEVEL: 'silent',
    REDIS_URL: 'redis://localhost:6379', // overridden below with mapped port
    POSTGRES_URL: 'postgres://postgres:postgres@localhost:5432/trm', // overridden below
    REDIS_TELEMETRY_STREAM: 'telemetry:t',
    REDIS_CONSUMER_GROUP: 'processor',
    REDIS_CONSUMER_NAME: 'test-consumer',
    METRICS_PORT: 0,
    BATCH_SIZE: 100,
    BATCH_BLOCK_MS: 500,
    WRITE_BATCH_SIZE: 50,
    DEVICE_STATE_LRU_CAP: 10_000,
    ...overrides,
  };
}

/**
 * Serializes a Position into the flat field map that XADD expects.
 * Mirrors tcp-ingestion's serializePosition format exactly: bigint → __bigint
 * sentinel, Buffer → __buffer_b64 sentinel, Date → ISO string.
 */
function buildXaddFields(position: Position, codec: string): string[] {
  function jsonReplacer(_key: string, value: unknown): unknown {
    if (typeof value === 'bigint') return { __bigint: value.toString() };
    if (value instanceof Uint8Array) {
      return { __buffer_b64: Buffer.from(value).toString('base64') };
    }
    // JSON.stringify invokes toJSON() *before* the replacer, so Buffers reach
    // this function already converted to { type: 'Buffer', data: number[] }
    // (and Dates to ISO strings); handle that shape too, or the __buffer_b64
    // sentinel is never emitted for Buffer values.
    if (
      typeof value === 'object' &&
      value !== null &&
      (value as { type?: unknown }).type === 'Buffer' &&
      Array.isArray((value as { data?: unknown }).data)
    ) {
      return {
        __buffer_b64: Buffer.from((value as { data: number[] }).data).toString('base64'),
      };
    }
    if (value instanceof Date) return value.toISOString();
    return value;
  }

  const payload = JSON.stringify(position, jsonReplacer);
  return [
    'ts', position.timestamp.toISOString(),
    'device_id', position.device_id,
    'codec', codec,
    'payload', payload,
  ];
}
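
// e.g. test 1's attributes end up inside `payload` as
//   {"num_attr":255,"big_attr":{"__bigint":"18446744073709551615"},"buf_attr":{"__buffer_b64":"3q2+7w=="}}
// where "3q2+7w==" is base64 for the bytes de ad be ef.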

/**
 * Polls `fn` up to `timeoutMs` with `intervalMs` gaps until it returns a
 * non-nullish result. Returns null if the timeout expires.
 */
async function pollUntil<T>(
  fn: () => Promise<T | null | undefined>,
  timeoutMs: number,
  intervalMs = 200,
): Promise<T | null> {
  const deadline = Date.now() + timeoutMs;
  while (Date.now() < deadline) {
    const result = await fn();
    if (result !== null && result !== undefined) return result as T;
    await new Promise<void>((resolve) => setTimeout(resolve, intervalMs));
  }
  return null;
}
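
// Usage sketch: `await pollUntil(() => findRow(), 10_000)` retries every 200 ms
// (the default interval) and resolves null if nothing turns up within 10 s.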

// ---------------------------------------------------------------------------
// Container and pipeline lifecycle
// ---------------------------------------------------------------------------

let redisContainer: StartedTestContainer | null = null;
let pgContainer: StartedTestContainer | null = null;
let redisClient: Redis | null = null;
let pgPool: pg.Pool | null = null;
let consumer: { start: () => Promise<void>; stop: () => Promise<void> } | null = null;
let dockerAvailable = true;

const STREAM = 'telemetry:t';
const GROUP = 'processor';
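
// NB: the first run may need to pull both container images, hence the generous
// 120-second timeout on beforeAll.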

beforeAll(async () => {
  // --- Step 1: start Redis container ----------------------------------------
  try {
    redisContainer = await new GenericContainer('redis:7-alpine')
      .withExposedPorts(6379)
      .withWaitStrategy(Wait.forLogMessage('Ready to accept connections'))
      .start();
  } catch {
    console.warn(
      '[pipeline.integration.test] Docker not available — skipping integration tests',
    );
    dockerAvailable = false;
    return;
  }

  // --- Step 2: start TimescaleDB-HA container (same image as deploy stack) --
  try {
    pgContainer = await new GenericContainer('timescale/timescaledb-ha:pg16.6-ts2.17.2-all')
      .withExposedPorts(5432)
      .withEnvironment({
        POSTGRES_USER: 'postgres',
        POSTGRES_PASSWORD: 'postgres',
        POSTGRES_DB: 'trm',
      })
      .withWaitStrategy(Wait.forLogMessage('database system is ready to accept connections', 2))
      .start();
  } catch (err) {
    console.warn(
      `[pipeline.integration.test] Failed to start TimescaleDB-HA container: ${String(err)} — skipping`,
    );
    dockerAvailable = false;
    await redisContainer?.stop().catch(() => {});
    redisContainer = null;
    return;
  }

  const redisHost = redisContainer.getHost();
  const redisPort = redisContainer.getMappedPort(6379);
  const pgHost = pgContainer.getHost();
  const pgPort = pgContainer.getMappedPort(5432);

  const redisUrl = `redis://${redisHost}:${redisPort}`;
  const postgresUrl = `postgres://postgres:postgres@${pgHost}:${pgPort}/trm`;

  const config = makeConfig({ REDIS_URL: redisUrl, POSTGRES_URL: postgresUrl });
  const logger = makeSilentLogger();

  // --- Step 3: connect Redis -------------------------------------------------
  const { default: Redis } = await import('ioredis');
  const client = new Redis(redisUrl, {
    enableOfflineQueue: false,
    lazyConnect: true,
    maxRetriesPerRequest: 0,
  });
  await client.connect();
  redisClient = client;

  // --- Step 4: connect Postgres and run migrations ---------------------------
  pgPool = createPool(postgresUrl);
  await connectWithRetry(pgPool, logger);
  await runMigrations(pgPool, logger);

  // --- Step 5: wire and start the consumer pipeline --------------------------
  const metrics = createMetrics();
  const state = createDeviceStateStore(config, logger);
  const writer = createWriter(pgPool, config, logger, metrics);

  await ensureConsumerGroup(client, STREAM, GROUP, logger);
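
  // Records whose IDs the sink returns are considered handled and get ACKed;
  // anything else stays in the PEL for re-delivery (tests 3 and 4 depend on
  // this). Duplicates are ACKed too, since the write path is idempotent and
  // re-delivering them would change nothing.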
  const sink = async (records: ConsumedRecord[]): Promise<string[]> => {
    for (const record of records) {
      state.update(record.position);
    }
    const results = await writer.write(records);
    return results
      .filter((r) => r.status === 'inserted' || r.status === 'duplicate')
      .map((r) => r.id);
  };

  // Use connectRedis for the consumer's own connection (separate from the
  // redisClient used for XADD in tests) so we mirror production topology.
  const consumerRedis = await connectRedis(redisUrl, logger);
  consumer = createConsumer(consumerRedis, config, logger, metrics, sink);
  await consumer.start();
}, 120_000);

afterAll(async () => {
  await consumer?.stop().catch(() => {});
  await redisClient?.quit().catch(() => {});
  await pgPool?.end().catch(() => {});
  await redisContainer?.stop().catch(() => {});
  await pgContainer?.stop().catch(() => {});
}, 30_000);

// ---------------------------------------------------------------------------
// Integration tests
// ---------------------------------------------------------------------------

describe('pipeline integration — full round-trip', () => {
  // Test 1: happy-path with bigint + Buffer attributes
  it('publishes a Position with bigint and Buffer attributes and verifies the row in positions', async () => {
    if (!dockerAvailable || !redisClient || !pgPool) {
      console.warn('[pipeline.integration.test] skipping test 1: Docker not available');
      return;
    }

    const position: Position = {
      device_id: '356307042441013',
      timestamp: new Date('2024-06-15T12:00:00.000Z'),
      latitude: 54.687157,
      longitude: 25.279652,
      altitude: 130,
      angle: 90,
      speed: 45,
      satellites: 12,
      priority: 0,
      attributes: {
        num_attr: 255,
        big_attr: BigInt('18446744073709551615'), // u64 max
        buf_attr: Buffer.from([0xde, 0xad, 0xbe, 0xef]),
      },
    };

    const fields = buildXaddFields(position, '8E');
    await redisClient.xadd(STREAM, '*', ...fields);

    // Poll until the row appears in positions (up to 10 s).
    type Row = {
      device_id: string;
      ts: Date;
      latitude: number;
      longitude: number;
      attributes: Record<string, unknown>;
    };

    const row = await pollUntil<Row>(async () => {
      const result = await pgPool!.query<Row>(
        'SELECT device_id, ts, latitude, longitude, attributes FROM positions WHERE device_id = $1 AND ts = $2',
        [position.device_id, position.timestamp],
      );
      return result.rows[0] ?? null;
    }, 10_000);

    expect(row).not.toBeNull();
    expect(row!.device_id).toBe(position.device_id);
    expect(row!.latitude).toBeCloseTo(position.latitude, 4);
    expect(row!.longitude).toBeCloseTo(position.longitude, 4);

    // attributes JSONB: bigint stored as decimal string, Buffer as base64 string.
    expect(typeof row!.attributes['big_attr']).toBe('string');
    expect(row!.attributes['big_attr']).toBe('18446744073709551615');

    expect(typeof row!.attributes['buf_attr']).toBe('string');
    const decoded = Buffer.from(row!.attributes['buf_attr'] as string, 'base64');
    expect(decoded).toEqual(Buffer.from([0xde, 0xad, 0xbe, 0xef]));

    expect(row!.attributes['num_attr']).toBe(255);
  }, 30_000);

  // Test 2: idempotency — duplicate (device_id, ts) must not create a second row
  it('does not create a duplicate row when the same (device_id, ts) is published twice', async () => {
    if (!dockerAvailable || !redisClient || !pgPool) {
      console.warn('[pipeline.integration.test] skipping test 2: Docker not available');
      return;
    }

    const position: Position = {
      device_id: 'DUP-DEVICE-001',
      timestamp: new Date('2024-06-15T13:00:00.000Z'),
      latitude: 1.0,
      longitude: 2.0,
      altitude: 10,
      angle: 0,
      speed: 0,
      satellites: 4,
      priority: 0,
      attributes: {},
    };

    const fields = buildXaddFields(position, '8');

    // Publish the same position twice.
    await redisClient.xadd(STREAM, '*', ...fields);
    await redisClient.xadd(STREAM, '*', ...fields);

    // Wait long enough for both entries to be processed.
    await new Promise<void>((resolve) => setTimeout(resolve, 3_000));

    const result = await pgPool.query<{ count: string }>(
      'SELECT COUNT(*) AS count FROM positions WHERE device_id = $1 AND ts = $2',
      [position.device_id, position.timestamp],
    );
    const count = parseInt(result.rows[0]?.count ?? '0', 10);
    expect(count).toBe(1);
  }, 30_000);

  // Test 3: malformed payload — decode error counter increments, entry not ACKed
  it('increments decode error counter and leaves malformed entry pending (not ACKed)', async () => {
    if (!dockerAvailable || !redisClient || !pgPool) {
      console.warn('[pipeline.integration.test] skipping test 3: Docker not available');
      return;
    }

    // Push a stream entry with a broken payload (not valid JSON).
    const badEntryId = await redisClient.xadd(
      STREAM,
      '*',
      'ts', new Date().toISOString(),
      'device_id', 'BAD-DEVICE',
      'codec', '8',
      'payload', 'NOT_VALID_JSON {{{',
    );

    // Wait for the consumer to attempt processing.
    await new Promise<void>((resolve) => setTimeout(resolve, 2_000));

    // The entry should remain in the Pending Entry List (PEL) — it was not ACKed.
    const pendingResult = await redisClient.xpending(
      STREAM,
      GROUP,
      '-',
      '+',
      '100',
    ) as Array<[string, string, number, number]>;
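    // XPENDING extended-form reply: [entryId, consumerName, idleTimeMs, deliveryCount].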

    // Find the bad entry in the PEL.
    const pendingIds = pendingResult.map(([id]) => id);
    expect(pendingIds).toContain(badEntryId);
  }, 30_000);

  // Test 4: writer failure → retry — stop Postgres before publish, restart, verify row lands
  it('retries and writes the row after Postgres recovers from a stopped state', async () => {
    if (!dockerAvailable || !redisClient || !pgPool || !pgContainer) {
      console.warn('[pipeline.integration.test] skipping test 4: Docker not available');
      return;
    }

    const position: Position = {
      device_id: 'RETRY-DEVICE-001',
      timestamp: new Date('2024-06-15T14:00:00.000Z'),
      latitude: 3.0,
      longitude: 4.0,
      altitude: 20,
      angle: 45,
      speed: 10,
      satellites: 8,
      priority: 1,
      attributes: {},
    };

    // Stop Postgres before publishing so the first write attempt fails.
    await pgContainer.stop();

    const fields = buildXaddFields(position, '8');
    await redisClient.xadd(STREAM, '*', ...fields);

    // Wait briefly — the write should fail while Postgres is down.
    await new Promise<void>((resolve) => setTimeout(resolve, 1_500));

    // Restart Postgres. restart() resolves with no value, so do not assign
    // its result back to pgContainer.
    await pgContainer.restart();
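    // The same container is restarted in place, so the mapped host port (and
    // with it postgresUrl and pgPool) stays valid afterwards.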

    // Give the restarted container a moment to start accepting connections
    // again; the pool picks up fresh connections once the TCP stack is back.
    await new Promise<void>((resolve) => setTimeout(resolve, 3_000));

    // The entry is still pending in the consumer's PEL; the next XREADGROUP
    // poll will re-deliver it. The pipeline should eventually write it.
    type Row = { device_id: string };
    const row = await pollUntil<Row>(async () => {
      try {
        const result = await pgPool!.query<Row>(
          'SELECT device_id FROM positions WHERE device_id = $1 AND ts = $2',
          [position.device_id, position.timestamp],
        );
        return result.rows[0] ?? null;
      } catch {
        // Pool may throw transiently while connections re-establish.
        return null;
      }
    }, 20_000);

    expect(row).not.toBeNull();
    expect(row!.device_id).toBe(position.device_id);
  }, 60_000);
});