Implement Phase 1 tasks 1.5-1.8 (consumer + state + writer + main wiring)

src/core/consumer.ts — XREADGROUP loop with consumer-group resumption,
ensureConsumerGroup (BUSYGROUP-tolerant), decodeBatch (CodecError → log
+ skip + leave pending; never speculative ACK), partial-ACK semantics,
connectRedis (mirroring tcp-ingestion's retry pattern), clean stop.

src/core/state.ts — LRU Map<device_id, DeviceState> using delete+set
bump trick (no third-party LRU dep); last_seen = max(prev, ts) so
out-of-order replays don't regress the high-water mark; evictedTotal()
counter.

src/core/writer.ts — multi-row INSERT ON CONFLICT (device_id, ts) DO
NOTHING with RETURNING. Duplicate detection by set-difference between
input and RETURNING rows (xmax=0 doesn't work for skipped-conflict
rows, only returned ones — confirmed in the task spec's own Note).
Sequential chunking to WRITE_BATCH_SIZE; bigint→string and Buffer→base64
attribute serialization that handles Buffer.toJSON shape.

src/main.ts — full pipeline: pool → migrate → redis → state → writer →
sink → consumer → graceful-shutdown stub. Sink ordering is
state.update BEFORE writer.write per spec rationale (state stays
consistent with what's been seen even if not yet persisted; redelivery
is idempotent on state). Metrics is still the trace-logging shim from
tcp-ingestion's pre-1.10 pattern; real prom-client lands in task 1.9.

Verification: typecheck, lint clean; 112 unit tests passing across 7
test files (+39 from this batch).
This commit is contained in:
2026-04-30 21:47:43 +02:00
parent 6a14eb1d01
commit 2a50aaf175
12 changed files with 2218 additions and 15 deletions
+608
View File
@@ -0,0 +1,608 @@
/**
* Unit tests for src/core/consumer.ts
*
* All Redis I/O is mocked — no real Redis required. The integration test
* (task 1.10) covers the end-to-end round-trip.
*
* Covers:
* - Decodes a synthetic stream entry into a ConsumedRecord with the right shape
* - Calls sink with the decoded batch and ACKs only the IDs the sink returned
* - Partial ACK: sink returns subset of IDs; only those are ACKed
* - BUSYGROUP error from XGROUP CREATE is swallowed and continues
* - Malformed payload: increments metric, logs at error, does NOT ACK the entry
* - Missing payload field: logs at error, does NOT ACK the entry
* - stop() causes the loop to exit cleanly
* - XREADGROUP failure logs error and backs off without crashing
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import type { MockedFunction } from 'vitest';
import type { Redis } from 'ioredis';
import type { Logger } from 'pino';
import type { Config } from '../src/config/load.js';
import type { Metrics, Position } from '../src/core/types.js';
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/**
 * Returns a pino-compatible logger whose every method is a no-op spy, so
 * tests can assert on log calls without producing any output.
 */
function makeSilentLogger(): Logger {
  const noopLogger = {
    level: 'silent',
    trace: vi.fn(),
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
    fatal: vi.fn(),
    silent: vi.fn(),
    child: vi.fn().mockReturnThis(),
  };
  return noopLogger as unknown as Logger;
}
/**
 * Metrics stub that records every inc() invocation in `incCalls`, so tests
 * can assert exactly which counters were bumped and with what labels.
 */
function makeMetrics(): Metrics & {
  incCalls: Array<{ name: string; labels?: Record<string, string> }>;
} {
  const incCalls: Array<{ name: string; labels?: Record<string, string> }> = [];
  const inc = (name: string, labels?: Record<string, string>): void => {
    incCalls.push({ name, labels });
  };
  return { incCalls, inc, observe: vi.fn() };
}
/**
 * Full Config populated with test defaults; any field can be overridden
 * per-test via `overrides` (overrides win over the defaults).
 */
function makeConfig(overrides: Partial<Config> = {}): Config {
  const defaults: Config = {
    NODE_ENV: 'test',
    INSTANCE_ID: 'test-processor',
    LOG_LEVEL: 'silent',
    REDIS_URL: 'redis://localhost:6379',
    POSTGRES_URL: 'postgres://localhost:5432/test',
    REDIS_TELEMETRY_STREAM: 'telemetry:t',
    REDIS_CONSUMER_GROUP: 'processor',
    REDIS_CONSUMER_NAME: 'test-consumer',
    METRICS_PORT: 9090,
    BATCH_SIZE: 10,
    BATCH_BLOCK_MS: 100,
    WRITE_BATCH_SIZE: 50,
    DEVICE_STATE_LRU_CAP: 1000,
  };
  return { ...defaults, ...overrides };
}
/**
 * Builds the JSON payload for a synthetic Position, mirroring tcp-ingestion's
 * serialization format: sentinel encoding for bigint ({ __bigint }) and
 * Buffer/Uint8Array ({ __buffer_b64 }); Dates serialize to ISO-8601 strings
 * via their own toJSON().
 *
 * @param overrides - Position fields to replace in the synthetic default.
 * @returns The serialized JSON string.
 */
function buildPayload(overrides: Partial<Position> = {}): string {
  const position: Position = {
    device_id: 'TESTDEVICE001',
    timestamp: new Date('2024-05-01T12:00:00.000Z'),
    latitude: 54.6872,
    longitude: 25.2797,
    altitude: 100,
    angle: 90,
    speed: 50,
    satellites: 12,
    priority: 1,
    attributes: {},
    ...overrides,
  };
  // NOTE: JSON.stringify calls Date.prototype.toJSON() *before* invoking the
  // replacer, so the replacer never sees a Date instance — it already receives
  // the ISO string. A `value instanceof Date` branch here would be dead code;
  // the default toJSON() output is exactly the ISO-8601 encoding we want.
  function jsonReplacer(_key: string, value: unknown): unknown {
    if (typeof value === 'bigint') return { __bigint: value.toString() };
    if (value instanceof Uint8Array) return { __buffer_b64: Buffer.from(value).toString('base64') };
    return value;
  }
  return JSON.stringify(position, jsonReplacer);
}
/**
 * Builds a raw XREADGROUP response for a single stream.
 * ioredis shape: [[streamName, [[entryId, [field, value, ...]], ...]]]
 */
function buildXreadgroupResponse(
  stream: string,
  entries: Array<{ id: string; fields: Record<string, string> }>,
): [string, [string, string[]][]][] {
  const encodedEntries: [string, string[]][] = [];
  for (const entry of entries) {
    // Flatten { field: value } pairs into the alternating field/value array
    // Redis uses on the wire.
    const flatFields: string[] = [];
    for (const [field, value] of Object.entries(entry.fields)) {
      flatFields.push(field, value);
    }
    encodedEntries.push([entry.id, flatFields]);
  }
  return [[stream, encodedEntries]];
}
// ---------------------------------------------------------------------------
// Mock ioredis
// ---------------------------------------------------------------------------
// Hand-rolled ioredis mock exposing only the stream commands the consumer uses.
type MockRedis = {
  xgroup: MockedFunction<(...args: unknown[]) => Promise<string>>;
  xreadgroup: MockedFunction<(...args: unknown[]) => Promise<unknown>>;
  xack: MockedFunction<(...args: unknown[]) => Promise<number>>;
};
/** Fresh mock per test: XGROUP succeeds, XREADGROUP times out (null), XACK acks one. */
function makeMockRedis(): MockRedis {
  const xgroup = vi.fn().mockResolvedValue('OK');
  const xreadgroup = vi.fn().mockResolvedValue(null); // default: BLOCK timeout
  const xack = vi.fn().mockResolvedValue(1);
  return { xgroup, xreadgroup, xack };
}
// ---------------------------------------------------------------------------
// ensureConsumerGroup tests
// ---------------------------------------------------------------------------
import { ensureConsumerGroup } from '../src/core/consumer.js';
describe('ensureConsumerGroup', () => {
it('calls XGROUP CREATE with MKSTREAM and $ start ID', async () => {
const redis = makeMockRedis();
const logger = makeSilentLogger();
await ensureConsumerGroup(redis as unknown as Redis, 'telemetry:t', 'processor', logger);
expect(redis.xgroup).toHaveBeenCalledWith('CREATE', 'telemetry:t', 'processor', '$', 'MKSTREAM');
expect(logger.info).toHaveBeenCalledWith(
expect.objectContaining({ stream: 'telemetry:t', group: 'processor' }),
'consumer group created',
);
});
it('swallows BUSYGROUP error and logs info', async () => {
const redis = makeMockRedis();
redis.xgroup.mockRejectedValue(new Error('BUSYGROUP Consumer Group name already exists'));
const logger = makeSilentLogger();
await expect(
ensureConsumerGroup(redis as unknown as Redis, 'telemetry:t', 'processor', logger),
).resolves.toBeUndefined();
expect(logger.info).toHaveBeenCalledWith(
expect.objectContaining({ stream: 'telemetry:t', group: 'processor' }),
'consumer group already exists',
);
});
it('rethrows non-BUSYGROUP errors', async () => {
const redis = makeMockRedis();
redis.xgroup.mockRejectedValue(new Error('NOPERM no permissions'));
const logger = makeSilentLogger();
await expect(
ensureConsumerGroup(redis as unknown as Redis, 'telemetry:t', 'processor', logger),
).rejects.toThrow('NOPERM no permissions');
});
});
// ---------------------------------------------------------------------------
// createConsumer tests
// ---------------------------------------------------------------------------
import { createConsumer } from '../src/core/consumer.js';
import type { ConsumedRecord } from '../src/core/consumer.js';
// Happy-path consumer tests. Pattern used throughout: the sink callback calls
// consumerRef?.stop() so the read loop exits after the first batch; consumerRef
// is assigned after createConsumer() returns, hence the optional chaining.
describe('createConsumer — happy path', () => {
  afterEach(() => {
    vi.restoreAllMocks();
    vi.useRealTimers();
  });
  it('decodes a stream entry and passes a ConsumedRecord to the sink', async () => {
    const redis = makeMockRedis();
    const logger = makeSilentLogger();
    const metrics = makeMetrics();
    const config = makeConfig();
    const payload = buildPayload({ device_id: 'DEV001' });
    const stream = 'telemetry:t';
    const entryId = '1714488000000-0';
    // First call: return one entry. Subsequent calls: return null (BLOCK timeout).
    redis.xreadgroup
      .mockResolvedValueOnce(
        buildXreadgroupResponse(stream, [
          { id: entryId, fields: { ts: '2024-05-01T12:00:00.000Z', device_id: 'DEV001', codec: '8', payload } },
        ]),
      )
      .mockResolvedValue(null);
    const receivedRecords: ConsumedRecord[][] = [];
    let consumerRef: ReturnType<typeof createConsumer> | undefined;
    const sink = vi.fn(async (records: ConsumedRecord[]) => {
      receivedRecords.push(records);
      // Stop the consumer after processing the first batch so the loop exits.
      void consumerRef?.stop();
      return records.map((r) => r.id);
    });
    const consumer = createConsumer(
      redis as unknown as Redis,
      config,
      logger,
      metrics,
      sink,
    );
    consumerRef = consumer;
    await consumer.start();
    // Wait for the consumer to process and stop
    await consumer.stop();
    expect(receivedRecords.length).toBeGreaterThanOrEqual(1);
    const firstBatch = receivedRecords[0];
    expect(firstBatch).toBeDefined();
    expect(firstBatch!.length).toBe(1);
    const record = firstBatch![0]!;
    // The decoded record carries the raw entry id plus the stream fields.
    expect(record.id).toBe(entryId);
    expect(record.codec).toBe('8');
    expect(record.ts).toBe('2024-05-01T12:00:00.000Z');
    expect(record.position.device_id).toBe('DEV001');
    expect(record.position.latitude).toBe(54.6872);
  });
  it('ACKs only the IDs returned by the sink (partial ACK)', async () => {
    const redis = makeMockRedis();
    const logger = makeSilentLogger();
    const metrics = makeMetrics();
    const config = makeConfig();
    const stream = 'telemetry:t';
    const ids = ['1000-0', '1000-1', '1000-2'];
    const entries = ids.map((id) => ({
      id,
      fields: {
        ts: '2024-05-01T12:00:00.000Z',
        device_id: `DEV${id}`,
        codec: '8',
        payload: buildPayload({ device_id: `DEV${id}` }),
      },
    }));
    let consumerRef: ReturnType<typeof createConsumer> | undefined;
    redis.xreadgroup
      .mockResolvedValueOnce(buildXreadgroupResponse(stream, entries))
      .mockResolvedValue(null);
    // Sink returns only the first and third IDs — second stays pending
    const sink = vi.fn(async (records: ConsumedRecord[]) => {
      void consumerRef?.stop();
      return [records[0]!.id, records[2]!.id];
    });
    const consumer = createConsumer(
      redis as unknown as Redis,
      config,
      logger,
      metrics,
      sink,
    );
    consumerRef = consumer;
    await consumer.start();
    await consumer.stop();
    // Expects a single XACK carrying exactly the sink-returned ids (variadic).
    expect(redis.xack).toHaveBeenCalledWith(stream, 'processor', ids[0], ids[2]);
    // id[1] must NOT be in any xack call
    const xackCalls = redis.xack.mock.calls.flat();
    expect(xackCalls).not.toContain(ids[1]);
  });
  it('does not call xack when sink returns an empty array', async () => {
    const redis = makeMockRedis();
    const logger = makeSilentLogger();
    const metrics = makeMetrics();
    const config = makeConfig();
    const stream = 'telemetry:t';
    let consumerRef: ReturnType<typeof createConsumer> | undefined;
    redis.xreadgroup
      .mockResolvedValueOnce(
        buildXreadgroupResponse(stream, [
          {
            id: '2000-0',
            fields: {
              ts: '2024-05-01T12:00:00.000Z',
              device_id: 'DEV002',
              codec: '8',
              payload: buildPayload({ device_id: 'DEV002' }),
            },
          },
        ]),
      )
      .mockResolvedValue(null);
    const sink = vi.fn(async (_records: ConsumedRecord[]) => {
      void consumerRef?.stop();
      return [];
    });
    const consumer = createConsumer(
      redis as unknown as Redis,
      config,
      logger,
      metrics,
      sink,
    );
    consumerRef = consumer;
    await consumer.start();
    await consumer.stop();
    // Empty sink result ⇒ zero XACK round-trips (not an XACK with no ids).
    expect(redis.xack).not.toHaveBeenCalled();
  });
});
// Decode-failure tests: a malformed entry must be skipped (never speculatively
// ACKed) so it stays in the pending entries list for later inspection/claim.
describe('createConsumer — decode errors', () => {
  afterEach(() => {
    vi.restoreAllMocks();
  });
  it('skips malformed payload: increments metric, logs error, does not ACK', async () => {
    const redis = makeMockRedis();
    const logger = makeSilentLogger();
    const metrics = makeMetrics();
    const config = makeConfig();
    const stream = 'telemetry:t';
    const badId = '3000-0';
    let consumerRef: ReturnType<typeof createConsumer> | undefined;
    redis.xreadgroup
      .mockResolvedValueOnce(
        buildXreadgroupResponse(stream, [
          {
            id: badId,
            fields: {
              ts: '2024-05-01T12:00:00.000Z',
              device_id: 'DEV003',
              codec: '8',
              payload: 'not valid json {{{',
            },
          },
        ]),
      )
      .mockResolvedValue(null);
    const sink = vi.fn(async (_records: ConsumedRecord[]) => {
      void consumerRef?.stop();
      return [];
    });
    const consumer = createConsumer(
      redis as unknown as Redis,
      config,
      logger,
      metrics,
      sink,
    );
    consumerRef = consumer;
    await consumer.start();
    await consumer.stop();
    // Decode error metric incremented
    expect(metrics.incCalls.some((c) => c.name === 'processor_decode_errors_total')).toBe(true);
    // Logged at error
    expect(logger.error).toHaveBeenCalled();
    // Sink was called with empty records (bad entry filtered out)
    expect(sink).toHaveBeenCalledWith([]);
    // No XACK for the bad entry
    expect(redis.xack).not.toHaveBeenCalledWith(stream, 'processor', badId);
  });
  it('skips entry with missing payload field: increments metric, logs error, does not ACK', async () => {
    const redis = makeMockRedis();
    const logger = makeSilentLogger();
    const metrics = makeMetrics();
    const config = makeConfig();
    const stream = 'telemetry:t';
    const badId = '3001-0';
    let consumerRef: ReturnType<typeof createConsumer> | undefined;
    redis.xreadgroup
      .mockResolvedValueOnce(
        buildXreadgroupResponse(stream, [
          {
            id: badId,
            // No payload field
            fields: { ts: '2024-05-01T12:00:00.000Z', device_id: 'DEV004', codec: '8' },
          },
        ]),
      )
      .mockResolvedValue(null);
    const sink = vi.fn(async (_records: ConsumedRecord[]) => {
      void consumerRef?.stop();
      return [];
    });
    const consumer = createConsumer(
      redis as unknown as Redis,
      config,
      logger,
      metrics,
      sink,
    );
    consumerRef = consumer;
    await consumer.start();
    await consumer.stop();
    expect(metrics.incCalls.some((c) => c.name === 'processor_decode_errors_total')).toBe(true);
    expect(logger.error).toHaveBeenCalled();
    expect(redis.xack).not.toHaveBeenCalled();
  });
  it('valid and invalid entries in the same batch: ACKs only valid ones', async () => {
    const redis = makeMockRedis();
    const logger = makeSilentLogger();
    const metrics = makeMetrics();
    const config = makeConfig();
    const stream = 'telemetry:t';
    const goodId = '4000-0';
    const badId = '4000-1';
    let consumerRef: ReturnType<typeof createConsumer> | undefined;
    redis.xreadgroup
      .mockResolvedValueOnce(
        buildXreadgroupResponse(stream, [
          {
            id: goodId,
            fields: {
              ts: '2024-05-01T12:00:00.000Z',
              device_id: 'DEV005',
              codec: '8',
              payload: buildPayload({ device_id: 'DEV005' }),
            },
          },
          {
            id: badId,
            fields: {
              ts: '2024-05-01T12:00:00.000Z',
              device_id: 'DEV005',
              codec: '8',
              payload: 'not json',
            },
          },
        ]),
      )
      .mockResolvedValue(null);
    const sink = vi.fn(async (records: ConsumedRecord[]) => {
      void consumerRef?.stop();
      return records.map((r) => r.id);
    });
    const consumer = createConsumer(
      redis as unknown as Redis,
      config,
      logger,
      metrics,
      sink,
    );
    consumerRef = consumer;
    await consumer.start();
    await consumer.stop();
    // Sink received only the good record
    expect(sink).toHaveBeenCalledWith(
      expect.arrayContaining([expect.objectContaining({ id: goodId })]),
    );
    expect(sink).toHaveBeenCalledWith(
      expect.not.arrayContaining([expect.objectContaining({ id: badId })]),
    );
    // ACK called for good entry only
    expect(redis.xack).toHaveBeenCalledWith(stream, 'processor', goodId);
    const xackArgs = redis.xack.mock.calls.flat();
    expect(xackArgs).not.toContain(badId);
  });
});
// Read-failure test. Fake timers are required: the consumer backs off with a
// real-time sleep after an XREADGROUP error (presumably ~1000ms, given the
// 1_100ms advance below — confirm against consumer.ts), and we must advance
// past it deterministically instead of sleeping in the test.
describe('createConsumer — XREADGROUP failure', () => {
  beforeEach(() => {
    vi.useFakeTimers();
  });
  afterEach(() => {
    vi.useRealTimers();
    vi.restoreAllMocks();
  });
  it('backs off and retries after XREADGROUP error', async () => {
    const redis = makeMockRedis();
    const logger = makeSilentLogger();
    const metrics = makeMetrics();
    const config = makeConfig({ BATCH_BLOCK_MS: 10 });
    let consumerRef: ReturnType<typeof createConsumer> | undefined;
    let callCount = 0;
    redis.xreadgroup.mockImplementation(async () => {
      callCount++;
      if (callCount === 1) {
        // Transient, retryable Redis error on the first read.
        throw new Error('LOADING Redis is loading the dataset in memory');
      }
      // Stop consumer on second call
      void consumerRef?.stop();
      return null;
    });
    const sink = vi.fn(async () => []);
    const consumer = createConsumer(
      redis as unknown as Redis,
      config,
      logger,
      metrics,
      sink,
    );
    consumerRef = consumer;
    await consumer.start();
    // Advance timers past the 1000ms backoff
    await vi.advanceTimersByTimeAsync(1_100);
    await consumer.stop();
    expect(logger.error).toHaveBeenCalledWith(
      expect.objectContaining({ err: expect.anything() }),
      'XREADGROUP failed; backing off',
    );
    // Should have retried at least once
    expect(callCount).toBeGreaterThanOrEqual(2);
  });
});
describe('createConsumer — clean stop', () => {
afterEach(() => {
vi.restoreAllMocks();
});
it('stop() returns after current batch completes', async () => {
const redis = makeMockRedis();
const logger = makeSilentLogger();
const metrics = makeMetrics();
const config = makeConfig();
// Return null immediately (BLOCK timeout) so the loop spins and we can stop it
redis.xreadgroup.mockResolvedValue(null);
const sink = vi.fn(async () => []);
const consumer = createConsumer(
redis as unknown as Redis,
config,
logger,
metrics,
sink,
);
await consumer.start();
// stop() should resolve without hanging
await expect(consumer.stop()).resolves.toBeUndefined();
});
});
+257
View File
@@ -0,0 +1,257 @@
/**
* Unit tests for src/core/state.ts
*
* Covers:
* - First update creates entry; subsequent updates increment position_count_session
* - LRU eviction: with cap=3, after 4 distinct devices the oldest is evicted
* - Eviction increments evictedTotal()
* - last_seen reflects the position's timestamp (device-reported time)
* - Out-of-order positions: last_seen only advances forward (max semantics)
* - get() returns undefined for unknown devices
* - size() returns the current number of stored devices
* - LRU order: most-recently-updated device is not evicted on overflow
*/
import { describe, it, expect, vi } from 'vitest';
import type { Logger } from 'pino';
import type { Config } from '../src/config/load.js';
import type { Position } from '../src/core/types.js';
import { createDeviceStateStore } from '../src/core/state.js';
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/**
 * Pino-compatible logger whose methods are all no-op spies; keeps the store
 * quiet in tests while still allowing call assertions.
 */
function makeSilentLogger(): Logger {
  const spyLogger = {
    level: 'silent',
    trace: vi.fn(),
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
    fatal: vi.fn(),
    silent: vi.fn(),
    child: vi.fn().mockReturnThis(),
  };
  return spyLogger as unknown as Logger;
}
/** Complete Config with test defaults; per-test fields come in via `overrides`. */
function makeConfig(overrides: Partial<Config> = {}): Config {
  const baseConfig: Config = {
    NODE_ENV: 'test',
    INSTANCE_ID: 'test-processor',
    LOG_LEVEL: 'silent',
    REDIS_URL: 'redis://localhost:6379',
    POSTGRES_URL: 'postgres://localhost:5432/test',
    REDIS_TELEMETRY_STREAM: 'telemetry:t',
    REDIS_CONSUMER_GROUP: 'processor',
    REDIS_CONSUMER_NAME: 'test-consumer',
    METRICS_PORT: 9090,
    BATCH_SIZE: 10,
    BATCH_BLOCK_MS: 100,
    WRITE_BATCH_SIZE: 50,
    DEVICE_STATE_LRU_CAP: 1000,
  };
  return { ...baseConfig, ...overrides };
}
/** Synthetic Position for a given device; `overrides` replace default fields. */
function makePosition(deviceId: string, overrides: Partial<Position> = {}): Position {
  const base: Position = {
    device_id: deviceId,
    timestamp: new Date('2024-05-01T12:00:00.000Z'),
    latitude: 54.6872,
    longitude: 25.2797,
    altitude: 100,
    angle: 90,
    speed: 50,
    satellites: 12,
    priority: 1,
    attributes: {},
  };
  return { ...base, ...overrides };
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
describe('createDeviceStateStore — initial state', () => {
  // Fresh store per test so state never leaks between cases.
  const newStore = () => createDeviceStateStore(makeConfig(), makeSilentLogger());

  it('creates a new entry on first update', () => {
    const store = newStore();
    const position = makePosition('DEV001');
    const state = store.update(position);
    expect(state.device_id).toBe('DEV001');
    expect(state.last_position).toBe(position);
    expect(state.position_count_session).toBe(1);
    expect(state.last_seen).toEqual(position.timestamp);
  });

  it('increments position_count_session on subsequent updates', () => {
    const store = newStore();
    store.update(makePosition('DEV001', { timestamp: new Date('2024-05-01T12:00:00.000Z') }));
    store.update(makePosition('DEV001', { timestamp: new Date('2024-05-01T12:00:01.000Z') }));
    const finalState = store.update(
      makePosition('DEV001', { timestamp: new Date('2024-05-01T12:00:02.000Z') }),
    );
    expect(finalState.position_count_session).toBe(3);
  });

  it('get() returns undefined for an unknown device', () => {
    expect(newStore().get('UNKNOWN')).toBeUndefined();
  });

  it('get() returns the current state for a known device', () => {
    const store = newStore();
    store.update(makePosition('DEV002'));
    const state = store.get('DEV002');
    expect(state).toBeDefined();
    expect(state?.device_id).toBe('DEV002');
  });

  it('size() returns 0 before any updates', () => {
    expect(newStore().size()).toBe(0);
  });

  it('size() returns the number of distinct devices after updates', () => {
    const store = newStore();
    store.update(makePosition('DEV001'));
    store.update(makePosition('DEV002'));
    store.update(makePosition('DEV001')); // duplicate device — should not increase size
    expect(store.size()).toBe(2);
  });
});
// last_seen semantics: the high-water mark of *device-reported* timestamps.
describe('createDeviceStateStore — last_seen semantics', () => {
  it('last_seen reflects the position timestamp (not wall clock)', () => {
    const store = createDeviceStateStore(makeConfig(), makeSilentLogger());
    const ts = new Date('2024-03-15T08:30:00.000Z');
    const position = makePosition('DEV010', { timestamp: ts });
    const state = store.update(position);
    expect(state.last_seen).toEqual(ts);
    // The fixed device timestamp is far in the past, so a wall-clock-derived
    // last_seen would be far from it. (The previous assertion,
    // `.not.toBe(new Date())`, was vacuous: toBe() is referential identity
    // against a freshly constructed Date, so it could never fail.)
    expect(Math.abs(state.last_seen.getTime() - Date.now())).toBeGreaterThan(60_000);
  });
  it('last_seen advances on newer timestamps', () => {
    const store = createDeviceStateStore(makeConfig(), makeSilentLogger());
    const ts1 = new Date('2024-05-01T10:00:00.000Z');
    const ts2 = new Date('2024-05-01T11:00:00.000Z');
    store.update(makePosition('DEV011', { timestamp: ts1 }));
    const state = store.update(makePosition('DEV011', { timestamp: ts2 }));
    expect(state.last_seen).toEqual(ts2);
  });
  it('last_seen does NOT regress on out-of-order (older) timestamps', () => {
    // Devices buffer offline records and replay them in bursts; within a burst
    // consecutive timestamps may decrease. last_seen must mean "highest device
    // timestamp seen so far" — it must never go backward.
    const store = createDeviceStateStore(makeConfig(), makeSilentLogger());
    const newer = new Date('2024-05-01T12:00:00.000Z');
    const older = new Date('2024-05-01T10:00:00.000Z');
    store.update(makePosition('DEV012', { timestamp: newer }));
    const state = store.update(makePosition('DEV012', { timestamp: older }));
    // last_seen must remain at the newer timestamp, not regress to older
    expect(state.last_seen).toEqual(newer);
  });
  it('last_seen stays the same when equal timestamps arrive', () => {
    const store = createDeviceStateStore(makeConfig(), makeSilentLogger());
    const ts = new Date('2024-05-01T12:00:00.000Z');
    store.update(makePosition('DEV013', { timestamp: ts }));
    const state = store.update(makePosition('DEV013', { timestamp: new Date(ts.getTime()) }));
    expect(state.last_seen).toEqual(ts);
  });
});
describe('createDeviceStateStore — LRU eviction', () => {
  const baseTs = new Date('2024-05-01T12:00:00.000Z');
  const storeWithCap = (cap: number) =>
    createDeviceStateStore(makeConfig({ DEVICE_STATE_LRU_CAP: cap }), makeSilentLogger());

  it('evicts the least-recently-updated device when cap is exceeded', () => {
    const store = storeWithCap(3);
    // Insert 3 devices in order — DEV001 becomes the oldest.
    for (const id of ['DEV001', 'DEV002', 'DEV003']) {
      store.update(makePosition(id, { timestamp: baseTs }));
    }
    expect(store.size()).toBe(3);
    // A 4th device pushes out the least-recently-updated one (DEV001).
    store.update(makePosition('DEV004', { timestamp: baseTs }));
    expect(store.size()).toBe(3);
    expect(store.get('DEV001')).toBeUndefined(); // evicted
    expect(store.get('DEV002')).toBeDefined();
    expect(store.get('DEV003')).toBeDefined();
    expect(store.get('DEV004')).toBeDefined();
  });

  it('re-using an existing device bumps it to most-recent so it is not evicted next', () => {
    const store = storeWithCap(3);
    const laterTs = new Date('2024-05-01T12:00:01.000Z');
    for (const id of ['DEV001', 'DEV002', 'DEV003']) {
      store.update(makePosition(id, { timestamp: baseTs }));
    }
    // Re-touch DEV001 — it becomes most-recently-updated; DEV002 is now oldest.
    store.update(makePosition('DEV001', { timestamp: laterTs }));
    store.update(makePosition('DEV004', { timestamp: baseTs }));
    expect(store.size()).toBe(3);
    expect(store.get('DEV001')).toBeDefined(); // was re-touched
    expect(store.get('DEV002')).toBeUndefined(); // evicted (oldest after DEV001 was re-touched)
    expect(store.get('DEV003')).toBeDefined();
    expect(store.get('DEV004')).toBeDefined();
  });

  it('evictedTotal() increments on each eviction', () => {
    const store = storeWithCap(2);
    expect(store.evictedTotal()).toBe(0);
    store.update(makePosition('DEV001', { timestamp: baseTs }));
    store.update(makePosition('DEV002', { timestamp: baseTs }));
    expect(store.evictedTotal()).toBe(0);
    store.update(makePosition('DEV003', { timestamp: baseTs })); // evicts DEV001
    expect(store.evictedTotal()).toBe(1);
    store.update(makePosition('DEV004', { timestamp: baseTs })); // evicts DEV002
    expect(store.evictedTotal()).toBe(2);
  });

  it('evictedTotal() stays 0 when cap is never reached', () => {
    const store = storeWithCap(1000);
    for (let deviceIndex = 0; deviceIndex < 10; deviceIndex++) {
      store.update(makePosition(`DEV${deviceIndex}`, { timestamp: baseTs }));
    }
    expect(store.evictedTotal()).toBe(0);
  });
});
+501
View File
@@ -0,0 +1,501 @@
/**
* Unit tests for src/core/writer.ts
*
* All Postgres I/O is mocked — no real database required. The integration test
* (task 1.10) covers byte-level round-trip including TimescaleDB hypertable.
*
* Covers:
* - Happy path: all records inserted (all appear in RETURNING rows)
* - Duplicate-key: ON CONFLICT DO NOTHING → records absent from RETURNING → 'duplicate'
* - Mixed: half new, half duplicate
* - Pool error: all records in the batch get 'failed'; error is attached
* - Chunking: batch larger than WRITE_BATCH_SIZE results in multiple queries
* - Bigint attribute is stringified before serialization
* - Buffer attribute is base64-encoded before serialization
* - Empty batch returns empty results
* - SQL parameter order: device_id, ts, latitude, longitude, altitude, angle,
* speed, satellites, priority, codec, attributes
*/
import { describe, it, expect, vi } from 'vitest';
import type { Logger } from 'pino';
import type { Pool } from 'pg';
import type { Config } from '../src/config/load.js';
import type { Metrics, Position } from '../src/core/types.js';
import type { ConsumedRecord } from '../src/core/consumer.js';
import { createWriter } from '../src/core/writer.js';
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/**
 * Silent pino-shaped logger; every level is a spy so writer tests can assert
 * on error logging without emitting anything.
 */
function makeSilentLogger(): Logger {
  const stub = {
    level: 'silent',
    trace: vi.fn(),
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
    fatal: vi.fn(),
    silent: vi.fn(),
    child: vi.fn().mockReturnThis(),
  };
  return stub as unknown as Logger;
}
/** Metrics stub where both counters and histograms are plain spies. */
function makeMetrics(): Metrics {
  const inc = vi.fn();
  const observe = vi.fn();
  return { inc, observe };
}
/** Config with test defaults; individual fields overridable per test. */
function makeConfig(overrides: Partial<Config> = {}): Config {
  const testDefaults: Config = {
    NODE_ENV: 'test',
    INSTANCE_ID: 'test-processor',
    LOG_LEVEL: 'silent',
    REDIS_URL: 'redis://localhost:6379',
    POSTGRES_URL: 'postgres://localhost:5432/test',
    REDIS_TELEMETRY_STREAM: 'telemetry:t',
    REDIS_CONSUMER_GROUP: 'processor',
    REDIS_CONSUMER_NAME: 'test-consumer',
    METRICS_PORT: 9090,
    BATCH_SIZE: 10,
    BATCH_BLOCK_MS: 100,
    WRITE_BATCH_SIZE: 50,
    DEVICE_STATE_LRU_CAP: 1000,
  };
  return { ...testDefaults, ...overrides };
}
/** Synthetic Position for writer tests; `overrides` replace default fields. */
function makePosition(deviceId: string, overrides: Partial<Position> = {}): Position {
  const defaults: Position = {
    device_id: deviceId,
    timestamp: new Date('2024-05-01T12:00:00.000Z'),
    latitude: 54.6872,
    longitude: 25.2797,
    altitude: 100,
    angle: 90,
    speed: 50,
    satellites: 12,
    priority: 1,
    attributes: {},
  };
  return { ...defaults, ...overrides };
}
function makeRecord(id: string, deviceId: string, overrides: Partial<Position> = {}): ConsumedRecord {
return {
id,
position: makePosition(deviceId, overrides),
codec: '8',
ts: '2024-05-01T12:00:00.000Z',
};
}
/**
 * Creates a mock pg.Pool whose query() replays `queryResponses` in call order
 * (an Error entry is thrown instead of returned; past the end of the script
 * it answers `{ rows: [] }`). Captures all SQL and params for assertion.
 */
function makeMockPool(
  queryResponses: Array<{ rows: unknown[] } | Error>,
): {
  pool: Pool;
  queryCalls: Array<{ sql: string; params: unknown[] }>;
} {
  const queryCalls: Array<{ sql: string; params: unknown[] }> = [];
  let nextResponse = 0;
  const query = vi.fn(async (sql: string, params: unknown[] = []) => {
    queryCalls.push({ sql, params });
    const scripted = queryResponses[nextResponse];
    nextResponse += 1;
    if (scripted instanceof Error) throw scripted;
    return scripted ?? { rows: [] };
  });
  return {
    pool: { query } as unknown as Pool,
    queryCalls,
  };
}
// ---------------------------------------------------------------------------
// Tests — happy path
// ---------------------------------------------------------------------------
// Happy-path writer tests. Duplicate detection is by set-difference: a record
// whose (device_id, ts) pair is absent from the mocked RETURNING rows is
// classified 'duplicate' (ON CONFLICT DO NOTHING skips it silently).
describe('createWriter — happy path', () => {
  it('returns inserted for all records when all appear in RETURNING', async () => {
    const ts = new Date('2024-05-01T12:00:00.000Z');
    const records = [
      makeRecord('1-0', 'DEV001', { timestamp: ts }),
      makeRecord('1-1', 'DEV002', { timestamp: ts }),
    ];
    const { pool } = makeMockPool([
      {
        rows: [
          { device_id: 'DEV001', ts },
          { device_id: 'DEV002', ts },
        ],
      },
    ]);
    const writer = createWriter(pool, makeConfig(), makeSilentLogger(), makeMetrics());
    const results = await writer.write(records);
    expect(results).toHaveLength(2);
    expect(results[0]).toEqual({ id: '1-0', status: 'inserted' });
    expect(results[1]).toEqual({ id: '1-1', status: 'inserted' });
  });
  it('returns duplicate for records absent from RETURNING', async () => {
    const ts = new Date('2024-05-01T12:00:00.000Z');
    const records = [
      makeRecord('2-0', 'DEV003', { timestamp: ts }),
      makeRecord('2-1', 'DEV004', { timestamp: ts }),
    ];
    // Only DEV003 returned — DEV004 was a conflict
    const { pool } = makeMockPool([{ rows: [{ device_id: 'DEV003', ts }] }]);
    const writer = createWriter(pool, makeConfig(), makeSilentLogger(), makeMetrics());
    const results = await writer.write(records);
    expect(results).toHaveLength(2);
    expect(results.find((r) => r.id === '2-0')?.status).toBe('inserted');
    expect(results.find((r) => r.id === '2-1')?.status).toBe('duplicate');
  });
  it('handles mixed batch: some inserted, some duplicate', async () => {
    const ts = new Date('2024-05-01T12:00:00.000Z');
    const records = [
      makeRecord('3-0', 'DEV005', { timestamp: ts }),
      makeRecord('3-1', 'DEV006', { timestamp: ts }),
      makeRecord('3-2', 'DEV007', { timestamp: ts }),
      makeRecord('3-3', 'DEV008', { timestamp: ts }),
    ];
    // DEV005 and DEV007 are new; DEV006 and DEV008 are duplicates
    const { pool } = makeMockPool([
      {
        rows: [
          { device_id: 'DEV005', ts },
          { device_id: 'DEV007', ts },
        ],
      },
    ]);
    const writer = createWriter(pool, makeConfig(), makeSilentLogger(), makeMetrics());
    const results = await writer.write(records);
    expect(results.find((r) => r.id === '3-0')?.status).toBe('inserted');
    expect(results.find((r) => r.id === '3-1')?.status).toBe('duplicate');
    expect(results.find((r) => r.id === '3-2')?.status).toBe('inserted');
    expect(results.find((r) => r.id === '3-3')?.status).toBe('duplicate');
  });
  it('returns empty array for empty input', async () => {
    // Empty input must short-circuit: no scripted responses, no query needed.
    const { pool } = makeMockPool([]);
    const writer = createWriter(pool, makeConfig(), makeSilentLogger(), makeMetrics());
    const results = await writer.write([]);
    expect(results).toEqual([]);
  });
});
// ---------------------------------------------------------------------------
// Tests — failure handling
// ---------------------------------------------------------------------------
describe('createWriter — pool error', () => {
  it('returns failed for all records in the chunk when pool throws', async () => {
    const when = new Date('2024-05-01T12:00:00.000Z');
    const batch = [
      makeRecord('4-0', 'DEV009', { timestamp: when }),
      makeRecord('4-1', 'DEV010', { timestamp: when }),
    ];
    const { pool } = makeMockPool([new Error('connection terminated unexpectedly')]);
    const writer = createWriter(pool, makeConfig(), makeSilentLogger(), makeMetrics());
    const outcome = await writer.write(batch);
    expect(outcome).toHaveLength(2);
    // Every record in the failed chunk carries the failure status and its cause.
    for (const entry of outcome) {
      expect(entry.status).toBe('failed');
      expect(entry.error).toBeDefined();
      expect(entry.error?.message).toBe('connection terminated unexpectedly');
    }
  });
  it('logs error and increments failed metric on pool error', async () => {
    const { pool } = makeMockPool([new Error('timeout')]);
    const logger = makeSilentLogger();
    const metrics = makeMetrics();
    const writer = createWriter(pool, makeConfig(), logger, metrics);
    await writer.write([makeRecord('5-0', 'DEV011')]);
    expect(logger.error).toHaveBeenCalled();
    expect(metrics.inc).toHaveBeenCalledWith('processor_position_writes_total', { status: 'failed' });
  });
});
// ---------------------------------------------------------------------------
// Tests — chunking
// ---------------------------------------------------------------------------
describe('createWriter — chunking', () => {
  it('splits a batch larger than WRITE_BATCH_SIZE into multiple sequential queries', async () => {
    const config = makeConfig({ WRITE_BATCH_SIZE: 3 });
    const when = new Date('2024-05-01T12:00:00.000Z');
    // 7 records at chunk size 3 → ceil(7/3) = 3 chunks of [3, 3, 1].
    const batch = Array.from({ length: 7 }, (_, i) =>
      makeRecord(`6-${i}`, `DEV${100 + i}`, { timestamp: when }),
    );
    // One RETURNING result per chunk, echoing back each chunk's rows.
    const returning = (from: number, to?: number) => ({
      rows: batch.slice(from, to).map((r) => ({ device_id: r.position.device_id, ts: when })),
    });
    const { pool, queryCalls } = makeMockPool([returning(0, 3), returning(3, 6), returning(6)]);
    const writer = createWriter(pool, config, makeSilentLogger(), makeMetrics());
    const outcome = await writer.write(batch);
    // One query per chunk, and all 7 records accounted for.
    expect(queryCalls).toHaveLength(3);
    expect(outcome).toHaveLength(7);
    for (const entry of outcome) {
      expect(entry.status).toBe('inserted');
    }
  });
  it('first chunk fails, second chunk succeeds — correct per-record status', async () => {
    const config = makeConfig({ WRITE_BATCH_SIZE: 2 });
    const when = new Date('2024-05-01T12:00:00.000Z');
    const batch = Array.from({ length: 4 }, (_, i) =>
      makeRecord(`7-${i}`, `DEV${200 + i}`, { timestamp: when }),
    );
    // Chunk [0,1] errors; chunk [2,3] inserts cleanly.
    const { pool } = makeMockPool([
      new Error('chunk 1 failed'),
      { rows: batch.slice(2).map((r) => ({ device_id: r.position.device_id, ts: when })) },
    ]);
    const writer = createWriter(pool, config, makeSilentLogger(), makeMetrics());
    const outcome = await writer.write(batch);
    expect(outcome[0]?.status).toBe('failed');
    expect(outcome[1]?.status).toBe('failed');
    expect(outcome[2]?.status).toBe('inserted');
    expect(outcome[3]?.status).toBe('inserted');
  });
});
// ---------------------------------------------------------------------------
// Tests — attribute serialization
// ---------------------------------------------------------------------------
describe('createWriter — attribute serialization', () => {
it('serializes bigint attributes as decimal strings in the attributes JSON', async () => {
const ts = new Date('2024-05-01T12:00:00.000Z');
const u64Max = BigInt('18446744073709551615');
const records = [
makeRecord('8-0', 'DEV020', {
timestamp: ts,
attributes: { io_240: u64Max },
}),
];
let capturedParams: unknown[] = [];
const query = vi.fn(async (sql: string, params: unknown[]) => {
capturedParams = params;
return { rows: [{ device_id: 'DEV020', ts }] };
});
const pool = { query } as unknown as Pool;
const writer = createWriter(pool, makeConfig(), makeSilentLogger(), makeMetrics());
await writer.write(records);
// The attributes param is the 11th per row (index 10)
const attributesParam = capturedParams[10] as string;
expect(typeof attributesParam).toBe('string');
const parsed = JSON.parse(attributesParam) as Record<string, unknown>;
expect(parsed['io_240']).toBe('18446744073709551615');
// Must be a string, not a bigint (JSON can't hold bigints)
expect(typeof parsed['io_240']).toBe('string');
});
it('serializes Buffer attributes as base64 strings in the attributes JSON', async () => {
const ts = new Date('2024-05-01T12:00:00.000Z');
const rawBytes = Buffer.from([0xde, 0xad, 0xbe, 0xef]);
const records = [
makeRecord('9-0', 'DEV021', {
timestamp: ts,
attributes: { io_nx: rawBytes },
}),
];
let capturedParams: unknown[] = [];
const query = vi.fn(async (sql: string, params: unknown[]) => {
capturedParams = params;
return { rows: [{ device_id: 'DEV021', ts }] };
});
const pool = { query } as unknown as Pool;
const writer = createWriter(pool, makeConfig(), makeSilentLogger(), makeMetrics());
await writer.write(records);
const attributesParam = capturedParams[10] as string;
expect(typeof attributesParam).toBe('string');
const parsed = JSON.parse(attributesParam) as Record<string, unknown>;
const b64 = parsed['io_nx'];
expect(typeof b64).toBe('string');
// Decode and verify byte equality
const decoded = Buffer.from(b64 as string, 'base64');
expect(decoded).toEqual(rawBytes);
});
it('serializes numeric attributes as-is in the attributes JSON', async () => {
const ts = new Date('2024-05-01T12:00:00.000Z');
const records = [
makeRecord('10-0', 'DEV022', {
timestamp: ts,
attributes: { io_21: 42, io_1: 0 },
}),
];
let capturedParams: unknown[] = [];
const query = vi.fn(async (sql: string, params: unknown[]) => {
capturedParams = params;
return { rows: [{ device_id: 'DEV022', ts }] };
});
const pool = { query } as unknown as Pool;
const writer = createWriter(pool, makeConfig(), makeSilentLogger(), makeMetrics());
await writer.write(records);
const attributesParam = capturedParams[10] as string;
const parsed = JSON.parse(attributesParam) as Record<string, unknown>;
expect(parsed['io_21']).toBe(42);
expect(parsed['io_1']).toBe(0);
});
});
// ---------------------------------------------------------------------------
// Tests — SQL parameter ordering
// ---------------------------------------------------------------------------
describe('createWriter — SQL parameter ordering', () => {
  it('passes parameters in the correct column order', async () => {
    const ts = new Date('2024-05-01T12:00:00.000Z');
    const position: Position = {
      device_id: 'PARAMTEST001',
      timestamp: ts,
      latitude: 10.0,
      longitude: 20.0,
      altitude: 300,
      angle: 180,
      speed: 75,
      satellites: 9,
      priority: 2,
      attributes: { io_21: 99 },
    };
    const records: ConsumedRecord[] = [{ id: 'p-0', position, codec: '8E', ts: ts.toISOString() }];
    let capturedParams: unknown[] = [];
    // `_sql` is deliberately unused here (underscore-prefixed per the file's
    // lint convention) — this test only inspects the bind parameters.
    const query = vi.fn(async (_sql: string, params: unknown[]) => {
      capturedParams = params;
      return { rows: [{ device_id: 'PARAMTEST001', ts }] };
    });
    const pool = { query } as unknown as Pool;
    const writer = createWriter(pool, makeConfig(), makeSilentLogger(), makeMetrics());
    await writer.write(records);
    // Expected column order (11 params for row 0, 1-indexed in SQL, 0-indexed here):
    // $1 device_id
    // $2 ts
    // $3 latitude
    // $4 longitude
    // $5 altitude
    // $6 angle
    // $7 speed
    // $8 satellites
    // $9 priority
    // $10 codec
    // $11 attributes
    expect(capturedParams[0]).toBe('PARAMTEST001');
    expect(capturedParams[1]).toEqual(ts);
    expect(capturedParams[2]).toBe(10.0);
    expect(capturedParams[3]).toBe(20.0);
    expect(capturedParams[4]).toBe(300);
    expect(capturedParams[5]).toBe(180);
    expect(capturedParams[6]).toBe(75);
    expect(capturedParams[7]).toBe(9);
    expect(capturedParams[8]).toBe(2);
    expect(capturedParams[9]).toBe('8E');
    expect(typeof capturedParams[10]).toBe('string');
    const attrs = JSON.parse(capturedParams[10] as string) as Record<string, unknown>;
    expect(attrs['io_21']).toBe(99);
  });
  it('SQL contains ON CONFLICT DO NOTHING and RETURNING clause', async () => {
    const records = [makeRecord('q-0', 'DEV030')];
    let capturedSql = '';
    const query = vi.fn(async (sql: string, _params: unknown[]) => {
      capturedSql = sql;
      return { rows: [{ device_id: 'DEV030', ts: new Date() }] };
    });
    const pool = { query } as unknown as Pool;
    const writer = createWriter(pool, makeConfig(), makeSilentLogger(), makeMetrics());
    await writer.write(records);
    // Conflict-skip + RETURNING is what makes duplicate detection by
    // set-difference possible (skipped rows are absent from RETURNING).
    expect(capturedSql).toMatch(/ON CONFLICT.*DO NOTHING/i);
    expect(capturedSql).toMatch(/RETURNING/i);
    expect(capturedSql).toMatch(/device_id/);
    expect(capturedSql).toMatch(/ts/);
  });
});
// ---------------------------------------------------------------------------
// Tests — metrics
// ---------------------------------------------------------------------------
describe('createWriter — metrics', () => {
  it('emits inserted and duplicate counters after a successful write', async () => {
    const when = new Date('2024-05-01T12:00:00.000Z');
    const batch = [
      makeRecord('m-0', 'DEV040', { timestamp: when }),
      makeRecord('m-1', 'DEV041', { timestamp: when }),
    ];
    // RETURNING only lists DEV040 — DEV041 counts as a duplicate.
    const { pool } = makeMockPool([{ rows: [{ device_id: 'DEV040', ts: when }] }]);
    const metrics = makeMetrics();
    const writer = createWriter(pool, makeConfig(), makeSilentLogger(), metrics);
    await writer.write(batch);
    // One counter increment per outcome class, plus a write-duration sample.
    expect(metrics.inc).toHaveBeenCalledWith('processor_position_writes_total', { status: 'inserted' });
    expect(metrics.inc).toHaveBeenCalledWith('processor_position_writes_total', { status: 'duplicate' });
    expect(metrics.observe).toHaveBeenCalledWith(
      'processor_position_write_duration_seconds',
      expect.any(Number),
    );
  });
});