feat(live): task 1.5.4 — broadcast consumer group and fan-out
Adds the per-instance Redis Stream consumer group (live-broadcast-{instance_id})
that reads the telemetry stream and fans out each position to subscribed
WebSocket connections without affecting the durable-write consumer path.
Key changes:
- src/shared/codec.ts: moved decodePosition/CodecError out of src/core/ so
src/live/broadcast.ts can decode positions without crossing the enforced
src/core/ ↔ src/live/ boundary; src/core/codec.ts now re-exports from there
- src/shared/types.ts: added Position and AttributeValue (same move, same reason);
src/core/types.ts re-exports both to preserve existing import paths
- src/live/broadcast.ts: createBroadcastConsumer factory — XREADGROUP loop,
immediate ACK semantics, toPositionMessage mapper, fanOut per event/topic
- src/live/device-event-map.ts: createDeviceEventMap factory — in-memory cache
of entry_devices × entries join, refreshed every LIVE_DEVICE_EVENT_REFRESH_MS
- src/db/migrations/0002_positions_faulty.sql: adds faulty boolean column and
positions_device_ts_idx for snapshot-on-subscribe query (task 1.5.5)
- src/main.ts: wired authClient, authzClient, registry, liveServer,
deviceEventMap, broadcastConsumer; shutdown chain: liveServer → deviceEventMap
+ broadcastConsumer → durable-write consumer → metricsServer → Redis → Postgres
- test/live-broadcast.test.ts: 4 unit tests covering single subscriber, multiple
subscribers, orphan device, and multi-event device fan-out
This commit is contained in:
@@ -0,0 +1,385 @@
|
||||
/**
|
||||
* Unit tests for src/live/broadcast.ts — broadcast consumer fan-out logic.
|
||||
*
|
||||
* Strategy: exercise fanOut in isolation by driving a single-iteration loop.
|
||||
* We stub XREADGROUP to return one batch of entries, then immediately set
|
||||
* `stopping = true` via `stop()`. The Redis `xgroup` CREATE call returns
|
||||
* BUSYGROUP (group already exists) so `ensureGroup` succeeds without a real
|
||||
* server.
|
||||
*
|
||||
* `sendOutbound` is called with real LiveConnection stubs that have a mock
|
||||
* `ws.send`. This tests the full fanOut → sendOutbound → ws.send path without
|
||||
* any module mocking.
|
||||
*
|
||||
* Covers (spec: task 1.5.4):
|
||||
* 1. Single subscriber on an event receives a correctly-shaped position message.
|
||||
* 2. Multiple subscribers on the same event each receive the message.
|
||||
* 3. Orphan device (not in any event) increments orphan counter, sends nothing.
|
||||
* 4. Device registered to multiple events emits one message per event topic.
|
||||
*/
|
||||
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
import WebSocket from 'ws';

import type { Redis } from 'ioredis';
import type { Logger } from 'pino';

import type { Config } from '../src/config/load.js';
import type { Metrics } from '../src/shared/types.js';
import type { SubscriptionRegistry } from '../src/live/registry.js';
import type { DeviceEventMap } from '../src/live/device-event-map.js';
import type { LiveConnection } from '../src/live/server.js';
import { createBroadcastConsumer } from '../src/live/broadcast.js';
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function makeSilentLogger(): Logger {
|
||||
return {
|
||||
debug: vi.fn(),
|
||||
info: vi.fn(),
|
||||
warn: vi.fn(),
|
||||
error: vi.fn(),
|
||||
fatal: vi.fn(),
|
||||
trace: vi.fn(),
|
||||
child: vi.fn().mockReturnThis(),
|
||||
level: 'silent',
|
||||
silent: vi.fn(),
|
||||
} as unknown as Logger;
|
||||
}
|
||||
|
||||
type RecordedMetrics = Metrics & {
|
||||
incCalls: Array<{ name: string; labels?: Record<string, string>; value?: number }>;
|
||||
observeCalls: Array<{ name: string; value: number }>;
|
||||
};
|
||||
|
||||
function makeMetrics(): RecordedMetrics {
|
||||
const incCalls: Array<{ name: string; labels?: Record<string, string>; value?: number }> = [];
|
||||
const observeCalls: Array<{ name: string; value: number }> = [];
|
||||
return {
|
||||
incCalls,
|
||||
observeCalls,
|
||||
inc(name, labels?, value?) { incCalls.push({ name, labels, value }); },
|
||||
observe(name, value) { observeCalls.push({ name, value }); },
|
||||
};
|
||||
}
|
||||
|
||||
function makeConfig(): Config {
|
||||
return {
|
||||
NODE_ENV: 'test',
|
||||
INSTANCE_ID: 'test-instance',
|
||||
LOG_LEVEL: 'silent',
|
||||
REDIS_URL: 'redis://localhost:6379',
|
||||
POSTGRES_URL: 'postgres://localhost:5432/test',
|
||||
REDIS_TELEMETRY_STREAM: 'telemetry:teltonika',
|
||||
REDIS_CONSUMER_GROUP: 'processor',
|
||||
REDIS_CONSUMER_NAME: 'test-consumer',
|
||||
METRICS_PORT: 0,
|
||||
BATCH_SIZE: 100,
|
||||
BATCH_BLOCK_MS: 500,
|
||||
WRITE_BATCH_SIZE: 50,
|
||||
DEVICE_STATE_LRU_CAP: 10_000,
|
||||
LIVE_WS_PORT: 8081,
|
||||
LIVE_WS_HOST: '0.0.0.0',
|
||||
LIVE_WS_PING_INTERVAL_MS: 30_000,
|
||||
LIVE_WS_DRAIN_TIMEOUT_MS: 5_000,
|
||||
LIVE_WS_BACKPRESSURE_THRESHOLD_BYTES: 1_048_576,
|
||||
DIRECTUS_BASE_URL: 'http://directus.test',
|
||||
DIRECTUS_AUTH_TIMEOUT_MS: 5_000,
|
||||
DIRECTUS_AUTHZ_TIMEOUT_MS: 5_000,
|
||||
LIVE_BROADCAST_GROUP_PREFIX: 'live-broadcast',
|
||||
LIVE_BROADCAST_BATCH_SIZE: 100,
|
||||
LIVE_BROADCAST_BATCH_BLOCK_MS: 1_000,
|
||||
LIVE_DEVICE_EVENT_REFRESH_MS: 30_000,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Builds a synthetic LiveConnection stub whose `ws.send` captures JSON-parsed
|
||||
* outbound messages. `bufferedAmount` is 0 so sendOutbound never closes it.
|
||||
*/
|
||||
function makeConn(id = 'conn-1'): LiveConnection & { sentMessages: unknown[] } {
|
||||
const sentMessages: unknown[] = [];
|
||||
const ws = {
|
||||
readyState: WebSocket.OPEN,
|
||||
bufferedAmount: 0,
|
||||
send: vi.fn((data: string) => { sentMessages.push(JSON.parse(data)); }),
|
||||
close: vi.fn(),
|
||||
} as unknown as WebSocket;
|
||||
|
||||
return {
|
||||
id,
|
||||
ws,
|
||||
remoteAddr: '127.0.0.1',
|
||||
openedAt: new Date(),
|
||||
lastSeenAt: new Date(),
|
||||
user: {
|
||||
id: 'user-1',
|
||||
email: 'test@test.com',
|
||||
role: null,
|
||||
first_name: 'T',
|
||||
last_name: 'U',
|
||||
},
|
||||
cookieHeader: 'session=x',
|
||||
sentMessages,
|
||||
};
|
||||
}
|
||||
|
||||
/** Serialises a Position into the flat wire payload that broadcast.ts expects. */
|
||||
function makePositionPayload(overrides: Partial<{
|
||||
device_id: string;
|
||||
timestamp: string;
|
||||
speed: number;
|
||||
angle: number;
|
||||
}> = {}): string {
|
||||
return JSON.stringify({
|
||||
device_id: overrides.device_id ?? 'IMEI123',
|
||||
timestamp: overrides.timestamp ?? new Date('2025-01-01T12:00:00.000Z').toISOString(),
|
||||
latitude: 41.33165,
|
||||
longitude: 19.83177,
|
||||
altitude: 50,
|
||||
angle: overrides.angle ?? 0,
|
||||
speed: overrides.speed ?? 0,
|
||||
satellites: 8,
|
||||
priority: 0,
|
||||
attributes: {},
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Builds a fake XREADGROUP result for a single stream entry.
|
||||
* ioredis returns: `[[streamName, [[id, fieldValueArray]]]]`
|
||||
*/
|
||||
function makeXreadgroupResult(
|
||||
stream: string,
|
||||
id: string,
|
||||
payload: string,
|
||||
): [string, [string, string[]][]][] {
|
||||
return [[stream, [[id, ['payload', payload]]]]];
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a Redis stub that:
|
||||
* - `xgroup` returns BUSYGROUP error (group already exists — happy path).
|
||||
* - `xreadgroup` returns the provided result on the first call, then blocks
|
||||
* for up to 2s on subsequent calls (simulating real BLOCK behaviour).
|
||||
* Blocking is implemented by waiting for `stopSignal` to resolve, capped
|
||||
* at 2000ms so tests cannot hang indefinitely.
|
||||
* - `xack` resolves immediately and triggers the stopSignal promise.
|
||||
*/
|
||||
function makeRedis(
|
||||
firstXreadgroupResult: [string, [string, string[]][]][] | null,
|
||||
): Redis & { stopSignal: Promise<void>; triggerStop: () => void } {
|
||||
let xreadgroupCallCount = 0;
|
||||
let triggerStop!: () => void;
|
||||
const stopSignal = new Promise<void>((resolve) => { triggerStop = resolve; });
|
||||
|
||||
const redis: Redis & { stopSignal: Promise<void>; triggerStop: () => void } = {
|
||||
xgroup: vi.fn().mockRejectedValue(Object.assign(new Error('BUSYGROUP group already exists'), {})),
|
||||
xreadgroup: vi.fn((..._args: unknown[]) => {
|
||||
xreadgroupCallCount += 1;
|
||||
if (xreadgroupCallCount === 1) {
|
||||
return Promise.resolve(firstXreadgroupResult);
|
||||
}
|
||||
// Block until stop() is called (or 2s timeout as safety valve).
|
||||
return Promise.race([
|
||||
stopSignal.then(() => null as null),
|
||||
new Promise<null>((resolve) => setTimeout(() => resolve(null), 2_000)),
|
||||
]);
|
||||
}),
|
||||
xack: vi.fn().mockImplementation(() => {
|
||||
// Signal that the batch has been processed — stop() can now be called.
|
||||
triggerStop();
|
||||
return Promise.resolve(1);
|
||||
}),
|
||||
status: 'ready',
|
||||
stopSignal,
|
||||
triggerStop,
|
||||
} as unknown as Redis & { stopSignal: Promise<void>; triggerStop: () => void };
|
||||
|
||||
return redis;
|
||||
}
|
||||
|
||||
/** Creates a SubscriptionRegistry stub that maps topic → connections. */
|
||||
function makeRegistry(
|
||||
topicToConns: Map<string, LiveConnection[]>,
|
||||
): SubscriptionRegistry {
|
||||
return {
|
||||
connectionsForTopic: vi.fn((topic: string) => topicToConns.get(topic) ?? []),
|
||||
subscribe: vi.fn(),
|
||||
unsubscribe: vi.fn(),
|
||||
onConnectionClose: vi.fn(),
|
||||
topicsForConnection: vi.fn().mockReturnValue([]),
|
||||
stats: vi.fn().mockReturnValue({ connections: 0, subscriptions: 0, topics: 0 }),
|
||||
};
|
||||
}
|
||||
|
||||
/** Creates a DeviceEventMap stub. */
|
||||
function makeDeviceEventMap(deviceToEvents: Map<string, string[]>): DeviceEventMap {
|
||||
return {
|
||||
lookup: vi.fn((deviceId: string) => deviceToEvents.get(deviceId) ?? []),
|
||||
start: vi.fn().mockResolvedValue(undefined),
|
||||
stop: vi.fn(),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Runs the broadcast consumer for one batch: starts it, waits until xack has
|
||||
* been called (the batch was fully processed), then stops it.
|
||||
*
|
||||
* The Redis stub's xreadgroup blocks on the second call until xack fires
|
||||
* (or 2s timeout), so `stop()` always finds the loop idle before terminating.
|
||||
*/
|
||||
async function runOneBatch(
|
||||
redis: ReturnType<typeof makeRedis>,
|
||||
registry: SubscriptionRegistry,
|
||||
deviceEventMap: DeviceEventMap,
|
||||
config: Config,
|
||||
logger: Logger,
|
||||
metrics: Metrics,
|
||||
): Promise<void> {
|
||||
const consumer = createBroadcastConsumer(redis, registry, deviceEventMap, config, logger, metrics);
|
||||
await consumer.start();
|
||||
|
||||
// Wait until the xack mock fires (which also triggers stopSignal, causing the
|
||||
// second xreadgroup call to unblock and return null). Give up after 3s to
|
||||
// avoid hanging if the batch was empty / all entries were skipped.
|
||||
await Promise.race([
|
||||
redis.stopSignal,
|
||||
new Promise<void>((resolve) => setTimeout(resolve, 3_000)),
|
||||
]);
|
||||
|
||||
await consumer.stop();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('createBroadcastConsumer', () => {
  // Shared fixtures, rebuilt fresh per test in beforeEach.
  let config: Config;
  let logger: Logger;
  let metrics: RecordedMetrics;

  // Fixed identifiers reused across all four tests.
  const STREAM = 'telemetry:teltonika';
  const EVENT_A = 'aaa00000-0000-0000-0000-000000000001';
  const EVENT_B = 'bbb00000-0000-0000-0000-000000000002';
  const DEVICE_ID = 'IMEI999888777';

  beforeEach(() => {
    config = makeConfig();
    logger = makeSilentLogger();
    metrics = makeMetrics();
  });

  // Case 1 (spec 1.5.4): one device → one event → one subscriber; pins the
  // exact outbound message shape (type/topic/deviceId/lat/lon/ts/speed/course).
  it('sends a correctly-shaped position message to a single subscriber', async () => {
    const conn = makeConn('c1');
    const topicToConns = new Map([[`event:${EVENT_A}`, [conn]]]);
    const deviceToEvents = new Map([[DEVICE_ID, [EVENT_A]]]);

    const payload = makePositionPayload({ device_id: DEVICE_ID, speed: 42, angle: 180 });
    const redis = makeRedis(makeXreadgroupResult(STREAM, '1-0', payload));
    const registry = makeRegistry(topicToConns);
    const deviceEventMap = makeDeviceEventMap(deviceToEvents);

    await runOneBatch(redis, registry, deviceEventMap, config, logger, metrics);

    expect(conn.sentMessages).toHaveLength(1);
    const msg = conn.sentMessages[0] as Record<string, unknown>;
    expect(msg['type']).toBe('position');
    expect(msg['topic']).toBe(`event:${EVENT_A}`);
    expect(msg['deviceId']).toBe(DEVICE_ID);
    expect(typeof msg['lat']).toBe('number');
    expect(typeof msg['lon']).toBe('number');
    expect(typeof msg['ts']).toBe('number');
    // speed and course are included when non-zero
    expect(msg['speed']).toBe(42);
    expect(msg['course']).toBe(180);
  });

  // Case 2: three subscribers on the same topic — each gets its own copy.
  it('sends to all subscribers on the same event', async () => {
    const conn1 = makeConn('c1');
    const conn2 = makeConn('c2');
    const conn3 = makeConn('c3');
    const topicToConns = new Map([[`event:${EVENT_A}`, [conn1, conn2, conn3]]]);
    const deviceToEvents = new Map([[DEVICE_ID, [EVENT_A]]]);

    const payload = makePositionPayload({ device_id: DEVICE_ID });
    const redis = makeRedis(makeXreadgroupResult(STREAM, '1-0', payload));
    const registry = makeRegistry(topicToConns);
    const deviceEventMap = makeDeviceEventMap(deviceToEvents);

    await runOneBatch(redis, registry, deviceEventMap, config, logger, metrics);

    expect(conn1.sentMessages).toHaveLength(1);
    expect(conn2.sentMessages).toHaveLength(1);
    expect(conn3.sentMessages).toHaveLength(1);

    // All received the same topic
    for (const conn of [conn1, conn2, conn3]) {
      expect((conn.sentMessages[0] as Record<string, unknown>)['topic']).toBe(`event:${EVENT_A}`);
    }
  });

  // Case 3: device not present in the device→event map — nothing fans out,
  // only the orphan-records counter is incremented.
  it('increments orphan counter and sends nothing for an unregistered device', async () => {
    const conn = makeConn('c1');
    // Device has no events registered
    const deviceToEvents = new Map<string, string[]>();
    const topicToConns = new Map([[`event:${EVENT_A}`, [conn]]]);

    const payload = makePositionPayload({ device_id: DEVICE_ID });
    const redis = makeRedis(makeXreadgroupResult(STREAM, '1-0', payload));
    const registry = makeRegistry(topicToConns);
    const deviceEventMap = makeDeviceEventMap(deviceToEvents);

    await runOneBatch(redis, registry, deviceEventMap, config, logger, metrics);

    expect(conn.sentMessages).toHaveLength(0);

    const orphanInc = metrics.incCalls.find(
      (c) => c.name === 'processor_live_broadcast_orphan_records_total',
    );
    expect(orphanInc).toBeDefined();
  });

  // Case 4: device registered to two events — one message per event topic,
  // and the fan-out counter is incremented once per (topic, connection) pair.
  it('emits one message per topic for a device registered to multiple events', async () => {
    // conn1 subscribes to EVENT_A only, conn2 to EVENT_B only,
    // conn3 subscribes to both. The device is registered to both events.
    const conn1 = makeConn('c1');
    const conn2 = makeConn('c2');
    const conn3a = makeConn('c3a'); // conn3's subscription to EVENT_A
    const conn3b = makeConn('c3b'); // conn3's subscription to EVENT_B (separate entry)

    const topicToConns = new Map([
      [`event:${EVENT_A}`, [conn1, conn3a]],
      [`event:${EVENT_B}`, [conn2, conn3b]],
    ]);
    const deviceToEvents = new Map([[DEVICE_ID, [EVENT_A, EVENT_B]]]);

    const payload = makePositionPayload({ device_id: DEVICE_ID });
    const redis = makeRedis(makeXreadgroupResult(STREAM, '1-0', payload));
    const registry = makeRegistry(topicToConns);
    const deviceEventMap = makeDeviceEventMap(deviceToEvents);

    await runOneBatch(redis, registry, deviceEventMap, config, logger, metrics);

    // conn1 is in EVENT_A only → 1 message with topic event:EVENT_A
    expect(conn1.sentMessages).toHaveLength(1);
    expect((conn1.sentMessages[0] as Record<string, unknown>)['topic']).toBe(`event:${EVENT_A}`);

    // conn2 is in EVENT_B only → 1 message with topic event:EVENT_B
    expect(conn2.sentMessages).toHaveLength(1);
    expect((conn2.sentMessages[0] as Record<string, unknown>)['topic']).toBe(`event:${EVENT_B}`);

    // conn3a is the EVENT_A entry for conn3 → 1 message
    expect(conn3a.sentMessages).toHaveLength(1);
    expect((conn3a.sentMessages[0] as Record<string, unknown>)['topic']).toBe(`event:${EVENT_A}`);

    // conn3b is the EVENT_B entry for conn3 → 1 message
    expect(conn3b.sentMessages).toHaveLength(1);
    expect((conn3b.sentMessages[0] as Record<string, unknown>)['topic']).toBe(`event:${EVENT_B}`);

    // Fanout counter: EVENT_A has 2 conns, EVENT_B has 2 conns → total 4 increments
    const fanoutIncs = metrics.incCalls.filter(
      (c) => c.name === 'processor_live_broadcast_fanout_messages_total',
    );
    expect(fanoutIncs).toHaveLength(4);
  });
});
|
||||
Reference in New Issue
Block a user