Implement Phase 1 task 1.8 (Redis Streams publisher + main wiring)
- Bounded in-memory queue (default 10000); overflow throws PublishOverflowError so the framing layer skips ACK and the device retransmits. - Background worker drains via XADD with MAXLEN ~ approximate trimming. - JSON serialization with sentinel encoding for bigint/Buffer/Date; correctly handles Buffer.prototype.toJSON firing before the replacer. - AdapterContext.publish(position, codec) with codec-label closure at dispatch in adapters/teltonika/index.ts; zero changes to the three codec parsers. - connectRedis with retry-on-startup; main.ts wires the full pipeline. - installGracefulShutdown stubbed (full hardening in task 1.12). - 19 new tests (17 unit + 2 Docker-conditional integration). Total 81 passing.
This commit is contained in:
@@ -0,0 +1,295 @@
/**
 * Unit tests for src/core/publish.ts
 *
 * Covers:
 * - jsonReplacer sentinel encoding (bigint, Buffer, Date, plain values)
 * - serializePosition field shape
 * - PublishOverflowError thrown when queue is full
 * - publish() is non-blocking (returns before XADD completes)
 * - connectRedis retry helper fails with a clear error on unreachable host
 */
|
||||
import { describe, it, expect, vi } from 'vitest';
|
||||
import type { Logger } from 'pino';
|
||||
import type { Metrics, Position } from '../src/core/types.js';
|
||||
import {
|
||||
jsonReplacer,
|
||||
serializePosition,
|
||||
createPublisher,
|
||||
PublishOverflowError,
|
||||
connectRedis,
|
||||
} from '../src/core/publish.js';
|
||||
import type { Config } from '../src/config/load.js';
|
||||
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

function makePosition(overrides: Partial<Position> = {}): Position {
|
||||
return {
|
||||
device_id: 'TEST123456789',
|
||||
timestamp: new Date('2024-01-15T10:30:00.000Z'),
|
||||
latitude: 54.12345,
|
||||
longitude: 25.98765,
|
||||
altitude: 150,
|
||||
angle: 270,
|
||||
speed: 60,
|
||||
satellites: 8,
|
||||
priority: 1,
|
||||
attributes: {},
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
function makeConfig(overrides: Partial<Config> = {}): Config {
|
||||
return {
|
||||
NODE_ENV: 'test',
|
||||
INSTANCE_ID: 'test-instance',
|
||||
LOG_LEVEL: 'silent',
|
||||
TELTONIKA_PORT: 5027,
|
||||
REDIS_URL: 'redis://localhost:6379',
|
||||
REDIS_TELEMETRY_STREAM: 'telemetry:teltonika',
|
||||
REDIS_STREAM_MAXLEN: 1_000_000,
|
||||
METRICS_PORT: 9090,
|
||||
PUBLISH_QUEUE_CAPACITY: 10_000,
|
||||
STRICT_DEVICE_AUTH: false,
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
function makeSilentLogger(): Logger {
|
||||
return {
|
||||
debug: vi.fn(),
|
||||
info: vi.fn(),
|
||||
warn: vi.fn(),
|
||||
error: vi.fn(),
|
||||
fatal: vi.fn(),
|
||||
child: vi.fn().mockReturnThis(),
|
||||
} as unknown as Logger;
|
||||
}
|
||||
|
||||
function makeMetrics(): Metrics {
|
||||
return {
|
||||
inc: vi.fn(),
|
||||
observe: vi.fn(),
|
||||
};
|
||||
}
|
||||
|
||||
// Minimal ioredis stub used for queue tests
|
||||
function makeHangingRedis(): {
|
||||
xadd: ReturnType<typeof vi.fn>;
|
||||
quit: ReturnType<typeof vi.fn>;
|
||||
} {
|
||||
return {
|
||||
// Returns a promise that never resolves — simulates a hung Redis
|
||||
xadd: vi.fn(() => new Promise<string>(() => {})),
|
||||
quit: vi.fn().mockResolvedValue('OK'),
|
||||
};
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// 1. jsonReplacer encoding
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('jsonReplacer', () => {
|
||||
it('encodes bigint as { __bigint: "<digits>" }', () => {
|
||||
const result = jsonReplacer('x', BigInt('9007199254740993'));
|
||||
expect(result).toEqual({ __bigint: '9007199254740993' });
|
||||
});
|
||||
|
||||
it('encodes zero bigint correctly', () => {
|
||||
expect(jsonReplacer('x', 0n)).toEqual({ __bigint: '0' });
|
||||
});
|
||||
|
||||
it('encodes Buffer as { __buffer_b64: "<base64>" }', () => {
|
||||
const buf = Buffer.from([0xde, 0xad, 0xbe, 0xef]);
|
||||
const result = jsonReplacer('x', buf);
|
||||
expect(result).toEqual({ __buffer_b64: buf.toString('base64') });
|
||||
});
|
||||
|
||||
it('encodes Buffer subarray view (zero-copy) correctly', () => {
|
||||
const backing = Buffer.from([0x00, 0x01, 0x02, 0x03, 0x04, 0x05]);
|
||||
const view = backing.subarray(2, 5); // [0x02, 0x03, 0x04]
|
||||
const result = jsonReplacer('x', view);
|
||||
expect(result).toEqual({ __buffer_b64: view.toString('base64') });
|
||||
});
|
||||
|
||||
it('encodes Date as ISO string', () => {
|
||||
const d = new Date('2024-06-01T00:00:00.000Z');
|
||||
expect(jsonReplacer('x', d)).toBe('2024-06-01T00:00:00.000Z');
|
||||
});
|
||||
|
||||
it('passes through plain numbers unchanged', () => {
|
||||
expect(jsonReplacer('x', 42)).toBe(42);
|
||||
expect(jsonReplacer('x', -3.14)).toBe(-3.14);
|
||||
expect(jsonReplacer('x', 0)).toBe(0);
|
||||
});
|
||||
|
||||
it('passes through strings unchanged', () => {
|
||||
expect(jsonReplacer('x', 'hello')).toBe('hello');
|
||||
});
|
||||
|
||||
it('passes through null unchanged', () => {
|
||||
expect(jsonReplacer('x', null)).toBeNull();
|
||||
});
|
||||
|
||||
it('round-trips bigint + Buffer through JSON.parse with sentinel decoding', () => {
|
||||
const original = {
|
||||
big: BigInt('12345678901234567890'),
|
||||
buf: Buffer.from([0xca, 0xfe]),
|
||||
num: 99,
|
||||
};
|
||||
|
||||
const json = JSON.stringify(original, jsonReplacer);
|
||||
const parsed = JSON.parse(json) as {
|
||||
big: { __bigint: string };
|
||||
buf: { __buffer_b64: string };
|
||||
num: number;
|
||||
};
|
||||
|
||||
// Decode sentinels (simulating Processor-side decoder)
|
||||
expect(BigInt(parsed.big.__bigint)).toBe(BigInt('12345678901234567890'));
|
||||
expect(Buffer.from(parsed.buf.__buffer_b64, 'base64')).toEqual(
|
||||
Buffer.from([0xca, 0xfe]),
|
||||
);
|
||||
expect(parsed.num).toBe(99);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// 2. serializePosition field shape
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('serializePosition', () => {
|
||||
it('produces the expected top-level Redis fields', () => {
|
||||
const pos = makePosition();
|
||||
const fields = serializePosition(pos, '8');
|
||||
|
||||
expect(fields).toHaveProperty('ts', '2024-01-15T10:30:00.000Z');
|
||||
expect(fields).toHaveProperty('device_id', 'TEST123456789');
|
||||
expect(fields).toHaveProperty('codec', '8');
|
||||
expect(fields).toHaveProperty('payload');
|
||||
});
|
||||
|
||||
it('payload is valid JSON', () => {
|
||||
const pos = makePosition({ attributes: { speed_raw: 1234n, raw: Buffer.from([0xab]) } });
|
||||
const { payload } = serializePosition(pos, '8E');
|
||||
expect(() => JSON.parse(payload)).not.toThrow();
|
||||
});
|
||||
|
||||
it('payload round-trips bigint and Buffer sentinels', () => {
|
||||
const original = makePosition({
|
||||
attributes: {
|
||||
big_io: BigInt('18446744073709551615'), // u64 max
|
||||
buf_io: Buffer.from([0xde, 0xad]),
|
||||
num_io: 255,
|
||||
},
|
||||
});
|
||||
|
||||
const { payload } = serializePosition(original, '16');
|
||||
const parsed = JSON.parse(payload) as {
|
||||
attributes: Record<string, unknown>;
|
||||
};
|
||||
|
||||
const big = parsed.attributes['big_io'] as { __bigint: string };
|
||||
const buf = parsed.attributes['buf_io'] as { __buffer_b64: string };
|
||||
const num = parsed.attributes['num_io'] as number;
|
||||
|
||||
expect(BigInt(big.__bigint)).toBe(BigInt('18446744073709551615'));
|
||||
expect(Buffer.from(buf.__buffer_b64, 'base64')).toEqual(Buffer.from([0xde, 0xad]));
|
||||
expect(num).toBe(255);
|
||||
});
|
||||
|
||||
it('codec label "8E" is preserved verbatim', () => {
|
||||
const fields = serializePosition(makePosition(), '8E');
|
||||
expect(fields['codec']).toBe('8E');
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// 3. Bounded queue overflow
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('createPublisher — overflow', () => {
|
||||
it('throws PublishOverflowError when queue is at capacity', async () => {
|
||||
const redis = makeHangingRedis();
|
||||
// Capacity=3: worker picks up the 1st item (hangs on xadd), so the queue
|
||||
// drains to 0 after the first publish microtask. Subsequent publishes fill
|
||||
// it: 2nd→queue=1, 3rd→queue=2, 4th→queue=3 (full). 5th should overflow.
|
||||
const config = makeConfig({ PUBLISH_QUEUE_CAPACITY: 3 });
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const publisher = createPublisher(redis as any, config, makeSilentLogger(), makeMetrics());
|
||||
|
||||
await publisher.publish(makePosition({ device_id: 'A' }), '8'); // worker takes A → queue=0
|
||||
await publisher.publish(makePosition({ device_id: 'B' }), '8'); // queue=1
|
||||
await publisher.publish(makePosition({ device_id: 'C' }), '8'); // queue=2
|
||||
await publisher.publish(makePosition({ device_id: 'D' }), '8'); // queue=3 = capacity
|
||||
|
||||
// 5th publish should overflow
|
||||
await expect(publisher.publish(makePosition({ device_id: 'E' }), '8')).rejects.toBeInstanceOf(
|
||||
PublishOverflowError,
|
||||
);
|
||||
});
|
||||
|
||||
it('increments the overflow metric on overflow', async () => {
|
||||
const redis = makeHangingRedis();
|
||||
// Capacity=1: worker picks up the 1st item immediately (queue→0).
|
||||
// 2nd item fills queue to capacity=1. 3rd overflows.
|
||||
const config = makeConfig({ PUBLISH_QUEUE_CAPACITY: 1 });
|
||||
const metrics = makeMetrics();
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const publisher = createPublisher(redis as any, config, makeSilentLogger(), metrics);
|
||||
|
||||
await publisher.publish(makePosition({ device_id: 'A' }), '8'); // worker takes A
|
||||
await publisher.publish(makePosition({ device_id: 'B' }), '8'); // queue=1 = capacity
|
||||
await expect(publisher.publish(makePosition({ device_id: 'C' }), '8')).rejects.toBeInstanceOf(
|
||||
PublishOverflowError,
|
||||
);
|
||||
|
||||
expect(metrics.inc).toHaveBeenCalledWith('teltonika_publish_overflow_total', { codec: '8' });
|
||||
});
|
||||
|
||||
it('publish() resolves without waiting for XADD to complete (non-blocking guarantee)', async () => {
|
||||
// The hanging redis means XADD never completes. publish() must still return
|
||||
// promptly — it only enqueues, it does not await the XADD call itself.
|
||||
const redis = makeHangingRedis();
|
||||
const config = makeConfig({ PUBLISH_QUEUE_CAPACITY: 100 });
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const publisher = createPublisher(redis as any, config, makeSilentLogger(), makeMetrics());
|
||||
|
||||
const start = Date.now();
|
||||
await publisher.publish(makePosition(), '8');
|
||||
const elapsed = Date.now() - start;
|
||||
|
||||
// Should complete in well under 200ms regardless of Redis latency.
|
||||
// The worker may have already started (calling xadd which hangs), but
|
||||
// publish() itself returned before xadd resolved — that is the guarantee.
|
||||
expect(elapsed).toBeLessThan(200);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// 4. connectRedis retry helper
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('connectRedis', () => {
|
||||
it('calls process.exit(1) when Redis is unreachable after all retries', async () => {
|
||||
const exitSpy = vi.spyOn(process, 'exit').mockImplementation((_code) => {
|
||||
throw new Error('process.exit called');
|
||||
});
|
||||
|
||||
const logger = makeSilentLogger();
|
||||
|
||||
// Port 1 is almost certainly not listening
|
||||
await expect(
|
||||
connectRedis('redis://127.0.0.1:1', logger, 1),
|
||||
).rejects.toThrow('process.exit called');
|
||||
|
||||
expect(exitSpy).toHaveBeenCalledWith(1);
|
||||
|
||||
exitSpy.mockRestore();
|
||||
});
|
||||
}, 15_000);
|
||||
Reference in New Issue
Block a user