Implement Phase 1 tasks 1.1-1.4 (scaffold + core types + config + Postgres)

Scaffold mirrors tcp-ingestion conventions: ESM, strict TS, pnpm, vitest
with unit/integration split, ESLint flat config with no-floating-promises
+ no-misused-promises + import/no-restricted-paths (the new src/core/ →
src/domain/ boundary that protects Phase 1 from Phase 2 churn).

Core types in src/core/types.ts (Position, StreamRecord, DeviceState,
Metrics, AttributeValue) — Position is byte-equivalent to tcp-ingestion's
output. Codec in src/core/codec.ts implements sentinel reversal:
{__bigint:"..."} → bigint, {__buffer_b64:"..."} → Buffer, ISO timestamp
string → Date. CodecError surfaces malformed payload reasons with the
failing field named.

Config in src/config/load.ts (zod schema, all 13 env vars with defaults
and bounded numerics). Logger in src/observability/logger.ts matches
tcp-ingestion exactly: ISO timestamps, string level labels, pino-pretty
in development.

Postgres in src/db/: createPool with sane defaults and application_name,
connectWithRetry mirroring the ioredis retry pattern, a 30-line
migration runner using a schema_migrations table, and 0001_positions.sql
with the hypertable + (device_id, ts) unique index + ts DESC index.
Migration runner unit-tested against a mocked pg.Pool; the real
TimescaleDB round-trip is deferred to task 1.10 per spec.

Verification: typecheck, lint, build all clean; 73 unit tests passing
across 4 files. import/no-restricted-paths verified live by temporarily
adding a forbidden src/domain/ import.
This commit is contained in:
2026-04-30 21:35:16 +02:00
parent c314ba0902
commit 95efc23139
28 changed files with 7427 additions and 13 deletions
+110
View File
@@ -0,0 +1,110 @@
import { z } from 'zod';
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/**
 * Builds a zod string validator that accepts only URLs whose scheme is one
 * of `accepted`. On success the original url string passes through
 * unchanged; on failure a custom issue describes what went wrong.
 */
function urlWithProtocol(accepted: string[]): z.ZodEffects<z.ZodString, string, string> {
  return z.string().superRefine((val, ctx) => {
    let url: URL;
    try {
      url = new URL(val);
    } catch {
      ctx.addIssue({ code: z.ZodIssueCode.custom, message: `Not a valid URL: "${val}"` });
      return;
    }
    // URL.protocol includes the trailing colon, e.g. "redis:" or "postgres:"
    const scheme = url.protocol.slice(0, -1);
    if (accepted.includes(scheme)) {
      return;
    }
    ctx.addIssue({
      code: z.ZodIssueCode.custom,
      message: `Expected protocol ${accepted.join(' or ')}:, got ${scheme}:`,
    });
  });
}
// ---------------------------------------------------------------------------
// Schema
// ---------------------------------------------------------------------------
/**
 * Raw environment schema. All numeric vars are coerced from their string env
 * form and bounded to sane ranges so a typo'd value fails fast at startup
 * instead of producing pathological runtime behavior.
 */
const ConfigSchema = z.object({
  NODE_ENV: z.enum(['development', 'test', 'production']).default('production'),
  INSTANCE_ID: z.string().min(1).default('processor-1'),
  LOG_LEVEL: z
    .enum(['fatal', 'error', 'warn', 'info', 'debug', 'trace'])
    .default('info'),
  // Required — no silent defaults for connectivity strings
  REDIS_URL: urlWithProtocol(['redis', 'rediss']),
  POSTGRES_URL: urlWithProtocol(['postgres', 'postgresql']),
  // Redis stream / group config — must match tcp-ingestion's output stream
  REDIS_TELEMETRY_STREAM: z.string().min(1).default('telemetry:t'),
  REDIS_CONSUMER_GROUP: z.string().min(1).default('processor'),
  // Consumer name defaults to INSTANCE_ID; resolved after schema parse (see below)
  REDIS_CONSUMER_NAME: z.string().min(1).optional(),
  // Observability
  METRICS_PORT: z.coerce.number().int().min(0).max(65535).default(9090),
  // Throughput tuning
  BATCH_SIZE: z.coerce.number().int().min(1).max(10_000).default(100),
  BATCH_BLOCK_MS: z.coerce.number().int().min(0).max(60_000).default(5_000),
  WRITE_BATCH_SIZE: z.coerce.number().int().min(1).max(1_000).default(50),
  // Per-device in-memory state LRU cap
  DEVICE_STATE_LRU_CAP: z.coerce.number().int().min(100).max(1_000_000).default(10_000),
});
// ---------------------------------------------------------------------------
// Config type
// ---------------------------------------------------------------------------
/**
 * `REDIS_CONSUMER_NAME` in the raw schema is optional (string | undefined).
 * After loading we fill it in with INSTANCE_ID if absent, so the exported
 * Config always has a non-optional consumer name — callers never need to
 * null-check it.
 */
type RawConfig = z.infer<typeof ConfigSchema>;
export type Config = Omit<RawConfig, 'REDIS_CONSUMER_NAME'> & {
  readonly REDIS_CONSUMER_NAME: string;
};
// ---------------------------------------------------------------------------
// loadConfig
// ---------------------------------------------------------------------------
/**
 * Reads `process.env`, validates with zod, and returns a fully typed Config.
 *
 * On validation failure, throws an Error whose message lists every invalid
 * field on its own line — loud, fast failure rather than running with bad
 * configuration.
 *
 * @param env optional env map so tests can inject arbitrary values without
 *            touching process.env; defaults to process.env.
 */
export function loadConfig(env: Record<string, string | undefined> = process.env): Config {
  const parsed = ConfigSchema.safeParse(env);
  if (!parsed.success) {
    const lines: string[] = [];
    for (const issue of parsed.error.issues) {
      lines.push(`  ${issue.path.join('.')}: ${issue.message}`);
    }
    throw new Error(
      `Configuration error — invalid or missing environment variables:\n${lines.join('\n')}`,
    );
  }
  const data: RawConfig = parsed.data;
  // Default REDIS_CONSUMER_NAME to INSTANCE_ID — both must be unique per
  // instance for safe consumer-group operation (see .env.example).
  return {
    ...data,
    REDIS_CONSUMER_NAME: data.REDIS_CONSUMER_NAME ?? data.INSTANCE_ID,
  };
}
+225
View File
@@ -0,0 +1,225 @@
/**
* Sentinel decoder for Position records arriving from the Redis Stream.
*
* tcp-ingestion serializes Position objects with a custom JSON replacer that
* encodes types not natively supported by JSON:
* - bigint → { __bigint: "<decimal-digits>" }
* - Buffer → { __buffer_b64: "<base64>" }
* - Date → ISO8601 string
*
* This module reverses that encoding so the Processor receives fully-typed
* Position objects. The contract is documented in:
* docs/wiki/concepts/position-record.md
* tcp-ingestion/src/core/publish.ts (jsonReplacer)
*/
import type { Position, AttributeValue } from './types.js';
// ---------------------------------------------------------------------------
// Error type
// ---------------------------------------------------------------------------
/**
 * Raised for any malformed Position payload: invalid JSON, a bad sentinel,
 * a missing/invalid required field, or a wrong-typed attribute value. The
 * message names the offending field or sentinel so the producer-side bug
 * can be traced from logs.
 */
export class CodecError extends Error {
  // Stable name (instead of the default 'Error') so log lines and err.name
  // checks identify codec failures without needing instanceof.
  override readonly name = 'CodecError';
  constructor(message: string, options?: ErrorOptions) {
    super(message, options);
  }
}
// ---------------------------------------------------------------------------
// Sentinel detection helpers
// ---------------------------------------------------------------------------
/**
 * Returns true when the value is exactly `{ __bigint: "<string>" }`.
 * The shape must have exactly one key — extra keys mean a user-defined
 * object that merely happens to contain a `__bigint` field, which is not a
 * sentinel. tcp-ingestion only ever emits single-key sentinels, so we
 * validate strictly.
 */
function isBigintSentinel(value: unknown): value is { __bigint: string } {
  if (value === null || typeof value !== 'object') {
    return false;
  }
  const record = value as Record<string, unknown>;
  const keys = Object.keys(record);
  if (keys.length !== 1 || keys[0] !== '__bigint') {
    return false;
  }
  return typeof record['__bigint'] === 'string';
}
/**
 * Returns true when the value is exactly `{ __buffer_b64: "<string>" }` —
 * a single-key object, same strictness as the bigint sentinel.
 */
function isBufferSentinel(value: unknown): value is { __buffer_b64: string } {
  if (value === null || typeof value !== 'object') {
    return false;
  }
  const record = value as Record<string, unknown>;
  const keys = Object.keys(record);
  if (keys.length !== 1 || keys[0] !== '__buffer_b64') {
    return false;
  }
  return typeof record['__buffer_b64'] === 'string';
}
// ---------------------------------------------------------------------------
// Reviver
// ---------------------------------------------------------------------------
/**
 * JSON.parse reviver that reverses the tcp-ingestion sentinel encoding.
 *
 * JSON.parse invokes the reviver bottom-up (leaves before containers), so by
 * the time `attributes` itself is visited, every attribute value has already
 * been rewritten to its live type (bigint/Buffer).
 *
 * Returns `unknown` because the produced type depends on the key; the public
 * entry point validates and casts the final document after parsing.
 *
 * Throws CodecError on an unparseable timestamp or a malformed sentinel.
 */
function reviver(key: string, value: unknown): unknown {
  // bigint sentinel: { __bigint: "<decimal digits>" } → bigint
  if (isBigintSentinel(value)) {
    const digits = value.__bigint;
    // Optional leading minus accepted for completeness, although Teltonika
    // IO elements are unsigned in practice.
    if (!/^-?\d+$/.test(digits)) {
      throw new CodecError(
        `Malformed __bigint sentinel: expected decimal digits, got "${digits}"`,
      );
    }
    return BigInt(digits);
  }
  // Buffer sentinel: { __buffer_b64: "<base64>" } → Buffer
  if (isBufferSentinel(value)) {
    const b64 = value.__buffer_b64;
    // Accept the standard and URL-safe base64 alphabets, with optional padding.
    if (!/^[A-Za-z0-9+/\-_]*={0,2}$/.test(b64)) {
      throw new CodecError(
        `Malformed __buffer_b64 sentinel: invalid base64 string "${b64}"`,
      );
    }
    return Buffer.from(b64, 'base64');
  }
  // Timestamp field: ISO string → Date
  if (key === 'timestamp' && typeof value === 'string') {
    const parsedDate = new Date(value);
    if (isNaN(parsedDate.getTime())) {
      throw new CodecError(`Invalid timestamp value: "${value}"`);
    }
    return parsedDate;
  }
  return value;
}
// ---------------------------------------------------------------------------
// Required field validation
// ---------------------------------------------------------------------------
// Numeric Position fields that must be present after decoding. Array order
// matters only for error reporting: the first missing or mistyped field is
// the one named in the thrown CodecError.
const REQUIRED_NUMERIC_FIELDS = [
  'latitude',
  'longitude',
  'altitude',
  'angle',
  'speed',
  'satellites',
  'priority',
] as const;
/**
 * Asserts the decoded object carries every required Position field with the
 * correct runtime type. Throws `CodecError` naming the first failing field,
 * so a malformed payload can be traced back to the producer.
 */
function validateDecodedPosition(obj: Record<string, unknown>): asserts obj is {
  device_id: string;
  timestamp: Date;
  latitude: number;
  longitude: number;
  altitude: number;
  angle: number;
  speed: number;
  satellites: number;
  priority: number;
  attributes: Record<string, AttributeValue>;
} {
  const deviceId = obj['device_id'];
  if (typeof deviceId !== 'string' || deviceId.length === 0) {
    throw new CodecError('Missing or invalid field: device_id (expected non-empty string)');
  }
  if (!(obj['timestamp'] instanceof Date)) {
    throw new CodecError(
      'Missing or invalid field: timestamp (expected Date after reviver; was ISO string decoded?)',
    );
  }
  for (const field of REQUIRED_NUMERIC_FIELDS) {
    const candidate = obj[field];
    if (typeof candidate !== 'number') {
      throw new CodecError(
        `Missing or invalid field: ${field} (expected number, got ${typeof obj[field]})`,
      );
    }
  }
  const attributes = obj['attributes'];
  if (attributes === null || typeof attributes !== 'object') {
    throw new CodecError('Missing or invalid field: attributes (expected object)');
  }
  // priority must be exactly one of the three wire values
  const priority = obj['priority'] as number;
  if (![0, 1, 2].includes(priority)) {
    throw new CodecError(
      `Invalid field: priority (expected 0 | 1 | 2, got ${priority})`,
    );
  }
  // every attribute value must be one of the three AttributeValue shapes
  for (const [attrKey, attrVal] of Object.entries(attributes as Record<string, unknown>)) {
    const valid =
      typeof attrVal === 'number' ||
      typeof attrVal === 'bigint' ||
      Buffer.isBuffer(attrVal);
    if (!valid) {
      throw new CodecError(
        `Invalid attribute "${attrKey}": expected number | bigint | Buffer, got ${typeof attrVal}`,
      );
    }
  }
}
// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------
/**
 * Decodes a JSON-encoded Position string (with sentinel encoding applied by
 * tcp-ingestion's `serializePosition`) into a fully-typed `Position` object.
 *
 * @throws CodecError when the JSON is malformed, a sentinel is invalid, a
 *         required field is missing, or a field has the wrong type.
 */
export function decodePosition(payload: string): Position {
  let parsed: unknown;
  try {
    parsed = JSON.parse(payload, reviver);
  } catch (err) {
    // The reviver throws CodecError directly; rethrow it unwrapped so the
    // specific sentinel/timestamp message survives.
    if (err instanceof CodecError) {
      throw err;
    }
    const reason = err instanceof Error ? err.message : String(err);
    throw new CodecError(`Failed to parse Position payload as JSON: ${reason}`, { cause: err });
  }
  if (Array.isArray(parsed) || parsed === null || typeof parsed !== 'object') {
    throw new CodecError('Position payload must be a JSON object');
  }
  const candidate = parsed as Record<string, unknown>;
  validateDecodedPosition(candidate);
  return candidate as unknown as Position;
}
+94
View File
@@ -0,0 +1,94 @@
/**
* Canonical TypeScript types for the Processor service.
*
* `Position` is the boundary contract received from the Redis Stream (produced
* by tcp-ingestion). All other types here are Processor-internal — they describe
* what flows through the pipeline, not what crosses service boundaries.
*/
// ---------------------------------------------------------------------------
// Shared value types
// ---------------------------------------------------------------------------
/**
 * A single IO attribute value from the Teltonika AVL record.
 * - number : fixed-width IO elements (N1/N2/N4 — fit safely in JS number)
 * - bigint : N8 elements (u64, may exceed Number.MAX_SAFE_INTEGER)
 * - Buffer : NX variable-length elements (Codec 8 Extended)
 *
 * Reconstructed from the wire sentinels by src/core/codec.ts.
 */
export type AttributeValue = number | bigint | Buffer;
// ---------------------------------------------------------------------------
// Position — input contract from tcp-ingestion
// ---------------------------------------------------------------------------
/**
 * Normalized GPS position record. Byte-equivalent to tcp-ingestion's `Position`
 * type (docs/wiki/concepts/position-record.md).
 *
 * `priority` is typed as a union rather than `number` to stay consistent with
 * tcp-ingestion and make exhaustive switches possible in domain logic.
 */
export type Position = {
  readonly device_id: string;
  readonly timestamp: Date;
  readonly latitude: number;
  readonly longitude: number;
  readonly altitude: number;
  readonly angle: number; // heading, 0-360 degrees
  readonly speed: number; // km/h; 0 may mean "GPS invalid" — preserve verbatim
  readonly satellites: number;
  readonly priority: 0 | 1 | 2; // 0=Low, 1=High, 2=Panic
  readonly attributes: Readonly<Record<string, AttributeValue>>;
};
// ---------------------------------------------------------------------------
// StreamRecord — raw shape returned by XREADGROUP before codec decoding
// ---------------------------------------------------------------------------
/**
 * The flat field-value record as written by tcp-ingestion's `serializePosition`.
 * The `payload` field contains a JSON-encoded `Position` with sentinel encoding
 * applied (`__bigint`, `__buffer_b64`). The consumer calls `decodePosition` on
 * `payload` to reconstruct the live `Position` object.
 *
 * Top-level `ts`, `device_id`, and `codec` fields allow downstream filtering
 * without JSON parsing; `payload` is the source of truth.
 */
export type StreamRecord = {
  readonly id: string; // Redis Stream entry ID, e.g. "1714488000000-0"
  readonly ts: string; // ISO8601 timestamp (same value as Position.timestamp)
  readonly device_id: string;
  readonly codec: string; // '8' | '8E' | '16'
  readonly payload: string; // JSON-encoded Position with sentinel encoding
};
// ---------------------------------------------------------------------------
// DeviceState — per-device in-memory runtime state
// ---------------------------------------------------------------------------
/**
 * Runtime state maintained per-device in the LRU map (task 1.6).
 * Bounded by DEVICE_STATE_LRU_CAP; evicted devices are rehydrated from Postgres
 * on next packet (Phase 3 — Phase 1 treats restart/eviction as a state loss).
 */
export type DeviceState = {
  readonly device_id: string;
  readonly last_position: Position;
  readonly last_seen: Date;
  // Positions processed for this device since the state entry was created;
  // resets on eviction/restart (see note above about Phase 1 state loss).
  readonly position_count_session: number;
};
// ---------------------------------------------------------------------------
// Metrics — observability surface
// ---------------------------------------------------------------------------
/**
 * Minimal metrics interface exposed to pipeline components. Concrete
 * implementation (prom-client) lands in task 1.9; this keeps types stable
 * through tasks 1.2-1.8.
 */
export type Metrics = {
  // Increment a counter by 1, with optional label key/values.
  readonly inc: (name: string, labels?: Record<string, string>) => void;
  // Record one observation of a histogram/summary-style metric.
  readonly observe: (name: string, value: number, labels?: Record<string, string>) => void;
};
+117
View File
@@ -0,0 +1,117 @@
/**
* Minimal SQL migration runner.
*
* Tracks applied migrations in a `schema_migrations` table (created on first
* run). Discovers migration files by reading the `migrations/` directory
* adjacent to this file, sorted lexicographically by filename. Each migration
* runs inside a transaction; failure rolls back that migration only.
*
* Idempotent: re-running against a database where all migrations are already
* applied is a no-op (every file is checked before execution).
*
* Not a framework — the Processor has one migration file in Phase 1. A 30-line
* runner is the right answer per the task spec.
*/
import { readdir, readFile } from 'node:fs/promises';
import { join, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';
import type pg from 'pg';
import type { Logger } from 'pino';
// Migrations live in `migrations/` adjacent to this module; resolved via
// import.meta.url so the path holds wherever the module is executed from.
const MIGRATIONS_DIR = join(dirname(fileURLToPath(import.meta.url)), 'migrations');
// ---------------------------------------------------------------------------
// Schema migrations table bootstrap
// ---------------------------------------------------------------------------
// Idempotent bootstrap of the tracking table: one row per applied migration
// file, keyed by filename.
const CREATE_MIGRATIONS_TABLE_SQL = `
CREATE TABLE IF NOT EXISTS schema_migrations (
  version text PRIMARY KEY,
  applied_at timestamptz NOT NULL DEFAULT now()
)
`;
// ---------------------------------------------------------------------------
// Public runner
// ---------------------------------------------------------------------------
/**
 * Applies every pending migration in `src/db/migrations/`, in filename order.
 * Each migration file is wrapped in its own transaction; migrations already
 * recorded in schema_migrations are skipped with an info log.
 *
 * @param pool   connected pg pool (bootstrap query runs directly on it)
 * @param logger pino logger for per-migration progress
 */
export async function runMigrations(pool: pg.Pool, logger: Logger): Promise<void> {
  // The tracking table must exist before we can ask which versions applied.
  await pool.query(CREATE_MIGRATIONS_TABLE_SQL);
  const files = await discoverMigrationFiles();
  for (const version of files) {
    // Version key is simply the filename (e.g. "0001_positions.sql").
    if (await isMigrationApplied(pool, version)) {
      logger.info({ version }, 'migration already applied; skipping');
      continue;
    }
    const sql = await readMigrationFile(version);
    await applyMigration(pool, version, sql, logger);
  }
}
// ---------------------------------------------------------------------------
// Internals
// ---------------------------------------------------------------------------
/**
 * Lists `*.sql` files in the migrations directory in lexicographic order
 * (the `NNNN_` filename prefix makes that the application order).
 */
async function discoverMigrationFiles(): Promise<string[]> {
  const entries = await readdir(MIGRATIONS_DIR);
  const sqlFiles = entries.filter((name) => name.endsWith('.sql'));
  sqlFiles.sort();
  return sqlFiles;
}
/** Reads one migration file's SQL text from the migrations directory. */
async function readMigrationFile(filename: string): Promise<string> {
  const path = join(MIGRATIONS_DIR, filename);
  return readFile(path, 'utf8');
}
/** Returns true when `version` is already recorded in schema_migrations. */
async function isMigrationApplied(pool: pg.Pool, version: string): Promise<boolean> {
  const result = await pool.query<{ exists: boolean }>(
    'SELECT EXISTS(SELECT 1 FROM schema_migrations WHERE version = $1) AS exists',
    [version],
  );
  // noUncheckedIndexedAccess: an empty result leaves rows[0] undefined.
  const row = result.rows[0];
  return row === undefined ? false : row.exists;
}
/**
 * Applies a single migration inside a transaction. Logs success at `info` and
 * throws on any SQL error (the caller bubbles it up — no silent skip).
 *
 * If ROLLBACK itself fails (e.g. the connection died mid-migration), the
 * rollback error is logged separately and the ORIGINAL migration error is
 * rethrown, so the root cause is never masked by the cleanup failure.
 */
async function applyMigration(
  pool: pg.Pool,
  version: string,
  sql: string,
  logger: Logger,
): Promise<void> {
  const client = await pool.connect();
  try {
    await client.query('BEGIN');
    await client.query(sql);
    await client.query(
      'INSERT INTO schema_migrations (version) VALUES ($1)',
      [version],
    );
    await client.query('COMMIT');
    logger.info({ version }, 'migration applied');
  } catch (err) {
    // Guard the rollback: a throw here would replace `err` as the exception
    // leaving this function, hiding the actual migration failure.
    try {
      await client.query('ROLLBACK');
    } catch (rollbackErr) {
      logger.error({ err: rollbackErr, version }, 'ROLLBACK failed after migration error');
    }
    logger.error({ err, version }, 'migration failed; rolled back');
    throw err;
  } finally {
    client.release();
  }
}
+60
View File
@@ -0,0 +1,60 @@
-- Migration: 0001_positions
-- Creates the positions hypertable owned by the Processor service.
--
-- Schema authority note: this is the ONLY table whose schema the Processor
-- owns directly (per ROADMAP.md design rule #2). All other tables Processor
-- writes to (timing_records, stage_results, etc.) are defined in Directus.
-- Do NOT modify this table from the Directus admin UI.
-- Enable TimescaleDB extension (no-op if already installed at the DB level).
CREATE EXTENSION IF NOT EXISTS timescaledb;
-- Raw position history. High-volume append-only table; the hypertable
-- partitioning column is `ts` (device-reported GPS time).
--
-- Column notes:
--   device_id   text        — IMEIs are 15 ASCII digits. text keeps the door
--                             open for non-IMEI device identifiers (future
--                             vendors) and avoids any leading-zero loss.
--   ts          timestamptz — device-reported event time. This is the
--                             hypertable partition column. NOT ingestion time.
--   ingested_at timestamptz — when Processor wrote the row. Useful for
--                             diagnosing clock skew or buffered record flushes.
--   altitude/angle/speed real — float32 is sufficient precision; saves space
--                             on a high-volume append-only table.
--   attributes  jsonb       — verbatim IO bag from the AVL record, with bigint
--                             values stored as decimal strings and Buffer values
--                             stored as base64 strings (see task 1.4 spec).
--                             No naming or unit conversion here; that is Phase 2.
CREATE TABLE IF NOT EXISTS positions (
  device_id text NOT NULL,
  ts timestamptz NOT NULL,
  ingested_at timestamptz NOT NULL DEFAULT now(),
  latitude double precision NOT NULL,
  longitude double precision NOT NULL,
  altitude real NOT NULL,
  angle real NOT NULL,
  speed real NOT NULL,
  satellites smallint NOT NULL,
  priority smallint NOT NULL,
  codec text NOT NULL,
  attributes jsonb NOT NULL
);
-- Convert to TimescaleDB hypertable partitioned by event time.
-- chunk_time_interval = 1 day is appropriate for GPS telemetry where queries
-- typically span hours-to-days and devices send at 1-60 second intervals.
SELECT create_hypertable(
  'positions',
  'ts',
  if_not_exists => TRUE,
  chunk_time_interval => INTERVAL '1 day'
);
-- Unique constraint: natural key for idempotent upserts.
-- ON CONFLICT (device_id, ts) DO NOTHING ensures a replayed or duplicated
-- record does not create a second row (ROADMAP.md design rule #5).
CREATE UNIQUE INDEX IF NOT EXISTS positions_device_ts ON positions (device_id, ts);
-- Descending ts index for range queries (most recent positions first).
CREATE INDEX IF NOT EXISTS positions_ts ON positions (ts DESC);
+71
View File
@@ -0,0 +1,71 @@
import pg from 'pg';
import type { Logger } from 'pino';
// ---------------------------------------------------------------------------
// Pool factory
// ---------------------------------------------------------------------------
/**
 * Creates a pg.Pool configured for the Processor service.
 *
 * `application_name` is set so connections are identifiable in
 * pg_stat_activity when debugging slow queries or connection exhaustion.
 */
export function createPool(url: string): pg.Pool {
  return new pg.Pool({
    connectionString: url,
    application_name: 'processor',
    // Modest pool size: the Processor is a single batching writer. Timeouts
    // keep idle connections from piling up and bound connect-time stalls.
    max: 10,
    idleTimeoutMillis: 30_000,
    connectionTimeoutMillis: 5_000,
  });
}
// ---------------------------------------------------------------------------
// Startup connectivity check
// ---------------------------------------------------------------------------
/**
 * Verifies Postgres connectivity on startup with exponential-backoff retry.
 *
 * Issues `SELECT 1` up to `maxAttempts` times, backing off 200ms, 400ms, ...
 * capped at 5s. Mirrors tcp-ingestion's `connectRedis` pattern so operators
 * see the same failure shape in logs regardless of which dependency is down.
 *
 * Calls `process.exit(1)` on final failure rather than throwing — the
 * orchestrator (Docker/systemd) restarts the process.
 */
export async function connectWithRetry(
  pool: pg.Pool,
  logger: Logger,
  maxAttempts = 3,
): Promise<void> {
  let attempt = 0;
  while (attempt < maxAttempts) {
    attempt += 1;
    try {
      const client = await pool.connect();
      try {
        await client.query('SELECT 1');
      } finally {
        client.release();
      }
      logger.info({ attempt }, 'Postgres connected');
      return;
    } catch (err) {
      if (attempt >= maxAttempts) {
        logger.fatal({ err }, 'Postgres connection failed after all retries; exiting');
        process.exit(1);
      }
      const backoffMs = Math.min(200 * 2 ** (attempt - 1), 5_000);
      logger.warn(
        { err, attempt, maxAttempts, backoffMs },
        'Postgres connection failed; retrying',
      );
      await new Promise<void>((resolve) => setTimeout(resolve, backoffMs));
    }
  }
  // TypeScript: unreachable — the loop either returns or exits the process.
  /* c8 ignore next */
  throw new Error('unreachable');
}
+28
View File
@@ -0,0 +1,28 @@
import { loadConfig } from './config/load.js';
import type { Config } from './config/load.js';
import { createLogger } from './observability/logger.js';
// -------------------------------------------------------------------------
// Startup: validate config (fail fast on bad env), build logger
// -------------------------------------------------------------------------
/**
 * Loads and validates configuration, printing the human-readable error to
 * stderr and exiting 1 on failure. The logger does not exist yet at this
 * point, so process.stderr is the only available output channel.
 */
function loadConfigOrExit(): Config {
  try {
    return loadConfig();
  } catch (err) {
    process.stderr.write(`${err instanceof Error ? err.message : String(err)}\n`);
    return process.exit(1);
  }
}
const config = loadConfigOrExit();
const logger = createLogger({
  level: config.LOG_LEVEL,
  nodeEnv: config.NODE_ENV,
  instanceId: config.INSTANCE_ID,
});
logger.info('processor starting');
// Consumer, writer, and state wiring land in tasks 1.5-1.8.
process.exit(0);
+52
View File
@@ -0,0 +1,52 @@
import pino from 'pino';
import type { Logger } from 'pino';
export type { Logger };
/**
 * Builds the root pino logger. Called once at startup with config values.
 *
 * In development, pino-pretty renders human-readable output (declared as a
 * lazy transport so it is never loaded in production paths). In test and
 * production, raw JSON is emitted — fast and parseable by log aggregators
 * (Portainer, Loki, etc.).
 */
export function createLogger(options: {
  level: string;
  nodeEnv: string;
  instanceId: string;
}): Logger {
  const { level, nodeEnv, instanceId } = options;
  // Shared across environments:
  // - bound service/instance fields on every line
  // - ISO-8601 string timestamps (vs default epoch-ms) so downstream log
  //   renderers keep precision
  // - string level labels (`"level":"info"` instead of pino's numeric 30)
  const shared = {
    level,
    base: {
      service: 'processor',
      instance_id: instanceId,
    },
    timestamp: pino.stdTimeFunctions.isoTime,
    formatters: {
      level: (label: string) => ({ level: label }),
    },
  };
  if (nodeEnv === 'development') {
    return pino({
      ...shared,
      transport: {
        target: 'pino-pretty',
        options: {
          colorize: true,
          translateTime: 'SYS:standard',
          ignore: 'pid,hostname',
        },
      },
    });
  }
  // Production and test: plain JSON, no extra deps.
  return pino(shared);
}