feat: task 2.4 WS client + rAF coalescer + Zustand position store
Eight files under src/live/:
- constants.ts: throughput-discipline numbers (MAX_TRAIL_LENGTH=200,
reconnect backoff [1s/2s/4s/8s/16s, 30s ceiling], STALE_CONNECTION_MS).
- protocol.ts: zod discriminatedUnion('type', ...) for inbound
(subscribed / unsubscribed / position / error). PositionEntry +
SubscribeResult types per processor-ws-contract.
- connection-store.ts: Zustand store with status state machine.
- position-store.ts: Zustand store with latestByDevice + trailsByDevice
Maps, applySnapshot/applyPositions/clearForEvent/selectDevice
actions. Ring-buffer cap on trails; same-coordinate dedup.
- coalescer.ts: ~30-line rAF coalescer. Per-device buffer; flushes once
per animation frame regardless of receive rate.
- ws-client.ts: state machine (idle / connecting / connected /
reconnecting / closed) with exponential backoff, re-subscribe on
reconnect, stale-connection check, subscribe correlation via
pending-map + 5s timeout. URL resolution helper toAbsoluteWsUrl()
for same-origin path inputs.
- bootstrap.tsx: <LiveBootstrap> creates the client when authenticated,
wires positions through the coalescer to the position store, tears
down on logout. getLiveClient() exposes the singleton for 2.7.
- index.ts: barrel re-exports.
main.tsx wraps <App /> in <LiveBootstrap> alongside <AuthBootstrap>.
Deviations:
1. Skipped the setStatus helper inside createLiveClient; conditional
Parameters<> generics were hostile. Direct
useConnectionStore.getState().setStatus(...) at the ~6 call sites.
2. subscribe() adds to the subscriptions Set even when not connected
(so it replays on reconnect). Caller handles 'not-connected' by
waiting for connection-store status transition.
3. onPosition returns an unsubscribe fn (Set-based). Multi-handler is
free; lets future debug panels/tests attach.
Bundle impact: src/live/ adds ~15KB raw to the main bundle (mostly zod's
discriminated-union runtime), bringing the total to 393KB raw / 120KB gzipped.
This commit is contained in:
@@ -0,0 +1,51 @@
|
||||
import type { PositionEntry } from './protocol';
|
||||
|
||||
export interface Coalescer {
|
||||
/** Buffer the latest position for `deviceId`. Replaces any earlier in-flight value. */
|
||||
push: (p: PositionEntry) => void;
|
||||
/** Drop the buffer and cancel any pending flush. Used on event-switch and shutdown. */
|
||||
cancel: () => void;
|
||||
}
|
||||
|
||||
/**
|
||||
* Per-frame coalescer at the WebSocket boundary.
|
||||
*
|
||||
* Every incoming `position` message lands in a per-device buffer; the
|
||||
* latest wins. One `requestAnimationFrame` tick flushes the snapshot to
|
||||
* the consumer (typically `usePositionStore.getState().applyPositions`).
|
||||
* That caps the dispatch rate at the browser's frame rate (~60 Hz)
|
||||
* regardless of how fast positions arrive.
|
||||
*
|
||||
* This is the discipline traccar-web lacks: per-message Redux dispatch
|
||||
* cascades through selectors and rebuilds full feature collections at
|
||||
* every position arrival, which is the most likely cause of its observed
|
||||
* lag at high update rates.
|
||||
*/
|
||||
export function createCoalescer(onFlush: (snapshot: PositionEntry[]) => void): Coalescer {
|
||||
const buffer = new Map<string, PositionEntry>();
|
||||
let rafId: number | null = null;
|
||||
|
||||
function flush(): void {
|
||||
rafId = null;
|
||||
if (buffer.size === 0) return;
|
||||
const snapshot = Array.from(buffer.values());
|
||||
buffer.clear();
|
||||
onFlush(snapshot);
|
||||
}
|
||||
|
||||
return {
|
||||
push(p) {
|
||||
buffer.set(p.deviceId, p);
|
||||
if (rafId === null) {
|
||||
rafId = requestAnimationFrame(flush);
|
||||
}
|
||||
},
|
||||
cancel() {
|
||||
buffer.clear();
|
||||
if (rafId !== null) {
|
||||
cancelAnimationFrame(rafId);
|
||||
rafId = null;
|
||||
}
|
||||
},
|
||||
};
|
||||
}
|
||||
Reference in New Issue
Block a user