mirror of
https://github.com/koala73/worldmonitor.git
synced 2026-04-25 17:14:57 +02:00
* fix(intelligence): read regional-snapshot latestKey as raw string
Regional Intelligence panel rendered "No snapshot available yet" for every
region despite the 6h cron writing per-region snapshots successfully. Root
cause: writer/reader encoding mismatch.
Writer (scripts/regional-snapshot/persist-snapshot.mjs:60) stores the
snapshot_id pointer via `['SET', latestKey, snapshotId]` — a BARE string,
not JSON.stringify'd. The seeder's own reader (line 97) reads it as-is
and works.
Vercel RPC handler used `getCachedJson(latestKey, true)`, which internally
does `JSON.parse(data.result)`. `JSON.parse('mena-20260421T000000-steady')`
throws; the try/catch silently returns null; handler returns {}; panel
renders empty.
Fix: new `getCachedRawString()` helper in server/_shared/redis.ts that
reads a Redis key as-is with no JSON.parse. Handler uses it for the
latestKey read (while still using getCachedJson for the snapshot-by-id
payload, which IS JSON.stringify'd). No writer or backfill change needed.
Regression guard: new structural test asserts the handler reads latestKey
specifically via getCachedRawString so a future refactor can't silently
revert to getCachedJson and re-break every region.
Health.js monitors the summary key (intelligence:regional-snapshots:
summary:v1), which stays green because summary writes succeed. Per-region
health probes would be worth adding as a follow-up.
* fix(redis): detect AbortSignal.timeout() as TimeoutError too
Greptile P2 on PR #3302: AbortSignal.timeout() throws a DOMException with
name='TimeoutError' on V8 runtimes (Vercel Edge included). The existing
isTimeout check only matched name==='AbortError' — what you'd get from a
manual controller.abort() — so the [REDIS-TIMEOUT] structured log never
fired. Every redis fetch timeout silently fell through to the generic
console.warn branch, eroding the Sentry drain we added specifically to
catch these per docs/plans/chokepoint-rpc-payload-split.md.
Fix both getCachedJson (pre-existing) and the new getCachedRawString by
matching TimeoutError OR AbortError — covers both the current
AbortSignal.timeout() path and any future switch to manual AbortController.
Pre-existing copy in getCachedJson fixed in the same edit since it's the
same file and the same observability hole.
* test(redis): update isTimeout regex to match new TimeoutError|AbortError check
Pre-push hook caught the brittle static-analysis test in
tests/get-chokepoint-history.test.mjs:83 that asserted the exact old
single-name pattern. Update the regex (and description) to cover both
TimeoutError and AbortError, matching the observability fix in the
previous commit.
95 lines
4.1 KiB
JavaScript
import { describe, it } from 'node:test';
|
|
import assert from 'node:assert/strict';
|
|
import { readFileSync } from 'node:fs';
|
|
import { dirname, resolve } from 'node:path';
|
|
import { fileURLToPath } from 'node:url';
|
|
|
|
const __dirname = dirname(fileURLToPath(import.meta.url));
|
|
const root = resolve(__dirname, '..');
|
|
const handlerSrc = readFileSync(
|
|
resolve(root, 'server/worldmonitor/supply-chain/v1/get-chokepoint-history.ts'),
|
|
'utf-8',
|
|
);
|
|
const handlerMapSrc = readFileSync(
|
|
resolve(root, 'server/worldmonitor/supply-chain/v1/handler.ts'),
|
|
'utf-8',
|
|
);
|
|
|
|
describe('get-chokepoint-history handler (source analysis)', () => {
|
|
it('reads from the per-id history key prefix', () => {
|
|
assert.match(handlerSrc, /supply_chain:transit-summaries:history:v1:/);
|
|
});
|
|
|
|
it('uses getCachedJson in raw mode (unprefixed key)', () => {
|
|
assert.match(handlerSrc, /getCachedJson\(`\$\{HISTORY_KEY_PREFIX\}\$\{id\}`,\s*true\)/);
|
|
});
|
|
|
|
it('validates chokepointId against the canonical set', () => {
|
|
assert.match(handlerSrc, /CANONICAL_CHOKEPOINTS/);
|
|
assert.match(handlerSrc, /VALID_IDS\.has\(id\)/);
|
|
});
|
|
|
|
it('returns empty history with fetchedAt=0 on invalid id, missing key, or error', () => {
|
|
// Invalid id branch
|
|
assert.match(handlerSrc, /!id\s*\|\|\s*!VALID_IDS\.has\(id\)/);
|
|
// Missing key / non-array branch
|
|
assert.match(handlerSrc, /!payload\s*\|\|\s*!Array\.isArray\(payload\.history\)/);
|
|
// Catch block returns empty history (all three paths return fetchedAt '0')
|
|
const emptyReturns = [...handlerSrc.matchAll(/fetchedAt:\s*'0'/g)];
|
|
assert.ok(emptyReturns.length >= 3, `expected 3+ fetchedAt:'0' returns, got ${emptyReturns.length}`);
|
|
});
|
|
|
|
it('is wired into the SupplyChainService handler map', () => {
|
|
assert.match(handlerMapSrc, /import\s+\{\s*getChokepointHistory\s*\}/);
|
|
assert.match(handlerMapSrc, /\bgetChokepointHistory,/);
|
|
});
|
|
});
|
|
|
|
describe('proto wiring', () => {
|
|
const protoSrc = readFileSync(
|
|
resolve(root, 'proto/worldmonitor/supply_chain/v1/service.proto'),
|
|
'utf-8',
|
|
);
|
|
const historyProto = readFileSync(
|
|
resolve(root, 'proto/worldmonitor/supply_chain/v1/get_chokepoint_history.proto'),
|
|
'utf-8',
|
|
);
|
|
|
|
it('service.proto imports and registers GetChokepointHistory', () => {
|
|
assert.match(protoSrc, /import "worldmonitor\/supply_chain\/v1\/get_chokepoint_history\.proto"/);
|
|
assert.match(protoSrc, /rpc GetChokepointHistory\(GetChokepointHistoryRequest\) returns \(GetChokepointHistoryResponse\)/);
|
|
assert.match(protoSrc, /path:\s*"\/get-chokepoint-history",\s*method:\s*HTTP_METHOD_GET/);
|
|
});
|
|
|
|
it('GetChokepointHistoryRequest requires chokepoint_id as a query param', () => {
|
|
assert.match(historyProto, /\(buf\.validate\.field\)\.required\s*=\s*true/);
|
|
assert.match(historyProto, /\(sebuf\.http\.query\)\s*=\s*\{name:\s*"chokepointId"\}/);
|
|
});
|
|
|
|
it('GetChokepointHistoryResponse carries chokepoint_id, history, fetched_at', () => {
|
|
assert.match(historyProto, /string chokepoint_id\s*=\s*1/);
|
|
assert.match(historyProto, /repeated TransitDayCount history\s*=\s*2/);
|
|
assert.match(historyProto, /int64 fetched_at\s*=\s*3/);
|
|
});
|
|
});
|
|
|
|
describe('Redis timeout observability', () => {
|
|
const redisSrc = readFileSync(resolve(root, 'server/_shared/redis.ts'), 'utf-8');
|
|
|
|
it('logs [REDIS-TIMEOUT] with key and timeoutMs on timeout (TimeoutError or AbortError)', () => {
|
|
// Grepable tag that log drains / Sentry-Vercel integration can pick up —
|
|
// before this, large-payload timeouts silently returned null and consumers
|
|
// cached zero-state. See docs/plans/chokepoint-rpc-payload-split.md.
|
|
//
|
|
// AbortSignal.timeout() throws DOMException name='TimeoutError' (V8
|
|
// runtimes incl. Vercel Edge); manual controller.abort() throws
|
|
// 'AbortError'. The predicate must match both — historically only
|
|
// 'AbortError' was checked and every real timeout silently fell through.
|
|
assert.match(
|
|
redisSrc,
|
|
/isTimeout\s*=\s*err instanceof Error && \(err\.name === 'TimeoutError' \|\| err\.name === 'AbortError'\)/,
|
|
);
|
|
assert.match(redisSrc, /\[REDIS-TIMEOUT\] getCachedJson key=\$\{key\} timeoutMs=\$\{REDIS_OP_TIMEOUT_MS\}/);
|
|
});
|
|
});
|