From 21a47b9082f49761b7d24ca9cfe70ad815158877 Mon Sep 17 00:00:00 2001 From: Richard Gibson Date: Mon, 27 Jan 2025 18:02:46 -0500 Subject: [PATCH 1/3] refactor(cosmic-swingset): Minor cleanup in kernel-stats.js * variable and parameter names * imported OpenTelemetry types * "TODO" comments for future architectural cleanup * JSDoc descriptions --- packages/cosmic-swingset/src/kernel-stats.js | 89 ++++++++++---------- 1 file changed, 43 insertions(+), 46 deletions(-) diff --git a/packages/cosmic-swingset/src/kernel-stats.js b/packages/cosmic-swingset/src/kernel-stats.js index 3f9e9438628..9faac70cbb9 100644 --- a/packages/cosmic-swingset/src/kernel-stats.js +++ b/packages/cosmic-swingset/src/kernel-stats.js @@ -12,18 +12,17 @@ import { KERNEL_STATS_UPDOWN_METRICS, } from '@agoric/swingset-vat/src/kernel/metrics.js'; -// import { diag, DiagConsoleLogger, DiagLogLevel } from '@opentelemetry/api'; - -// diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.VERBOSE); - -/** @import {MetricAttributes as Attributes} from '@opentelemetry/api' */ -/** @import {Histogram} from '@opentelemetry/api' */ - import { getTelemetryProviders as getTelemetryProvidersOriginal } from '@agoric/telemetry'; import v8 from 'node:v8'; import process from 'node:process'; +/** @import {Histogram, Meter as OTelMeter, MetricAttributes} from '@opentelemetry/api' */ + +// import { diag, DiagConsoleLogger, DiagLogLevel } from '@opentelemetry/api'; + +// diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.VERBOSE); + /** * TODO Would be nice somehow to label the vats individually, but it's too * high cardinality for us unless we can somehow limit the number of active @@ -48,7 +47,9 @@ export const HISTOGRAM_MS_LATENCY_BOUNDARIES = [ export const HISTOGRAM_SECONDS_LATENCY_BOUNDARIES = HISTOGRAM_MS_LATENCY_BOUNDARIES.map(ms => ms / 1000); -const baseMetricOptions = /** @type {const} */ ({ +// TODO: Validate these boundaries. We're not going to have 5ms blocks, but +// we probably care about the difference between 10 vs. 30 seconds. +const HISTOGRAM_METRICS = /** @type {const} */ ({ swingset_crank_processing_time: { description: 'Processing time per crank (ms)', boundaries: [1, 11, 21, 31, 41, 51, 61, 71, 81, 91, Infinity], @@ -88,12 +89,19 @@ const recordToKey = record => Object.entries(record).sort(([ka], [kb]) => (ka < kb ? -1 : 1)), ); +/** + * Return an array of Views defining explicit buckets for Histogram instruments + * to which we record measurements. + */ export function getMetricsProviderViews() { - return Object.entries(baseMetricOptions).map( + return Object.entries(HISTOGRAM_METRICS).map( ([instrumentName, { boundaries }]) => + // TODO: Add `instrumentType: InstrumentType.HISTOGRAM` and `meterName` + // filters (the latter of which should be a parameter of the exported + // function). 
new View({ - aggregation: new ExplicitBucketHistogramAggregation([...boundaries]), instrumentName, + aggregation: new ExplicitBucketHistogramAggregation([...boundaries]), }), ); } @@ -111,19 +119,16 @@ export function getTelemetryProviders(powers = {}) { } /** - * @param {import('@opentelemetry/api').Meter} metricMeter + * @param {OTelMeter} metricMeter * @param {string} name */ function createHistogram(metricMeter, name) { - const { description } = baseMetricOptions[name] || {}; + const { description } = HISTOGRAM_METRICS[name] || {}; return metricMeter.createHistogram(name, { description }); } /** - * @param {{ - * metricMeter: import('@opentelemetry/api').Meter, - * attributes?: import('@opentelemetry/api').MetricAttributes, - * }} param0 + * @param {{ metricMeter: OTelMeter, attributes?: MetricAttributes }} options */ export function makeSlogCallbacks({ metricMeter, attributes = {} }) { // Legacy because legacyMaps are not passable @@ -133,9 +138,9 @@ export function makeSlogCallbacks({ metricMeter, attributes = {} }) { * This function reuses or creates per-group named metrics. * * @param {string} name name of the base metric - * @param {Attributes} [group] the - * attributes to associate with a group - * @param {Attributes} [instance] the specific metric attributes + * @param {MetricAttributes} [group] the + * attributes to associate with a group + * @param {MetricAttributes} [instance] the specific metric attributes * @returns {Pick} the attribute-aware recorder */ const getGroupedRecorder = (name, group = undefined, instance = {}) => { @@ -215,21 +220,16 @@ export function makeSlogCallbacks({ metricMeter, attributes = {} }) { (deltaMS, [[_status, _problem, meterUsage]]) => { const group = getVatGroup(vatID); getGroupedRecorder('swingset_vat_delivery', group).record(deltaMS); - if (meterUsage) { - // Add to aggregated metering stats. - for (const [key, value] of Object.entries(meterUsage)) { - if (key === 'meterType') { - continue; - } - getGroupedRecorder(`swingset_meter_usage`, group, { - // The meterType is an instance-specific attribute--a change in - // it will result in the old value being discarded. - ...(meterUsage.meterType && { - meterType: meterUsage.meterType, - }), - stat: key, - }).record(value || 0); - } + const { meterType, ...measurements } = meterUsage || {}; + for (const [key, value] of Object.entries(measurements)) { + if (typeof value === 'object') continue; + // TODO: Each measurement key should have its own histogram; there's + // no reason to mix e.g. allocate/compute/currentHeapCount. + // cf. https://prometheus.io/docs/practices/naming/#metric-names + const detail = { ...(meterType ? { meterType } : {}), stat: key }; + getGroupedRecorder('swingset_meter_usage', group, detail).record( + value || 0, + ); } }, ); @@ -240,12 +240,9 @@ export function makeSlogCallbacks({ metricMeter, attributes = {} }) { } /** - * Create a metrics manager for the 'inboundQueue' structure, which - * can be scaped to report current length, and the number of - * increments and decrements. This must be created with the initial - * length as extracted from durable storage, but after that we assume - * that we're told about every up and down, so our RAM-backed shadow - * 'length' will remain accurate. + * Create a metrics manager for inbound queues. It must be initialized with the + * length from durable storage and informed of each subsequent change so that + * metrics can be provided from RAM. 
* * Note that the add/remove counts will get reset at restart, but * Prometheus/etc tools can tolerate that just fine. @@ -282,12 +279,12 @@ export function makeInboundQueueMetrics(initialLength) { } /** - * @param {object} param0 - * @param {any} param0.controller - * @param {import('@opentelemetry/api').Meter} param0.metricMeter - * @param {Console} param0.log - * @param {Attributes} [param0.attributes] - * @param {any} [param0.inboundQueueMetrics] + * @param {object} config + * @param {any} config.controller + * @param {OTelMeter} config.metricMeter + * @param {Console} config.log + * @param {MetricAttributes} [config.attributes] + * @param {ReturnType} [config.inboundQueueMetrics] */ export function exportKernelStats({ controller, From e8b99bc5b675aba78a48e5f4012fec3049a92c3b Mon Sep 17 00:00:00 2001 From: Richard Gibson Date: Mon, 27 Jan 2025 18:50:01 -0500 Subject: [PATCH 2/3] feat(cosmic-swingset): Split inbound queue length metrics by queue name Fixes #10900 --- packages/cosmic-swingset/src/kernel-stats.js | 207 +++++++++++++++---- packages/cosmic-swingset/src/launch-chain.js | 28 ++- 2 files changed, 180 insertions(+), 55 deletions(-) diff --git a/packages/cosmic-swingset/src/kernel-stats.js b/packages/cosmic-swingset/src/kernel-stats.js index 9faac70cbb9..d1d24df5001 100644 --- a/packages/cosmic-swingset/src/kernel-stats.js +++ b/packages/cosmic-swingset/src/kernel-stats.js @@ -5,6 +5,9 @@ import { View, } from '@opentelemetry/sdk-metrics'; +import { Fail } from '@endo/errors'; +import { isNat } from '@endo/nat'; + import { makeLegacyMap } from '@agoric/store'; import { @@ -19,6 +22,8 @@ import process from 'node:process'; /** @import {Histogram, Meter as OTelMeter, MetricAttributes} from '@opentelemetry/api' */ +/** @import {TotalMap} from '@agoric/internal' */ + // import { diag, DiagConsoleLogger, DiagLogLevel } from '@opentelemetry/api'; // diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.VERBOSE); @@ -72,6 +77,44 @@ const HISTOGRAM_METRICS = /** @type {const} */ ({ }, }); +/** @enum {(typeof QueueMetricAspect)[keyof typeof QueueMetricAspect]} */ +const QueueMetricAspect = /** @type {const} */ ({ + Length: 'length', + IncrementCount: 'increments', + DecrementCount: 'decrements', +}); + +/** + * Queue metrics come in {length,add,remove} triples sharing a common prefix. + * + * @param {string} namePrefix + * @param {string} descPrefix + * @returns {Record} + */ +const makeQueueMetrics = (namePrefix, descPrefix) => { + /** @type {Array<[QueueMetricAspect, string, string]>} */ + const metricsMeta = [ + [QueueMetricAspect.Length, 'length', 'length'], + [QueueMetricAspect.IncrementCount, 'add', 'increments'], + [QueueMetricAspect.DecrementCount, 'remove', 'decrements'], + ]; + const entries = metricsMeta.map(([aspect, nameSuffix, descSuffix]) => { + const name = `${namePrefix}_${nameSuffix}`; + const description = `${descPrefix} ${descSuffix}`; + return [name, { aspect, description }]; + }); + return Object.fromEntries(entries); +}; + +const QUEUE_METRICS = harden({ + // "cosmic_swingset_inbound_queue_{length,add,remove}" measurements carry a + // "queue" attribute. 
+ // Future OpenTelemetry SDKs should support expressing that in Instrument + // creation: + // https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-advisory-parameter-attributes + ...makeQueueMetrics('cosmic_swingset_inbound_queue', 'inbound queue'), +}); + const wrapDeltaMS = (finisher, useDeltaMS) => { const startMS = Date.now(); return (...finishArgs) => { @@ -239,59 +282,148 @@ export function makeSlogCallbacks({ metricMeter, attributes = {} }) { return harden(slogCallbacks); } +/** + * @template {string} QueueName + * @typedef InboundQueueMetricsManager + * @property {(newLengths: Record) => void} updateLengths + * @property {(queueName: QueueName, delta?: number) => void} decStat + * @property {() => Record} getStats + */ + /** * Create a metrics manager for inbound queues. It must be initialized with the - * length from durable storage and informed of each subsequent change so that - * metrics can be provided from RAM. + * length of each queue and informed of each subsequent change so that metrics + * can be provided from RAM. * * Note that the add/remove counts will get reset at restart, but * Prometheus/etc tools can tolerate that just fine. * - * @param {number} initialLength + * @template {string} QueueName + * @param {OTelMeter} metricMeter + * @param {Record} initialLengths per-queue + * @param {Console} logger + * @returns {InboundQueueMetricsManager} */ -export function makeInboundQueueMetrics(initialLength) { - let length = initialLength; - let add = 0; - let remove = 0; +function makeInboundQueueMetrics(metricMeter, initialLengths, logger) { + const initialEntries = Object.entries(initialLengths); + const zeroEntries = initialEntries.map(([queueName]) => [queueName, 0]); + const makeQueueCounts = entries => { + for (const [queueName, length] of entries) { + isNat(length) || + Fail`invalid initial length for queue ${queueName}: ${length}`; + } + return /** @type {TotalMap} */ (new Map(entries)); + }; + /** + * For each {length,increment count,decrement count} aspect (each such aspect + * corresponding to a single OpenTelemetry Instrument), keep a map of values + * keyed by queue name (each corresponding to a value of Attribute "queue"). + * + * @type {Record>} + */ + const counterData = { + [QueueMetricAspect.Length]: makeQueueCounts(initialEntries), + [QueueMetricAspect.IncrementCount]: makeQueueCounts(zeroEntries), + [QueueMetricAspect.DecrementCount]: makeQueueCounts(zeroEntries), + }; + + // In the event of misconfigured reporting for an unknown queue, accept the + // data with a warning rather than either ignore it or halt the chain. + const provideQueue = queueName => { + if (counterData[QueueMetricAspect.Length].has(queueName)) return; + logger.warn(`unknown inbound queue ${JSON.stringify(queueName)}`); + for (const [aspect, map] of Object.entries(counterData)) { + const old = map.get(queueName); + old === undefined || + Fail`internal: unexpected preexisting ${aspect}=${old} data for late queue ${queueName}`; + map.set(queueName, 0); + } + }; + + const nudge = (map, queueName, delta) => { + const old = map.get(queueName); + old !== undefined || + Fail`internal: unexpected missing data for queue ${queueName}`; + map.set(queueName, old + delta); + }; + + // Wire up callbacks for reporting the OpenTelemetry measurements: + // queue length is an UpDownCounter, while increment and decrement counts are + // [monotonic] Counters. 
+ // But note that the Prometheus representation of the former will be a Gauge: + // https://prometheus.io/docs/concepts/metric_types/ + for (const [name, { aspect, description }] of Object.entries(QUEUE_METRICS)) { + const isMonotonic = aspect !== QueueMetricAspect.Length; + const instrumentOptions = { description }; + const asyncInstrument = isMonotonic + ? metricMeter.createObservableCounter(name, instrumentOptions) + : metricMeter.createObservableUpDownCounter(name, instrumentOptions); + asyncInstrument.addCallback(observer => { + for (const [queueName, value] of counterData[aspect].entries()) { + observer.observe(value, { queue: queueName }); + } + }); + } return harden({ - updateLength: newLength => { - const delta = newLength - length; - length = newLength; - if (delta > 0) { - add += delta; - } else { - remove -= delta; + updateLengths: newLengths => { + for (const [queueName, newLength] of Object.entries(newLengths)) { + provideQueue(queueName); + isNat(newLength) || + Fail`invalid length for queue ${queueName}: ${newLength}`; + const oldLength = counterData[QueueMetricAspect.Length].get(queueName); + counterData[QueueMetricAspect.Length].set(queueName, newLength); + if (newLength > oldLength) { + const map = counterData[QueueMetricAspect.IncrementCount]; + nudge(map, queueName, newLength - oldLength); + } else if (newLength < oldLength) { + const map = counterData[QueueMetricAspect.DecrementCount]; + nudge(map, queueName, oldLength - newLength); + } } }, - decStat: (delta = 1) => { - length -= delta; - remove += delta; + decStat: (queueName, delta = 1) => { + provideQueue(queueName); + isNat(delta) || Fail`invalid decStat for queue ${queueName}: ${delta}`; + nudge(counterData[QueueMetricAspect.Length], queueName, -delta); + nudge(counterData[QueueMetricAspect.DecrementCount], queueName, delta); }, - getStats: () => ({ - cosmic_swingset_inbound_queue_length: length, - cosmic_swingset_inbound_queue_add: add, - cosmic_swingset_inbound_queue_remove: remove, - }), + getStats: () => { + // For each [length,add,remove] metric name, emit both a + // per-queue-name count and a pre-aggregated sum over all queue names + // (the latter is necessary for backwards compatibility until all old + // consumers of e.g. slog entries have been updated). + const entries = []; + for (const [name, { aspect }] of Object.entries(QUEUE_METRICS)) { + let sum = 0; + for (const [queueName, value] of counterData[aspect].entries()) { + sum += value; + entries.push([`${name}_${queueName}`, value]); + } + entries.push([name, sum]); + } + return Object.fromEntries(entries); + }, }); } /** + * @template {string} QueueName * @param {object} config * @param {any} config.controller * @param {OTelMeter} config.metricMeter * @param {Console} config.log * @param {MetricAttributes} [config.attributes] - * @param {ReturnType} [config.inboundQueueMetrics] + * @param {Record} [config.initialQueueLengths] per-queue */ export function exportKernelStats({ controller, metricMeter, log = console, attributes = {}, - inboundQueueMetrics, + initialQueueLengths = /** @type {any} */ ({}), }) { const kernelStatsMetrics = new Set(); const kernelStatsCounters = new Map(); @@ -356,26 +488,12 @@ export function exportKernelStats({ kernelStatsMetrics.add(key); } - if (inboundQueueMetrics) { - // These are not kernelStatsMetrics, they're outside the kernel. 
- for (const name of ['length', 'add', 'remove']) { - const key = `cosmic_swingset_inbound_queue_${name}`; - const options = { - description: `inbound queue ${name}`, - }; - const counter = - name === 'length' - ? metricMeter.createObservableUpDownCounter(key, options) - : metricMeter.createObservableCounter(key, options); - - counter.addCallback(observableResult => { - observableResult.observe( - inboundQueueMetrics.getStats()[key], - attributes, - ); - }); - } - } + // These are not kernelStatsMetrics, they're outside the kernel. + const inboundQueueMetrics = makeInboundQueueMetrics( + metricMeter, + initialQueueLengths, + log, + ); // TODO: We probably shouldn't roll our own Node.js process metrics, but a // cursory search for "opentelemetry node.js VM instrumentation" didn't reveal @@ -466,6 +584,7 @@ export function exportKernelStats({ return { crankScheduler, + inboundQueueMetrics, schedulerCrankTimeHistogram, schedulerBlockTimeHistogram, }; diff --git a/packages/cosmic-swingset/src/launch-chain.js b/packages/cosmic-swingset/src/launch-chain.js index 73f090bf64f..1f8d5108c3b 100644 --- a/packages/cosmic-swingset/src/launch-chain.js +++ b/packages/cosmic-swingset/src/launch-chain.js @@ -38,7 +38,6 @@ import { fileURLToPath } from 'url'; import { makeDefaultMeterProvider, - makeInboundQueueMetrics, exportKernelStats, makeSlogCallbacks, } from './kernel-stats.js'; @@ -97,6 +96,8 @@ const parseUpgradePlanInfo = (upgradePlan, prefix = '') => { * - cleanup: for dealing with data from terminated vats */ +/** @typedef {Extract} InboundQueueName */ + /** @type {CrankerPhase} */ const CLEANUP = 'cleanup'; @@ -466,15 +467,17 @@ export async function launch({ ? parseInt(env.END_BLOCK_SPIN_MS, 10) : 0; - const inboundQueueMetrics = makeInboundQueueMetrics( - actionQueue.size() + highPriorityQueue.size(), - ); - const { crankScheduler } = exportKernelStats({ + const initialQueueLengths = /** @type {Record} */ ({ + queued: actionQueue.size(), + 'high-priority': highPriorityQueue.size(), + forced: runThisBlock.size(), + }); + const { crankScheduler, inboundQueueMetrics } = exportKernelStats({ controller, metricMeter, // @ts-expect-error Type 'Logger' is not assignable to type 'Console'. log: console, - inboundQueueMetrics, + initialQueueLengths, }); /** @@ -734,13 +737,13 @@ export async function launch({ * * @param {InboundQueue} inboundQueue * @param {Cranker} runSwingset - * @param {CrankerPhase} phase + * @param {InboundQueueName} phase */ async function processActions(inboundQueue, runSwingset, phase) { let keepGoing = true; for await (const { action, context } of inboundQueue.consumeAll()) { const inboundNum = `${context.blockHeight}-${context.txHash}-${context.msgIdx}`; - inboundQueueMetrics.decStat(); + inboundQueueMetrics.decStat(phase); countInboundAction(action.type); await performAction(action, inboundNum); keepGoing = await runSwingset(phase); @@ -814,9 +817,12 @@ export async function launch({ // First, record new actions (bridge/mailbox/etc events that cosmos // added up for delivery to swingset) into our inboundQueue metrics - inboundQueueMetrics.updateLength( - actionQueue.size() + highPriorityQueue.size() + runThisBlock.size(), - ); + const newLengths = /** @type {Record} */ ({ + queued: actionQueue.size(), + 'high-priority': highPriorityQueue.size(), + forced: runThisBlock.size(), + }); + inboundQueueMetrics.updateLengths(newLengths); // If we have work to complete this block, it needs to run to completion. 
// It will also run to completion any work that swingset still had pending. From 529d2f6056dfd2ffa49e1544a7d562c6e92ac54f Mon Sep 17 00:00:00 2001 From: Richard Gibson Date: Wed, 29 Jan 2025 16:24:56 -0500 Subject: [PATCH 3/3] chore(cosmic-swingset): Rename CrankerPhase values to be single-word values of a JSDoc enum --- packages/cosmic-swingset/src/launch-chain.js | 66 ++++++++++++-------- 1 file changed, 41 insertions(+), 25 deletions(-) diff --git a/packages/cosmic-swingset/src/launch-chain.js b/packages/cosmic-swingset/src/launch-chain.js index 1f8d5108c3b..f8ef579a9da 100644 --- a/packages/cosmic-swingset/src/launch-chain.js +++ b/packages/cosmic-swingset/src/launch-chain.js @@ -87,19 +87,35 @@ const parseUpgradePlanInfo = (upgradePlan, prefix = '') => { */ /** - * @typedef {'leftover' | 'forced' | 'high-priority' | 'timer' | 'queued' | 'cleanup'} CrankerPhase - * - leftover: work from a previous block - * - forced: work that claims the entirety of the current block - * - high-priority: queued work the precedes timer advancement - * - intermission: needed to note state exports and update consistency hashes - * - queued: queued work the follows timer advancement - * - cleanup: for dealing with data from terminated vats + * The phase associated with a controller run. + * - Leftover: work from a previous block + * - Forced: work that claims the entirety of the current block + * - Priority: queued work that precedes timer device advancement (e.g., oracle price updates) + * - Timer: work prompted by timer advancement to the new external time + * - Inbound: queued work that follows timer advancement (e.g., normal messages) + * - Cleanup: for dealing with data from terminated vats + * + * @enum {(typeof CrankerPhase)[keyof typeof CrankerPhase]} CrankerPhase */ +const CrankerPhase = /** @type {const} */ ({ + Leftover: 'leftover', + Forced: 'forced', + Priority: 'priority', + Timer: 'timer', + Inbound: 'inbound', + Cleanup: 'cleanup', +}); -/** @typedef {Extract} InboundQueueName */ - -/** @type {CrankerPhase} */ -const CLEANUP = 'cleanup'; +/** + * Some phases correspond with inbound message queues. 
+ * + * @enum {(typeof InboundQueueName)[keyof typeof InboundQueueName]} InboundQueueName + */ +const InboundQueueName = /** @type {const} */ ({ + Forced: CrankerPhase.Forced, + Priority: CrankerPhase.Priority, + Inbound: CrankerPhase.Inbound, +}); /** * @typedef {(phase: CrankerPhase) => Promise} Cranker runs the kernel @@ -468,9 +484,9 @@ export async function launch({ : 0; const initialQueueLengths = /** @type {Record} */ ({ - queued: actionQueue.size(), - 'high-priority': highPriorityQueue.size(), - forced: runThisBlock.size(), + [InboundQueueName.Forced]: runThisBlock.size(), + [InboundQueueName.Priority]: highPriorityQueue.size(), + [InboundQueueName.Inbound]: actionQueue.size(), }); const { crankScheduler, inboundQueueMetrics } = exportKernelStats({ controller, @@ -488,7 +504,7 @@ export async function launch({ function makeRunSwingset(blockHeight, runPolicy) { let runNum = 0; async function runSwingset(phase) { - if (phase === CLEANUP) { + if (phase === CrankerPhase.Cleanup) { const allowCleanup = runPolicy.startCleanup(); if (!allowCleanup) return false; } @@ -535,7 +551,7 @@ export async function launch({ const runPolicy = computronCounter(params, true); const runSwingset = makeRunSwingset(blockHeight, runPolicy); - await runSwingset('forced'); + await runSwingset(CrankerPhase.Forced); } async function saveChainState() { @@ -766,13 +782,13 @@ export async function launch({ */ async function processBlockActions(runSwingset, blockHeight, blockTime) { // First, complete leftover work, if any - let keepGoing = await runSwingset('leftover'); + let keepGoing = await runSwingset(CrankerPhase.Leftover); if (!keepGoing) return; // Then, if we have anything in the special runThisBlock queue, process // it and do no further work. if (runThisBlock.size()) { - await processActions(runThisBlock, runSwingset, 'forced'); + await processActions(runThisBlock, runSwingset, CrankerPhase.Forced); return; } @@ -780,7 +796,7 @@ export async function launch({ keepGoing = await processActions( highPriorityQueue, runSwingset, - 'high-priority', + CrankerPhase.Priority, ); if (!keepGoing) return; @@ -800,14 +816,14 @@ export async function launch({ // We must run the kernel even if nothing was added since the kernel // only notes state exports and updates consistency hashes when attempting // to perform a crank. - keepGoing = await runSwingset('timer'); + keepGoing = await runSwingset(CrankerPhase.Timer); if (!keepGoing) return; // Finally, process as much as we can from the actionQueue. - await processActions(actionQueue, runSwingset, 'queued'); + await processActions(actionQueue, runSwingset, CrankerPhase.Inbound); // Cleanup after terminated vats as allowed. - await runSwingset('cleanup'); + await runSwingset(CrankerPhase.Cleanup); } async function endBlock(blockHeight, blockTime, params) { @@ -818,9 +834,9 @@ export async function launch({ // First, record new actions (bridge/mailbox/etc events that cosmos // added up for delivery to swingset) into our inboundQueue metrics const newLengths = /** @type {Record} */ ({ - queued: actionQueue.size(), - 'high-priority': highPriorityQueue.size(), - forced: runThisBlock.size(), + [InboundQueueName.Forced]: runThisBlock.size(), + [InboundQueueName.Priority]: highPriorityQueue.size(), + [InboundQueueName.Inbound]: actionQueue.size(), }); inboundQueueMetrics.updateLengths(newLengths);