diff --git a/jest.config.js b/jest.config.js index d964a3d7d..f4b4457f3 100644 --- a/jest.config.js +++ b/jest.config.js @@ -1,7 +1,4 @@ -const os = require('os'); const path = require('path'); -const fs = require('fs'); -const process = require('process'); const { pathsToModuleNameMapper } = require('ts-jest'); const { compilerOptions } = require('./tsconfig'); @@ -16,10 +13,6 @@ const globals = { projectDir: __dirname, // Absolute directory to the test root testDir: path.join(__dirname, 'tests'), - // Default global data directory - dataDir: fs.mkdtempSync( - path.join(os.tmpdir(), 'polykey-test-global-'), - ), // Default asynchronous test timeout defaultTimeout: 20000, failedConnectionTimeout: 50000, @@ -29,8 +22,8 @@ const globals = { // The `globalSetup` and `globalTeardown` cannot access the `globals` // They run in their own process context -// They can receive process environment -process.env['GLOBAL_DATA_DIR'] = globals.dataDir; +// They can however receive the process environment +// Use `process.env` to set variables module.exports = { testEnvironment: 'node', diff --git a/src/PolykeyAgent.ts b/src/PolykeyAgent.ts index 8b971bbe6..b8df370e0 100644 --- a/src/PolykeyAgent.ts +++ b/src/PolykeyAgent.ts @@ -1,13 +1,12 @@ import type { DeepPartial, FileSystem, ObjectEmpty } from './types'; import type { PolykeyWorkerManagerInterface } from './workers/types'; import type { TLSConfig } from './network/types'; -import type { SeedNodes } from './nodes/types'; +import type { NodeAddress, NodeId, SeedNodes } from './nodes/types'; import type { Key, PasswordOpsLimit, PasswordMemLimit } from './keys/types'; import path from 'path'; import process from 'process'; import Logger from '@matrixai/logger'; import { DB } from '@matrixai/db'; -import { MDNS } from '@matrixai/mdns'; import { CreateDestroyStartStop, ready, @@ -76,7 +75,8 @@ type PolykeyAgentOptions = { rpcParserBufferSize: number; }; nodes: { - connectionIdleTimeoutTime: number; + connectionIdleTimeoutTimeMin: 
number; + connectionIdleTimeoutTimeScale: number; connectionFindConcurrencyLimit: number; connectionConnectTimeoutTime: number; connectionKeepAliveTimeoutTime: number; @@ -160,8 +160,10 @@ class PolykeyAgent { rpcParserBufferSize: config.defaultsSystem.rpcParserBufferSize, }, nodes: { - connectionIdleTimeoutTime: - config.defaultsSystem.nodesConnectionIdleTimeoutTime, + connectionIdleTimeoutTimeMin: + config.defaultsSystem.nodesConnectionIdleTimeoutTimeMin, + connectionIdleTimeoutTimeScale: + config.defaultsSystem.nodesConnectionIdleTimeoutTimeScale, connectionFindConcurrencyLimit: config.defaultsSystem.nodesConnectionFindConcurrencyLimit, connectionFindLocalTimeoutTime: @@ -213,7 +215,6 @@ class PolykeyAgent { let gestaltGraph: GestaltGraph | undefined; let identitiesManager: IdentitiesManager | undefined; let nodeGraph: NodeGraph | undefined; - let mdns: MDNS | undefined; let nodeConnectionManager: NodeConnectionManager | undefined; let nodeManager: NodeManager | undefined; let discovery: Discovery | undefined; @@ -330,30 +331,20 @@ class PolykeyAgent { keyRing, logger: logger.getChild(NodeGraph.name), }); - mdns = new MDNS({ - logger: logger.getChild(MDNS.name), - }); - await mdns.start({ - id: keyRing.getNodeId().toBuffer().readUint16BE(), - hostname: nodesUtils.encodeNodeId(keyRing.getNodeId()), - groups: optionsDefaulted.mdns.groups, - port: optionsDefaulted.mdns.port, - }); // Remove your own node ID if provided as a seed node const nodeIdOwnEncoded = nodesUtils.encodeNodeId(keyRing.getNodeId()); delete optionsDefaulted.seedNodes[nodeIdOwnEncoded]; nodeConnectionManager = new NodeConnectionManager({ keyRing, - nodeGraph, tlsConfig, - mdns, - seedNodes: optionsDefaulted.seedNodes, connectionFindConcurrencyLimit: optionsDefaulted.nodes.connectionFindConcurrencyLimit, connectionFindLocalTimeoutTime: optionsDefaulted.nodes.connectionFindLocalTimeoutTime, - connectionIdleTimeoutTime: - optionsDefaulted.nodes.connectionIdleTimeoutTime, + 
connectionIdleTimeoutTimeMin: + optionsDefaulted.nodes.connectionIdleTimeoutTimeMin, + connectionIdleTimeoutTimeScale: + optionsDefaulted.nodes.connectionIdleTimeoutTimeScale, connectionConnectTimeoutTime: optionsDefaulted.nodes.connectionConnectTimeoutTime, connectionKeepAliveTimeoutTime: @@ -374,22 +365,12 @@ class PolykeyAgent { nodeConnectionManager, taskManager, gestaltGraph, + mdnsOptions: { + groups: optionsDefaulted.mdns.groups, + port: optionsDefaulted.mdns.port, + }, logger: logger.getChild(NodeManager.name), }); - await nodeManager.start(); - // Add seed nodes to the nodeGraph - const setNodeProms = new Array>(); - for (const nodeIdEncoded in optionsDefaulted.seedNodes) { - const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); - if (nodeId == null) utils.never(); - const setNodeProm = nodeManager.setNode( - nodeId, - optionsDefaulted.seedNodes[nodeIdEncoded], - true, - ); - setNodeProms.push(setNodeProm); - } - await Promise.all(setNodeProms); discovery = await Discovery.createDiscovery({ db, keyRing, @@ -403,7 +384,6 @@ class PolykeyAgent { await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger: logger.getChild(NotificationsManager.name), @@ -412,7 +392,7 @@ class PolykeyAgent { vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing, - nodeConnectionManager, + nodeManager, notificationsManager, gestaltGraph, acl, @@ -458,7 +438,6 @@ class PolykeyAgent { await discovery?.stop(); await identitiesManager?.stop(); await gestaltGraph?.stop(); - await mdns?.stop(); await acl?.stop(); await sigchain?.stop(); await certManager?.stop(); @@ -483,7 +462,6 @@ class PolykeyAgent { acl, gestaltGraph, nodeGraph, - mdns, taskManager, nodeConnectionManager, nodeManager, @@ -504,6 +482,7 @@ class PolykeyAgent { agentServicePort: optionsDefaulted.agentServicePort, workers: optionsDefaulted.workers, ipv6Only: optionsDefaulted.ipv6Only, + seedNodes: optionsDefaulted.seedNodes, }, fresh, }); 
@@ -523,7 +502,6 @@ class PolykeyAgent { public readonly acl: ACL; public readonly gestaltGraph: GestaltGraph; public readonly nodeGraph: NodeGraph; - public readonly mdns: MDNS; public readonly taskManager: TaskManager; public readonly nodeConnectionManager: NodeConnectionManager; public readonly nodeManager: NodeManager; @@ -570,7 +548,6 @@ class PolykeyAgent { acl, gestaltGraph, nodeGraph, - mdns, taskManager, nodeConnectionManager, nodeManager, @@ -594,7 +571,6 @@ class PolykeyAgent { acl: ACL; gestaltGraph: GestaltGraph; nodeGraph: NodeGraph; - mdns: MDNS; taskManager: TaskManager; nodeConnectionManager: NodeConnectionManager; nodeManager: NodeManager; @@ -620,7 +596,6 @@ class PolykeyAgent { this.gestaltGraph = gestaltGraph; this.discovery = discovery; this.nodeGraph = nodeGraph; - this.mdns = mdns; this.taskManager = taskManager; this.nodeConnectionManager = nodeConnectionManager; this.nodeManager = nodeManager; @@ -681,6 +656,7 @@ class PolykeyAgent { groups: Array; port: number; }; + seedNodes: SeedNodes; }>; workers?: number; fresh?: boolean; @@ -698,6 +674,7 @@ class PolykeyAgent { groups: config.defaultsSystem.mdnsGroups, port: config.defaultsSystem.mdnsPort, }, + seedNodes: config.defaultsUser.seedNodes, }); // Register event handlers this.certManager.addEventListener( @@ -768,18 +745,11 @@ class PolykeyAgent { host: optionsDefaulted.clientServiceHost, port: optionsDefaulted.clientServicePort, }); - await this.mdns.start({ - id: this.keyRing.getNodeId().toBuffer().readUint16BE(), - hostname: nodesUtils.encodeNodeId(this.keyRing.getNodeId()), - groups: optionsDefaulted.mdns.groups, - port: optionsDefaulted.mdns.port, - }); - await this.nodeManager.start(); await this.nodeConnectionManager.start({ host: optionsDefaulted.agentServiceHost, port: optionsDefaulted.agentServicePort, ipv6Only: optionsDefaulted.ipv6Only, - manifest: agentServerManifest({ + agentService: agentServerManifest({ acl: this.acl, db: this.db, keyRing: this.keyRing, @@ -792,8 +762,39 
@@ class PolykeyAgent { vaultManager: this.vaultManager, }), }); + await this.nodeManager.start(); + // Add seed nodes to the nodeGraph + const setNodeProms = new Array>(); + for (const nodeIdEncoded in optionsDefaulted.seedNodes) { + const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); + if (nodeId == null) utils.never(); + const setNodeProm = this.nodeManager.setNode( + nodeId, + optionsDefaulted.seedNodes[nodeIdEncoded], + { + mode: 'direct', + connectedTime: 0, + scopes: ['global'], + }, + true, + ); + setNodeProms.push(setNodeProm); + } + await Promise.all(setNodeProms); await this.nodeGraph.start({ fresh }); - await this.nodeManager.syncNodeGraph(false); + const seedNodeEntries = Object.entries( + optionsDefaulted.seedNodes as SeedNodes, + ); + if (seedNodeEntries.length > 0) { + const initialNodes = seedNodeEntries.map( + ([nodeIdEncoded, nodeAddress]) => { + const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); + if (nodeId == null) utils.never('nodeId should be defined'); + return [nodeId, nodeAddress] as [NodeId, NodeAddress]; + }, + ); + await this.nodeManager.syncNodeGraph(initialNodes, undefined, false); + } await this.discovery.start({ fresh }); await this.vaultManager.start({ fresh }); await this.notificationsManager.start({ fresh }); @@ -838,7 +839,6 @@ class PolykeyAgent { await this.vaultManager?.stop(); await this.discovery?.stop(); await this.nodeGraph?.stop(); - await this.mdns?.stop(); await this.nodeConnectionManager?.stop(); await this.nodeManager?.stop(); await this.clientService.stop({ force: true }); @@ -882,7 +882,6 @@ class PolykeyAgent { await this.clientService.stop({ force: true }); await this.identitiesManager.stop(); await this.gestaltGraph.stop(); - await this.mdns.stop(); await this.acl.stop(); await this.sigchain.stop(); await this.certManager.stop(); diff --git a/src/bootstrap/utils.ts b/src/bootstrap/utils.ts index 23643dcd5..77d96cf99 100644 --- a/src/bootstrap/utils.ts +++ b/src/bootstrap/utils.ts @@ -195,7 +195,6 @@ 
async function bootstrapState({ await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager: {} as any, // No connections are attempted nodeManager, keyRing, logger: logger.getChild(NotificationsManager.name), @@ -206,7 +205,7 @@ async function bootstrapState({ db, gestaltGraph, keyRing, - nodeConnectionManager: {} as any, // No connections are attempted + nodeManager: {} as any, // No connections are attempted vaultsPath, notificationsManager, logger: logger.getChild(VaultManager.name), diff --git a/src/client/handlers/NodesAdd.ts b/src/client/handlers/NodesAdd.ts index 09fe84bc8..77240eeac 100644 --- a/src/client/handlers/NodesAdd.ts +++ b/src/client/handlers/NodesAdd.ts @@ -5,7 +5,7 @@ import type { NodesAddMessage, } from '../types'; import type { NodeId } from '../../ids'; -import type { Host, Hostname, Port } from '../../network/types'; +import type { Host, Port } from '../../network/types'; import type NodeManager from '../../nodes/NodeManager'; import { UnaryHandler } from '@matrixai/rpc'; import * as ids from '../../ids'; @@ -32,13 +32,13 @@ class NodesAdd extends UnaryHandler< port, }: { nodeId: NodeId; - host: Host | Hostname; + host: Host; port: Port; } = validateSync( (keyPath, value) => { return matchSync(keyPath)( [['nodeId'], () => ids.parseNodeId(value)], - [['host'], () => networkUtils.parseHostOrHostname(value)], + [['host'], () => networkUtils.parseHost(value)], [['port'], () => networkUtils.parsePort(value)], () => value, ); @@ -52,9 +52,7 @@ class NodesAdd extends UnaryHandler< // Pinging to authenticate the node if ( (input.ping ?? 
false) && - !(await nodeManager.pingNode(nodeId, [ - { host, port, scopes: ['global'] }, - ])) + !(await nodeManager.pingNodeAddress(nodeId, host, port)) ) { throw new nodeErrors.ErrorNodePingFailed( 'Failed to authenticate target node', @@ -64,9 +62,10 @@ class NodesAdd extends UnaryHandler< await db.withTransactionF((tran) => nodeManager.setNode( nodeId, + [host, port], { - host, - port, + mode: 'direct', + connectedTime: Date.now(), scopes: ['global'], }, true, diff --git a/src/client/handlers/NodesFind.ts b/src/client/handlers/NodesFind.ts index a3bb98abe..dbb0d0c78 100644 --- a/src/client/handlers/NodesFind.ts +++ b/src/client/handlers/NodesFind.ts @@ -5,7 +5,8 @@ import type { NodesFindMessage, } from '../types'; import type { NodeId } from '../../ids'; -import type NodeConnectionManager from '../../nodes/NodeConnectionManager'; +import type NodeManager from '../../nodes/NodeManager'; +import type { ContextTimed } from '@matrixai/contexts'; import { UnaryHandler } from '@matrixai/rpc'; import * as ids from '../../ids'; import * as nodesErrors from '../../nodes/errors'; @@ -14,15 +15,18 @@ import { matchSync } from '../../utils'; class NodesFind extends UnaryHandler< { - nodeConnectionManager: NodeConnectionManager; + nodeManager: NodeManager; }, ClientRPCRequestParams, ClientRPCResponseResult > { public handle = async ( input: ClientRPCRequestParams, + _cancel, + _meta, + ctx: ContextTimed, ): Promise> => { - const { nodeConnectionManager } = this.container; + const { nodeManager } = this.container; const { nodeId, @@ -39,12 +43,21 @@ class NodesFind extends UnaryHandler< nodeId: input.nodeIdEncoded, }, ); - const addresses = await nodeConnectionManager.findNodeAll(nodeId); - if (addresses.length === 0) { + const result = await nodeManager.findNode( + nodeId, + undefined, + undefined, + undefined, + ctx, + ); + if (result == null) { throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); } - - return { addresses }; + const [nodeAddress, nodeContactAddressData] 
= result; + return { + nodeAddress, + nodeContactAddressData, + }; }; } diff --git a/src/client/handlers/NodesGetAll.ts b/src/client/handlers/NodesGetAll.ts index a782b53c6..ba7bf94b6 100644 --- a/src/client/handlers/NodesGetAll.ts +++ b/src/client/handlers/NodesGetAll.ts @@ -25,7 +25,7 @@ class NodesGetAll extends ServerHandler< if (ctx.signal.aborted) throw ctx.signal.reason; const { nodeGraph } = this.container; for await (const [index, bucket] of nodeGraph.getBuckets()) { - for (const [id, info] of bucket) { + for (const [id, nodeContact] of bucket) { const encodedId = nodesUtils.encodeNodeId(id); // For every node in every bucket, add it to our message if (ctx.signal.aborted) { @@ -34,8 +34,7 @@ class NodesGetAll extends ServerHandler< yield { bucketIndex: index, nodeIdEncoded: encodedId, - host: info.address.host, - port: info.address.port, + nodeContact, }; } } diff --git a/src/client/handlers/NodesPing.ts b/src/client/handlers/NodesPing.ts index 21563c07e..74c9f5658 100644 --- a/src/client/handlers/NodesPing.ts +++ b/src/client/handlers/NodesPing.ts @@ -37,9 +37,9 @@ class NodesPing extends UnaryHandler< nodeId: input.nodeIdEncoded, }, ); - const success = await nodeManager.pingNode(nodeId); + const result = await nodeManager.pingNode(nodeId); return { - success, + success: result != null, }; }; } diff --git a/src/client/types.ts b/src/client/types.ts index a3bc767cd..f57c63396 100644 --- a/src/client/types.ts +++ b/src/client/types.ts @@ -18,6 +18,11 @@ import type { Notification } from '../notifications/types'; import type { ProviderToken } from '../identities/types'; import type { AuditEventsGetTypeOverride } from './callers/auditEventsGet'; import type { AuditMetricGetTypeOverride } from './callers/auditMetricGet'; +import type { + NodeContact, + NodeAddress, + NodeContactAddressData, +} from '../nodes/types'; type ClientRPCRequestParams = JSONRPCResponseResult< @@ -106,10 +111,14 @@ type AddressMessage = { type NodeAddressMessage = NodeIdMessage & 
AddressMessage; type NodesFindMessage = { - addresses: Array; + nodeAddress: NodeAddress; + nodeContactAddressData: NodeContactAddressData; }; -type NodesGetMessage = NodeAddressMessage & { bucketIndex: number }; +type NodesGetMessage = NodeIdMessage & { + nodeContact: NodeContact; + bucketIndex: number; +}; type NodesAddMessage = NodeAddressMessage & { force?: boolean; diff --git a/src/config.ts b/src/config.ts index 23293067e..875f77438 100644 --- a/src/config.ts +++ b/src/config.ts @@ -186,7 +186,7 @@ const config = { */ nodesConnectionFindLocalTimeoutTime: 1_500, // 1.5 seconds /** - * Timeout for idle node connections. + * Minimum timeout for idle node connections. * * A node connection is idle, if nothing is using the connection. A * connection is being used when its resource counter is above 0. @@ -201,7 +201,13 @@ const config = { * * This should always be greater than the keep alive timeout. */ - nodesConnectionIdleTimeoutTime: 60_000, // 60 seconds + nodesConnectionIdleTimeoutTimeMin: 60_000, // 60 seconds + /** + * Scale factor for timeout for idle node connections + * + * This scales + */ + nodesConnectionIdleTimeoutTimeScale: 7_200_000, // 2 hours /** * Timeout for establishing a node connection. * @@ -238,6 +244,43 @@ const config = { * Interval for hole punching reverse node connections. */ nodesConnectionHolePunchIntervalTime: 1_000, // 1 second + /** + * Interval for refreshing buckets. + * + * A bucket that hasn't had any lookups for this amount of time will be + * refreshed. Lookups can be successful or unsuccessful. A look up will + * generally result in updates to the node graph. + */ + nodesRefreshBucketIntervalTime: 3_600_000, // 1 hour + /** + * Interval time jitter multiplier for refreshing buckets. + * + * For example, if the interval is 60 seconds, and the jitter is configured + * as 0.5 (50%), the actual interval could vary between 30 seconds + * (60 * 0.5) and 90 seconds (60 * 1.5). 
+ */ + nodesRefreshBucketIntervalTimeJitter: 0.5, + /** + * Node graph bucket limit. The node graph keeps record of all node + * addresses of the network. + * + * A smaller limit reduces how many node addresses each node needs to + * keep track of. This can increase stability and fault tolerance + * because it can be kept up to date more quickly, and when nodes + * leave the network, it has a smaller impact on the network. However, + * it may increase the number of hops required to find a node. + * + * A larger limit increases how many node addresses each node needs to + * keep track of. This can decrease stability and fault tolerance + * because it can take longer to keep it up to date, and when nodes + * leave the network, it has a larger impact on the network. However, + * it may decrease the number of hops required to find a node. + * + * This must be balanced between an efficient number of hops to look up + * a node and resource usage per node and across the network. + */ + nodesGraphBucketLimit: 20, + /** + * Multicast group addresses that the MDNS stack will operate on. + * diff --git a/src/network/utils.ts b/src/network/utils.ts index f58ed0a67..4fafebf1c 100644 --- a/src/network/utils.ts +++ b/src/network/utils.ts @@ -1,7 +1,7 @@ import type { PromiseCancellable } from '@matrixai/async-cancellable'; import type { ContextTimed } from '@matrixai/contexts'; import type { Address, Host, Hostname, Port } from './types'; -import type { NodeAddress, NodeAddressScope } from '../nodes/types'; +import type { NodeAddress } from '../nodes/types'; import type { JSONValue } from '../types'; import dns from 'dns'; import { IPv4, IPv6, Validator } from 'ip-num'; @@ -13,17 +13,113 @@ import * as validationErrors from '../validation/errors'; import * as errors from '../errors'; import ErrorPolykey from '../ErrorPolykey'; +/** + * Is it an IPv4 address? 
+ */ +function isIPv4(host: any): host is Host { + if (typeof host !== 'string') return false; + const [isIPv4] = Validator.isValidIPv4String(host); + return isIPv4; +} + +/** + * Is it an IPv6 address? + * This considers IPv4 mapped IPv6 addresses to also be IPv6 addresses. + */ +function isIPv6(host: any): host is Host { + if (typeof host !== 'string') return false; + const [isIPv6] = Validator.isValidIPv6String(host.replace(/%.+$/, '')); + if (isIPv6) return true; + // Test if the host is an IPv4 mapped IPv6 address. + // In the future, `isValidIPv6String` should be able to handle this + // and this code can be removed. + return isIPv4MappedIPv6(host); +} + +/** + * There are 2 kinds of IPv4 mapped IPv6 addresses. + * 1. ::ffff:127.0.0.1 - dotted decimal version + * 2. ::ffff:7f00:1 - hex version + * Both are accepted by Node's dgram module. + */ +function isIPv4MappedIPv6(host: any): host is Host { + if (typeof host !== 'string') return false; + if (host.startsWith('::ffff:')) { + try { + // The `ip-num` package understands `::ffff:7f00:1` + IPv6.fromString(host); + return true; + } catch { + // But it does not understand `::ffff:127.0.0.1` + const ipv4 = host.slice('::ffff:'.length); + if (isIPv4(ipv4)) { + return true; + } + } + } + return false; +} + +function isIPv4MappedIPv6Hex(host: any): host is Host { + if (typeof host !== 'string') return false; + if (host.startsWith('::ffff:')) { + try { + // The `ip-num` package understands `::ffff:7f00:1` + IPv6.fromString(host); + return true; + } catch { + return false; + } + } + return false; +} + +function isIPv4MappedIPv6Dec(host: any): host is Host { + if (typeof host !== 'string') return false; + if (host.startsWith('::ffff:')) { + // But it does not understand `::ffff:127.0.0.1` + const ipv4 = host.slice('::ffff:'.length); + if (isIPv4(ipv4)) { + return true; + } + } + return false; +} + +/** + * Extracts the IPv4 portion out of the IPv4 mapped IPv6 address. 
+ * Can handle both the dotted decimal and hex variants. + * 1. ::ffff:7f00:1 + * 2. ::ffff:127.0.0.1 + * Always returns the dotted decimal variant. + */ +function fromIPv4MappedIPv6(host: unknown): Host { + if (typeof host !== 'string') { + throw new TypeError('Invalid IPv4 mapped IPv6 address'); + } + const ipv4 = host.slice('::ffff:'.length); + if (isIPv4(ipv4)) { + return ipv4 as Host; + } + const matches = ipv4.match(/^([0-9a-fA-F]{1,4}):([0-9a-fA-F]{1,4})$/); + if (matches == null) { + throw new TypeError('Invalid IPv4 mapped IPv6 address'); + } + const ipv4Hex = matches[1].padStart(4, '0') + matches[2].padStart(4, '0'); + const ipv4Hexes = ipv4Hex.match(/.{1,2}/g)!; + const ipv4Decs = ipv4Hexes.map((h) => parseInt(h, 16)); + return ipv4Decs.join('.') as Host; +} + /** * Validates that a provided host address is a valid IPv4 or IPv6 address. */ function isHost(host: any): host is Host { if (typeof host !== 'string') return false; - const [isIPv4] = Validator.isValidIPv4String(host); - const [isIPv6] = Validator.isValidIPv6String(host.replace(/%.*/, '')); - return isIPv4 || isIPv6; + return isIPv4(host) || isIPv6(host); } -function isHostWildcard(host: Host): boolean { +function isHostWildcard(host: any): boolean { return host === '0.0.0.0' || host === '::'; } @@ -104,6 +200,43 @@ function parsePort(data: any, connect: boolean = false): Port { return data; } +/** + * Canonicalizes an IP address into a consistent format. 
+ * This will: + * - Remove leading 0s from IPv4 addresses and IPv6 addresses + * - Expand :: into 0s for IPv6 addresses + * - Extract IPv4 decimal notation from IPv4 mapped IPv6 addresses + * - Lowercase all hex characters in IPv6 addresses + */ +function toCanonicalHost(host: string): Host { + let host_: string = host.trim(); + const scope = host_.match(/%.+$/); + if (scope != null) { + host_ = host_.replace(/%.+/, ''); + } + if (isIPv4MappedIPv6(host_)) { + host_ = fromIPv4MappedIPv6(host_); + } else if (isIPv4(host_)) { + host_ = IPv4.fromString(host_).toString(); + // Host_ = (new IPv4(host)).toString(); + } else if (isIPv6(host_)) { + host_ = IPv6.fromString(host_).toString(); + // Host_ = (new IPv6(host)).toString(); + } else { + throw new TypeError('Invalid IP address'); + } + return (host_ + (scope != null ? scope[0] : '')) as Host; +} + +function toCanonicalHostname(hostname: string): Hostname { + let hostname_ = hostname.trim(); + hostname_ = hostname_.toLowerCase(); + if (hostname_.endsWith('.')) { + hostname_ = hostname_.substring(0, hostname_.length - 1); + } + return hostname_ as Hostname; +} + /** * Given host and port, create an address string. 
*/ @@ -274,34 +407,31 @@ function resolvesZeroIP(ip: Host): Host { async function resolveHostnames( addresses: Array, existingAddresses: Set = new Set(), -): Promise }>> { +): Promise> { const final: Array<{ host: Host; port: Port; - scopes: Array; }> = []; - for (const address of addresses) { - if (isHost(address.host)) { - if (existingAddresses.has(`${address.host}|${address.port}`)) continue; + for (const [host, port] of addresses) { + if (isHost(host)) { + if (existingAddresses.has(`${host}|${port}`)) continue; final.push({ - host: address.host, - port: address.port, - scopes: address.scopes, + host: host, + port: port, }); - existingAddresses.add(`${address.host}|${address.port}`); + existingAddresses.add(`${host}|${port}`); continue; } - const resolvedAddresses = await resolveHostname(address.host); + const resolvedAddresses = await resolveHostname(host); for (const resolvedHost of resolvedAddresses) { const newAddress = { host: resolvedHost, - port: address.port, - scopes: address.scopes, + port: port, }; if (!Validator.isValidIPv4String(resolvedHost)[0]) continue; - if (existingAddresses.has(`${resolvedHost}|${address.port}`)) continue; + if (existingAddresses.has(`${resolvedHost}|${port}`)) continue; final.push(newAddress); - existingAddresses.add(`${resolvedHost}|${address.port}`); + existingAddresses.add(`${resolvedHost}|${port}`); } } return final; @@ -309,7 +439,6 @@ async function resolveHostnames( // TODO: review and fix the `toError` and `fromError` code here. // Right now it's very basic and need fleshing out. 
- function fromError(error: any) { switch (typeof error) { case 'symbol': @@ -476,14 +605,22 @@ function toError( } export { + isIPv4, + isIPv6, + isIPv4MappedIPv6, + isIPv4MappedIPv6Hex, + isIPv4MappedIPv6Dec, + fromIPv4MappedIPv6, isHost, isHostWildcard, isHostname, isPort, parseHost, parseHostname, - parsePort, parseHostOrHostname, + parsePort, + toCanonicalHost, + toCanonicalHostname, buildAddress, parseAddress, isDNSError, diff --git a/src/nodes/NodeConnection.ts b/src/nodes/NodeConnection.ts index 8ea4b5569..a72b8c7d5 100644 --- a/src/nodes/NodeConnection.ts +++ b/src/nodes/NodeConnection.ts @@ -1,49 +1,42 @@ -import type { ContextTimed } from '@matrixai/contexts'; +import type { X509Certificate } from '@peculiar/x509'; +import type { ContextTimed, ContextTimedInput } from '@matrixai/contexts'; import type { PromiseCancellable } from '@matrixai/async-cancellable'; -import type { NodeId } from './types'; +import type { QUICSocket, QUICConnection } from '@matrixai/quic'; import type { Host, Hostname, Port, TLSConfig } from '../network/types'; import type { Certificate } from '../keys/types'; -import type { ClientManifest } from '@matrixai/rpc'; -import type { - QUICSocket, - ClientCryptoOps, - QUICConnection, - Host as QUICHost, -} from '@matrixai/quic'; -import type { ContextTimedInput } from '@matrixai/contexts/dist/types'; -import type { X509Certificate } from '@peculiar/x509'; +import type { NodeId } from './types'; +import type agentClientManifest from './agent/callers'; import Logger from '@matrixai/logger'; import { CreateDestroy } from '@matrixai/async-init/dist/CreateDestroy'; import { status } from '@matrixai/async-init'; import { timedCancellable, context } from '@matrixai/contexts/dist/decorators'; +import { errors as contextErrors } from '@matrixai/contexts'; import { AbstractEvent, EventAll } from '@matrixai/events'; import { QUICClient, events as quicEvents, errors as quicErrors, - utils as quicUtils, } from '@matrixai/quic'; -import { RPCClient 
} from '@matrixai/rpc'; -import { middleware as rpcUtilsMiddleware } from '@matrixai/rpc'; -import { errors as contextErrors } from '@matrixai/contexts'; +import { RPCClient, middleware as rpcUtilsMiddleware } from '@matrixai/rpc'; +import { ConnectionErrorReason, ConnectionErrorCode } from './types'; import * as nodesErrors from './errors'; import * as nodesEvents from './events'; -import { ConnectionErrorReason, ConnectionErrorCode } from './types'; -import * as networkUtils from '../network/utils'; import * as nodesUtils from '../nodes/utils'; import { never } from '../utils'; import config from '../config'; +import * as networkUtils from '../network/utils'; + +type AgentClientManifest = typeof agentClientManifest; /** * Encapsulates the unidirectional client-side connection of one node to another. */ -// eslint-disable-next-line @typescript-eslint/no-unused-vars -- False positive for M -interface NodeConnection extends CreateDestroy {} +interface NodeConnection extends CreateDestroy {} @CreateDestroy({ eventDestroy: nodesEvents.EventNodeConnectionDestroy, eventDestroyed: nodesEvents.EventNodeConnectionDestroyed, }) -class NodeConnection { +class NodeConnection { /** * Hostname is defined if the target's host was resolved from this hostname * Undefined if a Host was directly provided @@ -66,7 +59,8 @@ class NodeConnection { protected logger: Logger; public readonly quicClient: QUICClient | undefined; public readonly quicConnection: QUICConnection; - public readonly rpcClient: RPCClient; + public readonly connectionId: string; + public readonly rpcClient: RPCClient; /** * Dispatches a `EventNodeConnectionClose` in response to any `NodeConnection` @@ -167,7 +161,7 @@ class NodeConnection { } }; - static createNodeConnection( + static createNodeConnection( { targetNodeIds, targetHost, @@ -176,7 +170,7 @@ class NodeConnection { tlsConfig, connectionKeepAliveIntervalTime, connectionKeepAliveTimeoutTime = config.defaultsSystem - .nodesConnectionIdleTimeoutTime, + 
.nodesConnectionIdleTimeoutTimeMin, quicSocket, manifest, logger, @@ -185,32 +179,30 @@ class NodeConnection { targetHost: Host; targetPort: Port; targetHostname?: Hostname; - crypto: ClientCryptoOps; tlsConfig: TLSConfig; connectionKeepAliveIntervalTime?: number; connectionKeepAliveTimeoutTime?: number; quicSocket?: QUICSocket; - manifest: M; + manifest: AgentClientManifest; logger?: Logger; }, ctx?: Partial, - ): PromiseCancellable>; + ): PromiseCancellable; @timedCancellable( true, config.defaultsSystem.nodesConnectionConnectTimeoutTime, ) - static async createNodeConnection( + static async createNodeConnection( { targetNodeIds, targetHost, targetPort, targetHostname, - crypto, tlsConfig, manifest, connectionKeepAliveIntervalTime, connectionKeepAliveTimeoutTime = config.defaultsSystem - .nodesConnectionIdleTimeoutTime, + .nodesConnectionIdleTimeoutTimeMin, quicSocket, logger = new Logger(this.name), }: { @@ -218,16 +210,15 @@ class NodeConnection { targetHost: Host; targetPort: Port; targetHostname?: Hostname; - crypto: ClientCryptoOps; tlsConfig: TLSConfig; - manifest: M; + manifest: AgentClientManifest; connectionKeepAliveIntervalTime?: number; connectionKeepAliveTimeoutTime?: number; quicSocket: QUICSocket; logger?: Logger; }, @context ctx: ContextTimed, - ): Promise> { + ): Promise { logger.info(`Creating ${this.name}`); // Checking if attempting to connect to a wildcard IP if (networkUtils.isHostWildcard(targetHost)) { @@ -265,9 +256,7 @@ class NodeConnection { key: tlsConfig.keyPrivatePem, cert: tlsConfig.certChainPem, }, - crypto: { - ops: crypto, - }, + crypto: nodesUtils.quicClientCrypto, reasonToCode: nodesUtils.reasonToCode, codeToReason: nodesUtils.codeToReason, logger: logger.getChild(QUICClient.name), @@ -297,7 +286,7 @@ class NodeConnection { quicEvents.EventQUICConnectionStream.name, throwFunction, ); - const rpcClient = new RPCClient({ + const rpcClient = new RPCClient({ manifest, middlewareFactory: 
rpcUtilsMiddleware.defaultClientMiddlewareWrapper(), streamFactory: async () => { @@ -320,7 +309,7 @@ class NodeConnection { quicConnection.remoteHost }:${quicConnection.remotePort}]`, ); - const nodeConnection = new this({ + const nodeConnection = new this({ validatedNodeId, nodeId, host: targetHost, @@ -331,6 +320,9 @@ class NodeConnection { hostname: targetHostname, quicClient, quicConnection, + connectionId: Buffer.from(quicConnection.connectionIdShared).toString( + 'base64url', + ), rpcClient, logger: newLogger, }); @@ -367,7 +359,7 @@ class NodeConnection { return nodeConnection; } - static createNodeConnectionReverse({ + static createNodeConnectionReverse({ certChain, nodeId, quicConnection, @@ -377,12 +369,12 @@ class NodeConnection { certChain: Array; nodeId: NodeId; quicConnection: QUICConnection; - manifest: M; + manifest: AgentClientManifest; logger?: Logger; - }): NodeConnection { + }): NodeConnection { logger.info(`Creating ${this.name}`); // Creating RPCClient - const rpcClient = new RPCClient({ + const rpcClient = new RPCClient({ manifest, middlewareFactory: rpcUtilsMiddleware.defaultClientMiddlewareWrapper(), streamFactory: async (_ctx) => { @@ -392,7 +384,7 @@ class NodeConnection { logger: logger.getChild(RPCClient.name), }); // Creating NodeConnection - const nodeConnection = new this({ + const nodeConnection = new this({ validatedNodeId: nodeId, nodeId: nodeId, localHost: quicConnection.localHost as unknown as Host, @@ -404,6 +396,9 @@ class NodeConnection { hostname: undefined, quicClient: undefined, quicConnection, + connectionId: Buffer.from(quicConnection.connectionIdShared).toString( + 'base64url', + ), rpcClient, logger, }); @@ -448,6 +443,7 @@ class NodeConnection { hostname, quicClient, quicConnection, + connectionId, rpcClient, logger, }: { @@ -461,21 +457,21 @@ class NodeConnection { hostname?: Hostname; quicClient?: QUICClient; quicConnection: QUICConnection; - rpcClient: RPCClient; + connectionId: string; + rpcClient: RPCClient; 
logger: Logger; }) { this.validatedNodeId = validatedNodeId; this.nodeId = nodeId; - this.host = quicUtils.toCanonicalIP(host) as unknown as Host; + this.host = networkUtils.toCanonicalHost(host); this.port = port; - this.localHost = quicUtils.resolvesZeroIP( - localHost as unknown as QUICHost, - ) as unknown as Host; + this.localHost = networkUtils.resolvesZeroIP(localHost); this.localPort = localPort; this.certChain = certChain; this.hostname = hostname; this.quicClient = quicClient; this.quicConnection = quicConnection; + this.connectionId = connectionId; this.rpcClient = rpcClient; this.logger = logger; } @@ -539,7 +535,7 @@ class NodeConnection { /** * Gets RPCClient for this node connection */ - public getClient(): RPCClient { + public getClient(): RPCClient { return this.rpcClient; } } diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index 0e85f47b1..a8f3ef08e 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -1,18 +1,8 @@ -import type { LockRequest } from '@matrixai/async-locks'; import type { ResourceAcquire } from '@matrixai/resources'; -import type { ContextTimedInput, ContextTimed } from '@matrixai/contexts'; -import type { ClientCryptoOps, QUICConnection } from '@matrixai/quic'; -import type NodeGraph from './NodeGraph'; -import type { - NodeAddress, - NodeData, - NodeId, - NodeIdString, - SeedNodes, - NodeAddressScope, -} from './types'; +import type { ContextTimed, ContextTimedInput } from '@matrixai/contexts'; +import type { QUICConnection } from '@matrixai/quic'; import type KeyRing from '../keys/KeyRing'; -import type { Key, CertificatePEM } from '../keys/types'; +import type { CertificatePEM } from '../keys/types'; import type { ConnectionData, Host, @@ -20,68 +10,75 @@ import type { Port, TLSConfig, } from '../network/types'; -import type { ServerManifest } from '@matrixai/rpc'; -import type { MDNS, ServicePOJO } from '@matrixai/mdns'; -import Logger from 
'@matrixai/logger'; -import { withF } from '@matrixai/resources'; -import { ready, StartStop } from '@matrixai/async-init/dist/StartStop'; -import { IdInternal } from '@matrixai/id'; -import { Lock, LockBox, Semaphore } from '@matrixai/async-locks'; -import { Timer } from '@matrixai/timer'; -import { timedCancellable, context } from '@matrixai/contexts/dist/decorators'; -import { AbstractEvent, EventAll } from '@matrixai/events'; -import { PromiseCancellable } from '@matrixai/async-cancellable'; +import type { AgentServerManifest } from './agent/handlers'; +import type { NodeId, NodeIdString } from './types'; import { - QUICSocket, - QUICServer, events as quicEvents, + QUICServer, + QUICSocket, utils as quicUtils, } from '@matrixai/quic'; -import { running, status } from '@matrixai/async-init'; -import { RPCServer, middleware as rpcUtilsMiddleware } from '@matrixai/rpc'; -import { events as mdnsEvents, utils as mdnsUtils } from '@matrixai/mdns'; +import { withF } from '@matrixai/resources'; +import { middleware as rpcMiddleware, RPCServer } from '@matrixai/rpc'; +import Logger from '@matrixai/logger'; +import { Timer } from '@matrixai/timer'; +import { IdInternal } from '@matrixai/id'; +import { + ready, + running, + StartStop, + status, +} from '@matrixai/async-init/dist/StartStop'; +import { AbstractEvent, EventAll } from '@matrixai/events'; +import { context, timedCancellable } from '@matrixai/contexts/dist/decorators'; +import { Semaphore } from '@matrixai/async-locks'; +import { PromiseCancellable } from '@matrixai/async-cancellable'; import NodeConnection from './NodeConnection'; +import agentClientManifest from './agent/callers'; import * as nodesUtils from './utils'; import * as nodesErrors from './errors'; import * as nodesEvents from './events'; -import manifestClientAgent from './agent/callers'; import * as keysUtils from '../keys/utils'; import * as networkUtils from '../network/utils'; import * as utils from '../utils'; -import config from '../config'; 
import RateLimiter from '../utils/ratelimiter/RateLimiter'; - -type ManifestClientAgent = typeof manifestClientAgent; +import config from '../config'; type ConnectionAndTimer = { - connection: NodeConnection; + connection: NodeConnection; timer: Timer | null; usageCount: number; }; +type ConnectionsEntry = { + activeConnection: string; + connections: Record; +}; + +type ConnectionInfo = { + host: Host; + hostName: Hostname | undefined; + port: Port; + timeout: number | undefined; + primary: boolean; +}; + +type ActiveConnectionsInfo = { + nodeId: NodeId; + connections: Record; +}; + /** * NodeConnectionManager is a server that manages all node connections. - * It manages both initiated and received connections - * - * It's an event target that emits events for new connections. + * It manages both initiated and received connections. * - * We will need to fully encapsulate all the errors in NCM if we can. - * Otherwise, it goes all the way to PolykeyAgent. + * It acts like a phone call system. + * It can maintain mulitple calls to other nodes. + * There's no guarantee that we need to make it. * - * That means the QUICSocket, QUICServer and QUICClient - * As well as the QUICConnection and QUICStream. - * The NCM basically encapsulates it. - * - * The NCM and NC both must encapsulate all the QUIC transport. - * - * Events: - * - * - connectionManagerStop - * - connectionManagerError - * - nodeConnection - * - nodeConnectionError - * - nodeConnectionStream - * - nodeConnectionDestroy + * Node connections make use of the QUIC protocol. + * The NodeConnectionManager encapsulates `QUICServer`. + * While the NodeConnection encapsulates `QUICClient`. */ interface NodeConnectionManager extends StartStop {} @StartStop({ @@ -103,9 +100,14 @@ class NodeConnectionManager { public readonly connectionFindLocalTimeoutTime: number; /** - * Time to wait to garbage collect un-used node connections. + * Minimum time to wait to garbage collect un-used node connections. 
+ */ + public readonly connectionIdleTimeoutTimeMin: number; + + /** + * Scaling factor to apply to Idle timeout */ - public readonly connectionIdleTimeoutTime: number; + public readonly connectionIdleTimeoutTimeScale: number; /** * Time used to establish `NodeConnection` @@ -136,6 +138,7 @@ class NodeConnectionManager { * Default timeout for RPC handlers */ public readonly rpcCallTimeoutTime: number; + /** * Used to track active hole punching attempts. * Attempts are mapped by a string of `${host}:${port}`. @@ -155,7 +158,7 @@ class NodeConnectionManager { * Used track the active `nodesConnectionSignalFinal` attempts and prevent orphaned promises. * Used to cancel and await the active `nodesConnectionSignalFinal` when stopping. */ - protected activeSignalFinalPs = new Set>(); + protected activeSignalFinalPs = new Set>(); /** * Used to limit signalling requests on a per-requester basis. * This is mainly used to limit a single source node making too many requests through a relay. @@ -164,16 +167,11 @@ class NodeConnectionManager { protected logger: Logger; protected keyRing: KeyRing; - protected nodeGraph: NodeGraph; - protected mdns: MDNS | undefined; protected tlsConfig: TLSConfig; - protected seedNodes: SeedNodes; protected quicSocket: QUICSocket; protected quicServer: QUICServer; - protected quicClientCrypto: ClientCryptoOps; - /** * Data structure to store all NodeConnections. If a connection to a node n does * not exist, no entry for n will exist in the map. Alternatively, if a @@ -184,17 +182,7 @@ class NodeConnectionManager { * A nodeIdString is used for the key here since * NodeIds can't be used to properly retrieve a value from the map. */ - protected connections: Map = new Map(); - - /** - * Tracks `NodeConnection`s that are still running but not part of the connection map. - * These are doppelganger connections created by concurrent connection creation - * between two nodes. These will be cleaned up after all their streams end. 
- */ - protected connectionsDraining: Set> = - new Set(); - - protected connectionLocks: LockBox = new LockBox(); + protected connections: Map = new Map(); protected rpcServer: RPCServer; @@ -209,7 +197,7 @@ class NodeConnectionManager { this.logger.warn( `NodeConnectionManager error caused by ${evt.detail.message}`, ); - this.dispatchEvent(new nodesEvents.EventNodeConnectionClose()); + this.dispatchEvent(new nodesEvents.EventNodeConnectionManagerClose()); }; /** @@ -231,44 +219,47 @@ class NodeConnectionManager { evt: nodesEvents.EventNodeConnectionStream, ) => { if (evt.target == null) utils.never('target should be defined here'); - const nodeConnection = evt.target as NodeConnection; + const nodeConnection = evt.target as NodeConnection; + const connectionId = nodeConnection.connectionId; const nodeId = nodeConnection.validatedNodeId as NodeId; const nodeIdString = nodeId.toString() as NodeIdString; const stream = evt.detail; this.rpcServer.handleStream(stream); - const connectionAndTimer = this.connections.get(nodeIdString); - if (connectionAndTimer != null) { - connectionAndTimer.usageCount += 1; - connectionAndTimer.timer?.cancel(); - connectionAndTimer.timer = null; - void stream.closedP.finally(() => { - connectionAndTimer.usageCount -= 1; - if (connectionAndTimer.usageCount <= 0 && !this.isSeedNode(nodeId)) { - this.logger.debug( - `creating TTL for ${nodesUtils.encodeNodeId(nodeId)}`, - ); - connectionAndTimer.timer = new Timer({ - handler: async () => await this.destroyConnection(nodeId, false), - delay: this.connectionIdleTimeoutTime, - }); - } - }); - } + const connectionsEntry = this.connections.get(nodeIdString); + if (connectionsEntry == null) utils.never('should have a connection entry'); + const connectionAndTimer = connectionsEntry.connections[connectionId]; + if (connectionAndTimer == null) utils.never('should have a connection'); + connectionAndTimer.usageCount += 1; + connectionAndTimer.timer?.cancel(); + connectionAndTimer.timer = null; + 
void stream.closedP.finally(() => { + connectionAndTimer.usageCount -= 1; + if (connectionAndTimer.usageCount <= 0) { + const delay = this.getStickyTimeoutValue( + nodeId, + connectionsEntry.activeConnection === + connectionAndTimer.connection.connectionId, + ); + this.logger.debug( + `creating TTL for ${nodesUtils.encodeNodeId(nodeId)}`, + ); + connectionAndTimer.timer = new Timer({ + handler: async () => + await this.destroyConnection(nodeId, false, connectionId), + delay, + }); + } + }); }; protected handleEventNodeConnectionDestroyed = async ( evt: nodesEvents.EventNodeConnectionDestroyed, ) => { if (evt.target == null) utils.never('target should be defined here'); - const nodeConnection = evt.target as NodeConnection; + const nodeConnection = evt.target as NodeConnection; const nodeId = nodeConnection.validatedNodeId as NodeId; - const nodeIdString = nodeId.toString() as NodeIdString; - // To avoid deadlock only in the case where this is called - // we want to check for destroying connection and read lock - // If the connection is calling destroyCallback then it SHOULD exist in the connection map. - // Already locked so already destroying - if (this.connectionLocks.isLocked(nodeIdString)) return; - await this.destroyConnection(nodeId, true); + const connectionId = nodeConnection.connectionId; + await this.destroyConnection(nodeId, true, connectionId); nodeConnection.removeEventListener( nodesEvents.EventNodeConnectionStream.name, this.handleEventNodeConnectionStream, @@ -342,18 +333,20 @@ class NodeConnectionManager { } }; + /** + * Constructs the `NodeConnectionManager`. 
+ */ public constructor({ keyRing, - nodeGraph, - mdns, tlsConfig, - seedNodes = {}, connectionFindConcurrencyLimit = config.defaultsSystem .nodesConnectionFindConcurrencyLimit, connectionFindLocalTimeoutTime = config.defaultsSystem .nodesConnectionFindLocalTimeoutTime, - connectionIdleTimeoutTime = config.defaultsSystem - .nodesConnectionIdleTimeoutTime, + connectionIdleTimeoutTimeMin = config.defaultsSystem + .nodesConnectionIdleTimeoutTimeMin, + connectionIdleTimeoutTimeScale = config.defaultsSystem + .nodesConnectionIdleTimeoutTimeScale, connectionConnectTimeoutTime = config.defaultsSystem .nodesConnectionConnectTimeoutTime, connectionKeepAliveTimeoutTime = config.defaultsSystem @@ -364,16 +357,15 @@ class NodeConnectionManager { .nodesConnectionHolePunchIntervalTime, rpcParserBufferSize = config.defaultsSystem.rpcParserBufferSize, rpcCallTimeoutTime = config.defaultsSystem.rpcCallTimeoutTime, + logger, }: { keyRing: KeyRing; - nodeGraph: NodeGraph; - mdns?: MDNS; tlsConfig: TLSConfig; - seedNodes?: SeedNodes; connectionFindConcurrencyLimit?: number; connectionFindLocalTimeoutTime?: number; - connectionIdleTimeoutTime?: number; + connectionIdleTimeoutTimeMin?: number; + connectionIdleTimeoutTimeScale?: number; connectionConnectTimeoutTime?: number; connectionKeepAliveTimeoutTime?: number; connectionKeepAliveIntervalTime?: number; @@ -384,63 +376,28 @@ class NodeConnectionManager { }) { this.logger = logger ?? 
new Logger(this.constructor.name); this.keyRing = keyRing; - this.nodeGraph = nodeGraph; this.tlsConfig = tlsConfig; - // Filter out own node ID - const nodeIdEncodedOwn = nodesUtils.encodeNodeId(keyRing.getNodeId()); - this.seedNodes = utils.filterObject(seedNodes, ([k]) => { - return k !== nodeIdEncodedOwn; - }) as SeedNodes; this.connectionFindConcurrencyLimit = connectionFindConcurrencyLimit; this.connectionFindLocalTimeoutTime = connectionFindLocalTimeoutTime; - this.connectionIdleTimeoutTime = connectionIdleTimeoutTime; + this.connectionIdleTimeoutTimeMin = connectionIdleTimeoutTimeMin; + this.connectionIdleTimeoutTimeScale = connectionIdleTimeoutTimeScale; this.connectionConnectTimeoutTime = connectionConnectTimeoutTime; this.connectionKeepAliveTimeoutTime = connectionKeepAliveTimeoutTime; this.connectionKeepAliveIntervalTime = connectionKeepAliveIntervalTime; this.connectionHolePunchIntervalTime = connectionHolePunchIntervalTime; this.rpcParserBufferSize = rpcParserBufferSize; this.rpcCallTimeoutTime = rpcCallTimeoutTime; - // Note that all buffers allocated for crypto operations is using - // `allocUnsafeSlow`. Which ensures that the underlying `ArrayBuffer` - // is not shared. Also, all node buffers satisfy the `ArrayBuffer` interface. 
- const quicClientCrypto = { - async randomBytes(data: ArrayBuffer): Promise { - const randomBytes = keysUtils.getRandomBytes(data.byteLength); - randomBytes.copy(utils.bufferWrap(data)); - }, - }; - const quicServerCrypto = { - key: keysUtils.generateKey(), - ops: { - async sign(key: ArrayBuffer, data: ArrayBuffer): Promise { - const sig = keysUtils.macWithKey( - utils.bufferWrap(key) as Key, - utils.bufferWrap(data), - ); - // Convert the MAC to an ArrayBuffer - return sig.slice().buffer; - }, - async verify( - key: ArrayBuffer, - data: ArrayBuffer, - sig: ArrayBuffer, - ): Promise { - return keysUtils.authWithKey( - utils.bufferWrap(key) as Key, - utils.bufferWrap(data), - utils.bufferWrap(sig), - ); - }, - }, - }; + const quicSocket = new QUICSocket({ + resolveHostname: () => { + // `NodeConnectionManager` must resolve all hostnames before it reaches + // `QUICSocket`. + utils.never(); + }, logger: this.logger.getChild(QUICSocket.name), }); - // By the time we get to using QUIC server, all hostnames would have been - // resolved, we would not resolve hostnames inside the QUIC server. - // This is because node connections require special hostname resolution - // procedures. 
const quicServer = new QUICServer({ + crypto: nodesUtils.quicServerCrypto, config: { maxIdleTimeout: connectionKeepAliveTimeoutTime, keepAliveIntervalTime: connectionKeepAliveIntervalTime, @@ -449,16 +406,14 @@ class NodeConnectionManager { verifyPeer: true, verifyCallback: nodesUtils.verifyClientCertificateChain, }, - crypto: quicServerCrypto, socket: quicSocket, reasonToCode: nodesUtils.reasonToCode, codeToReason: nodesUtils.codeToReason, minIdleTimeout: connectionConnectTimeoutTime, logger: this.logger.getChild(QUICServer.name), }); - // Setting up RPCServer const rpcServer = new RPCServer({ - middlewareFactory: rpcUtilsMiddleware.defaultServerMiddlewareWrapper( + middlewareFactory: rpcMiddleware.defaultServerMiddlewareWrapper( undefined, this.rpcParserBufferSize, ), @@ -466,12 +421,9 @@ class NodeConnectionManager { timeoutTime: this.rpcCallTimeoutTime, logger: this.logger.getChild(RPCServer.name), }); - - this.quicClientCrypto = quicClientCrypto; this.quicSocket = quicSocket; this.quicServer = quicServer; this.rpcServer = rpcServer; - this.mdns = mdns; } /** @@ -490,25 +442,30 @@ class NodeConnectionManager { return this.quicSocket.port as unknown as Port; } + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + public get type(): 'ipv4' | 'ipv6' | 'ipv4&ipv6' { + return this.quicSocket.type; + } + public async start({ + agentService, host = '::' as Host, port = 0 as Port, - reuseAddr = false, - ipv6Only = false, - manifest = {}, + reuseAddr, + ipv6Only, }: { + agentService: AgentServerManifest; host?: Host; port?: Port; reuseAddr?: boolean; ipv6Only?: boolean; - manifest?: ServerManifest; - } = {}) { + }) { const address = networkUtils.buildAddress(host, port); this.logger.info(`Start ${this.constructor.name} on ${address}`); // We should expect that seed nodes are already in the node manager // It should not be managed here! 
- await this.rpcServer.start({ manifest }); + await this.rpcServer.start({ manifest: agentService }); // Setting up QUICSocket await this.quicSocket.start({ host, @@ -516,6 +473,7 @@ class NodeConnectionManager { reuseAddr, ipv6Only, }); + this.quicSocket.addEventListener( quicEvents.EventQUICSocketError.name, this.handleEventQUICError, @@ -547,19 +505,21 @@ class NodeConnectionManager { ); this.quicSocket.addEventListener(EventAll.name, this.handleEventAll); this.rateLimiter.startRefillInterval(); - // MDNS Start - if (this.mdns != null) { - this.mdns.registerService({ - name: nodesUtils.encodeNodeId(this.keyRing.getNodeId()), - port: this.quicServer.port, - type: 'polykey', - protocol: 'udp', - }); - } + + await this.rpcServer.start({ manifest: agentService }); + this.logger.info(`Started ${this.constructor.name}`); } - public async stop() { + /** + * What doe stop do with force? + * Figure it out. + */ + public async stop({ + force = false, + }: { + force?: boolean; + } = {}) { this.logger.info(`Stop ${this.constructor.name}`); this.rateLimiter.stop(); @@ -595,29 +555,22 @@ class NodeConnectionManager { this.quicSocket.removeEventListener(EventAll.name, this.handleEventAll); const destroyProms: Array> = []; - for (const [nodeId, connAndTimer] of this.connections) { - if (connAndTimer.connection == null) continue; + for (const [nodeId] of this.connections) { // It exists so we want to destroy it const destroyProm = this.destroyConnection( IdInternal.fromString(nodeId), - true, + force, ); destroyProms.push(destroyProm); } - for (const drainingConnection of this.connectionsDraining) { - const destroyProm = drainingConnection.destroy({ force: true }); - drainingConnection.quicConnection.destroyStreams(); - destroyProms.push(destroyProm); - } await Promise.all(destroyProms); - const signallingProms: Array> = []; + const signallingProms: Array | Promise> = []; for (const [, activePunch] of this.activeHolePunchPs) { signallingProms.push(activePunch); 
activePunch.cancel(); } for (const activeSignal of this.activeSignalFinalPs) { signallingProms.push(activeSignal); - activeSignal.cancel(); } await Promise.allSettled(signallingProms); await this.quicServer.stop({ force: true }); @@ -632,14 +585,12 @@ class NodeConnectionManager { * itself is such that we can pass targetNodeId as a parameter (as opposed to * an acquire function with no parameters). * @param targetNodeId Id of target node to communicate with - * @param ctx * @returns ResourceAcquire Resource API for use in with contexts */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public async acquireConnection( + public acquireConnection( targetNodeId: NodeId, - ctx?: Partial, - ): Promise>> { + ): ResourceAcquire { if (this.keyRing.getNodeId().equals(targetNodeId)) { this.logger.warn('Attempting connection to our own NodeId'); } @@ -647,11 +598,17 @@ class NodeConnectionManager { this.logger.debug( `acquiring connection to node ${nodesUtils.encodeNodeId(targetNodeId)}`, ); - const connectionAndTimer = await this.getConnection( - targetNodeId, - undefined, - ctx, - ); + const targetNodeIdString = targetNodeId.toString() as NodeIdString; + const connectionsEntry = this.connections.get(targetNodeIdString); + if (connectionsEntry == null) { + throw Error('TMP IMP connection should exist'); + } + const connectionAndTimer = + connectionsEntry.connections[connectionsEntry.activeConnection]; + if (connectionAndTimer == null) { + utils.never('ConnectionAndTimer should exist'); + } + // Increment usage count, and cancel timer connectionAndTimer.usageCount += 1; connectionAndTimer.timer?.cancel(); @@ -662,17 +619,20 @@ class NodeConnectionManager { // Decrement usage count and set up TTL if needed. // We're only setting up TTLs for non-seed nodes. 
connectionAndTimer.usageCount -= 1; - if ( - connectionAndTimer.usageCount <= 0 && - !this.isSeedNode(targetNodeId) - ) { + if (connectionAndTimer.usageCount <= 0) { this.logger.debug( `creating TTL for ${nodesUtils.encodeNodeId(targetNodeId)}`, ); + + const delay = this.getStickyTimeoutValue( + targetNodeId, + connectionsEntry.activeConnection === + connectionAndTimer.connection.connectionId, + ); connectionAndTimer.timer = new Timer({ handler: async () => await this.destroyConnection(targetNodeId, false), - delay: this.connectionIdleTimeoutTime, + delay, }); } }, @@ -688,26 +648,13 @@ class NodeConnectionManager { * for use with normal arrow function * @param targetNodeId Id of target node to communicate with * @param f Function to handle communication - * @param ctx */ - public withConnF( - targetNodeId: NodeId, - f: (conn: NodeConnection) => Promise, - ctx?: Partial, - ): PromiseCancellable; - @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - @timedCancellable( - true, - (nodeConnectionManager: NodeConnectionManager) => - nodeConnectionManager.connectionConnectTimeoutTime, - ) public async withConnF( targetNodeId: NodeId, - f: (conn: NodeConnection) => Promise, - @context ctx: ContextTimed, + f: (conn: NodeConnection) => Promise, ): Promise { return await withF( - [await this.acquireConnection(targetNodeId, ctx)], + [this.acquireConnection(targetNodeId)], async ([conn]) => { return await f(conn); }, @@ -721,17 +668,13 @@ class NodeConnectionManager { * for use with a generator function * @param targetNodeId Id of target node to communicate with * @param g Generator function to handle communication - * @param ctx */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async *withConnG( targetNodeId: NodeId, - g: ( - conn: NodeConnection, - ) => AsyncGenerator, - ctx?: Partial, + g: (conn: NodeConnection) => AsyncGenerator, ): AsyncGenerator { - const acquire = await this.acquireConnection(targetNodeId, ctx); + const acquire = 
this.acquireConnection(targetNodeId); const [release, conn] = await acquire(); let caughtError; try { @@ -747,484 +690,146 @@ class NodeConnectionManager { } /** - * This will return an existing connection or establish a new one as needed. - * If no address is provided it will preform a kademlia search for the node. + * Starts a connection. + * */ - protected getConnection( - targetNodeId: NodeId, - addresses?: Array, - ctx?: Partial, - ): PromiseCancellable; - @timedCancellable( - true, - (nodeConnectionManager: NodeConnectionManager) => - nodeConnectionManager.connectionConnectTimeoutTime, - ) - protected async getConnection( - targetNodeId: NodeId, - addresses: Array | undefined, - @context ctx: ContextTimed, - ): Promise { - // If the connection already exists then we need to return it. - const existingConnection = await this.getExistingConnection(targetNodeId); - if (existingConnection != null) return existingConnection; - - // If there were no addresses provided then we need to find them. - if (addresses == null || addresses.length === 0) { - // Find the node - addresses = await this.findNodeAll(targetNodeId, undefined, ctx); - if (addresses.length === 0) { - throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); - } - } - // Then we just get the connection, it should already exist. 
- return this.getConnectionWithAddresses(targetNodeId, addresses, ctx); - } - - protected async getExistingConnection( - targetNodeId: NodeId, - ): Promise { - const targetNodeIdString = targetNodeId.toString() as NodeIdString; - return await this.connectionLocks.withF( - [targetNodeIdString, Lock], - async () => { - const connAndTimer = this.connections.get(targetNodeIdString); - if (connAndTimer != null) { - this.logger.debug( - `Found existing NodeConnection for ${nodesUtils.encodeNodeId( - targetNodeId, - )}`, - ); - return connAndTimer; - } - this.logger.debug( - `no existing NodeConnection for ${nodesUtils.encodeNodeId( - targetNodeId, - )}`, - ); + public async createConnection( + nodeIds: Array, + host: Host, + port: Port, + ctx?: Partial, + ): Promise { + const nodeConnection = await NodeConnection.createNodeConnection( + { + targetNodeIds: nodeIds, + manifest: agentClientManifest, + targetHost: host, + targetPort: port, + tlsConfig: this.tlsConfig, + connectionKeepAliveIntervalTime: this.connectionKeepAliveIntervalTime, + connectionKeepAliveTimeoutTime: this.connectionKeepAliveTimeoutTime, + quicSocket: this.quicSocket, + logger: this.logger.getChild( + `${NodeConnection.name} [${host}:${port}]`, + ), }, + ctx, ); + this.addConnection(nodeConnection.validatedNodeId, nodeConnection); + return nodeConnection; } /** - * This gets a connection with a known address. - * @param targetNodeId Id of node we are creating connection to. - * @param address - The address to connect on if specified. If not provided we attempt a kademlia search. 
- * @param ctx - * @returns ConnectionAndLock that was created or exists in the connection map - */ - protected getConnectionWithAddresses( - targetNodeId: NodeId, - addresses: Array, - ctx?: Partial, - ): PromiseCancellable; - @timedCancellable( - true, - (nodeConnectionManager: NodeConnectionManager) => - nodeConnectionManager.connectionConnectTimeoutTime, - ) - protected async getConnectionWithAddresses( - targetNodeId: NodeId, - addresses: Array, - @context ctx: ContextTimed, - ): Promise { - if (addresses.length === 0) { - throw new nodesErrors.ErrorNodeConnectionManagerNodeAddressRequired(); - } - const targetNodeIdString = targetNodeId.toString() as NodeIdString; - const existingConnection = await this.getExistingConnection(targetNodeId); - if (existingConnection != null) return existingConnection; - const targetNodeIdEncoded = nodesUtils.encodeNodeId(targetNodeId); - let timeoutDivisions = 0; - const addressGroups: { - local: Array; - global: Array; - } = { local: [], global: [] }; - for (const address of addresses) { - const scope = address.scopes.includes('local') ? 'local' : 'global'; - // If this is the first time an addressGroup has had an address added, the timeout divisions must be incremented. 
- if (addressGroups[scope].length === 0) { - timeoutDivisions++; - } - addressGroups[scope].push(address); - } - this.logger.debug(`Getting NodeConnection for ${targetNodeIdEncoded}`); - return await this.connectionLocks - .withF([targetNodeIdString, Lock, ctx], async () => { - this.logger.debug(`acquired lock for ${targetNodeIdEncoded}`); - // Attempting a multi-connection for the target node using local addresses - const timeout = ctx.timer.getTimeout() / timeoutDivisions; - let results: Map | undefined; - if (addressGroups.local.length !== 0) { - results = await this.establishMultiConnection( - [targetNodeId], - addressGroups.local, - { - signal: ctx.signal, - timer: timeout, - }, - ); - } - // If there are no results from the attempted local connections, attempt a multi-connection for the target node using external addresses - if (results == null || results.size === 0) { - results = await this.establishMultiConnection( - [targetNodeId], - addressGroups.global, - { - signal: ctx.signal, - timer: timeout, - }, - ); - } - // Should be a single result. - for (const [, connAndTimer] of results) { - return connAndTimer; - } - // Should throw before reaching here - utils.never(); - }) - .finally(() => { - this.logger.debug(`lock finished for ${targetNodeIdEncoded}`); - }); - } - - /** - * This will connect to the provided address looking for any of the listed nodes. - * Locking is not handled at this level, it must be handled by the caller. - * @param nodeIds - * @param addresses - * @param ctx - * @protected + * Creates multiple connections looking for a single node. Once the connection + * has been established then all pending connections are cancelled. + * This will return the first connection made or timeout. 
*/ - protected establishMultiConnection( + public createConnectionMultiple( nodeIds: Array, - addresses: Array, + addresses: Array<[Host, Port]>, ctx?: Partial, - ): PromiseCancellable>; + ): Promise; + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) @timedCancellable( true, (nodeConnectionManager: NodeConnectionManager) => nodeConnectionManager.connectionConnectTimeoutTime, ) - protected async establishMultiConnection( + public async createConnectionMultiple( nodeIds: Array, - addresses: Array, + addresses: Array<[Host, Port]>, @context ctx: ContextTimed, - ): Promise> { - const nodesEncoded = nodeIds.map((v) => nodesUtils.encodeNodeId(v)); - this.logger.debug(`getting multi-connection for ${nodesEncoded}`); - if (nodeIds.length === 0) { - throw new nodesErrors.ErrorNodeConnectionManagerNodeIdRequired(); - } - if (addresses.length === 0) { - throw new nodesErrors.ErrorNodeConnectionManagerNodeAddressRequired(); - } - const connectionsResults: Map = new Map(); - // 1. short circuit any existing connections - const nodesShortlist: Set = new Set(); - for (const nodeId of nodeIds) { - const nodeIdString = nodeId.toString() as NodeIdString; - const connAndTimer = this.connections.get(nodeIdString); - if (connAndTimer == null) { - nodesShortlist.add(nodeIdString); - continue; - } - this.logger.debug( - `found existing connection for ${nodesUtils.encodeNodeId(nodeId)}`, - ); - connectionsResults.set(nodeIdString, connAndTimer); - } - // 2. resolve the addresses into a full list. Any host names need to be resolved. 
- // If we have existing nodes then we have existing addresses - const existingAddresses: Set = new Set(); - for (const [, connAndTimer] of connectionsResults) { - const address = `${connAndTimer.connection.host}|${connAndTimer.connection.port}`; - existingAddresses.add(address); + ): Promise { + // Setting up intermediate signal + const abortControllerMultiConn = new AbortController(); + const handleAbort = () => { + abortControllerMultiConn.abort(ctx.signal.reason); + }; + if (ctx.signal.aborted) { + handleAbort(); + } else { + ctx.signal.addEventListener('abort', handleAbort, { + once: true, + }); } - const resolvedAddresses = await networkUtils.resolveHostnames( - addresses, - existingAddresses, - ); - if (ctx.signal.aborted) return connectionsResults; - // 3. Concurrently attempt connections - // Abort signal for cleaning up - const abortController = new AbortController(); - const signal = abortController.signal; - - ctx.signal.addEventListener( - 'abort', - () => { - abortController.abort(ctx.signal.reason); - }, - { once: true }, - ); + const newCtx = { + timer: ctx.timer, + signal: abortControllerMultiConn.signal, + }; + + const attempts = addresses.map(([host, port]) => { + return this.createConnection(nodeIds, host, port, newCtx); + }); - const nodesShortlistArray: Array = []; - for (const nodeIdString of nodesShortlist) { - nodesShortlistArray.push(IdInternal.fromString(nodeIdString)); - } - const cleanUpReason = Symbol('CleanUpReason'); - this.logger.debug( - `attempting connections for ${nodesShortlistArray.map((v) => - nodesUtils.encodeNodeId(v), - )}`, - ); - const connProms = resolvedAddresses.map((address) => - this.establishSingleConnection( - nodesShortlistArray, - address, - connectionsResults, - { timer: ctx.timer, signal }, - ).finally(() => { - if (connectionsResults.size === nodeIds.length) { - // We have found all nodes, clean up remaining connections - abortController.abort(cleanUpReason); - } - }), - ); - // We race the connections with 
timeout try { - this.logger.debug(`awaiting connections`); - await Promise.allSettled(connProms); - this.logger.debug(`awaiting connections resolved`); + // Await first success + return await Promise.any(attempts).catch((e) => { + throw new nodesErrors.ErrorNodeConnectionTimeout(undefined, { + cause: e, + }); + }); } finally { - // Cleaning up - this.logger.debug(`cleaning up`); - abortController.abort(cleanUpReason); - await Promise.allSettled(connProms); - } - if (connectionsResults.size === 0) { - throw new nodesErrors.ErrorNodeConnectionManagerMultiConnectionFailed( - undefined, - { - cause: new AggregateError(await Promise.allSettled(connProms)), - }, - ); + // Abort and clean up the rest + abortControllerMultiConn.abort(Error('TMP IMP clean up')); + await Promise.allSettled(attempts); + ctx.signal.removeEventListener('abort', handleAbort); } - return connectionsResults; } /** - * Used internally by getMultiConnection to attempt a single connection. - * Locking is not done at this stage, it must be done at a higher level. - * This will do the following... - * 1. Attempt the connection - * 2. On success, do final setup and add connection to result and connection map. - * 3. If already in the map it will clean up connection. + * This will start a new connection using a signalling node to coordinate hole punching. */ - protected async establishSingleConnection( - nodeIds: Array, - address: { - host: Host; - port: Port; - scopes: Array; - }, - connectionsResults: Map, - ctx: ContextTimed, - ): Promise { - // TODO: do we bother with a concurrency limit for now? It's simple to use a semaphore. - // TODO: if all connections fail then this needs to throw. Or does it? Do we just report the allSettled result? - // 1. 
attempt connection to an address - this.logger.debug( - `establishing single connection for address ${address.host}:${address.port}`, - ); - if (address.scopes.includes('global')) { - // Get updated address from ice procedure, using first result for now - const result = await this.initiateHolePunch(nodeIds, ctx); - for (const newAddress of result) { - if (newAddress != null) { - this.logger.debug( - `initiateHolePunch returned new ${newAddress.host}:${newAddress.port} vs old ${address.host}:${address.port}`, - ); - address.host = newAddress.host as Host; - address.port = newAddress.port; - } - } + public async createConnectionPunch( + nodeIdTarget: NodeId, + nodeIdSignaller: NodeId, + ctx?: Partial, + ): Promise { + // Get the signaller node from the existing connections + if (!this.hasConnection(nodeIdSignaller)) { + throw Error('TMP IMP no existing connection to signaller'); } - const connection = - await NodeConnection.createNodeConnection( - { - targetNodeIds: nodeIds, - manifest: manifestClientAgent, - crypto: this.quicClientCrypto, - targetHost: address.host, - targetPort: address.port, - tlsConfig: this.tlsConfig, - connectionKeepAliveIntervalTime: this.connectionKeepAliveIntervalTime, - connectionKeepAliveTimeoutTime: this.connectionKeepAliveTimeoutTime, - quicSocket: this.quicSocket, - logger: this.logger.getChild( - `${NodeConnection.name} [${address.host}:${address.port}]`, - ), - }, - ctx, - ).catch((e) => { - this.logger.debug( - `establish single connection failed for ${address.host}:${address.port} with ${e.message}`, + const { host, port } = await this.withConnF( + nodeIdSignaller, + async (conn) => { + const client = conn.getClient(); + const nodeIdSource = this.keyRing.getNodeId(); + // Data is just `` concatenated + const data = Buffer.concat([nodeIdSource, nodeIdTarget]); + const signature = keysUtils.signWithPrivateKey( + this.keyRing.keyPair, + data, ); - throw e; - }); - // 2. 
if established then add to result map - const nodeId = connection.nodeId; - const nodeIdString = nodeId.toString() as NodeIdString; - if (connectionsResults.has(nodeIdString)) { - this.logger.debug( - `single connection already existed, cleaning up ${address.host}:${address.port}`, - ); - // 3. if already exists then clean up - await connection.destroy({ force: true }); - // I can only see this happening as a race condition with creating a forward connection and receiving a reverse. - return; - } - // Final setup - const newConnAndTimer = this.addConnection(nodeId, connection); - // We can assume connection was established and destination was valid, we can add the target to the nodeGraph - connectionsResults.set(nodeIdString, newConnAndTimer); - const connectionData: ConnectionData = { - remoteNodeId: connection.nodeId, - remoteHost: connection.host, - remotePort: connection.port, - }; - this.dispatchEvent( - new nodesEvents.EventNodeConnectionManagerConnectionForward({ - detail: connectionData, - }), - ); - this.logger.debug( - `Created NodeConnection for ${nodesUtils.encodeNodeId( - nodeId, - )} on ${address}`, + const addressMessage = + await client.methods.nodesConnectionSignalInitial( + { + targetNodeIdEncoded: nodesUtils.encodeNodeId(nodeIdTarget), + signature: signature.toString('base64url'), + }, + ctx, + ); + return { + host: addressMessage.host as Host, + port: addressMessage.port as Port, + }; + }, ); + return await this.createConnection([nodeIdTarget], host, port, ctx); } /** - * This will take a `QUICConnection` emitted by the `QUICServer` and handle adding it to the connection map. - * If a connection already exists within the connection map for that node ID. - * The `STONITH` mechanism is used to select one connection to keep. - * The connection with the 'lower' `connectionIdShared` is kept while the - * other is destroyed. - * Depending on the decision, the existing connection may be swapped out for - * the new one. 
- * If there are any streams active on the duplicate connection then they are - * allowed to gracefully end before the connection is fully destroyed. - * The duplicate connection is tracked in a map and cleans up when it is done - * draining. These duplicate connections are forced to close when the NCM is - * stopped. + * Adds connection to the connections map. Preforms some checks and lifecycle hooks. + * This code is shared between the reverse and forward connection creation. + * + * Multiple connections can be added for a single NodeId, but the connection + * with the 'lowest' `connectionId` will be used. The remaining + * connections will be left to timeout gracefully. */ - @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - protected handleConnectionReverse(quicConnection: QUICConnection) { - // Checking NodeId - // No specific error here, validation is handled by the QUICServer - const certChain = quicConnection.getRemoteCertsChain().map((der) => { - const cert = keysUtils.certFromPEM( - quicUtils.derToPEM(der) as CertificatePEM, - ); - if (cert == null) utils.never(); - return cert; - }); - if (certChain == null) utils.never(); - const nodeId = keysUtils.certNodeId(certChain[0]); - if (nodeId == null) utils.never(); + protected addConnection( + nodeId: NodeId, + nodeConnection: NodeConnection, + ): ConnectionAndTimer { const nodeIdString = nodeId.toString() as NodeIdString; - const nodeConnectionNew = - NodeConnection.createNodeConnectionReverse({ - nodeId, - certChain, - manifest: manifestClientAgent, - quicConnection: quicConnection, - logger: this.logger.getChild( - `${NodeConnection.name} [${nodesUtils.encodeNodeId(nodeId)}@${ - quicConnection.remoteHost - }:${quicConnection.remotePort}]`, - ), - }); - // Check if the connection already exists under that nodeId and reject the connection if so - if (this.connections.has(nodeIdString)) { - // We need to decide which one to reject, for this we compare the connection IDs. 
- const existingConnAndTimer = this.connections.get(nodeIdString)!; - const existingConnection = existingConnAndTimer.connection.quicConnection; - const existingId = Buffer.from(existingConnection.connectionIdShared); - const newId = Buffer.from(quicConnection.connectionIdShared); - if (existingId.compare(newId) <= 0) { - // Keep existing - this.logger.debug( - 'handling duplicate connection, keeping existing connection', - ); - // Temp handling for events - const handleEventNodeConnectionStreamTemp = ( - evt: nodesEvents.EventNodeConnectionStream, - ) => { - const stream = evt.detail; - this.rpcServer.handleStream(stream); - }; - nodeConnectionNew.addEventListener( - nodesEvents.EventNodeConnectionStream.name, - handleEventNodeConnectionStreamTemp, - ); - // Clean up new connection in the background - this.connectionsDraining.add(nodeConnectionNew); - void utils - .sleep(100) - .then(async () => nodeConnectionNew.destroy({ force: false })) - .finally(() => { - nodeConnectionNew.removeEventListener( - nodesEvents.EventNodeConnectionStream.name, - handleEventNodeConnectionStreamTemp, - ); - this.connectionsDraining.delete(nodeConnectionNew); - }); - return; - } else { - // Keeping new - this.logger.debug( - 'handling duplicate connection, keeping new connection', - ); - const nodeConnectionOld = existingConnAndTimer.connection; - // Swap out the existing connection with the new one - nodeConnectionOld.removeEventListener( - EventAll.name, - this.handleEventAll, - ); - nodeConnectionOld.removeEventListener( - nodesEvents.EventNodeConnectionDestroyed.name, - this.handleEventNodeConnectionDestroyed, - ); - this.connections.delete(nodeIdString); - this.addConnection(nodeId, nodeConnectionNew); - // Clean up existing connection in the background - this.connectionsDraining.add(nodeConnectionOld); - void utils - .sleep(100) - .then(async () => nodeConnectionOld.destroy({ force: false })) - .finally(() => { - nodeConnectionOld.removeEventListener( - 
nodesEvents.EventNodeConnectionStream.name, - this.handleEventNodeConnectionStream, - ); - this.connectionsDraining.delete(nodeConnectionOld); - }); - // Destroying TTL timer - if (existingConnAndTimer.timer != null) { - existingConnAndTimer.timer.cancel(); - } - } - } else { - // Add the new connection into the map - this.addConnection(nodeId, nodeConnectionNew); - } - } - - /** - * Adds connection to the connections map. Preforms some checks and lifecycle hooks. - * This code is shared between the reverse and forward connection creation. - */ - protected addConnection( - nodeId: NodeId, - nodeConnection: NodeConnection, - ): ConnectionAndTimer { - const nodeIdString = nodeId.toString() as NodeIdString; - // Check if exists in map, this should never happen but better safe than sorry. - if (this.connections.has(nodeIdString)) utils.never(); + const connectionId = nodeConnection.connectionId; // Setting up events nodeConnection.addEventListener( nodesEvents.EventNodeConnectionStream.name, @@ -1238,27 +843,55 @@ class NodeConnectionManager { ); // Creating TTL timeout. - // We don't create a TTL for seed nodes. - const timeToLiveTimer = !this.isSeedNode(nodeId) - ? 
new Timer({ - handler: async () => await this.destroyConnection(nodeId, false), - delay: this.connectionIdleTimeoutTime, - }) - : null; // Add to map const newConnAndTimer: ConnectionAndTimer = { connection: nodeConnection, - timer: timeToLiveTimer, + timer: null, usageCount: 0, }; - this.connections.set(nodeIdString, newConnAndTimer); + + // Adding the new connection into the connection map + + let entry = this.connections.get(nodeIdString); + if (entry == null) { + // Creating a new entry + newConnAndTimer.timer = new Timer({ + handler: async () => + await this.destroyConnection(nodeId, false, connectionId), + delay: this.getStickyTimeoutValue(nodeId, true), + }); + entry = { + activeConnection: connectionId, + connections: { + [connectionId]: newConnAndTimer, + }, + }; + this.connections.set(nodeIdString, entry); + } else { + newConnAndTimer.timer = new Timer({ + handler: async () => + await this.destroyConnection(nodeId, false, connectionId), + delay: this.getStickyTimeoutValue( + nodeId, + entry.activeConnection > connectionId, + ), + }); + // Updating existing entry + entry.connections[connectionId] = newConnAndTimer; + // If the new connection ID is less than the old then replace it + if (entry.activeConnection > connectionId) { + entry.activeConnection = connectionId; + } + } + + // Dispatch the connection event const connectionData: ConnectionData = { remoteNodeId: nodeConnection.nodeId, remoteHost: nodeConnection.host, remotePort: nodeConnection.port, }; this.dispatchEvent( - new nodesEvents.EventNodeConnectionManagerConnectionReverse({ + new nodesEvents.EventNodeConnectionManagerConnection({ detail: connectionData, }), ); @@ -1266,32 +899,115 @@ class NodeConnectionManager { } /** - * Removes the connection from the connection man and destroys it. 
+ * Gets the existing active connection for the target node + */ + public getConnection(nodeId): ConnectionAndTimer | undefined { + const nodeIdString = nodeId.toString() as NodeIdString; + const connectionsEntry = this.connections.get(nodeIdString); + if (connectionsEntry == null) return; + return connectionsEntry.connections[connectionsEntry.activeConnection]; + } + + /** + * Removes the connection from the connection map and destroys it. + * If the connectionId is specified then just that connection is destroyed. + * If no connectionId is specified then all connections for that node are destroyed. + * * @param targetNodeId Id of node we are destroying connection to - * @param force + * @param force - if true force the connection to end with error. + * @param connectionIdTarget - if specified destroys only the desired connection. */ - protected async destroyConnection( + public async destroyConnection( targetNodeId: NodeId, force: boolean, + connectionIdTarget?: string, ): Promise { const targetNodeIdString = targetNodeId.toString() as NodeIdString; - return await this.connectionLocks.withF( - [targetNodeIdString, Lock], - async () => { - const connAndTimer = this.connections.get(targetNodeIdString); - if (connAndTimer?.connection == null) return; + const connectionsEntry = this.connections.get(targetNodeIdString); + // No entry then nothing to destroy + if (connectionsEntry == null) return; + const destroyPs: Array> = []; + const connections = connectionsEntry.connections; + for (const connectionId of Object.keys(connections)) { + // Destroy if target or no target set + if (connectionIdTarget == null || connectionIdTarget === connectionId) { + const connAndTimer = connections[connectionId]; this.logger.debug( `Destroying NodeConnection for ${nodesUtils.encodeNodeId( targetNodeId, - )}`, + )}:${connectionId}`, ); - await connAndTimer.connection.destroy({ force }); + destroyPs.push(connAndTimer.connection.destroy({ force })); // Destroying TTL timer if 
(connAndTimer.timer != null) connAndTimer.timer.cancel(); - // Updating the connection map - this.connections.delete(targetNodeIdString); - }, + delete connections[connectionId]; + } + } + // If empty then remove the entry + const remainingKeys = Object.keys(connectionsEntry.connections); + if (remainingKeys.length === 0) { + this.connections.delete(targetNodeIdString); + } else { + // Check if the active connection was removed. + if (connections[connectionsEntry.activeConnection] == null) { + // Find the new lowest + connectionsEntry.activeConnection = remainingKeys.sort()[0]; + } + } + // Now that all the mutations are done we await destruction + await Promise.all(destroyPs); + } + + /** + * Will determine how long to keep a node around for. + * + * Timeout is scaled linearly from 1 min to 2 hours based on it's bucket. + * The value will be symmetric for two nodes, + * they will assign the same timeout for each other. + */ + protected getStickyTimeoutValue(nodeId: NodeId, primary: boolean): number { + const min = this.connectionIdleTimeoutTimeMin; + if (!primary) return min; + const max = this.connectionIdleTimeoutTimeScale; + // Determine the bucket + const bucketIndex = nodesUtils.bucketIndex( + this.keyRing.getNodeId(), + nodeId, ); + const factor = 1 - bucketIndex / 255; + return min + factor * max; + } + + /** + * This takes a reverse initiated QUICConnection, wraps it as a + * NodeConnection and adds it to the connection map. 
+ */ + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + protected handleConnectionReverse(quicConnection: QUICConnection) { + // Checking NodeId + // No specific error here, validation is handled by the QUICServer + const certChain = quicConnection.getRemoteCertsChain().map((der) => { + const cert = keysUtils.certFromPEM( + quicUtils.derToPEM(der) as CertificatePEM, + ); + if (cert == null) utils.never(); + return cert; + }); + if (certChain == null) utils.never(); + const nodeId = keysUtils.certNodeId(certChain[0]); + if (nodeId == null) utils.never(); + const nodeConnectionNew = NodeConnection.createNodeConnectionReverse({ + nodeId, + certChain, + manifest: agentClientManifest, + quicConnection: quicConnection, + logger: this.logger.getChild( + `${NodeConnection.name} [${nodesUtils.encodeNodeId(nodeId)}@${ + quicConnection.remoteHost + }:${quicConnection.remotePort}]`, + ), + }); + this.addConnection(nodeId, nodeConnectionNew); } /** @@ -1352,411 +1068,69 @@ class NodeConnectionManager { } } - /** - * Will attempt to find a connection via a Kademlia search. - * The connection will be established in the process. 
- * @param targetNodeId Id of the node we are tying to find - * @param pingTimeoutTime timeout for any ping attempts - * @param ctx - */ - public findNode( - targetNodeId: NodeId, - pingTimeoutTime?: number, - ctx?: Partial, - ): PromiseCancellable; @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - @timedCancellable(true) - public async findNode( - targetNodeId: NodeId, - pingTimeoutTime: number | undefined, - @context ctx: ContextTimed, - ): Promise { - this.logger.debug( - `Finding address for ${nodesUtils.encodeNodeId(targetNodeId)}`, - ); - // First check if we already have an existing ID -> address record - let address = (await this.nodeGraph.getNode(targetNodeId))?.address; - if (address != null) { - this.logger.debug( - `found address for ${nodesUtils.encodeNodeId(targetNodeId)} at ${ - address.host - }:${address.port}`, - ); - return address; - } else { - this.logger.debug(`attempting to find in the network`); - } - // Otherwise, attempt to locate it by contacting network - address = await this.getClosestGlobalNodes( - targetNodeId, - pingTimeoutTime ?? this.connectionConnectTimeoutTime, - ctx, - ); - if (address != null) { - this.logger.debug( - `found address for ${nodesUtils.encodeNodeId(targetNodeId)} at ${ - address.host - }:${address.port}`, - ); - } else { - this.logger.debug(`no address found`); - } - return address; + public hasConnection(nodeId: NodeId): boolean { + return this.connections.has(nodeId.toString() as NodeIdString); } - /** - * Will attempt to find a connection via MDNS. 
- * @param targetNodeId Id of the node we are tying to find - */ - public findNodeLocal( - targetNodeId: NodeId, - ctx?: Partial, - ): PromiseCancellable>; @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - @timedCancellable( - true, - (nodeConnectionManager: NodeConnectionManager) => - nodeConnectionManager.connectionFindLocalTimeoutTime, - ) - public async findNodeLocal( - targetNodeId: NodeId, - @context ctx: ContextTimed, - ): Promise> { - const encodedNodeId = nodesUtils.encodeNodeId(targetNodeId); - this.logger.debug(`Finding local addresses for ${encodedNodeId}`); - const addresses: Array = []; - if (this.mdns == null) { - return addresses; - } - // First check if we already have an existing MDNS Service - const mdnsOptions = { type: 'polykey', protocol: 'udp' } as const; - let service: ServicePOJO | void = this.mdns.networkServices.get( - mdnsUtils.toFqdn({ name: encodedNodeId, ...mdnsOptions }), - ); - if (service == null) { - // Setup promises - const { p: endedP, resolveP: resolveEndedP } = utils.promise(); - const abortHandler = () => { - resolveEndedP(); - }; - ctx.signal.addEventListener('abort', abortHandler, { once: true }); - ctx.timer.catch(() => {}).finally(() => abortHandler()); - const { p: serviceP, resolveP: resolveServiceP } = - utils.promise(); - const handleEventMDNSService = (evt: mdnsEvents.EventMDNSService) => { - if (evt.detail.name === encodedNodeId) { - resolveServiceP(evt.detail); - } - }; - this.mdns.addEventListener( - mdnsEvents.EventMDNSService.name, - handleEventMDNSService, - { once: true }, - ); - // Abort and restart query in case already running - this.mdns.stopQuery(mdnsOptions); - this.mdns.startQuery(mdnsOptions); - // Race promises to find node or timeout - service = await Promise.race([serviceP, endedP]); - this.mdns.removeEventListener( - mdnsEvents.EventMDNSService.name, - handleEventMDNSService, - ); - this.mdns.stopQuery(mdnsOptions); - ctx.signal.removeEventListener('abort', abortHandler); - } - // 
If the service is not found, just return no addresses - if (service == null) { - return addresses; - } - for (const host_ of service.hosts) { - let host: string; - switch (this.quicSocket.type) { - case 'ipv4': - if (quicUtils.isIPv4(host_)) { - host = host_; - } else if (quicUtils.isIPv4MappedIPv6(host_)) { - host = quicUtils.fromIPv4MappedIPv6(host_); - } else { - continue; - } - break; - case 'ipv6': - if (quicUtils.isIPv6(host_)) host = host_; - else continue; - break; - case 'ipv4&ipv6': - host = host_; - break; - default: - continue; + public listConnections(): Array<{ + nodeId: NodeId; + connectionId: string; + primary: boolean; + address: { host: Host; port: Port; hostname: Hostname | undefined }; + usageCount: number; + timeout: number | undefined; + }> { + const results: Array<{ + nodeId: NodeId; + connectionId: string; + primary: boolean; + address: { host: Host; port: Port; hostname: Hostname | undefined }; + usageCount: number; + timeout: number | undefined; + }> = []; + for (const [nodeIdString, connectionsEntry] of this.connections.entries()) { + const nodeId = IdInternal.fromString(nodeIdString); + const connections = connectionsEntry.connections; + for (const connectionId of Object.keys(connections)) { + const connectionAndTimer = connections[connectionId]; + const connection = connectionAndTimer.connection; + results.push({ + nodeId, + connectionId: connection.connectionId, + primary: + connectionsEntry.activeConnection === connection.connectionId, + address: { + host: connection.host, + port: connection.port, + hostname: connection.hostname, + }, + usageCount: connectionAndTimer.usageCount, + timeout: connectionAndTimer.timer?.getTimeout(), + }); } - addresses.push({ - host: host as Host, - port: service.port as Port, - scopes: ['local'], - }); - this.logger.debug( - `found address for ${nodesUtils.encodeNodeId( - targetNodeId, - )} at ${host}:${service.port}`, - ); - } - return addresses; - } - - /** - * Will attempt to find a connection via a 
Kademlia search or MDNS. - * The connection may be established in the process. - * @param targetNodeId Id of the node we are tying to find - * @param pingTimeoutTime timeout for any ping attempts - * @param ctx - */ - public findNodeAll( - targetNodeId: NodeId, - pingTimeoutTime?: number, - ctx?: Partial, - ): PromiseCancellable>; - @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - @timedCancellable(true) - public async findNodeAll( - targetNodeId: NodeId, - pingTimeoutTime: number | undefined, - @context ctx: ContextTimed, - ): Promise> { - const [localAddresses, kademliaAddress] = await Promise.allSettled([ - this.findNodeLocal(targetNodeId, { - signal: ctx.signal, - timer: this.connectionFindLocalTimeoutTime, - }), - this.findNode(targetNodeId, pingTimeoutTime, ctx), - ]); - const addresses = - localAddresses.status === 'fulfilled' ? localAddresses.value : []; - if ( - kademliaAddress.status === 'fulfilled' && - kademliaAddress.value != null - ) { - addresses.push(kademliaAddress.value); } - return addresses; + return results; } /** - * Attempts to locate a target node in the network (using Kademlia). - * Adds all discovered, active nodes to the current node's database (up to k - * discovered nodes). - * Once the target node is found, the method returns and stops trying to locate - * other nodes. - * - * Ultimately, attempts to perform a "DNS resolution" on the given target node - * ID (i.e. given a node ID, retrieves the node address, containing its IP and - * port). - * @param targetNodeId ID of the node attempting to be found (i.e. 
attempting - * to find its IP address and port) - * @param pingTimeoutTime - * @param ctx - * @returns whether the target node was located in the process + * Returns the number of active connections */ - public getClosestGlobalNodes( - targetNodeId: NodeId, - pingTimeoutTime?: number, - ctx?: Partial, - ): PromiseCancellable; - @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - @timedCancellable(true) - public async getClosestGlobalNodes( - targetNodeId: NodeId, - pingTimeoutTime: number | undefined, - @context ctx: ContextTimed, - ): Promise { - const localNodeId = this.keyRing.getNodeId(); - // Let foundTarget: boolean = false; - let foundAddress: NodeAddress | undefined = undefined; - // Get the closest alpha nodes to the target node (set as shortlist) - const shortlist = await this.nodeGraph.getClosestNodes( - targetNodeId, - this.connectionFindConcurrencyLimit, - ); - // If we have no nodes at all in our database (even after synchronising), - // then we should return nothing. We aren't going to find any others - if (shortlist.length === 0) { - this.logger.debug('Node graph was empty, No nodes to query'); - return; - } - // Need to keep track of the nodes that have been contacted - // Not sufficient to simply check if there's already a pre-existing connection - // in nodeConnections - what if there's been more than 1 invocation of - // getClosestGlobalNodes()? 
- const contacted: Set = new Set(); - // Iterate until we've found and contacted k nodes - while (contacted.size <= this.nodeGraph.nodeBucketLimit) { - if (ctx.signal?.aborted) return; - // Remove the node from the front of the array - const nextNode = shortlist.shift(); - // If we have no nodes left in the shortlist, then stop - if (nextNode == null) { - break; - } - const [nextNodeId, nextNodeAddress] = nextNode; - this.logger.debug( - `asking ${nodesUtils.encodeNodeId( - nextNodeId, - )} for closes nodes to ${nodesUtils.encodeNodeId(targetNodeId)}`, - ); - // Skip if the node has already been contacted - if (contacted.has(nextNodeId.toString())) continue; - // Connect to the node (check if pre-existing connection exists, otherwise - // create a new one) - if ( - !(await this.pingNode( - nextNodeId, - [ - { - host: nextNodeAddress.address.host, - port: nextNodeAddress.address.port, - scopes: ['global'], - }, - ], - { - signal: ctx.signal, - timer: pingTimeoutTime ?? this.connectionConnectTimeoutTime, - }, - )) - ) { - continue; - } - contacted[nextNodeId] = true; - // Ask the node to get their own closest nodes to the target - let foundClosest: Array<[NodeId, NodeData]>; - try { - foundClosest = await this.getRemoteNodeClosestNodes( - nextNodeId, - targetNodeId, - { signal: ctx.signal }, - ); - } catch (e) { - if (e instanceof nodesErrors.ErrorNodeConnectionTimeout) return; - throw e; - } - if (foundClosest.length === 0) continue; - // Check to see if any of these are the target node. 
At the same time, add - // them to the shortlist - for (const [nodeId, nodeData] of foundClosest) { - if (ctx.signal?.aborted) return; - // Ignore any nodes that have been contacted or our own node - if (contacted[nodeId] || localNodeId.equals(nodeId)) { - continue; - } - if ( - nodeId.equals(targetNodeId) && - (await this.pingNode( - nodeId, - [ - { - host: nodeData.address.host, - port: nodeData.address.port, - scopes: ['global'], - }, - ], - { - signal: ctx.signal, - timer: pingTimeoutTime ?? this.connectionConnectTimeoutTime, - }, - )) - ) { - foundAddress = nodeData.address; - // We have found the target node, so we can stop trying to look for it - // in the shortlist - break; - } - shortlist.push([nodeId, nodeData]); - } - // To make the number of jumps relatively short, should connect to the nodes - // closest to the target first, and ask if they know of any closer nodes - // than we can simply unshift the first (closest) element from the shortlist - const distance = (nodeId: NodeId) => - nodesUtils.nodeDistance(targetNodeId, nodeId); - shortlist.sort(function ([nodeIdA], [nodeIdB]) { - const distanceA = distance(nodeIdA); - const distanceB = distance(nodeIdB); - if (distanceA > distanceB) { - return 1; - } else if (distanceA < distanceB) { - return -1; - } else { - return 0; - } - }); + public connectionsActive(): number { + let size = 0; + for (const [, connectionsEntry] of this.connections) { + size += Object.keys(connectionsEntry.connections).length; } - return foundAddress; + return size; } - /** - * Performs an RPC request to retrieve the closest nodes relative to the given - * target node ID. 
- * @param nodeId the node ID to search on - * @param targetNodeId the node ID to find other nodes closest to it - * @param ctx - */ - public getRemoteNodeClosestNodes( - nodeId: NodeId, - targetNodeId: NodeId, - ctx?: Partial, - ): PromiseCancellable>; - @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - @timedCancellable( - true, - (nodeConnectionManager: NodeConnectionManager) => - nodeConnectionManager.connectionConnectTimeoutTime, - ) - public async getRemoteNodeClosestNodes( - nodeId: NodeId, - targetNodeId: NodeId, - @context ctx: ContextTimed, - ): Promise> { - try { - // Send through client - return await this.withConnF( - nodeId, - async (connection) => { - const client = connection.getClient(); - const closestNodes = await client.methods.nodesClosestLocalNodesGet( - { nodeIdEncoded: nodesUtils.encodeNodeId(targetNodeId) }, - ctx, - ); - const localNodeId = this.keyRing.getNodeId(); - const nodes: Array<[NodeId, NodeData]> = []; - for await (const result of closestNodes) { - const nodeId = nodesUtils.decodeNodeId(result.nodeIdEncoded); - // If the nodeId is not valid we don't add it to the list of nodes - // Our own nodeId is considered not valid here - if (nodeId != null && !localNodeId.equals(nodeId)) { - nodes.push([ - nodeId, - { - address: { - host: result.host as Host | Hostname, - port: result.port as Port, - scopes: ['global'], - }, - // Not really needed - // But if it's needed then we need to add the information to the proto definition - lastUpdated: 0, - }, - ]); - } - } - return nodes; - }, - ctx, - ); - } catch (e) { - if (nodesUtils.isConnectionError(e)) { - return []; - } - throw e; - } + public updateTlsConfig(tlsConfig: TLSConfig) { + this.tlsConfig = tlsConfig; + this.quicServer.updateConfig({ + key: tlsConfig.keyPrivatePem, + cert: tlsConfig.certChainPem, + }); } /** @@ -1815,13 +1189,19 @@ class NodeConnectionManager { public async handleNodesConnectionSignalInitial( sourceNodeId: NodeId, targetNodeId: NodeId, - address: 
NodeAddress, + address: { + host: Host; + port: Port; + }, requestSignature: string, - ): Promise { + ): Promise<{ + host: Host; + port: Port; + }> { // Need to get the connection details of the requester and add it to the message. // Then send the message to the target. // This would only function with existing connections - const existingConnection = await this.getExistingConnection(targetNodeId); + const existingConnection = this.getConnection(targetNodeId); if (existingConnection == null) { throw new nodesErrors.ErrorNodeConnectionManagerConnectionNotFound(); } @@ -1859,253 +1239,46 @@ class NodeConnectionManager { return { host, port, - scopes: ['global'], }; } /** - * Will make a connection to the signalling node and make a nodesConnectionSignalInitial` request. - * - * If the signalling node does not have an existing connection to the target then this will throw. - * If verification of the request fails then this will throw, but this shouldn't happen. - * The request contains a signature generated from ``. - * - * - * - * @param targetNodeId - NodeId of the node that needs to signal back. - * @param signallingNodeId - NodeId of the signalling node. - * @param ctx + * Returns a list of active connections and their address information. 
+ * TODO: take limit from config */ - public connectionSignalInitial( - targetNodeId: NodeId, - signallingNodeId: NodeId, - ctx?: Partial, - ): PromiseCancellable; @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - @timedCancellable( - true, - (nodeConnectionManager: NodeConnectionManager) => - nodeConnectionManager.connectionConnectTimeoutTime, - ) - public async connectionSignalInitial( + public getClosestConnections( targetNodeId: NodeId, - signallingNodeId: NodeId, - @context ctx: ContextTimed, - ): Promise { - return await this.withConnF( - signallingNodeId, - async (conn) => { - const client = conn.getClient(); - const sourceNodeId = this.keyRing.getNodeId(); - // Data is just `` concatenated - const data = Buffer.concat([sourceNodeId, targetNodeId]); - const signature = keysUtils.signWithPrivateKey( - this.keyRing.keyPair, - data, - ); - const addressMessage = - await client.methods.nodesConnectionSignalInitial( - { - targetNodeIdEncoded: nodesUtils.encodeNodeId(targetNodeId), - signature: signature.toString('base64url'), - }, - ctx, - ); - const nodeAddress: NodeAddress = { - host: addressMessage.host as Host, - port: addressMessage.port as Port, - scopes: ['global'], - }; - return nodeAddress; - }, - ctx, - ); - } - - /** - * Returns an array of the seed nodes. - */ - @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public getSeedNodes(): Array { - return Object.keys(this.seedNodes).map((nodeIdEncoded) => { - const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); - if (nodeId == null) utils.never(); - return nodeId; - }); - } - - /** - * Returns true if the given node is a seed node. - */ - @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public isSeedNode(nodeId: NodeId): boolean { - const seedNodes = this.getSeedNodes(); - return !!seedNodes.find((seedNode) => { - return nodeId.equals(seedNode); - }); - } - - /** - * Checks if a connection can be made to the target. 
Returns true if the - * connection can be authenticated, it's certificate matches the nodeId and - * the addresses match if provided. Otherwise, returns false. - * @param nodeId - NodeId of the target - * @param addresses - Contains the Hosts and Ports of the target node - * @param ctx - */ - public pingNode( - nodeId: NodeId, - addresses: Array, - ctx?: Partial, - ): PromiseCancellable; - @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - @timedCancellable( - true, - (nodeConnectionManager: NodeConnectionManager) => - nodeConnectionManager.connectionConnectTimeoutTime, - ) - public async pingNode( - nodeId: NodeId, - addresses: Array, - @context ctx: ContextTimed, - ): Promise { - try { - await this.getConnectionWithAddresses(nodeId, addresses, ctx); - return true; - } catch { - return false; + limit: number = 20, + ): Array { + const nodeIds: Array = []; + for (const nodeIdString of this.connections.keys()) { + nodeIds.push(IdInternal.fromString(nodeIdString)); } - } - - /** - * Used to start connections to multiple nodes and hosts at the same time. - * The main use-case is to connect to multiple seed nodes on the same hostname. 
- * @param nodeIds - * @param addresses - * @param ctx - */ - public getMultiConnection( - nodeIds: Array, - addresses: Array, - ctx?: Partial, - ): PromiseCancellable>; - @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - @timedCancellable( - true, - (nodeConnectionManager: NodeConnectionManager) => - nodeConnectionManager.connectionConnectTimeoutTime, - ) - public async getMultiConnection( - nodeIds: Array, - addresses: Array, - @context ctx: ContextTimed, - ): Promise> { - const locks: Array> = nodeIds.map((nodeId) => { - return [nodeId.toString(), Lock, ctx]; - }); - return await this.connectionLocks.withF(...locks, async () => { - const results = await this.establishMultiConnection( - nodeIds, - addresses, - ctx, - ); - const resultsArray: Array = []; - for (const [nodeIdString] of results) { - resultsArray.push(IdInternal.fromString(nodeIdString)); - } - return resultsArray; - }); - } - - @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public hasConnection(nodeId: NodeId): boolean { - return this.connections.has(nodeId.toString() as NodeIdString); - } - - @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public listConnections(): Array<{ - nodeId: NodeId; - address: { host: Host; port: Port; hostname: Hostname | undefined }; - usageCount: number; - timeout: number | undefined; - }> { - const results: Array<{ - nodeId: NodeId; - address: { host: Host; port: Port; hostname: Hostname | undefined }; - usageCount: number; - timeout: number | undefined; - }> = []; - for (const [ - nodeIdString, - connectionAndTimer, - ] of this.connections.entries()) { - const connection = connectionAndTimer.connection; - const nodeId = IdInternal.fromString(nodeIdString); - results.push({ - nodeId, - address: { + // Sort and draw limit + nodeIds.sort(nodesUtils.nodeDistanceCmpFactory(targetNodeId)); + const nodesShortList = nodeIds.slice(0, limit); + // With the desired nodes we can format data + return nodesShortList.map((nodeId) => { 
+ const nodeIdString = nodeId.toString() as NodeIdString; + const entry = this.connections.get(nodeIdString); + if (entry == null) utils.never('Connection should exist'); + const entryRecord: ActiveConnectionsInfo = { + nodeId: nodeId, + connections: {}, + }; + for (const connAndTimer of Object.values(entry.connections)) { + const connection = connAndTimer.connection; + entryRecord.connections[connection.connectionId] = { host: connection.host, + hostName: connection.hostname, port: connection.port, - hostname: connection.hostname, - }, - usageCount: connectionAndTimer.usageCount, - timeout: connectionAndTimer.timer?.getTimeout(), - }); - } - return results; - } - - /** - * Returns the number of active connections - */ - public connectionsActive(): number { - return this.connections.size + this.connectionsDraining.size; - } - - public updateTlsConfig(tlsConfig: TLSConfig) { - this.tlsConfig = tlsConfig; - this.quicServer.updateConfig({ - key: tlsConfig.keyPrivatePem, - cert: tlsConfig.certChainPem, - }); - } - - /** - * This attempts the NAT hole punch procedure. It will return a - * `PromiseCancellable` that will resolve once the procedure times out, is - * cancelled or the other end responds. - * - * This is pretty simple, it will contact all known seed nodes and get them to - * relay a punch signal message. - * - * This doesn't care if the requests fail or succeed, so any errors or results are ignored. 
- * - */ - protected initiateHolePunch( - targetNodeIds: Array, - ctx?: Partial, - ): PromiseCancellable>; - @timedCancellable(true) - protected async initiateHolePunch( - targetNodeIds: Array, - @context ctx: ContextTimed, - ): Promise> { - const seedNodes = this.getSeedNodes(); - const allProms: Array> = []; - for (const targetNodeId of targetNodeIds) { - if (!this.isSeedNode(targetNodeId)) { - // Ask seed nodes to signal hole punching for target - const holePunchProms = seedNodes.map((seedNodeId) => { - return ( - this.connectionSignalInitial(targetNodeId, seedNodeId, ctx) - // Ignore errors - .catch(() => undefined) - ); - }); - allProms.push(...holePunchProms); + timeout: connAndTimer.timer?.getTimeout(), + primary: connection.connectionId === entry.activeConnection, + }; } - } - return await Promise.all(allProms); + return entryRecord; + }); } } diff --git a/src/nodes/NodeConnectionQueue.ts b/src/nodes/NodeConnectionQueue.ts new file mode 100644 index 000000000..5cb6c4c46 --- /dev/null +++ b/src/nodes/NodeConnectionQueue.ts @@ -0,0 +1,218 @@ +import type { NodeIdString, NodeId } from '../ids/types'; +import type { NodeContact } from './types'; +import type { Semaphore } from '@matrixai/async-locks'; +import type { ContextCancellable } from '@matrixai/contexts'; +import * as nodesUtils from './utils'; +import * as utils from '../utils'; + +// Temp utility class for tracking shared queue +export class NodeConnectionQueue { + protected nodesContacted: Set = new Set(); + protected nodesFoundSignal: Set = new Set(); + protected nodesFoundDirect: Set = new Set(); + + protected queueSignal: Array<[NodeId, NodeId | undefined]> = []; + protected queueDirect: Array<[NodeId, NodeContact]> = []; + + protected nodeDistanceCmp: ( + [nodeId1]: [NodeId, unknown], + [nodeId2]: [NodeId, unknown], + ) => 0 | 1 | -1; + protected connectionMade: boolean = false; + public nodesRunningSignal: Set> = new Set(); + public nodesRunningDirect: Set> = new Set(); + + constructor( + 
protected nodeIdSelf: NodeId, + protected nodeIdTarget: NodeId, + protected limit: number, + protected rateLimitSignal: Semaphore, + protected rateLimitDirect: Semaphore, + ) { + const nodeDistanceCmp = nodesUtils.nodeDistanceCmpFactory(nodeIdTarget); + this.nodeDistanceCmp = ( + [nodeId1]: [NodeId, unknown], + [nodeId2]: [NodeId, unknown], + ) => nodeDistanceCmp(nodeId1, nodeId2); + } + + /** + * Adds the node to the queueSignal and found set. + * Nodes that are in the found or contacted sets are filtered out. + * @param nodeIdTarget + * @param nodeIdSignaller + */ + public queueNodeSignal( + nodeIdTarget: NodeId, + nodeIdSignaller: NodeId | undefined, + ) { + const nodeIdTargetString = nodeIdTarget.toString() as NodeIdString; + // If in the found, contacted or our own nodeId then skip + if ( + this.nodesContacted.has(nodeIdTargetString) || + this.nodesFoundSignal.has(nodeIdTargetString) || + this.nodeIdSelf.equals(nodeIdTarget) + ) { + return; + } + this.nodesFoundSignal.add(nodeIdTargetString); + // Add to queue + this.queueSignal.push([nodeIdTarget, nodeIdSignaller]); + // Sort queue + this.queueSignal.sort(this.nodeDistanceCmp); + this.queueSignal.splice(20); + } + + public queueNodeDirect(nodeIdTarget: NodeId, nodeContact: NodeContact) { + const nodeIdTargetString = nodeIdTarget.toString() as NodeIdString; + // If in the found, contacted or our own nodeId then skip + if ( + this.nodesContacted.has(nodeIdTargetString) || + this.nodesFoundDirect.has(nodeIdTargetString) || + this.nodeIdSelf.equals(nodeIdTarget) + ) { + return; + } + this.nodesFoundDirect.add(nodeIdTargetString); + // Add to queue + this.queueDirect.push([nodeIdTarget, nodeContact]); + // Sort queue + this.queueDirect.sort(this.nodeDistanceCmp); + } + + /** + * Uses a bracketing pattern to track the resource + * will wait for slot in the `rateLimit` to be free and run the function. + * + * It will not run the function and return with true under the following conditions + * 1. 
The limit is reached. + * 2. The queue is exausted and there are no pending connections. + * 3. The function returned true indicating target was connected to + */ + public async withNodeSignal( + f: ( + nodeIdTarget: NodeId, + nodeIdSignaller: NodeId | undefined, + ) => Promise, + ctx: ContextCancellable, + ): Promise { + return this.withNode( + this.queueSignal, + this.rateLimitSignal, + this.nodesRunningSignal, + f, + ctx, + ); + } + + public async withNodeDirect( + f: (nodeIdTarget: NodeId, nodeIdSignaller: NodeContact) => Promise, + ctx: ContextCancellable, + ): Promise { + return this.withNode( + this.queueDirect, + this.rateLimitDirect, + this.nodesRunningDirect, + f, + ctx, + ); + } + + /** + * Generic with node for shared code between direct and signal queues + * @protected + */ + protected async withNode( + queue: Array<[NodeId, T]>, + rateLimit: Semaphore, + nodesRunning: Set>, + f: (nodeIdTarget: NodeId, nodeIdSignaller: T) => Promise, + ctx: ContextCancellable, + ): Promise { + // Checking if hit limit + if (this.nodesContacted.size >= this.limit) { + return true; + } + // If queue is empty then we need to wait for alternative sources + await this.waitForQueue(queue, ctx); + + // If queue still empty then we've run out of nodes to contact + if (queue.length === 0) { + return true; + } + // Wait for a free concurrency slot + const [rateLimitReleaser] = await rateLimit.lock()(); + if (this.connectionMade) { + await rateLimitReleaser(); + return true; + } + const nextNode = queue.shift(); + // If queue exhausted or target found then end + if (nextNode == null) { + await rateLimitReleaser(); + return true; + } + const [nodeIdTarget, data] = nextNode; + + // Running the function + const attempt = f(nodeIdTarget, data) + .then( + (result) => { + if (result) this.connectionMade = true; + }, + () => {}, + ) + .finally(async () => { + // Release the rateLimiter lock + await rateLimitReleaser(); + nodesRunning.delete(attempt); + }); + nodesRunning.add(attempt); + 
return false; + } + + public contactedNode(nodeIdTarget: NodeId) { + const nodeIdTargetString = nodeIdTarget.toString() as NodeIdString; + this.nodesContacted.add(nodeIdTargetString); + this.nodesFoundSignal.delete(nodeIdTargetString); + this.nodesFoundDirect.delete(nodeIdTargetString); + } + + /** + * Resolves under the following conditions + * 1. When the queue has an entry + * 2. When all active and pending attempts have ended + * 3. When the connection has been found + */ + protected async waitForQueue( + queue: Array, + ctx: ContextCancellable, + ): Promise { + const abortP = utils.signalPromise(ctx.signal).catch(() => {}); + while ( + !this.connectionMade && + queue.length === 0 && + (this.nodesRunningSignal.size > 0 || + this.nodesRunningDirect.size > 0 || + this.queueSignal.length > 0 || + this.queueDirect.length > 0) + ) { + if (ctx.signal.aborted) return; + if (this.nodesRunningSignal.size + this.nodesRunningDirect.size === 0) { + // Yield to the event loop to allow queued attempts to start + await utils.sleep(0); + continue; + } + const runningPs: Array> = []; + for (const P of this.nodesRunningSignal) { + runningPs.push(P); + } + for (const P of this.nodesRunningDirect) { + runningPs.push(P); + } + await Promise.any([...runningPs, abortP]); + } + } +} + +export default NodeConnectionQueue; diff --git a/src/nodes/NodeGraph.ts b/src/nodes/NodeGraph.ts index 6b54f6f32..f4626973f 100644 --- a/src/nodes/NodeGraph.ts +++ b/src/nodes/NodeGraph.ts @@ -1,9 +1,11 @@ -import type { DB, DBTransaction, KeyPath, LevelPath } from '@matrixai/db'; +import type { DB, DBTransaction, LevelPath } from '@matrixai/db'; import type { NodeId, NodeAddress, NodeBucket, - NodeData, + NodeContact, + NodeContactAddress, + NodeContactAddressData, NodeBucketMeta, NodeBucketIndex, NodeGraphSpace, @@ -18,11 +20,23 @@ import { IdInternal } from '@matrixai/id'; import * as nodesUtils from './utils'; import * as nodesErrors from './errors'; import * as nodesEvents from './events'; -import 
{ getUnixtime, never } from '../utils'; +import * as utils from '../utils'; +import config from '../config'; /** - * NodeGraph is an implementation of Kademlia for maintaining peer to peer information - * It is a database of fixed-size buckets, where each bucket contains NodeId -> NodeData + * NodeGraph is an implementation of Kademlia for maintaining peer to peer + * information about Polkey nodes. + * + * It is a database of fixed-size buckets, where each bucket + * contains NodeId -> NodeData. The bucket index is a prefix key. + * This means the data is ordered in terms of bucket index, and then node ID. + * From lowest to highest. + * + * The NodeGraph is partitioned into 2 spaces. The reason to do this is allow + * transactional resetting of the buckets if the own node ID changes. + * + * When the node ID changes, either due to key renewal or reset, we remap all + * existing records to the other space, and then we swap the active space key. */ interface NodeGraph extends CreateDestroyStartStop {} @CreateDestroyStartStop( @@ -42,12 +56,14 @@ class NodeGraph { db, keyRing, nodeIdBits = 256, + nodeBucketLimit = config.defaultsSystem.nodesGraphBucketLimit, logger = new Logger(this.name), fresh = false, }: { db: DB; keyRing: KeyRing; nodeIdBits?: number; + nodeBucketLimit?: number; logger?: Logger; fresh?: boolean; }): Promise { @@ -56,6 +72,7 @@ class NodeGraph { db, keyRing, nodeIdBits, + nodeBucketLimit, logger, }); await nodeGraph.start({ fresh }); @@ -64,39 +81,57 @@ class NodeGraph { } /** - * Bit size of the NodeIds - * This equals the number of buckets + * Bit size of the node IDs. + * This is also the total number of buckets. */ public readonly nodeIdBits: number; + /** - * Max number of nodes in each k-bucket + * Max number of nodes in each bucket. 
*/ - public readonly nodeBucketLimit: number = 20; + public readonly nodeBucketLimit: number; protected logger: Logger; protected db: DB; protected keyRing: KeyRing; protected space: NodeGraphSpace; + protected nodeGraphDbPath: LevelPath = [this.constructor.name]; + /** + * Meta stores the `keyof NodeBucketMeta` -> `NodeBucketMeta[keyof NodeBucketMeta]`. + */ protected nodeGraphMetaDbPath: LevelPath; + /** + * Buckets stores `lexi(NodeBucketIndex)/NodeId/nodeContactAddress` -> `NodeContactAddressData`. + * + * nodeContactAddress are canoncialized to be consistent. + */ protected nodeGraphBucketsDbPath: LevelPath; - protected nodeGraphLastUpdatedDbPath: LevelPath; + /** + * Last updated stores + * `lexi(NodeBucketIndex)/"time"/lexi(connectedTime)/nodeId` -> `nodeId`. + * `lexi(NodeBucketIndex)/"nodeId"/nodeId` -> `lexi(connectedTime)`. + */ + protected nodeGraphConnectedDbPath: LevelPath; constructor({ db, keyRing, nodeIdBits, + nodeBucketLimit, logger, }: { db: DB; keyRing: KeyRing; nodeIdBits: number; + nodeBucketLimit: number; logger: Logger; }) { this.logger = logger; this.db = db; this.keyRing = keyRing; this.nodeIdBits = nodeIdBits; + this.nodeBucketLimit = nodeBucketLimit; } public async start({ @@ -108,7 +143,7 @@ class NodeGraph { await tran.clear(this.nodeGraphDbPath); } // Space key is used to create a swappable sublevel - // when remapping the buckets during `this.refreshBuckets` + // when remapping the buckets during `this.resetBuckets` return await this.setupSpace(tran); }); // Bucket metadata sublevel: `!meta!! 
-> value` @@ -117,12 +152,12 @@ class NodeGraph { // The BucketIndex can range from 0 to NodeId bit-size minus 1 // So 256 bits means 256 buckets of 0 to 255 this.nodeGraphBucketsDbPath = [...this.nodeGraphDbPath, 'buckets' + space]; - // Last updated sublevel: `!lastUpdated!!- -> NodeId` - // This is used as a sorted index of the NodeId by `lastUpdated` timestamp - // The `NodeId` must be appended in the key in order to disambiguate `NodeId` with same `lastUpdated` timestamp - this.nodeGraphLastUpdatedDbPath = [ + // Last updated sublevel: `!connected!!- -> NodeId` + // This is used as a sorted index of the NodeId by `connected` timestamp + // The `NodeId` must be appended in the key in order to disambiguate `NodeId` with same `connected` timestamp + this.nodeGraphConnectedDbPath = [ ...this.nodeGraphDbPath, - 'lastUpdated' + space, + 'connected' + space, ]; this.space = space; this.logger.info(`Started ${this.constructor.name}`); @@ -135,8 +170,6 @@ class NodeGraph { public async destroy(): Promise { this.logger.info(`Destroying ${this.constructor.name}`); - // If the DB was stopped, the existing sublevel `this.nodeGraphDb` will not be valid - // Therefore we recreate the sublevel here await this.db.clear(this.nodeGraphDbPath); this.logger.info(`Destroyed ${this.constructor.name}`); } @@ -145,7 +178,7 @@ class NodeGraph { * Sets up the space key * The space string is suffixed to the `buckets` and `meta` sublevels * This is used to allow swapping of sublevels when remapping buckets - * during `this.refreshBuckets` + * during `this.resetBuckets` */ protected async setupSpace(tran: DBTransaction): Promise { let space = await tran.get([ @@ -160,6 +193,25 @@ class NodeGraph { return space; } + /** + * Derive the bucket index of the k-buckets from the new `NodeId` + * The bucket key is the string encoded version of bucket index + * that preserves lexicographic order + */ + public bucketIndex(nodeId: NodeId): [NodeBucketIndex, string] { + const nodeIdOwn = 
this.keyRing.getNodeId(); + if (nodeId.equals(nodeIdOwn)) { + throw new nodesErrors.ErrorNodeGraphSameNodeId(); + } + const bucketIndex = nodesUtils.bucketIndex(nodeIdOwn, nodeId); + const bucketKey = nodesUtils.bucketKey(bucketIndex); + return [bucketIndex, bucketKey]; + } + + /** + * Locks the bucket index for exclusive operations. + * This allows you sequence operations for any bucket. + */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async lockBucket(bucketIndex: number, tran: DBTransaction) { const keyPath = [ @@ -169,225 +221,433 @@ class NodeGraph { return await tran.lock(keyPath.join('')); } + /** + * Get a single `NodeContact` + */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async getNode( + public async getNodeContact( nodeId: NodeId, tran?: DBTransaction, - ): Promise { - const tranOrDb = tran ?? this.db; - + ): Promise { + if (tran == null) { + return this.db.withTransactionF((tran) => + this.getNodeContact(nodeId, tran), + ); + } const [bucketIndex] = this.bucketIndex(nodeId); - const bucketDomain = [ - ...this.nodeGraphBucketsDbPath, - nodesUtils.bucketKey(bucketIndex), - nodesUtils.bucketDbKey(nodeId), - ]; - return await tranOrDb.get(bucketDomain); + const contact: NodeContact = {}; + for await (const [ + keyPath, + nodeContactAddressData, + ] of tran.iterator( + [ + ...this.nodeGraphBucketsDbPath, + nodesUtils.bucketKey(bucketIndex), + nodesUtils.bucketDbKey(nodeId), + ], + { + valueAsBuffer: false, + }, + )) { + const nodeContactAddress = keyPath[0].toString(); + contact[nodeContactAddress] = nodeContactAddressData; + } + if (Object.keys(contact).length === 0) return undefined; + return contact; } /** - * Get all nodes. - * Nodes are always sorted by `NodeBucketIndex` first - * Then secondly by the node IDs - * The `order` parameter applies to both, for example possible sorts: - * NodeBucketIndex asc, NodeID asc - * NodeBucketIndex desc, NodeId desc + * Get all `NodeContact`. 
+ * + * Results are sorted by `NodeBucketIndex` then `NodeId` then + * `NodeContactAddress`. + * The `order` parameter applies to both, for example: + * NodeBucketIndex asc, NodeID asc, NodeContactAddress asc + * NodeBucketIndex desc, NodeId desc, NodeContactAddress desc */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async *getNodes( + public async *getNodeContacts( order: 'asc' | 'desc' = 'asc', tran?: DBTransaction, - ): AsyncGenerator<[NodeId, NodeData]> { + ): AsyncGenerator<[NodeId, NodeContact]> { if (tran == null) { - const getNodes = (tran) => this.getNodes(order, tran); + // Lambda generators don't grab the `this` context, so we need to bind it + const getNodeContacts = (tran) => this.getNodeContacts(order, tran); return yield* this.db.withTransactionG(async function* (tran) { - return yield* getNodes(tran); + return yield* getNodeContacts(tran); }); } + return yield* nodesUtils.collectNodeContacts( + [...this.nodeGraphBucketsDbPath], + tran, + { reverse: order !== 'asc' }, + ); + } - for await (const [keyPath, nodeData] of tran.iterator( - this.nodeGraphBucketsDbPath, - { - reverse: order !== 'asc', - valueAsBuffer: false, - }, - )) { - const { nodeId } = nodesUtils.parseBucketsDbKey(keyPath); - yield [nodeId, nodeData]; + /** + * Get a single `NodeContactAddressData`. 
+ */ + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async getNodeContactAddressData( + nodeId: NodeId, + nodeAddress: NodeAddress | NodeContactAddress, + tran?: DBTransaction, + ): Promise { + if (tran == null) { + return this.db.withTransactionF((tran) => + this.getNodeContactAddressData(nodeId, nodeAddress, tran), + ); + } + const [bucketIndex] = this.bucketIndex(nodeId); + let nodeContactAddress: NodeContactAddress; + if (Array.isArray(nodeAddress)) { + nodeContactAddress = nodesUtils.nodeContactAddress(nodeAddress); + } else { + nodeContactAddress = nodeAddress; } + return tran.get([ + ...this.nodeGraphBucketsDbPath, + nodesUtils.bucketKey(bucketIndex), + nodesUtils.bucketDbKey(nodeId), + nodeContactAddress, + ]); } /** - * Will add a node to the node graph and increment the bucket count. - * If the node already existed it will be updated. - * @param nodeId NodeId to add to the NodeGraph - * @param nodeAddress Address information to add - * @param tran + * Sets a single `NodeContact` for a `NodeId`. + * This replaces the entire `NodeContact` for the `NodeId`. + * This will increment the bucket count if it is a new `NodeID`. + * + * @throws {nodesErrors.ErrorNodeGraphBucketLimit} If the bucket is full. 
*/ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async setNode( + public async setNodeContact( nodeId: NodeId, - nodeAddress: NodeAddress, + nodeContact: NodeContact, tran?: DBTransaction, ): Promise { if (tran == null) { return this.db.withTransactionF((tran) => - this.setNode(nodeId, nodeAddress, tran), + this.setNodeContact(nodeId, nodeContact, tran), ); } - const [bucketIndex, bucketKey] = this.bucketIndex(nodeId); - const lastUpdatedPath = [...this.nodeGraphLastUpdatedDbPath, bucketKey]; const nodeIdKey = nodesUtils.bucketDbKey(nodeId); - const bucketPath = [...this.nodeGraphBucketsDbPath, bucketKey, nodeIdKey]; - const nodeData = await tran.get(bucketPath); - if (nodeData != null) { - this.logger.debug( - `Updating node ${nodesUtils.encodeNodeId( - nodeId, - )} in bucket ${bucketIndex}`, + const nodeContactPath = [ + ...this.nodeGraphBucketsDbPath, + bucketKey, + nodeIdKey, + ]; + if ((await tran.count(nodeContactPath)) === 0) { + // It didn't exist, so we want to increment the bucket count + const count = await this.getBucketMetaProp(bucketIndex, 'count', tran); + if (count >= this.nodeBucketLimit) { + throw new nodesErrors.ErrorNodeGraphBucketLimit(); + } + await this.setBucketMetaProp(bucketIndex, 'count', count + 1, tran); + } + // Clear the entire contact if it exists + await tran.clear(nodeContactPath); + let connectedTimeMax = 0; + for (const nodeContactAddress in nodeContact) { + const nodeContactAddressData = nodeContact[nodeContactAddress]; + await tran.put( + [...nodeContactPath, nodeContactAddress], + nodeContactAddressData, ); - // If the node already exists we want to remove the old `lastUpdated` - const lastUpdatedKey = nodesUtils.lastUpdatedKey(nodeData.lastUpdated); - await tran.del([...lastUpdatedPath, lastUpdatedKey, nodeIdKey]); - } else { - this.logger.debug( - `Adding node ${nodesUtils.encodeNodeId( + connectedTimeMax = Math.max( + connectedTimeMax, + nodeContactAddressData.connectedTime, + ); + } + await 
this.setConnectedTime(nodeId, connectedTimeMax, tran); + } + + /** + * Sets a single `NodeContactAddressData` for a `NodeId`. + * This will increment the bucket count if it is a new `NodeID`. + * + * @throws {nodesErrors.ErrorNodeGraphBucketLimit} If the bucket is full. + */ + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async setNodeContactAddressData( + nodeId: NodeId, + nodeAddress: NodeAddress | NodeContactAddress, + nodeContactAddressData: NodeContactAddressData, + tran?: DBTransaction, + ): Promise { + if (tran == null) { + return this.db.withTransactionF((tran) => + this.setNodeContactAddressData( nodeId, - )} to bucket ${bucketIndex}`, + nodeAddress, + nodeContactAddressData, + tran, + ), ); + } + const [bucketIndex, bucketKey] = this.bucketIndex(nodeId); + const nodeIdKey = nodesUtils.bucketDbKey(nodeId); + const nodeContactPath = [ + ...this.nodeGraphBucketsDbPath, + bucketKey, + nodeIdKey, + ]; + if ((await tran.count(nodeContactPath)) === 0) { // It didn't exist, so we want to increment the bucket count const count = await this.getBucketMetaProp(bucketIndex, 'count', tran); + if (count >= this.nodeBucketLimit) { + throw new nodesErrors.ErrorNodeGraphBucketLimit(); + } await this.setBucketMetaProp(bucketIndex, 'count', count + 1, tran); } - const lastUpdated = getUnixtime(); - await tran.put(bucketPath, { - address: nodeAddress, - lastUpdated, - }); - const newLastUpdatedKey = nodesUtils.lastUpdatedKey(lastUpdated); + let nodeContactAddress: NodeContactAddress; + if (Array.isArray(nodeAddress)) { + nodeContactAddress = nodesUtils.nodeContactAddress(nodeAddress); + } else { + nodeContactAddress = nodeAddress; + } await tran.put( - [...lastUpdatedPath, newLastUpdatedKey, nodeIdKey], - nodeIdKey, - true, + [...nodeContactPath, nodeContactAddress], + nodeContactAddressData, + ); + await this.setConnectedTime( + nodeId, + nodeContactAddressData.connectedTime, + tran, ); } + /** + * Unsets a `NodeId` record. 
+ * It will decrement the bucket count if it existed. + */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async getOldestNode( - bucketIndex: number, - limit: number = 1, + public async unsetNodeContact( + nodeId: NodeId, tran?: DBTransaction, - ): Promise> { + ): Promise { if (tran == null) { return this.db.withTransactionF((tran) => - this.getOldestNode(bucketIndex, limit, tran), + this.unsetNodeContact(nodeId, tran), ); } - const bucketKey = nodesUtils.bucketKey(bucketIndex); - // Remove the oldest entry in the bucket - const oldestNodeIds: Array = []; - for await (const [keyPath] of tran.iterator( - [...this.nodeGraphLastUpdatedDbPath, bucketKey], - { limit }, - )) { - const { nodeId } = nodesUtils.parseLastUpdatedBucketDbKey(keyPath); - oldestNodeIds.push(nodeId); - } - return oldestNodeIds; + const [bucketIndex, bucketKey] = this.bucketIndex(nodeId); + const nodeIdKey = nodesUtils.bucketDbKey(nodeId); + const nodeContactPath = [ + ...this.nodeGraphBucketsDbPath, + bucketKey, + nodeIdKey, + ]; + // Skip if node doesn't exist + if ((await tran.count(nodeContactPath)) === 0) return; + // Decrement the bucket count + const count = await this.getBucketMetaProp(bucketIndex, 'count', tran); + await this.setBucketMetaProp(bucketIndex, 'count', count - 1, tran); + // Clear the records + await tran.clear(nodeContactPath); + await this.delConnectedTime(nodeId, tran); } @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async unsetNode(nodeId: NodeId, tran?: DBTransaction): Promise { + public async unsetNodeContactAddress( + nodeId: NodeId, + nodeAddress: NodeAddress | NodeContactAddress, + tran?: DBTransaction, + ): Promise { if (tran == null) { - return this.db.withTransactionF((tran) => this.unsetNode(nodeId, tran)); + return this.db.withTransactionF((tran) => + this.unsetNodeContactAddress(nodeId, nodeAddress, tran), + ); } - const [bucketIndex, bucketKey] = this.bucketIndex(nodeId); - const bucketPath = [...this.nodeGraphBucketsDbPath, 
bucketKey]; - const lastUpdatedPath = [...this.nodeGraphLastUpdatedDbPath, bucketKey]; + let nodeContactAddress: NodeContactAddress; + if (Array.isArray(nodeAddress)) { + nodeContactAddress = nodesUtils.nodeContactAddress(nodeAddress); + } else { + nodeContactAddress = nodeAddress; + } const nodeIdKey = nodesUtils.bucketDbKey(nodeId); - const nodeData = await tran.get([...bucketPath, nodeIdKey]); - if (nodeData != null) { - this.logger.debug( - `Removing node ${nodesUtils.encodeNodeId( - nodeId, - )} from bucket ${bucketIndex}`, - ); + const nodeContactPath = [ + ...this.nodeGraphBucketsDbPath, + bucketKey, + nodeIdKey, + ]; + + // Skip if node doesn't exist + const addressCount = await tran.count(nodeContactPath); + if (addressCount === 0) return; + + // Skip if no data + const data = tran.get([ + ...this.nodeGraphBucketsDbPath, + nodesUtils.bucketKey(bucketIndex), + nodesUtils.bucketDbKey(nodeId), + nodeContactAddress, + ]); + if (data == null) return; + + // Remove data + await tran.del([ + ...this.nodeGraphBucketsDbPath, + nodesUtils.bucketKey(bucketIndex), + nodesUtils.bucketDbKey(nodeId), + nodeContactAddress, + ]); + + // If last address then clear node from bucket and decrement count + if (addressCount === 1) { + await tran.clear(nodeContactPath); const count = await this.getBucketMetaProp(bucketIndex, 'count', tran); await this.setBucketMetaProp(bucketIndex, 'count', count - 1, tran); - await tran.del([...bucketPath, nodeIdKey]); - const lastUpdatedKey = nodesUtils.lastUpdatedKey(nodeData.lastUpdated); - await tran.del([...lastUpdatedPath, lastUpdatedKey, nodeIdKey]); + await this.delConnectedTime(nodeId, tran); } } /** - * Gets a bucket + * Sets the `connectedTime` for a NodeId, replaces the old value if it exists + */ + protected async setConnectedTime( + nodeId: NodeId, + connectedTime: number, + tran: DBTransaction, + path: LevelPath = this.nodeGraphConnectedDbPath, + ) { + const [, bucketKey] = this.bucketIndex(nodeId); + const connectedPath = 
[...path, bucketKey]; + const nodeIdKey = nodesUtils.bucketDbKey(nodeId); + const newConnectedKey = nodesUtils.connectedKey(connectedTime); + + // Lookup the old time and delete it + const oldConnectedKey = await tran.get( + [...connectedPath, 'nodeId', nodeIdKey], + true, + ); + if (oldConnectedKey != null) { + await tran.del([...connectedPath, 'time', oldConnectedKey, nodeIdKey]); + } + // Set the new values + await tran.put( + [...connectedPath, 'nodeId', nodeIdKey], + newConnectedKey, + true, + ); + await tran.put( + [...connectedPath, 'time', newConnectedKey, nodeIdKey], + nodeIdKey, + true, + ); + } + + /** + * Deletes the `connectedTime` for a NodeId + */ + protected async delConnectedTime(nodeId: NodeId, tran: DBTransaction) { + const [, bucketKey] = this.bucketIndex(nodeId); + const lastConnectedPath = [...this.nodeGraphConnectedDbPath, bucketKey]; + const nodeIdKey = nodesUtils.bucketDbKey(nodeId); + + // Look up the existing time + const oldConnectedKey = await tran.get( + [...lastConnectedPath, 'nodeId', nodeIdKey], + true, + ); + // And delete the values + await tran.del([...lastConnectedPath, 'nodeId', nodeIdKey]); + if (oldConnectedKey == null) return; + await tran.del([...lastConnectedPath, 'time', oldConnectedKey, nodeIdKey]); + } + + /** + * Gets the `connectedTime` for a node + */ + public async getConnectedTime(nodeId: NodeId, tran?: DBTransaction) { + if (tran == null) { + return this.db.withTransactionF((tran) => + this.getConnectedTime(nodeId, tran), + ); + } + const [, bucketKey] = this.bucketIndex(nodeId); + const connectedPath = [...this.nodeGraphConnectedDbPath, bucketKey]; + const nodeIdKey = nodesUtils.bucketDbKey(nodeId); + + // Look up the existing time + const oldConnectedKey = await tran.get( + [...connectedPath, 'nodeId', nodeIdKey], + true, + ); + // Convert and return + if (oldConnectedKey == null) return; + return nodesUtils.parseConnectedKey(oldConnectedKey); + } + + // ... + + /** + * Gets a bucket. 
+ * * The bucket's node IDs is sorted lexicographically by default - * Alternatively you can acquire them sorted by lastUpdated timestamp - * or by distance to the own NodeId + * Alternatively you can acquire them sorted by connected timestamp + * or by distance to the own NodeId. + * + * @param bucketIndex + * @param sort + * @param order + * @param limit Limit the number of nodes returned, note that `-1` means + * no limit, but `Infinity` means `0`. + * @param tran */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async getBucket( bucketIndex: NodeBucketIndex, - sort: 'nodeId' | 'distance' | 'lastUpdated' = 'nodeId', + sort: 'nodeId' | 'distance' | 'connected' = 'nodeId', order: 'asc' | 'desc' = 'asc', + limit?: number, tran?: DBTransaction, ): Promise { if (tran == null) { return this.db.withTransactionF((tran) => - this.getBucket(bucketIndex, sort, order, tran), + this.getBucket(bucketIndex, sort, order, limit, tran), ); } - if (bucketIndex < 0 || bucketIndex >= this.nodeIdBits) { throw new nodesErrors.ErrorNodeGraphBucketIndex( `bucketIndex must be between 0 and ${this.nodeIdBits - 1} inclusive`, ); } + const nodeIdOwn = this.keyRing.getNodeId(); const bucketKey = nodesUtils.bucketKey(bucketIndex); const bucket: NodeBucket = []; if (sort === 'nodeId' || sort === 'distance') { - for await (const [key, nodeData] of tran.iterator( + for await (const result of nodesUtils.collectNodeContacts( [...this.nodeGraphBucketsDbPath, bucketKey], + tran, { reverse: order !== 'asc', - valueAsBuffer: false, + limit, + pathAdjust: [''], }, )) { - const nodeId = nodesUtils.parseBucketDbKey(key[0] as Buffer); - bucket.push([nodeId, nodeData]); + bucket.push(result); } if (sort === 'distance') { - nodesUtils.bucketSortByDistance( - bucket, - this.keyRing.getNodeId(), - order, - ); + nodesUtils.bucketSortByDistance(bucket, nodeIdOwn, order); } - } else if (sort === 'lastUpdated') { - const bucketDbIterator = tran.iterator( - [...this.nodeGraphBucketsDbPath, bucketKey], - 
{ valueAsBuffer: false }, - ); - try { - for await (const [, nodeIdBuffer] of tran.iterator( - [...this.nodeGraphLastUpdatedDbPath, bucketKey], - { - reverse: order !== 'asc', - }, - )) { - const nodeId = IdInternal.fromBuffer(nodeIdBuffer); - bucketDbIterator.seek(nodeIdBuffer); - // eslint-disable-next-line - const iteratorResult = await bucketDbIterator.next(); - if (iteratorResult == null) never(); - const [, nodeData] = iteratorResult; - bucket.push([nodeId, nodeData]); - } - } finally { - await bucketDbIterator.destroy(); + } else if (sort === 'connected') { + for await (const [, nodeIdBuffer] of tran.iterator( + [...this.nodeGraphConnectedDbPath, bucketKey, 'time'], + { + reverse: order !== 'asc', + limit, + }, + )) { + const nodeId = IdInternal.fromBuffer(nodeIdBuffer); + const nodeContact = await this.getNodeContact( + IdInternal.fromBuffer(nodeIdBuffer), + tran, + ); + if (nodeContact == null) utils.never(); + bucket.push([nodeId, nodeContact]); } } return bucket; @@ -402,12 +662,12 @@ class NodeGraph { * NodeBucketIndex desc, NodeId desc * NodeBucketIndex asc, distance asc * NodeBucketIndex desc, distance desc - * NodeBucketIndex asc, lastUpdated asc - * NodeBucketIndex desc, lastUpdated desc + * NodeBucketIndex asc, connected asc + * NodeBucketIndex desc, connected desc */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async *getBuckets( - sort: 'nodeId' | 'distance' | 'lastUpdated' = 'nodeId', + sort: 'nodeId' | 'distance' | 'connected' = 'nodeId', order: 'asc' | 'desc' = 'asc', tran?: DBTransaction, ): AsyncGenerator<[NodeBucketIndex, NodeBucket]> { @@ -418,175 +678,95 @@ class NodeGraph { }); } - let bucketIndex: NodeBucketIndex | undefined = undefined; - let bucket: NodeBucket = []; - if (sort === 'nodeId' || sort === 'distance') { - for await (const [key, nodeData] of tran.iterator( - this.nodeGraphBucketsDbPath, - { - reverse: order !== 'asc', - valueAsBuffer: false, - }, - )) { - const { bucketIndex: bucketIndex_, nodeId } = - 
nodesUtils.parseBucketsDbKey(key); - if (bucketIndex == null) { - // First entry of the first bucket - bucketIndex = bucketIndex_; - bucket.push([nodeId, nodeData]); - } else if (bucketIndex === bucketIndex_) { - // Subsequent entries of the same bucket - bucket.push([nodeId, nodeData]); - } else if (bucketIndex !== bucketIndex_) { - // New bucket - if (sort === 'distance') { - nodesUtils.bucketSortByDistance( - bucket, - this.keyRing.getNodeId(), - order, - ); - } - yield [bucketIndex, bucket]; - bucketIndex = bucketIndex_; - bucket = [[nodeId, nodeData]]; - } - } - // Yield the last bucket if it exists - if (bucketIndex != null) { - if (sort === 'distance') { - nodesUtils.bucketSortByDistance( - bucket, - this.keyRing.getNodeId(), - order, - ); - } - yield [bucketIndex, bucket]; - } - } else if (sort === 'lastUpdated') { - const bucketsDbIterator = tran.iterator( - this.nodeGraphBucketsDbPath, - { valueAsBuffer: false }, + for (let i = 0; i < this.nodeIdBits; i++) { + const bucketIndex = order === 'asc' ? 
i : this.nodeIdBits - 1 - i;
+      const nodeBucket = await this.getBucket(
+        bucketIndex,
+        sort,
+        order,
+        undefined,
+        tran,
       );
-      try {
-        for await (const [key] of tran.iterator(
-          this.nodeGraphLastUpdatedDbPath,
-          {
-            reverse: order !== 'asc',
-          },
-        )) {
-          const { bucketIndex: bucketIndex_, nodeId } =
-            nodesUtils.parseLastUpdatedBucketsDbKey(key);
-          bucketsDbIterator.seek([key[0], key[2]]);
-          // eslint-disable-next-line
-          const iteratorResult = await bucketsDbIterator.next();
-          if (iteratorResult == null) never();
-          const [, nodeData] = iteratorResult;
-          if (bucketIndex == null) {
-            // First entry of the first bucket
-            bucketIndex = bucketIndex_;
-            bucket.push([nodeId, nodeData]);
-          } else if (bucketIndex === bucketIndex_) {
-            // Subsequent entries of the same bucket
-            bucket.push([nodeId, nodeData]);
-          } else if (bucketIndex !== bucketIndex_) {
-            // New bucket
-            yield [bucketIndex, bucket];
-            bucketIndex = bucketIndex_;
-            bucket = [[nodeId, nodeData]];
-          }
-        }
-        // Yield the last bucket if it exists
-        if (bucketIndex != null) {
-          yield [bucketIndex, bucket];
-        }
-      } finally {
-        await bucketsDbIterator.destroy();
-      }
+      if (nodeBucket.length > 0) yield [bucketIndex, nodeBucket];
     }
   }
 
+  /**
+   * Resets the bucket according to the new node ID.
+   * Run this after new node ID is generated via renewal or reset.
+   */
   @ready(new nodesErrors.ErrorNodeGraphNotRunning())
-  public async resetBuckets(
-    nodeIdOwn: NodeId,
-    tran?: DBTransaction,
-  ): Promise {
+  public async resetBuckets(tran?: DBTransaction): Promise {
     if (tran == null) {
-      return this.db.withTransactionF((tran) =>
-        this.resetBuckets(nodeIdOwn, tran),
-      );
+      return this.db.withTransactionF((tran) => this.resetBuckets(tran));
     }
-
-    const logger = this.logger.getChild('resetBuckets');
     // Setup new space
     const spaceNew = this.space === '0' ? 
'1' : '0'; - logger.debug('new space: ' + spaceNew); const nodeGraphMetaDbPathNew = [...this.nodeGraphDbPath, 'meta' + spaceNew]; const nodeGraphBucketsDbPathNew = [ ...this.nodeGraphDbPath, 'buckets' + spaceNew, ]; - const nodeGraphLastUpdatedDbPathNew = [ + const nodeGraphConnectedDbPathNew = [ ...this.nodeGraphDbPath, - 'index' + spaceNew, + 'connected' + spaceNew, ]; // Clear the new space (in case it wasn't cleaned properly last time) await tran.clear(nodeGraphMetaDbPathNew); await tran.clear(nodeGraphBucketsDbPathNew); - await tran.clear(nodeGraphLastUpdatedDbPathNew); + await tran.clear(nodeGraphConnectedDbPathNew); // Iterating over all entries across all buckets - - for await (const [key, nodeData] of tran.iterator( - this.nodeGraphBucketsDbPath, - { valueAsBuffer: false }, + for await (const [nodeId, nodeContact] of nodesUtils.collectNodeContacts( + [...this.nodeGraphBucketsDbPath], + tran, )) { - // The key is a combined bucket key and node ID - const { bucketIndex: bucketIndexOld, nodeId } = - nodesUtils.parseBucketsDbKey(key); - const nodeIdEncoded = nodesUtils.encodeNodeId(nodeId); const nodeIdKey = nodesUtils.bucketDbKey(nodeId); - // If the new own node ID is one of the existing node IDs, it is just dropped - // We only map to the new bucket if it isn't one of the existing node IDs + const nodeIdOwn = this.keyRing.getNodeId(); if (nodeId.equals(nodeIdOwn)) { - logger.debug( - `nodeId ${nodeIdEncoded} from bucket ${bucketIndexOld} was identical to new NodeId and was dropped.`, - ); continue; } const bucketIndexNew = nodesUtils.bucketIndex(nodeIdOwn, nodeId); const bucketKeyNew = nodesUtils.bucketKey(bucketIndexNew); const metaPathNew = [...nodeGraphMetaDbPathNew, bucketKeyNew]; const bucketPathNew = [...nodeGraphBucketsDbPathNew, bucketKeyNew]; - const indexPathNew = [...nodeGraphLastUpdatedDbPathNew, bucketKeyNew]; const countNew = (await tran.get([...metaPathNew, 'count'])) ?? 
0; if (countNew < this.nodeBucketLimit) { + // If the new bucket is not filled up, the node is moved to the new bucket await tran.put([...metaPathNew, 'count'], countNew + 1); } else { - let oldestIndexKey: KeyPath | undefined = undefined; - let oldestNodeId: NodeId | undefined = undefined; - for await (const [key] of tran.iterator(indexPathNew, { - limit: 1, - })) { - oldestIndexKey = key; - ({ nodeId: oldestNodeId } = - nodesUtils.parseLastUpdatedBucketDbKey(key)); - } - await tran.del([ - ...bucketPathNew, - nodesUtils.bucketDbKey(oldestNodeId!), - ]); - await tran.del([...indexPathNew, ...oldestIndexKey!]); + // TODO + // If the new bucket is already filled up, the oldest node is dropped + // skipping for now + continue; } - if (bucketIndexOld !== bucketIndexNew) { - logger.debug( - `nodeId ${nodeIdEncoded} moved ${bucketIndexOld}=>${bucketIndexNew}`, + // Adding in node + let connectedTimeMax = 0; + for (const nodeContactAddress in nodeContact) { + const nodeContactAddressData = nodeContact[nodeContactAddress]; + await tran.put( + [...bucketPathNew, nodeIdKey, nodeContactAddress], + nodeContactAddressData, + ); + connectedTimeMax = Math.max( + connectedTimeMax, + nodeContactAddressData.connectedTime, ); - } else { - logger.debug(`nodeId ${nodeIdEncoded} unchanged ${bucketIndexOld}`); } - await tran.put([...bucketPathNew, nodeIdKey], nodeData); - const lastUpdatedKey = nodesUtils.lastUpdatedKey(nodeData.lastUpdated); + // Set the new values + const newConnectedKey = nodesUtils.connectedKey(connectedTimeMax); await tran.put( - [...indexPathNew, lastUpdatedKey, nodeIdKey], + [...nodeGraphConnectedDbPathNew, bucketKeyNew, 'nodeId', nodeIdKey], + newConnectedKey, + true, + ); + await tran.put( + [ + ...nodeGraphConnectedDbPathNew, + bucketKeyNew, + 'time', + newConnectedKey, + nodeIdKey, + ], nodeIdKey, true, ); @@ -596,14 +776,18 @@ class NodeGraph { // Clear old space await tran.clear(this.nodeGraphMetaDbPath); await tran.clear(this.nodeGraphBucketsDbPath); - 
await tran.clear(this.nodeGraphLastUpdatedDbPath); + await tran.clear(this.nodeGraphConnectedDbPath); // Swap the spaces this.space = spaceNew; this.nodeGraphMetaDbPath = nodeGraphMetaDbPathNew; this.nodeGraphBucketsDbPath = nodeGraphBucketsDbPathNew; - this.nodeGraphLastUpdatedDbPath = nodeGraphLastUpdatedDbPathNew; + this.nodeGraphConnectedDbPath = nodeGraphConnectedDbPathNew; } + /** + * Get a bucket meta POJO. + * This will provide default values for missing properties. + */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async getBucketMeta( bucketIndex: NodeBucketIndex, @@ -634,6 +818,10 @@ class NodeGraph { }; } + /** + * Get a single bucket meta property. + * This will provide default values for missing properties. + */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async getBucketMetaProp( bucketIndex: NodeBucketIndex, @@ -645,7 +833,6 @@ class NodeGraph { this.getBucketMetaProp(bucketIndex, key, tran), ); } - if (bucketIndex < 0 || bucketIndex >= this.nodeIdBits) { throw new nodesErrors.ErrorNodeGraphBucketIndex( `bucketIndex must be between 0 and ${this.nodeIdBits - 1} inclusive`, @@ -666,20 +853,14 @@ class NodeGraph { } /** - * Finds the set of nodes (of size k) known by the current node (i.e. in its - * buckets' database) that have the smallest distance to the target node (i.e. - * are closest to the target node). - * i.e. FIND_NODE RPC from Kademlia spec - * - * Used by the RPC service. + * Gets the closest nodes (closest based on Kademlia XOR operator) to a + * given node ID. The returned results will be sorted by distance in + * ascending order. If the given node ID already exists in the node graph, + * then it will be the first result. 
* - * @param nodeId the node ID to find other nodes closest to it - * @param limit the number of the closest nodes to return (by default, returns - * according to the maximum number of nodes per bucket) - * @param tran - * @returns a mapping containing exactly k nodeIds -> nodeAddresses (unless the - * current node has less than k nodes in all of its buckets, in which case it - * returns all nodes it has knowledge of) + * @param limit - Defaults to the bucket limit. + * @returns The `NodeBucket` which could have less than `limit` nodes if the + * node graph has less than the requested limit. */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async getClosestNodes( @@ -692,7 +873,6 @@ class NodeGraph { this.getClosestNodes(nodeId, limit, tran), ); } - // Buckets map to the target node in the following way; // 1. 0, 1, ..., T-1 -> T // 2. T -> 0, 1, ..., T-1 @@ -701,95 +881,92 @@ class NodeGraph { // 1. T // 2. iterate over 0 ---> T-1 // 3. iterate over T+1 ---> K - // Need to work out the relevant bucket to start from - const localNodeId = this.keyRing.getNodeId(); - const startingBucket = localNodeId.equals(nodeId) + // If our own node ID, start at bucket 0 + // Otherwise find the bucket that the given node ID belongs to + const nodeIdOwn = this.keyRing.getNodeId(); + const bucketIndexFirst = nodeIdOwn.equals(nodeId) ? 
0 - : nodesUtils.bucketIndex(localNodeId, nodeId); + : nodesUtils.bucketIndex(nodeIdOwn, nodeId); // Getting the whole target's bucket first - const nodeIds: NodeBucket = await this.getBucket( - startingBucket, + const nodes: NodeBucket = await this.getBucket( + bucketIndexFirst, + undefined, undefined, undefined, tran, ); // We need to iterate over the key stream // When streaming we want all nodes in the starting bucket - // The keys takes the form `!(lexpack bucketId)!(nodeId)` - // We can just use `!(lexpack bucketId)` to start from - // Less than `!(bucketId 101)!` gets us buckets 100 and lower - // greater than `!(bucketId 99)!` gets up buckets 100 and greater - if (nodeIds.length < limit) { + // The keys takes the form `lexi/NodeId` + // We can just use `lexi` to start from + // Less than `lexi` gets us buckets 100 and lower + // Greater than `lexi` gets us buckets 100 and greater + if (nodes.length < limit && bucketIndexFirst !== 0) { // Just before target bucket - const bucketIdKey = Buffer.from(nodesUtils.bucketKey(startingBucket)); - const remainingLimit = limit - nodeIds.length; + const bucketIdKey = Buffer.from( + nodesUtils.bucketKey(bucketIndexFirst - 1), + ); + const remainingLimit = limit - nodes.length; // Iterate over lower buckets - for await (const [key, nodeData] of tran.iterator( + for await (const nodeEntry of nodesUtils.collectNodeContacts( this.nodeGraphBucketsDbPath, + tran, { lt: [bucketIdKey, ''], limit: remainingLimit, - valueAsBuffer: false, }, )) { - const info = nodesUtils.parseBucketsDbKey(key); - nodeIds.push([info.nodeId, nodeData]); + nodes.push(nodeEntry); } } - if (nodeIds.length < limit) { + if (nodes.length < limit) { // Just after target bucket - const bucketId = Buffer.from(nodesUtils.bucketKey(startingBucket + 1)); - const remainingLimit = limit - nodeIds.length; + const bucketId = Buffer.from(nodesUtils.bucketKey(bucketIndexFirst)); + const remainingLimit = limit - nodes.length; // Iterate over ids further away - 
tran.iterator(this.nodeGraphBucketsDbPath, { - gt: [bucketId, ''], - limit: remainingLimit, - }); - for await (const [key, nodeData] of tran.iterator( + for await (const nodeEntry of nodesUtils.collectNodeContacts( this.nodeGraphBucketsDbPath, + tran, { gt: [bucketId, ''], limit: remainingLimit, - valueAsBuffer: false, }, )) { - const info = nodesUtils.parseBucketsDbKey(key); - nodeIds.push([info.nodeId, nodeData]); + nodes.push(nodeEntry); } } // If no nodes were found, return nothing - if (nodeIds.length === 0) return []; + if (nodes.length === 0) return []; // Need to get the whole of the last bucket - const lastBucketIndex = nodesUtils.bucketIndex( - this.keyRing.getNodeId(), - nodeIds[nodeIds.length - 1][0], + const bucketIndexLast = nodesUtils.bucketIndex( + nodeIdOwn, + nodes[nodes.length - 1][0], ); const lastBucket = await this.getBucket( - lastBucketIndex, + bucketIndexLast, + undefined, undefined, undefined, tran, ); // Pop off elements of the same bucket to avoid duplicates - let element = nodeIds.pop(); + let element = nodes.pop(); while ( element != null && - nodesUtils.bucketIndex(this.keyRing.getNodeId(), element[0]) === - lastBucketIndex + nodesUtils.bucketIndex(nodeIdOwn, element[0]) === bucketIndexLast ) { - element = nodeIds.pop(); + element = nodes.pop(); } - if (element != null) nodeIds.push(element); + if (element != null) nodes.push(element); // Adding last bucket to the list - nodeIds.push(...lastBucket); - - nodesUtils.bucketSortByDistance(nodeIds, nodeId, 'asc'); - return nodeIds.slice(0, limit); + nodes.push(...lastBucket); + nodesUtils.bucketSortByDistance(nodes, nodeId, 'asc'); + return nodes.slice(0, limit); } /** - * Sets a bucket meta property - * This is protected because users cannot directly manipulate bucket meta + * Sets a single bucket meta property. + * Bucket meta properties cannot be mutated outside. 
*/ protected async setBucketMetaProp( bucketIndex: NodeBucketIndex, @@ -806,21 +983,6 @@ class NodeGraph { return; } - /** - * Derive the bucket index of the k-buckets from the new `NodeId` - * The bucket key is the string encoded version of bucket index - * that preserves lexicographic order - */ - public bucketIndex(nodeId: NodeId): [NodeBucketIndex, string] { - const nodeIdOwn = this.keyRing.getNodeId(); - if (nodeId.equals(nodeIdOwn)) { - throw new nodesErrors.ErrorNodeGraphSameNodeId(); - } - const bucketIndex = nodesUtils.bucketIndex(nodeIdOwn, nodeId); - const bucketKey = nodesUtils.bucketKey(bucketIndex); - return [bucketIndex, bucketKey]; - } - /** * Returns to total number of nodes in the `NodeGraph` */ @@ -828,7 +990,8 @@ class NodeGraph { if (tran == null) { return this.db.withTransactionF((tran) => this.nodesTotal(tran)); } - return await tran.count(this.nodeGraphBucketsDbPath); + // `nodeGraphConnectedDbPath` will contain 2 entries for each `NodeId` within the `NodeGraph` + return (await tran.count(this.nodeGraphConnectedDbPath)) / 2; } } diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 4d042d33f..646cc6305 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -1,59 +1,71 @@ import type { DB, DBTransaction } from '@matrixai/db'; -import type NodeConnectionManager from './NodeConnectionManager'; -import type NodeGraph from './NodeGraph'; +import type { ContextTimed, ContextTimedInput } from '@matrixai/contexts'; +import type { PromiseCancellable } from '@matrixai/async-cancellable'; +import type { ResourceAcquire } from '@matrixai/resources'; import type KeyRing from '../keys/KeyRing'; import type Sigchain from '../sigchain/Sigchain'; +import type TaskManager from '../tasks/TaskManager'; +import type GestaltGraph from '../gestalts/GestaltGraph'; import type { - NodeId, - NodeAddress, - NodeBucket, - NodeBucketIndex, - NodeData, -} from './types'; + TaskHandler, + TaskHandlerId, + Task, + TaskInfo, +} from 
'../tasks/types'; +import type { SignedTokenEncoded } from '../tokens/types'; +import type { Host, Port } from '../network/types'; import type { Claim, ClaimId, ClaimIdEncoded, SignedClaim, } from '../claims/types'; -import type TaskManager from '../tasks/TaskManager'; -import type GestaltGraph from '../gestalts/GestaltGraph'; -import type { TaskHandler, TaskHandlerId, Task } from '../tasks/types'; -import type { ContextTimed } from '@matrixai/contexts'; -import type { PromiseCancellable } from '@matrixai/async-cancellable'; -import type { Host, Port } from '../network/types'; -import type { SignedTokenEncoded } from '../tokens/types'; import type { ClaimLinkNode } from '../claims/payloads'; +import type NodeConnection from '../nodes/NodeConnection'; import type { AgentRPCRequestParams, AgentRPCResponseResult, AgentClaimMessage, } from './agent/types'; -import type { ContextTimedInput } from '@matrixai/contexts/dist/types'; +import type { + NodeId, + NodeAddress, + NodeBucket, + NodeBucketIndex, + NodeContactAddressData, + NodeIdEncoded, +} from './types'; +import type NodeConnectionManager from './NodeConnectionManager'; +import type NodeGraph from './NodeGraph'; +import type { ServicePOJO } from '@matrixai/mdns'; import Logger from '@matrixai/logger'; import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; import { Semaphore, Lock } from '@matrixai/async-locks'; import { IdInternal } from '@matrixai/id'; import { timedCancellable, context } from '@matrixai/contexts/dist/decorators'; -import * as nodesErrors from './errors'; +import { withF } from '@matrixai/resources'; +import { MDNS, events as mdnsEvents, utils as mdnsUtils } from '@matrixai/mdns'; import * as nodesUtils from './utils'; import * as nodesEvents from './events'; -import * as claimsUtils from '../claims/utils'; +import * as nodesErrors from './errors'; +import NodeConnectionQueue from './NodeConnectionQueue'; +import Token from '../tokens/Token'; +import * as keysUtils from 
'../keys/utils'; import * as tasksErrors from '../tasks/errors'; +import * as claimsUtils from '../claims/utils'; import * as claimsErrors from '../claims/errors'; -import * as keysUtils from '../keys/utils'; -import { never, promise } from '../utils/utils'; -import { - decodeClaimId, - encodeClaimId, - parseSignedClaim, -} from '../claims/utils'; -import Token from '../tokens/Token'; +import * as utils from '../utils/utils'; import config from '../config'; +import * as networkUtils from '../network/utils'; const abortEphemeralTaskReason = Symbol('abort ephemeral task reason'); const abortSingletonTaskReason = Symbol('abort singleton task reason'); +/** + * NodeManager manages all operations involving nodes. + * It encapsulates mutations to the NodeGraph. + * It listens to the NodeConnectionManager events. + */ interface NodeManager extends StartStop {} @StartStop({ eventStart: nodesEvents.EventNodeManagerStart, @@ -62,51 +74,77 @@ interface NodeManager extends StartStop {} eventStopped: nodesEvents.EventNodeManagerStopped, }) class NodeManager { + /** + * Time used to establish `NodeConnection` + */ + public readonly connectionConnectTimeoutTime: number; + + public readonly refreshBucketDelayTime: number; + public readonly refreshBucketDelayJitter: number; + /** + * Interval used to reestablish connections to maintain network health. + * Will trigger a refreshBucket for bucket 255 if it is missing connections. + * Will always trigger a findNode(this.keyRing.getNodeId()). 
+ */ + public readonly retryConnectionsDelayTime: number; + public readonly tasksPath = this.constructor.name; + protected db: DB; protected logger: Logger; - protected sigchain: Sigchain; protected keyRing: KeyRing; - protected nodeConnectionManager: NodeConnectionManager; - protected nodeGraph: NodeGraph; - protected taskManager: TaskManager; + protected sigchain: Sigchain; protected gestaltGraph: GestaltGraph; - protected refreshBucketDelay: number; - protected refreshBucketDelayJitter: number; - protected retrySeedConnectionsDelay: number; - protected pendingNodes: Map> = new Map(); + protected taskManager: TaskManager; + protected nodeGraph: NodeGraph; + protected nodeConnectionManager: NodeConnectionManager; + protected mdnsOptions: + | { + groups: Array; + port: Port; + } + | undefined; + protected mdns: MDNS | undefined; - /** - * Time used to establish `NodeConnection` - */ - public readonly connectionConnectTimeoutTime: number; + protected pendingNodes: Map< + number, + Map + > = new Map(); - public readonly basePath = this.constructor.name; protected refreshBucketHandler: TaskHandler = async ( ctx, _taskInfo, - bucketIndex, + bucketIndex: NodeBucketIndex, ) => { + // Don't use defaults like this + // if a default is to be used + // provide it directly + await this.refreshBucket( bucketIndex, this.connectionConnectTimeoutTime, ctx, ); // When completed reschedule the task - const jitter = nodesUtils.refreshBucketsDelayJitter( - this.refreshBucketDelay, - this.refreshBucketDelayJitter, - ); - await this.taskManager.scheduleTask({ - delay: this.refreshBucketDelay + jitter, - handlerId: this.refreshBucketHandlerId, - lazy: true, - parameters: [bucketIndex], - path: [this.basePath, this.refreshBucketHandlerId, `${bucketIndex}`], - priority: 0, - }); + // if refreshBucketDelay is 0 then it's considered disabled + if (this.refreshBucketDelayTime > 0) { + const jitter = nodesUtils.refreshBucketsDelayJitter( + this.refreshBucketDelayTime, + 
this.refreshBucketDelayJitter, + ); + await this.taskManager.scheduleTask({ + delay: this.refreshBucketDelayTime + jitter, + handlerId: this.refreshBucketHandlerId, + lazy: true, + parameters: [bucketIndex], + path: [this.tasksPath, this.refreshBucketHandlerId, `${bucketIndex}`], + priority: 0, + }); + } }; + public readonly refreshBucketHandlerId = - `${this.basePath}.${this.refreshBucketHandler.name}.refreshBucketHandlerId` as TaskHandlerId; + `${this.tasksPath}.${this.refreshBucketHandler.name}` as TaskHandlerId; + protected gcBucketHandler: TaskHandler = async ( ctx, _taskInfo, @@ -125,96 +163,115 @@ class NodeManager { // Re-schedule the task await this.setupGCTask(bucketIndex); }; + public readonly gcBucketHandlerId = - `${this.basePath}.${this.gcBucketHandler.name}.gcBucketHandlerId` as TaskHandlerId; - protected pingAndSetNodeHandler: TaskHandler = async ( - ctx, - _taskInfo, - nodeIdEncoded: string, - host: Host, - port: Port, - ) => { - const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); - if (nodeId == null) { - this.logger.error( - `pingAndSetNodeHandler received invalid NodeId: ${nodeIdEncoded}`, - ); - never(); + `${this.tasksPath}.${this.gcBucketHandler.name}` as TaskHandlerId; + + protected checkConnectionsHandler: TaskHandler = async (ctx, taskInfo) => { + this.logger.debug('Checking connections'); + let connectionCount = 0; + for (const connection of this.nodeConnectionManager.listConnections()) { + if (connection.primary) { + const [bucketId] = this.nodeGraph.bucketIndex(connection.nodeId); + if (bucketId === 255) connectionCount++; + } } - if ( - await this.pingNode(nodeId, [{ host, port, scopes: ['global'] }], { - signal: ctx.signal, - }) - ) { - await this.setNode( - nodeId, - { host, port, scopes: ['global'] }, - false, - false, - 2000, - ctx, - ); + if (connectionCount > 0) { + this.logger.debug('triggering bucket refresh for bucket 255'); + await this.updateRefreshBucketDelay(255, 0); } - }; - public readonly pingAndSetNodeHandlerId: 
TaskHandlerId = - `${this.basePath}.${this.pingAndSetNodeHandler.name}.pingAndSetNodeHandlerId` as TaskHandlerId; - protected checkSeedConnectionsHandler: TaskHandler = async ( - ctx, - taskInfo, - ) => { - this.logger.debug('Checking seed connections'); - // Check for existing seed node connections - const seedNodes = this.nodeConnectionManager.getSeedNodes(); - const allInactive = !seedNodes - .map((nodeId) => this.nodeConnectionManager.hasConnection(nodeId)) - .reduce((a, b) => a || b, false); try { - if (allInactive) { - this.logger.debug( - 'No active seed connections were found, retrying network entry', - ); - // If no seed node connections exist then we redo syncNodeGraph - await this.syncNodeGraph(true, undefined, ctx); - } else { - // Doing this concurrently, we don't care about the results - await Promise.allSettled( - seedNodes.map((nodeId) => { - // Retry any failed seed node connections - if (!this.nodeConnectionManager.hasConnection(nodeId)) { - this.logger.debug( - `Re-establishing seed connection for ${nodesUtils.encodeNodeId( - nodeId, - )}`, - ); - return this.pingNode(nodeId, undefined, ctx); - } - }), - ); - } + this.logger.debug( + 'triggering fidNode for self to populate closest nodes', + ); + await this.findNode( + this.keyRing.getNodeId(), + undefined, + undefined, + undefined, + ctx, + ); } finally { - this.logger.debug('Checked seed connections'); + this.logger.debug('Checked connections'); // Re-schedule this task await this.taskManager.scheduleTask({ delay: taskInfo.delay, deadline: taskInfo.deadline, - handlerId: this.checkSeedConnectionsHandlerId, + handlerId: this.checkConnectionsHandlerId, lazy: true, - path: [this.basePath, this.checkSeedConnectionsHandlerId], + path: [this.tasksPath, this.checkConnectionsHandlerId], priority: taskInfo.priority, }); } }; - public readonly checkSeedConnectionsHandlerId: TaskHandlerId = - `${this.basePath}.${this.checkSeedConnectionsHandler.name}.checkSeedConnectionsHandler` as TaskHandlerId; + + 
public readonly checkConnectionsHandlerId: TaskHandlerId = + `${this.tasksPath}.${this.checkConnectionsHandler.name}` as TaskHandlerId; + + protected syncNodeGraphHandler = async ( + ctx: ContextTimed, + taskInfo: TaskInfo | undefined, + initialNodes: Array<[NodeIdEncoded, [Host, Port]]>, + pingTimeoutTime: number | undefined, + ) => { + // Establishing connections to the initial nodes + const connectionResults = await Promise.allSettled( + initialNodes.map(([nodeIdEncoded, [host, port]]) => { + const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); + if (nodeId == null) utils.never(); + return this.nodeConnectionManager.createConnection( + [nodeId], + host, + port, + { timer: pingTimeoutTime, signal: ctx.signal }, + ); + }), + ); + const successfulConnections = connectionResults.filter( + (r) => r.status === 'fulfilled', + ).length; + if (successfulConnections === 0) { + throw Error('TMP IMP Failed to enter network'); + } + + // Attempt a findNode operation looking for ourselves + await this.findNode( + this.keyRing.getNodeId(), + undefined, + undefined, + undefined, + ctx, + ); + + // Getting the closest node from the `NodeGraph` + let bucketIndex: number | undefined; + for await (const bucket of this.nodeGraph.getBuckets('distance', 'asc')) { + bucketIndex = bucket[0]; + } + // If no buckets then end here + if (bucketIndex == null) return; + // Trigger refreshBucket operations for all buckets above bucketIndex + const refreshBuckets: Array> = []; + for (let i = bucketIndex; i < this.nodeGraph.nodeIdBits; i++) { + const task = await this.updateRefreshBucketDelay(i, 0, false); + refreshBuckets.push(task.promise()); + } + await Promise.all(refreshBuckets); + }; + + public readonly syncNodeGraphHandlerId: TaskHandlerId = + `${this.tasksPath}.${this.syncNodeGraphHandler.name}` as TaskHandlerId; protected handleEventNodeConnectionManagerConnectionReverse = async ( e: nodesEvents.EventNodeConnectionManagerConnectionReverse, ) => { await this.setNode( 
e.detail.remoteNodeId, + [e.detail.remoteHost, e.detail.remotePort], + // FIXME { - host: e.detail.remoteHost, - port: e.detail.remotePort, + mode: 'direct', + connectedTime: Date.now(), scopes: ['global'], }, false, @@ -226,28 +283,35 @@ class NodeManager { db, keyRing, sigchain, - nodeConnectionManager, - nodeGraph, - taskManager, gestaltGraph, - refreshBucketDelay = 3600000, // 1 hour in milliseconds - refreshBucketDelayJitter = 0.5, // Multiple of refreshBucketDelay to jitter by - retrySeedConnectionsDelay = 120000, // 2 minuets + taskManager, + nodeGraph, + nodeConnectionManager, + mdnsOptions, connectionConnectTimeoutTime = config.defaultsSystem .nodesConnectionConnectTimeoutTime, + refreshBucketDelayTime = config.defaultsSystem + .nodesRefreshBucketIntervalTime, + refreshBucketDelayJitter = config.defaultsSystem + .nodesRefreshBucketIntervalTimeJitter, + retryConnectionsDelayTime = 120000, // 2 minutes logger, }: { db: DB; keyRing: KeyRing; sigchain: Sigchain; - nodeConnectionManager: NodeConnectionManager; - nodeGraph: NodeGraph; - taskManager: TaskManager; gestaltGraph: GestaltGraph; - refreshBucketDelay?: number; - refreshBucketDelayJitter?: number; - retrySeedConnectionsDelay?: number; + taskManager: TaskManager; + nodeGraph: NodeGraph; + mdnsOptions?: { + groups: Array; + port: Port; + }; + nodeConnectionManager: NodeConnectionManager; connectionConnectTimeoutTime?: number; + refreshBucketDelayTime?: number; + refreshBucketDelayJitter?: number; + retryConnectionsDelayTime?: number; logger?: Logger; }) { this.logger = logger ?? 
new Logger(this.constructor.name); @@ -258,117 +322,801 @@ class NodeManager { this.nodeGraph = nodeGraph; this.taskManager = taskManager; this.gestaltGraph = gestaltGraph; - this.refreshBucketDelay = refreshBucketDelay; - // Clamped from 0 to 1 inclusive - this.refreshBucketDelayJitter = Math.max( - 0, - Math.min(refreshBucketDelayJitter, 1), - ); - this.retrySeedConnectionsDelay = retrySeedConnectionsDelay; + this.mdnsOptions = mdnsOptions; + if (mdnsOptions != null) { + this.mdns = new MDNS({ logger: this.logger.getChild(MDNS.name) }); + } this.connectionConnectTimeoutTime = connectionConnectTimeoutTime; + this.refreshBucketDelayTime = refreshBucketDelayTime; + this.refreshBucketDelayJitter = Math.max(0, refreshBucketDelayJitter); + this.retryConnectionsDelayTime = retryConnectionsDelayTime; } - public async start() { - this.logger.info(`Starting ${this.constructor.name}`); - this.logger.info(`Registering handler for setNode`); - this.taskManager.registerHandler( - this.refreshBucketHandlerId, - this.refreshBucketHandler, - ); - this.taskManager.registerHandler( - this.gcBucketHandlerId, - this.gcBucketHandler, - ); - this.taskManager.registerHandler( - this.pingAndSetNodeHandlerId, - this.pingAndSetNodeHandler, - ); - this.taskManager.registerHandler( - this.checkSeedConnectionsHandlerId, - this.checkSeedConnectionsHandler, - ); - await this.setupRefreshBucketTasks(); - await this.taskManager.scheduleTask({ - delay: this.retrySeedConnectionsDelay, - handlerId: this.checkSeedConnectionsHandlerId, - lazy: true, - path: [this.basePath, this.checkSeedConnectionsHandlerId], - }); - // Add handling for connections - this.nodeConnectionManager.addEventListener( - nodesEvents.EventNodeConnectionManagerConnectionReverse.name, - this.handleEventNodeConnectionManagerConnectionReverse, - ); - this.logger.info(`Started ${this.constructor.name}`); + public async start() { + this.logger.info(`Starting ${this.constructor.name}`); + this.taskManager.registerHandler( + 
this.refreshBucketHandlerId, + this.refreshBucketHandler, + ); + this.taskManager.registerHandler( + this.gcBucketHandlerId, + this.gcBucketHandler, + ); + this.taskManager.registerHandler( + this.checkConnectionsHandlerId, + this.checkConnectionsHandler, + ); + this.taskManager.registerHandler( + this.syncNodeGraphHandlerId, + this.syncNodeGraphHandler, + ); + await this.setupRefreshBucketTasks(); + // Can be disabled with 0 delay, only use for testing + if (this.retryConnectionsDelayTime > 0) { + await this.taskManager.scheduleTask({ + delay: this.retryConnectionsDelayTime, + handlerId: this.checkConnectionsHandlerId, + lazy: true, + path: [this.tasksPath, this.checkConnectionsHandlerId], + }); + } + // Starting MDNS + if (this.mdns != null) { + const nodeId = this.keyRing.getNodeId(); + await this.mdns.start({ + id: nodeId.toBuffer().readUint16BE(), + hostname: nodesUtils.encodeNodeId(nodeId), + groups: this.mdnsOptions!.groups, + port: this.mdnsOptions!.port, + }); + this.mdns.registerService({ + name: nodesUtils.encodeNodeId(this.keyRing.getNodeId()), + port: this.nodeConnectionManager.port, + type: 'polykey', + protocol: 'udp', + }); + } + // Add handling for connections + this.nodeConnectionManager.addEventListener( + nodesEvents.EventNodeConnectionManagerConnectionReverse.name, + this.handleEventNodeConnectionManagerConnectionReverse, + ); + this.logger.info(`Started ${this.constructor.name}`); + } + + public async stop() { + this.logger.info(`Stopping ${this.constructor.name}`); + // Remove handling for connections + this.nodeConnectionManager.removeEventListener( + nodesEvents.EventNodeConnectionManagerConnectionReverse.name, + this.handleEventNodeConnectionManagerConnectionReverse, + ); + await this.mdns?.stop(); + // Cancels all NodeManager tasks + const taskPs: Array> = []; + for await (const task of this.taskManager.getTasks(undefined, false, [ + this.tasksPath, + ])) { + taskPs.push(task.promise()); + task.cancel(abortEphemeralTaskReason); + } + 
await Promise.allSettled(taskPs); + this.taskManager.deregisterHandler(this.refreshBucketHandlerId); + this.taskManager.deregisterHandler(this.gcBucketHandlerId); + this.taskManager.deregisterHandler(this.checkConnectionsHandlerId); + this.taskManager.deregisterHandler(this.syncNodeGraphHandlerId); + this.logger.info(`Stopped ${this.constructor.name}`); + } + + /** + * For usage with withF, to acquire a connection + * This unique acquire function structure of returning the ResourceAcquire + * itself is such that we can pass targetNodeId as a parameter (as opposed to + * an acquire function with no parameters). + * @param nodeId Id of target node to communicate with + * @param ctx + * @returns ResourceAcquire Resource API for use in with contexts + */ + public acquireConnection( + nodeId: NodeId, + ctx?: Partial, + ): ResourceAcquire { + if (this.keyRing.getNodeId().equals(nodeId)) { + this.logger.warn('Attempting connection to our own NodeId'); + throw new nodesErrors.ErrorNodeManagerNodeIdOwn(); + } + return async () => { + // Checking if connection already exists + if (!this.nodeConnectionManager.hasConnection(nodeId)) { + // Establish the connection + const result = await this.findNode( + nodeId, + undefined, + undefined, + undefined, + ctx, + ); + if (result == null) { + throw new nodesErrors.ErrorNodeManagerConnectionFailed(); + } + } + return await this.nodeConnectionManager.acquireConnection(nodeId)(); + }; + } + + /** + * Perform some function on another node over the network with a connection. + * Will either retrieve an existing connection, or create a new one if it + * doesn't exist. 
+ * for use with normal arrow function + * @param nodeId Id of target node to communicate with + * @param f Function to handle communication + * @param ctx + */ + public withConnF( + nodeId: NodeId, + f: (conn: NodeConnection) => Promise, + ctx?: Partial, + ): PromiseCancellable; + @ready(new nodesErrors.ErrorNodeManagerNotRunning()) + public async withConnF( + nodeId: NodeId, + f: (conn: NodeConnection) => Promise, + @context ctx: ContextTimed, + ) { + return await withF( + [this.acquireConnection(nodeId, ctx)], + async ([conn]) => { + return await f(conn); + }, + ); + } + + /** + * Perform some function on another node over the network with a connection. + * Will either retrieve an existing connection, or create a new one if it + * doesn't exist. + * for use with a generator function + * @param nodeId Id of target node to communicate with + * @param g Generator function to handle communication + * @param ctx + */ + @ready(new nodesErrors.ErrorNodeManagerNotRunning()) + public async *withConnG( + nodeId: NodeId, + g: (conn: NodeConnection) => AsyncGenerator, + ctx?: Partial, + ): AsyncGenerator { + const acquire = this.acquireConnection(nodeId, ctx); + const [release, conn] = await acquire(); + let caughtError; + try { + if (conn == null) utils.never(); + return yield* g(conn); + } catch (e) { + caughtError = e; + throw e; + } finally { + await release(caughtError); + } + } + + /** + * Will attempt to find a node within the network using a hybrid strategy of + * attempting signalled connections, direct connections and checking MDNS. + * + * Will attempt to fix regardless of existing connection. + * @param nodeId - NodeId of target to find. + * @param pingTimeoutTime - timeout time for each individual connection. + * @param concurrencyLimit - Limit the number of concurrent connections. + * @param limit - Limit the number of total connections to be made before giving up. 
+ * @param ctx + */ + public findNode( + nodeId: NodeId, + pingTimeoutTime?: number, + concurrencyLimit?: number, + limit?: number, + ctx?: Partial, + ): PromiseCancellable<[NodeAddress, NodeContactAddressData] | undefined>; + @timedCancellable(true) + public async findNode( + nodeId: NodeId, + pingTimeoutTime: number = 2000, + concurrencyLimit: number = 3, + limit: number = this.nodeGraph.nodeBucketLimit, + @context ctx: ContextTimed, + ): Promise<[NodeAddress, NodeContactAddressData] | undefined> { + // Setting up intermediate signal + const abortController = new AbortController(); + const newCtx = { + timer: ctx.timer, + signal: abortController.signal, + }; + const handleAbort = () => { + abortController.abort(ctx.signal.reason); + }; + if (ctx.signal.aborted) { + handleAbort(); + } else { + ctx.signal.addEventListener('abort', handleAbort, { once: true }); + } + + const rateLimit = new Semaphore(concurrencyLimit); + const connectionsQueue = new NodeConnectionQueue( + this.keyRing.getNodeId(), + nodeId, + limit, + rateLimit, + rateLimit, + ); + + // Starting discovery strategies + const findBySignal = this.findNodeBySignal( + nodeId, + connectionsQueue, + pingTimeoutTime, + newCtx, + ); + const findByDirect = this.findNodeByDirect( + nodeId, + connectionsQueue, + pingTimeoutTime, + newCtx, + ); + const findByMDNS = this.findNodeByMDNS(nodeId, newCtx); + + try { + return await Promise.any([findBySignal, findByDirect, findByMDNS]); + } catch (e) { + // FIXME: check error type and throw if not connection related failure + return; + } finally { + abortController.abort(Error('TMP IMP cancelling pending connections')); + await Promise.allSettled([findBySignal, findByDirect, findByMDNS]); + ctx.signal.removeEventListener('abort', handleAbort); + } + } + + /** + * Will try to make a connection to the node using hole punched connections only + * + * @param nodeId - NodeId of the target to find. + * @param nodeConnectionsQueue - shared nodeConnectionQueue helper class. 
+ * @param pingTimeoutTime - timeout time for each individual connection. + * @param ctx + */ + public findNodeBySignal( + nodeId: NodeId, + nodeConnectionsQueue: NodeConnectionQueue, + pingTimeoutTime?: number, + ctx?: Partial, + ): PromiseCancellable<[[Host, Port], NodeContactAddressData]>; + @timedCancellable(true) + public async findNodeBySignal( + nodeId: NodeId, + nodeConnectionsQueue: NodeConnectionQueue, + pingTimeoutTime: number = 2000, + @context ctx: ContextTimed, + ): Promise<[[Host, Port], NodeContactAddressData]> { + // Setting up intermediate signal + const abortController = new AbortController(); + const newCtx = { + timer: ctx.timer, + signal: abortController.signal, + }; + const handleAbort = () => { + abortController.abort(ctx.signal.reason); + }; + if (ctx.signal.aborted) { + handleAbort(); + } else { + ctx.signal.addEventListener('abort', handleAbort, { once: true }); + } + + const chain: Map = new Map(); + let connectionMade: [Host, Port] | undefined; + + // Seed the initial queue + for (const { + nodeId: nodeIdConnected, + } of this.nodeConnectionManager.getClosestConnections(nodeId)) { + nodeConnectionsQueue.queueNodeSignal(nodeIdConnected, undefined); + } + + while (true) { + const isDone = await nodeConnectionsQueue.withNodeSignal( + async (nodeIdTarget, nodeIdSignaller) => { + let nodeConnection: NodeConnection | undefined; + if ( + !this.nodeConnectionManager.hasConnection(nodeIdTarget) && + nodeIdSignaller != null + ) { + this.logger.debug( + `attempting connection to ${nodesUtils.encodeNodeId( + nodeIdTarget, + )} via ${ + nodeIdSignaller != null + ? 
nodesUtils.encodeNodeId(nodeIdSignaller) + : 'local' + }`, + ); + nodeConnection = + await this.nodeConnectionManager.createConnectionPunch( + nodeIdTarget, + nodeIdSignaller, + { + timer: pingTimeoutTime, + signal: newCtx.signal, + }, + ); + // If connection succeeds add it to the chain + chain.set(nodeIdTarget.toString(), nodeIdSignaller?.toString()); + } + nodeConnectionsQueue.contactedNode(nodeIdTarget); + // If connection was our target then we're done + if (nodeId.toString() === nodeIdTarget.toString()) { + nodeConnection = + nodeConnection ?? + this.nodeConnectionManager.getConnection(nodeIdTarget) + ?.connection; + if (nodeConnection == null) utils.never('connection should exist'); + connectionMade = [nodeConnection.host, nodeConnection.port]; + return true; + } + await this.queueDataFromRequest( + nodeIdTarget, + nodeId, + nodeConnectionsQueue, + newCtx, + ); + this.logger.debug( + `connection attempt to ${nodesUtils.encodeNodeId( + nodeIdTarget, + )} succeeded`, + ); + return false; + }, + newCtx, + ); + if (isDone) break; + } + // After queue is done we want to signal and await clean up + abortController.abort(Error('TMP IMP cancelling pending connections')); + ctx.signal.removeEventListener('abort', handleAbort); + // Wait for pending attempts to finish + for (const pendingP of nodeConnectionsQueue.nodesRunningSignal) { + await pendingP.catch((e) => { + if (e instanceof nodesErrors.ErrorNodeConnectionTimeout) return; + throw e; + }); + } + + // Connection was not made + if (connectionMade == null) { + throw new nodesErrors.ErrorNodeManagerFindNodeFailed( + 'failed to find node via signal', + ); + } + // We can get the path taken with this code + // const path: Array = []; + // let current: string | undefined = nodeId.toString(); + // while (current != null) { + // const nodeId = IdInternal.fromString(current); + // path.unshift(nodeId); + // current = chain.get(current); + // } + // console.log(path); + return [ + connectionMade, + { + mode: 'signal', + 
connectedTime: Date.now(), + scopes: ['global'], + }, + ] as [[Host, Port], NodeContactAddressData]; + } + + /** + * Will try to make a connection to the node using direct connections only + * + * @param nodeId - NodeId of the target to find. + * @param nodeConnectionsQueue - shared nodeConnectionQueue helper class. + * @param pingTimeoutTime - timeout time for each individual connection. + * @param ctx + */ + public findNodeByDirect( + nodeId: NodeId, + nodeConnectionsQueue: NodeConnectionQueue, + pingTimeoutTime?: number, + ctx?: Partial, + ): PromiseCancellable<[[Host, Port], NodeContactAddressData]>; + @timedCancellable(true) + public async findNodeByDirect( + nodeId: NodeId, + nodeConnectionsQueue: NodeConnectionQueue, + pingTimeoutTime: number = 2000, + @context ctx: ContextTimed, + ): Promise<[[Host, Port], NodeContactAddressData]> { + // Setting up intermediate signal + const abortController = new AbortController(); + const newCtx = { + timer: ctx.timer, + signal: abortController.signal, + }; + const handleAbort = () => { + abortController.abort(ctx.signal.reason); + }; + if (ctx.signal.aborted) { + handleAbort(); + } else { + ctx.signal.addEventListener('abort', handleAbort, { once: true }); + } + + let connectionMade = false; + + // Seed the initial queue + for (const [ + nodeIdTarget, + nodeContact, + ] of await this.nodeGraph.getClosestNodes( + nodeId, + this.nodeGraph.nodeBucketLimit, + )) { + nodeConnectionsQueue.queueNodeDirect(nodeIdTarget, nodeContact); + } + + while (true) { + const isDone = await nodeConnectionsQueue.withNodeDirect( + async (nodeIdTarget, nodeContact) => { + if (!this.nodeConnectionManager.hasConnection(nodeIdTarget)) { + this.logger.debug( + `attempting connection to ${nodesUtils.encodeNodeId( + nodeIdTarget, + )} via direct connection`, + ); + + // Attempt all direct + const addresses: Array<[Host, Port]> = []; + for (const [ + nodeContactAddress, + nodeContactAddressData, + ] of Object.entries(nodeContact)) { + // FIXME: 
handle hostnames by resolving them. + const [host, port] = + nodesUtils.parseNodeContactAddress(nodeContactAddress); + if ( + nodeContactAddressData.mode === 'direct' && + networkUtils.isHost(host) + ) { + addresses.push([host as Host, port]); + } + } + + try { + await this.nodeConnectionManager.createConnectionMultiple( + [nodeIdTarget], + addresses, + { timer: pingTimeoutTime, signal: newCtx.signal }, + ); + } catch (e) { + if (e instanceof nodesErrors.ErrorNodeConnectionTimeout) { + return false; + } + throw e; + } + } + nodeConnectionsQueue.contactedNode(nodeIdTarget); + // If connection was our target then we're done + if (nodeId.toString() === nodeIdTarget.toString()) { + connectionMade = true; + return true; + } + await this.queueDataFromRequest( + nodeIdTarget, + nodeId, + nodeConnectionsQueue, + newCtx, + ); + this.logger.debug( + `connection attempt to ${nodesUtils.encodeNodeId( + nodeIdTarget, + )} succeeded`, + ); + return false; + }, + newCtx, + ); + if (isDone) break; + } + // After queue is done we want to signal and await clean up + abortController.abort(Error('TMP IMP cancelling pending connections')); + ctx.signal.removeEventListener('abort', handleAbort); + // Wait for pending attempts to finish + for (const pendingP of nodeConnectionsQueue.nodesRunningDirect) { + await pendingP.catch((e) => { + if (e instanceof nodesErrors.ErrorNodeConnectionTimeout) return; + throw e; + }); + } + + if (!connectionMade) { + throw new nodesErrors.ErrorNodeManagerFindNodeFailed( + 'failed to find node via direct', + ); + } + const conAndTimer = this.nodeConnectionManager.getConnection(nodeId); + if (conAndTimer == null) { + utils.never('connection should have been established'); + } + return [ + [conAndTimer.connection.host, conAndTimer.connection.port], + { + mode: 'direct', + connectedTime: Date.now(), + scopes: ['global'], + }, + ] as [[Host, Port], NodeContactAddressData]; } - public async stop() { - this.logger.info(`Stopping ${this.constructor.name}`); - // 
Remove handling for connections - this.nodeConnectionManager.removeEventListener( - nodesEvents.EventNodeConnectionManagerConnectionReverse.name, - this.handleEventNodeConnectionManagerConnectionReverse, + /** + * Will query via MDNS for running Polykey nodes with the matching NodeId + * + * @param nodeId + * @param ctx + */ + public queryMDNS( + nodeId: NodeId, + ctx?: Partial, + ): PromiseCancellable>; + @timedCancellable(true) + public async queryMDNS( + nodeId: NodeId, + @context ctx: ContextTimed, + ): Promise> { + const addresses: Array<[Host, Port]> = []; + if (this.mdns == null) return addresses; + const encodedNodeId = nodesUtils.encodeNodeId(nodeId); + // First check if we already have an existing MDNS Service + const mdnsOptions = { type: 'polykey', protocol: 'udp' } as const; + let service: ServicePOJO | void = this.mdns.networkServices.get( + mdnsUtils.toFqdn({ name: encodedNodeId, ...mdnsOptions }), ); - this.logger.info('Cancelling ephemeral tasks'); - if (this.taskManager.isProcessing()) { - throw new tasksErrors.ErrorTaskManagerProcessing(); + if (service == null) { + // Setup promises + const { p: endedP, resolveP: resolveEndedP } = utils.promise(); + const abortHandler = () => { + resolveEndedP(); + }; + ctx.signal.addEventListener('abort', abortHandler, { once: true }); + ctx.timer.catch(() => {}).finally(() => abortHandler()); + const { p: serviceP, resolveP: resolveServiceP } = + utils.promise(); + const handleEventMDNSService = (evt: mdnsEvents.EventMDNSService) => { + if (evt.detail.name === encodedNodeId) { + resolveServiceP(evt.detail); + } + }; + this.mdns.addEventListener( + mdnsEvents.EventMDNSService.name, + handleEventMDNSService, + { once: true }, + ); + // Abort and restart query in case already running + this.mdns.stopQuery(mdnsOptions); + this.mdns.startQuery(mdnsOptions); + // Race promises to find node or timeout + service = await Promise.race([serviceP, endedP]); + this.mdns.removeEventListener( + 
mdnsEvents.EventMDNSService.name, + handleEventMDNSService, + ); + this.mdns.stopQuery(mdnsOptions); + ctx.signal.removeEventListener('abort', abortHandler); } - const tasks: Array> = []; - for await (const task of this.taskManager.getTasks('asc', false, [ - this.basePath, - ])) { - tasks.push(task.promise()); - task.cancel(abortEphemeralTaskReason); + // If the service is not found, just return no addresses + if (service == null) { + return addresses; + } + for (const host_ of service.hosts) { + let host: string; + switch (this.nodeConnectionManager.type) { + case 'ipv4': + if (networkUtils.isIPv4(host_)) { + host = host_; + } else if (networkUtils.isIPv4MappedIPv6(host_)) { + host = networkUtils.fromIPv4MappedIPv6(host_); + } else { + continue; + } + break; + case 'ipv6': + if (networkUtils.isIPv6(host_)) host = host_; + else continue; + break; + case 'ipv4&ipv6': + host = host_; + break; + default: + continue; + } + addresses.push([host as Host, service.port as Port]); + this.logger.debug( + `found address for ${nodesUtils.encodeNodeId(nodeId)} at ${host}:${ + service.port + }`, + ); + } + return addresses; + } + + /** + * Will query MDNS for local nodes and attempt a connection. + * @param nodeId - NodeId of the target to find. 
+ * @param ctx + */ + public findNodeByMDNS( + nodeId: NodeId, + ctx?: Partial, + ): PromiseCancellable<[[Host, Port], NodeContactAddressData]>; + @timedCancellable(true) + public async findNodeByMDNS( + nodeId: NodeId, + @context ctx: ContextTimed, + ): Promise<[[Host, Port], NodeContactAddressData]> { + try { + if (this.mdns == null) { + throw new nodesErrors.ErrorNodeManagerFindNodeFailed( + 'MDNS not running', + ); + } + // First get the address data + const addresses = await this.queryMDNS(nodeId, ctx); + if (addresses.length === 0) { + throw new nodesErrors.ErrorNodeManagerFindNodeFailed( + 'query resulted in no addresses found', + ); + } + // Then make the connection + const nodeConnection = + await this.nodeConnectionManager.createConnectionMultiple( + [nodeId], + addresses, + ); + return [ + [nodeConnection.host, nodeConnection.port], + { + mode: 'direct', + connectedTime: Date.now(), + scopes: ['local'], + }, + ] as [[Host, Port], NodeContactAddressData]; + } catch (e) { + throw new nodesErrors.ErrorNodeManagerFindNodeFailed( + 'failed to find node via MDNS', + { cause: e }, + ); } - // We don't care about the result, only that they've ended - await Promise.allSettled(tasks); - this.logger.info('Cancelled ephemeral tasks'); - this.logger.info(`Unregistering handler for setNode`); - this.taskManager.deregisterHandler(this.refreshBucketHandlerId); - this.taskManager.deregisterHandler(this.gcBucketHandlerId); - this.taskManager.deregisterHandler(this.pingAndSetNodeHandlerId); - this.taskManager.deregisterHandler(this.checkSeedConnectionsHandlerId); - this.logger.info(`Stopped ${this.constructor.name}`); } /** - * Determines whether a node in the Polykey network is online. - * @return true if online, false if offline - * @param nodeId - NodeId of the node we're pinging - * @param addresses - Optional Host and Port we want to ping + * Will ask the target node about the closest nodes to the `node` + * and add them to the `nodeConnectionsQueue`. 
+ * + * @param nodeId - node to find the closest nodes to + * @param nodeIdTarget - node to make RPC requests to + * @param nodeConnectionsQueue * @param ctx */ + protected async queueDataFromRequest( + nodeId: NodeId, + nodeIdTarget: NodeId, + nodeConnectionsQueue: NodeConnectionQueue, + ctx: ContextTimed, + ) { + await this.nodeConnectionManager.withConnF(nodeId, async (conn) => { + const nodeIdEncoded = nodesUtils.encodeNodeId(nodeIdTarget); + const closestConnectionsRequestP = (async () => { + const resultStream = + await conn.rpcClient.methods.nodesClosestActiveConnectionsGet( + { + nodeIdEncoded: nodeIdEncoded, + }, + ctx, + ); + // Collecting results + for await (const result of resultStream) { + const nodeIdNew = nodesUtils.decodeNodeId(result.nodeId); + if (nodeIdNew == null) { + utils.never('failed to decode nodeId'); + } + nodeConnectionsQueue.queueNodeSignal(nodeIdNew, nodeId); + } + })(); + const closestNodesRequestP = (async () => { + const resultStream = + await conn.rpcClient.methods.nodesClosestLocalNodesGet({ + nodeIdEncoded: nodeIdEncoded, + }); + for await (const { nodeIdEncoded, nodeContact } of resultStream) { + const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); + if (nodeId == null) utils.never(); + nodeConnectionsQueue.queueNodeDirect(nodeId, nodeContact); + } + })(); + await Promise.allSettled([ + closestConnectionsRequestP, + closestNodesRequestP, + ]); + }); + } + + /** + * Will attempt to establish connection using `findNode` or use existing connection. + * Will return true if connection was established or already exists, false otherwise. 
+ */ public pingNode( nodeId: NodeId, - addresses?: Array, ctx?: Partial, - ): PromiseCancellable; + ): PromiseCancellable<[NodeAddress, NodeContactAddressData] | undefined>; + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) @timedCancellable( true, - (nodeManager: NodeManager) => nodeManager.connectionConnectTimeoutTime, + (nodeConnectionManager: NodeConnectionManager) => + nodeConnectionManager.connectionConnectTimeoutTime, ) public async pingNode( nodeId: NodeId, - addresses: Array | undefined, + @context ctx: ContextTimed, + ): Promise<[NodeAddress, NodeContactAddressData] | undefined> { + return await this.findNode( + nodeId, + 2000, + 3, + this.nodeGraph.nodeBucketLimit, + ctx, + ); + } + + /** + * Will attempt to make a direct connection without ICE. + * This will only succeed due to these conditions + * 1. connection already exists to target. + * 2. Nat already allows port due to already being punched. + * 3. Port is publicly accessible due to nat configuration . + * Will return true if connection was established or already exists, false otherwise. + */ + public pingNodeAddress( + nodeId: NodeId, + host: Host, + port: Port, + ctx?: Partial, + ): PromiseCancellable; + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + @timedCancellable( + true, + (nodeConnectionManager: NodeConnectionManager) => + nodeConnectionManager.connectionConnectTimeoutTime, + ) + public async pingNodeAddress( + nodeId: NodeId, + host: Host, + port: Port, @context ctx: ContextTimed, ): Promise { - // We need to attempt a connection using the proxies - // For now we will just do a forward connect + relay message - const targetAddresses = - addresses ?? 
- (await this.nodeConnectionManager.findNodeAll( - nodeId, - this.connectionConnectTimeoutTime, + if (this.nodeConnectionManager.hasConnection(nodeId)) return true; + try { + await this.nodeConnectionManager.createConnection( + [nodeId], + host, + port, ctx, - )); - if (targetAddresses == null) { + ); + return true; + } catch { + // TODO: stricter error checking return false; } - return await this.nodeConnectionManager.pingNode( - nodeId, - targetAddresses, - ctx, - ); } /** @@ -378,10 +1126,9 @@ class NodeManager { * For node1 -> node2 claims, the verification process also involves connecting * to node2 to verify the claim (to retrieve its signing public key). * @param targetNodeId Id of the node to connect request the chain data of. - * @param claimId If set then we get the claims newer that this claim Id. + * @param claimId If set then we get the claims newer that this claim ID. * @param ctx */ - // FIXME: this should be a generator/stream public requestChainData( targetNodeId: NodeId, claimId?: ClaimId, @@ -394,20 +1141,24 @@ class NodeManager { @context ctx: ContextTimed, ): Promise> { // Verify the node's chain with its own public key - return await this.nodeConnectionManager.withConnF( + return await this.withConnF( targetNodeId, async (connection) => { const claims: Record = {}; const client = connection.getClient(); for await (const agentClaim of await client.methods.nodesClaimsGet({ claimIdEncoded: - claimId != null ? encodeClaimId(claimId) : ('' as ClaimIdEncoded), + claimId != null + ? 
claimsUtils.encodeClaimId(claimId) + : ('' as ClaimIdEncoded), })) { if (ctx.signal.aborted) throw ctx.signal.reason; // Need to re-construct each claim - const claimId: ClaimId = decodeClaimId(agentClaim.claimIdEncoded)!; + const claimId: ClaimId = claimsUtils.decodeClaimId( + agentClaim.claimIdEncoded, + )!; const signedClaimEncoded = agentClaim.signedTokenEncoded; - const signedClaim = parseSignedClaim(signedClaimEncoded); + const signedClaim = claimsUtils.parseSignedClaim(signedClaimEncoded); // Verifying the claim const issPublicKey = keysUtils.publicKeyFromNodeId( nodesUtils.decodeNodeId(signedClaim.payload.iss)!, @@ -442,10 +1193,16 @@ class NodeManager { * Call this function upon receiving a "claim node request" notification from * another node. */ - public async claimNode( + public claimNode( targetNodeId: NodeId, tran?: DBTransaction, - ctx?: ContextTimed, // FIXME, this needs to be a timed cancellable + ctx?: Partial, + ): PromiseCancellable; + @ready(new nodesErrors.ErrorNodeManagerNotRunning()) + public async claimNode( + targetNodeId: NodeId, + tran: DBTransaction | undefined, + @context ctx: ContextTimed, ): Promise { if (tran == null) { return this.db.withTransactionF((tran) => { @@ -460,7 +1217,7 @@ class NodeManager { }, undefined, async (token) => { - return this.nodeConnectionManager.withConnF( + return this.withConnF( targetNodeId, async (conn) => { // 2. 
create the agentClaim message to send @@ -483,7 +1240,7 @@ class NodeManager { } const receivedClaim = readStatus.value; // We need to re-construct the token from the message - const signedClaim = parseSignedClaim( + const signedClaim = claimsUtils.parseSignedClaim( receivedClaim.signedTokenEncoded, ); fullySignedToken = Token.fromSigned(signedClaim); @@ -506,7 +1263,7 @@ class NodeManager { } const receivedClaimRemote = readStatus2.value; // We need to re-construct the token from the message - const signedClaimRemote = parseSignedClaim( + const signedClaimRemote = claimsUtils.parseSignedClaim( receivedClaimRemote.signedTokenEncoded, ); // This is a singly signed claim, @@ -553,7 +1310,6 @@ class NodeManager { }); } - // TODO: make cancellable public async *handleClaimNode( requestingNodeId: NodeId, input: AsyncIterableIterator>, @@ -570,7 +1326,9 @@ class NodeManager { throw new claimsErrors.ErrorEmptyStream(); } const receivedMessage = readStatus.value; - const signedClaim = parseSignedClaim(receivedMessage.signedTokenEncoded); + const signedClaim = claimsUtils.parseSignedClaim( + receivedMessage.signedTokenEncoded, + ); const token = Token.fromSigned(signedClaim); // Verify if the token is signed if ( @@ -591,7 +1349,7 @@ class NodeManager { }; // Now we want to send our own claim signed - const halfSignedClaimProm = promise(); + const halfSignedClaimProm = utils.promise(); const claimProm = this.sigchain.addClaim( { typ: 'ClaimLinkNode', @@ -610,7 +1368,9 @@ class NodeManager { } const receivedClaim = readStatus.value; // We need to re-construct the token from the message - const signedClaim = parseSignedClaim(receivedClaim.signedTokenEncoded); + const signedClaim = claimsUtils.parseSignedClaim( + receivedClaim.signedTokenEncoded, + ); const fullySignedToken = Token.fromSigned(signedClaim); // Check that the signatures are correct const requestingNodePublicKey = @@ -644,53 +1404,13 @@ class NodeManager { }); } - /** - * Retrieves the node Address from the 
NodeGraph - * @param nodeId node ID of the target node - * @param tran - * @returns Node Address of the target node - */ - public async getNodeAddress( - nodeId: NodeId, - tran: DBTransaction, - ): Promise { - return (await this.nodeGraph.getNode(nodeId, tran))?.address; - } - - /** - * Determines whether a node ID -> node address mapping exists in the NodeGraph - * @param targetNodeId the node ID of the node to find - * @param tran - * @returns true if the node exists in the table, false otherwise - */ - public async knowsNode( - targetNodeId: NodeId, - tran: DBTransaction, - ): Promise { - return (await this.nodeGraph.getNode(targetNodeId, tran)) != null; - } - - /** - * Gets the specified bucket from the NodeGraph - */ - public async getBucket( - bucketIndex: number, - tran?: DBTransaction, - ): Promise { - return await this.nodeGraph.getBucket( - bucketIndex, - undefined, - undefined, - tran, - ); - } - /** * Adds a node to the node graph. This assumes that you have already authenticated the node * Updates the node if the node already exists * This operation is blocking by default - set `block` 2qto false to make it non-blocking - * @param nodeId - Id of the node we wish to add + * @param nodeId - ID of the node we wish to add * @param nodeAddress - Expected address of the node we want to add + * @param nodeContactAddressData * @param block - When true it will wait for any garbage collection to finish before returning. * @param force - Flag for if we want to add the node without authenticating or if the bucket is full. * This will drop the oldest node in favor of the new. 
@@ -701,6 +1421,7 @@ class NodeManager { public setNode( nodeId: NodeId, nodeAddress: NodeAddress, + nodeContactAddressData: NodeContactAddressData, block?: boolean, force?: boolean, pingTimeoutTime?: number, @@ -712,6 +1433,7 @@ class NodeManager { public async setNode( nodeId: NodeId, nodeAddress: NodeAddress, + nodeContactAddressData: NodeContactAddressData, block: boolean = false, force: boolean = false, pingTimeoutTime: number = this.connectionConnectTimeoutTime, @@ -729,6 +1451,7 @@ class NodeManager { this.setNode( nodeId, nodeAddress, + nodeContactAddressData, block, force, pingTimeoutTime, @@ -749,21 +1472,28 @@ class NodeManager { const [bucketIndex] = this.nodeGraph.bucketIndex(nodeId); // To avoid conflict we want to lock on the bucket index await this.nodeGraph.lockBucket(bucketIndex, tran); - const nodeData = await this.nodeGraph.getNode(nodeId, tran); + + const nodeContact = await this.nodeGraph.getNodeContact(nodeId, tran); // If this is a new entry, check the bucket limit const count = await this.nodeGraph.getBucketMetaProp( bucketIndex, 'count', tran, ); - if (nodeData != null || count < this.nodeGraph.nodeBucketLimit) { + + if (nodeContact != null || count < this.nodeGraph.nodeBucketLimit) { // Either already exists or has room in the bucket // We want to add or update the node - await this.nodeGraph.setNode(nodeId, nodeAddress, tran); + await this.nodeGraph.setNodeContactAddressData( + nodeId, + nodeAddress, + nodeContactAddressData, + tran, + ); // Updating the refreshBucket timer await this.updateRefreshBucketDelay( bucketIndex, - this.refreshBucketDelay, + this.refreshBucketDelayTime, true, tran, ); @@ -771,21 +1501,33 @@ class NodeManager { // We want to add a node but the bucket is full if (force) { // We just add the new node anyway without checking the old one - const oldNodeId = ( - await this.nodeGraph.getOldestNode(bucketIndex, 1, tran) - ).pop(); - if (oldNodeId == null) never(); + const bucket = await this.nodeGraph.getBucket( + 
bucketIndex, + 'connected', + 'asc', + 1, + tran, + ); + const oldNodeId = bucket[0]?.[0]; + if (oldNodeId == null) { + utils.never('bucket should be full in this case'); + } this.logger.debug( `Force was set, removing ${nodesUtils.encodeNodeId( oldNodeId, )} and adding ${nodesUtils.encodeNodeId(nodeId)}`, ); - await this.nodeGraph.unsetNode(oldNodeId, tran); - await this.nodeGraph.setNode(nodeId, nodeAddress, tran); + await this.nodeGraph.unsetNodeContact(oldNodeId, tran); + await this.nodeGraph.setNodeContactAddressData( + nodeId, + nodeAddress, + nodeContactAddressData, + tran, + ); // Updating the refreshBucket timer await this.updateRefreshBucketDelay( bucketIndex, - this.refreshBucketDelay, + this.refreshBucketDelayTime, true, tran, ); @@ -801,6 +1543,7 @@ class NodeManager { bucketIndex, nodeId, nodeAddress, + nodeContactAddressData, block, pingTimeoutTime, ctx, @@ -809,6 +1552,29 @@ class NodeManager { } } + /** + * Removes a node from the NodeGraph + */ + public async unsetNode(nodeId: NodeId, tran: DBTransaction): Promise { + return await this.nodeGraph.unsetNodeContact(nodeId, tran); + } + + /** + * Gets the specified bucket from the NodeGraph + */ + public async getBucket( + bucketIndex: number, + tran?: DBTransaction, + ): Promise { + return await this.nodeGraph.getBucket( + bucketIndex, + undefined, + undefined, + undefined, + tran, + ); + } + protected garbageCollectBucket( bucketIndex: number, pingTimeoutTime?: number, @@ -842,20 +1608,19 @@ class NodeManager { // Locking on bucket await this.nodeGraph.lockBucket(bucketIndex, tran); const semaphore = new Semaphore(3); - // Iterating over existing nodes - const bucket = await this.nodeGraph.getOldestNode( + const bucket = await this.nodeGraph.getBucket( bucketIndex, + 'connected', + 'asc', this.nodeGraph.nodeBucketLimit, tran, ); - if (bucket == null) never(); + if (bucket == null) utils.never(); let removedNodes = 0; const unsetLock = new Lock(); const pendingPromises: Array> = []; - for (const 
nodeId of bucket) { - // We want to retain seed nodes regardless of state, so skip them - if (this.nodeConnectionManager.isSeedNode(nodeId)) continue; + for (const [nodeId] of bucket) { if (removedNodes >= pendingNodes.size) break; await semaphore.waitForUnlock(); if (ctx.signal?.aborted === true) break; @@ -867,13 +1632,14 @@ class NodeManager { signal: ctx.signal, timer: pingTimeoutTime, }; - const nodeAddress = await this.getNodeAddress(nodeId, tran); - if (nodeAddress == null) never(); - if (await this.pingNode(nodeId, [nodeAddress], pingCtx)) { + const pingResult = await this.pingNode(nodeId, pingCtx); + if (pingResult != null) { // Succeeded so update + const [nodeAddress, nodeContactAddressData] = pingResult; await this.setNode( nodeId, nodeAddress, + nodeContactAddressData, false, false, undefined, @@ -898,12 +1664,16 @@ class NodeManager { // Wait for pending pings to complete await Promise.all(pendingPromises); // Fill in bucket with pending nodes - for (const [nodeIdString, address] of pendingNodes) { + for (const [ + nodeIdString, + [address, nodeContactAddressData], + ] of pendingNodes) { if (removedNodes <= 0) break; const nodeId = IdInternal.fromString(nodeIdString); await this.setNode( nodeId, address, + nodeContactAddressData, false, false, undefined, @@ -918,6 +1688,7 @@ class NodeManager { bucketIndex: number, nodeId: NodeId, nodeAddress: NodeAddress, + nodeContactAddressData: NodeContactAddressData, block: boolean = false, pingTimeoutTime: number = this.connectionConnectTimeoutTime, ctx: ContextTimed, @@ -927,7 +1698,7 @@ class NodeManager { this.pendingNodes.set(bucketIndex, new Map()); } const pendingNodes = this.pendingNodes.get(bucketIndex); - pendingNodes!.set(nodeId.toString(), nodeAddress); + pendingNodes!.set(nodeId.toString(), [nodeAddress, nodeContactAddressData]); // No need to re-set it in the map, Maps are by reference // If set to blocking we just run the GC operation here @@ -943,7 +1714,7 @@ class NodeManager { // Check and start 
a 'garbageCollect` bucket task let scheduled: boolean = false; for await (const task of this.taskManager.getTasks('asc', true, [ - this.basePath, + this.tasksPath, this.gcBucketHandlerId, `${bucketIndex}`, ])) { @@ -972,45 +1743,38 @@ class NodeManager { await this.taskManager.scheduleTask({ handlerId: this.gcBucketHandlerId, parameters: [bucketIndex], - path: [this.basePath, this.gcBucketHandlerId, `${bucketIndex}`], + path: [this.tasksPath, this.gcBucketHandlerId, `${bucketIndex}`], lazy: true, }); } } - /** - * Removes a node from the NodeGraph - */ - public async unsetNode(nodeId: NodeId, tran: DBTransaction): Promise { - return await this.nodeGraph.unsetNode(nodeId, tran); - } - /** * To be called on key renewal. Re-orders all nodes in all buckets with respect * to the new node ID. */ public async resetBuckets(): Promise { - return await this.nodeGraph.resetBuckets(this.keyRing.getNodeId()); + return await this.nodeGraph.resetBuckets(); } /** * Kademlia refresh bucket operation. - * It picks a random node within a bucket and does a search for that node. - * Connections during the search will share node information with other - * nodes. + * It generates a random node ID within the range of a bucket and does a + * lookup for that node in the network. This will cause the network to update + * its node graph information. 
* @param bucketIndex * @param pingTimeoutTime * @param ctx */ public refreshBucket( - bucketIndex: number, + bucketIndex: NodeBucketIndex, pingTimeoutTime?: number, ctx?: Partial, ): PromiseCancellable; @timedCancellable(true) public async refreshBucket( bucketIndex: NodeBucketIndex, - pingTimeoutTime: number | undefined, + pingTimeoutTime: number | undefined = this.connectionConnectTimeoutTime, @context ctx: ContextTimed, ): Promise { // We need to generate a random nodeId for this bucket @@ -1020,9 +1784,11 @@ class NodeManager { bucketIndex, ); // We then need to start a findNode procedure - await this.nodeConnectionManager.findNode( + await this.findNode( bucketRandomNodeId, pingTimeoutTime, + undefined, + undefined, ctx, ); } @@ -1040,7 +1806,7 @@ class NodeManager { for await (const task of this.taskManager.getTasks( 'asc', true, - [this.basePath, this.refreshBucketHandlerId], + [this.tasksPath, this.refreshBucketHandlerId], tran, )) { const bucketIndex = parseInt(task.path[0]); @@ -1054,9 +1820,9 @@ class NodeManager { performance.now() + performance.timeOrigin - task.created.getTime() + - this.refreshBucketDelay + + this.refreshBucketDelayTime + nodesUtils.refreshBucketsDelayJitter( - this.refreshBucketDelay, + this.refreshBucketDelayTime, this.refreshBucketDelayJitter, ); await this.taskManager.updateTask(task.id, { delay }, tran); @@ -1080,21 +1846,22 @@ class NodeManager { bucketIndex++ ) { const exists = existingTasks[bucketIndex]; - if (!exists) { + // Can be disabled with 0 delay, only use for testing + if (!exists && this.refreshBucketDelayTime > 0) { // Create a new task this.logger.debug( `Creating refreshBucket task for bucket ${bucketIndex}`, ); const jitter = nodesUtils.refreshBucketsDelayJitter( - this.refreshBucketDelay, + this.refreshBucketDelayTime, this.refreshBucketDelayJitter, ); await this.taskManager.scheduleTask({ handlerId: this.refreshBucketHandlerId, - delay: this.refreshBucketDelay + jitter, + delay: this.refreshBucketDelayTime + 
jitter, lazy: true, parameters: [bucketIndex], - path: [this.basePath, this.refreshBucketHandlerId, `${bucketIndex}`], + path: [this.tasksPath, this.refreshBucketHandlerId, `${bucketIndex}`], priority: 0, }); } @@ -1105,7 +1872,7 @@ class NodeManager { @ready(new nodesErrors.ErrorNodeManagerNotRunning()) public async updateRefreshBucketDelay( bucketIndex: number, - delay: number = this.refreshBucketDelay, + delay: number = this.refreshBucketDelayTime, lazy: boolean = true, tran?: DBTransaction, ): Promise { @@ -1124,7 +1891,7 @@ class NodeManager { for await (const task of this.taskManager.getTasks( 'asc', true, - [this.basePath, this.refreshBucketHandlerId, `${bucketIndex}`], + [this.tasksPath, this.refreshBucketHandlerId, `${bucketIndex}`], tran, )) { if (!existingTask) { @@ -1169,11 +1936,11 @@ class NodeManager { handlerId: this.refreshBucketHandlerId, lazy: true, parameters: [bucketIndex], - path: [this.basePath, this.refreshBucketHandlerId, `${bucketIndex}`], + path: [this.tasksPath, this.refreshBucketHandlerId, `${bucketIndex}`], priority: 0, }); } - if (foundTask == null) never(); + if (foundTask == null) utils.never(); return foundTask; } @@ -1182,161 +1949,61 @@ class NodeManager { * from each seed node and add them to this database * Establish a connection to each node before adding it * By default this operation is blocking, set `block` to `false` to make it - * non-blocking + * + * From the spec: + * To join the network, a node u must have a contact to an already participating node w. u inserts w into the + * appropriate k-bucket. u then performs a node lookup for its own node ID. Finally, u refreshes all kbuckets further + * away than its closest neighbor. During the refreshes, u both populates its own k-buckets and inserts itself into + * other nodes’ k-buckets as necessary. + * + * So this will do 3 steps + * 1. Connect to the initial nodes + * 2. do a find-node operation for itself + * 3. 
reschedule refresh bucket operations for every bucket above the closest node we found + * */ public syncNodeGraph( - block?: boolean, + initialNodes: Array<[NodeId, NodeAddress]>, pingTimeoutTime?: number, + blocking?: boolean, ctx?: Partial, ): PromiseCancellable; @ready(new nodesErrors.ErrorNodeManagerNotRunning()) @timedCancellable(true) public async syncNodeGraph( - block: boolean = true, + initialNodes: Array<[NodeId, NodeAddress]>, pingTimeoutTime: number | undefined, + blocking: boolean = false, @context ctx: ContextTimed, ): Promise { const logger = this.logger.getChild('syncNodeGraph'); logger.info('Synchronizing NodeGraph'); - const seedNodes = this.nodeConnectionManager.getSeedNodes(); - if (seedNodes.length === 0) { - logger.info('Seed nodes list is empty, skipping synchronization'); - return; - } - const addresses = await Promise.all( - await this.db.withTransactionF(async (tran) => - seedNodes.map( - async (seedNode) => - (await this.nodeGraph.getNode(seedNode, tran))?.address, - ), - ), - ); - const filteredAddresses = addresses.filter( - (address) => address != null, - ) as Array; - logger.debug( - `establishing multi-connection to the following seed nodes ${seedNodes.map( - (nodeId) => nodesUtils.encodeNodeId(nodeId), - )}`, - ); - logger.debug( - `and addresses ${filteredAddresses.map( - (address) => `${address.host}:${address.port}`, - )}`, - ); - // Establishing connections to the seed nodes - let connections: Array; - try { - connections = await this.nodeConnectionManager.getMultiConnection( - seedNodes, - filteredAddresses, - { signal: ctx.signal }, - ); - } catch (e) { - if ( - e instanceof nodesErrors.ErrorNodeConnectionManagerMultiConnectionFailed - ) { - // Not explicitly a failure but we do want to stop here - this.logger.warn( - 'Failed to connect to any seed nodes when syncing node graph', - ); - return; - } - throw e; + if (initialNodes.length === 0) { + throw Error('TMP IMP Must provide at least 1 initial node'); } - 
logger.debug(`Multi-connection established for`); - connections.forEach((nodeId) => { - logger.debug(`${nodesUtils.encodeNodeId(nodeId)}`); + const initialNodesParameter = initialNodes.map(([nodeId, address]) => { + return [nodesUtils.encodeNodeId(nodeId), address] as [ + NodeIdEncoded, + [Host, Port], + ]; }); - // Using a map to avoid duplicates - const closestNodesAll: Map = new Map(); - const localNodeId = this.keyRing.getNodeId(); - let closestNode: NodeId | null = null; - logger.debug('Getting closest nodes'); - for (const nodeId of connections) { - const closestNodes = - await this.nodeConnectionManager.getRemoteNodeClosestNodes( - nodeId, - localNodeId, - { signal: ctx.signal }, - ); - // Setting node information into the map, filtering out local node - closestNodes.forEach(([nodeId, address]) => { - if (!localNodeId.equals(nodeId)) closestNodesAll.set(nodeId, address); - }); - - // Getting the closest node - let closeNodeInfo = closestNodes.pop(); - if (closeNodeInfo != null && localNodeId.equals(closeNodeInfo[0])) { - closeNodeInfo = closestNodes.pop(); - } - if (closeNodeInfo == null) continue; - const [closeNode] = closeNodeInfo; - if (closestNode == null) closestNode = closeNode; - const distA = nodesUtils.nodeDistance(localNodeId, closeNode); - const distB = nodesUtils.nodeDistance(localNodeId, closestNode); - if (distA < distB) closestNode = closeNode; - } - logger.debug('Starting pingsAndSet tasks'); - const pingTasks: Array = []; - for (const [nodeId, nodeData] of closestNodesAll) { - if (!localNodeId.equals(nodeId)) { - logger.debug( - `pingAndSetTask for ${nodesUtils.encodeNodeId(nodeId)}@${ - nodeData.address.host - }:${nodeData.address.port}`, - ); - const pingAndSetTask = await this.taskManager.scheduleTask({ - delay: 0, - handlerId: this.pingAndSetNodeHandlerId, - lazy: !block, - parameters: [ - nodesUtils.encodeNodeId(nodeId), - nodeData.address.host, - nodeData.address.port, - ], - path: [this.basePath, this.pingAndSetNodeHandlerId], - // 
Need to be somewhat active so high priority - priority: 100, - deadline: pingTimeoutTime, - }); - pingTasks.push(pingAndSetTask); - } - } - if (block) { - // We want to wait for all the tasks - logger.debug('Awaiting all pingAndSetTasks'); - await Promise.all( - pingTasks.map((task) => { - const prom = task.promise(); - // Hook on cancellation - if (ctx.signal.aborted) { - prom.cancel(ctx.signal.reason); - } else { - ctx.signal.addEventListener('abort', () => - prom.cancel(ctx.signal.reason), - ); - } - // Ignore errors - return task.promise().catch(() => {}); - }), + if (blocking) { + await this.syncNodeGraphHandler( + ctx, + undefined, + initialNodesParameter, + pingTimeoutTime, ); - } - // Refreshing every bucket above the closest node - logger.debug(`Triggering refreshBucket tasks`); - let index = this.nodeGraph.nodeIdBits; - if (closestNode != null) { - const [bucketIndex] = this.nodeGraph.bucketIndex(closestNode); - index = bucketIndex; - } - const refreshBuckets: Array> = []; - for (let i = index; i < this.nodeGraph.nodeIdBits; i++) { - const task = await this.updateRefreshBucketDelay(i, 0, !block); - refreshBuckets.push(task.promise()); - } - if (block) { - logger.debug(`Awaiting refreshBucket tasks`); - await Promise.all(refreshBuckets); + } else { + // Create task + await this.taskManager.scheduleTask({ + delay: 0, + handlerId: this.syncNodeGraphHandlerId, + lazy: true, + parameters: [initialNodesParameter, pingTimeoutTime], + path: [this.tasksPath, this.syncNodeGraphHandlerId], + priority: 0, + }); } } } diff --git a/src/nodes/agent/callers/index.ts b/src/nodes/agent/callers/index.ts index 3bb027081..12009b063 100644 --- a/src/nodes/agent/callers/index.ts +++ b/src/nodes/agent/callers/index.ts @@ -1,4 +1,5 @@ import nodesClaimsGet from './nodesClaimsGet'; +import nodesClosestActiveConnectionsGet from './nodesClosestActiveConnectionsGet'; import nodesClosestLocalNodesGet from './nodesClosestLocalNodesGet'; import nodesConnectionSignalFinal from 
'./nodesConnectionSignalFinal'; import nodesConnectionSignalInitial from './nodesConnectionSignalInitial'; @@ -13,6 +14,7 @@ import vaultsScan from './vaultsScan'; */ const manifestClient = { nodesClaimsGet, + nodesClosestActiveConnectionsGet, nodesClosestLocalNodesGet, nodesConnectionSignalFinal, nodesConnectionSignalInitial, @@ -23,10 +25,13 @@ const manifestClient = { vaultsScan, }; +type AgentClientManifest = typeof manifestClient; + export default manifestClient; export { nodesClaimsGet, + nodesClosestActiveConnectionsGet, nodesClosestLocalNodesGet, nodesConnectionSignalFinal, nodesConnectionSignalInitial, @@ -36,3 +41,5 @@ export { vaultsGitPackGet, vaultsScan, }; + +export type { AgentClientManifest }; diff --git a/src/nodes/agent/callers/nodesClosestActiveConnectionsGet.ts b/src/nodes/agent/callers/nodesClosestActiveConnectionsGet.ts new file mode 100644 index 000000000..dcce92321 --- /dev/null +++ b/src/nodes/agent/callers/nodesClosestActiveConnectionsGet.ts @@ -0,0 +1,12 @@ +import type { HandlerTypes } from '@matrixai/rpc'; +import type NodesClosestActiveConnectionsGet from '../handlers/NodesClosestActiveConnectionsGet'; +import { ServerCaller } from '@matrixai/rpc'; + +type CallerTypes = HandlerTypes; + +const nodesClosestActiveConnectionsGet = new ServerCaller< + CallerTypes['input'], + CallerTypes['output'] +>(); + +export default nodesClosestActiveConnectionsGet; diff --git a/src/nodes/agent/handlers/NodesClosestActiveConnectionsGet.ts b/src/nodes/agent/handlers/NodesClosestActiveConnectionsGet.ts new file mode 100644 index 000000000..cee6c9b1d --- /dev/null +++ b/src/nodes/agent/handlers/NodesClosestActiveConnectionsGet.ts @@ -0,0 +1,58 @@ +import type { + AgentRPCRequestParams, + AgentRPCResponseResult, + NodeIdMessage, +} from '../types'; +import type NodeConnectionManager from '../../NodeConnectionManager'; +import type { NodeId } from '../../../ids'; +import type { ActiveConnectionDataMessage } from '../types'; +import { ServerHandler } from 
'@matrixai/rpc'; +import * as utils from '../../../utils'; +import * as ids from '../../../ids'; +import * as validation from '../../../validation'; +import * as nodesUtils from '../../utils'; + +/** + * Gets the closest local nodes to a target node + */ +class NodesClosestActiveConnectionsGet extends ServerHandler< + { + nodeConnectionManager: NodeConnectionManager; + }, + AgentRPCRequestParams, + AgentRPCResponseResult +> { + public handle = async function* ( + input: AgentRPCRequestParams, + ): AsyncGenerator> { + const { nodeConnectionManager } = this.container as { + nodeConnectionManager: NodeConnectionManager; + }; + + const { + nodeId, + }: { + nodeId: NodeId; + } = validation.validateSync( + (keyPath, value) => { + return utils.matchSync(keyPath)( + [['nodeId'], () => ids.parseNodeId(value)], + () => value, + ); + }, + { + nodeId: input.nodeIdEncoded, + }, + ); + + const nodes = nodeConnectionManager.getClosestConnections(nodeId); + for (const nodeInfo of nodes) { + yield { + nodeId: nodesUtils.encodeNodeId(nodeInfo.nodeId), + connections: nodeInfo.connections, + }; + } + }; +} + +export default NodesClosestActiveConnectionsGet; diff --git a/src/nodes/agent/handlers/NodesClosestLocalNodesGet.ts b/src/nodes/agent/handlers/NodesClosestLocalNodesGet.ts index dedcb9fcb..0c6e4a529 100644 --- a/src/nodes/agent/handlers/NodesClosestLocalNodesGet.ts +++ b/src/nodes/agent/handlers/NodesClosestLocalNodesGet.ts @@ -2,7 +2,7 @@ import type { DB } from '@matrixai/db'; import type { AgentRPCRequestParams, AgentRPCResponseResult, - NodeAddressMessage, + NodeContactMessage, NodeIdMessage, } from '../types'; import type NodeGraph from '../../NodeGraph'; @@ -22,11 +22,11 @@ class NodesClosestLocalNodesGet extends ServerHandler< db: DB; }, AgentRPCRequestParams, - AgentRPCResponseResult + AgentRPCResponseResult > { public handle = async function* ( input: AgentRPCRequestParams, - ): AsyncGenerator> { + ): AsyncGenerator> { const { nodeGraph, db } = this.container; const { @@ 
-46,18 +46,17 @@ class NodesClosestLocalNodesGet extends ServerHandler< ); // Get all local nodes that are closest to the target node from the request return yield* db.withTransactionG(async function* (tran): AsyncGenerator< - AgentRPCResponseResult + AgentRPCResponseResult > { const closestNodes = await nodeGraph.getClosestNodes( nodeId, undefined, tran, ); - for (const [nodeId, nodeData] of closestNodes) { + for (const [nodeId, nodeContact] of closestNodes) { yield { nodeIdEncoded: nodesUtils.encodeNodeId(nodeId), - host: nodeData.address.host, - port: nodeData.address.port, + nodeContact, }; } }); diff --git a/src/nodes/agent/handlers/NodesConnectionSignalInitial.ts b/src/nodes/agent/handlers/NodesConnectionSignalInitial.ts index eb34854a8..3cbd7ed30 100644 --- a/src/nodes/agent/handlers/NodesConnectionSignalInitial.ts +++ b/src/nodes/agent/handlers/NodesConnectionSignalInitial.ts @@ -6,7 +6,6 @@ import type { } from '../types'; import type NodeConnectionManager from '../../../nodes/NodeConnectionManager'; import type { Host, Port } from '../../../network/types'; -import type { NodeAddress } from '../../../nodes/types'; import type { JSONValue } from '../../../types'; import { UnaryHandler } from '@matrixai/rpc'; import * as agentErrors from '../errors'; @@ -50,16 +49,14 @@ class NodesConnectionSignalInitial extends UnaryHandler< if (remotePort == null || typeof remotePort !== 'number') { never('Missing or invalid remotePort'); } - const address: NodeAddress = { - host: remoteHost as Host, - port: remotePort as Port, - scopes: ['global'], - }; const targetAddress = await nodeConnectionManager.handleNodesConnectionSignalInitial( requestingNodeId, targetNodeId, - address, + { + host: remoteHost as Host, + port: remotePort as Port, + }, input.signature, ); return { diff --git a/src/nodes/agent/handlers/index.ts b/src/nodes/agent/handlers/index.ts index cb2549c72..a59564497 100644 --- a/src/nodes/agent/handlers/index.ts +++ b/src/nodes/agent/handlers/index.ts @@ 
-9,6 +9,7 @@ import type NodeConnectionManager from '../../../nodes/NodeConnectionManager'; import type NotificationsManager from '../../../notifications/NotificationsManager'; import type VaultManager from '../../../vaults/VaultManager'; import NodesClaimsGet from './NodesClaimsGet'; +import NodesClosestActiveConnectionsGet from './NodesClosestActiveConnectionsGet'; import NodesClosestLocalNodesGet from './NodesClosestLocalNodesGet'; import NodesConnectionSignalFinal from './NodesConnectionSignalFinal'; import NodesConnectionSignalInitial from './NodesConnectionSignalInitial'; @@ -35,6 +36,9 @@ const manifestServer = (container: { }) => { return { nodesClaimsGet: new NodesClaimsGet(container), + nodesClosestActiveConnectionsGet: new NodesClosestActiveConnectionsGet( + container, + ), nodesClosestLocalNodesGet: new NodesClosestLocalNodesGet(container), nodesConnectionSignalFinal: new NodesConnectionSignalFinal(container), nodesConnectionSignalInitial: new NodesConnectionSignalInitial(container), @@ -46,10 +50,13 @@ const manifestServer = (container: { }; }; +type AgentServerManifest = ReturnType; + export default manifestServer; export { NodesClaimsGet, + NodesClosestActiveConnectionsGet, NodesClosestLocalNodesGet, NodesConnectionSignalFinal, NodesConnectionSignalInitial, @@ -59,3 +66,5 @@ export { VaultsGitPackGet, VaultsScan, }; + +export type { AgentServerManifest }; diff --git a/src/nodes/agent/types.ts b/src/nodes/agent/types.ts index 7c4252501..abec3d5a4 100644 --- a/src/nodes/agent/types.ts +++ b/src/nodes/agent/types.ts @@ -7,6 +7,8 @@ import type { SignedTokenEncoded } from '../../tokens/types'; import type { ClaimIdEncoded, NodeIdEncoded, VaultIdEncoded } from '../../ids'; import type { VaultAction, VaultName } from '../../vaults/types'; import type { SignedNotification } from '../../notifications/types'; +import type { Host, Hostname, Port } from '../../network/types'; +import type { NodeContact } from '../../nodes/types'; type AgentRPCRequestParams = 
JSONRPCRequestParams; @@ -31,7 +33,23 @@ type AddressMessage = { port: number; }; -type NodeAddressMessage = NodeIdMessage & AddressMessage; +type NodeContactMessage = NodeIdMessage & { + nodeContact: NodeContact; +}; + +type ActiveConnectionDataMessage = { + nodeId: NodeIdEncoded; + connections: Record< + string, + { + host: Host; + hostName: Hostname | undefined; + port: Port; + timeout: number | undefined; + primary: boolean; + } + >; +}; type HolePunchRequestMessage = { sourceNodeIdEncoded: NodeIdEncoded; @@ -66,7 +84,8 @@ export type { AgentClaimMessage, NodeIdMessage, AddressMessage, - NodeAddressMessage, + NodeContactMessage, + ActiveConnectionDataMessage, HolePunchRequestMessage, HolePunchSignalMessage, SignedNotificationEncoded, diff --git a/src/nodes/errors.ts b/src/nodes/errors.ts index b5fd60c0c..30fef93a3 100644 --- a/src/nodes/errors.ts +++ b/src/nodes/errors.ts @@ -10,6 +10,21 @@ class ErrorNodeManagerNotRunning extends ErrorNodeManager { exitCode = sysexits.USAGE; } +class ErrorNodeManagerNodeIdOwn extends ErrorNodeManager { + static description = 'NodeId is the same as the current node'; + exitCode = sysexits.USAGE; +} + +class ErrorNodeManagerConnectionFailed extends ErrorNodeManager { + static description = 'Failed to find or establish a connection'; + exitCode = sysexits.TEMPFAIL; +} + +class ErrorNodeManagerFindNodeFailed extends ErrorNodeManager { + static description = 'Failed to find node'; + exitCode = sysexits.TEMPFAIL; +} + class ErrorNodeGraph extends ErrorNodes {} class ErrorNodeGraphRunning extends ErrorNodeGraph { @@ -32,8 +47,8 @@ class ErrorNodeGraphNodeIdNotFound extends ErrorNodeGraph { exitCode = sysexits.NOUSER; } -class ErrorNodeGraphOversizedBucket extends ErrorNodeGraph { - static description: 'Bucket invalidly contains more nodes than capacity'; +class ErrorNodeGraphBucketLimit extends ErrorNodeGraph { + static description: 'Node graph bucket limit reached'; exitCode = sysexits.USAGE; } @@ -188,12 +203,15 @@ export { 
ErrorNodes, ErrorNodeManager, ErrorNodeManagerNotRunning, + ErrorNodeManagerNodeIdOwn, + ErrorNodeManagerConnectionFailed, + ErrorNodeManagerFindNodeFailed, ErrorNodeGraph, ErrorNodeGraphRunning, ErrorNodeGraphNotRunning, ErrorNodeGraphDestroyed, ErrorNodeGraphNodeIdNotFound, - ErrorNodeGraphOversizedBucket, + ErrorNodeGraphBucketLimit, ErrorNodeGraphSameNodeId, ErrorNodeGraphBucketIndex, ErrorNodeConnection, diff --git a/src/nodes/types.ts b/src/nodes/types.ts index bdbb16e07..8add7555b 100644 --- a/src/nodes/types.ts +++ b/src/nodes/types.ts @@ -1,30 +1,64 @@ import type { NodeId, NodeIdString, NodeIdEncoded } from '../ids/types'; import type { Host, Hostname, Port } from '../network/types'; +import type { Opaque } from '../types'; /** * Key indicating which space the NodeGraph is in */ type NodeGraphSpace = '0' | '1'; +/** + * Node address scopes allows the classification of the address. + * Local means that the address is locally routable. + * Global means that the address is globally routable. + */ type NodeAddressScope = 'local' | 'global'; -type NodeAddress = { - host: Host | Hostname; - port: Port; - scopes: Array; -}; +type NodeAddress = [Host | Hostname, Port]; type NodeBucketIndex = number; -type NodeBucket = Array<[NodeId, NodeData]>; +type NodeBucket = Array<[NodeId, NodeContact]>; type NodeBucketMeta = { count: number; }; -type NodeData = { - address: NodeAddress; - lastUpdated: number; +/** + * Record of `NodeAddress` to `NodeData` for a single `NodeId`. + * Use `nodesUtils.parseNodeAddressKey` to parse + * `NodeAddressKey` to `NodeAddress`. + * Note that records don't have inherent order. + */ +type NodeContact = Record; + +type NodeContactAddress = Opaque<'NodeContactAddress', string>; + +/** + * This is the record value stored in the NodeGraph. + */ +type NodeContactAddressData = { + /** + * Indicates how the contact address was connected on its + * last connection establishment. 
The ICE procedure concurrently + * uses all methods to try to connect, however, whichever one + * succeeded first should be indicated here. When sharing this + * information to other nodes, it can hint towards whether a + * contact does not require signalling or relaying. + */ + mode: 'direct' | 'signal' | 'relay'; + /** + * Unix timestamp of when the connection was last active. + * This property should be set when the connection is first + * established, but it should also be updated as long as the + * connection is active. + */ + connectedTime: number; + /** + * Scopes can be used to classify the address. + * Multiple scopes is understood as set-union. + */ + scopes: Array; }; type SeedNodes = Record; @@ -43,11 +77,13 @@ export type { NodeIdEncoded, NodeAddressScope, NodeAddress, + NodeContact, + NodeContactAddress, + NodeContactAddressData, SeedNodes, NodeBucketIndex, NodeBucketMeta, NodeBucket, - NodeData, NodeGraphSpace, }; diff --git a/src/nodes/utils.ts b/src/nodes/utils.ts index 5871a81fa..653be9c61 100644 --- a/src/nodes/utils.ts +++ b/src/nodes/utils.ts @@ -1,8 +1,18 @@ -import type { NodeBucket, NodeBucketIndex, NodeId, SeedNodes } from './types'; -import type { Hostname, Port } from '../network/types'; -import type { Certificate, CertificatePEM } from '../keys/types'; -import type { KeyPath } from '@matrixai/db'; +import type { DBTransaction, KeyPath, LevelPath } from '@matrixai/db'; import type { X509Certificate } from '@peculiar/x509'; +import type { QUICClientCrypto, QUICServerCrypto } from '@matrixai/quic'; +import type { Key, Certificate, CertificatePEM } from '../keys/types'; +import type { Hostname, Port } from '../network/types'; +import type { + NodeAddress, + NodeContact, + NodeContactAddress, + NodeContactAddressData, + NodeBucket, + NodeBucketIndex, + NodeId, + SeedNodes, +} from './types'; import dns from 'dns'; import { utils as dbUtils } from '@matrixai/db'; import { IdInternal } from '@matrixai/id'; @@ -63,6 +73,19 @@ function 
bucketIndex(sourceNode: NodeId, targetNode: NodeId): NodeBucketIndex { return bucketIndex; } +/** + * Encodes NodeAddress to NodeAddressKey + */ +function nodeContactAddress([host, port]: NodeAddress): NodeContactAddress { + if (networkUtils.isHost(host)) { + const host_ = networkUtils.toCanonicalHost(host); + return `${host_}-${port}` as NodeContactAddress; + } else { + const hostname = networkUtils.toCanonicalHostname(host); + return `${hostname}-${port}` as NodeContactAddress; + } +} + /** * Encodes bucket index to bucket sublevel key */ @@ -89,35 +112,25 @@ function bucketDbKey(nodeId: NodeId): Buffer { return nodeId.toBuffer(); } -/** - * Creates key for buckets indexed by lastUpdated sublevel - */ -function lastUpdatedBucketsDbKey( - bucketIndex: NodeBucketIndex, - lastUpdated: number, - nodeId: NodeId, -): Buffer { - return Buffer.concat([ - sepBuffer, - Buffer.from(bucketKey(bucketIndex)), - sepBuffer, - lastUpdatedBucketDbKey(lastUpdated, nodeId), - ]); +function connectedKey(lastUpdated: number): Buffer { + return Buffer.from(lexi.pack(lastUpdated, 'hex')); } -/** - * Creates key for single bucket indexed by lastUpdated sublevel - */ -function lastUpdatedBucketDbKey(lastUpdated: number, nodeId: NodeId): Buffer { - return Buffer.concat([ - Buffer.from(lexi.pack(lastUpdated, 'hex')), - Buffer.from('-'), - nodeId.toBuffer(), - ]); +function parseConnectedKey(buffer: Buffer): number { + return lexi.unpack(buffer.toString()); } -function lastUpdatedKey(lastUpdated: number): Buffer { - return Buffer.from(lexi.pack(lastUpdated, 'hex')); +function parseNodeContactAddress(nodeContactAddress: string): NodeAddress { + // Take the last occurrence of `-` because the hostname may contain `-` + const lastDashIndex = nodeContactAddress.lastIndexOf('-'); + const hostOrHostname = nodeContactAddress.slice(0, lastDashIndex); + const port = nodeContactAddress.slice(lastDashIndex + 1); + return [hostOrHostname, parseInt(port, 10)] as NodeAddress; +} + +function 
parseNodeAddressKey(keyBuffer: Buffer): NodeAddress { + const key = keyBuffer.toString(); + return parseNodeContactAddress(key); } /** @@ -129,8 +142,9 @@ function parseBucketsDbKey(keyPath: KeyPath): { bucketIndex: NodeBucketIndex; bucketKey: string; nodeId: NodeId; + nodeContactAddress: NodeContactAddress; } { - const [bucketKeyPath, nodeIdKey] = keyPath; + const [bucketKeyPath, nodeIdKey, nodeContactAddress] = keyPath; if (bucketKeyPath == null || nodeIdKey == null) { throw new TypeError('Buffer is not an NodeGraph buckets key'); } @@ -141,6 +155,7 @@ function parseBucketsDbKey(keyPath: KeyPath): { bucketIndex, bucketKey, nodeId, + nodeContactAddress: nodeContactAddress as NodeContactAddress, }; } @@ -216,39 +231,37 @@ function nodeDistance(nodeId1: NodeId, nodeId2: NodeId): bigint { return utils.bytes2BigInt(distance); } +function nodeDistanceCmpFactory(targetNodeId: NodeId) { + const distances = {}; + return (nodeId1: NodeId, nodeId2: NodeId): -1 | 0 | 1 => { + const d1 = (distances[nodeId1] = + distances[nodeId1] ?? nodeDistance(targetNodeId, nodeId1)); + const d2 = (distances[nodeId2] = + distances[nodeId2] ?? nodeDistance(targetNodeId, nodeId2)); + if (d1 < d2) { + return -1; + } else if (d1 > d2) { + return 1; + } else { + return 0; + } + }; +} + function bucketSortByDistance( bucket: NodeBucket, nodeId: NodeId, order: 'asc' | 'desc' = 'asc', ): void { - const distances = {}; + const nodeDistanceCmp = nodeDistanceCmpFactory(nodeId); if (order === 'asc') { bucket.sort(([nodeId1], [nodeId2]) => { - const d1 = (distances[nodeId1] = - distances[nodeId1] ?? nodeDistance(nodeId, nodeId1)); - const d2 = (distances[nodeId2] = - distances[nodeId2] ?? nodeDistance(nodeId, nodeId2)); - if (d1 < d2) { - return -1; - } else if (d1 > d2) { - return 1; - } else { - return 0; - } + return nodeDistanceCmp(nodeId1, nodeId2); }); } else { bucket.sort(([nodeId1], [nodeId2]) => { - const d1 = (distances[nodeId1] = - distances[nodeId1] ?? 
nodeDistance(nodeId, nodeId1)); - const d2 = (distances[nodeId2] = - distances[nodeId2] ?? nodeDistance(nodeId, nodeId2)); - if (d1 > d2) { - return -1; - } else if (d1 < d2) { - return 1; - } else { - return 0; - } + // Invert the order + return nodeDistanceCmp(nodeId1, nodeId2) * -1; }); } } @@ -423,11 +436,10 @@ async function resolveSeednodes( /\..*/g, '', ) as ids.NodeIdEncoded; - seednodes[nodeId] = { - host: seednodeRecord.name as Hostname, - port: seednodeRecord.port as Port, - scopes: ['global'], - }; + seednodes[nodeId] = [ + seednodeRecord.name as Hostname, + seednodeRecord.port as Port, + ]; } } catch (e) { throw new nodesErrors.ErrorNodeLookupNotFound( @@ -665,20 +677,109 @@ async function verifyClientCertificateChain( return undefined; } +/** + * QUIC Client Crypto + * This uses the keys utilities which uses `allocUnsafeSlow`. + * This ensures that the underlying buffer is not shared. + * Also all node buffers satisfy the `ArrayBuffer` interface. + */ +const quicClientCrypto: QUICClientCrypto = { + ops: { + async randomBytes(data: ArrayBuffer): Promise { + const randomBytes = keysUtils.getRandomBytes(data.byteLength); + randomBytes.copy(utils.bufferWrap(data)); + }, + }, +}; + +/** + * QUIC Server Crypto + * This uses the keys utilities which uses `allocUnsafeSlow`. + * This ensures that the underlying buffer is not shared. + * Also all node buffers satisfy the `ArrayBuffer` interface. 
+ */ +const quicServerCrypto: QUICServerCrypto = { + key: keysUtils.generateKey(), + ops: { + async sign(key: ArrayBuffer, data: ArrayBuffer): Promise { + return keysUtils.macWithKey( + utils.bufferWrap(key) as Key, + utils.bufferWrap(data), + ).buffer; + }, + async verify( + key: ArrayBuffer, + data: ArrayBuffer, + sig: ArrayBuffer, + ): Promise { + return keysUtils.authWithKey( + utils.bufferWrap(key) as Key, + utils.bufferWrap(data), + utils.bufferWrap(sig), + ); + }, + }, +}; + +async function* collectNodeContacts( + levelPath: LevelPath, + tran: DBTransaction, + options: { + reverse?: boolean; + lt?: LevelPath; + gt?: LevelPath; + limit?: number; + pathAdjust?: KeyPath; + } = {}, +): AsyncGenerator<[NodeId, NodeContact], void> { + let nodeId: NodeId | undefined = undefined; + let nodeContact: NodeContact = {}; + let count = 0; + for await (const [ + keyPath, + nodeContactAddressData, + ] of tran.iterator(levelPath, { + reverse: options.reverse, + lt: options.lt, + gt: options.gt, + valueAsBuffer: false, + })) { + const { nodeId: nodeIdCurrent, nodeContactAddress } = parseBucketsDbKey([ + ...(options.pathAdjust ?? 
[]), + ...keyPath, + ]); + if (!(nodeId == null || nodeIdCurrent.equals(nodeId))) { + // Yield and tear + yield [nodeId, nodeContact]; + nodeContact = {}; + count++; + } + // Accumulate addresses and data + nodeContact[nodeContactAddress] = nodeContactAddressData; + nodeId = nodeIdCurrent; + if (options.limit != null && count >= options.limit) return; + } + // Yield remaining data if it exists + if (nodeId != null) yield [nodeId, nodeContact]; +} + export { sepBuffer, + nodeContactAddress, bucketIndex, bucketKey, bucketsDbKey, bucketDbKey, - lastUpdatedBucketsDbKey, - lastUpdatedBucketDbKey, - lastUpdatedKey, + connectedKey, + parseConnectedKey, + parseNodeContactAddress, + parseNodeAddressKey, parseBucketsDbKey, parseBucketDbKey, parseLastUpdatedBucketsDbKey, parseLastUpdatedBucketDbKey, nodeDistance, + nodeDistanceCmpFactory, bucketSortByDistance, generateRandomDistanceForBucket, xOrNodeId, @@ -693,6 +794,9 @@ export { parseSeedNodes, verifyServerCertificateChain, verifyClientCertificateChain, + quicClientCrypto, + quicServerCrypto, + collectNodeContacts, }; export { encodeNodeId, decodeNodeId } from '../ids'; diff --git a/src/notifications/NotificationsManager.ts b/src/notifications/NotificationsManager.ts index 92e1da96a..d9ea85bea 100644 --- a/src/notifications/NotificationsManager.ts +++ b/src/notifications/NotificationsManager.ts @@ -3,7 +3,6 @@ import type { NotificationId, Notification, NotificationData } from './types'; import type ACL from '../acl/ACL'; import type KeyRing from '../keys/KeyRing'; import type NodeManager from '../nodes/NodeManager'; -import type NodeConnectionManager from '../nodes/NodeConnectionManager'; import type { NodeId } from '../ids/types'; import Logger from '@matrixai/logger'; import { IdInternal } from '@matrixai/id'; @@ -40,7 +39,6 @@ class NotificationsManager { static async createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, messageCap = 10000, @@ -49,7 +47,6 @@ class NotificationsManager { 
}: { acl: ACL; db: DB; - nodeConnectionManager: NodeConnectionManager; nodeManager: NodeManager; keyRing: KeyRing; messageCap?: number; @@ -63,7 +60,6 @@ class NotificationsManager { keyRing, logger, messageCap, - nodeConnectionManager, nodeManager, }); @@ -77,7 +73,6 @@ class NotificationsManager { protected db: DB; protected keyRing: KeyRing; protected nodeManager: NodeManager; - protected nodeConnectionManager: NodeConnectionManager; protected messageCap: number; /** @@ -101,7 +96,6 @@ class NotificationsManager { constructor({ acl, db, - nodeConnectionManager, nodeManager, keyRing, messageCap, @@ -109,7 +103,6 @@ class NotificationsManager { }: { acl: ACL; db: DB; - nodeConnectionManager: NodeConnectionManager; nodeManager: NodeManager; keyRing: KeyRing; messageCap: number; @@ -120,7 +113,6 @@ class NotificationsManager { this.acl = acl; this.db = db; this.keyRing = keyRing; - this.nodeConnectionManager = nodeConnectionManager; this.nodeManager = nodeManager; } @@ -183,7 +175,7 @@ class NotificationsManager { notification, this.keyRing.keyPair, ); - await this.nodeConnectionManager.withConnF(nodeId, async (connection) => { + await this.nodeManager.withConnF(nodeId, async (connection) => { const client = connection.getClient(); await client.methods.notificationsSend({ signedNotificationEncoded: signedNotification, diff --git a/src/utils/utils.ts b/src/utils/utils.ts index ecbd4c8ed..2632d7f12 100644 --- a/src/utils/utils.ts +++ b/src/utils/utils.ts @@ -243,9 +243,13 @@ function signalPromise(signal: AbortSignal): Promise { reject(signal.reason); return; } - signal.addEventListener('abort', () => { - reject(signal.reason); - }); + signal.addEventListener( + 'abort', + () => { + reject(signal.reason); + }, + { once: true }, + ); }); } @@ -454,6 +458,13 @@ function lexiUnpackBuffer(b: Buffer): number { return lexi.unpack([...b]); } +/** + * Used to yield to the event loop to allow other micro tasks to process + */ +async function yieldMicro() { + return await new 
Promise((r) => queueMicrotask(r)); +} + export { AsyncFunction, GeneratorFunction, @@ -491,4 +502,5 @@ export { lexiUnpackBuffer, bufferWrap, isBufferSource, + yieldMicro, }; diff --git a/src/vaults/VaultInternal.ts b/src/vaults/VaultInternal.ts index 51d1d3ac2..38c830493 100644 --- a/src/vaults/VaultInternal.ts +++ b/src/vaults/VaultInternal.ts @@ -14,7 +14,7 @@ import type { } from './types'; import type KeyRing from '../keys/KeyRing'; import type { NodeId, NodeIdEncoded } from '../ids/types'; -import type NodeConnectionManager from '../nodes/NodeConnectionManager'; +import type NodeManager from '../nodes/NodeManager'; import type { RPCClient } from '@matrixai/rpc'; import type agentClientManifest from '../nodes/agent/callers'; import type { POJO } from '../types'; @@ -113,7 +113,7 @@ class VaultInternal { db, vaultsDbPath, keyRing, - nodeConnectionManager, + nodeManager, efs, logger = new Logger(this.name), tran, @@ -125,7 +125,7 @@ class VaultInternal { vaultsDbPath: LevelPath; efs: EncryptedFS; keyRing: KeyRing; - nodeConnectionManager: NodeConnectionManager; + nodeManager: NodeManager; logger?: Logger; tran?: DBTransaction; }): Promise { @@ -138,7 +138,7 @@ class VaultInternal { db, vaultsDbPath, keyRing, - nodeConnectionManager, + nodeManager, efs, logger, tran, @@ -165,7 +165,7 @@ class VaultInternal { let remoteVaultId: VaultId; let remote: RemoteInfo; try { - [vaultName, remoteVaultId] = await nodeConnectionManager.withConnF( + [vaultName, remoteVaultId] = await nodeManager.withConnF( targetNodeId, async (connection) => { const client = connection.getClient(); @@ -539,12 +539,12 @@ class VaultInternal { @ready(new vaultsErrors.ErrorVaultNotRunning()) public async pullVault({ - nodeConnectionManager, + nodeManager, pullNodeId, pullVaultNameOrId, tran, }: { - nodeConnectionManager: NodeConnectionManager; + nodeManager: NodeManager; pullNodeId?: NodeId; pullVaultNameOrId?: VaultId | VaultName; tran?: DBTransaction; @@ -552,7 +552,7 @@ class VaultInternal { 
if (tran == null) { return this.db.withTransactionF((tran) => this.pullVault({ - nodeConnectionManager, + nodeManager, pullNodeId, pullVaultNameOrId, tran, @@ -594,7 +594,7 @@ class VaultInternal { ); let remoteVaultId: VaultId; try { - remoteVaultId = await nodeConnectionManager.withConnF( + remoteVaultId = await nodeManager.withConnF( pullNodeId!, async (connection) => { const client = connection.getClient(); diff --git a/src/vaults/VaultManager.ts b/src/vaults/VaultManager.ts index 5a3771959..50f980664 100644 --- a/src/vaults/VaultManager.ts +++ b/src/vaults/VaultManager.ts @@ -11,7 +11,7 @@ import type { FileSystem } from '../types'; import type { PolykeyWorkerManagerInterface } from '../workers/types'; import type { NodeId } from '../ids/types'; import type KeyRing from '../keys/KeyRing'; -import type NodeConnectionManager from '../nodes/NodeConnectionManager'; +import type NodeManager from '../nodes/NodeManager'; import type GestaltGraph from '../gestalts/GestaltGraph'; import type NotificationsManager from '../notifications/NotificationsManager'; import type ACL from '../acl/ACL'; @@ -74,7 +74,7 @@ class VaultManager { db, acl, keyRing, - nodeConnectionManager, + nodeManager, gestaltGraph, notificationsManager, fs = require('fs'), @@ -85,7 +85,7 @@ class VaultManager { db: DB; acl: ACL; keyRing: KeyRing; - nodeConnectionManager: NodeConnectionManager; + nodeManager: NodeManager; gestaltGraph: GestaltGraph; notificationsManager: NotificationsManager; fs?: FileSystem; @@ -99,7 +99,7 @@ class VaultManager { db, acl, keyRing, - nodeConnectionManager, + nodeManager, gestaltGraph, notificationsManager, fs, @@ -118,7 +118,7 @@ class VaultManager { protected db: DB; protected acl: ACL; protected keyRing: KeyRing; - protected nodeConnectionManager: NodeConnectionManager; + protected nodeManager: NodeManager; protected gestaltGraph: GestaltGraph; protected notificationsManager: NotificationsManager; protected vaultsDbPath: LevelPath = [this.constructor.name]; @@ 
-136,7 +136,7 @@ class VaultManager { db, acl, keyRing, - nodeConnectionManager, + nodeManager, gestaltGraph, notificationsManager, fs, @@ -146,7 +146,7 @@ class VaultManager { db: DB; acl: ACL; keyRing: KeyRing; - nodeConnectionManager: NodeConnectionManager; + nodeManager: NodeManager; gestaltGraph: GestaltGraph; notificationsManager: NotificationsManager; fs: FileSystem; @@ -158,7 +158,7 @@ class VaultManager { this.db = db; this.acl = acl; this.keyRing = keyRing; - this.nodeConnectionManager = nodeConnectionManager; + this.nodeManager = nodeManager; this.gestaltGraph = gestaltGraph; this.notificationsManager = notificationsManager; this.fs = fs; @@ -706,7 +706,7 @@ class VaultManager { targetVaultNameOrId: vaultNameOrId, vaultId, db: this.db, - nodeConnectionManager: this.nodeConnectionManager, + nodeManager: this.nodeManager, vaultsDbPath: this.vaultsDbPath, keyRing: this.keyRing, efs: this.efs, @@ -786,7 +786,7 @@ class VaultManager { await tran.lock([...this.vaultsDbPath, vaultId].join('')); const vault = await this.getVault(vaultId, tran); await vault.pullVault({ - nodeConnectionManager: this.nodeConnectionManager, + nodeManager: this.nodeManager, pullNodeId, pullVaultNameOrId, tran, @@ -899,7 +899,7 @@ class VaultManager { vaultPermissions: VaultAction[]; }> { // Create a connection to another node - return yield* this.nodeConnectionManager.withConnG( + return yield* this.nodeManager.withConnG( targetNodeId, async function* (connection): AsyncGenerator<{ vaultName: VaultName; diff --git a/test-node-manager.ts b/test-node-manager.ts new file mode 100644 index 000000000..d97256589 --- /dev/null +++ b/test-node-manager.ts @@ -0,0 +1,74 @@ +import path from 'path'; +import { DB } from '@matrixai/db'; +import type { Host, Port, TLSConfig } from './src/network/types'; +import Logger, { formatting, LogLevel, StreamHandler } from '@matrixai/logger'; +import NodeManager from './src/nodes/NodeManager'; +import KeyRing from './src/keys/KeyRing'; +import Sigchain from 
'./src/sigchain/Sigchain'; +import ACL from './src/acl/ACL'; +import GestaltGraph from './src/gestalts/GestaltGraph'; +import TaskManager from './src/tasks/TaskManager'; +import NodeGraph from './src/nodes/NodeGraph'; +import NodeConnectionManager from './src/nodes/NodeConnectionManager'; +import * as keysUtils from './src/keys/utils'; +import * as testsUtils from './tests/utils'; + +async function main() { + const logger = new Logger(`${NodeManager.name} test`, LogLevel.WARN, [ + new StreamHandler(), + ]); + const dataDir = './tmp'; + + let keyRing: KeyRing; + let db: DB; + let acl: ACL; + let gestaltGraph: GestaltGraph; + let nodeGraph: NodeGraph; + let sigchain: Sigchain; + let taskManager: TaskManager; + let tlsConfig: TLSConfig; + + const password = 'password'; + const keysPath = path.join(dataDir, 'keys'); + keyRing = await KeyRing.createKeyRing({ + password, + keysPath, + logger, + passwordOpsLimit: keysUtils.passwordOpsLimits.min, + passwordMemLimit: keysUtils.passwordMemLimits.min, + strictMemoryLock: false, + }); + const dbPath = path.join(dataDir, 'db'); + db = await DB.createDB({ + dbPath, + logger, + }); + acl = await ACL.createACL({ + db, + logger, + }); + gestaltGraph = await GestaltGraph.createGestaltGraph({ + db, + acl, + logger, + }); + nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyRing, + logger, + }); + sigchain = await Sigchain.createSigchain({ + db, + keyRing, + logger, + }); + taskManager = await TaskManager.createTaskManager({ + db, + logger, + }); + tlsConfig = await testsUtils.createTLSConfig(keyRing.keyPair); + + +} + +void main(); diff --git a/tests/client/handlers/gestalts.test.ts b/tests/client/handlers/gestalts.test.ts index e0423940b..151dc5f75 100644 --- a/tests/client/handlers/gestalts.test.ts +++ b/tests/client/handlers/gestalts.test.ts @@ -16,6 +16,7 @@ import type { import type { SignedClaim } from '@/claims/types'; import type { Host } from '@/network/types'; import type { ClaimLinkIdentity } from 
'@/claims/payloads'; +import type { AgentServerManifest } from '@/nodes/agent/handlers'; import fs from 'fs'; import path from 'path'; import os from 'os'; @@ -446,7 +447,8 @@ describe('gestaltsDiscoveryByIdentity', () => { // @ts-ignore: TLS not needed for this test tlsConfig: {}, connectionConnectTimeoutTime: 2000, - connectionIdleTimeoutTime: 2000, + connectionIdleTimeoutTimeMin: 2000, + connectionIdleTimeoutTimeScale: 0, logger: logger.getChild('NodeConnectionManager'), }); nodeManager = new NodeManager({ @@ -462,6 +464,7 @@ describe('gestaltsDiscoveryByIdentity', () => { await nodeManager.start(); await nodeConnectionManager.start({ host: localhost as Host, + agentService: {} as AgentServerManifest, }); discovery = await Discovery.createDiscovery({ db, @@ -623,7 +626,8 @@ describe('gestaltsDiscoveryByNode', () => { // @ts-ignore: TLS not needed for this test tlsConfig: {}, connectionConnectTimeoutTime: 2000, - connectionIdleTimeoutTime: 2000, + connectionIdleTimeoutTimeMin: 2000, + connectionIdleTimeoutTimeScale: 0, logger: logger.getChild('NodeConnectionManager'), }); nodeManager = new NodeManager({ @@ -637,7 +641,10 @@ describe('gestaltsDiscoveryByNode', () => { logger, }); await nodeManager.start(); - await nodeConnectionManager.start({ host: localhost as Host }); + await nodeConnectionManager.start({ + host: localhost as Host, + agentService: {} as AgentServerManifest, + }); discovery = await Discovery.createDiscovery({ db, gestaltGraph, @@ -1235,7 +1242,8 @@ describe('gestaltsGestaltTrustByIdentity', () => { // @ts-ignore: TLS not needed for this test tlsConfig: {}, connectionConnectTimeoutTime: 2000, - connectionIdleTimeoutTime: 2000, + connectionIdleTimeoutTimeMin: 2000, + connectionIdleTimeoutTimeScale: 0, logger: logger.getChild('NodeConnectionManager'), }); nodeManager = new NodeManager({ @@ -1249,7 +1257,10 @@ describe('gestaltsGestaltTrustByIdentity', () => { logger, }); await nodeManager.start(); - await nodeConnectionManager.start({ host: 
localhost as Host }); + await nodeConnectionManager.start({ + host: localhost as Host, + agentService: {} as AgentServerManifest, + }); discovery = await Discovery.createDiscovery({ db, gestaltGraph, @@ -1610,7 +1621,8 @@ describe('gestaltsGestaltTrustByNode', () => { // @ts-ignore: TLS not needed for this test tlsConfig: {}, connectionConnectTimeoutTime: 2000, - connectionIdleTimeoutTime: 2000, + connectionIdleTimeoutTimeMin: 2000, + connectionIdleTimeoutTimeScale: 0, logger: logger.getChild('NodeConnectionManager'), }); nodeManager = new NodeManager({ @@ -1624,12 +1636,19 @@ describe('gestaltsGestaltTrustByNode', () => { logger, }); await nodeManager.start(); - await nodeConnectionManager.start({ host: localhost as Host }); - await nodeManager.setNode(nodeIdRemote, { - host: node.agentServiceHost, - port: node.agentServicePort, - scopes: ['global'], + await nodeConnectionManager.start({ + host: localhost as Host, + agentService: {} as AgentServerManifest, }); + await nodeManager.setNode( + nodeIdRemote, + [node.agentServiceHost, node.agentServicePort], + { + mode: 'direct', + connectedTime: 0, + scopes: ['global'], + }, + ); discovery = await Discovery.createDiscovery({ db, gestaltGraph, diff --git a/tests/client/handlers/keys.test.ts b/tests/client/handlers/keys.test.ts index 108488b73..eead4030a 100644 --- a/tests/client/handlers/keys.test.ts +++ b/tests/client/handlers/keys.test.ts @@ -567,7 +567,7 @@ describe('keysKeyPairRenew', () => { expect(mockedRefreshBuckets).toHaveBeenCalledTimes(0); expect(fwdTLSConfig1).toEqual(expectedTLSConfig1); expect(nodeId1.equals(nodeIdStatus1)).toBe(true); - const certChangeEventProm = testsUtils.promFromEvent( + const certChangeEventP = testsUtils.promFromEvent( pkAgent.certManager, keysEvents.EventCertManagerCertChange, ); @@ -576,7 +576,7 @@ describe('keysKeyPairRenew', () => { password: 'somepassphrase', }); // Awaiting change to propagate - await certChangeEventProm.p; + await certChangeEventP; // Wait some time after 
event for domains to update await utils.sleep(500); const rootKeyPair2 = pkAgent.keyRing.keyPair; @@ -695,7 +695,7 @@ describe('keysKeyPairReset', () => { expect(mockedRefreshBuckets).not.toHaveBeenCalled(); expect(fwdTLSConfig1).toEqual(expectedTLSConfig1); expect(nodeId1.equals(nodeIdStatus1)).toBe(true); - const certChangeEventProm = testsUtils.promFromEvent( + const certChangeEventP = testsUtils.promFromEvent( pkAgent.certManager, keysEvents.EventCertManagerCertChange, ); @@ -704,7 +704,7 @@ describe('keysKeyPairReset', () => { password: 'somepassphrase', }); // Awaiting change to propagate - await certChangeEventProm.p; + await certChangeEventP; // Wait some time after event for domains to update await utils.sleep(500); const rootKeyPair2 = pkAgent.keyRing.keyPair; diff --git a/tests/client/handlers/nodes.test.ts b/tests/client/handlers/nodes.test.ts index 2d482a5ba..3f66336ba 100644 --- a/tests/client/handlers/nodes.test.ts +++ b/tests/client/handlers/nodes.test.ts @@ -2,7 +2,7 @@ import type GestaltGraph from '@/gestalts/GestaltGraph'; import type { NodeIdEncoded } from '@/ids/types'; import type { TLSConfig, Host, Port } from '@/network/types'; import type { Notification } from '@/notifications/types'; -import type { NodeAddress } from '@/nodes/types'; +import type { AgentServerManifest } from '@/nodes/agent/handlers'; import fs from 'fs'; import path from 'path'; import os from 'os'; @@ -100,11 +100,11 @@ describe('nodesAdd', () => { }); nodeConnectionManager = new NodeConnectionManager({ keyRing, - nodeGraph, // TLS not needed for this test tlsConfig: {} as TLSConfig, connectionConnectTimeoutTime: 2000, - connectionIdleTimeoutTime: 2000, + connectionIdleTimeoutTimeMin: 2000, + connectionIdleTimeoutTimeScale: 0, logger: logger.getChild('NodeConnectionManager'), }); nodeManager = new NodeManager({ @@ -118,7 +118,10 @@ describe('nodesAdd', () => { logger, }); await nodeManager.start(); - await nodeConnectionManager.start({ host: localhost as Host }); + await 
nodeConnectionManager.start({ + host: localhost as Host, + agentService: {} as AgentServerManifest, + }); await taskManager.startProcessing(); clientService = new ClientService({ tlsConfig, @@ -177,17 +180,17 @@ describe('nodesAdd', () => { ping: false, force: false, }); - const result = await nodeGraph.getNode( + const result = await nodeGraph.getNodeContact( nodesUtils.decodeNodeId( 'vrsc24a1er424epq77dtoveo93meij0pc8ig4uvs9jbeld78n9nl0', )!, ); expect(result).toBeDefined(); - expect(result!.address).toEqual({ - host: '127.0.0.1', - port: 11111, - scopes: ['global'], - }); + expect( + result![ + nodesUtils.nodeContactAddress(['127.0.0.1' as Host, 11111 as Port]) + ], + ).toBeDefined(); }); test('cannot add invalid node', async () => { // Invalid host @@ -314,11 +317,11 @@ describe('nodesClaim', () => { }); nodeConnectionManager = new NodeConnectionManager({ keyRing, - nodeGraph, // TLS not needed for this test tlsConfig: {} as TLSConfig, connectionConnectTimeoutTime: 2000, - connectionIdleTimeoutTime: 2000, + connectionIdleTimeoutTimeMin: 2000, + connectionIdleTimeoutTimeScale: 0, logger: logger.getChild('NodeConnectionManager'), }); nodeManager = new NodeManager({ @@ -332,13 +335,15 @@ describe('nodesClaim', () => { logger, }); await nodeManager.start(); - await nodeConnectionManager.start({ host: localhost as Host }); + await nodeConnectionManager.start({ + host: localhost as Host, + agentService: {} as AgentServerManifest, + }); await taskManager.startProcessing(); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger, @@ -432,16 +437,21 @@ describe('nodesFind', () => { let nodeGraph: NodeGraph; let taskManager: TaskManager; let nodeConnectionManager: NodeConnectionManager; + let nodeManager: NodeManager; let sigchain: Sigchain; let mockedFindNode: jest.SpyInstance; beforeEach(async () => { mockedFindNode = jest - .spyOn(NodeConnectionManager.prototype, 'findNode') - 
.mockResolvedValue({ - host: '127.0.0.1' as Host, - port: 11111 as Port, - scopes: ['local'], - }); + .spyOn(NodeManager.prototype, 'findNode') + // [NodeAddress, NodeContactAddressData] + .mockResolvedValue([ + ['127.0.0.1' as Host, 11111 as Port], + { + mode: 'direct', + connectedTime: 0, + scopes: ['local'], + }, + ]); dataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -477,14 +487,27 @@ describe('nodesFind', () => { }); nodeConnectionManager = new NodeConnectionManager({ keyRing, - nodeGraph, // TLS not needed for this test tlsConfig: {} as TLSConfig, connectionConnectTimeoutTime: 2000, - connectionIdleTimeoutTime: 2000, + connectionIdleTimeoutTimeMin: 2000, + connectionIdleTimeoutTimeScale: 0, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start({ host: localhost as Host }); + await nodeConnectionManager.start({ + host: localhost as Host, + agentService: {} as AgentServerManifest, + }); + nodeManager = new NodeManager({ + db, + keyRing, + nodeGraph, + nodeConnectionManager, + taskManager, + gestaltGraph: {} as GestaltGraph, + sigchain: {} as Sigchain, + logger: logger.getChild(NodeManager.name), + }); await taskManager.startProcessing(); clientService = new ClientService({ tlsConfig, @@ -493,7 +516,7 @@ describe('nodesFind', () => { await clientService.start({ manifest: { nodesFind: new NodesFind({ - nodeConnectionManager, + nodeManager, }), }, host: localhost, @@ -537,9 +560,9 @@ describe('nodesFind', () => { nodeIdEncoded: 'vrsc24a1er424epq77dtoveo93meij0pc8ig4uvs9jbeld78n9nl0' as NodeIdEncoded, }); - const address = response.addresses.at(0); - expect(address?.host).toBe('127.0.0.1'); - expect(address?.port).toBe(11111); + const [host, port] = response.nodeAddress; + expect(host).toBe('127.0.0.1'); + expect(port).toBe(11111); }); test('cannot find an invalid node', async () => { await testsUtils.expectRemoteError( @@ -610,11 +633,11 @@ describe('nodesPing', () => { }); 
nodeConnectionManager = new NodeConnectionManager({ keyRing, - nodeGraph, // TLS not needed for this test tlsConfig: {} as TLSConfig, connectionConnectTimeoutTime: 2000, - connectionIdleTimeoutTime: 2000, + connectionIdleTimeoutTimeMin: 2000, + connectionIdleTimeoutTimeScale: 0, logger: logger.getChild('NodeConnectionManager'), }); nodeManager = new NodeManager({ @@ -627,7 +650,10 @@ describe('nodesPing', () => { gestaltGraph: {} as GestaltGraph, logger, }); - await nodeConnectionManager.start({ host: localhost as Host }); + await nodeConnectionManager.start({ + host: localhost as Host, + agentService: {} as AgentServerManifest, + }); await taskManager.startProcessing(); clientService = new ClientService({ tlsConfig, @@ -676,7 +702,7 @@ describe('nodesPing', () => { }); }); test('pings a node (offline)', async () => { - mockedPingNode.mockResolvedValue(false); + mockedPingNode.mockResolvedValue(undefined); const response = await rpcClient.methods.nodesPing({ nodeIdEncoded: 'vrsc24a1er424epq77dtoveo93meij0pc8ig4uvs9jbeld78n9nl0' as NodeIdEncoded, @@ -684,7 +710,7 @@ describe('nodesPing', () => { expect(response.success).toBeFalsy(); }); test('pings a node (online)', async () => { - mockedPingNode.mockResolvedValue(true); + mockedPingNode.mockResolvedValue([]); const response = await rpcClient.methods.nodesPing({ nodeIdEncoded: 'vrsc24a1er424epq77dtoveo93meij0pc8ig4uvs9jbeld78n9nl0' as NodeIdEncoded, @@ -758,11 +784,11 @@ describe('nodesGetAll', () => { }); nodeConnectionManager = new NodeConnectionManager({ keyRing, - nodeGraph, // TLS not needed for this test tlsConfig: {} as TLSConfig, connectionConnectTimeoutTime: 2000, - connectionIdleTimeoutTime: 2000, + connectionIdleTimeoutTimeMin: 2000, + connectionIdleTimeoutTimeScale: 0, logger: logger.getChild('NodeConnectionManager'), }); nodeManager = new NodeManager({ @@ -776,7 +802,10 @@ describe('nodesGetAll', () => { logger, }); await nodeManager.start(); - await nodeConnectionManager.start({ host: localhost as Host 
}); + await nodeConnectionManager.start({ + host: localhost as Host, + agentService: {} as AgentServerManifest, + }); await taskManager.startProcessing(); clientService = new ClientService({ tlsConfig, @@ -829,10 +858,12 @@ describe('nodesGetAll', () => { parseNodeId( 'vrsc24a1er424epq77dtoveo93meij0pc8ig4uvs9jbeld78n9nl0' as NodeIdEncoded, ), + ['127.0.0.1' as Host, 1111 as Port], { - host: networkUtils.parseHostOrHostname('127.0.0.1'), - port: networkUtils.parsePort(1111), - } as NodeAddress, + mode: 'direct', + connectedTime: 0, + scopes: ['local'], + }, ); const values: Array = []; const response = await rpcClient.methods.nodesGetAll({}); @@ -907,11 +938,11 @@ describe('nodesListConnections', () => { }); nodeConnectionManager = new NodeConnectionManager({ keyRing, - nodeGraph, // TLS not needed for this test tlsConfig: {} as TLSConfig, connectionConnectTimeoutTime: 2000, - connectionIdleTimeoutTime: 2000, + connectionIdleTimeoutTimeMin: 2000, + connectionIdleTimeoutTimeScale: 0, logger: logger.getChild('NodeConnectionManager'), }); nodeManager = new NodeManager({ @@ -925,7 +956,10 @@ describe('nodesListConnections', () => { logger, }); await nodeManager.start(); - await nodeConnectionManager.start({ host: localhost as Host }); + await nodeConnectionManager.start({ + host: localhost as Host, + agentService: {} as AgentServerManifest, + }); await taskManager.startProcessing(); clientService = new ClientService({ tlsConfig, @@ -977,6 +1011,8 @@ describe('nodesListConnections', () => { mockedConnection.mockReturnValue([ { nodeId: testsUtils.generateRandomNodeId(), + connectionId: 'someId', + primary: true, address: { host: '127.0.0.1', port: 11111, @@ -986,6 +1022,7 @@ describe('nodesListConnections', () => { timeout: undefined, }, ]); + const values: Array = []; const responses = await rpcClient.methods.nodesListConnections({}); for await (const response of responses) { diff --git a/tests/client/handlers/notifications.test.ts 
b/tests/client/handlers/notifications.test.ts index 266a419b6..32194b465 100644 --- a/tests/client/handlers/notifications.test.ts +++ b/tests/client/handlers/notifications.test.ts @@ -3,6 +3,7 @@ import type { Host, TLSConfig } from '@/network/types'; import type { General, Notification, VaultShare } from '@/notifications/types'; import type { VaultIdEncoded, NodeIdEncoded } from '@/ids/types'; import type { VaultName } from '@/vaults/types'; +import type { AgentServerManifest } from '@/nodes/agent/handlers'; import fs from 'fs'; import path from 'path'; import os from 'os'; @@ -103,11 +104,11 @@ describe('notificationsClear', () => { }); nodeConnectionManager = new NodeConnectionManager({ keyRing, - nodeGraph, // TLS not needed for this test tlsConfig: {} as TLSConfig, connectionConnectTimeoutTime: 2000, - connectionIdleTimeoutTime: 2000, + connectionIdleTimeoutTimeMin: 2000, + connectionIdleTimeoutTimeScale: 0, logger: logger.getChild('NodeConnectionManager'), }); nodeManager = new NodeManager({ @@ -121,13 +122,15 @@ describe('notificationsClear', () => { logger, }); await nodeManager.start(); - await nodeConnectionManager.start({ host: localhost as Host }); + await nodeConnectionManager.start({ + host: localhost as Host, + agentService: {} as AgentServerManifest, + }); await taskManager.startProcessing(); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger, @@ -258,11 +261,11 @@ describe('notificationsRead', () => { }); nodeConnectionManager = new NodeConnectionManager({ keyRing, - nodeGraph, // TLS not needed for this test tlsConfig: {} as TLSConfig, connectionConnectTimeoutTime: 2000, - connectionIdleTimeoutTime: 2000, + connectionIdleTimeoutTimeMin: 2000, + connectionIdleTimeoutTimeScale: 0, logger: logger.getChild('NodeConnectionManager'), }); nodeManager = new NodeManager({ @@ -276,13 +279,15 @@ describe('notificationsRead', () => { logger, }); await 
nodeManager.start(); - await nodeConnectionManager.start({ host: localhost as Host }); + await nodeConnectionManager.start({ + host: localhost as Host, + agentService: {} as AgentServerManifest, + }); await taskManager.start(); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger, @@ -597,10 +602,7 @@ describe('notificationsSend', () => { let sigchain: Sigchain; let mockedSendNotification: jest.SpyInstance; beforeEach(async () => { - mockedSendNotification = jest.spyOn( - NodeConnectionManager.prototype, - 'withConnF', - ); + mockedSendNotification = jest.spyOn(NodeManager.prototype, 'withConnF'); dataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -640,10 +642,10 @@ describe('notificationsSend', () => { }); nodeConnectionManager = new NodeConnectionManager({ keyRing, - nodeGraph, tlsConfig: {} as TLSConfig, connectionConnectTimeoutTime: 2000, - connectionIdleTimeoutTime: 2000, + connectionIdleTimeoutTimeMin: 2000, + connectionIdleTimeoutTimeScale: 0, logger: logger.getChild('NodeConnectionManager'), }); nodeManager = new NodeManager({ @@ -657,13 +659,15 @@ describe('notificationsSend', () => { logger, }); await nodeManager.start(); - await nodeConnectionManager.start({ host: localhost as Host }); + await nodeConnectionManager.start({ + host: localhost as Host, + agentService: {} as AgentServerManifest, + }); await taskManager.start(); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger, diff --git a/tests/client/handlers/vaults.test.ts b/tests/client/handlers/vaults.test.ts index ab2cb15b4..562e50dfe 100644 --- a/tests/client/handlers/vaults.test.ts +++ b/tests/client/handlers/vaults.test.ts @@ -1,4 +1,3 @@ -import type NodeConnectionManager from '@/nodes/NodeConnectionManager'; import type { TLSConfig } from '@/network/types'; import type { FileSystem } 
from '@/types'; import type { VaultId } from '@/ids'; @@ -106,7 +105,7 @@ describe('vaultsClone', () => { db, acl: {} as ACL, keyRing: {} as KeyRing, - nodeConnectionManager: {} as NodeConnectionManager, + nodeManager: {} as NodeManager, gestaltGraph: {} as GestaltGraph, notificationsManager: {} as NotificationsManager, logger, @@ -170,7 +169,7 @@ describe('vaultsCreate and vaultsDelete and vaultsList', () => { db, acl: {} as ACL, keyRing, - nodeConnectionManager: {} as NodeConnectionManager, + nodeManager: {} as NodeManager, gestaltGraph: {} as GestaltGraph, notificationsManager: {} as NotificationsManager, logger, @@ -305,7 +304,7 @@ describe('vaultsLog', () => { db, acl: {} as ACL, keyRing, - nodeConnectionManager: {} as NodeConnectionManager, + nodeManager: {} as NodeManager, gestaltGraph: {} as GestaltGraph, notificationsManager: {} as NotificationsManager, logger, @@ -468,7 +467,6 @@ describe('vaultsPermissionSet and vaultsPermissionUnset and vaultsPermissionGet' await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager: {} as NodeConnectionManager, nodeManager: {} as NodeManager, keyRing, logger, @@ -479,7 +477,7 @@ describe('vaultsPermissionSet and vaultsPermissionUnset and vaultsPermissionGet' db, acl, keyRing, - nodeConnectionManager: {} as NodeConnectionManager, + nodeManager: {} as NodeManager, gestaltGraph, notificationsManager, logger, @@ -642,7 +640,6 @@ describe('vaultsPull', () => { await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager: {} as NodeConnectionManager, nodeManager: {} as NodeManager, keyRing, logger, @@ -653,7 +650,7 @@ describe('vaultsPull', () => { db, acl, keyRing, - nodeConnectionManager: {} as NodeConnectionManager, + nodeManager: {} as NodeManager, gestaltGraph, notificationsManager, logger, @@ -718,7 +715,7 @@ describe('vaultsRename', () => { db, acl: {} as ACL, keyRing, - nodeConnectionManager: {} as NodeConnectionManager, + nodeManager: {} as NodeManager, 
gestaltGraph: {} as GestaltGraph, notificationsManager: {} as NotificationsManager, logger, @@ -813,7 +810,7 @@ describe('vaultsScan', () => { db, acl: {} as ACL, keyRing, - nodeConnectionManager: {} as NodeConnectionManager, + nodeManager: {} as NodeManager, gestaltGraph: {} as GestaltGraph, notificationsManager: {} as NotificationsManager, logger, @@ -875,7 +872,7 @@ describe('vaultsSecretsEdit', () => { db, acl: {} as ACL, keyRing, - nodeConnectionManager: {} as NodeConnectionManager, + nodeManager: {} as NodeManager, gestaltGraph: {} as GestaltGraph, notificationsManager: {} as NotificationsManager, logger, @@ -988,7 +985,7 @@ describe('vaultsSecretsMkdir', () => { db, acl: {} as ACL, keyRing, - nodeConnectionManager: {} as NodeConnectionManager, + nodeManager: {} as NodeManager, gestaltGraph: {} as GestaltGraph, notificationsManager: {} as NotificationsManager, logger, @@ -1096,7 +1093,7 @@ describe('vaultsSecretsNew and vaultsSecretsDelete, vaultsSecretsGet', () => { db, acl: {} as ACL, keyRing, - nodeConnectionManager: {} as NodeConnectionManager, + nodeManager: {} as NodeManager, gestaltGraph: {} as GestaltGraph, notificationsManager: {} as NotificationsManager, logger, @@ -1231,7 +1228,7 @@ describe('vaultsSecretsNewDir and vaultsSecretsList', () => { db, acl: {} as ACL, keyRing, - nodeConnectionManager: {} as NodeConnectionManager, + nodeManager: {} as NodeManager, gestaltGraph: {} as GestaltGraph, notificationsManager: {} as NotificationsManager, logger, @@ -1359,7 +1356,7 @@ describe('vaultsSecretsRename', () => { db, acl: {} as ACL, keyRing, - nodeConnectionManager: {} as NodeConnectionManager, + nodeManager: {} as NodeManager, gestaltGraph: {} as GestaltGraph, notificationsManager: {} as NotificationsManager, logger, @@ -1473,7 +1470,7 @@ describe('vaultsSecretsStat', () => { db, acl: {} as ACL, keyRing, - nodeConnectionManager: {} as NodeConnectionManager, + nodeManager: {} as NodeManager, gestaltGraph: {} as GestaltGraph, notificationsManager: {} as 
NotificationsManager, logger, @@ -1592,7 +1589,7 @@ describe('vaultsVersion', () => { db, acl: {} as ACL, keyRing, - nodeConnectionManager: {} as NodeConnectionManager, + nodeManager: {} as NodeManager, gestaltGraph: {} as GestaltGraph, notificationsManager: {} as NotificationsManager, logger, diff --git a/tests/discovery/Discovery.test.ts b/tests/discovery/Discovery.test.ts index 52d08952f..8cda0b4f2 100644 --- a/tests/discovery/Discovery.test.ts +++ b/tests/discovery/Discovery.test.ts @@ -4,6 +4,7 @@ import type { Key } from '@/keys/types'; import type { SignedClaim } from '../../src/claims/types'; import type { ClaimLinkIdentity } from '@/claims/payloads'; import type { NodeId } from '../../src/ids'; +import type { AgentServerManifest } from '@/nodes/agent/handlers'; import fs from 'fs'; import path from 'path'; import os from 'os'; @@ -154,10 +155,10 @@ describe('Discovery', () => { const tlsConfig = await createTLSConfig(keyRing.keyPair); nodeConnectionManager = new NodeConnectionManager({ keyRing, - nodeGraph, tlsConfig, connectionConnectTimeoutTime: 2000, - connectionIdleTimeoutTime: 2000, + connectionIdleTimeoutTimeMin: 2000, + connectionIdleTimeoutTimeScale: 0, logger: logger.getChild('NodeConnectionManager'), }); nodeManager = new NodeManager({ @@ -173,6 +174,7 @@ describe('Discovery', () => { await nodeManager.start(); await nodeConnectionManager.start({ host: localhost as Host, + agentService: {} as AgentServerManifest, }); // Set up other gestalt nodeA = await PolykeyAgent.createPolykeyAgent({ @@ -206,11 +208,15 @@ describe('Discovery', () => { nodeIdA = nodeA.keyRing.getNodeId(); nodeIdB = nodeB.keyRing.getNodeId(); await testNodesUtils.nodesConnect(nodeA, nodeB); - await nodeGraph.setNode(nodeA.keyRing.getNodeId(), { - host: nodeA.agentServiceHost, - port: nodeA.agentServicePort, - scopes: ['global'], - }); + await nodeGraph.setNodeContactAddressData( + nodeA.keyRing.getNodeId(), + [nodeA.agentServiceHost, nodeA.agentServicePort], + { + mode: 
'direct', + connectedTime: 0, + scopes: ['global'], + }, + ); await nodeB.acl.setNodeAction(nodeA.keyRing.getNodeId(), 'claim'); await nodeA.nodeManager.claimNode(nodeB.keyRing.getNodeId()); nodeA.identitiesManager.registerProvider(testProvider); diff --git a/tests/global.d.ts b/tests/global.d.ts index 1db39a105..67abdbb7b 100644 --- a/tests/global.d.ts +++ b/tests/global.d.ts @@ -8,7 +8,6 @@ */ declare var projectDir: string; declare var testDir: string; -declare var dataDir: string; declare var defaultTimeout: number; declare var failedConnectionTimeout: number; declare var maxTimeout: number; diff --git a/tests/globalSetup.ts b/tests/globalSetup.ts index fde412205..c51ef0b67 100644 --- a/tests/globalSetup.ts +++ b/tests/globalSetup.ts @@ -1,16 +1,11 @@ -/* eslint-disable no-console */ -import process from 'process'; - /** * Global setup for all jest tests * Side-effects are performed here * Jest does not support `@/` imports here */ async function setup() { + // eslint-disable-next-line no-console console.log('\nGLOBAL SETUP'); - // The globalDataDir is already created - const globalDataDir = process.env['GLOBAL_DATA_DIR']!; - console.log(`Global Data Dir: ${globalDataDir}`); } export default setup; diff --git a/tests/globalTeardown.ts b/tests/globalTeardown.ts index 0e3e5d30d..73dcecf2a 100644 --- a/tests/globalTeardown.ts +++ b/tests/globalTeardown.ts @@ -1,16 +1,11 @@ -/* eslint-disable no-console */ -import fs from 'fs'; - /** * Global teardown for all jest tests * Side-effects are performed here * Jest does not support `@/` imports here */ async function teardown() { + // eslint-disable-next-line no-console console.log('GLOBAL TEARDOWN'); - const globalDataDir = process.env['GLOBAL_DATA_DIR']!; - console.log(`Destroying Global Data Dir: ${globalDataDir}`); - await fs.promises.rm(globalDataDir, { recursive: true, force: true }); } export default teardown; diff --git a/tests/identities/TestProvider.ts b/tests/identities/TestProvider.ts index 
02d8e54f0..80f3dd92d 100644 --- a/tests/identities/TestProvider.ts +++ b/tests/identities/TestProvider.ts @@ -83,13 +83,13 @@ class TestProvider extends Provider { authIdentityId: IdentityId, identityId: IdentityId, ): Promise { - let providerToken = await this.getToken(authIdentityId); + const providerToken = await this.getToken(authIdentityId); if (!providerToken) { throw new identitiesErrors.ErrorProviderUnauthenticated( `${authIdentityId} has not been authenticated`, ); } - providerToken = await this.checkToken(providerToken, authIdentityId); + await this.checkToken(providerToken, authIdentityId); const user = this.users[identityId]; if (!user) { return; @@ -107,13 +107,13 @@ class TestProvider extends Provider { authIdentityId: IdentityId, searchTerms: Array = [], ): AsyncGenerator { - let providerToken = await this.getToken(authIdentityId); + const providerToken = await this.getToken(authIdentityId); if (!providerToken) { throw new identitiesErrors.ErrorProviderUnauthenticated( `${authIdentityId} has not been authenticated`, ); } - providerToken = await this.checkToken(providerToken, authIdentityId); + await this.checkToken(providerToken, authIdentityId); for (const [k, v] of Object.entries(this.users) as Array< [ IdentityId, diff --git a/tests/keys/utils.ts b/tests/keys/utils.ts index d8a1d7809..1f2ef3f91 100644 --- a/tests/keys/utils.ts +++ b/tests/keys/utils.ts @@ -10,6 +10,7 @@ import type { MAC, } from '@/keys/types'; import type CertManager from '@/keys/CertManager'; +import type { KeyRing } from '@/keys'; import { fc } from '@fast-check/jest'; import { IterableX as Iterable } from 'ix/iterable'; import { AsyncIterableX as AsyncIterable } from 'ix/asynciterable'; @@ -21,6 +22,7 @@ import * as asymmetric from '@/keys/utils/asymmetric'; import * as jwk from '@/keys/utils/jwk'; import * as x509 from '@/keys/utils/x509'; import * as utils from '@/utils'; +import * as keysUtils from '@/keys/utils'; import * as testsIdsUtils from '../ids/utils'; const 
bufferArb = (constraints?: fc.IntArrayConstraints) => { @@ -352,6 +354,18 @@ class ResetCertWithNewKeyPairCommand implements CertManagerCommand { } } +/** + * Creates a fake KeyRing that only provides the `keyPair` and `getNodeId()` + */ +function createDummyKeyRing() { + const keyPair = keysUtils.generateKeyPair(); + const nodeId = keysUtils.publicKeyToNodeId(keyPair.publicKey); + return { + keyPair, + getNodeId: () => nodeId, + } as KeyRing; +} + export { bufferArb, keyArb, @@ -369,6 +383,7 @@ export { RenewCertWithNewKeyPairCommand, ResetCertWithCurrentKeyPairCommand, ResetCertWithNewKeyPairCommand, + createDummyKeyRing, }; export type { CertManagerModel, CertManagerCommand }; diff --git a/tests/network/utils.test.ts b/tests/network/utils.test.ts index e23ea4398..2813909fe 100644 --- a/tests/network/utils.test.ts +++ b/tests/network/utils.test.ts @@ -51,4 +51,75 @@ describe('utils', () => { networkUtils.resolveHostname('invalidHostname' as Hostname), ).resolves.toHaveLength(0); }); + test('canonicalizing IPs', async () => { + // IPv4 -> IPv4 + + // Local + expect(networkUtils.toCanonicalHost('127.0.0.1')).toBe('127.0.0.1'); + // Wildcard + expect(networkUtils.toCanonicalHost('0.0.0.0')).toBe('0.0.0.0'); + expect(networkUtils.toCanonicalHost('255.255.255.255')).toBe( + '255.255.255.255', + ); + expect(networkUtils.toCanonicalHost('74.125.43.99')).toBe('74.125.43.99'); + // Leading zeros are removed + expect(networkUtils.toCanonicalHost('192.168.001.001')).toBe('192.168.1.1'); + + // IPv6 -> IPv6 + + // Local + expect(networkUtils.toCanonicalHost('::1')).toBe('0:0:0:0:0:0:0:1'); + // Wildcard + expect(networkUtils.toCanonicalHost('::0')).toBe('0:0:0:0:0:0:0:0'); + // Lowercase + expect(networkUtils.toCanonicalHost('ABC:0:0:CD30:ABC:0:0:CD30')).toBe( + 'abc:0:0:cd30:abc:0:0:cd30', + ); + // Quad zeros are reduced to a single 0 + expect( + networkUtils.toCanonicalHost('0ABC:0000:0000:CD30:0ABC:0000:0000:CD30'), + ).toBe('abc:0:0:cd30:abc:0:0:cd30'); + // Double 
colon is expanded + expect(networkUtils.toCanonicalHost('FE80::0202:B3FF:FE1E:8329')).toBe( + 'fe80:0:0:0:202:b3ff:fe1e:8329', + ); + expect(networkUtils.toCanonicalHost('::1234:7f00:1')).toBe( + '0:0:0:0:0:1234:7f00:1', + ); + expect(networkUtils.toCanonicalHost('::1234:0:0')).toBe( + '0:0:0:0:0:1234:0:0', + ); + expect(networkUtils.toCanonicalHost('::1234:ffff:ffff')).toBe( + '0:0:0:0:0:1234:ffff:ffff', + ); + expect(networkUtils.toCanonicalHost('::1234:4a7d:2b63')).toBe( + '0:0:0:0:0:1234:4a7d:2b63', + ); + // Scoped + expect(networkUtils.toCanonicalHost('::1%eth1')).toBe( + '0:0:0:0:0:0:0:1%eth1', + ); + + // IPv4 mapped hex -> IPv4 + + expect(networkUtils.toCanonicalHost('::ffff:7f00:1')).toBe('127.0.0.1'); + expect(networkUtils.toCanonicalHost('::ffff:0:0')).toBe('0.0.0.0'); + expect(networkUtils.toCanonicalHost('::ffff:ffff:ffff')).toBe( + '255.255.255.255', + ); + expect(networkUtils.toCanonicalHost('::ffff:4a7d:2b63')).toBe( + '74.125.43.99', + ); + + // IPv4 mapped dec -> IPv4 + + expect(networkUtils.toCanonicalHost('::ffff:127.0.0.1')).toBe('127.0.0.1'); + expect(networkUtils.toCanonicalHost('::ffff:0.0.0.0')).toBe('0.0.0.0'); + expect(networkUtils.toCanonicalHost('::ffff:255.255.255.255')).toBe( + '255.255.255.255', + ); + expect(networkUtils.toCanonicalHost('::ffff:74.125.43.99')).toBe( + '74.125.43.99', + ); + }); }); diff --git a/tests/network/utils.ts b/tests/network/utils.ts new file mode 100644 index 000000000..17ae2ea53 --- /dev/null +++ b/tests/network/utils.ts @@ -0,0 +1,13 @@ +import type { Host, Hostname, Port } from '@/network/types'; +import { fc } from '@fast-check/jest'; + +const hostArb = fc.oneof(fc.ipV4(), fc.ipV6()) as fc.Arbitrary; + +const hostnameArb = fc.domain() as fc.Arbitrary; + +const portArb = fc.integer({ + min: 1, + max: 65_535, +}) as fc.Arbitrary; + +export { hostArb, hostnameArb, portArb }; diff --git a/tests/nodes/NodeConnection.test.ts b/tests/nodes/NodeConnection.test.ts index 1b23519c4..83ac97ca4 100644 --- 
a/tests/nodes/NodeConnection.test.ts +++ b/tests/nodes/NodeConnection.test.ts @@ -1,6 +1,8 @@ import type { Host, Port, TLSConfig } from '@/network/types'; import type { NodeId, NodeIdEncoded } from '@/ids'; import type { RPCStream } from '@matrixai/rpc'; +import type { AgentServerManifest } from '@/nodes/agent/handlers'; +import type { AgentClientManifest } from '@/nodes/agent/callers'; import { QUICServer, QUICSocket, events as quicEvents } from '@matrixai/quic'; import Logger, { formatting, LogLevel, StreamHandler } from '@matrixai/logger'; import { errors as quicErrors } from '@matrixai/quic'; @@ -22,7 +24,6 @@ describe(`${NodeConnection.name}`, () => { ), ]); const localHost = '127.0.0.1'; - const crypto = tlsTestUtils.createCrypto(); let serverTlsConfig: TLSConfig; let clientTlsConfig: TLSConfig; @@ -34,12 +35,12 @@ describe(`${NodeConnection.name}`, () => { let rpcServer: RPCServer; let clientSocket: QUICSocket; - const nodeConnections: Array> = []; + const nodeConnections: Array = []; /** * Adds created nodeConnections to the `nodeConnections` array for automated cleanup. 
* @param nc */ - const extractNodeConnection = (nc: NodeConnection) => { + const extractNodeConnection = (nc: NodeConnection) => { nodeConnections.push(nc); return nc; }; @@ -74,10 +75,7 @@ describe(`${NodeConnection.name}`, () => { verifyPeer: true, verifyCallback: nodesUtils.verifyClientCertificateChain, }, - crypto: { - key: keysUtils.generateKey(), - ops: crypto, - }, + crypto: nodesUtils.quicServerCrypto, socket: serverSocket, logger: logger.getChild(`${QUICServer.name}`), }); @@ -85,7 +83,7 @@ describe(`${NodeConnection.name}`, () => { fromError: networkUtils.fromError, logger: logger.getChild(`${RPCServer.name}`), }); - await rpcServer.start({ manifest: {} }); + await rpcServer.start({ manifest: {} as AgentServerManifest }); // Setting up handling logger.info('Setting up connection handling for server'); quicServer.addEventListener( @@ -125,9 +123,8 @@ describe(`${NodeConnection.name}`, () => { targetNodeIds: [serverNodeId], targetHost: localHost as Host, targetPort: quicServer.port as unknown as Port, - manifest: {}, + manifest: {} as AgentClientManifest, tlsConfig: clientTlsConfig, - crypto, quicSocket: clientSocket, logger: logger.getChild(`${NodeConnection.name}`), }).then(extractNodeConnection); @@ -140,9 +137,8 @@ describe(`${NodeConnection.name}`, () => { targetNodeIds: [serverNodeId], targetHost: localHost as Host, targetPort: quicServer.port as unknown as Port, - manifest: {}, + manifest: {} as AgentClientManifest, tlsConfig: clientTlsConfig, - crypto, quicSocket: clientSocket, logger: logger.getChild(`${NodeConnection.name}`), }).then(extractNodeConnection); @@ -153,10 +149,9 @@ describe(`${NodeConnection.name}`, () => { targetNodeIds: [serverNodeId], targetHost: localHost as Host, targetPort: 12345 as Port, - manifest: {}, + manifest: {} as AgentClientManifest, connectionKeepAliveTimeoutTime: 1000, tlsConfig: clientTlsConfig, - crypto, quicSocket: clientSocket, logger: logger.getChild(`${NodeConnection.name}`), }, @@ -172,31 +167,29 @@ 
describe(`${NodeConnection.name}`, () => { targetNodeIds: [serverNodeId], targetHost: localHost as Host, targetPort: quicServer.port as unknown as Port, - manifest: {}, + manifest: {} as AgentClientManifest, connectionKeepAliveTimeoutTime: 100, tlsConfig: clientTlsConfig, - crypto, quicSocket: clientSocket, logger: logger.getChild(`${NodeConnection.name}`), }, { timer: 100 }, ).then(extractNodeConnection); - const destroyProm = testsUtils.promFromEvent( + const destroyP = testsUtils.promFromEvent( nodeConnection, nodesEvents.EventNodeConnectionDestroyed, ); await serverSocket.stop({ force: true }); // Wait for destruction, may take 2+ seconds - await destroyProm.p; + await destroyP; }); test('get the root chain cert', async () => { const nodeConnection = await NodeConnection.createNodeConnection({ targetNodeIds: [serverNodeId], targetHost: localHost as Host, targetPort: quicServer.port as unknown as Port, - manifest: {}, + manifest: {} as AgentClientManifest, tlsConfig: clientTlsConfig, - crypto, quicSocket: clientSocket, logger: logger.getChild(`${NodeConnection.name}`), }).then(extractNodeConnection); @@ -208,9 +201,8 @@ describe(`${NodeConnection.name}`, () => { targetNodeIds: [serverNodeId], targetHost: localHost as Host, targetPort: quicServer.port as unknown as Port, - manifest: {}, + manifest: {} as AgentClientManifest, tlsConfig: clientTlsConfig, - crypto, quicSocket: clientSocket, logger: logger.getChild(`${NodeConnection.name}`), }).then(extractNodeConnection); @@ -218,30 +210,28 @@ describe(`${NodeConnection.name}`, () => { nodesUtils.encodeNodeId(nodeConnection.nodeId), ); }); - test('Should fail due to server rejecting client certificate (no certs)', async () => { const nodeConnection = await NodeConnection.createNodeConnection({ handleStream: () => {}, targetNodeIds: [serverNodeId], targetHost: localHost as Host, targetPort: quicServer.port as unknown as Port, - manifest: {}, + manifest: {} as AgentClientManifest, // @ts-ignore: TLS not used for this 
test tlsConfig: {}, - crypto, quicSocket: clientSocket, logger: logger.getChild(`${NodeConnection.name}`), }).then(extractNodeConnection); - const destroyProm = testsUtils.promFromEvent( + const destroyP = testsUtils.promFromEvent( nodeConnection, nodesEvents.EventNodeConnectionDestroyed, ); - const errorProm = testsUtils.promFromEvent( + const errorP = testsUtils.promFromEvent( nodeConnection, nodesEvents.EventNodeConnectionError, ); - await destroyProm.p; - const evt = await errorProm.p; + await destroyP; + const evt = await errorP; expect(evt.detail.cause).toBeInstanceOf( quicErrors.ErrorQUICConnectionPeerTLS, ); @@ -251,9 +241,8 @@ describe(`${NodeConnection.name}`, () => { targetNodeIds: [clientNodeId], targetHost: localHost as Host, targetPort: quicServer.port as unknown as Port, - manifest: {}, + manifest: {} as AgentClientManifest, tlsConfig: clientTlsConfig, - crypto, quicSocket: clientSocket, logger: logger.getChild(`${NodeConnection.name}`), }).then(extractNodeConnection); @@ -265,22 +254,21 @@ describe(`${NodeConnection.name}`, () => { targetNodeIds: [serverNodeId], targetHost: localHost as Host, targetPort: quicServer.port as unknown as Port, - manifest: {}, + manifest: {} as AgentClientManifest, connectionKeepAliveIntervalTime: 100, connectionKeepAliveTimeoutTime: 200, tlsConfig: clientTlsConfig, - crypto, quicSocket: clientSocket, logger: logger.getChild(`${NodeConnection.name}`), }, { timer: 150 }, ).then(extractNodeConnection); - const destroyProm = testsUtils.promFromEvent( + const destroyP = testsUtils.promFromEvent( nodeConnection, nodesEvents.EventNodeConnectionDestroyed, ); await serverSocket.stop({ force: true }); - await destroyProm.p; + await destroyP; }); test('Should fail and destroy due to connection ending local', async () => { const nodeConnection = await NodeConnection.createNodeConnection( @@ -288,17 +276,16 @@ describe(`${NodeConnection.name}`, () => { targetNodeIds: [serverNodeId], targetHost: localHost as Host, targetPort: 
quicServer.port as unknown as Port, - manifest: {}, + manifest: {} as AgentClientManifest, connectionKeepAliveTimeoutTime: 200, connectionKeepAliveIntervalTime: 100, tlsConfig: clientTlsConfig, - crypto, quicSocket: clientSocket, logger: logger.getChild(`${NodeConnection.name}`), }, { timer: 150 }, ).then(extractNodeConnection); - const destroyProm = testsUtils.promFromEvent( + const destroyP = testsUtils.promFromEvent( nodeConnection, nodesEvents.EventNodeConnectionDestroyed, ); @@ -307,7 +294,7 @@ describe(`${NodeConnection.name}`, () => { errorCode: 0, force: false, }); - await destroyProm.p; + await destroyP; }); test('Should fail and destroy due to connection ending remote', async () => { const nodeConnection = await NodeConnection.createNodeConnection( @@ -315,17 +302,16 @@ describe(`${NodeConnection.name}`, () => { targetNodeIds: [serverNodeId], targetHost: localHost as Host, targetPort: quicServer.port as unknown as Port, - manifest: {}, + manifest: {} as AgentClientManifest, connectionKeepAliveTimeoutTime: 200, connectionKeepAliveIntervalTime: 100, tlsConfig: clientTlsConfig, - crypto, quicSocket: clientSocket, logger: logger.getChild(`${NodeConnection.name}`), }, { timer: 150 }, ).then(extractNodeConnection); - const destroyProm = testsUtils.promFromEvent( + const destroyP = testsUtils.promFromEvent( nodeConnection, nodesEvents.EventNodeConnectionDestroyed, ); @@ -338,10 +324,10 @@ describe(`${NodeConnection.name}`, () => { force: false, }); }); - await destroyProm.p; + await destroyP; }); test('should wrap reverse connection', async () => { - const nodeConnectionReverseProm = promise>(); + const nodeConnectionReverseProm = promise(); quicServer.removeEventListener( quicEvents.EventQUICConnectionStream.name, handleEventQUICConnectionStream, @@ -356,7 +342,7 @@ describe(`${NodeConnection.name}`, () => { const nodeConnection = NodeConnection.createNodeConnectionReverse({ nodeId, certChain, - manifest: {}, + manifest: {} as AgentClientManifest, 
quicConnection, logger, }); @@ -369,22 +355,21 @@ describe(`${NodeConnection.name}`, () => { targetNodeIds: [serverNodeId], targetHost: localHost as Host, targetPort: quicServer.port as unknown as Port, - manifest: {}, + manifest: {} as AgentClientManifest, tlsConfig: clientTlsConfig, - crypto, quicSocket: clientSocket, logger: logger.getChild(`${NodeConnection.name}`), }).then(extractNodeConnection); - const destroyProm = testsUtils.promFromEvent( + const destroyP = testsUtils.promFromEvent( nodeConnection, nodesEvents.EventNodeConnectionDestroyed, ); const nodeConnectionReverse = await nodeConnectionReverseProm.p; await nodeConnectionReverse.destroy({ force: true }); - await destroyProm.p; + await destroyP; }); test('should handle reverse streams', async () => { - const nodeConnectionReverseProm = promise>(); + const nodeConnectionReverseProm = promise(); const reverseStreamProm = promise>(); quicServer.removeEventListener( quicEvents.EventQUICConnectionStream.name, @@ -400,7 +385,7 @@ describe(`${NodeConnection.name}`, () => { const nodeConnection = NodeConnection.createNodeConnectionReverse({ nodeId, certChain, - manifest: {}, + manifest: {} as AgentClientManifest, quicConnection, logger, }); @@ -421,9 +406,8 @@ describe(`${NodeConnection.name}`, () => { targetNodeIds: [serverNodeId], targetHost: localHost as Host, targetPort: quicServer.port as unknown as Port, - manifest: {}, + manifest: {} as AgentClientManifest, tlsConfig: clientTlsConfig, - crypto, quicSocket: clientSocket, logger: logger.getChild(`${NodeConnection.name}`), }).then(extractNodeConnection); @@ -447,11 +431,11 @@ describe(`${NodeConnection.name}`, () => { await writer2.write(Buffer.from('Hello!')); await forwardStreamProm.p; - const destroyProm = testsUtils.promFromEvent( + const destroyP = testsUtils.promFromEvent( nodeConnection, nodesEvents.EventNodeConnectionDestroyed, ); await nodeConnectionReverse.destroy({ force: true }); - await destroyProm.p; + await destroyP; }); }); diff --git 
a/tests/nodes/NodeConnectionManager.general.test.ts b/tests/nodes/NodeConnectionManager.general.test.ts deleted file mode 100644 index 3347e9de2..000000000 --- a/tests/nodes/NodeConnectionManager.general.test.ts +++ /dev/null @@ -1,688 +0,0 @@ -import type { Host, Port, TLSConfig } from '@/network/types'; -import type { NodeId } from '@/ids'; -import type { NodeAddress, NodeBucket } from '@/nodes/types'; -import fs from 'fs'; -import path from 'path'; -import os from 'os'; -import Logger, { formatting, LogLevel, StreamHandler } from '@matrixai/logger'; -import { IdInternal } from '@matrixai/id'; -import { DB } from '@matrixai/db'; -import { PromiseCancellable } from '@matrixai/async-cancellable'; -import * as nodesUtils from '@/nodes/utils'; -import NodeConnectionManager from '@/nodes/NodeConnectionManager'; -import NodeConnection from '@/nodes/NodeConnection'; -import * as keysUtils from '@/keys/utils'; -import KeyRing from '@/keys/KeyRing'; -import ACL from '@/acl/ACL'; -import GestaltGraph from '@/gestalts/GestaltGraph'; -import NodeGraph from '@/nodes/NodeGraph'; -import * as nodesErrors from '@/nodes/errors'; -import Sigchain from '@/sigchain/Sigchain'; -import TaskManager from '@/tasks/TaskManager'; -import PolykeyAgent from '@/PolykeyAgent'; -import * as utils from '@/utils'; -import * as testNodesUtils from './utils'; -import * as tlsTestUtils from '../utils/tls'; - -describe(`${NodeConnectionManager.name} general test`, () => { - const logger = new Logger(`${NodeConnection.name} test`, LogLevel.WARN, [ - new StreamHandler( - formatting.format`${formatting.level}:${formatting.keys}:${formatting.msg}`, - ), - ]); - const localHost = '127.0.0.1'; - const password = 'password'; - - let tlsConfig: TLSConfig; - - const nodeIdGenerator = (number: number) => { - const idArray = new Uint8Array([ - 223, - 24, - 34, - 40, - 46, - 217, - 4, - 71, - 103, - 71, - 59, - 123, - 143, - 187, - 9, - 29, - 157, - 41, - 131, - 44, - 68, - 160, - 79, - 127, - 137, - 154, - 221, 
- 86, - 157, - 23, - 77, - number, - ]); - return IdInternal.create(idArray); - }; - - let dataDir: string; - let nodePathA: string; - let nodePathB: string; - - let remotePolykeyAgentA: PolykeyAgent; - let serverAddressA: NodeAddress; - let serverNodeIdA: NodeId; - let remotePolykeyAgentB: PolykeyAgent; - - let keyRing: KeyRing; - let db: DB; - let acl: ACL; - let gestaltGraph: GestaltGraph; - let nodeGraph: NodeGraph; - let sigchain: Sigchain; - let taskManager: TaskManager; - - let nodeConnectionManager: NodeConnectionManager; - - // Mocking the relay send - let mockedHolePunchReverse: jest.SpyInstance>; - let mockedPingNode: jest.SpyInstance>; - - beforeEach(async () => { - mockedHolePunchReverse = jest.spyOn( - NodeConnectionManager.prototype, - 'holePunch', - ); - mockedPingNode = jest.spyOn(NodeConnectionManager.prototype, 'pingNode'); - dataDir = await fs.promises.mkdtemp( - path.join(os.tmpdir(), 'polykey-test-'), - ); - - // Setting up remote node - nodePathA = path.join(dataDir, 'agentA'); - nodePathB = path.join(dataDir, 'agentB'); - remotePolykeyAgentA = await PolykeyAgent.createPolykeyAgent({ - password, - options: { - nodePath: nodePathA, - agentServiceHost: localHost, - clientServiceHost: localHost, - keys: { - passwordOpsLimit: keysUtils.passwordOpsLimits.min, - passwordMemLimit: keysUtils.passwordMemLimits.min, - strictMemoryLock: false, - }, - }, - logger: logger.getChild('AgentA'), - }); - serverNodeIdA = remotePolykeyAgentA.keyRing.getNodeId(); - serverAddressA = { - host: remotePolykeyAgentA.agentServiceHost as Host, - port: remotePolykeyAgentA.agentServicePort as Port, - scopes: ['global'], - }; - remotePolykeyAgentB = await PolykeyAgent.createPolykeyAgent({ - password, - options: { - nodePath: nodePathB, - agentServiceHost: localHost, - clientServiceHost: localHost, - keys: { - passwordOpsLimit: keysUtils.passwordOpsLimits.min, - passwordMemLimit: keysUtils.passwordMemLimits.min, - strictMemoryLock: false, - }, - }, - logger: 
logger.getChild('AgentB'), - }); - - // Setting up client dependencies - const keysPath = path.join(dataDir, 'keys'); - keyRing = await KeyRing.createKeyRing({ - password, - keysPath, - logger, - passwordOpsLimit: keysUtils.passwordOpsLimits.min, - passwordMemLimit: keysUtils.passwordMemLimits.min, - strictMemoryLock: false, - }); - tlsConfig = await tlsTestUtils.createTLSConfig(keyRing.keyPair); - const dbPath = path.join(dataDir, 'db'); - db = await DB.createDB({ - dbPath, - logger, - }); - acl = await ACL.createACL({ - db, - logger, - }); - gestaltGraph = await GestaltGraph.createGestaltGraph({ - db, - acl, - logger, - }); - nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, - }); - sigchain = await Sigchain.createSigchain({ - db, - keyRing, - logger, - }); - taskManager = await TaskManager.createTaskManager({ - db, - logger, - }); - }); - - afterEach(async () => { - logger.info('AFTER EACH'); - mockedHolePunchReverse.mockRestore(); - mockedPingNode.mockRestore(); - await taskManager.stopProcessing(); - await taskManager.stopTasks(); - await nodeConnectionManager?.stop(); - await sigchain.stop(); - await sigchain.destroy(); - await nodeGraph.stop(); - await nodeGraph.destroy(); - await gestaltGraph.stop(); - await gestaltGraph.destroy(); - await acl.stop(); - await acl.destroy(); - await db.stop(); - await db.destroy(); - await keyRing.stop(); - await keyRing.destroy(); - await taskManager.stop(); - - await remotePolykeyAgentA?.stop(); - await remotePolykeyAgentB?.stop(); - }); - - test('finds node (local)', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - await taskManager.startProcessing(); - - // Case 1: node already exists in the local node graph (no contact required) - const nodeId = testNodesUtils.generateRandomNodeId(); - 
const nodeAddress: NodeAddress = { - host: localHost as Host, - port: 11111 as Port, - scopes: ['global'], - }; - await nodeGraph.setNode(nodeId, nodeAddress); - // Expect no error thrown - const findNodePromise = nodeConnectionManager.findNode(nodeId); - await expect(findNodePromise).resolves.not.toThrowError(); - await expect(findNodePromise).resolves.toStrictEqual(nodeAddress); - - await nodeConnectionManager.stop(); - }); - test('finds node (contacts remote node)', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - connectionKeepAliveTimeoutTime: 10000, - connectionKeepAliveIntervalTime: 1000, - tlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - await taskManager.startProcessing(); - // Mocking pinging to always return true - const mockedPingNode = jest.spyOn( - NodeConnectionManager.prototype, - 'pingNode', - ); - mockedPingNode.mockImplementation( - () => new PromiseCancellable((resolve) => resolve(true)), - ); - logger.info('DOING TEST'); - - await nodeGraph.setNode(serverNodeIdA, serverAddressA); - // Adding node information to remote node - const nodeId = testNodesUtils.generateRandomNodeId(); - const nodeAddress: NodeAddress = { - host: localHost as Host, - port: 11111 as Port, - scopes: ['global'], - }; - await remotePolykeyAgentA.nodeGraph.setNode(nodeId, nodeAddress); - - // Expect no error thrown - const findNodePromise = nodeConnectionManager.findNode(nodeId); - await expect(findNodePromise).resolves.not.toThrowError(); - await expect(findNodePromise).resolves.toStrictEqual(nodeAddress); - - await nodeConnectionManager.stop(); - }); - test('cannot find node (contacts remote node)', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - connectionKeepAliveTimeoutTime: 10000, - 
connectionKeepAliveIntervalTime: 1000, - tlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - await taskManager.startProcessing(); - // Mocking pinging to always return true - const mockedPingNode = jest.spyOn( - NodeConnectionManager.prototype, - 'pingNode', - ); - mockedPingNode.mockImplementation( - () => new PromiseCancellable((resolve) => resolve(true)), - ); - - await nodeGraph.setNode(serverNodeIdA, serverAddressA); - // Adding node information to remote node - const nodeId = testNodesUtils.generateRandomNodeId(); - - // Expect no error thrown - const findNodePromise = nodeConnectionManager.findNode(nodeId); - await expect(findNodePromise).resolves.toBe(undefined); - - await nodeConnectionManager.stop(); - }); - test('receives 20 closest local nodes from connected target', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - connectionKeepAliveTimeoutTime: 10000, - connectionKeepAliveIntervalTime: 1000, - tlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - await taskManager.startProcessing(); - // Mocking pinging to always return true - const mockedPingNode = jest.spyOn( - NodeConnectionManager.prototype, - 'pingNode', - ); - mockedPingNode.mockImplementation( - () => new PromiseCancellable((resolve) => resolve(true)), - ); - - await nodeGraph.setNode(serverNodeIdA, serverAddressA); - - // Now generate and add 20 nodes that will be close to this node ID - const addedClosestNodes: NodeBucket = []; - for (let i = 1; i < 101; i += 5) { - const closeNodeId = testNodesUtils.generateNodeIdForBucket( - serverNodeIdA, - i, - ); - const nodeAddress: NodeAddress = { - host: (i + '.' + i + '.' + i + '.' 
+ i) as Host, - port: i as Port, - scopes: ['global'], - }; - await remotePolykeyAgentA.nodeGraph.setNode(closeNodeId, nodeAddress); - addedClosestNodes.push([ - closeNodeId, - { - address: nodeAddress, - lastUpdated: 0, - }, - ]); - } - // Now create and add 10 more nodes that are far away from this node - for (let i = 1; i <= 10; i++) { - const farNodeId = nodeIdGenerator(i); - const nodeAddress = { - host: `${i}.${i}.${i}.${i}`, - port: i, - } as NodeAddress; - await remotePolykeyAgentA.nodeGraph.setNode(farNodeId, nodeAddress); - } - - // Get the closest nodes to the target node - const closest = await nodeConnectionManager.getRemoteNodeClosestNodes( - serverNodeIdA, - serverNodeIdA, - ); - // Sort the received nodes on distance such that we can check its equality - // with addedClosestNodes - nodesUtils.bucketSortByDistance(closest, serverNodeIdA); - expect(closest.length).toBe(20); - expect(closest).toEqual(addedClosestNodes); - - await nodeConnectionManager.stop(); - }); - test('holePunchSignalRequest with no target node', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - connectionKeepAliveTimeoutTime: 10000, - connectionKeepAliveIntervalTime: 1000, - tlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - await taskManager.startProcessing(); - - mockedHolePunchReverse.mockImplementation(() => { - return new PromiseCancellable((res) => { - res(); - }); - }); - - await nodeGraph.setNode(serverNodeIdA, serverAddressA); - - const targetNodeId = testNodesUtils.generateRandomNodeId(); - const relayNodeId = remotePolykeyAgentA.keyRing.getNodeId(); - - await expect( - nodeConnectionManager.connectionSignalInitial(targetNodeId, relayNodeId), - ).rejects.toThrow(); - await nodeConnectionManager.stop(); - }); - test('holePunchSignalRequest with target node', async () => { - // Establish connection between remote 
A and B - expect( - await remotePolykeyAgentA.nodeConnectionManager.pingNode( - remotePolykeyAgentB.keyRing.getNodeId(), - [ - { - host: remotePolykeyAgentB.agentServiceHost, - port: remotePolykeyAgentB.agentServicePort, - scopes: ['global'], - }, - ], - ), - ).toBeTrue(); - - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - connectionKeepAliveTimeoutTime: 10000, - connectionKeepAliveIntervalTime: 1000, - tlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - await taskManager.startProcessing(); - - mockedHolePunchReverse.mockImplementation(() => { - return new PromiseCancellable((res) => { - res(); - }); - }); - - const serverNodeId = remotePolykeyAgentA.keyRing.getNodeId(); - const serverAddress: NodeAddress = { - host: remotePolykeyAgentA.agentServiceHost as Host, - port: remotePolykeyAgentA.agentServicePort as Port, - scopes: ['global'], - }; - await nodeGraph.setNode(serverNodeId, serverAddress); - - const targetNodeId = remotePolykeyAgentB.keyRing.getNodeId(); - const relayNodeId = remotePolykeyAgentA.keyRing.getNodeId(); - - await nodeConnectionManager.connectionSignalInitial( - targetNodeId, - relayNodeId, - ); - // Await the FAF signalling to finish. 
- const signalMapA = - // @ts-ignore: kidnap protected property - remotePolykeyAgentA.nodeConnectionManager.activeSignalFinalPs; - for (const p of signalMapA) { - await p; - } - const punchMapB = - // @ts-ignore: kidnap protected property - remotePolykeyAgentB.nodeConnectionManager.activeHolePunchPs; - for await (const [, p] of punchMapB) { - await p; - } - expect(mockedHolePunchReverse).toHaveBeenCalled(); - await nodeConnectionManager.stop(); - }); - test('holePunchSignalRequest is nonblocking', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - connectionKeepAliveTimeoutTime: 10000, - connectionKeepAliveIntervalTime: 1000, - tlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - await taskManager.startProcessing(); - - const { p: waitP, resolveP: waitResolveP } = utils.promise(); - mockedHolePunchReverse.mockImplementation(() => { - return new PromiseCancellable(async (res) => { - await waitP; - res(); - }); - }); - - const serverNodeId = remotePolykeyAgentA.keyRing.getNodeId(); - const serverAddress: NodeAddress = { - host: remotePolykeyAgentA.agentServiceHost, - port: remotePolykeyAgentA.agentServicePort, - scopes: ['global'], - }; - await nodeGraph.setNode(serverNodeId, serverAddress); - // Establish connection between remote A and B - expect( - await remotePolykeyAgentA.nodeConnectionManager.pingNode( - remotePolykeyAgentB.keyRing.getNodeId(), - [ - { - host: remotePolykeyAgentB.agentServiceHost, - port: remotePolykeyAgentB.agentServicePort, - scopes: ['global'], - }, - ], - ), - ).toBeTrue(); - - const targetNodeId = remotePolykeyAgentB.keyRing.getNodeId(); - const relayNodeId = remotePolykeyAgentA.keyRing.getNodeId(); - // Creating 5 concurrent attempts - const holePunchSignalRequests = [1, 2, 3, 4, 5].map(() => - nodeConnectionManager.connectionSignalInitial(targetNodeId, relayNodeId), - ); - // All 
should resolve immediately and not block - await Promise.all(holePunchSignalRequests); - - // Await the FAF signalling to finish. - const signalMapA = - // @ts-ignore: kidnap protected property - remotePolykeyAgentA.nodeConnectionManager.activeSignalFinalPs; - for (const p of signalMapA) { - await p; - } - // Only one attempt is being made - const punchMapB = - // @ts-ignore: kidnap protected property - remotePolykeyAgentB.nodeConnectionManager.activeHolePunchPs; - expect(punchMapB.size).toBe(1); - // Allow the attempt to complete - waitResolveP(); - for await (const [, p] of punchMapB) { - await p; - } - // Only attempted once - expect(mockedHolePunchReverse).toHaveBeenCalledTimes(1); - await nodeConnectionManager.stop(); - }); - test('holePunchRequest single target with multiple ports is rate limited', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - connectionKeepAliveTimeoutTime: 10000, - connectionKeepAliveIntervalTime: 1000, - tlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - await taskManager.startProcessing(); - - const { p: waitP, resolveP: waitResolveP } = utils.promise(); - mockedHolePunchReverse.mockImplementation(() => { - return new PromiseCancellable(async (res) => { - await waitP; - res(); - }); - }); - - nodeConnectionManager.handleNodesConnectionSignalFinal( - '127.0.0.1' as Host, - 55550 as Port, - ); - nodeConnectionManager.handleNodesConnectionSignalFinal( - '127.0.0.1' as Host, - 55551 as Port, - ); - nodeConnectionManager.handleNodesConnectionSignalFinal( - '127.0.0.1' as Host, - 55552 as Port, - ); - nodeConnectionManager.handleNodesConnectionSignalFinal( - '127.0.0.1' as Host, - 55553 as Port, - ); - nodeConnectionManager.handleNodesConnectionSignalFinal( - '127.0.0.1' as Host, - 55554 as Port, - ); - nodeConnectionManager.handleNodesConnectionSignalFinal( - '127.0.0.1' as Host, 
- 55555 as Port, - ); - - // @ts-ignore: protected property - expect(nodeConnectionManager.activeHolePunchPs.size).toBe(6); - // @ts-ignore: protected property - expect(nodeConnectionManager.activeHolePunchAddresses.size).toBe(1); - waitResolveP(); - // @ts-ignore: protected property - for await (const [, p] of nodeConnectionManager.activeHolePunchPs) { - await p; - } - - // Only attempted once - expect(mockedHolePunchReverse).toHaveBeenCalledTimes(6); - await nodeConnectionManager.stop(); - }); - test('holePunchSignalRequest rejects excessive requests', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - connectionKeepAliveTimeoutTime: 10000, - connectionKeepAliveIntervalTime: 1000, - tlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - await taskManager.startProcessing(); - - mockedHolePunchReverse.mockImplementation(() => { - return new PromiseCancellable(async (res) => { - res(); - }); - }); - - expect( - await nodeConnectionManager.pingNode( - remotePolykeyAgentB.keyRing.getNodeId(), - [ - { - host: remotePolykeyAgentB.agentServiceHost, - port: remotePolykeyAgentB.agentServicePort, - scopes: ['global'], - }, - ], - ), - ).toBeTrue(); - const keyPair = keysUtils.generateKeyPair(); - const sourceNodeId = keysUtils.publicKeyToNodeId(keyPair.publicKey); - const targetNodeId = remotePolykeyAgentB.keyRing.getNodeId(); - const data = Buffer.concat([sourceNodeId, targetNodeId]); - const signature = keysUtils.signWithPrivateKey(keyPair, data); - await expect(async () => { - for (let i = 0; i < 30; i++) { - await nodeConnectionManager.handleNodesConnectionSignalInitial( - sourceNodeId, - targetNodeId, - { - host: '127.0.0.1' as Host, - port: 55555 as Port, - scopes: ['global'], - }, - signature.toString('base64url'), - ); - } - }).rejects.toThrow( - nodesErrors.ErrorNodeConnectionManagerRequestRateExceeded, - ); - 
- const signalMapA = - // @ts-ignore: kidnap protected property - nodeConnectionManager.activeSignalFinalPs; - for (const p of signalMapA.values()) { - await p; - } - const punchMapB = - // @ts-ignore: kidnap protected property - remotePolykeyAgentB.nodeConnectionManager.activeHolePunchPs; - for (const [, p] of punchMapB) { - await p; - } - - await nodeConnectionManager.stop(); - }); - test.todo('Handles reverse streams'); -}); diff --git a/tests/nodes/NodeConnectionManager.lifecycle.test.ts b/tests/nodes/NodeConnectionManager.lifecycle.test.ts deleted file mode 100644 index a7544be51..000000000 --- a/tests/nodes/NodeConnectionManager.lifecycle.test.ts +++ /dev/null @@ -1,780 +0,0 @@ -import type { Host, Port, TLSConfig } from '@/network/types'; -import type { NodeAddress } from '@/nodes/types'; -import type { NodeId, NodeIdEncoded, NodeIdString } from '@/ids'; -import type { ObjectEmpty } from '@'; -import type { - JSONRPCRequestParams, - JSONRPCResponseResult, -} from '@matrixai/rpc'; -import path from 'path'; -import fs from 'fs'; -import os from 'os'; -import { DB } from '@matrixai/db'; -import Logger, { formatting, LogLevel, StreamHandler } from '@matrixai/logger'; -import { UnaryHandler } from '@matrixai/rpc'; -import KeyRing from '@/keys/KeyRing'; -import NodeGraph from '@/nodes/NodeGraph'; -import * as nodesUtils from '@/nodes/utils'; -import * as keysUtils from '@/keys/utils'; -import NodeConnectionManager from '@/nodes/NodeConnectionManager'; -import { promise, sleep } from '@/utils'; -import * as nodesErrors from '@/nodes/errors'; -import NodeConnection from '@/nodes/NodeConnection'; -import * as tlsUtils from '../utils/tls'; - -describe(`${NodeConnectionManager.name} lifecycle test`, () => { - class Echo extends UnaryHandler< - ObjectEmpty, - JSONRPCRequestParams, - JSONRPCResponseResult - > { - public handle = async ( - input: JSONRPCRequestParams, - ): Promise => { - return input; - }; - } - - const logger = new Logger(`${NodeConnection.name} test`, 
LogLevel.WARN, [ - new StreamHandler( - formatting.format`${formatting.level}:${formatting.keys}:${formatting.msg}`, - ), - ]); - const localHost = '127.0.0.1' as Host; - const password = 'password'; - - let dataDir: string; - - let serverTlsConfig1: TLSConfig; - let serverTlsConfig2: TLSConfig; - let clientTlsConfig: TLSConfig; - let serverNodeId1: NodeId; - let serverNodeId2: NodeId; - let clientNodeId: NodeId; - let serverNodeIdEncoded1: NodeIdEncoded; - let serverNodeIdEncoded2: NodeIdEncoded; - let keyRingPeer1: KeyRing; - let keyRingPeer2: KeyRing; - let nodeConnectionManagerPeer1: NodeConnectionManager; - let nodeConnectionManagerPeer2: NodeConnectionManager; - let serverAddress1: NodeAddress; - - let keyRingClient: KeyRing; - let db: DB; - let nodeGraph: NodeGraph; - - let nodeConnectionManager: NodeConnectionManager; - - beforeEach(async () => { - dataDir = await fs.promises.mkdtemp( - path.join(os.tmpdir(), 'polykey-test-'), - ); - const keysPathPeer1 = path.join(dataDir, 'keysPeer1'); - const keysPathPeer2 = path.join(dataDir, 'keysPeer2'); - keyRingPeer1 = await KeyRing.createKeyRing({ - password, - keysPath: keysPathPeer1, - passwordOpsLimit: keysUtils.passwordOpsLimits.min, - passwordMemLimit: keysUtils.passwordMemLimits.min, - strictMemoryLock: false, - logger, - }); - keyRingPeer2 = await KeyRing.createKeyRing({ - password, - keysPath: keysPathPeer2, - passwordOpsLimit: keysUtils.passwordOpsLimits.min, - passwordMemLimit: keysUtils.passwordMemLimits.min, - strictMemoryLock: false, - logger, - }); - const keysPath = path.join(dataDir, 'keys'); - keyRingClient = await KeyRing.createKeyRing({ - password, - keysPath, - passwordOpsLimit: keysUtils.passwordOpsLimits.min, - passwordMemLimit: keysUtils.passwordMemLimits.min, - strictMemoryLock: false, - logger, - }); - const serverKeyPair1 = keyRingPeer1.keyPair; - const serverKeyPair2 = keyRingPeer2.keyPair; - const clientKeyPair = keyRingClient.keyPair; - serverNodeId1 = 
keysUtils.publicKeyToNodeId(serverKeyPair1.publicKey); - serverNodeId2 = keysUtils.publicKeyToNodeId(serverKeyPair2.publicKey); - clientNodeId = keysUtils.publicKeyToNodeId(clientKeyPair.publicKey); - serverNodeIdEncoded1 = nodesUtils.encodeNodeId(serverNodeId1); - serverNodeIdEncoded2 = nodesUtils.encodeNodeId(serverNodeId2); - serverTlsConfig1 = await tlsUtils.createTLSConfig(serverKeyPair1); - serverTlsConfig2 = await tlsUtils.createTLSConfig(serverKeyPair2); - clientTlsConfig = await tlsUtils.createTLSConfig(clientKeyPair); - nodeConnectionManagerPeer1 = new NodeConnectionManager({ - keyRing: keyRingPeer1, - logger: logger.getChild(`${NodeConnectionManager.name}Peer1`), - nodeGraph: {} as NodeGraph, - tlsConfig: serverTlsConfig1, - seedNodes: undefined, - }); - await nodeConnectionManagerPeer1.start({ - host: localHost, - manifest: { - echo: new Echo({}), - }, - }); - nodeConnectionManagerPeer2 = new NodeConnectionManager({ - keyRing: keyRingPeer2, - logger: logger.getChild(`${NodeConnectionManager.name}Peer2`), - nodeGraph: {} as NodeGraph, - tlsConfig: serverTlsConfig2, - seedNodes: undefined, - }); - await nodeConnectionManagerPeer2.start({ - host: localHost, - manifest: { - echo: new Echo({}), - }, - }); - - // Setting up client dependencies - const dbPath = path.join(dataDir, 'db'); - db = await DB.createDB({ - dbPath, - logger, - }); - nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing: keyRingClient, - logger, - }); - serverAddress1 = { - host: nodeConnectionManagerPeer1.host, - port: nodeConnectionManagerPeer1.port, - scopes: ['global'], - }; - }); - - afterEach(async () => { - await nodeConnectionManager?.stop(); - await nodeGraph.stop(); - await nodeGraph.destroy(); - await db.stop(); - await db.destroy(); - await keyRingClient.stop(); - await keyRingClient.destroy(); - - await nodeConnectionManagerPeer1.stop(); - await nodeConnectionManagerPeer2.stop(); - }); - - test('NodeConnectionManager readiness', async () => { - nodeConnectionManager 
= new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - }); - - await nodeConnectionManager.stop(); - }); - test('NodeConnectionManager consecutive start stops', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - }); - - await nodeConnectionManager.stop(); - await nodeConnectionManager.start({ - host: localHost, - }); - await nodeConnectionManager.stop(); - }); - - // FIXME: holding process open for a time. connectionKeepAliveIntervalTime holds the process open, failing to clean up? - test('acquireConnection should create connection', async () => { - await nodeGraph.setNode(serverNodeId1, serverAddress1); - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - nodeGraph, - connectionConnectTimeoutTime: 1000, - logger: logger.getChild(`${NodeConnectionManager.name}Local`), - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - }); - - const acquire = - await nodeConnectionManager.acquireConnection(serverNodeId1); - const [release] = await acquire(); - expect(nodeConnectionManager.hasConnection(serverNodeId1)).toBeTrue(); - await release(); - await nodeConnectionManager.stop(); - }); - test('withConnF should create connection', async () => { - await nodeGraph.setNode(serverNodeId1, serverAddress1); - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - }); - - 
await nodeConnectionManager.withConnF(serverNodeId1, async () => { - expect(nodeConnectionManager.hasConnection(serverNodeId1)).toBeTrue(); - }); - - await nodeConnectionManager.stop(); - }); - test('concurrent connections should result in only 1 connection', async () => { - // A connection is concurrently established in the forward and reverse - // direction, we only want one connection to exist afterwards. - - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - manifest: { - echo: new Echo({}), - }, - }); - const clientAddress: NodeAddress = { - host: nodeConnectionManager.host, - port: nodeConnectionManager.port, - scopes: ['global'], - }; - - const forwardConnectP = nodeConnectionManager.getMultiConnection( - [serverNodeId1], - [serverAddress1], - ); - const reverseConnectP = nodeConnectionManagerPeer1.getMultiConnection( - [clientNodeId], - [clientAddress], - ); - - await Promise.all([forwardConnectP, reverseConnectP]); - const promA = nodeConnectionManager.withConnF( - serverNodeId1, - async (connection) => { - await connection.getClient().unaryCaller('echo', { value: 'hello' }); - }, - ); - const promB = nodeConnectionManagerPeer1.withConnF( - clientNodeId, - async (connection) => { - await connection.getClient().unaryCaller('echo', { value: 'hello' }); - }, - ); - - // Should not throw any errors - await Promise.all([promA, promB]); - - await nodeConnectionManager.stop(); - }); - test('should list active connections', async () => { - await nodeGraph.setNode(serverNodeId1, serverAddress1); - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, 
- }); - - await nodeConnectionManager.withConnF(serverNodeId1, async () => { - expect(nodeConnectionManager.hasConnection(serverNodeId1)).toBeTrue(); - }); - - const connectionsList = nodeConnectionManager.listConnections(); - expect(connectionsList).toHaveLength(1); - expect(nodesUtils.encodeNodeId(connectionsList[0].nodeId)).toEqual( - serverNodeIdEncoded1, - ); - expect(connectionsList[0].address.host).toEqual( - nodeConnectionManagerPeer1.host, - ); - expect(connectionsList[0].address.port).toEqual( - nodeConnectionManagerPeer1.port, - ); - - await nodeConnectionManager.stop(); - }); - test('withConnG should create connection', async () => { - await nodeGraph.setNode(serverNodeId1, serverAddress1); - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - }); - // @ts-ignore: kidnap protected property - const connectionMap = nodeConnectionManager.connections; - - const gen = nodeConnectionManager.withConnG( - serverNodeId1, - async function* (): AsyncGenerator { - expect(connectionMap.size).toBeGreaterThanOrEqual(1); - }, - ); - - for await (const _ of gen) { - // Do nothing - } - - await nodeConnectionManager.stop(); - }); - test('should fail to create connection to offline node', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - }); - // @ts-ignore: kidnap protected property - const connectionMap = nodeConnectionManager.connections; - const randomNodeId = keysUtils.publicKeyToNodeId( - keysUtils.generateKeyPair().publicKey, - ); - const gen = nodeConnectionManager.withConnG( - randomNodeId, - async function* (): AsyncGenerator 
{ - expect(connectionMap.size).toBeGreaterThanOrEqual(1); - }, - ); - - const prom = async () => { - for await (const _ of gen) { - // Do nothing - } - }; - await expect(prom).rejects.toThrow( - nodesErrors.ErrorNodeGraphNodeIdNotFound, - ); - - await nodeConnectionManager.stop(); - }); - test('connection should persist', async () => { - await nodeGraph.setNode(serverNodeId1, serverAddress1); - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - }); - await nodeConnectionManager.withConnF(serverNodeId1, async () => { - // Do nothing - }); - expect(nodeConnectionManager.hasConnection(serverNodeId1)).toBeTrue(); - expect(nodeConnectionManager.listConnections()).toHaveLength(1); - await nodeConnectionManager.withConnF(serverNodeId1, async () => { - // Do nothing - }); - expect(nodeConnectionManager.hasConnection(serverNodeId1)).toBeTrue(); - expect(nodeConnectionManager.listConnections()).toHaveLength(1); - - await nodeConnectionManager.stop(); - }); - test('should create 1 connection with concurrent creates', async () => { - await nodeGraph.setNode(serverNodeId1, serverAddress1); - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - }); - const waitProm = promise(); - const tryConnection = () => { - return nodeConnectionManager.withConnF(serverNodeId1, async () => { - // Do nothing - await waitProm.p; - }); - }; - const tryProm = Promise.all([ - tryConnection(), - tryConnection(), - tryConnection(), - tryConnection(), - tryConnection(), - ]); - waitProm.resolveP(); - await tryProm; - 
expect(nodeConnectionManager.hasConnection(serverNodeId1)).toBeTrue(); - expect(nodeConnectionManager.listConnections()).toHaveLength(1); - - await nodeConnectionManager.stop(); - }); - test('should destroy a connection', async () => { - await nodeGraph.setNode(serverNodeId1, serverAddress1); - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - }); - await nodeConnectionManager.withConnF(serverNodeId1, async () => { - // Do nothing - }); - expect(nodeConnectionManager.hasConnection(serverNodeId1)).toBeTrue(); - expect(nodeConnectionManager.listConnections()).toHaveLength(1); - - // @ts-ignore: Kidnap protected property - const connectionMap = nodeConnectionManager.connections; - const connection = connectionMap.get( - serverNodeId1.toString() as NodeIdString, - ); - await connection!.connection.destroy({ force: true }); - - // Waiting for connection to clean up from map - await sleep(100); - expect(nodeConnectionManager.hasConnection(serverNodeId1)).toBeFalse(); - expect(nodeConnectionManager.listConnections()).toHaveLength(0); - - await nodeConnectionManager.stop(); - }); - test('stopping should destroy all connections', async () => { - await nodeGraph.setNode(serverNodeId1, serverAddress1); - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - }); - await nodeConnectionManager.withConnF(serverNodeId1, async () => { - // Do nothing - }); - expect(nodeConnectionManager.hasConnection(serverNodeId1)).toBeTrue(); - expect(nodeConnectionManager.listConnections()).toHaveLength(1); - - // @ts-ignore: Kidnap protected property - const connectionMap = 
nodeConnectionManager.connections; - await nodeConnectionManager.stop(); - - // Waiting for connection to clean up from map - expect(connectionMap.size).toBe(0); - }); - test('should ping node with address', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - }); - const result = await nodeConnectionManager.pingNode(serverNodeId1, [ - { - host: localHost as Host, - port: nodeConnectionManagerPeer1.port, - scopes: ['local'], - }, - ]); - expect(result).toBeTrue(); - expect(nodeConnectionManager.hasConnection(serverNodeId1)).toBeTrue(); - - await nodeConnectionManager.stop(); - }); - test('should fail to ping non existent node', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - }); - const result = await nodeConnectionManager.pingNode( - serverNodeId1, - [ - { - host: localHost as Host, - port: 12345 as Port, - scopes: ['local'], - }, - ], - { timer: 100 }, - ); - expect(result).toBeFalse(); - expect(nodeConnectionManager.hasConnection(serverNodeId1)).toBeFalse(); - - await nodeConnectionManager.stop(); - }); - test('should fail to ping node if NodeId does not match', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - }); - const result = await nodeConnectionManager.pingNode(clientNodeId, [ - { - host: localHost as Host, - port: nodeConnectionManagerPeer1.port, - scopes: ['local'], - }, - 
]); - expect(result).toBeFalse(); - expect(nodeConnectionManager.hasConnection(clientNodeId)).toBeFalse(); - - await nodeConnectionManager.stop(); - }); - test('use multi-connection to connect to one node with multiple addresses', async () => { - await nodeGraph.setNode(serverNodeId1, serverAddress1); - await nodeGraph.setNode(serverNodeId2, serverAddress1); - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - }); - - const connectedNodes = await nodeConnectionManager.getMultiConnection( - [serverNodeId1], - [ - { - host: '127.0.0.1' as Host, - port: nodeConnectionManagerPeer1.port, - scopes: ['global'], - }, - { - host: '127.0.0.2' as Host, - port: nodeConnectionManagerPeer1.port, - scopes: ['global'], - }, - { - host: '127.0.0.3' as Host, - port: nodeConnectionManagerPeer1.port, - scopes: ['global'], - }, - ], - { timer: 200 }, - ); - expect(connectedNodes.length).toBe(1); - expect(nodesUtils.encodeNodeId(connectedNodes[0])).toBe( - serverNodeIdEncoded1, - ); - - await nodeConnectionManager.stop(); - }); - test('use multi-connection to connect to multiple nodes with multiple addresses', async () => { - await nodeGraph.setNode(serverNodeId1, serverAddress1); - await nodeGraph.setNode(serverNodeId2, serverAddress1); - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - }); - - const connectedNodes = await nodeConnectionManager.getMultiConnection( - [serverNodeId1, serverNodeId2], - [ - { - host: '127.0.0.1' as Host, - port: nodeConnectionManagerPeer1.port, - scopes: ['global'], - }, - { - host: '127.0.0.2' as Host, - port: 
nodeConnectionManagerPeer1.port, - scopes: ['global'], - }, - { - host: '127.0.0.3' as Host, - port: nodeConnectionManagerPeer1.port, - scopes: ['global'], - }, - { - host: '127.0.0.1' as Host, - port: nodeConnectionManagerPeer2.port, - scopes: ['global'], - }, - { - host: '127.0.0.2' as Host, - port: nodeConnectionManagerPeer2.port, - scopes: ['global'], - }, - { - host: '127.0.0.3' as Host, - port: nodeConnectionManagerPeer2.port, - scopes: ['global'], - }, - ], - { timer: 200 }, - ); - expect(connectedNodes.length).toBe(2); - const connectedIdStrings = connectedNodes.map((v) => - nodesUtils.encodeNodeId(v), - ); - expect(connectedIdStrings).toContain(serverNodeIdEncoded1); - expect(connectedIdStrings).toContain(serverNodeIdEncoded2); - - await nodeConnectionManager.stop(); - }); - test('use multi-connection to connect to multiple nodes with single address', async () => { - await nodeGraph.setNode(serverNodeId1, serverAddress1); - await nodeGraph.setNode(serverNodeId2, serverAddress1); - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - }); - - const connectedNodes = await nodeConnectionManager.getMultiConnection( - [serverNodeId1, serverNodeId2], - [ - { - host: '127.0.0.1' as Host, - port: nodeConnectionManagerPeer1.port, - scopes: ['global'], - }, - ], - { timer: 200 }, - ); - expect(connectedNodes.length).toBe(1); - const connectedIdStrings = connectedNodes.map((v) => - nodesUtils.encodeNodeId(v), - ); - expect(connectedIdStrings).toContain(serverNodeIdEncoded1); - expect(connectedIdStrings).not.toContain(serverNodeIdEncoded2); - - await nodeConnectionManager.stop(); - }); - test.todo('multi-connection respects locking'); - test('multi-connection ends early when all nodes are connected to', async () => { - await nodeGraph.setNode(serverNodeId1, 
serverAddress1); - await nodeGraph.setNode(serverNodeId2, serverAddress1); - nodeConnectionManager = new NodeConnectionManager({ - keyRing: keyRingClient, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - await nodeConnectionManager.start({ - host: localHost, - }); - - const connectedNodesProm = nodeConnectionManager.getMultiConnection( - [serverNodeId1, serverNodeId2], - [ - { - host: '127.0.0.1' as Host, - port: nodeConnectionManagerPeer1.port, - scopes: ['global'], - }, - { - host: '127.0.0.2' as Host, - port: nodeConnectionManagerPeer1.port, - scopes: ['global'], - }, - { - host: '127.0.0.3' as Host, - port: nodeConnectionManagerPeer1.port, - scopes: ['global'], - }, - { - host: '127.0.0.1' as Host, - port: nodeConnectionManagerPeer2.port, - scopes: ['global'], - }, - { - host: '127.0.0.2' as Host, - port: nodeConnectionManagerPeer2.port, - scopes: ['global'], - }, - { - host: '127.0.0.3' as Host, - port: nodeConnectionManagerPeer2.port, - scopes: ['global'], - }, - ], - { timer: 2000 }, - ); - const result = await Promise.race([ - sleep(1000).then(() => false), - connectedNodesProm, - ]); - - if (result === false || result === true) { - // Wait for everything to settle - await connectedNodesProm.catch(() => {}); - throw Error( - 'connectedNodesProm did not resolve early after connecting to all nodeIds', - ); - } - - expect(result.length).toBe(2); - const connectedIdStrings = result.map((v) => nodesUtils.encodeNodeId(v)); - expect(connectedIdStrings).toContain(serverNodeIdEncoded1); - expect(connectedIdStrings).toContain(serverNodeIdEncoded2); - - await nodeConnectionManager.stop(); - }); -}); diff --git a/tests/nodes/NodeConnectionManager.mdns.test.ts b/tests/nodes/NodeConnectionManager.mdns.test.ts deleted file mode 100644 index 98918c19f..000000000 --- a/tests/nodes/NodeConnectionManager.mdns.test.ts +++ /dev/null @@ -1,182 +0,0 @@ -import type { Host, TLSConfig } from 
'@/network/types'; -import type { NodeId, NodeIdEncoded } from '@/ids'; -import path from 'path'; -import fs from 'fs'; -import os from 'os'; -import { DB } from '@matrixai/db'; -import { MDNS } from '@matrixai/mdns'; -import Logger, { formatting, LogLevel, StreamHandler } from '@matrixai/logger'; -import KeyRing from '@/keys/KeyRing'; -import NodeGraph from '@/nodes/NodeGraph'; -import NodeConnection from '@/nodes/NodeConnection'; -import NodeConnectionManager from '@/nodes/NodeConnectionManager'; -import * as nodesUtils from '@/nodes/utils'; -import * as keysUtils from '@/keys/utils'; -import config from '@/config'; -import * as tlsUtils from '../utils/tls'; - -describe(`${NodeConnectionManager.name} MDNS test`, () => { - const logger = new Logger(`${NodeConnection.name} test`, LogLevel.WARN, [ - new StreamHandler( - formatting.format`${formatting.level}:${formatting.keys}:${formatting.msg}`, - ), - ]); - const localHost = '::' as Host; - const password = 'password'; - - let dataDir: string; - - let serverTlsConfig: TLSConfig; - let clientTlsConfig: TLSConfig; - let serverNodeId: NodeId; - let clientNodeId: NodeId; - let serverNodeIdEncoded: NodeIdEncoded; - let clientNodeIdEncoded: NodeIdEncoded; - let keyRingPeer: KeyRing; - let mdnsPeer: MDNS; - let nodeConnectionManagerPeer: NodeConnectionManager; - - let keyRing: KeyRing; - let db: DB; - let nodeGraph: NodeGraph; - - let mdns: MDNS; - let nodeConnectionManager: NodeConnectionManager; - - beforeEach(async () => { - dataDir = await fs.promises.mkdtemp( - path.join(os.tmpdir(), 'polykey-test-'), - ); - const keysPathPeer = path.join(dataDir, 'keysPeer'); - keyRingPeer = await KeyRing.createKeyRing({ - password, - keysPath: keysPathPeer, - passwordOpsLimit: keysUtils.passwordOpsLimits.min, - passwordMemLimit: keysUtils.passwordMemLimits.min, - strictMemoryLock: false, - logger, - }); - const serverKeyPair = keyRingPeer.keyPair; - const clientKeyPair = keysUtils.generateKeyPair(); - serverNodeId = 
keysUtils.publicKeyToNodeId(serverKeyPair.publicKey); - clientNodeId = keysUtils.publicKeyToNodeId(clientKeyPair.publicKey); - serverNodeIdEncoded = nodesUtils.encodeNodeId(serverNodeId); - clientNodeIdEncoded = nodesUtils.encodeNodeId(clientNodeId); - serverTlsConfig = await tlsUtils.createTLSConfig(serverKeyPair); - clientTlsConfig = await tlsUtils.createTLSConfig(clientKeyPair); - - mdnsPeer = new MDNS({ - logger: logger.getChild(`${MDNS.name}Peer`), - }); - await mdnsPeer.start({ - id: serverNodeId.at(0), - hostname: serverNodeIdEncoded, - groups: config.defaultsSystem.mdnsGroups, - // This is different so that we do not conflict with the MDNS stack on other Polykey agents when running these tests - port: 64022, - }); - nodeConnectionManagerPeer = new NodeConnectionManager({ - keyRing: keyRingPeer, - logger: logger.getChild(`${NodeConnectionManager.name}Peer`), - nodeGraph: {} as NodeGraph, - tlsConfig: serverTlsConfig, - seedNodes: undefined, - mdns: mdnsPeer, - }); - await nodeConnectionManagerPeer.start({ - host: localHost, - }); - - // Setting up client dependencies - const keysPath = path.join(dataDir, 'keys'); - keyRing = await KeyRing.createKeyRing({ - password, - keysPath, - logger, - passwordOpsLimit: keysUtils.passwordOpsLimits.min, - passwordMemLimit: keysUtils.passwordMemLimits.min, - strictMemoryLock: false, - }); - const dbPath = path.join(dataDir, 'db'); - db = await DB.createDB({ - dbPath, - logger, - }); - nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, - }); - mdns = new MDNS({ - logger: logger.getChild(MDNS.name), - }); - await mdns.start({ - id: clientNodeId.at(0), - hostname: clientNodeIdEncoded, - groups: config.defaultsSystem.mdnsGroups, - // This is different so that we do not conflict with the MDNS stack on other Polykey agents when running these tests - port: 64022, - }); - }); - - afterEach(async () => { - await nodeConnectionManager?.stop(); - await nodeGraph.stop(); - await nodeGraph.destroy(); - await 
db.stop(); - await db.destroy(); - await keyRing.stop(); - await keyRing.destroy(); - await mdns.stop(); - - await nodeConnectionManagerPeer.stop(); - await mdnsPeer.stop(); - }); - - test('should find local node without seedNodes', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - nodeGraph, - mdns, - logger: logger.getChild(`${NodeConnectionManager.name}Local`), - tlsConfig: clientTlsConfig, - seedNodes: undefined, - }); - - await nodeConnectionManager.start({ - host: localHost, - }); - - // Expect no error thrown - const foundAddresses = - await nodeConnectionManager.findNodeLocal(serverNodeId); - - expect(foundAddresses).toBeArray(); - expect(foundAddresses).toIncludeAllPartialMembers([ - { port: nodeConnectionManagerPeer.port, scopes: ['local'] }, - ]); - - await nodeConnectionManager.stop(); - }); - test('acquireConnection should create local connection without seednodes', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - nodeGraph, - mdns, - logger: logger.getChild(`${NodeConnectionManager.name}Local`), - tlsConfig: clientTlsConfig, - seedNodes: undefined, - connectionConnectTimeoutTime: 1000, - }); - - await nodeConnectionManager.start({ - host: localHost, - }); - - const acquire = await nodeConnectionManager.acquireConnection(serverNodeId); - const [release] = await acquire(); - expect(nodeConnectionManager.hasConnection(serverNodeId)).toBeTrue(); - await release(); - await nodeConnectionManager.stop(); - }); -}); diff --git a/tests/nodes/NodeConnectionManager.seednodes.test.ts b/tests/nodes/NodeConnectionManager.seednodes.test.ts deleted file mode 100644 index 037d811ee..000000000 --- a/tests/nodes/NodeConnectionManager.seednodes.test.ts +++ /dev/null @@ -1,702 +0,0 @@ -import type { Host, Port, TLSConfig } from '@/network/types'; -import type { NodeId, NodeIdEncoded } from '@/ids'; -import type { NodeAddress, SeedNodes } from '@/nodes/types'; -import fs from 'fs'; -import path from 'path'; 
-import os from 'os'; -import Logger, { formatting, LogLevel, StreamHandler } from '@matrixai/logger'; -import { DB } from '@matrixai/db'; -import { PromiseCancellable } from '@matrixai/async-cancellable'; -import { events as quicEvents } from '@matrixai/quic'; -import * as nodesUtils from '@/nodes/utils'; -import NodeConnectionManager from '@/nodes/NodeConnectionManager'; -import NodeConnection from '@/nodes/NodeConnection'; -import * as keysUtils from '@/keys/utils'; -import KeyRing from '@/keys/KeyRing'; -import ACL from '@/acl/ACL'; -import GestaltGraph from '@/gestalts/GestaltGraph'; -import NodeGraph from '@/nodes/NodeGraph'; -import Sigchain from '@/sigchain/Sigchain'; -import TaskManager from '@/tasks/TaskManager'; -import NodeManager from '@/nodes/NodeManager'; -import PolykeyAgent from '@/PolykeyAgent'; -import * as utils from '@/utils'; -import * as testNodesUtils from './utils'; -import * as tlsTestUtils from '../utils/tls'; - -describe(`${NodeConnectionManager.name} seednodes test`, () => { - const logger = new Logger(`${NodeConnection.name} test`, LogLevel.WARN, [ - new StreamHandler( - formatting.format`${formatting.level}:${formatting.keys}:${formatting.msg}`, - ), - ]); - const localHost = '127.0.0.1'; - const testAddress: NodeAddress = { - host: '127.0.0.1' as Host, - port: 55555 as Port, - scopes: ['local'], - }; - const password = 'password'; - - function createPromiseCancellableNop() { - return () => new PromiseCancellable((resolve) => resolve()); - } - - let dataDir: string; - - let remotePolykeyAgent1: PolykeyAgent; - let remotePolykeyAgent2: PolykeyAgent; - let remoteAddress1: NodeAddress; - let remoteAddress2: NodeAddress; - let remoteNodeId1: NodeId; - let remoteNodeId2: NodeId; - let remoteNodeIdEncoded1: NodeIdEncoded; - - let keyRing: KeyRing; - let db: DB; - let acl: ACL; - let gestaltGraph: GestaltGraph; - let nodeGraph: NodeGraph; - let sigchain: Sigchain; - let taskManager: TaskManager; - let nodeConnectionManager: 
NodeConnectionManager; - let nodeManager: NodeManager; - let tlsConfig: TLSConfig; - - beforeEach(async () => { - dataDir = await fs.promises.mkdtemp( - path.join(os.tmpdir(), 'polykey-test-'), - ); - - // Setting up remote node - const nodePathA = path.join(dataDir, 'agentA'); - remotePolykeyAgent1 = await PolykeyAgent.createPolykeyAgent({ - password, - options: { - nodePath: nodePathA, - agentServiceHost: localHost, - clientServiceHost: localHost, - keys: { - passwordOpsLimit: keysUtils.passwordOpsLimits.min, - passwordMemLimit: keysUtils.passwordMemLimits.min, - strictMemoryLock: false, - }, - }, - logger: logger.getChild('Agent1'), - }); - remoteNodeId1 = remotePolykeyAgent1.keyRing.getNodeId(); - remoteNodeIdEncoded1 = nodesUtils.encodeNodeId(remoteNodeId1); - remoteAddress1 = { - host: remotePolykeyAgent1.agentServiceHost, - port: remotePolykeyAgent1.agentServicePort, - scopes: ['global'], - }; - - const nodePathB = path.join(dataDir, 'agentB'); - remotePolykeyAgent2 = await PolykeyAgent.createPolykeyAgent({ - password, - options: { - nodePath: nodePathB, - agentServiceHost: localHost, - clientServiceHost: localHost, - keys: { - passwordOpsLimit: keysUtils.passwordOpsLimits.min, - passwordMemLimit: keysUtils.passwordMemLimits.min, - strictMemoryLock: false, - }, - }, - logger: logger.getChild('Agent2'), - }); - remoteNodeId2 = remotePolykeyAgent2.keyRing.getNodeId(); - remoteAddress2 = { - host: remotePolykeyAgent2.agentServiceHost, - port: remotePolykeyAgent2.agentServicePort, - scopes: ['global'], - }; - - // Setting up client dependencies - const keysPath = path.join(dataDir, 'keys'); - keyRing = await KeyRing.createKeyRing({ - password, - keysPath, - passwordOpsLimit: keysUtils.passwordOpsLimits.min, - passwordMemLimit: keysUtils.passwordMemLimits.min, - strictMemoryLock: false, - logger, - }); - const dbPath = path.join(dataDir, 'db'); - db = await DB.createDB({ - dbPath, - logger, - }); - acl = await ACL.createACL({ - db, - logger, - }); - gestaltGraph 
= await GestaltGraph.createGestaltGraph({ - db, - acl, - logger, - }); - nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, - }); - sigchain = await Sigchain.createSigchain({ - db, - keyRing, - logger, - }); - taskManager = await TaskManager.createTaskManager({ - db, - logger, - }); - - tlsConfig = await tlsTestUtils.createTLSConfig(keyRing.keyPair); - tlsConfig = await tlsTestUtils.createTLSConfig(keyRing.keyPair); - }); - - afterEach(async () => { - await taskManager.stopProcessing(); - await taskManager.stopTasks(); - await nodeManager?.stop(); - await nodeConnectionManager?.stop(); - await sigchain.stop(); - await sigchain.destroy(); - await nodeGraph.stop(); - await nodeGraph.destroy(); - await gestaltGraph.stop(); - await gestaltGraph.destroy(); - await acl.stop(); - await acl.destroy(); - await taskManager.stop(); - await db.stop(); - await db.destroy(); - await keyRing.stop(); - await keyRing.destroy(); - - await remotePolykeyAgent1.stop(); - await remotePolykeyAgent2.stop(); - }); - - test('should synchronise nodeGraph', async () => { - const seedNodes = { - [remoteNodeIdEncoded1]: remoteAddress1, - }; - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - connectionConnectTimeoutTime: 1000, - tlsConfig, - seedNodes, - }); - nodeManager = new NodeManager({ - db, - gestaltGraph, - keyRing, - nodeConnectionManager, - nodeGraph, - sigchain, - taskManager, - connectionConnectTimeoutTime: 1000, - logger, - }); - await nodeManager.start(); - // Add seed nodes to the nodeGraph - const setNodeProms = new Array>(); - for (const nodeIdEncoded in seedNodes) { - const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); - if (nodeId == null) utils.never(); - const setNodeProm = nodeManager.setNode( - nodeId, - seedNodes[nodeIdEncoded], - true, - ); - setNodeProms.push(setNodeProm); - } - await Promise.all(setNodeProms); - - const dummyNodeId = 
testNodesUtils.generateRandomNodeId(); - await remotePolykeyAgent1.nodeGraph.setNode(remoteNodeId2, remoteAddress2); - - await nodeConnectionManager.start({ - host: localHost as Host, - }); - await taskManager.startProcessing(); - - await nodeManager.syncNodeGraph(true, 2000); - expect(await nodeGraph.getNode(remoteNodeId1)).toBeDefined(); - expect(await nodeGraph.getNode(remoteNodeId2)).toBeDefined(); - expect(await nodeGraph.getNode(dummyNodeId)).toBeUndefined(); - - await nodeConnectionManager.stop(); - }); - test('syncNodeGraph handles connection rejections from peer', async () => { - // Force close connections. - // @ts-ignore: kidnap protected property - const quicServer = remotePolykeyAgent1.nodeConnectionManager.quicServer; - quicServer.addEventListener( - quicEvents.EventQUICServerConnection.name, - async (evt: quicEvents.EventQUICServerConnection) => { - await evt.detail.stop({ - isApp: true, - errorCode: 42, - reason: Buffer.from('life the universe and everything'), - force: true, - }); - }, - ); - - const seedNodes = { - [remoteNodeIdEncoded1]: remoteAddress1, - }; - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - connectionConnectTimeoutTime: 1000, - tlsConfig, - seedNodes, - }); - nodeManager = new NodeManager({ - db, - gestaltGraph, - keyRing, - nodeConnectionManager, - nodeGraph, - sigchain, - taskManager, - connectionConnectTimeoutTime: 1000, - logger, - }); - await nodeManager.start(); - // Add seed nodes to the nodeGraph - const setNodeProms = new Array>(); - for (const nodeIdEncoded in seedNodes) { - const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); - if (nodeId == null) utils.never(); - const setNodeProm = nodeManager.setNode( - nodeId, - seedNodes[nodeIdEncoded], - true, - ); - setNodeProms.push(setNodeProm); - } - await Promise.all(setNodeProms); - - await remotePolykeyAgent1.nodeGraph.setNode(remoteNodeId2, remoteAddress2); - - await 
nodeConnectionManager.start({ - host: localHost as Host, - }); - await taskManager.startProcessing(); - - await nodeManager.syncNodeGraph(true, 2000); - - await nodeConnectionManager.stop(); - }); - test('syncNodeGraph handles own nodeId', async () => { - const localNodeId = keyRing.getNodeId(); - const seedNodes: SeedNodes = { - [nodesUtils.encodeNodeId(localNodeId)]: { - host: '127.0.0.1' as Host, - port: 55123 as Port, - scopes: ['global'], - }, - }; - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - connectionConnectTimeoutTime: 1000, - tlsConfig, - seedNodes, - }); - nodeManager = new NodeManager({ - db, - gestaltGraph, - keyRing, - nodeConnectionManager, - nodeGraph, - sigchain, - taskManager, - connectionConnectTimeoutTime: 1000, - logger, - }); - await nodeManager.start(); - // Add seed nodes to the nodeGraph - const setNodeProms = new Array>(); - for (const nodeIdEncoded in seedNodes) { - const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); - if (nodeId == null) utils.never(); - const setNodeProm = nodeManager.setNode( - nodeId, - seedNodes[nodeIdEncoded], - true, - true, - ); - setNodeProms.push(setNodeProm); - } - await Promise.all(setNodeProms); - - await nodeConnectionManager.start({ - host: localHost as Host, - port: 55123 as Port, - }); - await taskManager.startProcessing(); - - // Completes without error - await nodeManager.syncNodeGraph(true, 2000); - - await nodeConnectionManager.stop(); - }); - test('syncNodeGraph handles offline seed node', async () => { - const seedNodes: SeedNodes = { - [nodesUtils.encodeNodeId(remoteNodeId2)]: { - host: '127.0.0.1' as Host, - port: 55124 as Port, - scopes: ['global'], - }, - }; - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - connectionConnectTimeoutTime: 1000, - tlsConfig, - seedNodes, - }); - nodeManager = new NodeManager({ - db, - 
gestaltGraph, - keyRing, - nodeConnectionManager, - nodeGraph, - sigchain, - taskManager, - connectionConnectTimeoutTime: 1000, - logger, - }); - await nodeManager.start(); - // Add seed nodes to the nodeGraph - const setNodeProms = new Array>(); - for (const nodeIdEncoded in seedNodes) { - const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); - if (nodeId == null) utils.never(); - const setNodeProm = nodeManager.setNode( - nodeId, - seedNodes[nodeIdEncoded], - true, - true, - ); - setNodeProms.push(setNodeProm); - } - await Promise.all(setNodeProms); - - await nodeConnectionManager.start({ - host: localHost as Host, - port: 55123 as Port, - }); - await taskManager.startProcessing(); - - // Completes without error - await nodeManager.syncNodeGraph(true, 2000); - - await nodeConnectionManager.stop(); - }); - test('should call refreshBucket when syncing nodeGraph', async () => { - const seedNodes = { - [remoteNodeIdEncoded1]: remoteAddress1, - }; - const mockedRefreshBucket = jest.spyOn( - NodeManager.prototype, - 'refreshBucket', - ); - mockedRefreshBucket.mockImplementation(createPromiseCancellableNop()); - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - connectionConnectTimeoutTime: 1000, - tlsConfig, - seedNodes, - }); - nodeManager = new NodeManager({ - db, - gestaltGraph, - keyRing, - nodeConnectionManager, - nodeGraph, - sigchain, - taskManager, - connectionConnectTimeoutTime: 1000, - logger, - }); - await nodeManager.start(); - // Add seed nodes to the nodeGraph - const setNodeProms = new Array>(); - for (const nodeIdEncoded in seedNodes) { - const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); - if (nodeId == null) utils.never(); - const setNodeProm = nodeManager.setNode( - nodeId, - seedNodes[nodeIdEncoded], - true, - ); - setNodeProms.push(setNodeProm); - } - await Promise.all(setNodeProms); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - await 
taskManager.startProcessing(); - - await remotePolykeyAgent1.nodeGraph.setNode(remoteNodeId2, remoteAddress2); - - await nodeManager.syncNodeGraph(true, 100); - expect(mockedRefreshBucket).toHaveBeenCalled(); - - await nodeConnectionManager.stop(); - }); - test('should handle an offline seed node when synchronising nodeGraph', async () => { - const randomNodeId1 = testNodesUtils.generateRandomNodeId(); - const randomNodeId2 = testNodesUtils.generateRandomNodeId(); - await remotePolykeyAgent1.nodeGraph.setNode(randomNodeId1, testAddress); - await remotePolykeyAgent1.nodeGraph.setNode(remoteNodeId2, remoteAddress2); - await remotePolykeyAgent2.nodeGraph.setNode(randomNodeId2, testAddress); - const mockedRefreshBucket = jest.spyOn( - NodeManager.prototype, - 'refreshBucket', - ); - mockedRefreshBucket.mockImplementation(createPromiseCancellableNop()); - - const seedNodes = { - [remoteNodeIdEncoded1]: remoteAddress1, - }; - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - connectionConnectTimeoutTime: 1000, - tlsConfig, - seedNodes, - }); - nodeManager = new NodeManager({ - db, - gestaltGraph, - keyRing, - nodeConnectionManager, - nodeGraph, - sigchain, - taskManager, - connectionConnectTimeoutTime: 1000, - logger, - }); - await nodeManager.start(); - // Add seed nodes to the nodeGraph - const setNodeProms = new Array>(); - for (const nodeIdEncoded in seedNodes) { - const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); - if (nodeId == null) utils.never(); - const setNodeProm = nodeManager.setNode( - nodeId, - seedNodes[nodeIdEncoded], - true, - ); - setNodeProms.push(setNodeProm); - } - await Promise.all(setNodeProms); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - await taskManager.startProcessing(); - - // This should complete without error - await nodeManager.syncNodeGraph(true, 2000, { - timer: 15000, - }); - // Information on remotes are found - 
expect(await nodeGraph.getNode(remoteNodeId1)).toBeDefined(); - expect(await nodeGraph.getNode(remoteNodeId2)).toBeDefined(); - - await nodeConnectionManager.stop(); - }); - test('should expand the network when nodes enter', async () => { - const mockedRefreshBucket = jest.spyOn( - NodeManager.prototype, - 'refreshBucket', - ); - mockedRefreshBucket.mockImplementation(createPromiseCancellableNop()); - const seedNodes = { - [remoteNodeIdEncoded1]: remoteAddress1, - }; - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - connectionConnectTimeoutTime: 1000, - tlsConfig, - seedNodes, - }); - nodeManager = new NodeManager({ - db, - gestaltGraph, - keyRing, - nodeConnectionManager, - nodeGraph, - sigchain, - taskManager, - connectionConnectTimeoutTime: 1000, - logger, - }); - await nodeManager.start(); - // Add seed nodes to the nodeGraph - const setNodeProms = new Array>(); - for (const nodeIdEncoded in seedNodes) { - const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); - if (nodeId == null) utils.never(); - const setNodeProm = nodeManager.setNode( - nodeId, - seedNodes[nodeIdEncoded], - true, - ); - setNodeProms.push(setNodeProm); - } - await Promise.all(setNodeProms); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - await taskManager.startProcessing(); - - await remotePolykeyAgent1.nodeGraph.setNode(remoteNodeId2, remoteAddress2); - - // We expect the following to happen - // 1. local asks remote 1 for list, remote1 returns information about remote 2 - // 2. local attempts a ping to remote 2 and forms a connection - // 3. 
due to connection establishment local and remote 2 add each others information to their node graph - - await nodeManager.syncNodeGraph(true, 500); - // Local and remote nodes should know each other now - expect(await nodeGraph.getNode(remoteNodeId2)).toBeDefined(); - expect( - await remotePolykeyAgent2.nodeGraph.getNode(keyRing.getNodeId()), - ).toBeDefined(); - - await nodeConnectionManager.stop(); - }); - test('refreshBucket delays should be reset after finding less than 20 nodes', async () => { - const seedNodes = { - [remoteNodeIdEncoded1]: remoteAddress1, - }; - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - connectionConnectTimeoutTime: 1000, - tlsConfig, - seedNodes, - }); - nodeManager = new NodeManager({ - db, - gestaltGraph, - keyRing, - nodeConnectionManager, - nodeGraph, - sigchain, - taskManager, - connectionConnectTimeoutTime: 1000, - logger, - }); - await nodeManager.start(); - // Add seed nodes to the nodeGraph - const setNodeProms = new Array>(); - for (const nodeIdEncoded in seedNodes) { - const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); - if (nodeId == null) utils.never(); - const setNodeProm = nodeManager.setNode( - nodeId, - seedNodes[nodeIdEncoded], - true, - ); - setNodeProms.push(setNodeProm); - } - await Promise.all(setNodeProms); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - await taskManager.startProcessing(); - - // Reset all the refresh bucket timers to a distinct time - for ( - let bucketIndex = 0; - bucketIndex < nodeGraph.nodeIdBits; - bucketIndex++ - ) { - await nodeManager.updateRefreshBucketDelay(bucketIndex, 10000, true); - } - - // Trigger a refreshBucket - await nodeManager.refreshBucket(1); - - for await (const task of taskManager.getTasks('asc', true, [ - 'refreshBucket', - ])) { - expect(task.delay).toBeGreaterThanOrEqual(50000); - } - - await nodeConnectionManager.stop(); - }); - test('simulating hole 
punching with a common node', async () => { - // We can't truly test this without a nat, so we're just going through the motions in this test. - // Will trigger signaling via the seed node, remotePolykeyAgent1 in this case. - const seedNodes = { - [remoteNodeIdEncoded1]: remoteAddress1, - }; - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - connectionConnectTimeoutTime: 1000, - tlsConfig, - seedNodes, - }); - nodeManager = new NodeManager({ - db, - gestaltGraph, - keyRing, - nodeConnectionManager, - nodeGraph, - sigchain, - taskManager, - connectionConnectTimeoutTime: 1000, - logger, - }); - await nodeManager.start(); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - - // Connect remote1 to remote2 - const result1 = await remotePolykeyAgent1.nodeConnectionManager.pingNode( - remoteNodeId2, - [remoteAddress2], - ); - expect(result1).toBeTrue(); - - // Establish connection to remotePolykeyAgent1 - const result2 = await nodeConnectionManager.pingNode(remoteNodeId1, [ - remoteAddress1, - ]); - expect(result2).toBeTrue(); - - // Now we attempt to connect to remotePolykeyAgent2 with signalling - const result3 = await nodeConnectionManager.pingNode(remoteNodeId2, [ - remoteAddress2, - ]); - expect(result3).toBeTrue(); - // Waiting for setNode to propagate - await utils.sleep(100); - }); -}); diff --git a/tests/nodes/NodeConnectionManager.test.ts b/tests/nodes/NodeConnectionManager.test.ts new file mode 100644 index 000000000..c5e80eb2b --- /dev/null +++ b/tests/nodes/NodeConnectionManager.test.ts @@ -0,0 +1,828 @@ +import type { Host, Port } from '@/network/types'; +import type { AgentServerManifest } from '@/nodes/agent/handlers'; +import type { KeyRing } from '@/keys'; +import type { NCMState } from './utils'; +import Logger, { formatting, LogLevel, StreamHandler } from '@matrixai/logger'; +import { Timer } from '@matrixai/timer'; +import { destroyed } from 
'@matrixai/async-init'; +import * as keysUtils from '@/keys/utils'; +import * as nodesEvents from '@/nodes/events'; +import * as nodesErrors from '@/nodes/errors'; +import NodeConnectionManager from '@/nodes/NodeConnectionManager'; +import NodesConnectionSignalFinal from '@/nodes/agent/handlers/NodesConnectionSignalFinal'; +import NodesConnectionSignalInitial from '@/nodes/agent/handlers/NodesConnectionSignalInitial'; +import * as nodesTestUtils from './utils'; +import * as keysTestUtils from '../keys/utils'; +import * as testsUtils from '../utils'; + +describe(`${NodeConnectionManager.name}`, () => { + const logger = new Logger( + `${NodeConnectionManager.name} test`, + LogLevel.WARN, + [ + new StreamHandler( + formatting.format`${formatting.level}:${formatting.keys}:${formatting.msg}`, + ), + ], + ); + const localHost = '127.0.0.1' as Host; + const dummyManifest = {} as AgentServerManifest; + const timeoutTime = 300; + + test('NodeConnectionManager readiness', async () => { + const keyPair = keysUtils.generateKeyPair(); + const nodeId = keysUtils.publicKeyToNodeId(keyPair.publicKey); + const tlsConfig = await testsUtils.createTLSConfig(keyPair); + const dummyKeyRing = { + getNodeId: () => nodeId, + keyPair, + } as KeyRing; + const nodeConnectionManager = new NodeConnectionManager({ + keyRing: dummyKeyRing, + logger: logger.getChild(NodeConnectionManager.name), + tlsConfig: tlsConfig, + }); + await nodeConnectionManager.start({ + agentService: dummyManifest, + host: localHost, + }); + + await nodeConnectionManager.stop(); + }); + test('NodeConnectionManager consecutive start stops', async () => { + const keyPair = keysUtils.generateKeyPair(); + const nodeId = keysUtils.publicKeyToNodeId(keyPair.publicKey); + const tlsConfig = await testsUtils.createTLSConfig(keyPair); + const dummyKeyRing = { + getNodeId: () => nodeId, + keyPair, + } as KeyRing; + const nodeConnectionManager = new NodeConnectionManager({ + keyRing: dummyKeyRing, + logger: 
logger.getChild(NodeConnectionManager.name), + tlsConfig: tlsConfig, + }); + await nodeConnectionManager.start({ + agentService: {} as AgentServerManifest, + host: localHost as Host, + }); + await nodeConnectionManager.stop(); + await nodeConnectionManager.start({ + agentService: {} as AgentServerManifest, + host: localHost as Host, + }); + await nodeConnectionManager.stop(); + }); + + // With constructed NCM and 1 peer + describe('With 1 peer', () => { + let ncmLocal: NCMState; + let ncmPeer1: NCMState; + + beforeEach(async () => { + ncmLocal = await nodesTestUtils.nodeConnectionManagerFactory({ + keyRing: keysTestUtils.createDummyKeyRing(), + createOptions: { + connectionIdleTimeoutTimeMin: 5000, + connectionIdleTimeoutTimeScale: 0, + connectionConnectTimeoutTime: timeoutTime, + }, + startOptions: { + host: localHost, + agentService: () => dummyManifest, + }, + logger: logger.getChild(`${NodeConnectionManager.name}Local`), + }); + + ncmPeer1 = await nodesTestUtils.nodeConnectionManagerFactory({ + keyRing: keysTestUtils.createDummyKeyRing(), + createOptions: { + connectionConnectTimeoutTime: timeoutTime, + }, + startOptions: { + host: localHost, + agentService: () => dummyManifest, + }, + logger: logger.getChild(`${NodeConnectionManager.name}Peer1`), + }); + }); + afterEach(async () => { + await ncmLocal.nodeConnectionManager.stop({ force: true }); + await ncmPeer1.nodeConnectionManager.stop({ force: true }); + }); + + test('can create a connection', async () => { + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + // Should exist in the map now. + expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer1.nodeId), + ).toBeTrue(); + }); + // FIXME: timeout not respecting `connectionConnectTimeoutTime`. 
+ test('connection creation can time out', async () => { + await expect( + ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + 56666 as Port, + ), + ).rejects.toThrow(nodesErrors.ErrorNodeConnectionTimeout); + }); + test('connection creation can time out with time', async () => { + await expect( + ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + 56666 as Port, + { timer: 100 }, + ), + ).rejects.toThrow(nodesErrors.ErrorNodeConnectionTimeout); + }); + test('connection creation can time out with Timer', async () => { + await expect( + ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + 56666 as Port, + { timer: new Timer({ delay: 100 }) }, + ), + ).rejects.toThrow(nodesErrors.ErrorNodeConnectionTimeout); + }); + test('connection can be destroyed', async () => { + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + // Should exist in the map now. 
+ expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer1.nodeId), + ).toBeTrue(); + await ncmLocal.nodeConnectionManager.destroyConnection( + ncmPeer1.nodeId, + true, + ); + expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer1.nodeId), + ).toBeFalse(); + }); + test('a node can have multiple connections', async () => { + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer1.nodeId), + ).toBeTrue(); + expect(ncmLocal.nodeConnectionManager.connectionsActive()).toBe(1); + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer1.nodeId), + ).toBeTrue(); + expect(ncmLocal.nodeConnectionManager.connectionsActive()).toBe(2); + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer1.nodeId), + ).toBeTrue(); + expect(ncmLocal.nodeConnectionManager.connectionsActive()).toBe(3); + }); + test('specific connection for a node can be destroyed', async () => { + const connection1 = await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + const connection2 = await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + const connection3 = await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + expect(ncmLocal.nodeConnectionManager.connectionsActive()).toBe(3); + await ncmLocal.nodeConnectionManager.destroyConnection( + ncmPeer1.nodeId, + true, + connection2.connectionId, + ); + expect(ncmLocal.nodeConnectionManager.connectionsActive()).toBe(2); + expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer1.nodeId), + ).toBeTrue(); + await 
ncmLocal.nodeConnectionManager.destroyConnection( + ncmPeer1.nodeId, + true, + connection1.connectionId, + ); + expect(ncmLocal.nodeConnectionManager.connectionsActive()).toBe(1); + expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer1.nodeId), + ).toBeTrue(); + await ncmLocal.nodeConnectionManager.destroyConnection( + ncmPeer1.nodeId, + true, + connection3.connectionId, + ); + expect(ncmLocal.nodeConnectionManager.connectionsActive()).toBe(0); + expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer1.nodeId), + ).toBeFalse(); + }); + test('all connections for a node can be destroyed', async () => { + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + expect(ncmLocal.nodeConnectionManager.connectionsActive()).toBe(3); + expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer1.nodeId), + ).toBeTrue(); + await ncmLocal.nodeConnectionManager.destroyConnection( + ncmPeer1.nodeId, + true, + ); + expect(ncmLocal.nodeConnectionManager.connectionsActive()).toBe(0); + expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer1.nodeId), + ).toBeFalse(); + }); + test('connection is removed from map when connection ends', async () => { + const connectionPeerCreated = testsUtils.promFromEvent( + ncmPeer1.nodeConnectionManager, + nodesEvents.EventNodeConnectionManagerConnection, + ); + const connectionPeerDestroyed = testsUtils.promFromEvent( + ncmPeer1.nodeConnectionManager, + nodesEvents.EventNodeConnectionDestroyed, + ); + + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer1.nodeId), + ).toBeTrue(); + // Allow time for peer connection to be 
created + await connectionPeerCreated; + const connectionPeer = ncmPeer1.nodeConnectionManager.getConnection( + ncmLocal.nodeId, + )!; + expect(connectionPeer).toBeDefined(); + // Trigger destruction of peer connection + await connectionPeer.connection.destroy({ force: true }); + // Allow time for connection to end + await connectionPeerDestroyed; + // Connections should be removed from map + expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer1.nodeId), + ).toBeFalse(); + expect( + ncmPeer1.nodeConnectionManager.hasConnection(ncmLocal.nodeId), + ).toBeFalse(); + }); + test('established connections can be used', async () => { + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + const connectionAndTimer = ncmLocal.nodeConnectionManager.getConnection( + ncmPeer1.nodeId, + ); + await ncmLocal.nodeConnectionManager.withConnF( + ncmPeer1.nodeId, + async () => { + expect(connectionAndTimer?.usageCount).toBe(1); + expect(connectionAndTimer?.timer).toBeNull(); + }, + ); + expect(connectionAndTimer?.usageCount).toBe(0); + expect(connectionAndTimer?.timer).toBeDefined(); + }); + test('only the lowest connectionId connection is used', async () => { + const connectionsPeerP = testsUtils.promFromEvents( + ncmPeer1.nodeConnectionManager, + nodesEvents.EventNodeConnectionManagerConnection, + 4, + ); + const connectionIdPs = [1, 2, 3, 4].map(async () => { + const connection = + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + return connection.connectionId; + }); + const connectionIds = await Promise.all(connectionIdPs); + connectionIds.sort(); + + await ncmLocal.nodeConnectionManager.withConnF( + ncmPeer1.nodeId, + async (connection) => { + expect(connection.connectionId).toBe(connectionIds[0]); + }, + ); + + await connectionsPeerP; + + // Lowest connection is deterministically the same for the peer too + await 
ncmPeer1.nodeConnectionManager.withConnF( + ncmLocal.nodeId, + async (connection) => { + expect(connection.connectionId).toBe(connectionIds[0]); + }, + ); + }); + test('when a connection is destroyed, the next lowest takes its place', async () => { + const connectionIdPs = [1, 2, 3, 4].map(async () => { + const connection = + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + return connection.connectionId; + }); + const connectionIds = await Promise.all(connectionIdPs); + connectionIds.sort(); + for (const connectionId of connectionIds) { + await ncmLocal.nodeConnectionManager.withConnF( + ncmPeer1.nodeId, + async (connection) => { + // Should always be the lowest alive connectionId + expect(connection.connectionId).toBe(connectionId); + }, + ); + await ncmLocal.nodeConnectionManager.destroyConnection( + ncmPeer1.nodeId, + true, + connectionId, + ); + } + }); + test('throws when connection is missing', async () => { + // TODO: check actual error thrown + await expect( + ncmLocal.nodeConnectionManager.withConnF( + ncmPeer1.nodeId, + async () => {}, + ), + ).rejects.toThrow(); + }); + test('can handle concurrent connections between local and peer', async () => { + const connectionsLocalP = testsUtils.promFromEvents( + ncmLocal.nodeConnectionManager, + nodesEvents.EventNodeConnectionManagerConnection, + 2, + ); + const connectionsPeer1P = testsUtils.promFromEvents( + ncmPeer1.nodeConnectionManager, + nodesEvents.EventNodeConnectionManagerConnection, + 2, + ); + await Promise.all([ + connectionsLocalP, + connectionsPeer1P, + ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ), + ncmPeer1.nodeConnectionManager.createConnection( + [ncmLocal.nodeId], + localHost, + ncmLocal.port, + ), + ]); + + expect(ncmLocal.nodeConnectionManager.connectionsActive()).toBe(2); + expect(ncmPeer1.nodeConnectionManager.connectionsActive()).toBe(2); + }); + test('can handle 
multiple concurrent connections between local and peer', async () => { + const connectionsLocalP = testsUtils.promFromEvents( + ncmLocal.nodeConnectionManager, + nodesEvents.EventNodeConnectionManagerConnection, + 6, + ); + const connectionsPeer1P = testsUtils.promFromEvents( + ncmPeer1.nodeConnectionManager, + nodesEvents.EventNodeConnectionManagerConnection, + 6, + ); + await Promise.all([ + connectionsLocalP, + connectionsPeer1P, + ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ), + ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ), + ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ), + ncmPeer1.nodeConnectionManager.createConnection( + [ncmLocal.nodeId], + localHost, + ncmLocal.port, + ), + ncmPeer1.nodeConnectionManager.createConnection( + [ncmLocal.nodeId], + localHost, + ncmLocal.port, + ), + ncmPeer1.nodeConnectionManager.createConnection( + [ncmLocal.nodeId], + localHost, + ncmLocal.port, + ), + ]); + + expect(ncmLocal.nodeConnectionManager.connectionsActive()).toBe(6); + expect(ncmPeer1.nodeConnectionManager.connectionsActive()).toBe(6); + }); + test('connection should timeout after connectionIdleTimeoutTime', async () => { + // Modify the timeout time value + const connectionDestroyProm = testsUtils.promFromEvent( + ncmLocal.nodeConnectionManager, + nodesEvents.EventNodeConnectionDestroyed, + ); + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + // Wait for timeout. 
+ const timeStart = Date.now(); + await connectionDestroyProm; + const duration = Date.now() - timeStart; + expect(duration).toBeGreaterThan(4000); + expect(duration).toBeLessThan(7000); + }); + test('non primary connections should timeout with primary in use', async () => { + // Modify the timeout time value + const connectionDestroyProm1 = testsUtils.promFromEvents( + ncmLocal.nodeConnectionManager, + nodesEvents.EventNodeConnectionDestroyed, + 2, + ); + const connectionDestroyProm2 = testsUtils.promFromEvents( + ncmLocal.nodeConnectionManager, + nodesEvents.EventNodeConnectionDestroyed, + 3, + ); + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + // Wait for timeout. + await ncmLocal.nodeConnectionManager.withConnF( + ncmPeer1.nodeId, + async () => { + expect(ncmLocal.nodeConnectionManager.connectionsActive()).toBe(3); + await connectionDestroyProm1; + expect(ncmLocal.nodeConnectionManager.connectionsActive()).toBe(1); + }, + ); + await connectionDestroyProm2; + expect(ncmLocal.nodeConnectionManager.connectionsActive()).toBe(0); + }); + test('can list active connections', async () => { + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + + const connectionsList = ncmLocal.nodeConnectionManager.listConnections(); + expect(connectionsList).toHaveLength(3); + for (const connection of connectionsList) { + expect(connection.address.host).toBe(localHost); + expect(connection.address.port).toBe( + 
ncmPeer1.nodeConnectionManager.port, + ); + expect(connection.usageCount).toBe(0); + } + }); + test('stopping NodeConnectionManager should destroy all connections', async () => { + const connection1 = await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + const connection2 = await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + const connection3 = await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + + expect(ncmLocal.nodeConnectionManager.connectionsActive()).toBe(3); + expect(connection1[destroyed]).toBeFalse(); + expect(connection2[destroyed]).toBeFalse(); + expect(connection3[destroyed]).toBeFalse(); + + await ncmLocal.nodeConnectionManager.stop({ force: true }); + + expect(ncmLocal.nodeConnectionManager.connectionsActive()).toBe(0); + expect(connection1[destroyed]).toBeTrue(); + expect(connection2[destroyed]).toBeTrue(); + expect(connection3[destroyed]).toBeTrue(); + }); + test('should createConnectionMultiple with single address', async () => { + await ncmLocal.nodeConnectionManager.createConnectionMultiple( + [ncmPeer1.nodeId], + [[localHost, ncmPeer1.port]], + ); + // Should exist in the map now. + expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer1.nodeId), + ).toBeTrue(); + }); + test('should createConnectionMultiple with multiple address', async () => { + await ncmLocal.nodeConnectionManager.createConnectionMultiple( + [ncmPeer1.nodeId], + [ + [localHost, ncmPeer1.port], + ['127.0.0.2' as Host, ncmPeer1.port], + ['127.0.0.3' as Host, ncmPeer1.port], + ], + ); + // Should exist in the map now. 
+ expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer1.nodeId), + ).toBeTrue(); + }); + test('should createConnectionMultiple with failures', async () => { + await ncmLocal.nodeConnectionManager.createConnectionMultiple( + [ncmPeer1.nodeId], + [ + ['127.0.0.2' as Host, 12345 as Port], + ['127.0.0.3' as Host, 12346 as Port], + [localHost, ncmPeer1.port], + ], + ); + // Should exist in the map now. + expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer1.nodeId), + ).toBeTrue(); + }); + test('createConnectionMultiple fails to connect', async () => { + await expect( + ncmLocal.nodeConnectionManager.createConnectionMultiple( + [ncmPeer1.nodeId], + [ + ['127.0.0.2' as Host, 12345 as Port], + ['127.0.0.3' as Host, 12346 as Port], + ], + ), + ).rejects.toThrow(nodesErrors.ErrorNodeConnectionTimeout); + // Should not have connection + expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer1.nodeId), + ).toBeFalse(); + }); + }); + + describe('With 2 peers', () => { + let ncmLocal: NCMState; + let ncmPeer1: NCMState; + let ncmPeer2: NCMState; + + beforeEach(async () => { + ncmLocal = await nodesTestUtils.nodeConnectionManagerFactory({ + keyRing: keysTestUtils.createDummyKeyRing(), + createOptions: { + connectionConnectTimeoutTime: timeoutTime, + }, + startOptions: { + host: localHost, + agentService: () => dummyManifest, + }, + logger: logger.getChild(`${NodeConnectionManager.name}Local`), + }); + + ncmPeer1 = await nodesTestUtils.nodeConnectionManagerFactory({ + keyRing: keysTestUtils.createDummyKeyRing(), + createOptions: { + connectionConnectTimeoutTime: timeoutTime, + }, + startOptions: { + host: localHost, + agentService: (nodeConnectionManager) => + ({ + nodesConnectionSignalFinal: new NodesConnectionSignalFinal({ + nodeConnectionManager, + logger, + }), + nodesConnectionSignalInitial: new NodesConnectionSignalInitial({ + nodeConnectionManager, + }), + }) as AgentServerManifest, + }, + logger: 
logger.getChild(`${NodeConnectionManager.name}Peer1`), + }); + + ncmPeer2 = await nodesTestUtils.nodeConnectionManagerFactory({ + keyRing: keysTestUtils.createDummyKeyRing(), + createOptions: { + connectionConnectTimeoutTime: timeoutTime, + }, + startOptions: { + host: localHost, + agentService: (nodeConnectionManager) => + ({ + nodesConnectionSignalFinal: new NodesConnectionSignalFinal({ + nodeConnectionManager, + logger, + }), + nodesConnectionSignalInitial: new NodesConnectionSignalInitial({ + nodeConnectionManager, + }), + }) as AgentServerManifest, + }, + logger: logger.getChild(`${NodeConnectionManager.name}Peer2`), + }); + }); + afterEach(async () => { + await ncmLocal.nodeConnectionManager.stop({ force: true }); + await ncmPeer1.nodeConnectionManager.stop({ force: true }); + await ncmPeer2.nodeConnectionManager.stop({ force: true }); + }); + + test('can create a connection with signaling', async () => { + // Create initial connections of local -> peer1 -> peer2 + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + await ncmPeer1.nodeConnectionManager.createConnection( + [ncmPeer2.nodeId], + localHost, + ncmPeer2.port, + ); + + // Should be able to create connection from local to peer2 using peer1 as signaller + await ncmLocal.nodeConnectionManager.createConnectionPunch( + ncmPeer2.nodeId, + ncmPeer1.nodeId, + ); + expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer2.nodeId), + ).toBeTrue(); + }); + test('createConnectionPunch fails with no signaler', async () => { + // Can't signal without signaler connected + // TODO: check error type + await expect( + ncmLocal.nodeConnectionManager.createConnectionPunch( + ncmPeer2.nodeId, + ncmPeer1.nodeId, + ), + ).rejects.toThrow(); + }); + test('createConnectionPunch fails with signaller missing connection to target', async () => { + // Create initial connections of local -> peer1 + await ncmLocal.nodeConnectionManager.createConnection( + 
[ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + // Can't signal without signaler connected + // TODO: check error type + await expect( + ncmLocal.nodeConnectionManager.createConnectionPunch( + ncmPeer2.nodeId, + ncmPeer1.nodeId, + ), + ).rejects.toThrow(); + }); + test('can create multiple connections with signaling', async () => { + // Create initial connections of local -> peer1 -> peer2 + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + await ncmPeer1.nodeConnectionManager.createConnection( + [ncmPeer2.nodeId], + localHost, + ncmPeer2.port, + ); + const holePunchSpy = jest.spyOn( + ncmPeer2.nodeConnectionManager, + 'holePunch', + ); + + // Should be able to create connection from local to peer2 using peer1 as signaller + await Promise.all([ + ncmLocal.nodeConnectionManager.createConnectionPunch( + ncmPeer2.nodeId, + ncmPeer1.nodeId, + ), + ncmLocal.nodeConnectionManager.createConnectionPunch( + ncmPeer2.nodeId, + ncmPeer1.nodeId, + ), + ncmLocal.nodeConnectionManager.createConnectionPunch( + ncmPeer2.nodeId, + ncmPeer1.nodeId, + ), + ]); + expect( + ncmLocal.nodeConnectionManager.hasConnection(ncmPeer2.nodeId), + ).toBeTrue(); + // Should have 3 connections + 1 signaller + expect(ncmLocal.nodeConnectionManager.connectionsActive()).toBe(4); + // Hole punching was only attempted once + expect(holePunchSpy).toHaveBeenCalledTimes(1); + }); + test('can list active connections', async () => { + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer1.nodeId], + localHost, + ncmPeer1.port, + ); + await ncmLocal.nodeConnectionManager.createConnection( + [ncmPeer2.nodeId], + localHost, + ncmPeer2.port, + ); + + const result = ncmLocal.nodeConnectionManager.getClosestConnections( + ncmPeer2.nodeId, + 20, + ); + expect(result).toHaveLength(2); + }); + test.todo('signalling is 
non-blocking'); + test.todo('signalling is rate limited'); + }); +}); diff --git a/tests/nodes/NodeConnectionManager.timeout.test.ts b/tests/nodes/NodeConnectionManager.timeout.test.ts deleted file mode 100644 index 9d5bd3ae5..000000000 --- a/tests/nodes/NodeConnectionManager.timeout.test.ts +++ /dev/null @@ -1,296 +0,0 @@ -import type { Host, Port, TLSConfig } from '@/network/types'; -import type { NodeId } from '@/ids'; -import type { NodeAddress } from '@/nodes/types'; -import type { NodeIdString } from '@/ids'; -import fs from 'fs'; -import path from 'path'; -import os from 'os'; -import Logger, { formatting, LogLevel, StreamHandler } from '@matrixai/logger'; -import { DB } from '@matrixai/db'; -import NodeConnectionManager from '@/nodes/NodeConnectionManager'; -import NodeConnection from '@/nodes/NodeConnection'; -import * as keysUtils from '@/keys/utils'; -import KeyRing from '@/keys/KeyRing'; -import NodeGraph from '@/nodes/NodeGraph'; -import PolykeyAgent from '@/PolykeyAgent'; -import { sleep } from '@/utils'; -import { generateRandomNodeId } from './utils'; -import * as tlsTestUtils from '../utils/tls'; - -describe(`${NodeConnectionManager.name} timeout test`, () => { - const logger = new Logger( - `${NodeConnection.name} timeout test`, - LogLevel.WARN, - [ - new StreamHandler( - formatting.format`${formatting.level}:${formatting.keys}:${formatting.msg}`, - ), - ], - ); - const localHost = '127.0.0.1'; - const password = 'password'; - - let dataDir: string; - - let remotePolykeyAgent1: PolykeyAgent; - let remoteAddress1: NodeAddress; - let remoteNodeId1: NodeId; - - let keyRing: KeyRing; - let db: DB; - let nodeGraph: NodeGraph; - let nodeConnectionManager: NodeConnectionManager; - let tlsConfig: TLSConfig; - - beforeEach(async () => { - dataDir = await fs.promises.mkdtemp( - path.join(os.tmpdir(), 'polykey-test-'), - ); - - // Setting up remote node - const nodePathA = path.join(dataDir, 'agentA'); - remotePolykeyAgent1 = await 
PolykeyAgent.createPolykeyAgent({ - password, - options: { - nodePath: nodePathA, - agentServiceHost: localHost, - clientServiceHost: localHost, - keys: { - passwordOpsLimit: keysUtils.passwordOpsLimits.min, - passwordMemLimit: keysUtils.passwordMemLimits.min, - strictMemoryLock: false, - }, - }, - logger: logger.getChild('AgentA'), - }); - remoteNodeId1 = remotePolykeyAgent1.keyRing.getNodeId(); - remoteAddress1 = { - host: remotePolykeyAgent1.agentServiceHost, - port: remotePolykeyAgent1.agentServicePort, - scopes: ['global'], - }; - - // Setting up client dependencies - const keysPath = path.join(dataDir, 'keys'); - keyRing = await KeyRing.createKeyRing({ - password, - keysPath, - logger, - passwordOpsLimit: keysUtils.passwordOpsLimits.min, - passwordMemLimit: keysUtils.passwordMemLimits.min, - strictMemoryLock: false, - }); - const dbPath = path.join(dataDir, 'db'); - db = await DB.createDB({ - dbPath, - logger, - }); - nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, - }); - tlsConfig = await tlsTestUtils.createTLSConfig(keyRing.keyPair); - }); - - afterEach(async () => { - await nodeConnectionManager?.stop(); - await nodeGraph.stop(); - await nodeGraph.destroy(); - await db.stop(); - await db.destroy(); - await keyRing.stop(); - await keyRing.destroy(); - - await remotePolykeyAgent1.stop(); - }); - - test('connection should timeout after connectionIdleTimeoutTime', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig, - seedNodes: undefined, - connectionConnectTimeoutTime: 1000, - connectionIdleTimeoutTime: 100, - }); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - - await nodeGraph.setNode(remoteNodeId1, remoteAddress1); - // @ts-ignore: kidnap connections - const connections = nodeConnectionManager.connections; - // @ts-ignore: kidnap connections - const connectionLocks = 
nodeConnectionManager.connectionLocks; - await nodeConnectionManager.withConnF(remoteNodeId1, async () => {}); - const connAndLock = connections.get( - remoteNodeId1.toString() as NodeIdString, - ); - // Check entry is in map and lock is released - expect(connAndLock).toBeDefined(); - expect(connectionLocks.isLocked(remoteNodeId1.toString())).toBeFalsy(); - expect(connAndLock?.timer).toBeDefined(); - expect(connAndLock?.connection).toBeDefined(); - - // Wait for timeout - await sleep(300); - - const finalConnAndLock = connections.get( - remoteNodeId1.toString() as NodeIdString, - ); - expect(finalConnAndLock).toBeUndefined(); - expect(connectionLocks.isLocked(remoteNodeId1.toString())).toBeFalsy(); - - await nodeConnectionManager.stop(); - }); - test('withConnection should extend timeout', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig, - seedNodes: undefined, - connectionIdleTimeoutTime: 1000, - }); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - - await nodeGraph.setNode(remoteNodeId1, remoteAddress1); - - // @ts-ignore: kidnap connections - const connections = nodeConnectionManager.connections; - // @ts-ignore: kidnap connections - const connectionLocks = nodeConnectionManager.connectionLocks; - await nodeConnectionManager.withConnF(remoteNodeId1, async () => {}); - const connAndLock = connections.get( - remoteNodeId1.toString() as NodeIdString, - ); - // Check entry is in map and lock is released - expect(connAndLock).toBeDefined(); - expect(connectionLocks.isLocked(remoteNodeId1.toString())).toBeFalsy(); - expect(connAndLock?.timer).toBeDefined(); - expect(connAndLock?.connection).toBeDefined(); - - // WithConnection should extend timeout to 1500ms - await sleep(500); - await nodeConnectionManager.withConnF(remoteNodeId1, async () => { - // Do noting - }); - - // Connection should still exist after 1250 secs - await 
sleep(750); - const midConnAndLock = connections.get( - remoteNodeId1.toString() as NodeIdString, - ); - expect(midConnAndLock).toBeDefined(); - expect(connectionLocks.isLocked(remoteNodeId1.toString())).toBeFalsy(); - expect(midConnAndLock?.timer).toBeDefined(); - expect(midConnAndLock?.connection).toBeDefined(); - - // Should be dead after 1750 secs - await sleep(500); - const finalConnAndLock = connections.get( - remoteNodeId1.toString() as NodeIdString, - ); - expect(finalConnAndLock).not.toBeDefined(); - expect(connectionLocks.isLocked(remoteNodeId1.toString())).toBeFalsy(); - - await nodeConnectionManager.stop(); - }); - test('Connection can time out', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig, - seedNodes: undefined, - connectionIdleTimeoutTime: 5000, - connectionConnectTimeoutTime: 200, - }); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - - const randomNodeId = generateRandomNodeId(); - await nodeGraph.setNode(randomNodeId, { - host: '127.0.0.1' as Host, - port: 12321 as Port, - scopes: ['local'], - }); - await expect( - nodeConnectionManager.withConnF(randomNodeId, async () => { - // Do nothing - }), - ).rejects.toThrow(); - }); - test('Connection can time out with passed in timer', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig, - seedNodes: undefined, - connectionIdleTimeoutTime: 5000, - connectionConnectTimeoutTime: 200, - }); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - - const randomNodeId = generateRandomNodeId(); - await nodeGraph.setNode(randomNodeId, { - host: '127.0.0.1' as Host, - port: 12321 as Port, - scopes: ['local'], - }); - await expect( - nodeConnectionManager.withConnF( - randomNodeId, - async () => { - // Do nothing - }, - { - timer: 100, - }, - ), - 
).rejects.toThrow(); - }); - test('Connection can time out with passed in timer and signal', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - logger: logger.getChild(NodeConnectionManager.name), - nodeGraph, - tlsConfig, - seedNodes: undefined, - connectionIdleTimeoutTime: 5000, - connectionConnectTimeoutTime: 200, - }); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - - const randomNodeId = generateRandomNodeId(); - await nodeGraph.setNode(randomNodeId, { - host: '127.0.0.1' as Host, - port: 12321 as Port, - scopes: ['local'], - }); - const abortController = new AbortController(); - const ctx = { - timer: 100, - signal: abortController.signal, - }; - await expect( - nodeConnectionManager.withConnF( - randomNodeId, - async () => { - // Do nothing - }, - ctx, - ), - ).rejects.toThrow(); - }); -}); diff --git a/tests/nodes/NodeGraph.test.ts b/tests/nodes/NodeGraph.test.ts index 01717a5f4..ee213ed14 100644 --- a/tests/nodes/NodeGraph.test.ts +++ b/tests/nodes/NodeGraph.test.ts @@ -1,9 +1,9 @@ import type { + NodeContactAddress, + NodeContact, + NodeContactAddressData, NodeId, - NodeData, - NodeAddress, NodeBucket, - NodeBucketIndex, } from '@/nodes/types'; import type { Key } from '@/keys/types'; import os from 'os'; @@ -11,15 +11,16 @@ import path from 'path'; import fs from 'fs'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; -import { IdInternal } from '@matrixai/id'; -import { testProp, fc } from '@fast-check/jest'; +import { test, fc } from '@fast-check/jest'; import NodeGraph from '@/nodes/NodeGraph'; import KeyRing from '@/keys/KeyRing'; import * as keysUtils from '@/keys/utils'; -import * as nodesUtils from '@/nodes/utils'; import * as nodesErrors from '@/nodes/errors'; +import * as nodesUtils from '@/nodes/utils'; import * as utils from '@/utils'; +import { encodeNodeId } from '@/ids'; import * as testNodesUtils from './utils'; +import { 
nodeIdContactPairArb } from './utils'; describe(`${NodeGraph.name} test`, () => { const password = 'password'; @@ -31,6 +32,8 @@ describe(`${NodeGraph.name} test`, () => { let dbKey: Buffer; let dbPath: string; let db: DB; + let nodeGraph: NodeGraph; + beforeEach(async () => { dataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), @@ -39,7 +42,7 @@ describe(`${NodeGraph.name} test`, () => { keyRing = await KeyRing.createKeyRing({ password, keysPath, - logger, + logger: logger.getChild(KeyRing.name), passwordOpsLimit: keysUtils.passwordOpsLimits.min, passwordMemLimit: keysUtils.passwordMemLimits.min, strictMemoryLock: false, @@ -48,7 +51,7 @@ describe(`${NodeGraph.name} test`, () => { dbPath = `${dataDir}/db`; db = await DB.createDB({ dbPath, - logger, + logger: logger.getChild(DB.name), crypto: { key: dbKey, ops: { @@ -67,8 +70,14 @@ describe(`${NodeGraph.name} test`, () => { }, }, }); + nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyRing, + logger: logger.getChild(NodeGraph.name), + }); }); afterEach(async () => { + await nodeGraph.stop(); await db.stop(); await db.destroy(); await keyRing.stop(); @@ -77,1103 +86,1208 @@ describe(`${NodeGraph.name} test`, () => { recursive: true, }); }); - test('get, set and unset node IDs', async () => { - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, - }); - let nodeId1: NodeId; - do { - nodeId1 = testNodesUtils.generateRandomNodeId(); - } while (nodeId1.equals(keyRing.getNodeId())); - let nodeId2: NodeId; - do { - nodeId2 = testNodesUtils.generateRandomNodeId(); - } while (nodeId2.equals(keyRing.getNodeId())); - await nodeGraph.setNode(nodeId1, { - host: '10.0.0.1', - port: 1234, - } as NodeAddress); - const nodeData1 = await nodeGraph.getNode(nodeId1); - expect(nodeData1).toStrictEqual({ - address: { - host: '10.0.0.1', - port: 1234, - }, - lastUpdated: expect.any(Number), + describe('setNodeContact', () => { + test.prop([testNodesUtils.nodeIdArb, 
testNodesUtils.nodeContactPairArb], { + numRuns: 20, + })('should set with single address', async (nodeId, nodeContactPair) => { + const nodeContact = { + [nodeContactPair.nodeContactAddress]: + nodeContactPair.nodeContactAddressData, + }; + await nodeGraph.setNodeContact(nodeId, nodeContact); }); - await utils.sleep(1000); - await nodeGraph.setNode(nodeId2, { - host: 'abc.com', - port: 8978, - } as NodeAddress); - const nodeData2 = await nodeGraph.getNode(nodeId2); - expect(nodeData2).toStrictEqual({ - address: { - host: 'abc.com', - port: 8978, + test.prop( + [ + testNodesUtils.nodeIdArb, + testNodesUtils.nodeContactPairArb, + testNodesUtils.nodeContactPairArb, + ], + { numRuns: 20 }, + )( + 'should set with multiple addresses', + async (nodeId, nodeContactPair1, nodeContactPair2) => { + const nodeContact = { + [nodeContactPair1.nodeContactAddress]: + nodeContactPair1.nodeContactAddressData, + [nodeContactPair2.nodeContactAddress]: + nodeContactPair2.nodeContactAddressData, + }; + await nodeGraph.setNodeContact(nodeId, nodeContact); }, - lastUpdated: expect.any(Number), - }); - expect(nodeData2!.lastUpdated > nodeData1!.lastUpdated).toBe(true); - const nodes = await utils.asyncIterableArray(nodeGraph.getNodes()); - expect(nodes).toHaveLength(2); - expect(nodes).toContainEqual([ - nodeId1, - { - address: { - host: '10.0.0.1', - port: 1234, - }, - lastUpdated: expect.any(Number), + ); + test.prop( + [ + testNodesUtils.nodeIdArb, + testNodesUtils.nodeIdArb, + testNodesUtils.nodeContactPairArb, + testNodesUtils.nodeContactPairArb, + ], + { numRuns: 10 }, + )( + 'should set with multiple nodes', + async (nodeId1, nodeId2, nodeContactPair1, nodeContactPair2) => { + const nodeContact1 = { + [nodeContactPair1.nodeContactAddress]: + nodeContactPair1.nodeContactAddressData, + }; + const nodeContact2 = { + [nodeContactPair2.nodeContactAddress]: + nodeContactPair2.nodeContactAddressData, + }; + await nodeGraph.setNodeContact(nodeId1, nodeContact1); + await 
nodeGraph.setNodeContact(nodeId2, nodeContact2); }, - ]); - expect(nodes).toContainEqual([ - nodeId2, - { - address: { - host: 'abc.com', - port: 8978, + ); + test('should increment bucket count', async () => { + const nodeContact: NodeContact = { + ['someAddress' as NodeContactAddress]: { + mode: 'direct', + connectedTime: 0, + scopes: [], }, - lastUpdated: expect.any(Number), - }, - ]); - await nodeGraph.unsetNode(nodeId1); - expect(await nodeGraph.getNode(nodeId1)).toBeUndefined(); - expect(await utils.asyncIterableArray(nodeGraph.getNodes())).toStrictEqual([ - [ - nodeId2, - { - address: { - host: 'abc.com', - port: 8978, - }, - lastUpdated: expect.any(Number), + }; + expect(await nodeGraph.getBucketMetaProp(100, 'count')).toBe(0); + await nodeGraph.setNodeContact( + testNodesUtils.generateNodeIdForBucket(keyRing.getNodeId(), 100, 0), + nodeContact, + ); + expect(await nodeGraph.getBucketMetaProp(100, 'count')).toBe(1); + await nodeGraph.setNodeContact( + testNodesUtils.generateNodeIdForBucket(keyRing.getNodeId(), 100, 1), + nodeContact, + ); + expect(await nodeGraph.getBucketMetaProp(100, 'count')).toBe(2); + }); + test('should throw when bucket limit exceeded', async () => { + const nodeContact: NodeContact = { + ['someAddress' as NodeContactAddress]: { + mode: 'direct', + connectedTime: 0, + scopes: [], }, + }; + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + await nodeGraph.setNodeContact( + testNodesUtils.generateNodeIdForBucket(keyRing.getNodeId(), 100, i), + nodeContact, + ); + } + await expect( + nodeGraph.setNodeContact( + testNodesUtils.generateNodeIdForBucket( + keyRing.getNodeId(), + 100, + nodeGraph.nodeBucketLimit, + ), + nodeContact, + ), + ).rejects.toThrow(nodesErrors.ErrorNodeGraphBucketLimit); + }); + test.prop( + [ + testNodesUtils.nodeIdArb, + testNodesUtils.nodeContactAddressArb, + testNodesUtils.nodeContactAddressDataArb, + testNodesUtils.nodeContactAddressDataArb, ], - ]); - await nodeGraph.unsetNode(nodeId2); - await 
nodeGraph.stop(); + { numRuns: 20 }, + )( + 'should update bucket lastUpdatedTime', + async ( + nodeId, + nodeContactAddress, + nodeContactAddressData1, + nodeContactAddressData2, + ) => { + await nodeGraph.setNodeContact(nodeId, { + [nodeContactAddress]: nodeContactAddressData1, + }); + expect(await nodeGraph.getConnectedTime(nodeId)).toBe( + nodeContactAddressData1.connectedTime, + ); + await nodeGraph.setNodeContact(nodeId, { + [nodeContactAddress]: nodeContactAddressData2, + }); + expect(await nodeGraph.getConnectedTime(nodeId)).toBe( + nodeContactAddressData2.connectedTime, + ); + }, + ); }); - test('get all nodes', async () => { - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, - }); - let nodeIds = Array.from({ length: 25 }, () => { - return testNodesUtils.generateRandomNodeId(); + describe('getNodeContact', () => { + test.prop([testNodesUtils.nodeIdArb, testNodesUtils.nodeContactPairArb], { + numRuns: 20, + })('should get with single address', async (nodeId, nodeContactPair1) => { + const nodeContact = { + [nodeContactPair1.nodeContactAddress]: + nodeContactPair1.nodeContactAddressData, + }; + await nodeGraph.setNodeContact(nodeId, nodeContact); + expect(await nodeGraph.getNodeContact(nodeId)).toMatchObject(nodeContact); }); - nodeIds = nodeIds.filter((nodeId) => !nodeId.equals(keyRing.getNodeId())); - let bucketIndexes: Array; - let nodes: NodeBucket; - nodes = await utils.asyncIterableArray(nodeGraph.getNodes()); - expect(nodes).toHaveLength(0); - for (const nodeId of nodeIds) { - await utils.sleep(100); - await nodeGraph.setNode(nodeId, { - host: '127.0.0.1', - port: 55555, - } as NodeAddress); - } - nodes = await utils.asyncIterableArray(nodeGraph.getNodes()); - expect(nodes).toHaveLength(25); - // Sorted by bucket indexes ascending - bucketIndexes = nodes.map(([nodeId]) => - nodesUtils.bucketIndex(keyRing.getNodeId(), nodeId), - ); - expect( - bucketIndexes.slice(1).every((bucketIndex, i) => { - return bucketIndexes[i] <= 
bucketIndex; - }), - ).toBe(true); - // Sorted by bucket indexes ascending explicitly - nodes = await utils.asyncIterableArray(nodeGraph.getNodes('asc')); - bucketIndexes = nodes.map(([nodeId]) => - nodesUtils.bucketIndex(keyRing.getNodeId(), nodeId), + test.prop( + [ + testNodesUtils.nodeIdArb, + testNodesUtils.nodeContactPairArb, + testNodesUtils.nodeContactPairArb, + ], + { numRuns: 20 }, + )( + 'should get with multiple addresses', + async (nodeId, nodeContactPair1, nodeContactPair2) => { + const nodeContact = { + [nodeContactPair1.nodeContactAddress]: + nodeContactPair1.nodeContactAddressData, + [nodeContactPair2.nodeContactAddress]: + nodeContactPair2.nodeContactAddressData, + }; + await nodeGraph.setNodeContact(nodeId, nodeContact); + expect(await nodeGraph.getNodeContact(nodeId)).toMatchObject( + nodeContact, + ); + }, ); - expect( - bucketIndexes.slice(1).every((bucketIndex, i) => { - return bucketIndexes[i] <= bucketIndex; - }), - ).toBe(true); - nodes = await utils.asyncIterableArray(nodeGraph.getNodes('desc')); - expect(nodes).toHaveLength(25); - // Sorted by bucket indexes descending - bucketIndexes = nodes.map(([nodeId]) => - nodesUtils.bucketIndex(keyRing.getNodeId(), nodeId), + test.prop( + [ + testNodesUtils.nodeIdArb, + testNodesUtils.nodeIdArb, + testNodesUtils.nodeContactPairArb, + testNodesUtils.nodeContactPairArb, + ], + { numRuns: 10 }, + )( + 'should get with multiple nodes', + async (nodeId1, nodeId2, nodeContactPair1, nodeContactPair2) => { + const nodeContact1 = { + [nodeContactPair1.nodeContactAddress]: + nodeContactPair1.nodeContactAddressData, + }; + const nodeContact2 = { + [nodeContactPair2.nodeContactAddress]: + nodeContactPair2.nodeContactAddressData, + }; + await nodeGraph.setNodeContact(nodeId1, nodeContact1); + await nodeGraph.setNodeContact(nodeId2, nodeContact2); + expect(await nodeGraph.getNodeContact(nodeId1)).toMatchObject( + nodeContact1, + ); + expect(await nodeGraph.getNodeContact(nodeId2)).toMatchObject( + nodeContact2, 
+ ); + }, ); - expect( - bucketIndexes.slice(1).every((bucketIndex, i) => { - return bucketIndexes[i] >= bucketIndex; - }), - ).toBe(true); - await nodeGraph.stop(); }); - test('setting same node ID throws error', async () => { - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, - }); - await expect( - nodeGraph.setNode(keyRing.getNodeId(), { - host: '127.0.0.1', - port: 55555, - } as NodeAddress), - ).rejects.toThrow(nodesErrors.ErrorNodeGraphSameNodeId); - await nodeGraph.stop(); + describe('getNodeContacts', () => { + test.prop( + [ + testNodesUtils.nodeIdArb, + testNodesUtils.nodeIdArb, + testNodesUtils.nodeContactArb, + testNodesUtils.nodeContactArb, + ], + { numRuns: 1 }, + )( + 'should get all nodeContacts', + async (nodeId1, nodeId2, nodeContact1, nodeContact2) => { + await nodeGraph.setNodeContact(nodeId1, nodeContact1); + await nodeGraph.setNodeContact(nodeId2, nodeContact2); + + const results: Array<[NodeId, NodeContact]> = []; + for await (const result of nodeGraph.getNodeContacts()) { + results.push(result); + } + expect(results.length).toBe(2); + for (const [nodeId, nodeContact] of results) { + if (nodeId1.equals(nodeId)) { + expect(Object.keys(nodeContact).length).toBe( + Object.keys(nodeContact1).length, + ); + expect(nodeContact).toMatchObject(nodeContact1); + } + if (nodeId2.equals(nodeId)) { + expect(Object.keys(nodeContact).length).toBe( + Object.keys(nodeContact2).length, + ); + expect(nodeContact).toMatchObject(nodeContact2); + } + } + }, + ); }); - test('get bucket with 1 node', async () => { - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, + describe('setNodeContactAddressData', () => { + test.prop([testNodesUtils.nodeIdArb, testNodesUtils.nodeContactPairArb], { + numRuns: 20, + })('should set with single address', async (nodeId, nodeContactPair) => { + await nodeGraph.setNodeContactAddressData( + nodeId, + nodeContactPair.nodeContactAddress, + 
nodeContactPair.nodeContactAddressData, + ); }); - let nodeId: NodeId; - do { - nodeId = testNodesUtils.generateRandomNodeId(); - } while (nodeId.equals(keyRing.getNodeId())); - // Set one node - await nodeGraph.setNode(nodeId, { - host: '127.0.0.1', - port: 55555, - } as NodeAddress); - const bucketIndex = nodesUtils.bucketIndex(keyRing.getNodeId(), nodeId); - const bucket = await nodeGraph.getBucket(bucketIndex); - expect(bucket).toHaveLength(1); - expect(bucket[0]).toStrictEqual([ - nodeId, - { - address: { - host: '127.0.0.1', - port: 55555, - }, - lastUpdated: expect.any(Number), + test.prop( + [ + testNodesUtils.nodeIdArb, + testNodesUtils.nodeContactPairArb, + testNodesUtils.nodeContactPairArb, + ], + { numRuns: 20 }, + )( + 'should set with multiple addresses', + async (nodeId, nodeContactPair1, nodeContactPair2) => { + await nodeGraph.setNodeContactAddressData( + nodeId, + nodeContactPair1.nodeContactAddress, + nodeContactPair1.nodeContactAddressData, + ); + await nodeGraph.setNodeContactAddressData( + nodeId, + nodeContactPair2.nodeContactAddress, + nodeContactPair2.nodeContactAddressData, + ); + }, + ); + test.prop( + [ + testNodesUtils.nodeIdArb, + testNodesUtils.nodeIdArb, + testNodesUtils.nodeContactPairArb, + testNodesUtils.nodeContactPairArb, + ], + { numRuns: 10 }, + )( + 'should set with multiple nodes', + async (nodeId1, nodeId2, nodeContactPair1, nodeContactPair2) => { + await nodeGraph.setNodeContactAddressData( + nodeId1, + nodeContactPair1.nodeContactAddress, + nodeContactPair1.nodeContactAddressData, + ); + await nodeGraph.setNodeContactAddressData( + nodeId2, + nodeContactPair2.nodeContactAddress, + nodeContactPair2.nodeContactAddressData, + ); }, - ]); - expect(await nodeGraph.getBucketMeta(bucketIndex)).toStrictEqual({ - count: 1, + ); + test('should increment bucket count', async () => { + const nodeContactAddress = 'someAddress' as NodeContactAddress; + const nodeContactAddressData: NodeContactAddressData = { + mode: 'direct', + 
connectedTime: 0, + scopes: [], + }; + expect(await nodeGraph.getBucketMetaProp(100, 'count')).toBe(0); + await nodeGraph.setNodeContactAddressData( + testNodesUtils.generateNodeIdForBucket(keyRing.getNodeId(), 100, 0), + nodeContactAddress, + nodeContactAddressData, + ); + expect(await nodeGraph.getBucketMetaProp(100, 'count')).toBe(1); + await nodeGraph.setNodeContactAddressData( + testNodesUtils.generateNodeIdForBucket(keyRing.getNodeId(), 100, 1), + nodeContactAddress, + nodeContactAddressData, + ); + expect(await nodeGraph.getBucketMetaProp(100, 'count')).toBe(2); }); - // Adjacent bucket should be empty - let bucketIndex_: number; - if (bucketIndex >= nodeId.length * 8 - 1) { - bucketIndex_ = bucketIndex - 1; - } else if (bucketIndex === 0) { - bucketIndex_ = bucketIndex + 1; - } else { - bucketIndex_ = bucketIndex + 1; - } - expect(await nodeGraph.getBucket(bucketIndex_)).toHaveLength(0); - expect(await nodeGraph.getBucketMeta(bucketIndex_)).toStrictEqual({ - count: 0, + test('should throw when bucket limit exceeded', async () => { + const nodeContactAddress = 'someAddress' as NodeContactAddress; + const nodeContactAddressData: NodeContactAddressData = { + mode: 'direct', + connectedTime: 0, + scopes: [], + }; + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + await nodeGraph.setNodeContactAddressData( + testNodesUtils.generateNodeIdForBucket(keyRing.getNodeId(), 100, i), + nodeContactAddress, + nodeContactAddressData, + ); + } + await expect( + nodeGraph.setNodeContactAddressData( + testNodesUtils.generateNodeIdForBucket( + keyRing.getNodeId(), + 100, + nodeGraph.nodeBucketLimit, + ), + nodeContactAddress, + nodeContactAddressData, + ), + ).rejects.toThrow(nodesErrors.ErrorNodeGraphBucketLimit); }); - await nodeGraph.stop(); + test.prop( + [ + testNodesUtils.nodeIdArb, + testNodesUtils.nodeContactAddressArb, + testNodesUtils.nodeContactAddressArb, + testNodesUtils.nodeContactAddressDataArb, + testNodesUtils.nodeContactAddressDataArb, + ], + { 
numRuns: 20 }, + )( + 'should update bucket lastUpdatedTime', + async ( + nodeId, + nodeContactAddress1, + nodeContactAddress2, + nodeContactAddressData1, + nodeContactAddressData2, + ) => { + await nodeGraph.setNodeContactAddressData( + nodeId, + nodeContactAddress1, + nodeContactAddressData1, + ); + expect(await nodeGraph.getConnectedTime(nodeId)).toBe( + nodeContactAddressData1.connectedTime, + ); + await nodeGraph.setNodeContactAddressData( + nodeId, + nodeContactAddress1, + nodeContactAddressData2, + ); + expect(await nodeGraph.getConnectedTime(nodeId)).toBe( + nodeContactAddressData2.connectedTime, + ); + await nodeGraph.setNodeContactAddressData( + nodeId, + nodeContactAddress2, + nodeContactAddressData1, + ); + expect(await nodeGraph.getConnectedTime(nodeId)).toBe( + nodeContactAddressData1.connectedTime, + ); + }, + ); }); - test('get bucket with multiple nodes', async () => { - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, + describe('getNodeContactAddressData', () => { + test.prop([testNodesUtils.nodeIdArb, testNodesUtils.nodeContactPairArb], { + numRuns: 20, + })('should get with single address', async (nodeId, nodeContactPair) => { + const nodeContact = { + [nodeContactPair.nodeContactAddress]: + nodeContactPair.nodeContactAddressData, + }; + await nodeGraph.setNodeContact(nodeId, nodeContact); + + const result = await nodeGraph.getNodeContactAddressData( + nodeId, + nodeContactPair.nodeContactAddress, + ); + expect(result).toBeDefined(); + expect(result!).toMatchObject(nodeContactPair.nodeContactAddressData); }); - // Contiguous node IDs starting from 0 - let nodeIds = Array.from({ length: 25 }, (_, i) => - IdInternal.create( - utils.bigInt2Bytes(BigInt(i), keyRing.getNodeId().byteLength), - ), - ); - nodeIds = nodeIds.filter((nodeId) => !nodeId.equals(keyRing.getNodeId())); - for (const nodeId of nodeIds) { - await utils.sleep(100); - await nodeGraph.setNode(nodeId, { - host: '127.0.0.1', - port: 55555, - } as 
NodeAddress); - } - // Use first and last buckets because node IDs may be split between buckets - const bucketIndexFirst = nodesUtils.bucketIndex( - keyRing.getNodeId(), - nodeIds[0], + test.prop( + [ + testNodesUtils.nodeIdArb, + testNodesUtils.nodeContactPairArb, + testNodesUtils.nodeContactPairArb, + ], + { numRuns: 20 }, + )( + 'should get with multiple addresses', + async (nodeId, nodeContactPair1, nodeContactPair2) => { + const nodeContact = { + [nodeContactPair1.nodeContactAddress]: + nodeContactPair1.nodeContactAddressData, + [nodeContactPair2.nodeContactAddress]: + nodeContactPair2.nodeContactAddressData, + }; + await nodeGraph.setNodeContact(nodeId, nodeContact); + + const result1 = await nodeGraph.getNodeContactAddressData( + nodeId, + nodeContactPair1.nodeContactAddress, + ); + expect(result1).toBeDefined(); + expect(result1!).toMatchObject(nodeContactPair1.nodeContactAddressData); + const result2 = await nodeGraph.getNodeContactAddressData( + nodeId, + nodeContactPair2.nodeContactAddress, + ); + expect(result2).toBeDefined(); + expect(result2!).toMatchObject(nodeContactPair2.nodeContactAddressData); + }, ); - const bucketIndexLast = nodesUtils.bucketIndex( - keyRing.getNodeId(), - nodeIds[nodeIds.length - 1], + test.prop( + [ + testNodesUtils.nodeIdArb, + testNodesUtils.nodeIdArb, + testNodesUtils.nodeContactPairArb, + testNodesUtils.nodeContactPairArb, + ], + { numRuns: 10 }, + )( + 'should get with multiple nodes', + async (nodeId1, nodeId2, nodeContactPair1, nodeContactPair2) => { + const nodeContact1 = { + [nodeContactPair1.nodeContactAddress]: + nodeContactPair1.nodeContactAddressData, + }; + const nodeContact2 = { + [nodeContactPair2.nodeContactAddress]: + nodeContactPair2.nodeContactAddressData, + }; + await nodeGraph.setNodeContact(nodeId1, nodeContact1); + await nodeGraph.setNodeContact(nodeId2, nodeContact2); + + const result1 = await nodeGraph.getNodeContactAddressData( + nodeId1, + nodeContactPair1.nodeContactAddress, + ); + 
expect(result1).toBeDefined(); + expect(result1!).toMatchObject(nodeContactPair1.nodeContactAddressData); + const result2 = await nodeGraph.getNodeContactAddressData( + nodeId2, + nodeContactPair2.nodeContactAddress, + ); + expect(result2).toBeDefined(); + expect(result2!).toMatchObject(nodeContactPair2.nodeContactAddressData); + }, ); - const bucketFirst = await nodeGraph.getBucket(bucketIndexFirst); - const bucketLast = await nodeGraph.getBucket(bucketIndexLast); - let bucket: NodeBucket; - let bucketIndex: NodeBucketIndex; - if (bucketFirst.length >= bucketLast.length) { - bucket = bucketFirst; - bucketIndex = bucketIndexFirst; - } else { - bucket = bucketLast; - bucketIndex = bucketIndexLast; - } - expect(bucket.length > 1).toBe(true); - let bucketNodeIds = bucket.map(([nodeId]) => nodeId); - // The node IDs must be sorted lexicographically - expect( - bucketNodeIds.slice(1).every((nodeId, i) => { - return Buffer.compare(bucketNodeIds[i], nodeId) < 1; - }), - ).toBe(true); - // Sort by node ID asc - bucket = await nodeGraph.getBucket(bucketIndex, 'nodeId', 'asc'); - bucketNodeIds = bucket.map(([nodeId]) => nodeId); - expect( - bucketNodeIds.slice(1).every((nodeId, i) => { - return Buffer.compare(bucketNodeIds[i], nodeId) < 0; - }), - ).toBe(true); - // Sort by node ID desc - bucket = await nodeGraph.getBucket(bucketIndex, 'nodeId', 'desc'); - bucketNodeIds = bucket.map(([nodeId]) => nodeId); - expect( - bucketNodeIds.slice(1).every((nodeId, i) => { - return Buffer.compare(bucketNodeIds[i], nodeId) > 0; - }), - ).toBe(true); - // Sort by distance asc - bucket = await nodeGraph.getBucket(bucketIndex, 'distance', 'asc'); - let bucketDistances = bucket.map(([nodeId]) => - nodesUtils.nodeDistance(keyRing.getNodeId(), nodeId), + }); + describe('unsetNodeContact', () => { + test.prop( + [ + testNodesUtils.nodeIdArb.noShrink(), + testNodesUtils.nodeContactPairArb.noShrink(), + ], + { numRuns: 20 }, + )( + 'can unsetNodeContact with single address', + async (nodeId, 
nodeContactPair) => { + const nodeContact = { + [nodeContactPair.nodeContactAddress]: + nodeContactPair.nodeContactAddressData, + }; + await nodeGraph.setNodeContact(nodeId, nodeContact); + + await nodeGraph.unsetNodeContact(nodeId); + expect(await nodeGraph.getNodeContact(nodeId)).toBeUndefined(); + expect( + await nodeGraph.getBucketMetaProp( + nodeGraph.bucketIndex(nodeId)[0], + 'count', + ), + ).toBe(0); + }, ); - expect( - bucketDistances.slice(1).every((distance, i) => { - return bucketDistances[i] <= distance; - }), - ).toBe(true); - // Sort by distance desc - bucket = await nodeGraph.getBucket(bucketIndex, 'distance', 'desc'); - bucketDistances = bucket.map(([nodeId]) => - nodesUtils.nodeDistance(keyRing.getNodeId(), nodeId), + test.prop( + [ + testNodesUtils.nodeIdArb, + testNodesUtils.nodeContactPairArb, + testNodesUtils.nodeContactPairArb, + ], + { numRuns: 20 }, + )( + 'can unsetNodeContact with multiple addresses', + async (nodeId, nodeContactPair1, nodeContactPair2) => { + const nodeContact = { + [nodeContactPair1.nodeContactAddress]: + nodeContactPair1.nodeContactAddressData, + [nodeContactPair2.nodeContactAddress]: + nodeContactPair2.nodeContactAddressData, + }; + await nodeGraph.setNodeContact(nodeId, nodeContact); + + await nodeGraph.unsetNodeContact(nodeId); + expect(await nodeGraph.getNodeContact(nodeId)).toBeUndefined(); + expect( + await nodeGraph.getBucketMetaProp( + nodeGraph.bucketIndex(nodeId)[0], + 'count', + ), + ).toBe(0); + }, ); - expect( - bucketDistances.slice(1).every((distance, i) => { - return bucketDistances[i] >= distance; - }), - ).toBe(true); - // Sort by lastUpdated asc - bucket = await nodeGraph.getBucket(bucketIndex, 'lastUpdated', 'asc'); - let bucketLastUpdated = bucket.map(([, nodeData]) => nodeData.lastUpdated); - expect( - bucketLastUpdated.slice(1).every((lastUpdated, i) => { - return bucketLastUpdated[i] <= lastUpdated; - }), - ).toBe(true); - bucket = await nodeGraph.getBucket(bucketIndex, 'lastUpdated', 'desc'); - 
bucketLastUpdated = bucket.map(([, nodeData]) => nodeData.lastUpdated); - expect( - bucketLastUpdated.slice(1).every((lastUpdated, i) => { - return bucketLastUpdated[i] >= lastUpdated; - }), - ).toBe(true); - await nodeGraph.stop(); - }); - test('get all buckets', async () => { - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, + test('should decrement bucket count', async () => { + const nodeId = keyRing.getNodeId(); + const nodeId1 = testNodesUtils.generateNodeIdForBucket(nodeId, 100, 0); + const nodeId2 = testNodesUtils.generateNodeIdForBucket(nodeId, 100, 1); + const nodeContactAddress1 = 'someAddress1' as NodeContactAddress; + const nodeContactAddressData: NodeContactAddressData = { + mode: 'direct', + connectedTime: 0, + scopes: [], + }; + await nodeGraph.setNodeContact(nodeId1, { + [nodeContactAddress1]: nodeContactAddressData, + }); + await nodeGraph.setNodeContact(nodeId2, { + [nodeContactAddress1]: nodeContactAddressData, + }); + + expect(await nodeGraph.getBucketMetaProp(100, 'count')).toBe(2); + await nodeGraph.unsetNodeContact(nodeId1); + expect(await nodeGraph.getBucketMetaProp(100, 'count')).toBe(1); + await nodeGraph.unsetNodeContact(nodeId2); + expect(await nodeGraph.getBucketMetaProp(100, 'count')).toBe(0); }); - const now = utils.getUnixtime(); - for (let i = 0; i < 50; i++) { - await utils.sleep(50); - await nodeGraph.setNode(testNodesUtils.generateRandomNodeId(), { - host: '127.0.0.1', - port: utils.getRandomInt(0, 2 ** 16), - } as NodeAddress); - } - let bucketIndex_ = -1; - // Ascending order - for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( - 'nodeId', - 'asc', - )) { - expect(bucketIndex > bucketIndex_).toBe(true); - bucketIndex_ = bucketIndex; - expect(bucket.length > 0).toBe(true); - for (const [nodeId, nodeData] of bucket) { - expect(nodeId.byteLength).toBe(32); - expect(nodesUtils.bucketIndex(keyRing.getNodeId(), nodeId)).toBe( - bucketIndex, + test.prop( + [ + 
testNodesUtils.nodeIdArb.noShrink(), + testNodesUtils.nodeContactPairArb.noShrink(), + ], + { numRuns: 20 }, + )('should delete lastUpdatedTime', async (nodeId, nodeContactPair) => { + const nodeContact = { + [nodeContactPair.nodeContactAddress]: + nodeContactPair.nodeContactAddressData, + }; + await nodeGraph.setNodeContact(nodeId, nodeContact); + + expect(await nodeGraph.getConnectedTime(nodeId)).toBeDefined(); + await nodeGraph.unsetNodeContact(nodeId); + expect(await nodeGraph.getConnectedTime(nodeId)).toBeUndefined(); + }); + }); + describe('unsetNodeContactAddress', () => { + test.prop( + [ + testNodesUtils.nodeIdArb, + testNodesUtils.nodeContactPairArb, + testNodesUtils.nodeContactPairArb, + ], + { numRuns: 20 }, + )( + 'should unset with multiple addresses', + async (nodeId, nodeContactPair1, nodeContactPair2) => { + const nodeContact = { + [nodeContactPair1.nodeContactAddress]: + nodeContactPair1.nodeContactAddressData, + [nodeContactPair2.nodeContactAddress]: + nodeContactPair2.nodeContactAddressData, + }; + await nodeGraph.setNodeContact(nodeId, nodeContact); + + await nodeGraph.unsetNodeContactAddress( + nodeId, + nodeContactPair1.nodeContactAddress, ); - expect(nodeData.address.host).toBe('127.0.0.1'); - // Port of 0 is not allowed - expect(nodeData.address.port > 0).toBe(true); - expect(nodeData.address.port < 2 ** 16).toBe(true); - expect(nodeData.lastUpdated >= now).toBe(true); - } - const bucketNodeIds = bucket.map(([nodeId]) => nodeId); - expect( - bucketNodeIds.slice(1).every((nodeId, i) => { - return Buffer.compare(bucketNodeIds[i], nodeId) < 0; - }), - ).toBe(true); - } - // There must have been at least 1 bucket - expect(bucketIndex_).not.toBe(-1); - // Descending order - bucketIndex_ = keyRing.getNodeId().length * 8; - for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( - 'nodeId', - 'desc', - )) { - expect(bucketIndex < bucketIndex_).toBe(true); - bucketIndex_ = bucketIndex; - expect(bucket.length > 0).toBe(true); - for (const 
[nodeId, nodeData] of bucket) { - expect(nodeId.byteLength).toBe(32); - expect(nodesUtils.bucketIndex(keyRing.getNodeId(), nodeId)).toBe( - bucketIndex, + expect( + await nodeGraph.getNodeContactAddressData( + nodeId, + nodeContactPair1.nodeContactAddress, + ), + ).toBeUndefined(); + expect( + await nodeGraph.getNodeContactAddressData( + nodeId, + nodeContactPair2.nodeContactAddress, + ), + ).toBeDefined(); + expect(await nodeGraph.getNodeContact(nodeId)).toBeDefined(); + expect( + await nodeGraph.getBucketMetaProp( + nodeGraph.bucketIndex(nodeId)[0], + 'count', + ), + ).toBeGreaterThanOrEqual(1); + }, + ); + test.prop( + [ + testNodesUtils.nodeIdArb, + testNodesUtils.nodeContactPairArb, + testNodesUtils.nodeContactPairArb, + ], + { numRuns: 20 }, + )( + 'should remove node when all addresses are removed', + async (nodeId, nodeContactPair1, nodeContactPair2) => { + const nodeContact = { + [nodeContactPair1.nodeContactAddress]: + nodeContactPair1.nodeContactAddressData, + [nodeContactPair2.nodeContactAddress]: + nodeContactPair2.nodeContactAddressData, + }; + await nodeGraph.setNodeContact(nodeId, nodeContact); + + await nodeGraph.unsetNodeContactAddress( + nodeId, + nodeContactPair1.nodeContactAddress, ); - expect(nodeData.address.host).toBe('127.0.0.1'); - // Port of 0 is not allowed - expect(nodeData.address.port > 0).toBe(true); - expect(nodeData.address.port < 2 ** 16).toBe(true); - expect(nodeData.lastUpdated >= now).toBe(true); - } - const bucketNodeIds = bucket.map(([nodeId]) => nodeId); - expect( - bucketNodeIds.slice(1).every((nodeId, i) => { - return Buffer.compare(bucketNodeIds[i], nodeId) > 0; - }), - ).toBe(true); - } - expect(bucketIndex_).not.toBe(keyRing.getNodeId().length * 8); - // Distance ascending order - // Lower distance buckets first - bucketIndex_ = -1; - for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( - 'distance', - 'asc', - )) { - expect(bucketIndex > bucketIndex_).toBe(true); - bucketIndex_ = bucketIndex; - 
expect(bucket.length > 0).toBe(true); - for (const [nodeId, nodeData] of bucket) { - expect(nodeId.byteLength).toBe(32); - expect(nodesUtils.bucketIndex(keyRing.getNodeId(), nodeId)).toBe( - bucketIndex, + await nodeGraph.unsetNodeContactAddress( + nodeId, + nodeContactPair2.nodeContactAddress, ); - expect(nodeData.address.host).toBe('127.0.0.1'); - // Port of 0 is not allowed - expect(nodeData.address.port > 0).toBe(true); - expect(nodeData.address.port < 2 ** 16).toBe(true); - expect(nodeData.lastUpdated >= now).toBe(true); - } - const bucketDistances = bucket.map(([nodeId]) => - nodesUtils.nodeDistance(keyRing.getNodeId(), nodeId), - ); - // It's the LAST bucket that fails this + expect( + await nodeGraph.getNodeContactAddressData( + nodeId, + nodeContactPair1.nodeContactAddress, + ), + ).toBeUndefined(); + expect( + await nodeGraph.getNodeContactAddressData( + nodeId, + nodeContactPair2.nodeContactAddress, + ), + ).toBeUndefined(); + expect(await nodeGraph.getNodeContact(nodeId)).toBeUndefined(); + expect( + await nodeGraph.getBucketMetaProp( + nodeGraph.bucketIndex(nodeId)[0], + 'count', + ), + ).toBe(0); + }, + ); + test('should decrement bucket count', async () => { + const nodeId = testNodesUtils.generateRandomNodeId(); + const nodeContactAddress1 = 'someAddress1' as NodeContactAddress; + const nodeContactAddress2 = 'someAddress2' as NodeContactAddress; + const nodeContactAddressData: NodeContactAddressData = { + mode: 'direct', + connectedTime: 0, + scopes: [], + }; + const nodeContact: NodeContact = { + [nodeContactAddress1]: nodeContactAddressData, + [nodeContactAddress2]: nodeContactAddressData, + }; + await nodeGraph.setNodeContact(nodeId, nodeContact); + expect( - bucketDistances.slice(1).every((distance, i) => { - return bucketDistances[i] <= distance; - }), - ).toBe(true); - } - // Distance descending order - // Higher distance buckets first - bucketIndex_ = keyRing.getNodeId().length * 8; - for await (const [bucketIndex, bucket] of 
nodeGraph.getBuckets( - 'distance', - 'desc', - )) { - expect(bucketIndex < bucketIndex_).toBe(true); - bucketIndex_ = bucketIndex; - expect(bucket.length > 0).toBe(true); - for (const [nodeId, nodeData] of bucket) { - expect(nodeId.byteLength).toBe(32); - expect(nodesUtils.bucketIndex(keyRing.getNodeId(), nodeId)).toBe( - bucketIndex, - ); - expect(nodeData.address.host).toBe('127.0.0.1'); - // Port of 0 is not allowed - expect(nodeData.address.port > 0).toBe(true); - expect(nodeData.address.port < 2 ** 16).toBe(true); - expect(nodeData.lastUpdated >= now).toBe(true); - } - const bucketDistances = bucket.map(([nodeId]) => - nodesUtils.nodeDistance(keyRing.getNodeId(), nodeId), - ); + await nodeGraph.getBucketMetaProp( + nodeGraph.bucketIndex(nodeId)[0], + 'count', + ), + ).toBe(1); + await nodeGraph.unsetNodeContactAddress(nodeId, nodeContactAddress1); expect( - bucketDistances.slice(1).every((distance, i) => { - return bucketDistances[i] >= distance; - }), - ).toBe(true); - } - // Last updated ascending order - // Bucket index is ascending - bucketIndex_ = -1; - for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( - 'lastUpdated', - 'asc', - )) { - expect(bucketIndex > bucketIndex_).toBe(true); - bucketIndex_ = bucketIndex; - expect(bucket.length > 0).toBe(true); - for (const [nodeId, nodeData] of bucket) { - expect(nodeId.byteLength).toBe(32); - expect(nodesUtils.bucketIndex(keyRing.getNodeId(), nodeId)).toBe( - bucketIndex, - ); - expect(nodeData.address.host).toBe('127.0.0.1'); - // Port of 0 is not allowed - expect(nodeData.address.port > 0).toBe(true); - expect(nodeData.address.port < 2 ** 16).toBe(true); - expect(nodeData.lastUpdated >= now).toBe(true); - } - const bucketLastUpdated = bucket.map( - ([, nodeData]) => nodeData.lastUpdated, - ); + await nodeGraph.getBucketMetaProp( + nodeGraph.bucketIndex(nodeId)[0], + 'count', + ), + ).toBe(1); + await nodeGraph.unsetNodeContactAddress(nodeId, nodeContactAddress2); expect( - 
bucketLastUpdated.slice(1).every((lastUpdated, i) => { - return bucketLastUpdated[i] <= lastUpdated; - }), - ).toBe(true); - } - // Last updated descending order - // Bucket index is descending - bucketIndex_ = keyRing.getNodeId().length * 8; - for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( - 'lastUpdated', - 'desc', - )) { - expect(bucketIndex < bucketIndex_).toBe(true); - bucketIndex_ = bucketIndex; - expect(bucket.length > 0).toBe(true); - for (const [nodeId, nodeData] of bucket) { - expect(nodeId.byteLength).toBe(32); - expect(nodesUtils.bucketIndex(keyRing.getNodeId(), nodeId)).toBe( + await nodeGraph.getBucketMetaProp( + nodeGraph.bucketIndex(nodeId)[0], + 'count', + ), + ).toBe(0); + }); + test.prop( + [ + testNodesUtils.nodeIdArb, + testNodesUtils.nodeContactPairArb, + testNodesUtils.nodeContactPairArb, + ], + { numRuns: 20 }, + )( + 'should delete lastUpdatedTime', + async (nodeId, nodeContactPair1, nodeContactPair2) => { + const nodeContact = { + [nodeContactPair1.nodeContactAddress]: + nodeContactPair1.nodeContactAddressData, + [nodeContactPair2.nodeContactAddress]: + nodeContactPair2.nodeContactAddressData, + }; + await nodeGraph.setNodeContact(nodeId, nodeContact); + + expect(await nodeGraph.getConnectedTime(nodeId)).toBeDefined(); + await nodeGraph.unsetNodeContactAddress( + nodeId, + nodeContactPair1.nodeContactAddress, + ); + expect(await nodeGraph.getConnectedTime(nodeId)).toBeDefined(); + await nodeGraph.unsetNodeContactAddress( + nodeId, + nodeContactPair2.nodeContactAddress, + ); + // Only removed after all addresses are removed + expect(await nodeGraph.getConnectedTime(nodeId)).toBeUndefined(); + }, + ); + }); + describe('getBucket', () => { + test.prop( + [ + fc.integer({ min: 20, max: 254 }).noShrink(), + fc + .array(testNodesUtils.nodeContactArb, { minLength: 1, maxLength: 20 }) + .noShrink(), + ], + { numRuns: 1 }, + )('can get a bucket', async (bucketIndex, nodeContacts) => { + // Fill a bucket with data + const nodeIds: 
Map = new Map(); + for (let i = 0; i < nodeContacts.length; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + keyRing.getNodeId(), bucketIndex, + i, ); - expect(nodeData.address.host).toBe('127.0.0.1'); - // Port of 0 is not allowed - expect(nodeData.address.port > 0).toBe(true); - expect(nodeData.address.port < 2 ** 16).toBe(true); - expect(nodeData.lastUpdated >= now).toBe(true); + nodeIds.set(encodeNodeId(nodeId), nodeContacts[i]); + await nodeGraph.setNodeContact(nodeId, nodeContacts[i]); } - const bucketLastUpdated = bucket.map( - ([, nodeData]) => nodeData.lastUpdated, - ); - expect( - bucketLastUpdated.slice(1).every((lastUpdated, i) => { - return bucketLastUpdated[i] >= lastUpdated; - }), - ).toBe(true); - } - await nodeGraph.stop(); - }); - testProp( - 'reset buckets', - [testNodesUtils.uniqueNodeIdArb(3), testNodesUtils.nodeIdArrayArb(100)], - async (nodeIds, initialNodes) => { - const getNodeIdMock = jest.fn(); - const dummyKeyRing = { - getNodeId: getNodeIdMock, - } as unknown as KeyRing; - getNodeIdMock.mockImplementation(() => nodeIds[0]); - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing: dummyKeyRing, - logger, - }); - for (const nodeId of initialNodes) { - await nodeGraph.setNode(nodeId, { - host: '127.0.0.1', - port: utils.getRandomInt(0, 2 ** 16), - } as NodeAddress); + + // Getting the bucket + const bucket = await nodeGraph.getBucket(bucketIndex); + expect(bucket.length).toBe(nodeContacts.length); + for (const [nodeId, nodeContact] of bucket) { + expect(nodeContact).toMatchObject(nodeIds.get(encodeNodeId(nodeId))!); } - const buckets0 = await utils.asyncIterableArray(nodeGraph.getBuckets()); - // Reset the buckets according to the new node ID - // Note that this should normally be only executed when the key manager NodeID changes - // This means methods that use the KeyRing's node ID cannot be used here in this test - getNodeIdMock.mockImplementation(() => nodeIds[1]); - const nodeIdNew1 = nodeIds[1]; - await 
nodeGraph.resetBuckets(nodeIdNew1); - const buckets1 = await utils.asyncIterableArray(nodeGraph.getBuckets()); - expect(buckets1.length > 0).toBe(true); - for (const [bucketIndex, bucket] of buckets1) { - expect(bucket.length > 0).toBe(true); - for (const [nodeId, nodeData] of bucket) { - expect(nodeId.byteLength).toBe(32); - expect(nodesUtils.bucketIndex(nodeIdNew1, nodeId)).toBe(bucketIndex); - expect(nodeData.address.host).toBe('127.0.0.1'); - // Port of 0 is not allowed - expect(nodeData.address.port > 0).toBe(true); - expect(nodeData.address.port < 2 ** 16).toBe(true); + }); + test.prop( + [ + fc.integer({ min: 20, max: 254 }).noShrink(), + fc + .array(testNodesUtils.nodeContactArb, { minLength: 1, maxLength: 20 }) + .noShrink(), + ], + { numRuns: 1 }, + )( + 'can get a bucket ordered by distance', + async (bucketIndex, nodeContacts) => { + // Fill a bucket with data + const nodeIdsContact: Map = new Map(); + const nodeIds: Array = []; + for (let i = 0; i < nodeContacts.length; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + keyRing.getNodeId(), + bucketIndex, + i, + ); + nodeIds.push(nodeId); + nodeIdsContact.set(encodeNodeId(nodeId), nodeContacts[i]); + await nodeGraph.setNodeContact(nodeId, nodeContacts[i]); } - } - expect(buckets1).not.toStrictEqual(buckets0); - // Resetting again should change the space - getNodeIdMock.mockImplementation(() => nodeIds[2]); - const nodeIdNew2 = nodeIds[2]; - await nodeGraph.resetBuckets(nodeIdNew2); - const buckets2 = await utils.asyncIterableArray(nodeGraph.getBuckets()); - expect(buckets2.length > 0).toBe(true); - for (const [bucketIndex, bucket] of buckets2) { - expect(bucket.length > 0).toBe(true); - for (const [nodeId, nodeData] of bucket) { - expect(nodeId.byteLength).toBe(32); - expect(nodesUtils.bucketIndex(nodeIdNew2, nodeId)).toBe(bucketIndex); - expect(nodeData.address.host).toBe('127.0.0.1'); - // Port of 0 is not allowed - expect(nodeData.address.port > 0).toBe(true); - 
expect(nodeData.address.port < 2 ** 16).toBe(true); + + // Getting the bucket + const bucket = await nodeGraph.getBucket(bucketIndex, 'distance'); + + // Checking data + expect(bucket.length).toBe(nodeContacts.length); + for (const [nodeId, nodeContact] of bucket) { + expect(nodeContact).toMatchObject( + nodeIdsContact.get(encodeNodeId(nodeId))!, + ); } - } - expect(buckets2).not.toStrictEqual(buckets1); - // Resetting to the same NodeId results in the same bucket structure - await nodeGraph.resetBuckets(nodeIdNew2); - const buckets3 = await utils.asyncIterableArray(nodeGraph.getBuckets()); - expect(buckets3).toStrictEqual(buckets2); - // Resetting to an existing NodeId - const nodeIdExisting = buckets3[0][1][0][0]; - let nodeIdExistingFound = false; - await nodeGraph.resetBuckets(nodeIdExisting); - const buckets4 = await utils.asyncIterableArray(nodeGraph.getBuckets()); - expect(buckets4.length > 0).toBe(true); - for (const [bucketIndex, bucket] of buckets4) { - expect(bucket.length > 0).toBe(true); - for (const [nodeId, nodeData] of bucket) { - if (nodeId.equals(nodeIdExisting)) { - nodeIdExistingFound = true; + + // Checking order + const nodeId = keyRing.getNodeId(); + nodeIds.sort((nodeIdA, nodeIdB) => { + const distA = nodesUtils.nodeDistance(nodeId, nodeIdA); + const distB = nodesUtils.nodeDistance(nodeId, nodeIdB); + if (distA < distB) { + return -1; + } else if (distA > distB) { + return 1; + } else { + return 0; } - expect(nodeId.byteLength).toBe(32); - expect(nodesUtils.bucketIndex(nodeIdExisting, nodeId)).toBe( + }); + + // Should have same sorted order + for (let i = 0; i < bucket.length; i++) { + expect(nodeIds[i].equals(bucket[i][0])).toBeTrue(); + } + }, + ); + test.prop( + [ + fc.integer({ min: 20, max: 254 }).noShrink(), + fc + .array(testNodesUtils.nodeContactArb, { minLength: 1, maxLength: 20 }) + .noShrink(), + ], + { numRuns: 1 }, + )( + 'can get a bucket ordered by lastUpdatedTime', + async (bucketIndex, nodeContacts) => { + // Fill a bucket 
with data + const nodeIdsContact: Map = new Map(); + const nodeIds: Array<{ + nodeId: NodeId; + lastUpdated: number; + }> = []; + for (let i = 0; i < nodeContacts.length; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + keyRing.getNodeId(), bucketIndex, + i, + ); + let lastUpdated = 0; + const nodeContact = nodeContacts[i]; + for (const addressData of Object.values(nodeContact)) { + if (lastUpdated < addressData.connectedTime) { + lastUpdated = addressData.connectedTime; + } + } + nodeIds.push({ + nodeId, + lastUpdated, + }); + nodeIdsContact.set(encodeNodeId(nodeId), nodeContacts[i]); + await nodeGraph.setNodeContact(nodeId, nodeContacts[i]); + } + + // Getting the bucket + const bucket = await nodeGraph.getBucket(bucketIndex, 'connected'); + + // Checking data + expect(bucket.length).toBe(nodeContacts.length); + for (const [nodeId, nodeContact] of bucket) { + expect(nodeContact).toMatchObject( + nodeIdsContact.get(encodeNodeId(nodeId))!, ); - expect(nodeData.address.host).toBe('127.0.0.1'); - // Port of 0 is not allowed - expect(nodeData.address.port > 0).toBe(true); - expect(nodeData.address.port < 2 ** 16).toBe(true); } + + // Checking order + nodeIds.sort((nodeA, nodeB) => { + if (nodeA.lastUpdated < nodeB.lastUpdated) { + return -1; + } else if (nodeA.lastUpdated > nodeB.lastUpdated) { + return 1; + } else { + return 0; + } + }); + + // Should have same sorted order + for (let i = 0; i < bucket.length; i++) { + expect(nodeIds[i].nodeId.equals(bucket[i][0])).toBeTrue(); + } + }, + ); + }); + describe('getBuckets', () => { + test.prop( + [ + fc + .uniqueArray(fc.integer({ min: 0, max: 255 }), { minLength: 1 }) + .noShrink(), + testNodesUtils.nodeContactArb, + ], + { numRuns: 1 }, + )('get all buckets', async (buckets, nodeContact) => { + const nodeId = keyRing.getNodeId(); + for (const bucket of buckets) { + await nodeGraph.setNodeContact( + testNodesUtils.generateNodeIdForBucket(nodeId, bucket, 0), + nodeContact, + ); } - 
expect(buckets4).not.toStrictEqual(buckets3); - // The existing node ID should not be put into the NodeGraph - expect(nodeIdExistingFound).toBe(false); - await nodeGraph.stop(); - }, - { numRuns: 1 }, - ); - testProp( - 'reset buckets should re-order the buckets', - [testNodesUtils.uniqueNodeIdArb(2)], - async (nodeIds) => { - const getNodeIdMock = jest.fn(); - const dummyKeyRing = { - getNodeId: getNodeIdMock, - } as unknown as KeyRing; - getNodeIdMock.mockImplementation(() => nodeIds[0]); - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing: dummyKeyRing, - fresh: true, - logger, - }); - for (let i = 1; i < 255 / 25; i += 50) { - const nodeId = nodesUtils.generateRandomNodeIdForBucket(nodeIds[0], i); - await nodeGraph.setNode(nodeId, { - host: '127.0.0.1', - port: utils.getRandomInt(0, 2 ** 16), - } as NodeAddress); + + const results: Array = []; + for await (const [index, nodeBucket] of nodeGraph.getBuckets()) { + results.push(index); + expect(nodeBucket.length).toBe(1); } - const buckets0 = await utils.asyncIterableArray(nodeGraph.getBuckets()); - // Reset the buckets according to the new node ID - // Note that this should normally be only executed when the key manager NodeID changes - // This means methods that use the KeyRing's node ID cannot be used here in this test - getNodeIdMock.mockImplementation(() => nodeIds[1]); - const nodeIdNew1 = nodeIds[1]; - await nodeGraph.resetBuckets(nodeIdNew1); - const buckets1 = await utils.asyncIterableArray(nodeGraph.getBuckets()); - expect(buckets1).not.toStrictEqual(buckets0); - await nodeGraph.stop(); - }, - { numRuns: 20 }, - ); - testProp( - 'reset buckets should not corrupt data', - [testNodesUtils.uniqueNodeIdArb(2), testNodesUtils.nodeIdArrayArb(10)], - async (nodeIds, initialNodes) => { - const getNodeIdMock = jest.fn(); - const dummyKeyRing = { - getNodeId: getNodeIdMock, - } as unknown as KeyRing; - getNodeIdMock.mockImplementation(() => nodeIds[0]); - const nodeGraph = await 
NodeGraph.createNodeGraph({ - db, - keyRing: dummyKeyRing, - fresh: true, - logger, - }); - const nodeAddresses: Map = new Map(); - for (const nodeId of initialNodes) { - const nodeAddress = { - host: '127.0.0.1', - port: utils.getRandomInt(0, 2 ** 16), - } as NodeAddress; - await nodeGraph.setNode(nodeId, nodeAddress); - nodeAddresses.set(nodeId.toString(), nodeAddress); + expect(results.length).toBe(buckets.length); + for (const bucketIndex of buckets) { + expect(results).toContain(bucketIndex); } - // Reset the buckets according to the new node ID - // Note that this should normally be only executed when the key manager NodeID changes - // This means methods that use the KeyRing's node ID cannot be used here in this test - getNodeIdMock.mockImplementation(() => nodeIds[1]); - const nodeIdNew1 = nodeIds[1]; - await nodeGraph.resetBuckets(nodeIdNew1); - const buckets1 = await utils.asyncIterableArray(nodeGraph.getBuckets()); - expect(buckets1.length > 0).toBe(true); - for (const [bucketIndex, bucket] of buckets1) { - expect(bucket.length > 0).toBe(true); - for (const [nodeId, nodeData] of bucket) { - expect(nodeId.byteLength).toBe(32); - expect(nodesUtils.bucketIndex(nodeIdNew1, nodeId)).toBe(bucketIndex); - expect(nodeData.address.host).toBe('127.0.0.1'); - expect(nodeAddresses.get(nodeId.toString())).toBeDefined(); - expect(nodeAddresses.get(nodeId.toString())?.port).toBe( - nodeData.address.port, + }); + }); + describe('resetBuckets', () => { + let getNodeIdMock: jest.SpyInstance; + + beforeEach(() => { + getNodeIdMock = jest.spyOn(keyRing, 'getNodeId'); + }); + afterEach(() => { + getNodeIdMock.mockRestore(); + }); + + test('should rearrange buckets', async () => { + // Fill in buckets + const nodeContact: NodeContact = { + ['address1' as NodeContactAddress]: { + mode: 'signal', + connectedTime: 100, + scopes: ['global'], + }, + ['address2' as NodeContactAddress]: { + mode: 'direct', + connectedTime: 200, + scopes: ['local'], + }, + }; + const nodeId = 
keyRing.getNodeId(); + for (let i = 5; i < 255; i += 5) { + for (let j = 0; j < 5; j++) { + await nodeGraph.setNodeContact( + testNodesUtils.generateNodeIdForBucket(nodeId, i, j + 1), + nodeContact, ); } } - await nodeGraph.stop(); - }, - { numRuns: 20 }, - ); - testProp( - 'reset buckets to an existing node should remove node', - [ - testNodesUtils.nodeIdArb, - testNodesUtils.nodeIdArrayArb(20), - fc.integer({ min: 0, max: 19 }), - ], - async (nodeId, initialNodes, nodeIndex) => { - const getNodeIdMock = jest.fn(); - const dummyKeyRing = { - getNodeId: getNodeIdMock, - } as unknown as KeyRing; - getNodeIdMock.mockImplementation(() => nodeId); - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing: dummyKeyRing, - logger, - }); - for (const nodeId of initialNodes) { - await nodeGraph.setNode(nodeId, { - host: '127.0.0.1', - port: utils.getRandomInt(0, 2 ** 16), - } as NodeAddress); + + // Re-arrange + const oldNodeId = keyRing.getNodeId(); + getNodeIdMock.mockReturnValue( + testNodesUtils.generateNodeIdForBucket(oldNodeId, 100, 0), + ); + await nodeGraph.resetBuckets(); + + const bucketsNew: Array<[number, NodeBucket]> = []; + for await (const result of nodeGraph.getBuckets()) { + bucketsNew.push(result); } - // Reset the buckets according to the new node ID - // Note that this should normally be only executed when the key manager NodeID changes - // This means methods that use the KeyRing's node ID cannot be used here in this test - getNodeIdMock.mockImplementation(() => initialNodes[nodeIndex]); - const nodeIdNew1 = initialNodes[nodeIndex]; - await nodeGraph.resetBuckets(nodeIdNew1); - const buckets1 = await utils.asyncIterableArray(nodeGraph.getBuckets()); - expect(buckets1.length > 0).toBe(true); - for (const [, bucket] of buckets1) { - expect(bucket.length > 0).toBe(true); - for (const [nodeId] of bucket) { - // The new node should not be in the graph - expect(nodeIdNew1.equals(nodeId)).toBeFalse(); - } + + // The fist 99 buckets should contain 
only 20 nodes taken from bucket 100 + let nodesLower = 0; + let nodesEqual = 0; + let nodesHigher = 0; + for (const [bucketIndex, bucket] of bucketsNew) { + const nodesNum = bucket.length; + if (bucketIndex < 100) nodesLower += nodesNum; + if (bucketIndex === 100) nodesEqual += nodesNum; + if (bucketIndex > 100) nodesHigher += nodesNum; } - await nodeGraph.stop(); - }, - { numRuns: 15 }, - ); - test('reset buckets is persistent', async () => { - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, + // 5 nodes in bucket 100 are shuffled into buckets 0-99 + expect(nodesLower).toBe(5); + // 95 nodes in buckets 0-99 are moved into bucket 100, limited by 20 + expect(nodesEqual).toBe(20); + // 150 nodes in higher buckets don't move around + expect(nodesHigher).toBe(150); + + // When we re-order a bucket we expect the following to happen + // 1. }); - const now = utils.getUnixtime(); - for (let i = 0; i < 100; i++) { - await nodeGraph.setNode(testNodesUtils.generateRandomNodeId(), { - host: '127.0.0.1', - port: utils.getRandomInt(0, 2 ** 16), - } as NodeAddress); - } - const nodeIdNew1 = testNodesUtils.generateRandomNodeId(); - await nodeGraph.resetBuckets(nodeIdNew1); - await nodeGraph.stop(); - await nodeGraph.start(); - const buckets1 = await utils.asyncIterableArray(nodeGraph.getBuckets()); - expect(buckets1.length > 0).toBe(true); - for (const [bucketIndex, bucket] of buckets1) { - expect(bucket.length > 0).toBe(true); - for (const [nodeId, nodeData] of bucket) { - expect(nodeId.byteLength).toBe(32); - expect(nodesUtils.bucketIndex(nodeIdNew1, nodeId)).toBe(bucketIndex); - expect(nodeData.address.host).toBe('127.0.0.1'); - // Port of 0 is not allowed - expect(nodeData.address.port > 0).toBe(true); - expect(nodeData.address.port < 2 ** 16).toBe(true); - expect(nodeData.lastUpdated >= now).toBe(true); - } - } - const nodeIdNew2 = testNodesUtils.generateRandomNodeId(); - await nodeGraph.resetBuckets(nodeIdNew2); - await nodeGraph.stop(); - 
await nodeGraph.start(); - const buckets2 = await utils.asyncIterableArray(nodeGraph.getBuckets()); - expect(buckets2.length > 0).toBe(true); - for (const [bucketIndex, bucket] of buckets2) { - expect(bucket.length > 0).toBe(true); - for (const [nodeId, nodeData] of bucket) { - expect(nodeId.byteLength).toBe(32); - expect(nodesUtils.bucketIndex(nodeIdNew2, nodeId)).toBe(bucketIndex); - expect(nodeData.address.host).toBe('127.0.0.1'); - // Port of 0 is not allowed - expect(nodeData.address.port > 0).toBe(true); - expect(nodeData.address.port < 2 ** 16).toBe(true); - expect(nodeData.lastUpdated >= now).toBe(true); - } - } - expect(buckets2).not.toStrictEqual(buckets1); - await nodeGraph.stop(); }); - test('get closest nodes, 40 nodes lower than target, take 20', async () => { - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, + describe('getClosestNodes', () => { + const nodeContact: NodeContact = { + ['address1' as NodeContactAddress]: { + mode: 'signal', + connectedTime: 0, + scopes: [], + }, + }; + + test('40 nodes lower than target, take 20', async () => { + const baseNodeId = keyRing.getNodeId(); + const nodeIds: Array = []; + // Add 1 node to each bucket + for (let i = 0; i < 40; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 50 + i, + i, + ); + nodeIds.push(nodeId); + await nodeGraph.setNodeContact(nodeId, nodeContact); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + nodeIds.sort(nodesUtils.nodeDistanceCmpFactory(targetNodeId)); + const nodeIdsEncoded = nodeIds.map((a) => nodesUtils.encodeNodeId(a)); + + const result = await nodeGraph.getClosestNodes(targetNodeId, 20); + + const closestNodesEncoded = result.map(([nodeId]) => + nodesUtils.encodeNodeId(nodeId), + ); + // Are the closest nodes out of all the nodes + expect(closestNodesEncoded).toEqual(nodeIdsEncoded.slice(0, 20)); }); - const baseNodeId = keyRing.getNodeId(); - const nodeIds: 
NodeBucket = []; - // Add 1 node to each bucket - for (let i = 0; i < 40; i++) { - const nodeId = testNodesUtils.generateNodeIdForBucket( + test('15 nodes lower than target, take 20', async () => { + const baseNodeId = keyRing.getNodeId(); + const nodeIds: Array = []; + // Add 1 node to each bucket + for (let i = 0; i < 15; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 50 + i, + i, + ); + nodeIds.push(nodeId); + await nodeGraph.setNodeContact(nodeId, nodeContact); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( baseNodeId, - 50 + i, - i, + 100, + 2, ); - nodeIds.push([nodeId, {} as NodeData]); - await nodeGraph.setNode(nodeId, { - host: '127.0.0.1', - port: utils.getRandomInt(0, 2 ** 16), - } as NodeAddress); - } - const targetNodeId = testNodesUtils.generateNodeIdForBucket( - baseNodeId, - 100, - 2, - ); - const result = await nodeGraph.getClosestNodes(targetNodeId, 20); - nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); - const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); - const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); - // Are the closest nodes out of all the nodes - expect(a.slice(0, b.length)).toEqual(b); + nodeIds.sort(nodesUtils.nodeDistanceCmpFactory(targetNodeId)); + const nodeIdsEncoded = nodeIds.map((a) => nodesUtils.encodeNodeId(a)); - // Check that the list is strictly ascending - const closestNodeDistances = result.map(([nodeId]) => - nodesUtils.nodeDistance(targetNodeId, nodeId), - ); - expect( - closestNodeDistances.slice(1).every((distance, i) => { - return closestNodeDistances[i] < distance; - }), - ).toBe(true); - await nodeGraph.stop(); - }); - test('get closest nodes, 15 nodes lower than target, take 20', async () => { - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, + const result = await nodeGraph.getClosestNodes(targetNodeId, 20); + + const closestNodesEncoded = result.map(([nodeId]) => + nodesUtils.encodeNodeId(nodeId), + ); + 
// Are the closest nodes out of all the nodes + expect(closestNodesEncoded).toEqual(nodeIdsEncoded.slice(0, 20)); }); - const baseNodeId = keyRing.getNodeId(); - const nodeIds: NodeBucket = []; - // Add 1 node to each bucket - for (let i = 0; i < 15; i++) { - const nodeId = testNodesUtils.generateNodeIdForBucket( + test('10 nodes lower than target, 30 nodes above, take 20', async () => { + const baseNodeId = keyRing.getNodeId(); + const nodeIds: Array = []; + // Add 1 node to each bucket + for (let i = 0; i < 40; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 90 + i, + i, + ); + nodeIds.push(nodeId); + await nodeGraph.setNodeContact(nodeId, nodeContact); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( baseNodeId, - 50 + i, - i, + 100, + 2, ); - nodeIds.push([nodeId, {} as NodeData]); - await nodeGraph.setNode(nodeId, { - host: '127.0.0.1', - port: utils.getRandomInt(0, 2 ** 16), - } as NodeAddress); - } - const targetNodeId = testNodesUtils.generateNodeIdForBucket( - baseNodeId, - 100, - 2, - ); - const result = await nodeGraph.getClosestNodes(targetNodeId); - nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); - const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); - const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); - // Are the closest nodes out of all the nodes - expect(a.slice(0, b.length)).toEqual(b); + nodeIds.sort(nodesUtils.nodeDistanceCmpFactory(targetNodeId)); + const nodeIdsEncoded = nodeIds.map((a) => nodesUtils.encodeNodeId(a)); - // Check that the list is strictly ascending - const closestNodeDistances = result.map(([nodeId]) => - nodesUtils.nodeDistance(targetNodeId, nodeId), - ); - expect( - closestNodeDistances.slice(1).every((distance, i) => { - return closestNodeDistances[i] < distance; - }), - ).toBe(true); - await nodeGraph.stop(); - }); - test('get closest nodes, 10 nodes lower than target, 30 nodes above, take 20', async () => { - const nodeGraph = await 
NodeGraph.createNodeGraph({ - db, - keyRing, - logger, + const result = await nodeGraph.getClosestNodes(targetNodeId, 20); + + const closestNodesEncoded = result.map(([nodeId]) => + nodesUtils.encodeNodeId(nodeId), + ); + // Are the closest nodes out of all the nodes + expect(closestNodesEncoded).toEqual(nodeIdsEncoded.slice(0, 20)); }); - const baseNodeId = keyRing.getNodeId(); - const nodeIds: NodeBucket = []; - // Add 1 node to each bucket - for (let i = 0; i < 40; i++) { - const nodeId = testNodesUtils.generateNodeIdForBucket( + test('10 nodes lower than target, 30 nodes above, take 5', async () => { + const baseNodeId = keyRing.getNodeId(); + const nodeIds: Array = []; + // Add 1 node to each bucket + for (let i = 0; i < 40; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 90 + i, + i, + ); + nodeIds.push(nodeId); + await nodeGraph.setNodeContact(nodeId, nodeContact); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( baseNodeId, - 90 + i, - i, + 100, + 2, ); - nodeIds.push([nodeId, {} as NodeData]); - await nodeGraph.setNode(nodeId, { - host: '127.0.0.1', - port: utils.getRandomInt(0, 2 ** 16), - } as NodeAddress); - } - const targetNodeId = testNodesUtils.generateNodeIdForBucket( - baseNodeId, - 100, - 2, - ); - const result = await nodeGraph.getClosestNodes(targetNodeId); - nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); - const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); - const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); - // Are the closest nodes out of all the nodes - expect(a.slice(0, b.length)).toEqual(b); + nodeIds.sort(nodesUtils.nodeDistanceCmpFactory(targetNodeId)); + const nodeIdsEncoded = nodeIds.map((a) => nodesUtils.encodeNodeId(a)); - // Check that the list is strictly ascending - const closestNodeDistances = result.map(([nodeId]) => - nodesUtils.nodeDistance(targetNodeId, nodeId), - ); - expect( - closestNodeDistances.slice(1).every((distance, i) => { - return 
closestNodeDistances[i] < distance; - }), - ).toBe(true); - await nodeGraph.stop(); - }); - test('get closest nodes, 10 nodes lower than target, 30 nodes above, take 5', async () => { - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, + const result = await nodeGraph.getClosestNodes(targetNodeId, 5); + + const closestNodesEncoded = result.map(([nodeId]) => + nodesUtils.encodeNodeId(nodeId), + ); + // Are the closest nodes out of all the nodes + expect(closestNodesEncoded).toEqual(nodeIdsEncoded.slice(0, 5)); }); - const baseNodeId = keyRing.getNodeId(); - const nodeIds: NodeBucket = []; - // Add 1 node to each bucket - for (let i = 0; i < 40; i++) { - const nodeId = testNodesUtils.generateNodeIdForBucket( + test('5 nodes lower than target, 10 nodes above, take 20', async () => { + const baseNodeId = keyRing.getNodeId(); + const nodeIds: Array = []; + // Add 1 node to each bucket + for (let i = 0; i < 15; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 95 + i, + i, + ); + nodeIds.push(nodeId); + await nodeGraph.setNodeContact(nodeId, nodeContact); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( baseNodeId, - 90 + i, - i, + 100, + 2, ); - nodeIds.push([nodeId, {} as NodeData]); - await nodeGraph.setNode(nodeId, { - host: '127.0.0.1', - port: utils.getRandomInt(0, 2 ** 16), - } as NodeAddress); - } - const targetNodeId = testNodesUtils.generateNodeIdForBucket( - baseNodeId, - 100, - 2, - ); - const result = await nodeGraph.getClosestNodes(targetNodeId, 5); - nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); - const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); - const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); - // Are the closest nodes out of all the nodes - expect(a.slice(0, b.length)).toEqual(b); + nodeIds.sort(nodesUtils.nodeDistanceCmpFactory(targetNodeId)); + const nodeIdsEncoded = nodeIds.map((a) => nodesUtils.encodeNodeId(a)); - // Check that the list is 
strictly ascending - const closestNodeDistances = result.map(([nodeId]) => - nodesUtils.nodeDistance(targetNodeId, nodeId), - ); - expect( - closestNodeDistances.slice(1).every((distance, i) => { - return closestNodeDistances[i] < distance; - }), - ).toBe(true); - await nodeGraph.stop(); - }); - test('get closest nodes, 5 nodes lower than target, 10 nodes above, take 20', async () => { - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, + const result = await nodeGraph.getClosestNodes(targetNodeId, 20); + + const closestNodesEncoded = result.map(([nodeId]) => + nodesUtils.encodeNodeId(nodeId), + ); + // Are the closest nodes out of all the nodes + expect(nodeIdsEncoded.slice(0, closestNodesEncoded.length)).toEqual( + closestNodesEncoded, + ); }); - const baseNodeId = keyRing.getNodeId(); - const nodeIds: NodeBucket = []; - // Add 1 node to each bucket - for (let i = 0; i < 15; i++) { - const nodeId = testNodesUtils.generateNodeIdForBucket( + test('40 nodes above target, take 20', async () => { + const baseNodeId = keyRing.getNodeId(); + const nodeIds: Array = []; + // Add 1 node to each bucket + for (let i = 0; i < 40; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 101 + i, + i, + ); + nodeIds.push(nodeId); + await nodeGraph.setNodeContact(nodeId, nodeContact); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( baseNodeId, - 95 + i, - i, + 100, + 0, ); - nodeIds.push([nodeId, {} as NodeData]); - await nodeGraph.setNode(nodeId, { - host: '127.0.0.1', - port: utils.getRandomInt(0, 2 ** 16), - } as NodeAddress); - } - const targetNodeId = testNodesUtils.generateNodeIdForBucket( - baseNodeId, - 100, - 2, - ); - const result = await nodeGraph.getClosestNodes(targetNodeId); - nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); - const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); - const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); - // Are the closest nodes out of all the 
nodes - expect(a.slice(0, b.length)).toEqual(b); + nodeIds.sort(nodesUtils.nodeDistanceCmpFactory(targetNodeId)); + const nodeIdsEncoded = nodeIds.map((a) => nodesUtils.encodeNodeId(a)); - // Check that the list is strictly ascending - const closestNodeDistances = result.map(([nodeId]) => - nodesUtils.nodeDistance(targetNodeId, nodeId), - ); - expect( - closestNodeDistances.slice(1).every((distance, i) => { - return closestNodeDistances[i] < distance; - }), - ).toBe(true); - await nodeGraph.stop(); - }); - test('get closest nodes, 40 nodes above target, take 20', async () => { - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, + const result = await nodeGraph.getClosestNodes(targetNodeId, 20); + const closestNodesEncoded = result.map(([nodeId]) => + nodesUtils.encodeNodeId(nodeId), + ); + // Are the closest nodes out of all the nodes + expect(closestNodesEncoded).toEqual(nodeIdsEncoded.slice(0, 20)); }); - const baseNodeId = keyRing.getNodeId(); - const nodeIds: NodeBucket = []; - // Add 1 node to each bucket - for (let i = 0; i < 40; i++) { - const nodeId = testNodesUtils.generateNodeIdForBucket( + test('15 nodes above target, take 20', async () => { + const baseNodeId = keyRing.getNodeId(); + const nodeIds: Array = []; + // Add 1 node to each bucket + for (let i = 0; i < 15; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 101 + i, + i, + ); + nodeIds.push(nodeId); + await nodeGraph.setNodeContact(nodeId, nodeContact); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( baseNodeId, - 101 + i, - i, + 100, + 2, ); - nodeIds.push([nodeId, {} as NodeData]); - await nodeGraph.setNode(nodeId, { - host: '127.0.0.1', - port: utils.getRandomInt(0, 2 ** 16), - } as NodeAddress); - } - const targetNodeId = testNodesUtils.generateNodeIdForBucket( - baseNodeId, - 100, - 2, - ); - const result = await nodeGraph.getClosestNodes(targetNodeId); - nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); - 
const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); - const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); - // Are the closest nodes out of all the nodes - expect(a.slice(0, b.length)).toEqual(b); + nodeIds.sort(nodesUtils.nodeDistanceCmpFactory(targetNodeId)); + const nodeIdsEncoded = nodeIds.map((a) => nodesUtils.encodeNodeId(a)); - // Check that the list is strictly ascending - const closestNodeDistances = result.map(([nodeId]) => - nodesUtils.nodeDistance(targetNodeId, nodeId), - ); - expect( - closestNodeDistances.slice(1).every((distance, i) => { - return closestNodeDistances[i] < distance; - }), - ).toBe(true); - await nodeGraph.stop(); - }); - test('get closest nodes, 15 nodes above target, take 20', async () => { - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, + const result = await nodeGraph.getClosestNodes(targetNodeId, 20); + + const closestNodesEncoded = result.map(([nodeId]) => + nodesUtils.encodeNodeId(nodeId), + ); + // Are the closest nodes out of all the nodes + expect(nodeIdsEncoded.slice(0, closestNodesEncoded.length)).toEqual( + closestNodesEncoded, + ); + expect(closestNodesEncoded).toEqual(nodeIdsEncoded.slice(0, 20)); }); - const baseNodeId = keyRing.getNodeId(); - const nodeIds: NodeBucket = []; - // Add 1 node to each bucket - for (let i = 0; i < 15; i++) { - const nodeId = testNodesUtils.generateNodeIdForBucket( + test('no nodes, take 20', async () => { + const baseNodeId = keyRing.getNodeId(); + const targetNodeId = testNodesUtils.generateNodeIdForBucket( baseNodeId, - 101 + i, - i, + 100, + 2, ); - nodeIds.push([nodeId, {} as NodeData]); - await nodeGraph.setNode(nodeId, { - host: '127.0.0.1', - port: utils.getRandomInt(0, 2 ** 16), - } as NodeAddress); - } - const targetNodeId = testNodesUtils.generateNodeIdForBucket( - baseNodeId, - 100, - 2, - ); - const result = await nodeGraph.getClosestNodes(targetNodeId); - nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); - const a = 
nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); - const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); - // Are the closest nodes out of all the nodes - expect(a.slice(0, b.length)).toEqual(b); - // Check that the list is strictly ascending - const closestNodeDistances = result.map(([nodeId]) => - nodesUtils.nodeDistance(targetNodeId, nodeId), - ); - expect( - closestNodeDistances.slice(1).every((distance, i) => { - return closestNodeDistances[i] < distance; - }), - ).toBe(true); - await nodeGraph.stop(); - }); - test('get closest nodes, no nodes, take 20', async () => { - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, + const result = await nodeGraph.getClosestNodes(targetNodeId, 20); + expect(result).toHaveLength(0); }); - const baseNodeId = keyRing.getNodeId(); - const nodeIds: NodeBucket = []; - const targetNodeId = testNodesUtils.generateNodeIdForBucket( - baseNodeId, - 100, - 2, - ); - const result = await nodeGraph.getClosestNodes(targetNodeId); - nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); - const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); - const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); - // Are the closest nodes out of all the nodes - expect(a.slice(0, b.length)).toEqual(b); + }); + describe('nodesTotal', () => { + test.prop([fc.array(nodeIdContactPairArb, { maxLength: 20 }).noShrink()], { + numRuns: 1, + })('should get total nodes', async (nodes) => { + for (const { nodeId, nodeContact } of nodes) { + await nodeGraph.setNodeContact(nodeId, nodeContact); + } - // Check that the list is strictly ascending - const closestNodeDistances = result.map(([nodeId]) => - nodesUtils.nodeDistance(targetNodeId, nodeId), - ); - expect( - closestNodeDistances.slice(1).every((distance, i) => { - return closestNodeDistances[i] < distance; - }), - ).toBe(true); - await nodeGraph.stop(); + expect(await nodeGraph.nodesTotal()).toBe(nodes.length); + }); }); }); diff --git 
a/tests/nodes/NodeManager.test.ts b/tests/nodes/NodeManager.test.ts index b1d37e0be..fd88bc49c 100644 --- a/tests/nodes/NodeManager.test.ts +++ b/tests/nodes/NodeManager.test.ts @@ -1,692 +1,1296 @@ -import type { NodeAddress } from '@/nodes/types'; -import type { Host, Port, TLSConfig } from '@/network/types'; -import type { Task } from '@/tasks/types'; +import type { Host, Port } from '@/network/types'; +import type { AgentServerManifest } from '@/nodes/agent/handlers'; +import type nodeGraph from '@/nodes/NodeGraph'; +import type { NCMState } from './utils'; +import fs from 'fs'; import path from 'path'; -import { DB } from '@matrixai/db'; +import os from 'os'; import Logger, { formatting, LogLevel, StreamHandler } from '@matrixai/logger'; -import { PromiseCancellable } from '@matrixai/async-cancellable'; +import { DB } from '@matrixai/db'; +import { Semaphore } from '@matrixai/async-locks'; import * as keysUtils from '@/keys/utils'; -import NodeManager from '@/nodes/NodeManager'; -import Sigchain from '@/sigchain/Sigchain'; -import KeyRing from '@/keys/KeyRing'; -import ACL from '@/acl/ACL'; -import GestaltGraph from '@/gestalts/GestaltGraph'; -import NodeGraph from '@/nodes/NodeGraph'; -import TaskManager from '@/tasks/TaskManager'; -import NodeConnection from '@/nodes/NodeConnection'; +import * as nodesErrors from '@/nodes/errors'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; -import { never, promise, sleep } from '@/utils'; +import NodesConnectionSignalFinal from '@/nodes/agent/handlers/NodesConnectionSignalFinal'; +import NodesConnectionSignalInitial from '@/nodes/agent/handlers/NodesConnectionSignalInitial'; import * as nodesUtils from '@/nodes/utils'; -import PolykeyAgent from '@/PolykeyAgent'; -import * as testNodesUtils from './utils'; -import * as nodesTestUtils from '../nodes/utils'; -import * as tlsTestUtils from '../utils/tls'; +import { TaskManager } from '@/tasks'; +import { NodeConnection, NodeManager } from '@/nodes'; 
+import { GestaltGraph } from '@/gestalts'; +import { Sigchain } from '@/sigchain'; +import { KeyRing } from '@/keys'; +import NodeGraph from '@/nodes/NodeGraph'; +import { + NodesClosestActiveConnectionsGet, + NodesClosestLocalNodesGet, +} from '@/nodes/agent/handlers'; +import NodeConnectionQueue from '@/nodes/NodeConnectionQueue'; +import * as nodesTestUtils from './utils'; +import ACL from '../../src/acl/ACL'; +import * as testsUtils from '../utils'; -describe(`${NodeManager.name} test`, () => { - const logger = new Logger(`${NodeConnection.name} test`, LogLevel.WARN, [ +describe(`${NodeManager.name}`, () => { + const logger = new Logger(`${NodeManager.name} test`, LogLevel.WARN, [ new StreamHandler( formatting.format`${formatting.level}:${formatting.keys}:${formatting.msg}`, ), ]); - const localHost = '127.0.0.1'; - const port = 55556; const password = 'password'; - const mockedPingNode = jest.fn(); - const mockedIsSeedNode = jest.fn(); - const dummyNodeConnectionManager = { - connectionConnectTime: 5000, - pingTimeoutTime: 5000, - pingNode: mockedPingNode, - isSeedNode: mockedIsSeedNode, - addEventListener: jest.fn(), - removeEventListener: jest.fn(), - } as unknown as NodeConnectionManager; - const dummySigchain = {} as Sigchain; - - let keyRing: KeyRing; - let db: DB; - let acl: ACL; - let gestaltGraph: GestaltGraph; - let nodeGraph: NodeGraph; - let sigchain: Sigchain; - let taskManager: TaskManager; - - let tlsConfig: TLSConfig; - let nodeConnectionManager: NodeConnectionManager; - let nodeManager: NodeManager; - - let server: PolykeyAgent; + const localHost = '127.0.0.1' as Host; + const timeoutTime = 300; + + let dataDir: string; beforeEach(async () => { - // Setting up client dependencies - const keysPath = path.join(dataDir, 'keys'); - keyRing = await KeyRing.createKeyRing({ - password, - keysPath, - logger, - passwordOpsLimit: keysUtils.passwordOpsLimits.min, - passwordMemLimit: keysUtils.passwordMemLimits.min, - strictMemoryLock: false, - }); - 
const dbPath = path.join(dataDir, 'db'); - db = await DB.createDB({ - dbPath, - logger, - }); - acl = await ACL.createACL({ - db, - logger, + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + }); + afterEach(async () => { + await fs.promises.rm(dataDir, { + force: true, + recursive: true, }); - gestaltGraph = await GestaltGraph.createGestaltGraph({ - db, - acl, - logger, + }); + + test('NodeManager readiness', async () => { + let db: DB | undefined; + let taskManager: TaskManager | undefined; + let nodeManager: NodeManager | undefined; + try { + // Creating dependencies + const dbPath = path.join(dataDir, 'db'); + db = await DB.createDB({ + dbPath, + logger: logger.getChild(DB.name), + }); + taskManager = await TaskManager.createTaskManager({ + db, + logger: logger.getChild(TaskManager.name), + }); + + // Creating NodeManager + nodeManager = new NodeManager({ + db, + gestaltGraph: {} as GestaltGraph, + keyRing: {} as KeyRing, + nodeConnectionManager: { + addEventListener: (..._args) => {}, + removeEventListener: (..._args) => {}, + } as NodeConnectionManager, + nodeGraph: {} as nodeGraph, + sigchain: {} as Sigchain, + taskManager, + logger: logger.getChild(NodeManager.name), + }); + await nodeManager.start(); + await nodeManager.stop(); + // Await expect(async () => { + // await nodeManager.setNode(testsNodesUtils.generateRandomNodeId(), { + // host: '127.0.0.1' as Host, + // port: 55555 as Port, + // scopes: ['local'], + // }); + // }).rejects.toThrow(nodesErrors.ErrorNodeManagerNotRunning); + await nodeManager.start(); + await nodeManager.stop(); + } finally { + await db?.stop(); + await taskManager?.stop(); + await nodeManager?.stop(); + } + }); + + describe('with NodeManager', () => { + let basePath: string; + let keyRing: KeyRing; + let db: DB; + let acl: ACL; + let sigchain: Sigchain; + let gestaltGraph: GestaltGraph; + let nodeGraph: NodeGraph; + let nodeConnectionManager: NodeConnectionManager; + let taskManager: 
TaskManager; + let nodeManager: NodeManager; + + beforeEach(async () => { + basePath = path.join(dataDir, 'local'); + const keysPath = path.join(basePath, 'keys'); + keyRing = await KeyRing.createKeyRing({ + password, + keysPath, + passwordOpsLimit: keysUtils.passwordOpsLimits.min, + passwordMemLimit: keysUtils.passwordMemLimits.min, + strictMemoryLock: false, + logger: logger.getChild(KeyRing.name), + }); + const dbPath = path.join(basePath, 'db'); + db = await DB.createDB({ + dbPath, + logger: logger.getChild(DB.name), + }); + acl = await ACL.createACL({ + db, + logger: logger.getChild(ACL.name), + }); + sigchain = await Sigchain.createSigchain({ + db, + keyRing, + logger: logger.getChild(Sigchain.name), + }); + gestaltGraph = await GestaltGraph.createGestaltGraph({ + db, + acl, + logger: logger.getChild(GestaltGraph.name), + }); + nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyRing, + logger: logger.getChild(NodeGraph.name), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyRing, + tlsConfig: await testsUtils.createTLSConfig(keyRing.keyPair), + logger: logger.getChild(NodeConnectionManager.name), + connectionConnectTimeoutTime: timeoutTime, + }); + await nodeConnectionManager.start({ + agentService: {} as AgentServerManifest, + host: localHost, + }); + taskManager = await TaskManager.createTaskManager({ + db, + logger: logger.getChild(TaskManager.name), + }); + + nodeManager = new NodeManager({ + db, + keyRing, + gestaltGraph, + nodeGraph, + nodeConnectionManager, + sigchain, + taskManager, + logger: logger.getChild(NodeManager.name), + }); + await nodeManager.start(); }); - nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyRing, - logger, + afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); + await nodeManager.stop(); + await nodeConnectionManager.stop(); + await nodeGraph.stop(); + await gestaltGraph.stop(); + await sigchain.stop(); + await acl.stop(); + await db.stop(); + await 
keyRing.stop(); + await taskManager.stop(); + await fs.promises.rm(basePath, { + force: true, + recursive: true, + }); }); - sigchain = await Sigchain.createSigchain({ - db, - keyRing, - logger, + + test('stopping NodeManager should cancel all tasks', async () => { + await nodeManager.stop(); + const tasks: Array = []; + for await (const task of taskManager.getTasks('asc', true, [ + nodeManager.tasksPath, + ])) { + tasks.push(task); + } + expect(tasks.length).toEqual(0); }); - taskManager = await TaskManager.createTaskManager({ - db, - logger, + test('task handler ids are not empty', async () => { + expect(nodeManager.gcBucketHandlerId).toEqual( + 'NodeManager.gcBucketHandler', + ); + expect(nodeManager.refreshBucketHandlerId).toEqual( + 'NodeManager.refreshBucketHandler', + ); }); - - tlsConfig = await tlsTestUtils.createTLSConfig(keyRing.keyPair); + test.todo('general tests for adding new nodes'); + // Previously these tests were + // 'should add a node when bucket has room' + // 'should update a node if node exists' + // 'should not add node if bucket is full and old node is alive' + // 'should add node if bucket is full, old node is alive and force is set' + // 'should add node if bucket is full and old node is dead' + // 'should add node when an incoming connection is established' + // 'should not add nodes to full bucket if pings succeeds' + // 'should add nodes to full bucket if pings fail' + // 'should not block when bucket is full' + // 'should update deadline when updating a bucket' }); + describe('with 1 peer', () => { + let basePath: string; + let keyRing: KeyRing; + let db: DB; + let acl: ACL; + let sigchain: Sigchain; + let gestaltGraph: GestaltGraph; + let nodeGraph: NodeGraph; + let nodeConnectionManager: NodeConnectionManager; + let taskManager: TaskManager; + let nodeManager: NodeManager; - afterEach(async () => { - await taskManager.stop(); - await nodeManager?.stop(); - await nodeConnectionManager?.stop(); - await sigchain.stop(); - await 
sigchain.destroy(); - await nodeGraph.stop(); - await nodeGraph.destroy(); - await gestaltGraph.stop(); - await gestaltGraph.destroy(); - await acl.stop(); - await acl.destroy(); - await taskManager.destroy(); - await db.stop(); - await db.destroy(); - await keyRing.stop(); - await keyRing.destroy(); - - await server?.stop(); - }); + let basePathPeer: string; + let keyRingPeer: KeyRing; + let dbPeer: DB; + let aclPeer: ACL; + let sigchainPeer: Sigchain; + let gestaltGraphPeer: GestaltGraph; + let nodeGraphPeer: NodeGraph; + let nodeConnectionManagerPeer: NodeConnectionManager; + let taskManagerPeer: TaskManager; + let nodeManagerPeer: NodeManager; - test('should add a node when bucket has room', async () => { - const nodeManager = new NodeManager({ - db, - sigchain: dummySigchain, - keyRing, - gestaltGraph, - nodeGraph, - nodeConnectionManager: dummyNodeConnectionManager, - taskManager, - logger, - }); + beforeEach(async () => { + basePath = path.join(dataDir, 'local'); + const keysPath = path.join(basePath, 'keys'); + keyRing = await KeyRing.createKeyRing({ + password, + keysPath, + passwordOpsLimit: keysUtils.passwordOpsLimits.min, + passwordMemLimit: keysUtils.passwordMemLimits.min, + strictMemoryLock: false, + logger: logger.getChild(KeyRing.name), + }); + const dbPath = path.join(basePath, 'db'); + db = await DB.createDB({ + dbPath, + logger: logger.getChild(DB.name), + }); + acl = await ACL.createACL({ + db, + logger: logger.getChild(ACL.name), + }); + sigchain = await Sigchain.createSigchain({ + db, + keyRing, + logger: logger.getChild(Sigchain.name), + }); + gestaltGraph = await GestaltGraph.createGestaltGraph({ + db, + acl, + logger: logger.getChild(GestaltGraph.name), + }); + nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyRing, + logger: logger.getChild(NodeGraph.name), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyRing, + tlsConfig: await testsUtils.createTLSConfig(keyRing.keyPair), + logger: 
logger.getChild(NodeConnectionManager.name), + connectionConnectTimeoutTime: timeoutTime, + }); + await nodeConnectionManager.start({ + agentService: {} as AgentServerManifest, + host: localHost, + }); + taskManager = await TaskManager.createTaskManager({ + db, + logger: logger.getChild(TaskManager.name), + }); - await nodeManager.start(); - const localNodeId = keyRing.getNodeId(); - const bucketIndex = 100; - const nodeId = nodesTestUtils.generateNodeIdForBucket( - localNodeId, - bucketIndex, - ); - await nodeManager.setNode(nodeId, {} as NodeAddress); + nodeManager = new NodeManager({ + db, + keyRing, + gestaltGraph, + nodeGraph, + nodeConnectionManager, + sigchain, + taskManager, + logger: logger.getChild(NodeManager.name), + }); + await nodeManager.start(); - // Checking bucket - const bucket = await nodeManager.getBucket(bucketIndex); - expect(bucket).toHaveLength(1); - }); - test('should update a node if node exists', async () => { - const nodeManager = new NodeManager({ - db, - sigchain: dummySigchain, - keyRing, - gestaltGraph, - nodeGraph, - nodeConnectionManager: dummyNodeConnectionManager, - taskManager, - logger, + basePathPeer = path.join(dataDir, 'peer'); + const keysPathPeer = path.join(basePathPeer, 'keys'); + keyRingPeer = await KeyRing.createKeyRing({ + password, + keysPath: keysPathPeer, + passwordOpsLimit: keysUtils.passwordOpsLimits.min, + passwordMemLimit: keysUtils.passwordMemLimits.min, + strictMemoryLock: false, + logger: logger.getChild(KeyRing.name), + }); + const dbPathPeer = path.join(basePathPeer, 'db'); + dbPeer = await DB.createDB({ + dbPath: dbPathPeer, + logger: logger.getChild(DB.name), + }); + aclPeer = await ACL.createACL({ + db: dbPeer, + logger: logger.getChild(ACL.name), + }); + sigchainPeer = await Sigchain.createSigchain({ + db: dbPeer, + keyRing: keyRingPeer, + logger: logger.getChild(Sigchain.name), + }); + gestaltGraphPeer = await GestaltGraph.createGestaltGraph({ + db: dbPeer, + acl: aclPeer, + logger: 
logger.getChild(GestaltGraph.name), + }); + nodeGraphPeer = await NodeGraph.createNodeGraph({ + db: dbPeer, + keyRing: keyRingPeer, + logger: logger.getChild(NodeGraph.name), + }); + nodeConnectionManagerPeer = new NodeConnectionManager({ + keyRing: keyRingPeer, + tlsConfig: await testsUtils.createTLSConfig(keyRingPeer.keyPair), + logger: logger.getChild(NodeConnectionManager.name), + connectionConnectTimeoutTime: timeoutTime, + }); + await nodeConnectionManagerPeer.start({ + agentService: {} as AgentServerManifest, + host: localHost, + }); + taskManagerPeer = await TaskManager.createTaskManager({ + db: dbPeer, + logger: logger.getChild(TaskManager.name), + }); + + nodeManagerPeer = new NodeManager({ + db: dbPeer, + keyRing: keyRingPeer, + gestaltGraph: gestaltGraphPeer, + nodeGraph: nodeGraphPeer, + nodeConnectionManager: nodeConnectionManagerPeer, + sigchain: sigchainPeer, + taskManager: taskManagerPeer, + logger: logger.getChild(NodeManager.name), + }); + + await nodeManagerPeer.start(); }); - await nodeManager.start(); + afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); + await nodeManager.stop(); + await nodeConnectionManager.stop(); + await nodeGraph.stop(); + await gestaltGraph.stop(); + await sigchain.stop(); + await acl.stop(); + await db.stop(); + await keyRing.stop(); + await taskManager.stop(); + await fs.promises.rm(basePath, { + force: true, + recursive: true, + }); - const localNodeId = keyRing.getNodeId(); - const bucketIndex = 100; - const nodeId = nodesTestUtils.generateNodeIdForBucket( - localNodeId, - bucketIndex, - ); - await nodeManager.setNode(nodeId, { - host: '' as Host, - port: 11111 as Port, - scopes: ['global'], + await taskManagerPeer.stopProcessing(); + await taskManagerPeer.stopTasks(); + await nodeManagerPeer.stop(); + await nodeConnectionManagerPeer.stop(); + await nodeGraphPeer.stop(); + await gestaltGraphPeer.stop(); + await sigchainPeer.stop(); + await aclPeer.stop(); + await 
dbPeer.stop(); + await keyRingPeer.stop(); + await taskManagerPeer.stop(); + await fs.promises.rm(basePathPeer, { + force: true, + recursive: true, + }); }); - const nodeData = (await nodeGraph.getNode(nodeId))!; - // Seconds resolution so we wait more than 1 second - await sleep(1100); + describe('context functions', () => { + test('acquire Connection', async () => { + const nodeId = keyRingPeer.getNodeId(); + await nodeGraph.setNodeContactAddressData( + nodeId, + nodesUtils.nodeContactAddress([ + localHost, + nodeConnectionManagerPeer.port, + ]), + { + mode: 'direct', + connectedTime: 0, + scopes: ['global'], + }, + ); + const [resourceReleaser, nodeConnection] = + await nodeManager.acquireConnection(nodeId)(); + expect(nodeConnection).toBeInstanceOf(NodeConnection); + expect(nodeConnectionManager.hasConnection(nodeId)).toBeTrue(); + await resourceReleaser(); + }); + test('acquire Connection fails', async () => { + const nodeId = keyRingPeer.getNodeId(); + await expect(nodeManager.acquireConnection(nodeId)()).rejects.toThrow( + nodesErrors.ErrorNodeManagerConnectionFailed, + ); + }); + test('withConnF', async () => { + const nodeId = keyRingPeer.getNodeId(); + await nodeGraph.setNodeContactAddressData( + nodeId, + nodesUtils.nodeContactAddress([ + localHost, + nodeConnectionManagerPeer.port, + ]), + { + mode: 'direct', + connectedTime: 0, + scopes: ['global'], + }, + ); + + await nodeManager.withConnF(nodeId, async (conn) => { + expect(conn).toBeInstanceOf(NodeConnection); + }); + }); + test('withConnG', async () => { + const nodeId = keyRingPeer.getNodeId(); + await nodeGraph.setNodeContactAddressData( + nodeId, + nodesUtils.nodeContactAddress([ + localHost, + nodeConnectionManagerPeer.port, + ]), + { + mode: 'direct', + connectedTime: 0, + scopes: ['global'], + }, + ); - // Should update the node - await nodeManager.setNode(nodeId, { - host: '' as Host, - port: 22222 as Port, - scopes: ['global'], + const gen = nodeManager.withConnG( + nodeId, + async function* 
( + conn, + ): AsyncGenerator { + expect(conn).toBeInstanceOf(NodeConnection); + }, + ); + + for await (const _ of gen) { + // Consume until done, should not throw + } + }); + }); + describe('pinging', () => { + test('pingNode success', async () => { + const nodeId = keyRingPeer.getNodeId(); + await nodeGraph.setNodeContactAddressData( + nodeId, + nodesUtils.nodeContactAddress([ + localHost, + nodeConnectionManagerPeer.port, + ]), + { + mode: 'direct', + connectedTime: 0, + scopes: ['global'], + }, + ); + await expect( + nodeManager.pingNode(nodeId, { timer: timeoutTime }), + ).resolves.toBeDefined(); + }); + test('pingNode success with existing connection', async () => { + const nodeId = keyRingPeer.getNodeId(); + await nodeGraph.setNodeContactAddressData( + nodeId, + nodesUtils.nodeContactAddress([ + localHost, + nodeConnectionManagerPeer.port, + ]), + { + mode: 'direct', + connectedTime: 0, + scopes: ['global'], + }, + ); + await expect( + nodeManager.pingNode(nodeId, { timer: timeoutTime }), + ).resolves.toBeDefined(); + await expect( + nodeManager.pingNode(nodeId, { timer: timeoutTime }), + ).resolves.toBeDefined(); + }); + test('pingNode fail', async () => { + const nodeId = keyRingPeer.getNodeId(); + await expect( + nodeManager.pingNode(nodeId, { timer: timeoutTime }), + ).resolves.toBeUndefined(); + }); + test('pingNodeAddress success', async () => { + const nodeId = keyRingPeer.getNodeId(); + await expect( + nodeManager.pingNodeAddress( + nodeId, + localHost, + nodeConnectionManagerPeer.port, + ), + ).resolves.toBeTrue(); + }); + test('pingNodeAddress success with existing connection', async () => { + const nodeId = keyRingPeer.getNodeId(); + await expect( + nodeManager.pingNodeAddress( + nodeId, + localHost, + nodeConnectionManagerPeer.port, + ), + ).resolves.toBeTrue(); + await expect( + nodeManager.pingNodeAddress( + nodeId, + localHost, + nodeConnectionManagerPeer.port, + ), + ).resolves.toBeTrue(); + 
expect(nodeConnectionManager.connectionsActive()).toBe(1); + }); + test('pingNodeAddress fail', async () => { + const nodeId = keyRingPeer.getNodeId(); + await expect( + nodeManager.pingNodeAddress(nodeId, localHost, 50000 as Port, { + timer: timeoutTime, + }), + ).resolves.toBeFalse(); + await expect( + nodeManager.pingNodeAddress( + keyRing.getNodeId(), + localHost, + nodeConnectionManagerPeer.port, + { timer: timeoutTime }, + ), + ).resolves.toBeFalse(); + }); }); + test.todo('requestChainData'); + test.todo('claimNode'); - const newNodeData = (await nodeGraph.getNode(nodeId))!; - expect(newNodeData.address.port).not.toEqual(nodeData.address.port); - expect(newNodeData.lastUpdated).not.toEqual(nodeData.lastUpdated); + // TODO: These require mdns integration with `NodeManager`. + test.todo('findNodeByMdns'); + test.todo('findNode with mdns'); }); - test('should not add node if bucket is full and old node is alive', async () => { - const nodeManager = new NodeManager({ - db, - sigchain: dummySigchain, - keyRing, - gestaltGraph, - nodeGraph, - nodeConnectionManager: dummyNodeConnectionManager, - taskManager, - logger, - }); - await nodeManager.start(); - - const localNodeId = keyRing.getNodeId(); - const bucketIndex = 100; - // Creating 20 nodes in bucket - for (let i = 1; i <= 20; i++) { - const nodeId = nodesTestUtils.generateNodeIdForBucket( - localNodeId, - bucketIndex, - i, + describe('with peers in network', () => { + let basePath: string; + let keyRing: KeyRing; + let db: DB; + let acl: ACL; + let sigchain: Sigchain; + let gestaltGraph: GestaltGraph; + let nodeGraph: NodeGraph; + let nodeConnectionManager: NodeConnectionManager; + let taskManager: TaskManager; + let nodeManager: NodeManager; + + // Will create 6 peers forming a simple network + let ncmPeers: Array< + NCMState & { + db: DB; + keyRing: KeyRing; + nodeGraph: NodeGraph; + } + >; + async function linkConnection(a: number, b: number) { + const ncmA = ncmPeers[a]; + const ncmB = ncmPeers[b]; + 
await ncmA.nodeConnectionManager.createConnection( + [ncmB.nodeId], + localHost, + ncmB.port, ); - await nodeManager.setNode(nodeId, { port: i } as NodeAddress); } - const nodeId = nodesTestUtils.generateNodeIdForBucket( - localNodeId, - bucketIndex, - ); - // Mocking ping - mockedPingNode.mockResolvedValue(true); - const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); - const oldestNode = await nodeGraph.getNode(oldestNodeId!); - // Waiting for a second to tick over - await sleep(1500); - // Adding a new node with bucket full - await nodeManager.setNode(nodeId, { port: 55555 } as NodeAddress, true); - // Bucket still contains max nodes - const bucket = await nodeManager.getBucket(bucketIndex); - expect(bucket).toHaveLength(nodeGraph.nodeBucketLimit); - // New node was not added - const node = await nodeGraph.getNode(nodeId); - expect(node).toBeUndefined(); - // Oldest node was updated - const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); - expect(oldestNodeNew!.lastUpdated).not.toEqual(oldestNode!.lastUpdated); - }); - test('should add node if bucket is full, old node is alive and force is set', async () => { - const nodeManager = new NodeManager({ - db, - sigchain: dummySigchain, - keyRing, - gestaltGraph, - nodeGraph, - nodeConnectionManager: dummyNodeConnectionManager, - taskManager, - logger, - }); - await nodeManager.start(); - - const localNodeId = keyRing.getNodeId(); - const bucketIndex = 100; - // Creating 20 nodes in bucket - for (let i = 1; i <= 20; i++) { - const nodeId = nodesTestUtils.generateNodeIdForBucket( - localNodeId, - bucketIndex, - i, - ); - await nodeManager.setNode(nodeId, { port: i } as NodeAddress); + async function quickLinkConnection(structure: Array>) { + const linkPs: Array> = []; + for (const chain of structure) { + for (let i = 1; i < chain.length; i++) { + linkPs.push(linkConnection(chain[i - 1], chain[i])); + } + } + await Promise.all(linkPs); } - const nodeId = nodesTestUtils.generateNodeIdForBucket( 
- localNodeId, - bucketIndex, - ); - // Mocking ping - const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); - nodeManagerPingMock.mockResolvedValue(true); - const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); - // Adding a new node with bucket full - await nodeManager.setNode( - nodeId, - { port: 55555 } as NodeAddress, - undefined, - true, - ); - // Bucket still contains max nodes - const bucket = await nodeManager.getBucket(bucketIndex); - expect(bucket).toHaveLength(nodeGraph.nodeBucketLimit); - // New node was added - const node = await nodeGraph.getNode(nodeId); - expect(node).toBeDefined(); - // Oldest node was removed - const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); - expect(oldestNodeNew).toBeUndefined(); - nodeManagerPingMock.mockRestore(); - }); - test('should add node if bucket is full and old node is dead', async () => { - const nodeManager = new NodeManager({ - db, - sigchain: dummySigchain, - keyRing, - gestaltGraph, - nodeGraph, - nodeConnectionManager: dummyNodeConnectionManager, - taskManager, - logger, - }); - await nodeManager.start(); - - const localNodeId = keyRing.getNodeId(); - const bucketIndex = 100; - // Creating 20 nodes in bucket - for (let i = 1; i <= 20; i++) { - const nodeId = nodesTestUtils.generateNodeIdForBucket( - localNodeId, - bucketIndex, - i, - ); - await nodeManager.setNode(nodeId, { port: i } as NodeAddress); + + async function linkGraph(a, b) { + const ncmA = ncmPeers[a]; + const ncmB = ncmPeers[b]; + const nodeContactAddressB = nodesUtils.nodeContactAddress([ + ncmB.nodeConnectionManager.host, + ncmB.nodeConnectionManager.port, + ]); + await ncmA.nodeGraph.setNodeContact(ncmB.keyRing.getNodeId(), { + [nodeContactAddressB]: { + mode: 'direct', + connectedTime: Date.now(), + scopes: ['global'], + }, + }); } - const nodeId = nodesTestUtils.generateNodeIdForBucket( - localNodeId, - bucketIndex, - ); - // Mocking ping - const nodeManagerPingMock = 
jest.spyOn(NodeManager.prototype, 'pingNode'); - nodeManagerPingMock.mockResolvedValue(false); - const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); - // Adding a new node with bucket full - await nodeManager.setNode(nodeId, { port: 55555 } as NodeAddress, true); - // New node was added - const node = await nodeGraph.getNode(nodeId); - expect(node).toBeDefined(); - // Oldest node was removed - const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); - expect(oldestNodeNew).toBeUndefined(); - nodeManagerPingMock.mockRestore(); - }); - test('should add node when an incoming connection is established', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - nodeGraph, - tlsConfig, - logger, - }); - const nodeManager = new NodeManager({ - db, - sigchain: dummySigchain, - keyRing, - gestaltGraph, - nodeGraph, - nodeConnectionManager, - taskManager, - logger, - }); - await nodeManager.start(); - await nodeConnectionManager.start({ - host: localHost as Host, - }); - server = await PolykeyAgent.createPolykeyAgent({ - password: 'password', - options: { - nodePath: path.join(dataDir, 'server'), - agentServiceHost: localHost, - clientServiceHost: localHost, - keys: { + async function quickLinkGraph(structure: Array>) { + for (const chain of structure) { + for (let i = 1; i < chain.length; i++) { + await linkGraph(chain[i - 1], chain[i]); + await linkGraph(chain[i], chain[i - 1]); + } + } + } + + beforeEach(async () => { + basePath = path.join(dataDir, 'local'); + const keysPath = path.join(basePath, 'keys'); + keyRing = await KeyRing.createKeyRing({ + password, + keysPath, + passwordOpsLimit: keysUtils.passwordOpsLimits.min, + passwordMemLimit: keysUtils.passwordMemLimits.min, + strictMemoryLock: false, + logger: logger.getChild(KeyRing.name), + }); + const dbPath = path.join(basePath, 'db'); + db = await DB.createDB({ + dbPath, + logger: logger.getChild(DB.name), + }); + acl = await ACL.createACL({ + db, + logger: 
logger.getChild(ACL.name), + }); + sigchain = await Sigchain.createSigchain({ + db, + keyRing, + logger: logger.getChild(Sigchain.name), + }); + gestaltGraph = await GestaltGraph.createGestaltGraph({ + db, + acl, + logger: logger.getChild(GestaltGraph.name), + }); + nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyRing, + logger: logger.getChild(NodeGraph.name), + }); + nodeConnectionManager = new NodeConnectionManager({ + keyRing, + tlsConfig: await testsUtils.createTLSConfig(keyRing.keyPair), + logger: logger.getChild(NodeConnectionManager.name), + connectionConnectTimeoutTime: timeoutTime, + }); + await nodeConnectionManager.start({ + agentService: {} as AgentServerManifest, + host: localHost, + }); + taskManager = await TaskManager.createTaskManager({ + db, + logger: logger.getChild(TaskManager.name), + }); + + nodeManager = new NodeManager({ + db, + keyRing, + gestaltGraph, + nodeGraph, + nodeConnectionManager, + sigchain, + taskManager, + logger: logger.getChild(NodeManager.name), + }); + await nodeManager.start(); + + ncmPeers = []; + const createPs: Array> = []; + for (let i = 0; i < 5; i++) { + const db = await DB.createDB({ + dbPath: path.join(basePath, `db${i}`), + logger, + }); + const keyRing = await KeyRing.createKeyRing({ + keysPath: path.join(basePath, `key${i}`), + password, passwordOpsLimit: keysUtils.passwordOpsLimits.min, passwordMemLimit: keysUtils.passwordMemLimits.min, strictMemoryLock: false, - }, - }, - logger: logger, - }); - const serverNodeId = server.keyRing.getNodeId(); - const serverNodeAddress: NodeAddress = { - host: server.agentServiceHost, - port: server.agentServicePort, - scopes: ['global'], - }; - await nodeGraph.setNode(serverNodeId, serverNodeAddress); - - const expectedHost = nodeConnectionManager.host; - const expectedPort = nodeConnectionManager.port; - const expectedNodeId = keyRing.getNodeId(); - - const nodeData = await server.nodeGraph.getNode(expectedNodeId); - expect(nodeData).toBeUndefined(); - - // Now we 
want to connect to the server - await nodeConnectionManager.withConnF(serverNodeId, async () => { - // Do nothing - }); - // Wait for background logic to settle - await sleep(100); - const nodeData2 = await server.nodeGraph.getNode(expectedNodeId); - expect(nodeData2).toBeDefined(); - expect(nodeData2?.address.host).toEqual(expectedHost); - expect(nodeData2?.address.port).toEqual(expectedPort); - }); - test('should not add nodes to full bucket if pings succeeds', async () => { - const nodeManager = new NodeManager({ - db, - sigchain: dummySigchain, - keyRing, - gestaltGraph, - nodeGraph, - nodeConnectionManager: dummyNodeConnectionManager, - taskManager, - logger, - }); - await nodeManager.start(); - - const nodeId = keyRing.getNodeId(); - const address: NodeAddress = { - host: localHost as Host, - port: port as Port, - scopes: ['global'], - }; - // Let's fill a bucket - for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { - const newNode = nodesTestUtils.generateNodeIdForBucket(nodeId, 100, i); - await nodeManager.setNode(newNode, address); - } + logger, + }); + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyRing, + logger, + }); - // Helpers - const listBucket = async (bucketIndex: number) => { - const bucket = await nodeManager.getBucket(bucketIndex); - return bucket?.map(([nodeId]) => nodesUtils.encodeNodeId(nodeId)); - }; - - // Pings succeed, node not added - mockedPingNode.mockImplementation(async () => true); - const newNode = nodesTestUtils.generateNodeIdForBucket(nodeId, 100, 21); - await nodeManager.setNode(newNode, address, true); - expect(await listBucket(100)).not.toContain( - nodesUtils.encodeNodeId(newNode), - ); - }); - test('should add nodes to full bucket if pings fail', async () => { - const nodeManager = new NodeManager({ - db, - sigchain: dummySigchain, - keyRing, - gestaltGraph, - nodeGraph, - nodeConnectionManager: dummyNodeConnectionManager, - taskManager, - logger, - }); - await nodeManager.start(); - - const nodeId = 
keyRing.getNodeId(); - const address: NodeAddress = { - host: localHost as Host, - port: port as Port, - scopes: ['global'], - }; - // Let's fill a bucket - for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { - const newNode = nodesTestUtils.generateNodeIdForBucket(nodeId, 100, i); - await nodeManager.setNode(newNode, address); - } - // Wait for 2 secs for new nodes to be added with new times - await sleep(2000); - - // Helpers - const listBucket = async (bucketIndex: number) => { - const bucket = await nodeManager.getBucket(bucketIndex); - return bucket?.map(([nodeId]) => nodesUtils.encodeNodeId(nodeId)); - }; - - // Pings fail, new nodes get added - mockedPingNode.mockImplementation(async () => false); - const newNode1 = nodesTestUtils.generateNodeIdForBucket(nodeId, 100, 22); - const newNode2 = nodesTestUtils.generateNodeIdForBucket(nodeId, 100, 23); - const newNode3 = nodesTestUtils.generateNodeIdForBucket(nodeId, 100, 24); - await nodeManager.setNode(newNode1, address, true); - await nodeManager.setNode(newNode2, address, true); - await nodeManager.setNode(newNode3, address, true); - const list = await listBucket(100); - expect(list).toContain(nodesUtils.encodeNodeId(newNode1)); - expect(list).toContain(nodesUtils.encodeNodeId(newNode2)); - expect(list).toContain(nodesUtils.encodeNodeId(newNode3)); - }); - test('should not block when bucket is full', async () => { - const nodeManager = new NodeManager({ - db, - sigchain: dummySigchain, - keyRing, - gestaltGraph, - nodeGraph, - nodeConnectionManager: dummyNodeConnectionManager, - taskManager, - logger, + const peerP = nodesTestUtils + .nodeConnectionManagerFactory({ + keyRing, + createOptions: { + connectionConnectTimeoutTime: timeoutTime, + }, + startOptions: { + host: localHost, + agentService: (nodeConnectionManager) => + ({ + nodesConnectionSignalFinal: new NodesConnectionSignalFinal({ + nodeConnectionManager, + logger, + }), + nodesConnectionSignalInitial: + new NodesConnectionSignalInitial({ + 
nodeConnectionManager, + }), + nodesClosestActiveConnectionsGet: + new NodesClosestActiveConnectionsGet({ + nodeConnectionManager, + }), + nodesClosestLocalNodesGet: new NodesClosestLocalNodesGet({ + db, + nodeGraph, + }), + }) as AgentServerManifest, + }, + logger: logger.getChild(`${NodeConnectionManager.name}Peer${i}`), + }) + .then((peer) => { + ncmPeers[i] = { + ...peer, + db, + keyRing, + nodeGraph, + }; + }); + createPs.push(peerP); + } + await Promise.all(createPs); + // Sort in order of distance + const nodeDistanceCmp = nodesUtils.nodeDistanceCmpFactory( + keyRing.getNodeId(), + ); + ncmPeers.sort((a, b) => { + return nodeDistanceCmp(a.nodeId, b.nodeId); + }); }); - await nodeManager.start(); - - const nodeId = keyRing.getNodeId(); - const address: NodeAddress = { - host: localHost as Host, - port: port as Port, - scopes: ['global'], - }; - // Let's fill a bucket - for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { - const newNode = testNodesUtils.generateNodeIdForBucket(nodeId, 100, i); - await nodeManager.setNode(newNode, address); - } + afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); + await nodeManager.stop(); + await nodeConnectionManager.stop(); + await nodeGraph.stop(); + await gestaltGraph.stop(); + await sigchain.stop(); + await acl.stop(); + await db.stop(); + await keyRing.stop(); + await taskManager.stop(); + await fs.promises.rm(basePath, { + force: true, + recursive: true, + }); - // Set node does not block - const delayPing = promise(); - mockedPingNode.mockImplementation(async (_) => { - await delayPing.p; - return true; + const destroyPs: Array> = []; + for (const ncmPeer of ncmPeers) { + destroyPs.push(ncmPeer.nodeConnectionManager.stop({ force: true })); + destroyPs.push(ncmPeer.nodeGraph.stop()); + destroyPs.push(ncmPeer.keyRing.stop()); + destroyPs.push(ncmPeer.db.stop()); + } + await Promise.all(destroyPs); }); - const newNode4 = testNodesUtils.generateNodeIdForBucket(nodeId, 100, 
25); - // Set manually to non-blocking - await expect( - nodeManager.setNode(newNode4, address, false), - ).resolves.toBeUndefined(); - delayPing.resolveP(); - }); - test('should update deadline when updating a bucket', async () => { - const nodeManager = new NodeManager({ - db, - sigchain: dummySigchain, - keyRing, - gestaltGraph, - nodeGraph, - nodeConnectionManager: dummyNodeConnectionManager, - taskManager, - logger, - }); - await nodeManager.start(); - const mockRefreshBucket = jest.spyOn( - NodeManager.prototype, - 'refreshBucket', - ); - mockRefreshBucket.mockImplementation( - () => new PromiseCancellable((resolve) => resolve()), - ); - // Getting starting value - const bucketIndex = 100; - let refreshBucketTask: Task | undefined; - for await (const task of taskManager.getTasks('asc', true, [ - nodeManager.basePath, - nodeManager.refreshBucketHandlerId, - `${bucketIndex}`, - ])) { - refreshBucketTask = task; - } - if (refreshBucketTask == null) never(); - const nodeId = nodesTestUtils.generateNodeIdForBucket( - keyRing.getNodeId(), - bucketIndex, - ); - await sleep(100); - await nodeManager.setNode(nodeId, {} as NodeAddress); - // Deadline should be updated - let refreshBucketTaskUpdated: Task | undefined; - for await (const task of taskManager.getTasks('asc', true, [ - nodeManager.basePath, - nodeManager.refreshBucketHandlerId, - `${bucketIndex}`, - ])) { - refreshBucketTaskUpdated = task; - } - if (refreshBucketTaskUpdated == null) never(); - expect(refreshBucketTaskUpdated?.delay).not.toEqual( - refreshBucketTask?.delay, - ); - }); - test('refreshBucket should not throw errors when network is empty', async () => { - nodeConnectionManager = new NodeConnectionManager({ - keyRing, - nodeGraph, - tlsConfig, - logger, - }); - const nodeManager = new NodeManager({ - db, - sigchain: dummySigchain, - keyRing, - gestaltGraph, - nodeGraph, - nodeConnectionManager, - taskManager, - logger, - }); - await nodeConnectionManager.start({ - host: localHost as Host, - }); 
- await nodeManager.start(); + describe('findNode by signalled connections', () => { + test('connection found in chain graph', async () => { + // Structure is an acyclic graph + // 0 -> 1 -> 2 -> 3 -> 4 + await quickLinkConnection([[0, 1, 2, 3, 4]]); + // Creating first connection to 0; + await nodeConnectionManager.createConnection( + [ncmPeers[0].nodeId], + localHost, + ncmPeers[0].port, + ); - await expect(nodeManager.refreshBucket(100)).resolves.not.toThrow(); - }); - test('refreshBucket tasks should have spread delays', async () => { - const nodeManager = new NodeManager({ - db, - sigchain: dummySigchain, - keyRing, - gestaltGraph, - nodeGraph, - nodeConnectionManager: dummyNodeConnectionManager, - taskManager, - logger, - }); - await nodeManager.start(); + const rateLimiter = new Semaphore(3); + const result = await nodeManager.findNodeBySignal( + ncmPeers[4].nodeId, + new NodeConnectionQueue( + keyRing.getNodeId(), + ncmPeers[4].nodeId, + 20, + rateLimiter, + rateLimiter, + ), + ); + expect(result).toBeDefined(); + const [[host, port]] = result!; + expect(host).toBe(localHost); + expect(port).toBe(ncmPeers[4].nodeConnectionManager.port); + }); + test('connection found in MST graph', async () => { + // Structure is an acyclic graph + // 0 -> 1 -> 2 + // 3 -> 1 -> 4 + await quickLinkConnection([ + [0, 1, 2], + [3, 1, 4], + ]); + // Creating first connection to 0; + await nodeConnectionManager.createConnection( + [ncmPeers[0].nodeId], + localHost, + ncmPeers[0].port, + ); - const mockRefreshBucket = jest.spyOn( - NodeManager.prototype, - 'refreshBucket', - ); - mockRefreshBucket.mockImplementation( - () => new PromiseCancellable((resolve) => resolve()), - ); - await nodeManager.start(); - // Getting starting value - const startingDelay = new Set(); - for await (const task of taskManager.getTasks('asc', true, [ - 'refreshBucket', - ])) { - startingDelay.add(task.delay); - } - expect(startingDelay.size).not.toBe(1); - // Updating delays should have spread - for ( 
- let bucketIndex = 0; - bucketIndex < nodeGraph.nodeIdBits; - bucketIndex++ - ) { - await nodeManager.updateRefreshBucketDelay(bucketIndex, undefined, true); - } - const updatedDelay = new Set(); - for await (const task of taskManager.getTasks('asc', true, [ - 'refreshBucket', - ])) { - updatedDelay.add(task.delay); - } - expect(updatedDelay.size).not.toBe(1); - }); - test('Stopping nodeManager should cancel all ephemeral tasks', async () => { - const nodeManager = new NodeManager({ - db, - sigchain: dummySigchain, - keyRing, - gestaltGraph, - nodeGraph, - nodeConnectionManager: dummyNodeConnectionManager, - taskManager, - logger, - }); - await nodeManager.start(); - await taskManager.stopProcessing(); - - // Creating dummy tasks - const task1 = await taskManager.scheduleTask({ - handlerId: nodeManager.pingAndSetNodeHandlerId, - lazy: false, - path: [nodeManager.basePath], + const rateLimiter = new Semaphore(3); + const result = await nodeManager.findNodeBySignal( + ncmPeers[4].nodeId, + new NodeConnectionQueue( + keyRing.getNodeId(), + ncmPeers[4].nodeId, + 20, + rateLimiter, + rateLimiter, + ), + ); + expect(result).toBeDefined(); + const [[host, port]] = result!; + expect(host).toBe(localHost); + expect(port).toBe(ncmPeers[4].nodeConnectionManager.port); + }); + test('connection found in cyclic graph', async () => { + // Structure is a ring with a branch + // 0 -> 1 -> 2 -> 3 -> 0 + // 4 -> 2 + await quickLinkConnection([ + [0, 1, 2, 3, 0], + [4, 2], + ]); + // Creating first connection to 0; + await nodeConnectionManager.createConnection( + [ncmPeers[0].nodeId], + localHost, + ncmPeers[0].port, + ); + + const rateLimiter = new Semaphore(3); + const result = await nodeManager.findNodeBySignal( + ncmPeers[4].nodeId, + new NodeConnectionQueue( + keyRing.getNodeId(), + ncmPeers[4].nodeId, + 20, + rateLimiter, + rateLimiter, + ), + ); + expect(result).toBeDefined(); + const [[host, port]] = result!; + expect(host).toBe(localHost); + 
expect(port).toBe(ncmPeers[4].nodeConnectionManager.port); + }); + test('finding self will do exhaustive search and not find self', async () => { + // Structure is branching + // 0 -> 1 -> 2 -> 3 + // 1 -> 4 + await quickLinkConnection([ + [0, 1, 2, 3], + [1, 4], + ]); + // Creating first connection to 0; + await nodeConnectionManager.createConnection( + [ncmPeers[0].nodeId], + localHost, + ncmPeers[0].port, + ); + + const rateLimiter = new Semaphore(3); + const resultP = nodeManager.findNodeBySignal( + keyRing.getNodeId(), + new NodeConnectionQueue( + keyRing.getNodeId(), + keyRing.getNodeId(), + 20, + rateLimiter, + rateLimiter, + ), + ); + await expect(resultP).rejects.toThrow( + nodesErrors.ErrorNodeManagerFindNodeFailed, + ); + // All connections made + expect(nodeConnectionManager.connectionsActive()).toBe(5); + }); + test('finding self will hit limit and not find self', async () => { + // Structure is a chain + // 0 -> 1 -> 2 -> 3 -> 4 + await quickLinkConnection([[0, 1, 2, 3, 4]]); + // Creating first connection to 0; + await nodeConnectionManager.createConnection( + [ncmPeers[0].nodeId], + localHost, + ncmPeers[0].port, + ); + + const rateLimiter = new Semaphore(3); + const resultP = nodeManager.findNodeBySignal( + keyRing.getNodeId(), + new NodeConnectionQueue( + keyRing.getNodeId(), + keyRing.getNodeId(), + 3, + rateLimiter, + rateLimiter, + ), + ); + await expect(resultP).rejects.toThrow( + nodesErrors.ErrorNodeManagerFindNodeFailed, + ); + // All connections made + expect(nodeConnectionManager.connectionsActive()).toBe(3); + }); + // FIXME: this is a bit in-determinate right now + test.skip('connection found in two attempts', async () => { + // Structure is a chain + // 0 -> 1 -> 2 -> 3 -> 4 + await quickLinkConnection([[0, 1, 2, 3, 4]]); + // Creating first connection to 0; + await nodeConnectionManager.createConnection( + [ncmPeers[0].nodeId], + localHost, + ncmPeers[0].port, + ); + + const rateLimiter = new Semaphore(1); + const path = await 
nodeManager.findNodeBySignal( + ncmPeers[4].nodeId, + new NodeConnectionQueue( + keyRing.getNodeId(), + ncmPeers[4].nodeId, + 3, + rateLimiter, + rateLimiter, + ), + ); + expect(path).toBeUndefined(); + // Should have initial connection + 3 new ones + expect(nodeConnectionManager.connectionsActive()).toBe(3); + + // 2nd attempt continues where we left off due to existing connections + const path2 = await nodeManager.findNodeBySignal( + ncmPeers[4].nodeId, + new NodeConnectionQueue( + keyRing.getNodeId(), + ncmPeers[4].nodeId, + 3, + rateLimiter, + rateLimiter, + ), + ); + expect(path2).toBeDefined(); + expect(path2!.length).toBe(2); + }); + test.todo('handles offline nodes'); }); - const task2 = await taskManager.scheduleTask({ - handlerId: nodeManager.pingAndSetNodeHandlerId, - lazy: false, - path: [nodeManager.basePath], + describe('findNode by direct connections', () => { + test('connection found in chain graph', async () => { + // Structure is an acyclic graph + // 0 -> 1 -> 2 -> 3 -> 4 + await quickLinkGraph([[0, 1, 2, 3, 4]]); + + // Setting up entry point + const nodeContactAddressB = nodesUtils.nodeContactAddress([ + ncmPeers[0].nodeConnectionManager.host, + ncmPeers[0].nodeConnectionManager.port, + ]); + await nodeGraph.setNodeContact(ncmPeers[0].keyRing.getNodeId(), { + [nodeContactAddressB]: { + mode: 'direct', + connectedTime: Date.now(), + scopes: ['global'], + }, + }); + + const rateLimiter = new Semaphore(3); + const result = await nodeManager.findNodeByDirect( + ncmPeers[4].nodeId, + new NodeConnectionQueue( + keyRing.getNodeId(), + ncmPeers[4].nodeId, + 20, + rateLimiter, + rateLimiter, + ), + ); + expect(result).toBeDefined(); + const [[host, port]] = result!; + expect(host).toBe(localHost); + expect(port).toBe(ncmPeers[4].nodeConnectionManager.port); + }); + test('connection found in MST graph', async () => { + // Structure is an acyclic graph + // 0 -> 1 -> 2 + // 3 -> 1 -> 4 + await quickLinkGraph([ + [0, 1, 2], + [3, 1, 4], + ]); + + // 
Setting up entry point + const nodeContactAddressB = nodesUtils.nodeContactAddress([ + ncmPeers[0].nodeConnectionManager.host, + ncmPeers[0].nodeConnectionManager.port, + ]); + await nodeGraph.setNodeContact(ncmPeers[0].keyRing.getNodeId(), { + [nodeContactAddressB]: { + mode: 'direct', + connectedTime: Date.now(), + scopes: ['global'], + }, + }); + + const rateLimiter = new Semaphore(3); + const result = await nodeManager.findNodeByDirect( + ncmPeers[4].nodeId, + new NodeConnectionQueue( + keyRing.getNodeId(), + ncmPeers[4].nodeId, + 20, + rateLimiter, + rateLimiter, + ), + ); + expect(result).toBeDefined(); + const [[host, port]] = result!; + expect(host).toBe(localHost); + expect(port).toBe(ncmPeers[4].nodeConnectionManager.port); + }); + test('connection found in cyclic graph', async () => { + // Structure is an acyclic graph + // 0 -> 1 -> 2 -> 3 -> 0 + // 4 -> 2 + await quickLinkGraph([ + [0, 1, 2, 3, 0], + [4, 2], + ]); + + // Setting up entry point + const nodeContactAddressB = nodesUtils.nodeContactAddress([ + ncmPeers[0].nodeConnectionManager.host, + ncmPeers[0].nodeConnectionManager.port, + ]); + await nodeGraph.setNodeContact(ncmPeers[0].keyRing.getNodeId(), { + [nodeContactAddressB]: { + mode: 'direct', + connectedTime: Date.now(), + scopes: ['global'], + }, + }); + + const rateLimiter = new Semaphore(3); + const result = await nodeManager.findNodeByDirect( + ncmPeers[4].nodeId, + new NodeConnectionQueue( + keyRing.getNodeId(), + ncmPeers[4].nodeId, + 20, + rateLimiter, + rateLimiter, + ), + ); + expect(result).toBeDefined(); + const [[host, port]] = result!; + expect(host).toBe(localHost); + expect(port).toBe(ncmPeers[4].nodeConnectionManager.port); + }); + test('finding self will do exhaustive search and not find self', async () => { + // Structure is an acyclic graph + // 0 -> 1 -> 2 -> 3 + // 1 -> 4 + await quickLinkGraph([ + [0, 1, 2, 3], + [1, 4], + ]); + + // Setting up entry point + const nodeContactAddressB = nodesUtils.nodeContactAddress([ + 
ncmPeers[0].nodeConnectionManager.host, + ncmPeers[0].nodeConnectionManager.port, + ]); + await nodeGraph.setNodeContact(ncmPeers[0].keyRing.getNodeId(), { + [nodeContactAddressB]: { + mode: 'direct', + connectedTime: Date.now(), + scopes: ['global'], + }, + }); + + const rateLimiter = new Semaphore(3); + const resultP = nodeManager.findNodeByDirect( + keyRing.getNodeId(), + new NodeConnectionQueue( + keyRing.getNodeId(), + keyRing.getNodeId(), + 20, + rateLimiter, + rateLimiter, + ), + ); + await expect(resultP).rejects.toThrow( + nodesErrors.ErrorNodeManagerFindNodeFailed, + ); + // All connections made + expect(nodeConnectionManager.connectionsActive()).toBe(5); + }); + test('finding self will hit limit and not find self', async () => { + // Structure is an acyclic graph + // 0 -> 1 -> 2 -> 3 -> 4 + await quickLinkGraph([[0, 1, 2, 3, 4]]); + + // Setting up entry point + const nodeContactAddressB = nodesUtils.nodeContactAddress([ + ncmPeers[0].nodeConnectionManager.host, + ncmPeers[0].nodeConnectionManager.port, + ]); + await nodeGraph.setNodeContact(ncmPeers[0].keyRing.getNodeId(), { + [nodeContactAddressB]: { + mode: 'direct', + connectedTime: Date.now(), + scopes: ['global'], + }, + }); + + const rateLimiter = new Semaphore(3); + const resultP = nodeManager.findNodeByDirect( + keyRing.getNodeId(), + new NodeConnectionQueue( + keyRing.getNodeId(), + keyRing.getNodeId(), + 20, + rateLimiter, + rateLimiter, + ), + ); + await expect(resultP).rejects.toThrow( + nodesErrors.ErrorNodeManagerFindNodeFailed, + ); + // All connections made + expect(nodeConnectionManager.connectionsActive()).toBe(5); + }); + // FIXME: needs to store made connections in nodeGraph for this to work + test.skip('connection found in two attempts', async () => { + // Structure is an acyclic graph + // 0 -> 1 -> 2 -> 3 -> 4 + await quickLinkGraph([[0, 1, 2, 3, 4]]); + + // Setting up entry point + const nodeContactAddressB = nodesUtils.nodeContactAddress([ + 
ncmPeers[0].nodeConnectionManager.host, + ncmPeers[0].nodeConnectionManager.port, + ]); + await nodeGraph.setNodeContact(ncmPeers[0].keyRing.getNodeId(), { + [nodeContactAddressB]: { + mode: 'direct', + connectedTime: Date.now(), + scopes: ['global'], + }, + }); + + const rateLimiter = new Semaphore(3); + const result1 = await nodeManager.findNodeByDirect( + ncmPeers[4].nodeId, + new NodeConnectionQueue( + keyRing.getNodeId(), + ncmPeers[4].nodeId, + 3, + rateLimiter, + rateLimiter, + ), + ); + expect(result1).toBeUndefined(); + // All connections made + expect(nodeConnectionManager.connectionsActive()).toBe(4); + + const result2 = await nodeManager.findNodeByDirect( + ncmPeers[4].nodeId, + new NodeConnectionQueue( + keyRing.getNodeId(), + ncmPeers[4].nodeId, + 3, + rateLimiter, + rateLimiter, + ), + ); + expect(result2).toBeDefined(); + // All connections made + expect(nodeConnectionManager.connectionsActive()).toBe(5); + }); + test.todo('handles offline nodes'); }); + describe('findNode by both', () => { + test('connection found in chain graph', async () => { + // Structure is an acyclic graph + // connections + // 0 -> 1, 2 -> 3 + // graph links + // 1 -> 2, 3 -> 4 + await quickLinkConnection([ + [0, 1], + [2, 3], + ]); + await quickLinkGraph([ + [1, 2], + [3, 4], + ]); + // Creating first connection to 0; + await nodeConnectionManager.createConnection( + [ncmPeers[0].nodeId], + localHost, + ncmPeers[0].port, + ); - // Stopping nodeManager should cancel any nodeManager tasks - await nodeManager.stop(); - const tasks: Array = []; - for await (const task of taskManager.getTasks('asc', true, [ - nodeManager.basePath, - ])) { - tasks.push(task); - } - expect(tasks.length).toEqual(0); - await expect(task1.promise()).toReject(); - await expect(task2.promise()).toReject(); - }); - test('Should have unique HandlerIds', async () => { - const nodeManager = new NodeManager({ - db, - sigchain: dummySigchain, - keyRing, - gestaltGraph, - nodeGraph, - nodeConnectionManager: 
dummyNodeConnectionManager, - taskManager, - logger, + const result = await nodeManager.findNode(ncmPeers[4].nodeId); + expect(result).toMatchObject([ + [localHost, ncmPeers[4].nodeConnectionManager.port], + { + mode: 'direct', + connectedTime: expect.any(Number), + scopes: expect.any(Array), + }, + ]); + }); + test('connection found with shortcut', async () => { + // Structure is an acyclic graph + // connections + // 0 -> 1 -> 2 -> 3 + // graph links + // 0 -> 4 + await quickLinkConnection([[0, 1, 2, 3]]); + await quickLinkGraph([[0, 4]]); + // Creating first connection to 0; + await nodeConnectionManager.createConnection( + [ncmPeers[0].nodeId], + localHost, + ncmPeers[0].port, + ); + + const result = await nodeManager.findNode(ncmPeers[4].nodeId); + expect(result).toMatchObject([ + [localHost, ncmPeers[4].nodeConnectionManager.port], + { + mode: 'direct', + connectedTime: expect.any(Number), + scopes: expect.any(Array), + }, + ]); + }); + test.todo('handles offline nodes'); }); - // This is a sanity check for a previous bug with SWC decorators causing Thing.name to be '' - expect(nodeManager.gcBucketHandlerId).not.toEqual( - nodeManager.refreshBucketHandlerId, - ); - expect(nodeManager.gcBucketHandlerId).not.toEqual( - nodeManager.pingAndSetNodeHandlerId, - ); - expect(nodeManager.refreshBucketHandlerId).not.toEqual( - nodeManager.pingAndSetNodeHandlerId, - ); + test.todo('network entry with syncNodeGraph'); + test.todo('network entry with syncNodeGraph handles offline nodes'); + test.todo('refresh buckets'); + test.todo('nodeGraph entry is updated when connection is made'); }); }); diff --git a/tests/nodes/agent/handlers/nodesClaimsGet.test.ts b/tests/nodes/agent/handlers/nodesClaimsGet.test.ts index bb0499ff8..354bef49b 100644 --- a/tests/nodes/agent/handlers/nodesClaimsGet.test.ts +++ b/tests/nodes/agent/handlers/nodesClaimsGet.test.ts @@ -23,7 +23,6 @@ describe('nodesClaimsGet', () => { new StreamHandler(), ]); const password = 'password'; - const crypto = 
tlsTestsUtils.createCrypto(); const localHost = '127.0.0.1'; let dataDir: string; @@ -86,10 +85,7 @@ describe('nodesClaimsGet', () => { cert: tlsConfig.certChainPem, verifyPeer: false, }, - crypto: { - key: keysUtils.generateKey(), - ops: crypto, - }, + crypto: nodesUtils.quicServerCrypto, logger, }); const handleStream = async ( @@ -149,9 +145,7 @@ describe('nodesClaimsGet', () => { logger, }); quicClient = await QUICClient.createQUICClient({ - crypto: { - ops: crypto, - }, + crypto: nodesUtils.quicClientCrypto, config: { verifyPeer: false, }, diff --git a/tests/nodes/agent/handlers/nodesClosestActiveConnectionsGet.test.ts b/tests/nodes/agent/handlers/nodesClosestActiveConnectionsGet.test.ts new file mode 100644 index 000000000..9789b2be1 --- /dev/null +++ b/tests/nodes/agent/handlers/nodesClosestActiveConnectionsGet.test.ts @@ -0,0 +1,156 @@ +import type { Host, Port } from '@/network/types'; +import type { Timer } from '@matrixai/timer'; +import type KeyRing from '@/keys/KeyRing'; +import type { NodeId, NodeIdString } from '@/ids'; +import type { AgentServerManifest } from '@/nodes/agent/handlers'; +import type { NodeConnection } from '@/nodes'; +import type { ActiveConnectionDataMessage } from '@/nodes/agent/types'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import * as keysUtils from '@/keys/utils'; +import NodesClosestActiveConnectionsGet from '@/nodes/agent/handlers/NodesClosestActiveConnectionsGet'; +import * as nodesUtils from '@/nodes/utils'; +import * as testsUtils from '../../../utils'; +import NodeConnectionManager from '../../../../src/nodes/NodeConnectionManager'; + +describe('nodesClosestLocalNode', () => { + const logger = new Logger('nodesClosestLocalNode test', LogLevel.WARN, [ + new StreamHandler(), + ]); + const localHost = '127.0.0.1' as Host; + const timeoutTime = 300; + + let nodeIdLocal: NodeId; + let keyRingDummyLocal: KeyRing; + let nodeConnectionManagerLocal: NodeConnectionManager; + + let nodeIdPeer1: NodeId; 
+ let keyRingDummyPeer1: KeyRing; + let nodeConnectionManagerPeer1: NodeConnectionManager; + let portPeer1: Port; + + beforeEach(async () => { + const keyPairLocal = keysUtils.generateKeyPair(); + nodeIdLocal = keysUtils.publicKeyToNodeId(keyPairLocal.publicKey); + const tlsConfigLocal = await testsUtils.createTLSConfig(keyPairLocal); + keyRingDummyLocal = { + getNodeId: () => nodeIdLocal, + keyPair: keyPairLocal, + } as KeyRing; + nodeConnectionManagerLocal = new NodeConnectionManager({ + keyRing: keyRingDummyLocal, + logger: logger.getChild(`${NodeConnectionManager.name}Local`), + tlsConfig: tlsConfigLocal, + connectionIdleTimeoutTimeMin: 1000, + connectionIdleTimeoutTimeScale: 0, + connectionConnectTimeoutTime: timeoutTime, + }); + + const keyPairPeer1 = keysUtils.generateKeyPair(); + nodeIdPeer1 = keysUtils.publicKeyToNodeId(keyPairPeer1.publicKey); + const tlsConfigPeer1 = await testsUtils.createTLSConfig(keyPairPeer1); + keyRingDummyPeer1 = { + getNodeId: () => nodeIdPeer1, + keyPair: keyPairPeer1, + } as KeyRing; + nodeConnectionManagerPeer1 = new NodeConnectionManager({ + keyRing: keyRingDummyPeer1, + logger: logger.getChild(`${NodeConnectionManager.name}Peer1`), + tlsConfig: tlsConfigPeer1, + connectionConnectTimeoutTime: timeoutTime, + }); + + await Promise.all([ + nodeConnectionManagerLocal.start({ + agentService: {} as AgentServerManifest, + host: localHost, + }), + nodeConnectionManagerPeer1.start({ + agentService: { + nodesClosestActiveConnectionsGet: + new NodesClosestActiveConnectionsGet({ + nodeConnectionManager: nodeConnectionManagerPeer1, + }), + } as AgentServerManifest, + host: localHost, + }), + ]); + portPeer1 = nodeConnectionManagerPeer1.port; + }); + afterEach(async () => { + await nodeConnectionManagerLocal.stop({ force: true }); + await nodeConnectionManagerPeer1.stop({ force: true }); + }); + + test('should get closest active nodes', async () => { + // Need to mock sone nodes + const connection = await 
nodeConnectionManagerLocal.createConnection( + [nodeIdPeer1], + localHost, + portPeer1, + ); + + // Let's add some fake data. + // @ts-ignore: kidnap protected property + const existingConnections = nodeConnectionManagerPeer1.connections; + const dummyConnections: Map< + NodeIdString, + { + activeConnection: string; + connections: Record< + string, + { + connection: NodeConnection; + timer: Timer | null; + usageCount: number; + } + >; + } + > = new Map(); + // @ts-ignore: replace existing connections with dummy + nodeConnectionManagerPeer1.connections = dummyConnections; + + const targetNodeId = testsUtils.generateRandomNodeId(); + // Create some Ids in order of furthest first. + const dummyNodeIds: Array = []; + for (let i = 255; i >= 0; i -= 5) { + const nodeId = nodesUtils.generateRandomNodeIdForBucket(targetNodeId, i); + dummyNodeIds.unshift(nodeId); + const nodeIdString = nodeId.toString() as NodeIdString; + const connectionId = `connectionId-${i}`; + const entry = { + activeConnection: connectionId, + connections: { + [connectionId]: { + connection: { + connectionId, + host: localHost, + port: i, + destroy: () => {}, + } as NodeConnection, + timer: null, + usageCount: 0, + }, + }, + }; + dummyConnections.set(nodeIdString, entry); + } + + const resultStream = + await connection.rpcClient.methods.nodesClosestActiveConnectionsGet({ + nodeIdEncoded: nodesUtils.encodeNodeId(targetNodeId), + }); + const results: Array = []; + for await (const result of resultStream) { + results.push(result); + } + + // @ts-ignore: restore existing connections + nodeConnectionManagerPeer1.connections = existingConnections; + + expect(results).toHaveLength(20); + // Nodes should be in order of closest first + for (let i = 0; i < results.length; i++) { + expect(results[i].nodeId).toBe(nodesUtils.encodeNodeId(dummyNodeIds[i])); + } + }); +}); diff --git a/tests/nodes/agent/handlers/nodesClosestLocalNode.test.ts b/tests/nodes/agent/handlers/nodesClosestLocalNode.test.ts index 
174c1afe9..dfe99b6d0 100644 --- a/tests/nodes/agent/handlers/nodesClosestLocalNode.test.ts +++ b/tests/nodes/agent/handlers/nodesClosestLocalNode.test.ts @@ -22,7 +22,6 @@ describe('nodesClosestLocalNode', () => { new StreamHandler(), ]); const password = 'password'; - const crypto = tlsTestsUtils.createCrypto(); const localHost = '127.0.0.1'; let dataDir: string; @@ -85,10 +84,7 @@ describe('nodesClosestLocalNode', () => { cert: tlsConfig.certChainPem, verifyPeer: false, }, - crypto: { - key: keysUtils.generateKey(), - ops: crypto, - }, + crypto: nodesUtils.quicServerCrypto, logger, }); const handleStream = async ( @@ -148,9 +144,7 @@ describe('nodesClosestLocalNode', () => { logger, }); quicClient = await QUICClient.createQUICClient({ - crypto: { - ops: crypto, - }, + crypto: nodesUtils.quicClientCrypto, config: { verifyPeer: false, }, @@ -173,11 +167,15 @@ describe('nodesClosestLocalNode', () => { const nodes: Array = []; for (let i = 0; i < 10; i++) { const nodeId = testNodesUtils.generateRandomNodeId(); - await nodeGraph.setNode(nodeId, { - host: 'localhost' as Host, - port: 55555 as Port, - scopes: ['local'], - }); + await nodeGraph.setNodeContactAddressData( + nodeId, + ['localhost' as Host, 55555 as Port], + { + mode: 'direct', + connectedTime: Date.now(), + scopes: ['global'], + }, + ); nodes.push(nodesUtils.encodeNodeId(nodeId)); } const nodeIdEncoded = nodesUtils.encodeNodeId( diff --git a/tests/nodes/agent/handlers/nodesConnectionSignalFinal.test.ts b/tests/nodes/agent/handlers/nodesConnectionSignalFinal.test.ts index e334b27e5..d8e861a37 100644 --- a/tests/nodes/agent/handlers/nodesConnectionSignalFinal.test.ts +++ b/tests/nodes/agent/handlers/nodesConnectionSignalFinal.test.ts @@ -14,7 +14,6 @@ describe('nodesHolePunchRequest', () => { const logger = new Logger('nodesHolePunchRequest test', LogLevel.WARN, [ new StreamHandler(), ]); - const crypto = tlsTestsUtils.createCrypto(); const localHost = '127.0.0.1'; let keyPair: KeyPair; @@ -58,10 +57,7 @@ 
describe('nodesHolePunchRequest', () => { verifyPeer: true, verifyCallback: async () => undefined, }, - crypto: { - key: keysUtils.generateKey(), - ops: crypto, - }, + crypto: nodesUtils.quicServerCrypto, logger, }); const handleStream = async ( @@ -121,9 +117,7 @@ describe('nodesHolePunchRequest', () => { logger, }); quicClient = await QUICClient.createQUICClient({ - crypto: { - ops: crypto, - }, + crypto: nodesUtils.quicClientCrypto, config: { key: tlsConfigClient.keyPrivatePem, cert: tlsConfigClient.certChainPem, diff --git a/tests/nodes/agent/handlers/nodesConnectionSignalInitial.test.ts b/tests/nodes/agent/handlers/nodesConnectionSignalInitial.test.ts index 01bd71a79..d3880af2d 100644 --- a/tests/nodes/agent/handlers/nodesConnectionSignalInitial.test.ts +++ b/tests/nodes/agent/handlers/nodesConnectionSignalInitial.test.ts @@ -14,7 +14,6 @@ describe('nodesHolePunchSignal', () => { const logger = new Logger('nodesHolePunchSignal test', LogLevel.WARN, [ new StreamHandler(), ]); - const crypto = tlsTestsUtils.createCrypto(); const localHost = '127.0.0.1'; let keyPair: KeyPair; @@ -57,10 +56,7 @@ describe('nodesHolePunchSignal', () => { verifyPeer: true, verifyCallback: async () => undefined, }, - crypto: { - key: keysUtils.generateKey(), - ops: crypto, - }, + crypto: nodesUtils.quicServerCrypto, logger, }); const handleStream = async ( @@ -120,9 +116,7 @@ describe('nodesHolePunchSignal', () => { logger, }); quicClient = await QUICClient.createQUICClient({ - crypto: { - ops: crypto, - }, + crypto: nodesUtils.quicClientCrypto, config: { key: tlsConfigClient.keyPrivatePem, cert: tlsConfigClient.certChainPem, diff --git a/tests/nodes/agent/handlers/nodesCrossSignClaim.test.ts b/tests/nodes/agent/handlers/nodesCrossSignClaim.test.ts index 83b5f43fc..5fb01045b 100644 --- a/tests/nodes/agent/handlers/nodesCrossSignClaim.test.ts +++ b/tests/nodes/agent/handlers/nodesCrossSignClaim.test.ts @@ -32,7 +32,6 @@ describe('nodesCrossSignClaim', () => { new StreamHandler(), ]); 
const password = 'password'; - const crypto = tlsTestsUtils.createCrypto(); const localHost = '127.0.0.1'; let dataDir: string; @@ -137,10 +136,7 @@ describe('nodesCrossSignClaim', () => { return undefined; }, }, - crypto: { - key: keysUtils.generateKey(), - ops: crypto, - }, + crypto: nodesUtils.quicServerCrypto, logger, }); const handleStream = async ( @@ -203,9 +199,7 @@ describe('nodesCrossSignClaim', () => { localNodeId = keysUtils.publicKeyToNodeId(clientKeyPair.publicKey); const tlsConfigClient = await tlsTestsUtils.createTLSConfig(clientKeyPair); quicClient = await QUICClient.createQUICClient({ - crypto: { - ops: crypto, - }, + crypto: nodesUtils.quicClientCrypto, config: { key: tlsConfigClient.keyPrivatePem, cert: tlsConfigClient.certChainPem, diff --git a/tests/nodes/agent/handlers/notificationsSend.test.ts b/tests/nodes/agent/handlers/notificationsSend.test.ts index e8d215fe6..406cccd1e 100644 --- a/tests/nodes/agent/handlers/notificationsSend.test.ts +++ b/tests/nodes/agent/handlers/notificationsSend.test.ts @@ -2,6 +2,7 @@ import type { Notification, SignedNotification } from '@/notifications/types'; import type { NodeId } from '@/ids'; import type GestaltGraph from '@/gestalts/GestaltGraph'; import type { Host } from '@/network/types'; +import type { AgentServerManifest } from '@/nodes/agent/handlers'; import fs from 'fs'; import path from 'path'; import os from 'os'; @@ -34,7 +35,6 @@ describe('notificationsSend', () => { new StreamHandler(), ]); const password = 'password'; - const crypto = tlsTestsUtils.createCrypto(); const localHost = '127.0.0.1' as Host; let dataDir: string; @@ -122,9 +122,9 @@ describe('notificationsSend', () => { nodeConnectionManager = new NodeConnectionManager({ tlsConfig: tlsConfigClient, keyRing, - nodeGraph, connectionConnectTimeoutTime: 2000, - connectionIdleTimeoutTime: 2000, + connectionIdleTimeoutTimeMin: 2000, + connectionIdleTimeoutTimeScale: 0, logger: logger.getChild('NodeConnectionManager'), }); nodeManager = new 
NodeManager({ @@ -138,14 +138,16 @@ describe('notificationsSend', () => { logger, }); await nodeManager.start(); - await nodeConnectionManager.start({ host: localHost }); + await nodeConnectionManager.start({ + host: localHost, + agentService: {} as AgentServerManifest, + }); await taskManager.startProcessing(); notificationsManager = await NotificationsManager.createNotificationsManager({ db, keyRing, acl, - nodeConnectionManager, nodeManager, logger, }); @@ -173,10 +175,7 @@ describe('notificationsSend', () => { return undefined; }, }, - crypto: { - key: keysUtils.generateKey(), - ops: crypto, - }, + crypto: nodesUtils.quicServerCrypto, logger, }); const handleStream = async ( @@ -236,9 +235,7 @@ describe('notificationsSend', () => { logger, }); quicClient = await QUICClient.createQUICClient({ - crypto: { - ops: crypto, - }, + crypto: nodesUtils.quicClientCrypto, config: { key: tlsConfigClient.keyPrivatePem, cert: tlsConfigClient.certChainPem, diff --git a/tests/nodes/utils.test.ts b/tests/nodes/utils.test.ts index a0fc6a95e..2fe25ddf2 100644 --- a/tests/nodes/utils.test.ts +++ b/tests/nodes/utils.test.ts @@ -161,7 +161,7 @@ describe('nodes/utils', () => { const lastUpdated = utils.getUnixtime(); const nodeId = testNodesUtils.generateRandomNodeId(); const nodeIdKey = nodesUtils.bucketDbKey(nodeId); - const lastUpdatedKey = nodesUtils.lastUpdatedKey(lastUpdated); + const lastUpdatedKey = nodesUtils.connectedKey(lastUpdated); data.push({ bucketIndex, bucketKey, diff --git a/tests/nodes/utils.ts b/tests/nodes/utils.ts index 40fcb9646..cb88f3ee2 100644 --- a/tests/nodes/utils.ts +++ b/tests/nodes/utils.ts @@ -1,10 +1,22 @@ -import type { NodeId } from '@/nodes/types'; +import type { + NodeAddressScope, + NodeContactAddressData, + NodeId, +} from '@/nodes/types'; import type PolykeyAgent from '@/PolykeyAgent'; +import type Logger from '@matrixai/logger'; +import type { KeyRing } from '@/keys'; +import type { Host, Port } from '@/network/types'; +import type { 
AgentServerManifest } from '@/nodes/agent/handlers'; import { webcrypto } from 'crypto'; import { IdInternal } from '@matrixai/id'; import * as fc from 'fast-check'; import * as keysUtils from '@/keys/utils'; -import { bigInt2Bytes } from '@/utils'; +import * as utils from '@/utils'; +import * as nodesUtils from '@/nodes/utils'; +import { hostArb, hostnameArb, portArb } from '../network/utils'; +import NodeConnectionManager from '../../src/nodes/NodeConnectionManager'; +import * as testsUtils from '../utils'; /** * Generate random `NodeId` @@ -24,6 +36,14 @@ function generateRandomNodeId(readable: boolean = false): NodeId { } } +/** + * Generates a random unix timestamp between 0 and now. + */ +function generateRandomUnixtime() { + const now = utils.getUnixtime() + 1; + return Math.random() * (now - 0) + now; +} + /** * Generate a deterministic NodeId for a specific bucket given an existing NodeId * This requires solving the bucket index (`i`) and distance equation: @@ -49,7 +69,7 @@ function generateNodeIdForBucket( throw new RangeError('bucketOffset is beyond bucket size'); } // Offset position within the bucket - const distance = bigInt2Bytes( + const distance = utils.bigInt2Bytes( lowerBoundDistance + BigInt(bucketOffset), nodeId.byteLength, ); @@ -70,22 +90,31 @@ function generateNodeIdForBucket( */ async function nodesConnect(localNode: PolykeyAgent, remoteNode: PolykeyAgent) { // Add remote node's details to local node - await localNode.nodeManager.setNode(remoteNode.keyRing.getNodeId(), { - host: remoteNode.agentServiceHost, - port: remoteNode.agentServicePort, - scopes: ['global'], - }); + await localNode.nodeManager.setNode( + remoteNode.keyRing.getNodeId(), + [remoteNode.agentServiceHost, remoteNode.agentServicePort], + { + mode: 'direct', + connectedTime: Date.now(), + scopes: ['local'], + }, + ); // Add local node's details to remote node - await remoteNode.nodeManager.setNode(localNode.keyRing.getNodeId(), { - host: localNode.agentServiceHost, - port: 
localNode.agentServicePort, - scopes: ['global'], - }); + await remoteNode.nodeManager.setNode( + localNode.keyRing.getNodeId(), + [localNode.agentServiceHost, localNode.agentServicePort], + { + mode: 'direct', + connectedTime: Date.now(), + scopes: ['local'], + }, + ); } const nodeIdArb = fc .int8Array({ minLength: 32, maxLength: 32 }) - .map((value) => IdInternal.fromBuffer(Buffer.from(value))); + .map((value) => IdInternal.fromBuffer(Buffer.from(value))) + .noShrink(); const nodeIdArrayArb = (length: number) => fc.array(nodeIdArb, { maxLength: length, minLength: length }).noShrink(); @@ -103,6 +132,44 @@ const uniqueNodeIdArb = (length: number) => return false; }); +const nodeAddressArb = fc.tuple(fc.oneof(hostArb, hostnameArb), portArb); + +const nodeContactAddressArb = nodeAddressArb.map((value) => + nodesUtils.nodeContactAddress(value), +); + +const scopeArb = fc.constantFrom( + 'global', + 'local', +) as fc.Arbitrary; + +const scopesArb = fc.uniqueArray(scopeArb); + +const nodeContactAddressDataArb = fc.record({ + mode: fc.constantFrom('direct', 'signal', 'relay'), + connectedTime: fc.integer({ min: 0 }), + scopes: scopesArb, +}) as fc.Arbitrary; + +const nodeContactPairArb = fc.record({ + nodeContactAddress: nodeContactAddressArb, + nodeContactAddressData: nodeContactAddressDataArb, +}); + +const nodeContactArb = fc + .dictionary(nodeContactAddressArb, nodeContactAddressDataArb, { + minKeys: 1, + maxKeys: 5, + }) + .noShrink(); + +const nodeIdContactPairArb = fc + .record({ + nodeId: nodeIdArb, + nodeContact: nodeContactArb, + }) + .noShrink(); + /** * Signs using the 256-bit HMAC key * Web Crypto has to use the `CryptoKey` type. 
@@ -172,14 +239,100 @@ function createReasonConverters() { }; } +type NCMState = { + nodeId: NodeId; + nodeConnectionManager: NodeConnectionManager; + port: Port; +}; + +async function nodeConnectionManagerFactory({ + keyRing, + createOptions: { + connectionFindConcurrencyLimit, + connectionFindLocalTimeoutTime, + connectionIdleTimeoutTimeMin, + connectionIdleTimeoutTimeScale, + connectionConnectTimeoutTime, + connectionKeepAliveTimeoutTime, + connectionKeepAliveIntervalTime, + connectionHolePunchIntervalTime, + rpcParserBufferSize, + rpcCallTimeoutTime, + } = {}, + startOptions: { host, port, agentService }, + logger, +}: { + keyRing: KeyRing; + createOptions?: { + connectionFindConcurrencyLimit?: number; + connectionFindLocalTimeoutTime?: number; + connectionIdleTimeoutTimeMin?: number; + connectionIdleTimeoutTimeScale?: number; + connectionConnectTimeoutTime?: number; + connectionKeepAliveTimeoutTime?: number; + connectionKeepAliveIntervalTime?: number; + connectionHolePunchIntervalTime?: number; + rpcParserBufferSize?: number; + rpcCallTimeoutTime?: number; + }; + startOptions: { + host?: Host; + port?: Port; + agentService: (nodeConnectionManager) => AgentServerManifest; + }; + logger: Logger; +}): Promise { + const nodeId = keyRing.getNodeId(); + const tlsConfig = await testsUtils.createTLSConfig(keyRing.keyPair); + const nodeConnectionManager = new NodeConnectionManager({ + keyRing: keyRing, + logger: logger, + tlsConfig: tlsConfig, + connectionFindConcurrencyLimit, + connectionFindLocalTimeoutTime, + connectionIdleTimeoutTimeMin, + connectionIdleTimeoutTimeScale, + connectionConnectTimeoutTime, + connectionKeepAliveTimeoutTime, + connectionKeepAliveIntervalTime, + connectionHolePunchIntervalTime, + rpcParserBufferSize, + rpcCallTimeoutTime, + }); + + await nodeConnectionManager.start({ + agentService: agentService(nodeConnectionManager), + host, + port, + }); + + return { + nodeId, + nodeConnectionManager, + port: nodeConnectionManager.port, + }; +} + 
+export type { NCMState }; + export { generateRandomNodeId, + generateRandomUnixtime, generateNodeIdForBucket, nodesConnect, nodeIdArb, nodeIdArrayArb, uniqueNodeIdArb, + nodeAddressArb, + nodeContactAddressArb, + scopeArb, + scopesArb, + nodeContactAddressDataArb, + nodeContactPairArb, + nodeContactArb, + nodeIdContactPairArb, sign, verify, createReasonConverters, + nodeConnectionManagerFactory, }; diff --git a/tests/notifications/NotificationsManager.test.ts b/tests/notifications/NotificationsManager.test.ts index 57981c73d..3a39cd9d6 100644 --- a/tests/notifications/NotificationsManager.test.ts +++ b/tests/notifications/NotificationsManager.test.ts @@ -4,6 +4,7 @@ import type { VaultActions, VaultName } from '@/vaults/types'; import type { Notification, NotificationData } from '@/notifications/types'; import type { Key } from '@/keys/types'; import type GestaltGraph from '@/gestalts/GestaltGraph'; +import type { AgentServerManifest } from '@/nodes/agent/handlers'; import fs from 'fs'; import os from 'os'; import path from 'path'; @@ -112,7 +113,6 @@ describe('NotificationsManager', () => { }); const tlsConfig = await tlsTestsUtils.createTLSConfig(keyRing.keyPair); nodeConnectionManager = new NodeConnectionManager({ - nodeGraph, keyRing, tlsConfig, logger, @@ -128,7 +128,10 @@ describe('NotificationsManager', () => { logger, }); await nodeManager.start(); - await nodeConnectionManager.start({ host: localhost as Host }); + await nodeConnectionManager.start({ + host: localhost as Host, + agentService: {} as AgentServerManifest, + }); await taskManager.start(); // Set up node for receiving notifications receiver = await PolykeyAgent.createPolykeyAgent({ @@ -145,11 +148,15 @@ describe('NotificationsManager', () => { }, logger, }); - await nodeGraph.setNode(receiver.keyRing.getNodeId(), { - host: receiver.agentServiceHost, - port: receiver.agentServicePort, - scopes: ['global'], - }); + await nodeGraph.setNodeContactAddressData( + receiver.keyRing.getNodeId(), + 
[receiver.agentServiceHost, receiver.agentServicePort], + { + mode: 'direct', + connectedTime: 0, + scopes: ['global'], + }, + ); }, globalThis.defaultTimeout); afterEach(async () => { await taskManager.stopProcessing(); @@ -173,7 +180,6 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger, @@ -200,7 +206,6 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger, @@ -264,7 +269,6 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger, @@ -318,7 +322,6 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger, @@ -385,7 +388,6 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger, @@ -425,7 +427,6 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger, @@ -461,7 +462,6 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger, @@ -521,7 +521,6 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger, @@ -580,7 +579,6 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger, @@ -638,7 +636,6 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl, db, - 
nodeConnectionManager, nodeManager, keyRing, logger, @@ -699,7 +696,6 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, messageCap: 2, @@ -759,7 +755,6 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger, @@ -793,7 +788,6 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger, @@ -828,7 +822,6 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger, @@ -886,7 +879,6 @@ describe('NotificationsManager', () => { await NotificationsManager.createNotificationsManager({ acl, db, - nodeConnectionManager, nodeManager, keyRing, logger, diff --git a/tests/tasks/utils.test.ts b/tests/tasks/utils.test.ts index 179cf91f5..afac0a779 100644 --- a/tests/tasks/utils.test.ts +++ b/tests/tasks/utils.test.ts @@ -94,5 +94,4 @@ describe('tasks/utils', () => { tasksUtils.decodeTaskId('vvvvvvvvvvvvvvvvvvvvvvvvvvs')?.equals(taskId3), ).toBe(true); }); - test; }); diff --git a/tests/utils/tls.ts b/tests/utils/tls.ts index 0561f282d..e43dd2456 100644 --- a/tests/utils/tls.ts +++ b/tests/utils/tls.ts @@ -7,9 +7,7 @@ import type { PrivateKeyPEM, } from '@/keys/types'; import type { TLSConfig } from '@/network/types'; -import type { ClientCryptoOps, ServerCryptoOps } from '@matrixai/quic'; import * as keysUtils from '@/keys/utils'; -import * as testNodesUtils from '../nodes/utils'; async function createTLSConfig( keyPair: KeyPair, @@ -65,16 +63,4 @@ async function createTLSConfigWithChain( }; } -function createCrypto(): ServerCryptoOps & ClientCryptoOps { - return { - randomBytes: async (data: ArrayBuffer) => { - const randomBytes = 
keysUtils.getRandomBytes(data.byteLength); - const dataBuf = Buffer.from(data); - dataBuf.write(randomBytes.toString('binary'), 'binary'); - }, - sign: testNodesUtils.sign, - verify: testNodesUtils.verify, - }; -} - -export { createTLSConfig, createTLSConfigWithChain, createCrypto }; +export { createTLSConfig, createTLSConfigWithChain }; diff --git a/tests/utils/utils.ts b/tests/utils/utils.ts index 5181f8a63..8a92f2b02 100644 --- a/tests/utils/utils.ts +++ b/tests/utils/utils.ts @@ -91,7 +91,7 @@ function promFromEvent< target: T, resolveEvent: new () => EResolve, rejectEvent?: new () => EReject, -) { +): Promise { const handleResolveEvent = (evt: EResolve) => prom.resolveP(evt); const handleRejectEvent = (evt: EReject) => prom.rejectP(evt); const prom = promise(); @@ -112,7 +112,38 @@ function promFromEvent< target.removeEventListener(rejectEvent.name, handleRejectEvent); } }); - return prom; + return prom.p; +} + +function promFromEvents< + EResolve extends Event = Event, + T extends EventTarget = EventTarget, +>( + target: T, + resolveEvent: new () => EResolve, + limit: number, +): Promise> { + const events = new Array(); + const { p, resolveP } = promise>(); + const handleResolveEvent = (evt: EResolve) => { + events.push(evt); + limit--; + if (limit <= 0) { + resolveP(events); + } + }; + target.addEventListener(resolveEvent.name, handleResolveEvent); + // Prevent unhandled rejection errors + void p + .then( + () => {}, + () => {}, + ) + .finally(() => { + // Clean up + target.removeEventListener(resolveEvent.name, handleResolveEvent); + }); + return p; } export { @@ -122,4 +153,5 @@ export { describeIf, trackTimers, promFromEvent, + promFromEvents, }; diff --git a/tests/vaults/VaultManager.test.ts b/tests/vaults/VaultManager.test.ts index 7291c7e44..ff9805a15 100644 --- a/tests/vaults/VaultManager.test.ts +++ b/tests/vaults/VaultManager.test.ts @@ -7,6 +7,8 @@ import type { } from '@/vaults/types'; import type NotificationsManager from 
'@/notifications/NotificationsManager'; import type { Host } from '@/network/types'; +import type { Sigchain } from '@/sigchain'; +import type { AgentServerManifest } from '@/nodes/agent/handlers'; import fs from 'fs'; import os from 'os'; import path from 'path'; @@ -19,6 +21,7 @@ import { RWLockWriter } from '@matrixai/async-locks'; import TaskManager from '@/tasks/TaskManager'; import ACL from '@/acl/ACL'; import GestaltGraph from '@/gestalts/GestaltGraph'; +import NodeManager from '@/nodes/NodeManager'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; import KeyRing from '@/keys/KeyRing'; import PolykeyAgent from '@/PolykeyAgent'; @@ -61,6 +64,11 @@ describe('VaultManager', () => { const dummyKeyRing = { getNodeId: () => nodeId, } as KeyRing; + const dummyGestaltGraph = {} as GestaltGraph; + const dummySigchain = {} as Sigchain; + const dummyACL = {} as ACL; + const dummyNodeManager = {} as NodeManager; + const dummyNotificationsManager = {} as NotificationsManager; beforeEach(async () => { dataDir = await fs.promises.mkdtemp( @@ -86,10 +94,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -116,10 +124,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: 
dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -136,10 +144,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -178,10 +186,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -211,10 +219,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -239,10 +247,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: 
dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -271,10 +279,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -316,10 +324,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -342,10 +350,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -367,10 +375,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: 
dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -392,10 +400,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -428,10 +436,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -472,6 +480,7 @@ describe('VaultManager', () => { let keyRing: KeyRing; let nodeGraph: NodeGraph; let nodeConnectionManager: NodeConnectionManager; + let nodeManager: NodeManager; let remoteKeynode1: PolykeyAgent, remoteKeynode2: PolykeyAgent; let localNodeId: NodeId; let taskManager: TaskManager; @@ -514,17 +523,24 @@ describe('VaultManager', () => { remoteKeynode2Id = remoteKeynode2.keyRing.getNodeId(); // Adding details to each agent - await remoteKeynode1.nodeGraph.setNode(remoteKeynode2Id, { - host: remoteKeynode2.agentServiceHost, - port: remoteKeynode2.agentServicePort, - scopes: ['global'], - }); - await remoteKeynode2.nodeGraph.setNode(remoteKeynode1Id, { - host: remoteKeynode1.agentServiceHost, - port: remoteKeynode1.agentServicePort, - scopes: ['global'], - }); - + await remoteKeynode1.nodeGraph.setNodeContactAddressData( + remoteKeynode2Id, + [remoteKeynode2.agentServiceHost, 
remoteKeynode2.agentServicePort], + { + mode: 'direct', + connectedTime: Date.now(), + scopes: ['global'], + }, + ); + await remoteKeynode2.nodeGraph.setNodeContactAddressData( + remoteKeynode1Id, + [remoteKeynode1.agentServiceHost, remoteKeynode1.agentServicePort], + { + mode: 'direct', + connectedTime: Date.now(), + scopes: ['global'], + }, + ); await remoteKeynode1.gestaltGraph.setNode({ nodeId: remoteKeynode2Id, }); @@ -572,27 +588,49 @@ describe('VaultManager', () => { const tlsConfig = await tlsTestsUtils.createTLSConfig(keyRing.keyPair); nodeConnectionManager = new NodeConnectionManager({ keyRing, - nodeGraph, tlsConfig, logger, }); - await nodeConnectionManager.start({ host: localhost as Host }); - await taskManager.startProcessing(); - await nodeGraph.setNode(remoteKeynode1Id, { - host: remoteKeynode1.agentServiceHost, - port: remoteKeynode1.agentServicePort, - scopes: ['global'], + await nodeConnectionManager.start({ + host: localhost as Host, + agentService: {} as AgentServerManifest, }); - await nodeGraph.setNode(remoteKeynode2Id, { - host: remoteKeynode2.agentServiceHost, - port: remoteKeynode2.agentServicePort, - scopes: ['global'], + nodeManager = new NodeManager({ + db, + keyRing, + nodeConnectionManager, + nodeGraph, + gestaltGraph: dummyGestaltGraph, + sigchain: dummySigchain, + taskManager, + logger, }); + await nodeManager.start(); + await taskManager.startProcessing(); + await nodeGraph.setNodeContactAddressData( + remoteKeynode1Id, + [remoteKeynode1.agentServiceHost, remoteKeynode1.agentServicePort], + { + mode: 'direct', + connectedTime: Date.now(), + scopes: ['global'], + }, + ); + await nodeGraph.setNodeContactAddressData( + remoteKeynode2Id, + [remoteKeynode2.agentServiceHost, remoteKeynode2.agentServicePort], + { + mode: 'direct', + connectedTime: Date.now(), + scopes: ['global'], + }, + ); }); afterEach(async () => { await taskManager.stopProcessing(); await taskManager.stopTasks(); await 
remoteKeynode1.vaultManager.destroyVault(remoteVaultId); + await nodeManager.stop(); await nodeConnectionManager.stop(); await nodeGraph.stop(); await nodeGraph.destroy(); @@ -605,10 +643,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -667,10 +705,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -706,10 +744,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -749,10 +787,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -812,10 +850,10 
@@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -836,10 +874,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -878,10 +916,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -966,10 +1004,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -1099,10 +1137,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - 
nodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -1173,10 +1211,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -1230,10 +1268,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -1256,10 +1294,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -1372,10 +1410,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing, - nodeConnectionManager, + nodeManager, acl: {} as any, gestaltGraph: {} as any, - notificationsManager: {} as NotificationsManager, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); 
@@ -1385,11 +1423,15 @@ describe('VaultManager', () => { const nodeId1 = keyRing.getNodeId(); // Letting nodeGraph know where the remote agent is - await nodeGraph.setNode(targetNodeId, { - host: remoteKeynode1.agentServiceHost, - port: remoteKeynode1.agentServicePort, - scopes: ['global'], - }); + await nodeGraph.setNodeContactAddressData( + targetNodeId, + [remoteKeynode1.agentServiceHost, remoteKeynode1.agentServicePort], + { + mode: 'direct', + connectedTime: Date.now(), + scopes: ['global'], + }, + ); await remoteKeynode1.gestaltGraph.setNode({ nodeId: nodeId1, @@ -1474,10 +1516,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - nodeConnectionManager: {} as NodeConnectionManager, + nodeManager: dummyNodeManager, acl, gestaltGraph, - notificationsManager: {} as NotificationsManager, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -1539,10 +1581,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -1582,10 +1624,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ 
-1624,10 +1666,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -1663,10 +1705,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -1684,10 +1726,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -1715,10 +1757,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), 
}); @@ -1743,10 +1785,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -1773,10 +1815,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -1790,10 +1832,10 @@ describe('VaultManager', () => { vaultManager2 = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), }); @@ -1813,10 +1855,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), 
}); @@ -1836,10 +1878,10 @@ describe('VaultManager', () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyRing: dummyKeyRing, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, + gestaltGraph: dummyGestaltGraph, + nodeManager: dummyNodeManager, + acl: dummyACL, + notificationsManager: dummyNotificationsManager, db, logger: logger.getChild(VaultManager.name), });