Comparing @rpch/sdk version 1.4.1 to 1.6.0
@@ -17,2 +17,6 @@ import type { EntryNode } from './entry-node'; | ||
}; | ||
export type Versions = { | ||
sdk: string; | ||
rpcServer: string; | ||
}; | ||
export type Nodes = { | ||
@@ -22,2 +26,3 @@ entryNodes: EntryNode[]; | ||
matchedAt: string; | ||
versions: Versions; | ||
}; | ||
@@ -24,0 +29,0 @@ export type QuotaParams = { |
@@ -59,2 +59,6 @@ "use strict"; | ||
ed.infoOngoing--; | ||
// when info request takes longer than its timeout | ||
if (ed.infoOngoing < 0) { | ||
ed.infoOngoing = 0; | ||
} | ||
} | ||
@@ -61,0 +65,0 @@ exports.removeOngoingInfo = removeOngoingInfo; |
@@ -0,2 +1,4 @@ | ||
import * as DPapi from './dp-api'; | ||
import * as JRPC from './jrpc'; | ||
import * as Request from './request'; | ||
import * as Response from './response'; | ||
@@ -32,2 +34,4 @@ export * as DPapi from './dp-api'; | ||
* @param segmentLimit - limit the number of segments a request can use; fails requests that require a larger number | ||
* @param versionListener - callback invoked with the current versions of RPCh related components | ||
* @param debugScope - programmatically set debug scope for SDK | ||
*/ | ||
@@ -43,2 +47,4 @@ export type Ops = { | ||
readonly segmentLimit?: number; | ||
readonly versionListener?: (versions: DPapi.Versions) => void; | ||
readonly debugScope?: string; | ||
}; | ||
@@ -80,3 +86,3 @@ /** | ||
*/ | ||
isReady(timeout?: number): Promise<boolean>; | ||
isReady: (timeout?: number) => Promise<boolean>; | ||
/** | ||
@@ -86,3 +92,3 @@ * Send an **RPCrequest** via RPCh. | ||
*/ | ||
send(req: JRPC.Request, ops?: RequestOps): Promise<Response.Response>; | ||
send: (req: JRPC.Request, ops?: RequestOps) => Promise<Response.Response>; | ||
private sendSegment; | ||
@@ -104,2 +110,3 @@ private resendRequest; | ||
private checkSegmentLimit; | ||
private onVersions; | ||
} |
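The new versionListener and debugScope options surface through the public Ops type, and isReady/send become instance properties. A minimal usage sketch, assuming the SDK is constructed with a client id and an Ops object (the client id, debug scope value, and JSON-RPC payload below are placeholders):

import SDK from '@rpch/sdk';

const sdk = new SDK('my-client-id', {
    // forwarded to Utils.setDebugScope, e.g. to enable all rpch namespaces
    debugScope: 'rpch:*',
    // called whenever the discovery platform reports component versions
    versionListener: (versions) => {
        // versions is DPapi.Versions: { sdk: string; rpcServer: string }
        console.log('RPCh versions', versions.sdk, versions.rpcServer);
    },
});

// isReady and send are now arrow-function properties instead of prototype
// methods, so they stay bound to the instance when passed around
const ready = await sdk.isReady();
const res = await sdk.send({ jsonrpc: '2.0', id: 1, method: 'eth_chainId' });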
@@ -43,2 +43,3 @@ "use strict"; | ||
const nodes_collector_1 = __importDefault(require("./nodes-collector")); | ||
const version_1 = __importDefault(require("./version")); | ||
exports.DPapi = __importStar(require("./dp-api")); | ||
@@ -99,2 +100,92 @@ exports.EntryNode = __importStar(require("./entry-node")); | ||
}; | ||
/** | ||
* Resolves true when node pairs are available. | ||
* If no timeout specified, global timeout is used. | ||
*/ | ||
this.isReady = async (timeout) => { | ||
const t = timeout || this.ops.timeout; | ||
return this.nodesColl.ready(t).then((_) => true); | ||
}; | ||
/** | ||
* Send an **RPCrequest** via RPCh. | ||
* See **RequestOps** for overridable options. | ||
*/ | ||
this.send = async (req, ops) => { | ||
const reqOps = this.requestOps(ops); | ||
this.populateChainIds(ops?.provider); | ||
// TODO fixme | ||
// eslint-disable-next-line no-async-promise-executor | ||
return new Promise(async (resolve, reject) => { | ||
// sanity check provider url | ||
if (!Utils.isValidURL(reqOps.provider)) { | ||
return reject('Cannot parse provider URL'); | ||
} | ||
// sanity check mev protection provider url, if it is set | ||
if (this.ops.mevProtectionProvider) { | ||
if (!Utils.isValidURL(this.ops.mevProtectionProvider)) { | ||
return reject('Cannot parse mevProtectionProvider URL'); | ||
} | ||
} | ||
// gather entry - exit node pair | ||
const resNodes = await this.nodesColl | ||
.requestNodePair(reqOps.timeout) | ||
.catch((err) => { | ||
log.error('Error finding node pair', err); | ||
return reject(`Could not find node pair in ${reqOps.timeout} ms`); | ||
}); | ||
if (!resNodes) { | ||
return reject('Unexpected code flow - should never be here'); | ||
} | ||
const provider = this.determineProvider(reqOps, req); | ||
const headers = this.determineHeaders(provider, this.ops.mevKickbackAddress); | ||
// create request | ||
const { entryNode, exitNode, counterOffset } = resNodes; | ||
const id = RequestCache.generateId(this.requestCache); | ||
const resReq = Request.create({ | ||
id, | ||
provider, | ||
req, | ||
clientId: this.clientId, | ||
entryPeerId: entryNode.id, | ||
exitPeerId: exitNode.id, | ||
exitPublicKey: ethers_1.utils.arrayify(exitNode.pubKey), | ||
counterOffset, | ||
headers, | ||
hops: this.hops, | ||
}); | ||
if (Res.isErr(resReq)) { | ||
log.error('Error creating request', resReq.error); | ||
return reject('Unable to create request object'); | ||
} | ||
// split request to segments | ||
const { request, session } = resReq.res; | ||
const segments = Request.toSegments(request, session); | ||
const failMsg = this.checkSegmentLimit(segments.length); | ||
if (failMsg) { | ||
return reject(failMsg); | ||
} | ||
// set request expiration timer | ||
const timer = setTimeout(() => { | ||
log.error('Request %s expired after %dms timeout', request.id, reqOps.timeout); | ||
this.removeRequest(request); | ||
return reject('Request timed out'); | ||
}, reqOps.timeout); | ||
// track request | ||
const entry = RequestCache.add(this.requestCache, { | ||
request, | ||
resolve, | ||
reject, | ||
timer, | ||
session, | ||
}); | ||
this.nodesColl.requestStarted(request); | ||
// send request to hoprd | ||
log.info('sending request %s', request.id); | ||
// queue segment sending for all of them | ||
segments.forEach((s) => setTimeout(() => { | ||
this.nodesColl.segmentStarted(request, s); | ||
this.sendSegment(request, s, entryNode, entry); | ||
})); | ||
}); | ||
}; | ||
this.sendSegment = (request, segment, entryNode, cacheEntry) => { | ||
@@ -117,3 +208,3 @@ const bef = Date.now(); | ||
.catch((error) => { | ||
log.error('error sending segment', Segment.prettyPrint(segment), error); | ||
log.error('error sending %s: %s[%o]', Segment.prettyPrint(segment), JSON.stringify(error), error); | ||
this.nodesColl.segmentFailed(request, segment); | ||
@@ -123,2 +214,62 @@ this.resendRequest(request, entryNode, cacheEntry); | ||
}; | ||
this.resendRequest = (origReq, entryNode, cacheEntry) => { | ||
if (this.redoRequests.has(origReq.id)) { | ||
log.verbose('ignoring already triggered resend', origReq.id); | ||
return; | ||
} | ||
// TODO track request after segments have been sent | ||
this.removeRequest(origReq); | ||
const fallback = this.nodesColl.fallbackNodePair(entryNode); | ||
if (!fallback) { | ||
log.info('no fallback for resending request available'); | ||
return cacheEntry.reject('No fallback node pair to retry sending request'); | ||
} | ||
this.redoRequests.add(origReq.id); | ||
if (fallback.entryNode.id === origReq.entryPeerId) { | ||
log.info('fallback entry node same as original entry node - still trying'); | ||
} | ||
if (fallback.exitNode.id === origReq.exitPeerId) { | ||
log.info('fallback exit node same as original exit node - still trying'); | ||
} | ||
// generate new request | ||
const id = RequestCache.generateId(this.requestCache); | ||
const resReq = Request.create({ | ||
id, | ||
originalId: origReq.id, | ||
provider: origReq.provider, | ||
req: origReq.req, | ||
clientId: this.clientId, | ||
entryPeerId: fallback.entryNode.id, | ||
exitPeerId: fallback.exitNode.id, | ||
exitPublicKey: ethers_1.utils.arrayify(fallback.exitNode.pubKey), | ||
counterOffset: fallback.counterOffset, | ||
headers: origReq.headers, | ||
hops: origReq.hops, | ||
}); | ||
if (Res.isErr(resReq)) { | ||
log.error('error creating fallback request', resReq.error); | ||
return cacheEntry.reject('Unable to create fallback request object'); | ||
} | ||
// split request to segments | ||
const { request, session } = resReq.res; | ||
const segments = Request.toSegments(request, session); | ||
const failMsg = this.checkSegmentLimit(segments.length); | ||
if (failMsg) { | ||
this.removeRequest(request); | ||
return cacheEntry.reject(failMsg); | ||
} | ||
// track request | ||
const newCacheEntry = RequestCache.add(this.requestCache, { | ||
request, | ||
resolve: cacheEntry.resolve, | ||
reject: cacheEntry.reject, | ||
timer: cacheEntry.timer, | ||
session, | ||
}); | ||
this.nodesColl.requestStarted(request); | ||
// send request to hoprd | ||
log.info('resending request %s', request.id, 'for original', origReq.id); | ||
// send segments sequentially | ||
segments.forEach((s) => setTimeout(() => this.resendSegment(s, request, entryNode, newCacheEntry))); | ||
}; | ||
this.resendSegment = (segment, request, entryNode, cacheEntry) => { | ||
@@ -140,3 +291,3 @@ const bef = Date.now(); | ||
.catch((error) => { | ||
log.error('error resending segment', Segment.prettyPrint(segment), error); | ||
log.error('error resending %s: %s[%o]', Segment.prettyPrint(segment), JSON.stringify(error), error); | ||
this.nodesColl.segmentFailed(request, segment); | ||
@@ -203,3 +354,3 @@ this.removeRequest(request); | ||
this.responseError = (error, reqEntry) => { | ||
log.error('Error extracting message', error); | ||
log.error('error extracting message', error); | ||
this.nodesColl.requestFailed(reqEntry.request); | ||
@@ -253,2 +404,4 @@ return reqEntry.reject('Unable to process response'); | ||
segmentLimit: ops.segmentLimit ?? defaultOps.segmentLimit, | ||
versionListener: ops.versionListener, | ||
debugScope: ops.debugScope, | ||
}; | ||
@@ -296,183 +449,62 @@ }; | ||
}; | ||
this.ops = this.sdkOps(ops); | ||
this.requestCache = RequestCache.init(); | ||
this.segmentCache = SegmentCache.init(); | ||
this.hops = this.determineHops(!!this.ops.forceZeroHop); | ||
this.nodesColl = new nodes_collector_1.default(this.ops.discoveryPlatformEndpoint, this.clientId, ApplicationTag, this.onMessages, this.hops); | ||
this.fetchChainId(this.ops.provider); | ||
} | ||
/** | ||
* Resolves true when node pairs are available. | ||
* If no timeout specified, global timeout is used. | ||
*/ | ||
async isReady(timeout) { | ||
const t = timeout || this.ops.timeout; | ||
return this.nodesColl.ready(t).then((_) => true); | ||
} | ||
/** | ||
* Send an **RPCrequest** via RPCh. | ||
* See **RequestOps** for overridable options. | ||
*/ | ||
async send(req, ops) { | ||
const reqOps = this.requestOps(ops); | ||
this.populateChainIds(ops?.provider); | ||
// TODO fixme | ||
// eslint-disable-next-line no-async-promise-executor | ||
return new Promise(async (resolve, reject) => { | ||
// sanity check provider url | ||
if (!Utils.isValidURL(reqOps.provider)) { | ||
return reject('Cannot parse provider URL'); | ||
this.determineHops = (forceZeroHop) => { | ||
// defaults to multihop (novalue) | ||
if (forceZeroHop) { | ||
return 0; | ||
} | ||
// sanity check mev protection provider url, if it is set | ||
if (this.ops.mevProtectionProvider) { | ||
if (!Utils.isValidURL(this.ops.mevProtectionProvider)) { | ||
return reject('Cannot parse mevProtectionProvider URL'); | ||
} | ||
}; | ||
this.populateChainIds = (provider) => { | ||
if (!provider) { | ||
return; | ||
} | ||
// gather entry - exit node pair | ||
const resNodes = await this.nodesColl | ||
.requestNodePair(reqOps.timeout) | ||
.catch((err) => { | ||
log.error('Error finding node pair', err); | ||
return reject(`Could not find node pair in ${reqOps.timeout} ms`); | ||
}); | ||
if (!resNodes) { | ||
return reject('Unexpected code flow - should never be here'); | ||
if (this.chainIds.has(provider)) { | ||
return; | ||
} | ||
const provider = this.determineProvider(reqOps, req); | ||
const headers = this.determineHeaders(provider, this.ops.mevKickbackAddress); | ||
// create request | ||
const { entryNode, exitNode, counterOffset } = resNodes; | ||
const id = RequestCache.generateId(this.requestCache); | ||
const resReq = Request.create({ | ||
id, | ||
provider, | ||
req, | ||
clientId: this.clientId, | ||
entryPeerId: entryNode.id, | ||
exitPeerId: exitNode.id, | ||
exitPublicKey: ethers_1.utils.arrayify(exitNode.pubKey), | ||
counterOffset, | ||
headers, | ||
hops: this.hops, | ||
}); | ||
if (Res.isErr(resReq)) { | ||
log.error('Error creating request', resReq.error); | ||
return reject('Unable to create request object'); | ||
this.fetchChainId(provider); | ||
}; | ||
this.checkSegmentLimit = (segLength) => { | ||
const limit = this.ops.segmentLimit; | ||
if (limit > 0 && segLength > limit) { | ||
log.error('Request exceeds maximum amount of segments[%i] with %i segments', limit, segLength); | ||
const maxSize = Segment.MaxSegmentBody * limit; | ||
return `Request exceeds maximum size of ${maxSize}b`; | ||
} | ||
// split request to segments | ||
const { request, session } = resReq.res; | ||
const segments = Request.toSegments(request, session); | ||
const failMsg = this.checkSegmentLimit(segments.length); | ||
if (failMsg) { | ||
return reject(failMsg); | ||
}; | ||
this.onVersions = (versions) => { | ||
const vSdk = versions.sdk; | ||
const cmp = Utils.versionCompare(vSdk, version_1.default); | ||
if (Res.isOk(cmp)) { | ||
switch (cmp.res) { | ||
case Utils.VrsnCmp.Identical: | ||
log.verbose('RPCh SDK[v%s] is up to date', version_1.default); | ||
break; | ||
case Utils.VrsnCmp.PatchMismatch: | ||
log.info('RPCh SDK[v%s] can be updated to v%s.', version_1.default, vSdk); | ||
break; | ||
case Utils.VrsnCmp.MinorMismatch: | ||
log.warn('RPCh SDK[v%s] needs to update to v%s.', version_1.default, vSdk); | ||
break; | ||
case Utils.VrsnCmp.MajorMismatch: | ||
log.error('RPCh SDK[v%s] must be updated to v%s!', version_1.default, vSdk); | ||
break; | ||
} | ||
} | ||
// set request expiration timer | ||
const timer = setTimeout(() => { | ||
log.error('request expired', request.id); | ||
this.removeRequest(request); | ||
return reject('request timed out'); | ||
}, reqOps.timeout); | ||
// track request | ||
const entry = RequestCache.add(this.requestCache, { | ||
request, | ||
resolve, | ||
reject, | ||
timer, | ||
session, | ||
else { | ||
log.error('error comparing versions: %s', cmp.error); | ||
} | ||
// don't catch exceptions from external code | ||
setTimeout(() => { | ||
this.ops.versionListener && this.ops.versionListener(versions); | ||
}); | ||
this.nodesColl.requestStarted(request); | ||
// send request to hoprd | ||
log.info('sending request %s', request.id); | ||
// queue segment sending for all of them | ||
segments.forEach((s) => setTimeout(() => { | ||
this.nodesColl.segmentStarted(request, s); | ||
this.sendSegment(request, s, entryNode, entry); | ||
})); | ||
}); | ||
}; | ||
this.ops = this.sdkOps(ops); | ||
this.ops.debugScope && Utils.setDebugScope(this.ops.debugScope); | ||
this.requestCache = RequestCache.init(); | ||
this.segmentCache = SegmentCache.init(); | ||
this.hops = this.determineHops(!!this.ops.forceZeroHop); | ||
this.nodesColl = new nodes_collector_1.default(this.ops.discoveryPlatformEndpoint, this.clientId, ApplicationTag, this.onMessages, this.onVersions, this.hops); | ||
this.fetchChainId(this.ops.provider); | ||
log.info('RPCh SDK[v%s] started', version_1.default); | ||
} | ||
resendRequest(origReq, entryNode, cacheEntry) { | ||
if (this.redoRequests.has(origReq.id)) { | ||
log.verbose('ignoring already triggered resend', origReq.id); | ||
return; | ||
} | ||
// TODO track request after segments have been sent | ||
this.removeRequest(origReq); | ||
const fallback = this.nodesColl.fallbackNodePair(entryNode); | ||
if (!fallback) { | ||
log.info('no fallback for resending request available'); | ||
return cacheEntry.reject('no fallback node pair to retry sending request'); | ||
} | ||
this.redoRequests.add(origReq.id); | ||
if (fallback.entryNode.id === origReq.entryPeerId) { | ||
log.info('fallback entry node same as original entry node - still trying'); | ||
} | ||
if (fallback.exitNode.id === origReq.exitPeerId) { | ||
log.info('fallback exit node same as original exit node - still trying'); | ||
} | ||
// generate new request | ||
const id = RequestCache.generateId(this.requestCache); | ||
const resReq = Request.create({ | ||
id, | ||
originalId: origReq.id, | ||
provider: origReq.provider, | ||
req: origReq.req, | ||
clientId: this.clientId, | ||
entryPeerId: fallback.entryNode.id, | ||
exitPeerId: fallback.exitNode.id, | ||
exitPublicKey: ethers_1.utils.arrayify(fallback.exitNode.pubKey), | ||
counterOffset: fallback.counterOffset, | ||
headers: origReq.headers, | ||
hops: origReq.hops, | ||
}); | ||
if (Res.isErr(resReq)) { | ||
log.info('Error creating fallback request', resReq.error); | ||
return cacheEntry.reject('unable to create fallback request object'); | ||
} | ||
// split request to segments | ||
const { request, session } = resReq.res; | ||
const segments = Request.toSegments(request, session); | ||
const failMsg = this.checkSegmentLimit(segments.length); | ||
if (failMsg) { | ||
this.removeRequest(request); | ||
return cacheEntry.reject(failMsg); | ||
} | ||
// track request | ||
const newCacheEntry = RequestCache.add(this.requestCache, { | ||
request, | ||
resolve: cacheEntry.resolve, | ||
reject: cacheEntry.reject, | ||
timer: cacheEntry.timer, | ||
session, | ||
}); | ||
this.nodesColl.requestStarted(request); | ||
// send request to hoprd | ||
log.info('resending request %i', request.id, 'for original', origReq.id); | ||
// send segments sequentially | ||
segments.forEach((s) => setTimeout(() => this.resendSegment(s, request, entryNode, newCacheEntry))); | ||
} | ||
determineHops(forceZeroHop) { | ||
// defaults to multihop (novalue) | ||
if (forceZeroHop) { | ||
return 0; | ||
} | ||
} | ||
populateChainIds(provider) { | ||
if (!provider) { | ||
return; | ||
} | ||
if (this.chainIds.has(provider)) { | ||
return; | ||
} | ||
this.fetchChainId(provider); | ||
} | ||
checkSegmentLimit(segLength) { | ||
const limit = this.ops.segmentLimit; | ||
if (limit > 0 && segLength > limit) { | ||
log.error('Request exceeds maximum amount of segments[%i] with %i segments', limit, segLength); | ||
const maxSize = Segment.MaxSegmentBody * limit; | ||
return `Request exceeds maximum size of ${maxSize}b`; | ||
} | ||
} | ||
} | ||
exports.default = SDK; |
@@ -44,2 +44,6 @@ import WebSocket = require('isomorphic-ws'); | ||
}; | ||
export type NodeError = { | ||
status: string; | ||
error: string; | ||
}; | ||
export type Channels = { | ||
@@ -67,3 +71,4 @@ all: Channel[]; | ||
}>; | ||
export declare function getPeers(conn: ConnInfo): Promise<Peers>; | ||
export declare function getPeers(conn: ConnInfo): Promise<Peers | NodeError>; | ||
export declare function getChannels(conn: ConnInfo): Promise<Channels>; | ||
export declare function isError(payload: NonNullable<unknown> | NodeError): payload is NodeError; |
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.getChannels = exports.getPeers = exports.accountAddresses = exports.deleteMessages = exports.retrieveMessages = exports.version = exports.sendMessage = exports.connectWS = void 0; | ||
exports.isError = exports.getChannels = exports.getPeers = exports.accountAddresses = exports.deleteMessages = exports.retrieveMessages = exports.version = exports.sendMessage = exports.connectWS = void 0; | ||
const WebSocket = require("isomorphic-ws"); | ||
@@ -110,1 +110,5 @@ function connectWS(conn) { | ||
exports.getChannels = getChannels; | ||
function isError(payload) { | ||
return 'error' in payload; | ||
} | ||
exports.isError = isError; |
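With getPeers now returning Peers | NodeError, callers are expected to narrow the result with the new isError type guard. A hedged sketch; the import path and the ConnInfo value are assumptions:

import * as NodeAPI from './node-api';

// placeholder connection info - the real ConnInfo shape comes from node-api
const conn = { apiEndpoint: new URL('http://localhost:3001'), accessToken: 'token' } as any;

const res = await NodeAPI.getPeers(conn);
if (NodeAPI.isError(res)) {
    // NodeError: { status: string; error: string }
    console.error('getPeers failed', res.status, res.error);
} else {
    console.log('peers', res);
}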
@@ -38,2 +38,3 @@ "use strict"; | ||
const MessagesFetchInterval = 333; // ms | ||
const InfoResponseTimeout = 10e3; // 10s | ||
function create(entryNode, exitNodesIt, applicationTag, messageListener, hops) { | ||
@@ -62,2 +63,3 @@ const entryData = EntryData.create(); | ||
clearInterval(np.fetchInterval); | ||
np.fetchInterval = undefined; | ||
} | ||
@@ -139,2 +141,3 @@ exports.destruct = destruct; | ||
function requestInfo(np, exitNode) { | ||
const message = `info-${np.entryNode.id}-${np.hops ?? '_'}`; | ||
NodeAPI.sendMessage({ | ||
@@ -146,3 +149,3 @@ ...np.entryNode, | ||
tag: np.applicationTag, | ||
message: `info-${np.entryNode.id}-${np.hops}`, | ||
message, | ||
}); | ||
@@ -153,2 +156,13 @@ EntryData.addOngoingInfo(np.entryData); | ||
} | ||
// stop checking for info resp after this | ||
// will still be able to receive info resp if messages went over this route | ||
setTimeout(() => { | ||
EntryData.removeOngoingInfo(np.entryData); | ||
checkStopInterval(np); | ||
const exitData = np.exitDatas.get(exitNode.id); | ||
if (!exitData) { | ||
return np.log.error('requestInfo ExitData mismatch for %s', exitNode.id); | ||
} | ||
exitData.infoFail = true; | ||
}, InfoResponseTimeout); | ||
} | ||
@@ -166,2 +180,5 @@ function prettyPrint(np) { | ||
const exStrs = Array.from(np.exitDatas).map(([id, d]) => { | ||
const v = d.version; | ||
const ctrOff = d.counterOffset?.toFixed(2) || 0; | ||
const info = d.infoFail ? 'fail' : `${d.infoLatSec}s`; | ||
const o = d.requestsOngoing.length; | ||
@@ -177,3 +194,3 @@ const tot = d.requestsHistory.length; | ||
const nId = (0, utils_1.shortPeerId)(id); | ||
return `${nId}[${str}]`; | ||
return `${nId}[v${v},o:${ctrOff}ms,i:${info},${str}]`; | ||
}); | ||
@@ -225,3 +242,3 @@ const segStr = prettyOngoingNumbers(np, segOngoing, segLats.length, segTotal, (0, utils_1.average)(segLats)); | ||
.catch((err) => { | ||
np.log.error('Error fetching node messages', JSON.stringify(err)); | ||
np.log.error('Error fetching node messages: %s[%o]', JSON.stringify(err), err); | ||
np.entryData.fetchMessagesErrors++; | ||
@@ -250,2 +267,3 @@ }); | ||
exitData.infoLatSec = Math.abs(receivedAt - Math.floor(counter / 1000)); | ||
exitData.infoFail = false; | ||
EntryData.removeOngoingInfo(np.entryData); | ||
@@ -252,0 +270,0 @@ }); |
@@ -20,2 +20,2 @@ import * as NodeMatch from './node-match'; | ||
export declare function fallbackRoutePair(nodePairs: Map<string, NodePair.NodePair>, exclude: EntryNode): Res.Result<NodeSelection>; | ||
export declare function prettyPrint(res: Res.Result<NodeSelection>): string; | ||
export declare function prettyPrint(sel: NodeSelection): string; |
@@ -31,3 +31,3 @@ "use strict"; | ||
const utils_1 = require("./utils"); | ||
const ExitNodesCompatVersions = ['0.12']; | ||
const ExitNodesCompatVersions = ['0.13']; | ||
/** | ||
@@ -53,10 +53,6 @@ * Try to distribute evenly with best route pairs preferred. | ||
exports.fallbackRoutePair = fallbackRoutePair; | ||
function prettyPrint(res) { | ||
if (Res.isOk(res)) { | ||
const sel = res.res; | ||
const eId = (0, utils_1.shortPeerId)(sel.match.entryNode.id); | ||
const xId = (0, utils_1.shortPeerId)(sel.match.exitNode.id); | ||
return `${eId} > ${xId} (via ${sel.via})`; | ||
} | ||
return `${res.error}`; | ||
function prettyPrint(sel) { | ||
const eId = (0, utils_1.shortPeerId)(sel.match.entryNode.id); | ||
const xId = (0, utils_1.shortPeerId)(sel.match.exitNode.id); | ||
return `${eId} > ${xId} (via ${sel.via})`; | ||
} | ||
@@ -63,0 +59,0 @@ exports.prettyPrint = prettyPrint; |
@@ -0,1 +1,2 @@ | ||
import * as DPapi from './dp-api'; | ||
import * as Request from './request'; | ||
@@ -6,2 +7,3 @@ import * as Segment from './segment'; | ||
import type { NodeMatch } from './node-match'; | ||
export type VersionListener = (versions: DPapi.Versions) => void; | ||
export default class NodesCollector { | ||
@@ -12,2 +14,3 @@ private readonly discoveryPlatformEndpoint; | ||
private readonly messageListener; | ||
private readonly versionListener; | ||
private readonly hops?; | ||
@@ -18,3 +21,3 @@ private readonly nodePairs; | ||
private ongoingFetchPairs; | ||
constructor(discoveryPlatformEndpoint: string, clientId: string, applicationTag: number, messageListener: MessageListener, hops?: number | undefined); | ||
constructor(discoveryPlatformEndpoint: string, clientId: string, applicationTag: number, messageListener: MessageListener, versionListener: VersionListener, hops?: number | undefined); | ||
destruct: () => void; | ||
@@ -21,0 +24,0 @@ /** |
@@ -37,3 +37,3 @@ "use strict"; | ||
class NodesCollector { | ||
constructor(discoveryPlatformEndpoint, clientId, applicationTag, messageListener, hops) { | ||
constructor(discoveryPlatformEndpoint, clientId, applicationTag, messageListener, versionListener, hops) { | ||
this.discoveryPlatformEndpoint = discoveryPlatformEndpoint; | ||
@@ -43,2 +43,3 @@ this.clientId = clientId; | ||
this.messageListener = messageListener; | ||
this.versionListener = versionListener; | ||
this.hops = hops; | ||
@@ -66,7 +67,7 @@ this.nodePairs = new Map(); | ||
if (Res.isOk(res)) { | ||
log.verbose('ready with route pair', NodeSel.prettyPrint(res)); | ||
log.verbose('ready with route pair: %s', NodeSel.prettyPrint(res.res)); | ||
return resolve(true); | ||
} | ||
if (elapsed > timeout) { | ||
log.error('Timeout waiting for ready', elapsed, res.error); | ||
log.error('timeout after %d waiting for ready: %s', elapsed, res.error); | ||
return reject(`timeout after ${elapsed} ms`); | ||
@@ -90,7 +91,7 @@ } | ||
if (Res.isOk(res)) { | ||
log.verbose('found route pair', NodeSel.prettyPrint(res)); | ||
log.verbose('found route pair: %s', NodeSel.prettyPrint(res.res)); | ||
return resolve(res.res.match); | ||
} | ||
if (elapsed > timeout) { | ||
log.error('Timeout waiting for node pair', elapsed, res.error); | ||
log.error('timeout after %d waiting for node pair: %s', elapsed, res.error); | ||
return reject(`timeout after ${elapsed} ms`); | ||
@@ -109,3 +110,3 @@ } | ||
if (Res.isOk(res)) { | ||
log.verbose('found fallback route pair', NodeSel.prettyPrint(res)); | ||
log.verbose('found fallback route pair: %s', NodeSel.prettyPrint(res.res)); | ||
return res.res.match; | ||
@@ -117,7 +118,7 @@ } | ||
if (!np) { | ||
log.error('requestStarted', Request.prettyPrint(req), 'on non existing node pair'); | ||
log.error('started %s on non existing node pair', Request.prettyPrint(req)); | ||
return; | ||
} | ||
NodePair.requestStarted(np, req); | ||
log.verbose('requestStarted', Request.prettyPrint(req), NodePair.prettyPrint(np)); | ||
log.verbose('started %s on %s', Request.prettyPrint(req), NodePair.prettyPrint(np)); | ||
}; | ||
@@ -127,7 +128,7 @@ this.requestSucceeded = (req, responseTime) => { | ||
if (!np) { | ||
log.error('requestSucceeded', Request.prettyPrint(req), 'on non existing node pair'); | ||
log.error('successful %s on non existing node pair', Request.prettyPrint(req)); | ||
return; | ||
} | ||
NodePair.requestSucceeded(np, req, responseTime); | ||
log.verbose('requestSucceeded', Request.prettyPrint(req), NodePair.prettyPrint(np)); | ||
log.info('successful %s on %s', Request.prettyPrint(req), NodePair.prettyPrint(np)); | ||
}; | ||
@@ -137,7 +138,7 @@ this.requestFailed = (req) => { | ||
if (!np) { | ||
log.error('requestFailed', Request.prettyPrint(req), 'on non existing node pair'); | ||
log.error('failed %s on non existing node pair', Request.prettyPrint(req)); | ||
return; | ||
} | ||
NodePair.requestFailed(np, req); | ||
log.verbose('requestFailed', Request.prettyPrint(req), NodePair.prettyPrint(np)); | ||
log.warn('failed %s on %s', Request.prettyPrint(req), NodePair.prettyPrint(np)); | ||
}; | ||
@@ -147,7 +148,7 @@ this.segmentStarted = (req, seg) => { | ||
if (!np) { | ||
log.error('segmentStarted', Segment.prettyPrint(seg), 'on non existing node pair'); | ||
log.error('started %s on non existing node pair', Segment.prettyPrint(seg)); | ||
return; | ||
} | ||
NodePair.segmentStarted(np, seg); | ||
log.verbose('segmentStarted', Segment.prettyPrint(seg), NodePair.prettyPrint(np)); | ||
log.verbose('started %s on %s', Segment.prettyPrint(seg), NodePair.prettyPrint(np)); | ||
}; | ||
@@ -157,7 +158,7 @@ this.segmentSucceeded = (req, seg, responseTime) => { | ||
if (!np) { | ||
log.error('segmentSucceeded', Segment.prettyPrint(seg), 'on non existing node pair'); | ||
log.error('successful %s on non existing node pair', Segment.prettyPrint(seg), ''); | ||
return; | ||
} | ||
NodePair.segmentSucceeded(np, seg, responseTime); | ||
log.verbose('segmentSucceeded', Segment.prettyPrint(seg), NodePair.prettyPrint(np)); | ||
log.info('successful %s on %s', Segment.prettyPrint(seg), NodePair.prettyPrint(np)); | ||
}; | ||
@@ -167,11 +168,11 @@ this.segmentFailed = (req, seg) => { | ||
if (!np) { | ||
log.error('segmentFailed', Segment.prettyPrint(seg), 'on non existing node pair'); | ||
log.error('failed %s on non existing node pair', Segment.prettyPrint(seg)); | ||
return; | ||
} | ||
NodePair.segmentFailed(np, seg); | ||
log.verbose('segmentFailed', Segment.prettyPrint(seg), NodePair.prettyPrint(np)); | ||
log.warn('failed %s on %s', Segment.prettyPrint(seg), NodePair.prettyPrint(np)); | ||
}; | ||
this.fetchNodePairs = () => { | ||
if (this.ongoingFetchPairs) { | ||
log.verbose('fetchNodePairs ongoing'); | ||
log.verbose('discovering node pairs ongoing'); | ||
return; | ||
@@ -181,4 +182,3 @@ } | ||
if (diff < NodePairFetchTimeout) { | ||
log.verbose('fetchNodePairs too early - need to wait', NodePairFetchTimeout - diff, 'ms'); | ||
return; | ||
log.verbose('discovering node pairs too early - need to wait %dms', NodePairFetchTimeout - diff); | ||
} | ||
@@ -194,6 +194,6 @@ this.ongoingFetchPairs = true; | ||
if (err.message === DPapi.NoMoreNodes) { | ||
log.info('No node pairs available'); | ||
log.warn('no node pairs available'); | ||
} | ||
else { | ||
log.error('Error fetching nodes', err); | ||
log.error('error fetching node pairs: %s[%o]', JSON.stringify(err), err); | ||
} | ||
@@ -217,3 +217,4 @@ }) | ||
this.nodePairs.forEach((np) => NodePair.discover(np)); | ||
log.verbose('Discovered %d node-pairs with %d exits', this.nodePairs.size, lookupExitNodes.size); | ||
log.info('discovered %d node-pairs with %d exits', this.nodePairs.size, lookupExitNodes.size); | ||
this.versionListener(nodes.versions); | ||
}; | ||
@@ -220,0 +221,0 @@ this.fetchNodePairs(); |
@@ -118,3 +118,3 @@ "use strict"; | ||
const xId = (0, utils_1.shortPeerId)(req.exitPeerId); | ||
const prov = req.provider.substring(0, 14); | ||
const prov = req.provider; | ||
const attrs = [req.id, `${eId}>${xId}`]; | ||
@@ -125,4 +125,4 @@ if (id) { | ||
attrs.push(prov); | ||
return `req[${attrs.join(',')}]`; | ||
return `request[${attrs.join(',')}]`; | ||
} | ||
exports.prettyPrint = prettyPrint; |
@@ -14,1 +14,2 @@ export type ResultOk<V> = { | ||
export declare function isErr<V, X>(res: Result<V, X>): res is ResultErr<X>; | ||
export declare function assertOk<V, X>(res: Result<V, X>): asserts res is ResultOk<V>; |
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.isErr = exports.isOk = exports.err = exports.ok = void 0; | ||
exports.assertOk = exports.isErr = exports.isOk = exports.err = exports.ok = void 0; | ||
function ok(res) { | ||
@@ -20,1 +20,7 @@ return { success: true, res }; | ||
exports.isErr = isErr; | ||
function assertOk(res) { | ||
if (isErr(res)) { | ||
throw new Error('Not an OK result'); | ||
} | ||
} | ||
exports.assertOk = assertOk; |
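assertOk complements the existing isOk/isErr guards: instead of branching, it throws when given an error result and narrows the type afterwards. For example:

import * as Res from './result';

const parsed: Res.Result<number> = Res.ok(42);

Res.assertOk(parsed);        // throws Error('Not an OK result') for a ResultErr
console.log(parsed.res + 1); // parsed is narrowed to ResultOk<number> here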
import debug from 'debug'; | ||
import * as Res from './result'; | ||
export declare enum VrsnCmp { | ||
Identical = 0, | ||
PatchMismatch = 1, | ||
MinorMismatch = 2, | ||
MajorMismatch = 3 | ||
} | ||
export declare function shortPeerId(peerId: string): string; | ||
@@ -10,2 +17,5 @@ export declare function randomEl<T>(arr: T[]): T; | ||
verbose: debug.Debugger; | ||
warn: debug.Debugger; | ||
}; | ||
export declare function versionCompare(ref: string, version: string): Res.Result<VrsnCmp>; | ||
export declare function setDebugScope(scope: string): void; |
"use strict"; | ||
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { | ||
if (k2 === undefined) k2 = k; | ||
var desc = Object.getOwnPropertyDescriptor(m, k); | ||
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { | ||
desc = { enumerable: true, get: function() { return m[k]; } }; | ||
} | ||
Object.defineProperty(o, k2, desc); | ||
}) : (function(o, m, k, k2) { | ||
if (k2 === undefined) k2 = k; | ||
o[k2] = m[k]; | ||
})); | ||
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { | ||
Object.defineProperty(o, "default", { enumerable: true, value: v }); | ||
}) : function(o, v) { | ||
o["default"] = v; | ||
}); | ||
var __importStar = (this && this.__importStar) || function (mod) { | ||
if (mod && mod.__esModule) return mod; | ||
var result = {}; | ||
if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); | ||
__setModuleDefault(result, mod); | ||
return result; | ||
}; | ||
var __importDefault = (this && this.__importDefault) || function (mod) { | ||
@@ -6,4 +29,12 @@ return (mod && mod.__esModule) ? mod : { "default": mod }; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.logger = exports.isValidURL = exports.average = exports.randomEl = exports.shortPeerId = void 0; | ||
exports.setDebugScope = exports.versionCompare = exports.logger = exports.isValidURL = exports.average = exports.randomEl = exports.shortPeerId = exports.VrsnCmp = void 0; | ||
const debug_1 = __importDefault(require("debug")); | ||
const Res = __importStar(require("./result")); | ||
var VrsnCmp; | ||
(function (VrsnCmp) { | ||
VrsnCmp[VrsnCmp["Identical"] = 0] = "Identical"; | ||
VrsnCmp[VrsnCmp["PatchMismatch"] = 1] = "PatchMismatch"; | ||
VrsnCmp[VrsnCmp["MinorMismatch"] = 2] = "MinorMismatch"; | ||
VrsnCmp[VrsnCmp["MajorMismatch"] = 3] = "MajorMismatch"; | ||
})(VrsnCmp || (exports.VrsnCmp = VrsnCmp = {})); | ||
function shortPeerId(peerId) { | ||
@@ -41,12 +72,45 @@ return `.${peerId.substring(peerId.length - 4)}`; | ||
namespaces.unshift('rpch'); | ||
const base = (0, debug_1.default)(namespaces.join(':')); | ||
base.log = console.log.bind(console); | ||
const verbose = base.extend('verbose'); | ||
const error = base.extend('error'); | ||
const ns = namespaces.join(':'); | ||
const verbose = (0, debug_1.default)(`${ns}:verbose`); | ||
verbose.log = console.log.bind(console); | ||
const info = (0, debug_1.default)(`${ns}:info`); | ||
info.log = console.info.bind(console); | ||
const warn = (0, debug_1.default)(`${ns}:warn`); | ||
warn.log = console.warn.bind(console); | ||
const error = (0, debug_1.default)(`${ns}:error`); | ||
error.log = console.error.bind(console); | ||
return { | ||
error, | ||
info: base, | ||
info, | ||
verbose, | ||
warn, | ||
}; | ||
} | ||
exports.logger = logger; | ||
function versionCompare(ref, version) { | ||
const r = ref.split('.'); | ||
if (r.length < 3) { | ||
return Res.err('invalid ref'); | ||
} | ||
const v = version.split('.'); | ||
if (v.length < 3) { | ||
return Res.err('invalid version'); | ||
} | ||
const [rMj, rMn, rP] = r; | ||
const [vMj, vMn, vP] = v; | ||
if (parseInt(rMj, 10) !== parseInt(vMj, 10)) { | ||
return Res.ok(VrsnCmp.MajorMismatch); | ||
} | ||
if (parseInt(rMn, 10) !== parseInt(vMn, 10)) { | ||
return Res.ok(VrsnCmp.MinorMismatch); | ||
} | ||
if (parseInt(rP, 10) !== parseInt(vP, 10)) { | ||
return Res.ok(VrsnCmp.PatchMismatch); | ||
} | ||
return Res.ok(VrsnCmp.Identical); | ||
} | ||
exports.versionCompare = versionCompare; | ||
function setDebugScope(scope) { | ||
debug_1.default.enable(scope); | ||
} | ||
exports.setDebugScope = setDebugScope; |
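versionCompare only looks at the first three dot-separated fields and reports the most significant mismatch; setDebugScope simply forwards to debug.enable. A small usage sketch:

import * as Res from './result';
import * as Utils from './utils';

const cmp = Utils.versionCompare('1.6.0', '1.6.2');
if (Res.isOk(cmp)) {
    console.log(cmp.res === Utils.VrsnCmp.PatchMismatch); // true
}
Utils.versionCompare('1.6', '1.6.0'); // Res.err('invalid ref') - needs at least three fields

// enable the per-level namespaces created by logger(), e.g. rpch:...:error
Utils.setDebugScope('rpch:*');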
# @rpch/sdk | ||
## 1.6.0 | ||
### Minor Changes | ||
- cda0447: inform user about version mismatches and improve logging | ||
## 1.5.3 | ||
### Patch Changes | ||
- fix info resp dangling response polling | ||
increase exit-node request purging timeout | ||
## 1.5.2 | ||
### Patch Changes | ||
- better timeout logs | ||
## 1.5.1 | ||
### Patch Changes | ||
- fix zero hop info req | ||
## 1.5.0 | ||
### Minor Changes | ||
- crypto protocol update | ||
## 1.4.0 | ||
@@ -4,0 +35,0 @@ |
{ | ||
"name": "@rpch/sdk", | ||
"version": "1.4.1", | ||
"version": "1.6.0", | ||
"license": "LGPL-3.0", | ||
@@ -24,3 +24,4 @@ "main": "./build/index.js", | ||
"scripts": { | ||
"build": "tsc", | ||
"prebuild": "node -p \"'export default \\'' + require('./package.json').version + '\\';'\" > src/version.ts", | ||
"build": "yarn prebuild && tsc", | ||
"dev": "tsc --watch", | ||
@@ -38,3 +39,3 @@ "format": "prettier --write src/ .eslintrc.js jest.config.ts package.json tsconfig.json", | ||
"dependencies": { | ||
"@rpch/compat-crypto": "^0.7.0", | ||
"@rpch/compat-crypto": "^0.8.0", | ||
"async-retry": "^1.3.3", | ||
@@ -41,0 +42,0 @@ "debug": "^4.3.4", |
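The new prebuild step writes the package version into src/version.ts before compiling; this is the module the compiled index.js imports as version_1.default and what onVersions compares against the SDK version reported by the discovery platform. For 1.6.0 the generated file would contain:

// src/version.ts (generated by the prebuild script)
export default '1.6.0';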
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
+ Added @rpch/compat-crypto@0.8.0 (transitive)
- Removed @rpch/compat-crypto@0.7.0 (transitive)
Updated @rpch/compat-crypto@^0.8.0