@toruslabs/torus.js
Comparing version 15.0.5 to 15.1.0-0
@@ -5,2 +5,3 @@ 'use strict';
  GET_OR_SET_KEY: "GetPubKeyOrKeyAssign",
  VERIFIER_LOOKUP: "VerifierLookupRequest",
  COMMITMENT_REQUEST: "CommitmentRequest",
@@ -7,0 +8,0 @@ IMPORT_SHARES: "ImportShares",
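The new VERIFIER_LOOKUP entry is the only change here. For orientation, a sketch of the JSON-RPC body that nodeUtils (further down) assembles around this method name; the id and all param values are illustrative:

const lookupBody = {
  jsonrpc: "2.0",
  id: 10, // illustrative request id
  method: "VerifierLookupRequest",
  params: {
    verifier: "my-verifier", // hypothetical verifier name
    verifier_id: "user@example.com", // hypothetical verifier id
    key_type: "secp256k1",
    client_time: Math.floor(Date.now() / 1000).toString()
  }
};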
@@ -43,2 +43,16 @@ 'use strict';
};
const normalizeLookUpResult = result => {
  const finalResult = {
    keys: []
  };
  if (result && result.keys && result.keys.length > 0) {
    const finalKey = result.keys[0];
    finalResult.keys = [{
      pub_key_X: finalKey.pub_key_X,
      pub_key_Y: finalKey.pub_key_Y,
      address: finalKey.address
    }];
  }
  return finalResult;
};
const kCombinations = (s, k) => {
@@ -180,4 +194,5 @@ let set = s;
exports.normalizeKeysResult = normalizeKeysResult;
exports.normalizeLookUpResult = normalizeLookUpResult;
exports.retryCommitment = retryCommitment;
exports.thresholdSame = thresholdSame;
exports.waitFor = waitFor;
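normalizeLookUpResult trims a node's lookup response down to the three fields the lookup flow compares across nodes with thresholdSame, so per-node extras cannot defeat the threshold match. A minimal sketch of its behavior (the sample payload is hypothetical; the root re-export appears later in this diff):

const { normalizeLookUpResult } = require("@toruslabs/torus.js");

const sampleNodeResult = {
  keys: [{ pub_key_X: "3b..", pub_key_Y: "9f..", address: "0xabc...", key_index: "0" }],
  node_index: "2" // per-node noise, dropped by normalization
};
// => { keys: [{ pub_key_X: "3b..", pub_key_Y: "9f..", address: "0xabc..." }] }
console.log(normalizeLookUpResult(sampleNodeResult));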
@@ -49,3 +49,3 @@ 'use strict';
    });
    const errorResult = common.thresholdSame(lookupPubKeys.map(x2 => x2 && x2.error), minThreshold);
    const errorResult = common.thresholdSame(lookupResults.map(x2 => x2 && x2.error), minThreshold);
    const keyResult = common.thresholdSame(lookupPubKeys.map(x3 => x3 && common.normalizeKeysResult(x3.result)), minThreshold);
@@ -113,57 +113,58 @@
};
const VerifierLookupRequest = async params => {
  const {
    endpoints,
    verifier,
    verifierId,
    keyType
  } = params;
  const minThreshold = ~~(endpoints.length / 2) + 1;
  const lookupPromises = endpoints.map(x => httpHelpers.post(x, httpHelpers.generateJsonRPCObject(constants.JRPC_METHODS.VERIFIER_LOOKUP, {
    verifier,
    verifier_id: verifierId.toString(),
    key_type: keyType,
    client_time: Math.floor(Date.now() / 1000).toString()
  }), {}, {
    logTracingHeader: config.config.logRequestTracing
  }).catch(err => loglevel.error(`${constants.JRPC_METHODS.GET_OR_SET_KEY} request failed`, err)));
  const result = await some.Some(lookupPromises, async lookupResults => {
    const lookupPubKeys = lookupResults.filter(x1 => {
      if (x1 && !x1.error) {
        return x1;
      }
      return false;
    });
    const errorResult = common.thresholdSame(lookupResults.map(x2 => x2 && x2.error), minThreshold);
    const keyResult = common.thresholdSame(lookupPubKeys.map(x3 => x3 && common.normalizeLookUpResult(x3.result)), minThreshold);
    const serverTimeOffsets = [];
    if (keyResult || errorResult) {
      const serverTimeOffset = keyResult ? common.calculateMedian(serverTimeOffsets) : 0;
      return Promise.resolve({
        keyResult,
        serverTimeOffset,
        errorResult
      });
    }
    return Promise.reject(new Error(`invalid lookup result: ${JSON.stringify(lookupResults)} for verifier: ${verifier}, verifierId: ${verifierId}`));
  });
  return result;
};
const commitmentRequest = async params => {
  const {
    idToken,
    endpoints,
    indexes,
    keyType,
    verifier,
    verifierParams,
    pubKeyX,
    pubKeyY,
    finalImportedShares,
    overrideExistingKey
  } = params;
  const tokenCommitment = common.keccak256(Buffer.from(idToken, "utf8"));
  const threeFourthsThreshold = ~~(endpoints.length * 3 / 4) + 1;
  const halfThreshold = ~~(endpoints.length / 2) + 1;
  const promiseArr = [];
  // make commitment requests to endpoints
@@ -196,48 +197,21 @@ for (let i = 0; i < endpoints.length; i += 1) {
  }
  // send share request once k + t number of commitment requests have completed
  return new Promise((resolve, reject) => {
    some.Some(promiseArr, resultArr => {
      const completedRequests = resultArr.filter(x => {
        if (!x || typeof x !== "object") {
          return false;
        }
        if (x.error) {
          return false;
        }
        return true;
      });
      if (finalImportedShares.length > 0) {
        // this is a optimization is for imported keys
        // for new imported keys registration we need to wait for all nodes to agree on commitment
        // for fetching existing imported keys we can rely on threshold nodes commitment
        if (overrideExistingKey && completedRequests.length === endpoints.length) {
          const requiredNodeResult = completedRequests.find(resp => {
            if (resp) {
              return true;
            }
            return false;
          });
          if (requiredNodeResult) {
            return Promise.resolve(resultArr);
          }
@@ -250,22 +224,119 @@ }
        } else if (!overrideExistingKey && completedRequests.length >= threeFourthsThreshold) {
          const nodeSigs = [];
          for (let i = 0; i < completedRequests.length; i += 1) {
            const x = completedRequests[i];
            if (!x || typeof x !== "object" || x.error) {
              continue;
            }
            if (x) nodeSigs.push(x.result);
          }
          const existingPubKey = common.thresholdSame(nodeSigs.map(x => x && x.pub_key_x), halfThreshold);
          const proxyEndpointNum = common.getProxyCoordinatorEndpointIndex(endpoints, verifier, verifierParams.verifier_id);
          // for import shares, proxy node response is required.
          // proxy node returns metadata.
          // if user's account already
          const requiredNodeIndex = indexes[proxyEndpointNum].toString(10);
          // if not a existing key we need to wait for nodes to agree on commitment
          if (existingPubKey || !existingPubKey && completedRequests.length === endpoints.length) {
            const requiredNodeResult = completedRequests.find(resp => {
              var _resp$result;
              if (resp && ((_resp$result = resp.result) === null || _resp$result === void 0 ? void 0 : _resp$result.nodeindex) === requiredNodeIndex) {
                return true;
              }
              return false;
            });
            if (requiredNodeResult) {
              return Promise.resolve(resultArr);
            }
          }
        }
      } else if (completedRequests.length >= threeFourthsThreshold) {
        // this case is for dkg keys
        const requiredNodeResult = completedRequests.find(resp => {
          if (resp) {
            return true;
          }
          return false;
        });
        if (requiredNodeResult) {
          return Promise.resolve(resultArr);
        }
      }
      return Promise.reject(new Error(`invalid commitment results ${JSON.stringify(resultArr)}`));
    }).then(resultArr => {
      return resolve(resultArr);
    }).catch(reject);
  });
};
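The two thresholds above gate when the commitment round can resolve. A worked example for a hypothetical 9-node network:

const endpoints = new Array(9).fill("https://node.example/jrpc"); // hypothetical
const halfThreshold = ~~(endpoints.length / 2) + 1; // floor(9 / 2) + 1 = 5 (k)
const threeFourthsThreshold = ~~(endpoints.length * 3 / 4) + 1; // floor(27 / 4) + 1 = 7 (k + t)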
async function retrieveOrImportShare(params) {
  const {
    legacyMetadataHost,
    enableOneKey,
    ecCurve,
    keyType,
    allowHost,
    network,
    clientId,
    endpoints,
    nodePubkeys,
    indexes,
    verifier,
    verifierParams,
    idToken,
    overrideExistingKey,
    newImportedShares,
    extraParams,
    useDkg = true,
    serverTimeOffset,
    checkCommitment = true
  } = params;
  await httpHelpers.get(allowHost, {
    headers: {
      verifier,
      verifierid: verifierParams.verifier_id,
      network,
      clientid: clientId,
      enablegating: "true"
    }
  }, {
    useAPIKey: true
  });
  // generate temporary private and public key that is used to secure receive shares
  const sessionAuthKey = eccrypto.generatePrivate();
  const pubKey = eccrypto.getPublic(sessionAuthKey).toString("hex");
  const sessionPubX = pubKey.slice(2, 66);
  const sessionPubY = pubKey.slice(66);
  let finalImportedShares = [];
  const halfThreshold = ~~(endpoints.length / 2) + 1;
  if ((newImportedShares === null || newImportedShares === void 0 ? void 0 : newImportedShares.length) > 0) {
    if (newImportedShares.length !== endpoints.length) {
      throw new Error("Invalid imported shares length");
    }
    finalImportedShares = newImportedShares;
  } else if (!useDkg) {
    const bufferKey = keyType === constants$1.KEY_TYPE.SECP256K1 ? common.generatePrivateKey(ecCurve, Buffer) : await random.getRandomBytes(32);
    const generatedShares = await keyUtils.generateShares(ecCurve, keyType, serverTimeOffset, indexes, nodePubkeys, Buffer.from(bufferKey));
    finalImportedShares = [...finalImportedShares, ...generatedShares];
  }
  let commitmentRequestResult = [];
  let isExistingKey;
  const nodeSigs = [];
  if (checkCommitment) {
    commitmentRequestResult = await commitmentRequest({
      idToken,
      endpoints,
      indexes,
      keyType,
      verifier,
      verifierParams,
      pubKeyX: sessionPubX,
      pubKeyY: sessionPubY,
      finalImportedShares,
      overrideExistingKey
    });
    for (let i = 0; i < commitmentRequestResult.length; i += 1) {
      const x = commitmentRequestResult[i];
      if (!x || typeof x !== "object" || x.error) {
@@ -276,265 +347,286 @@ continue;
      }
      if (x) nodeSigs.push(x.result);
    }
    // if user's account already
    isExistingKey = !!common.thresholdSame(nodeSigs.map(x => x && x.pub_key_x), halfThreshold);
  } else if (!checkCommitment && finalImportedShares.length > 0) {
    // in case not allowed to override existing key for import request
    // check if key exists
    if (!overrideExistingKey) {
      var _keyLookupResult$erro, _keyLookupResult$keyR;
      const keyLookupResult = await VerifierLookupRequest({
        endpoints,
        verifier,
        verifierId: verifierParams.verifier_id,
        keyType
      });
      if (keyLookupResult.errorResult && !((_keyLookupResult$erro = keyLookupResult.errorResult) !== null && _keyLookupResult$erro !== void 0 && (_keyLookupResult$erro = _keyLookupResult$erro.data) !== null && _keyLookupResult$erro !== void 0 && _keyLookupResult$erro.includes("Verifier + VerifierID has not yet been assigned"))) {
        throw new Error(`node results do not match at first lookup ${JSON.stringify(keyLookupResult.keyResult || {})}, ${JSON.stringify(keyLookupResult.errorResult || {})}`);
      }
      if (((_keyLookupResult$keyR = keyLookupResult.keyResult) === null || _keyLookupResult$keyR === void 0 || (_keyLookupResult$keyR = _keyLookupResult$keyR.keys) === null || _keyLookupResult$keyR === void 0 ? void 0 : _keyLookupResult$keyR.length) > 0) {
        isExistingKey = !!keyLookupResult.keyResult.keys[0];
      }
    }
  }
  const promiseArrRequest = [];
  // can only import shares if override existing key is allowed or for new non dkg registration
  const canImportedShares = overrideExistingKey || !useDkg && !isExistingKey;
  if (canImportedShares) {
    const proxyEndpointNum = common.getProxyCoordinatorEndpointIndex(endpoints, verifier, verifierParams.verifier_id);
    const items = [];
    for (let i = 0; i < endpoints.length; i += 1) {
      const importedShare = finalImportedShares[i];
      if (!importedShare) {
        throw new Error(`invalid imported share at index ${i}`);
      }
      items.push(_objectSpread(_objectSpread({}, verifierParams), {}, {
        idtoken: idToken,
        nodesignatures: nodeSigs,
        verifieridentifier: verifier,
        pub_key_x: importedShare.oauth_pub_key_x,
        pub_key_y: importedShare.oauth_pub_key_y,
        signing_pub_key_x: importedShare.signing_pub_key_x,
        signing_pub_key_y: importedShare.signing_pub_key_y,
        encrypted_share: importedShare.encrypted_share,
        encrypted_share_metadata: importedShare.encrypted_share_metadata,
        node_index: importedShare.node_index,
        key_type: importedShare.key_type,
        nonce_data: importedShare.nonce_data,
        nonce_signature: importedShare.nonce_signature,
        sss_endpoint: endpoints[i]
      }, extraParams));
    }
    const p = httpHelpers.post(endpoints[proxyEndpointNum], httpHelpers.generateJsonRPCObject(constants.JRPC_METHODS.IMPORT_SHARES, {
      encrypted: "yes",
      use_temp: true,
      verifieridentifier: verifier,
      temppubx: nodeSigs.length === 0 && !checkCommitment ? sessionPubX : "",
      // send session pub key x only if node signatures are not available (Ie. in non commitment flow)
      temppuby: nodeSigs.length === 0 && !checkCommitment ? sessionPubY : "",
      // send session pub key y only if node signatures are not available (Ie. in non commitment flow)
      item: items,
      key_type: keyType,
      one_key_flow: true
    }), {}, {
      logTracingHeader: config.config.logRequestTracing
    }).catch(err => loglevel.error("share req", err));
    promiseArrRequest.push(p);
  } else {
    for (let i = 0; i < endpoints.length; i += 1) {
      const p = httpHelpers.post(endpoints[i], httpHelpers.generateJsonRPCObject(constants.JRPC_METHODS.GET_SHARE_OR_KEY_ASSIGN, {
        encrypted: "yes",
        use_temp: true,
        key_type: keyType,
        distributed_metadata: true,
        verifieridentifier: verifier,
        temppubx: nodeSigs.length === 0 && !checkCommitment ? sessionPubX : "",
        // send session pub key x only if node signatures are not available (Ie. in non commitment flow)
        temppuby: nodeSigs.length === 0 && !checkCommitment ? sessionPubY : "",
        // send session pub key y only if node signatures are not available (Ie. in non commitment flow)
        item: [_objectSpread(_objectSpread({}, verifierParams), {}, {
          idtoken: idToken,
          key_type: keyType,
          nodesignatures: nodeSigs,
          verifieridentifier: verifier
        }, extraParams)],
        client_time: Math.floor(Date.now() / 1000).toString(),
        one_key_flow: true
      }), {}, {
        logTracingHeader: config.config.logRequestTracing
      }).catch(err => loglevel.error("share req", err));
      promiseArrRequest.push(p);
    }
  }
  let thresholdNonceData;
  return some.Some(promiseArrRequest, async (shareResponseResult, sharedState) => {
    let shareResponses = [];
    // for import shares case, where result is an array
    if (shareResponseResult.length === 1 && shareResponseResult[0] && Array.isArray(shareResponseResult[0].result)) {
      // this is for import shares
      const importedSharesResult = shareResponseResult[0];
      shareResponseResult[0].result.forEach(res => {
        shareResponses.push({
          id: importedSharesResult.id,
          jsonrpc: "2.0",
          result: res,
          error: importedSharesResult.error
        });
      });
    } else {
      shareResponses = shareResponseResult;
    }
    // check if threshold number of nodes have returned the same user public key
    const completedRequests = shareResponses.filter(x => {
      if (!x || typeof x !== "object") {
        return false;
      }
      if (x.error) {
        return false;
      }
      return true;
    });
    const pubkeys = shareResponses.map(x => {
      if (x && x.result && x.result.keys[0].public_key) {
        return x.result.keys[0].public_key;
      }
      return undefined;
    });
    const thresholdPublicKey = common.thresholdSame(pubkeys, halfThreshold);
    if (!thresholdPublicKey) {
      throw new Error("invalid result from nodes, threshold number of public key results are not matching");
    }
    shareResponses.forEach(x => {
      const requiredShareResponse = x && x.result && x.result.keys[0].public_key && x.result.keys[0];
      if (requiredShareResponse && !thresholdNonceData && !verifierParams.extended_verifier_id) {
        var _requiredShareRespons;
        const currentPubKey = requiredShareResponse.public_key;
        const pubNonce = (_requiredShareRespons = requiredShareResponse.nonce_data) === null || _requiredShareRespons === void 0 || (_requiredShareRespons = _requiredShareRespons.pubNonce) === null || _requiredShareRespons === void 0 ? void 0 : _requiredShareRespons.x;
        if (pubNonce && currentPubKey.X === thresholdPublicKey.X) {
          thresholdNonceData = requiredShareResponse.nonce_data;
        }
      }
    });
    const thresholdReqCount = canImportedShares ? endpoints.length : halfThreshold;
    // optimistically run lagrange interpolation once threshold number of shares have been received
    // this is matched against the user public key to ensure that shares are consistent
    // Note: no need of thresholdMetadataNonce for extended_verifier_id key
    if (completedRequests.length >= thresholdReqCount && thresholdPublicKey) {
      const sharePromises = [];
      const sessionTokenSigPromises = [];
      const sessionTokenPromises = [];
      const nodeIndexes = [];
      const sessionTokenData = [];
      const isNewKeyResponses = [];
      const serverTimeOffsetResponses = [];
      for (let i = 0; i < completedRequests.length; i += 1) {
        var _currentShareResponse;
        const currentShareResponse = completedRequests[i];
        const {
          session_tokens: sessionTokens,
          session_token_metadata: sessionTokenMetadata,
          session_token_sigs: sessionTokenSigs,
          session_token_sig_metadata: sessionTokenSigMetadata,
          keys,
          is_new_key: isNewKey,
          server_time_offset: serverTimeOffsetResponse
        } = currentShareResponse.result;
        isNewKeyResponses.push({
          isNewKey,
          publicKey: ((_currentShareResponse = currentShareResponse.result) === null || _currentShareResponse === void 0 || (_currentShareResponse = _currentShareResponse.keys[0]) === null || _currentShareResponse === void 0 || (_currentShareResponse = _currentShareResponse.public_key) === null || _currentShareResponse === void 0 ? void 0 : _currentShareResponse.X) || ""
        });
        serverTimeOffsetResponses.push(serverTimeOffsetResponse || "0");
        if ((sessionTokenSigs === null || sessionTokenSigs === void 0 ? void 0 : sessionTokenSigs.length) > 0) {
          var _sessionTokenSigMetad;
          // decrypt sessionSig if enc metadata is sent
          if (sessionTokenSigMetadata && (_sessionTokenSigMetad = sessionTokenSigMetadata[0]) !== null && _sessionTokenSigMetad !== void 0 && _sessionTokenSigMetad.ephemPublicKey) {
            sessionTokenSigPromises.push(metadataUtils.decryptNodeData(sessionTokenSigMetadata[0], sessionTokenSigs[0], sessionAuthKey).catch(err => loglevel.error("session sig decryption", err)));
          } else {
            sessionTokenSigPromises.push(Promise.resolve(Buffer.from(sessionTokenSigs[0], "hex")));
          }
        } else {
          sessionTokenSigPromises.push(Promise.resolve(undefined));
        }
        if ((sessionTokens === null || sessionTokens === void 0 ? void 0 : sessionTokens.length) > 0) {
          var _sessionTokenMetadata;
          // decrypt session token if enc metadata is sent
          if (sessionTokenMetadata && (_sessionTokenMetadata = sessionTokenMetadata[0]) !== null && _sessionTokenMetadata !== void 0 && _sessionTokenMetadata.ephemPublicKey) {
            sessionTokenPromises.push(metadataUtils.decryptNodeData(sessionTokenMetadata[0], sessionTokens[0], sessionAuthKey).catch(err => loglevel.error("session token sig decryption", err)));
          } else {
            sessionTokenPromises.push(Promise.resolve(Buffer.from(sessionTokens[0], "base64")));
          }
        } else {
          sessionTokenPromises.push(Promise.resolve(undefined));
        }
        if ((keys === null || keys === void 0 ? void 0 : keys.length) > 0) {
          const latestKey = currentShareResponse.result.keys[0];
          nodeIndexes.push(new BN(latestKey.node_index));
          if (latestKey.share_metadata) {
            sharePromises.push(metadataUtils.decryptNodeDataWithPadding(latestKey.share_metadata, Buffer.from(latestKey.share, "base64").toString("binary"), sessionAuthKey).catch(err => loglevel.error("share decryption", err)));
          }
        } else {
          nodeIndexes.push(undefined);
          sharePromises.push(Promise.resolve(undefined));
        }
      }
      const allPromises = await Promise.all(sharePromises.concat(sessionTokenSigPromises).concat(sessionTokenPromises));
      const sharesResolved = allPromises.slice(0, sharePromises.length);
      const sessionSigsResolved = allPromises.slice(sharePromises.length, sharePromises.length + sessionTokenSigPromises.length);
      const sessionTokensResolved = allPromises.slice(sharePromises.length + sessionTokenSigPromises.length, allPromises.length);
      const validSigs = sessionSigsResolved.filter(sig => {
        if (sig) {
          return true;
        }
        return false;
      });
      if (!verifierParams.extended_verifier_id && validSigs.length < halfThreshold) {
        throw new Error(`Insufficient number of signatures from nodes, required: ${halfThreshold}, found: ${validSigs.length}`);
      }
      const validTokens = sessionTokensResolved.filter(token => {
        if (token) {
          return true;
        }
        return false;
      });
      if (!verifierParams.extended_verifier_id && validTokens.length < halfThreshold) {
        throw new Error(`Insufficient number of session tokens from nodes, required: ${halfThreshold}, found: ${validTokens.length}`);
      }
      sessionTokensResolved.forEach((x, index) => {
        if (!x || !sessionSigsResolved[index]) sessionTokenData.push(undefined); else sessionTokenData.push({
          token: x.toString("base64"),
          signature: sessionSigsResolved[index].toString("hex"),
          node_pubx: completedRequests[index].result.node_pubx,
          node_puby: completedRequests[index].result.node_puby
        });
      });
      if (sharedState.resolved) return undefined;
      const decryptedShares = sharesResolved.reduce((acc, curr, index) => {
        if (curr) {
          acc.push({
            index: nodeIndexes[index],
            value: new BN(curr)
          });
        }
        return acc;
      }, []);
      // run lagrange interpolation on all subsets, faster in the optimistic scenario than berlekamp-welch due to early exit
      const allCombis = common.kCombinations(decryptedShares.length, halfThreshold);
      let privateKey = null;
      for (let j = 0; j < allCombis.length; j += 1) {
        const currentCombi = allCombis[j];
        const currentCombiShares = decryptedShares.filter((_, index) => currentCombi.includes(index));
        const shares = currentCombiShares.map(x => x.value);
        const indices = currentCombiShares.map(x => x.index);
        const derivedPrivateKey = langrangeInterpolatePoly.lagrangeInterpolation(ecCurve, shares, indices);
        if (!derivedPrivateKey) continue;
        const decryptedPubKey = keyUtils.derivePubKey(ecCurve, derivedPrivateKey);
        const decryptedPubKeyX = decryptedPubKey.getX();
        const decryptedPubKeyY = decryptedPubKey.getY();
        if (decryptedPubKeyX.cmp(new BN(thresholdPublicKey.X, 16)) === 0 && decryptedPubKeyY.cmp(new BN(thresholdPublicKey.Y, 16)) === 0) {
          privateKey = derivedPrivateKey;
          break;
        }
      }
      if (privateKey === undefined || privateKey === null) {
        throw new Error("could not derive private key");
      }
      let isNewKey = false;
      isNewKeyResponses.forEach(x => {
        if (x.isNewKey === "true" && x.publicKey.toLowerCase() === thresholdPublicKey.X.toLowerCase()) {
          isNewKey = true;
        }
      });
      // Convert each string timestamp to a number
      const serverOffsetTimes = serverTimeOffsetResponses.map(timestamp => Number.parseInt(timestamp, 10));
      return {
        privateKey,
        sessionTokenData,
        thresholdNonceData,
        nodeIndexes,
        thresholdPubKey: thresholdPublicKey,
        isNewKey,
        serverTimeOffsetResponse: serverTimeOffset || common.calculateMedian(serverOffsetTimes)
      };
    }
    if (completedRequests.length < thresholdReqCount) {
      throw new Error(`Waiting for results from more nodes, pending: ${thresholdReqCount - completedRequests.length}`);
    }
    throw new Error(`Invalid results, threshold pub key: ${thresholdPublicKey}, nonce data found: ${!!thresholdNonceData}, extended verifierId: ${verifierParams.extended_verifier_id}`);
  }).then(async res => {
@@ -721,2 +813,3 @@ var _nonceResult;
exports.GetPubKeyOrKeyAssign = GetPubKeyOrKeyAssign;
exports.VerifierLookupRequest = VerifierLookupRequest;
exports.retrieveOrImportShare = retrieveOrImportShare;
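The kCombinations/lagrangeInterpolation loop in this hunk tries every halfThreshold-sized subset of the decrypted shares and accepts the first subset whose interpolated key matches the threshold public key. A compact, self-contained sketch of the interpolation at x = 0 (an illustration of the underlying math, not the library's lagrangeInterpolation implementation):

const BN = require("bn.js");
const { ec: EC } = require("elliptic");

const n = new EC("secp256k1").curve.n; // curve order

// Reconstruct f(0) from k points { index, value } working mod n.
function lagrangeAtZero(points) {
  let secret = new BN(0);
  for (const { index: xi, value: yi } of points) {
    let num = new BN(1);
    let den = new BN(1);
    for (const { index: xj } of points) {
      if (xj.eq(xi)) continue;
      num = num.mul(xj.neg()).umod(n); // multiply by (0 - xj)
      den = den.mul(xi.sub(xj)).umod(n); // multiply by (xi - xj)
    }
    secret = secret.add(yi.mul(num).mul(den.invm(n))).umod(n);
  }
  return secret;
}

// Shares of the degree-1 polynomial f(x) = 7 + 3x, threshold k = 2:
const points = [
  { index: new BN(1), value: new BN(10) }, // f(1)
  { index: new BN(2), value: new BN(13) } // f(2)
];
console.log(lagrangeAtZero(points).toString(10)); // "7"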
@@ -33,2 +33,3 @@ 'use strict';
exports.normalizeKeysResult = common.normalizeKeysResult;
exports.normalizeLookUpResult = common.normalizeLookUpResult;
exports.retryCommitment = common.retryCommitment;
@@ -65,2 +66,3 @@ exports.thresholdSame = common.thresholdSame;
exports.GetPubKeyOrKeyAssign = nodeUtils.GetPubKeyOrKeyAssign;
exports.VerifierLookupRequest = nodeUtils.VerifierLookupRequest;
exports.retrieveOrImportShare = nodeUtils.retrieveOrImportShare;
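With the index re-exports above, the new lookup helper is callable from the package root. A hedged usage sketch (endpoint URLs and verifier values are placeholders; real deployments obtain them from the node-details flow):

const { VerifierLookupRequest } = require("@toruslabs/torus.js");

VerifierLookupRequest({
  endpoints: [
    "https://node-1.example.com/sss/jrpc", // hypothetical node endpoints
    "https://node-2.example.com/sss/jrpc",
    "https://node-3.example.com/sss/jrpc"
  ],
  verifier: "my-verifier",
  verifierId: "user@example.com",
  keyType: "secp256k1"
}).then(({ keyResult, errorResult, serverTimeOffset }) => {
  // keyResult.keys[0] carries { pub_key_X, pub_key_Y, address } once a
  // majority of nodes agree; errorResult carries a majority error instead.
  console.log(keyResult, errorResult, serverTimeOffset);
});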
@@ -83,3 +83,4 @@ 'use strict';
    useDkg,
    extraParams = {}
    extraParams = {},
    checkCommitment = true
  } = params;
@@ -132,3 +133,4 @@ if (nodePubkeys.length === 0) {
    nodePubkeys,
    extraParams
    extraParams,
    checkCommitment
  });
@@ -161,3 +163,4 @@ }
    endpoints,
    extraParams = {}
    extraParams = {},
    checkCommitment = true
  } = params;
@@ -215,3 +218,4 @@ if (constants.LEGACY_NETWORKS_ROUTE_MAP[this.network]) {
    nodePubkeys,
    extraParams
    extraParams,
    checkCommitment
  });
@@ -218,0 +222,0 @@ }
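These hunks thread the new checkCommitment flag through the public share-retrieval paths; it defaults to true, so existing callers keep the commitment round. A sketch of opting out, where everything except checkCommitment is a placeholder assumption:

const TorusUtils = require("@toruslabs/torus.js").default; // assuming CJS default-export interop

async function loginWithoutCommitment({ endpoints, indexes, nodePubkeys, idToken }) {
  const torus = new TorusUtils({
    clientId: "YOUR_CLIENT_ID", // hypothetical client id
    network: "sapphire_mainnet",
    keyType: "secp256k1"
  });
  return torus.retrieveShares({
    endpoints,
    indexes,
    nodePubkeys,
    verifier: "my-verifier",
    verifierParams: { verifier_id: "user@example.com" },
    idToken,
    checkCommitment: false // new in 15.1.0-0: skip the CommitmentRequest round
  });
}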
const JRPC_METHODS = {
  GET_OR_SET_KEY: "GetPubKeyOrKeyAssign",
  VERIFIER_LOOKUP: "VerifierLookupRequest",
  COMMITMENT_REQUEST: "CommitmentRequest",
@@ -4,0 +5,0 @@ IMPORT_SHARES: "ImportShares",
@@ -41,2 +41,16 @@ import { KEY_TYPE } from '@toruslabs/constants';
};
const normalizeLookUpResult = result => {
  const finalResult = {
    keys: []
  };
  if (result && result.keys && result.keys.length > 0) {
    const finalKey = result.keys[0];
    finalResult.keys = [{
      pub_key_X: finalKey.pub_key_X,
      pub_key_Y: finalKey.pub_key_Y,
      address: finalKey.address
    }];
  }
  return finalResult;
};
const kCombinations = (s, k) => {
@@ -169,2 +183,2 @@ let set = s;
export { calculateMedian, encParamsBufToHex, encParamsHexToBuf, generatePrivateKey, getKeyCurve, getProxyCoordinatorEndpointIndex, kCombinations, keccak256, normalizeKeysResult, retryCommitment, thresholdSame, waitFor };
export { calculateMedian, encParamsBufToHex, encParamsHexToBuf, generatePrivateKey, getKeyCurve, getProxyCoordinatorEndpointIndex, kCombinations, keccak256, normalizeKeysResult, normalizeLookUpResult, retryCommitment, thresholdSame, waitFor };
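The ESM build mirrors the CJS change above; bundler users can pull the new helper in by name:

// Named import from the ESM build (same normalizeLookUpResult shown earlier).
import { normalizeLookUpResult } from "@toruslabs/torus.js";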
@@ -11,3 +11,3 @@ import _objectSpread from '@babel/runtime/helpers/objectSpread2';
import { Some } from '../some.js';
import { generatePrivateKey, keccak256, retryCommitment, thresholdSame, getProxyCoordinatorEndpointIndex, normalizeKeysResult, calculateMedian, kCombinations } from './common.js';
import { generatePrivateKey, thresholdSame, getProxyCoordinatorEndpointIndex, normalizeKeysResult, calculateMedian, normalizeLookUpResult, keccak256, retryCommitment, kCombinations } from './common.js';
import { generateShares, derivePubKey, generateAddressFromPrivKey, generateAddressFromPubKey } from './keyUtils.js';
@@ -48,3 +48,3 @@ import { lagrangeInterpolation } from './langrangeInterpolatePoly.js';
    });
    const errorResult = thresholdSame(lookupPubKeys.map(x2 => x2 && x2.error), minThreshold);
    const errorResult = thresholdSame(lookupResults.map(x2 => x2 && x2.error), minThreshold);
    const keyResult = thresholdSame(lookupPubKeys.map(x3 => x3 && normalizeKeysResult(x3.result)), minThreshold);
@@ -112,57 +112,58 @@
};
const VerifierLookupRequest = async params => {
  const {
    endpoints,
    verifier,
    verifierId,
    keyType
  } = params;
  const minThreshold = ~~(endpoints.length / 2) + 1;
  const lookupPromises = endpoints.map(x => post(x, generateJsonRPCObject(JRPC_METHODS.VERIFIER_LOOKUP, {
    verifier,
    verifier_id: verifierId.toString(),
    key_type: keyType,
    client_time: Math.floor(Date.now() / 1000).toString()
  }), {}, {
    logTracingHeader: config.logRequestTracing
  }).catch(err => log.error(`${JRPC_METHODS.GET_OR_SET_KEY} request failed`, err)));
  const result = await Some(lookupPromises, async lookupResults => {
    const lookupPubKeys = lookupResults.filter(x1 => {
      if (x1 && !x1.error) {
        return x1;
      }
      return false;
    });
    const errorResult = thresholdSame(lookupResults.map(x2 => x2 && x2.error), minThreshold);
    const keyResult = thresholdSame(lookupPubKeys.map(x3 => x3 && normalizeLookUpResult(x3.result)), minThreshold);
    const serverTimeOffsets = [];
    if (keyResult || errorResult) {
      const serverTimeOffset = keyResult ? calculateMedian(serverTimeOffsets) : 0;
      return Promise.resolve({
        keyResult,
        serverTimeOffset,
        errorResult
      });
    }
    return Promise.reject(new Error(`invalid lookup result: ${JSON.stringify(lookupResults)} for verifier: ${verifier}, verifierId: ${verifierId}`));
  });
  return result;
};
const commitmentRequest = async params => {
  const {
    idToken,
    endpoints,
    indexes,
    keyType,
    verifier,
    verifierParams,
    pubKeyX,
    pubKeyY,
    finalImportedShares,
    overrideExistingKey
  } = params;
  const tokenCommitment = keccak256(Buffer.from(idToken, "utf8"));
  const threeFourthsThreshold = ~~(endpoints.length * 3 / 4) + 1;
  const halfThreshold = ~~(endpoints.length / 2) + 1;
  const promiseArr = [];
  // make commitment requests to endpoints
@@ -195,48 +196,21 @@ for (let i = 0; i < endpoints.length; i += 1) {
  }
  // send share request once k + t number of commitment requests have completed
  return new Promise((resolve, reject) => {
    Some(promiseArr, resultArr => {
      const completedRequests = resultArr.filter(x => {
        if (!x || typeof x !== "object") {
          return false;
        }
        if (x.error) {
          return false;
        }
        return true;
      });
      if (finalImportedShares.length > 0) {
        // this is a optimization is for imported keys
        // for new imported keys registration we need to wait for all nodes to agree on commitment
        // for fetching existing imported keys we can rely on threshold nodes commitment
        if (overrideExistingKey && completedRequests.length === endpoints.length) {
          const requiredNodeResult = completedRequests.find(resp => {
            if (resp) {
              return true;
            }
            return false;
          });
          if (requiredNodeResult) {
            return Promise.resolve(resultArr);
          }
@@ -249,22 +223,119 @@ }
        } else if (!overrideExistingKey && completedRequests.length >= threeFourthsThreshold) {
          const nodeSigs = [];
          for (let i = 0; i < completedRequests.length; i += 1) {
            const x = completedRequests[i];
            if (!x || typeof x !== "object" || x.error) {
              continue;
            }
            if (x) nodeSigs.push(x.result);
          }
          const existingPubKey = thresholdSame(nodeSigs.map(x => x && x.pub_key_x), halfThreshold);
          const proxyEndpointNum = getProxyCoordinatorEndpointIndex(endpoints, verifier, verifierParams.verifier_id);
          // for import shares, proxy node response is required.
          // proxy node returns metadata.
          // if user's account already
          const requiredNodeIndex = indexes[proxyEndpointNum].toString(10);
          // if not a existing key we need to wait for nodes to agree on commitment
          if (existingPubKey || !existingPubKey && completedRequests.length === endpoints.length) {
            const requiredNodeResult = completedRequests.find(resp => {
              var _resp$result;
              if (resp && ((_resp$result = resp.result) === null || _resp$result === void 0 ? void 0 : _resp$result.nodeindex) === requiredNodeIndex) {
                return true;
              }
              return false;
            });
            if (requiredNodeResult) {
              return Promise.resolve(resultArr);
            }
          }
        }
      } else if (completedRequests.length >= threeFourthsThreshold) {
        // this case is for dkg keys
        const requiredNodeResult = completedRequests.find(resp => {
          if (resp) {
            return true;
          }
          return false;
        });
        if (requiredNodeResult) {
          return Promise.resolve(resultArr);
        }
      }
      return Promise.reject(new Error(`invalid commitment results ${JSON.stringify(resultArr)}`));
    }).then(resultArr => {
      return resolve(resultArr);
    }).catch(reject);
  });
};
async function retrieveOrImportShare(params) {
  const {
    legacyMetadataHost,
    enableOneKey,
    ecCurve,
    keyType,
    allowHost,
    network,
    clientId,
    endpoints,
    nodePubkeys,
    indexes,
    verifier,
    verifierParams,
    idToken,
    overrideExistingKey,
    newImportedShares,
    extraParams,
    useDkg = true,
    serverTimeOffset,
    checkCommitment = true
  } = params;
  await get(allowHost, {
    headers: {
      verifier,
      verifierid: verifierParams.verifier_id,
      network,
      clientid: clientId,
      enablegating: "true"
    }
  }, {
    useAPIKey: true
  });
  // generate temporary private and public key that is used to secure receive shares
  const sessionAuthKey = generatePrivate();
  const pubKey = getPublic(sessionAuthKey).toString("hex");
  const sessionPubX = pubKey.slice(2, 66);
  const sessionPubY = pubKey.slice(66);
  let finalImportedShares = [];
  const halfThreshold = ~~(endpoints.length / 2) + 1;
  if ((newImportedShares === null || newImportedShares === void 0 ? void 0 : newImportedShares.length) > 0) {
    if (newImportedShares.length !== endpoints.length) {
      throw new Error("Invalid imported shares length");
    }
    finalImportedShares = newImportedShares;
  } else if (!useDkg) {
    const bufferKey = keyType === KEY_TYPE.SECP256K1 ? generatePrivateKey(ecCurve, Buffer) : await getRandomBytes(32);
    const generatedShares = await generateShares(ecCurve, keyType, serverTimeOffset, indexes, nodePubkeys, Buffer.from(bufferKey));
    finalImportedShares = [...finalImportedShares, ...generatedShares];
  }
  let commitmentRequestResult = [];
  let isExistingKey;
  const nodeSigs = [];
  if (checkCommitment) {
    commitmentRequestResult = await commitmentRequest({
      idToken,
      endpoints,
      indexes,
      keyType,
      verifier,
      verifierParams,
      pubKeyX: sessionPubX,
      pubKeyY: sessionPubY,
      finalImportedShares,
      overrideExistingKey
    });
    for (let i = 0; i < commitmentRequestResult.length; i += 1) {
      const x = commitmentRequestResult[i];
      if (!x || typeof x !== "object" || x.error) {
@@ -275,265 +346,286 @@ continue;
} | ||
// if user's account already
const existingPubKey = thresholdSame(nodeSigs.map(x => x && x.pub_key_x), halfThreshold);
// can only import shares if override existing key is allowed or for new non dkg registration
const canImportedShares = overrideExistingKey || !useDkg && !existingPubKey;
if (canImportedShares) {
const proxyEndpointNum = getProxyCoordinatorEndpointIndex(endpoints, verifier, verifierParams.verifier_id);
const items = [];
for (let i = 0; i < endpoints.length; i += 1) {
const x = responses[i];
if (!x || typeof x !== "object" || x.error) {
continue;
}
const importedShare = finalImportedShares[i];
items.push(_objectSpread(_objectSpread({}, verifierParams), {}, {
idtoken: idToken,
nodesignatures: nodeSigs,
verifieridentifier: verifier,
pub_key_x: importedShare.oauth_pub_key_x,
pub_key_y: importedShare.oauth_pub_key_y,
signing_pub_key_x: importedShare.signing_pub_key_x,
signing_pub_key_y: importedShare.signing_pub_key_y,
encrypted_share: importedShare.encrypted_share,
encrypted_share_metadata: importedShare.encrypted_share_metadata,
node_index: importedShare.node_index,
key_type: importedShare.key_type,
nonce_data: importedShare.nonce_data,
nonce_signature: importedShare.nonce_signature,
sss_endpoint: endpoints[i]
}, extraParams));
}
const p = post(endpoints[proxyEndpointNum], generateJsonRPCObject(JRPC_METHODS.IMPORT_SHARES, {
encrypted: "yes",
use_temp: true,
item: items,
key_type: keyType,
one_key_flow: true
}), {}, {
logTracingHeader: config.logRequestTracing
}).catch(err => log.error("share req", err));
promiseArrRequest.push(p);
} else {
for (let i = 0; i < endpoints.length; i += 1) {
const x = responses[i];
if (!x || typeof x !== "object" || x.error) {
continue;
}
const p = post(endpoints[i], generateJsonRPCObject(JRPC_METHODS.GET_SHARE_OR_KEY_ASSIGN, {
encrypted: "yes",
use_temp: true,
key_type: keyType,
distributed_metadata: true,
item: [_objectSpread(_objectSpread({}, verifierParams), {}, {
idtoken: idToken,
key_type: keyType,
nodesignatures: nodeSigs,
verifieridentifier: verifier
}, extraParams)],
client_time: Math.floor(Date.now() / 1000).toString(),
one_key_flow: true
}), {}, {
logTracingHeader: config.logRequestTracing
});
promiseArrRequest.push(p);
}
}
isExistingKey = !!thresholdSame(nodeSigs.map(x => x && x.pub_key_x), halfThreshold);
} else if (!checkCommitment && finalImportedShares.length > 0) {
// in case not allowed to override existing key for import request
// check if key exists
if (!overrideExistingKey) {
var _keyLookupResult$erro, _keyLookupResult$keyR;
const keyLookupResult = await VerifierLookupRequest({
endpoints,
verifier,
verifierId: verifierParams.verifier_id,
keyType
});
if (keyLookupResult.errorResult && !((_keyLookupResult$erro = keyLookupResult.errorResult) !== null && _keyLookupResult$erro !== void 0 && (_keyLookupResult$erro = _keyLookupResult$erro.data) !== null && _keyLookupResult$erro !== void 0 && _keyLookupResult$erro.includes("Verifier + VerifierID has not yet been assigned"))) {
throw new Error(`node results do not match at first lookup ${JSON.stringify(keyLookupResult.keyResult || {})}, ${JSON.stringify(keyLookupResult.errorResult || {})}`);
}
if (((_keyLookupResult$keyR = keyLookupResult.keyResult) === null || _keyLookupResult$keyR === void 0 || (_keyLookupResult$keyR = _keyLookupResult$keyR.keys) === null || _keyLookupResult$keyR === void 0 ? void 0 : _keyLookupResult$keyR.length) > 0) {
isExistingKey = !!keyLookupResult.keyResult.keys[0];
}
}
}
const promiseArrRequest = [];
const canImportedShares = overrideExistingKey || !useDkg && !isExistingKey;
if (canImportedShares) {
const proxyEndpointNum = getProxyCoordinatorEndpointIndex(endpoints, verifier, verifierParams.verifier_id);
const items = [];
for (let i = 0; i < endpoints.length; i += 1) {
const importedShare = finalImportedShares[i];
if (!importedShare) {
throw new Error(`invalid imported share at index ${i}`);
}
items.push(_objectSpread(_objectSpread({}, verifierParams), {}, {
idtoken: idToken,
nodesignatures: nodeSigs,
verifieridentifier: verifier,
pub_key_x: importedShare.oauth_pub_key_x,
pub_key_y: importedShare.oauth_pub_key_y,
signing_pub_key_x: importedShare.signing_pub_key_x,
signing_pub_key_y: importedShare.signing_pub_key_y,
encrypted_share: importedShare.encrypted_share,
encrypted_share_metadata: importedShare.encrypted_share_metadata,
node_index: importedShare.node_index,
key_type: importedShare.key_type,
nonce_data: importedShare.nonce_data,
nonce_signature: importedShare.nonce_signature,
sss_endpoint: endpoints[i]
}, extraParams));
}
const p = post(endpoints[proxyEndpointNum], generateJsonRPCObject(JRPC_METHODS.IMPORT_SHARES, {
encrypted: "yes",
use_temp: true,
verifieridentifier: verifier,
temppubx: nodeSigs.length === 0 && !checkCommitment ? sessionPubX : "",
// send session pub key x only if node signatures are not available (Ie. in non commitment flow)
temppuby: nodeSigs.length === 0 && !checkCommitment ? sessionPubY : "",
// send session pub key y only if node signatures are not available (Ie. in non commitment flow)
item: items,
key_type: keyType,
one_key_flow: true
}), {}, {
logTracingHeader: config.logRequestTracing
}).catch(err => log.error("share req", err));
promiseArrRequest.push(p);
} else {
for (let i = 0; i < endpoints.length; i += 1) {
const p = post(endpoints[i], generateJsonRPCObject(JRPC_METHODS.GET_SHARE_OR_KEY_ASSIGN, {
encrypted: "yes",
use_temp: true,
key_type: keyType,
distributed_metadata: true,
verifieridentifier: verifier,
temppubx: nodeSigs.length === 0 && !checkCommitment ? sessionPubX : "",
// send session pub key x only if node signatures are not available (Ie. in non commitment flow)
temppuby: nodeSigs.length === 0 && !checkCommitment ? sessionPubY : "",
// send session pub key y only if node signatures are not available (Ie. in non commitment flow)
item: [_objectSpread(_objectSpread({}, verifierParams), {}, {
idtoken: idToken,
key_type: keyType,
nodesignatures: nodeSigs,
verifieridentifier: verifier
}, extraParams)],
client_time: Math.floor(Date.now() / 1000).toString(),
one_key_flow: true
}), {}, {
logTracingHeader: config.logRequestTracing
}).catch(err => log.error("share req", err));
promiseArrRequest.push(p);
}
}
return Some(promiseArrRequest, async (shareResponseResult, sharedState) => {
let thresholdNonceData;
let shareResponses = [];
// for import shares case, where result is an array
if (shareResponseResult.length === 1 && shareResponseResult[0] && Array.isArray(shareResponseResult[0].result)) {
// this is for import shares
const importedSharesResult = shareResponseResult[0];
shareResponseResult[0].result.forEach(res => {
shareResponses.push({
id: importedSharesResult.id,
jsonrpc: "2.0",
result: res,
error: importedSharesResult.error
});
});
} else {
shareResponses = shareResponseResult;
}
// check if threshold number of nodes have returned the same user public key
const completedRequests = shareResponses.filter(x => {
if (!x || typeof x !== "object") {
return false;
}
if (x.error) {
return false;
}
return true;
});
const pubkeys = shareResponses.map(x => {
if (x && x.result && x.result.keys[0].public_key) {
return x.result.keys[0].public_key;
}
return undefined;
});
const thresholdPublicKey = thresholdSame(pubkeys, halfThreshold);
if (!thresholdPublicKey) {
throw new Error("invalid result from nodes, threshold number of public key results are not matching");
}
shareResponses.forEach(x => {
const requiredShareResponse = x && x.result && x.result.keys[0].public_key && x.result.keys[0];
if (requiredShareResponse && !thresholdNonceData && !verifierParams.extended_verifier_id) {
var _requiredShareRespons;
const currentPubKey = requiredShareResponse.public_key;
const pubNonce = (_requiredShareRespons = requiredShareResponse.nonce_data) === null || _requiredShareRespons === void 0 || (_requiredShareRespons = _requiredShareRespons.pubNonce) === null || _requiredShareRespons === void 0 ? void 0 : _requiredShareRespons.x;
if (pubNonce && currentPubKey.X === thresholdPublicKey.X) {
thresholdNonceData = requiredShareResponse.nonce_data;
}
}
});
const thresholdReqCount = canImportedShares ? endpoints.length : halfThreshold;
// optimistically run lagrange interpolation once threshold number of shares have been received
// this is matched against the user public key to ensure that shares are consistent
// Note: no need of thresholdMetadataNonce for extended_verifier_id key
if (completedRequests.length >= thresholdReqCount && thresholdPublicKey) {
const sharePromises = [];
const sessionTokenSigPromises = [];
const sessionTokenPromises = [];
const nodeIndexes = [];
const sessionTokenData = [];
const isNewKeyResponses = [];
const serverTimeOffsetResponses = [];
for (let i = 0; i < completedRequests.length; i += 1) {
var _currentShareResponse;
const currentShareResponse = completedRequests[i];
const {
session_tokens: sessionTokens,
session_token_metadata: sessionTokenMetadata,
session_token_sigs: sessionTokenSigs,
session_token_sig_metadata: sessionTokenSigMetadata,
keys,
is_new_key: isNewKey,
server_time_offset: serverTimeOffsetResponse
} = currentShareResponse.result;
isNewKeyResponses.push({
isNewKey,
publicKey: ((_currentShareResponse = currentShareResponse.result) === null || _currentShareResponse === void 0 || (_currentShareResponse = _currentShareResponse.keys[0]) === null || _currentShareResponse === void 0 || (_currentShareResponse = _currentShareResponse.public_key) === null || _currentShareResponse === void 0 ? void 0 : _currentShareResponse.X) || ""
});
serverTimeOffsetResponses.push(serverTimeOffsetResponse || "0");
if ((sessionTokenSigs === null || sessionTokenSigs === void 0 ? void 0 : sessionTokenSigs.length) > 0) {
var _sessionTokenSigMetad;
// decrypt sessionSig if enc metadata is sent
if (sessionTokenSigMetadata && (_sessionTokenSigMetad = sessionTokenSigMetadata[0]) !== null && _sessionTokenSigMetad !== void 0 && _sessionTokenSigMetad.ephemPublicKey) {
sessionTokenSigPromises.push(decryptNodeData(sessionTokenSigMetadata[0], sessionTokenSigs[0], sessionAuthKey).catch(err => log.error("session sig decryption", err)));
} else {
sessionTokenSigPromises.push(Promise.resolve(Buffer.from(sessionTokenSigs[0], "hex")));
}
} else {
sessionTokenSigPromises.push(Promise.resolve(undefined));
}
if ((sessionTokens === null || sessionTokens === void 0 ? void 0 : sessionTokens.length) > 0) {
var _sessionTokenMetadata;
// decrypt session token if enc metadata is sent
if (sessionTokenMetadata && (_sessionTokenMetadata = sessionTokenMetadata[0]) !== null && _sessionTokenMetadata !== void 0 && _sessionTokenMetadata.ephemPublicKey) {
sessionTokenPromises.push(decryptNodeData(sessionTokenMetadata[0], sessionTokens[0], sessionAuthKey).catch(err => log.error("session token sig decryption", err)));
} else {
sessionTokenPromises.push(Promise.resolve(Buffer.from(sessionTokens[0], "base64")));
}
} else {
sessionTokenPromises.push(Promise.resolve(undefined));
}
if ((keys === null || keys === void 0 ? void 0 : keys.length) > 0) {
const latestKey = currentShareResponse.result.keys[0];
nodeIndexes.push(new BN(latestKey.node_index));
if (latestKey.share_metadata) {
sharePromises.push(decryptNodeDataWithPadding(latestKey.share_metadata, Buffer.from(latestKey.share, "base64").toString("binary"), sessionAuthKey).catch(err => log.error("share decryption", err)));
}
} else {
nodeIndexes.push(undefined);
sharePromises.push(Promise.resolve(undefined));
}
}
const allPromises = await Promise.all(sharePromises.concat(sessionTokenSigPromises).concat(sessionTokenPromises));
const sharesResolved = allPromises.slice(0, sharePromises.length);
const sessionSigsResolved = allPromises.slice(sharePromises.length, sharePromises.length + sessionTokenSigPromises.length);
const sessionTokensResolved = allPromises.slice(sharePromises.length + sessionTokenSigPromises.length, allPromises.length);
const validSigs = sessionSigsResolved.filter(sig => {
if (sig) {
return true;
}
return false;
});
if (!verifierParams.extended_verifier_id && validSigs.length < halfThreshold) {
throw new Error(`Insufficient number of signatures from nodes, required: ${halfThreshold}, found: ${validSigs.length}`);
}
const validTokens = sessionTokensResolved.filter(token => {
if (token) {
return true;
}
return false;
});
if (!verifierParams.extended_verifier_id && validTokens.length < halfThreshold) {
throw new Error(`Insufficient number of session tokens from nodes, required: ${halfThreshold}, found: ${validTokens.length}`);
}
sessionTokensResolved.forEach((x, index) => {
if (!x || !sessionSigsResolved[index]) sessionTokenData.push(undefined);else sessionTokenData.push({
token: x.toString("base64"),
signature: sessionSigsResolved[index].toString("hex"),
node_pubx: completedRequests[index].result.node_pubx,
node_puby: completedRequests[index].result.node_puby
});
});
if (sharedState.resolved) return undefined;
const decryptedShares = sharesResolved.reduce((acc, curr, index) => {
if (curr) {
acc.push({
index: nodeIndexes[index],
value: new BN(curr)
});
}
return acc;
}, []);
// run lagrange interpolation on all subsets, faster in the optimistic scenario than berlekamp-welch due to early exit
const allCombis = kCombinations(decryptedShares.length, halfThreshold);
let privateKey = null;
for (let j = 0; j < allCombis.length; j += 1) {
const currentCombi = allCombis[j];
const currentCombiShares = decryptedShares.filter((_, index) => currentCombi.includes(index));
const shares = currentCombiShares.map(x => x.value);
const indices = currentCombiShares.map(x => x.index);
const derivedPrivateKey = lagrangeInterpolation(ecCurve, shares, indices);
if (!derivedPrivateKey) continue;
const decryptedPubKey = derivePubKey(ecCurve, derivedPrivateKey);
const decryptedPubKeyX = decryptedPubKey.getX();
const decryptedPubKeyY = decryptedPubKey.getY();
if (decryptedPubKeyX.cmp(new BN(thresholdPublicKey.X, 16)) === 0 && decryptedPubKeyY.cmp(new BN(thresholdPublicKey.Y, 16)) === 0) {
privateKey = derivedPrivateKey;
break;
}
}
if (privateKey === undefined || privateKey === null) {
throw new Error("could not derive private key");
}
let isNewKey = false;
isNewKeyResponses.forEach(x => {
if (x.isNewKey === "true" && x.publicKey.toLowerCase() === thresholdPublicKey.X.toLowerCase()) {
isNewKey = true;
}
});
// Convert each string timestamp to a number
const serverOffsetTimes = serverTimeOffsetResponses.map(timestamp => Number.parseInt(timestamp, 10));
return {
privateKey,
sessionTokenData,
thresholdNonceData,
nodeIndexes,
thresholdPubKey: thresholdPublicKey,
isNewKey,
serverTimeOffsetResponse: serverTimeOffset || calculateMedian(serverOffsetTimes)
};
}
if (completedRequests.length < thresholdReqCount) {
throw new Error(`Waiting for results from more nodes, pending: ${thresholdReqCount - completedRequests.length}`);
}
throw new Error(`Invalid results, threshold pub key: ${thresholdPublicKey}, nonce data found: ${!!thresholdNonceData}, extended verifierId: ${verifierParams.extended_verifier_id}`);
}).then(async res => {
@@ -719,2 +811,2 @@ var _nonceResult;
export { GetPubKeyOrKeyAssign, retrieveOrImportShare };
export { GetPubKeyOrKeyAssign, VerifierLookupRequest, retrieveOrImportShare };
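The recovery loop above rests on two facts: a (t, n) Shamir sharing lets any t shares rebuild the secret, and each candidate from a share combination can be checked against the threshold public key before being accepted, which is why the loop can exit on the first match. Below is a minimal self-contained TypeScript sketch of the interpolation step over a toy prime field; the field modulus, polynomial, and share values are invented for illustration and this is not the library's implementation, which works over the curve's group order via lagrangeInterpolation.

// A toy prime field; 2^127 - 1 is a Mersenne prime, chosen only for brevity.
const P = 2n ** 127n - 1n;
const mod = (a: bigint): bigint => ((a % P) + P) % P;

// square-and-multiply modular exponentiation; inverses come from Fermat's little theorem
const powmod = (base: bigint, exp: bigint): bigint => {
  let result = 1n;
  let b = mod(base);
  for (let e = exp; e > 0n; e >>= 1n) {
    if (e & 1n) result = mod(result * b);
    b = mod(b * b);
  }
  return result;
};
const inv = (a: bigint): bigint => powmod(a, P - 2n);

// evaluate the unique degree-(t-1) polynomial through the shares at x = 0,
// which is where the secret sits in Shamir-style sharing
const lagrangeAtZero = (shares: { index: bigint; value: bigint }[]): bigint =>
  shares.reduce((acc, { index: xi, value: yi }) => {
    let num = 1n;
    let den = 1n;
    for (const { index: xj } of shares) {
      if (xj === xi) continue;
      num = mod(num * mod(-xj)); // factor (0 - xj)
      den = mod(den * (xi - xj)); // factor (xi - xj)
    }
    return mod(acc + yi * num * inv(den));
  }, 0n);

// secret 42 behind a degree-2 polynomial: any 3 of the 5 shares recover it
const f = (x: bigint): bigint => mod(42n + 7n * x + 11n * x * x);
const allShares = [1n, 2n, 3n, 4n, 5n].map(i => ({ index: i, value: f(i) }));
console.log(lagrangeAtZero(allShares.slice(0, 3))); // 42n
console.log(lagrangeAtZero([allShares[0], allShares[2], allShares[4]])); // 42n

Because every t-sized subset yields the same answer, kCombinations only has to find one combination whose derived public key matches thresholdPublicKey.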
@@ -6,3 +6,3 @@ export { JRPC_METHODS, SAPPHIRE_DEVNET_METADATA_URL, SAPPHIRE_METADATA_URL } from './constants.js';
export { default as Torus } from './torus.js';
export { calculateMedian, encParamsBufToHex, encParamsHexToBuf, generatePrivateKey, getKeyCurve, getProxyCoordinatorEndpointIndex, kCombinations, keccak256, normalizeKeysResult, retryCommitment, thresholdSame, waitFor } from './helpers/common.js';
export { calculateMedian, encParamsBufToHex, encParamsHexToBuf, generatePrivateKey, getKeyCurve, getProxyCoordinatorEndpointIndex, kCombinations, keccak256, normalizeKeysResult, normalizeLookUpResult, retryCommitment, thresholdSame, waitFor } from './helpers/common.js';
export { GetOrSetNonceError } from './helpers/errorUtils.js';
@@ -12,2 +12,2 @@ export { derivePubKey, encodeEd25519Point, generateAddressFromPrivKey, generateAddressFromPubKey, generateEd25519KeyData, generateSecp256k1KeyData, generateShares, getEd25519ExtendedPublicKey, getEncryptionEC, getPostboxKeyFrom1OutOf1, stripHexPrefix, toChecksumAddress } from './helpers/keyUtils.js';
export { convertMetadataToNonce, decryptNodeData, decryptNodeDataWithPadding, decryptSeedData, generateMetadataParams, generateNonceMetadataParams, getMetadata, getNonce, getOrSetNonce, getOrSetSapphireMetadataNonce, getSecpKeyFromEd25519 } from './helpers/metadataUtils.js';
export { GetPubKeyOrKeyAssign, retrieveOrImportShare } from './helpers/nodeUtils.js';
export { GetPubKeyOrKeyAssign, VerifierLookupRequest, retrieveOrImportShare } from './helpers/nodeUtils.js';
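As the updated index shows, the new helpers are re-exported from the package root, so consumers do not need deep imports. A small usage sketch; the response object here is fabricated to match the VerifierLookupResponse shape declared later in this diff:

import { normalizeLookUpResult } from "@toruslabs/torus.js";

// fabricated node response, for illustration only
const raw = {
  keys: [{ pub_key_X: "3f..", pub_key_Y: "9a..", address: "0x0000000000000000000000000000000000000000" }],
  server_time_offset: "0",
};
// the declared return type is Pick<VerifierLookupResponse, "keys">, so only the keys survive
const normalized = normalizeLookUpResult(raw);
console.log(normalized.keys[0]?.address);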
@@ -81,3 +81,4 @@ import _defineProperty from '@babel/runtime/helpers/defineProperty';
useDkg,
extraParams = {}
extraParams = {},
checkCommitment = true
} = params;
@@ -130,3 +131,4 @@ if (nodePubkeys.length === 0) {
nodePubkeys,
extraParams
extraParams,
checkCommitment
});
@@ -159,3 +161,4 @@ }
endpoints,
extraParams = {}
extraParams = {},
checkCommitment = true
} = params;
@@ -213,3 +216,4 @@ if (LEGACY_NETWORKS_ROUTE_MAP[this.network]) {
nodePubkeys,
extraParams
extraParams,
checkCommitment
});
@@ -216,0 +220,0 @@ }
export declare const JRPC_METHODS: {
GET_OR_SET_KEY: string;
VERIFIER_LOOKUP: string;
COMMITMENT_REQUEST: string;
@@ -4,0 +5,0 @@ IMPORT_SHARES: string;
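With the new method name exposed on the typed JRPC_METHODS map, request envelopes can reference it symbolically instead of hard-coding a string. A hypothetical envelope; the params shown are illustrative and not the node API's full payload:

import { JRPC_METHODS } from "@toruslabs/torus.js";

const envelope = {
  jsonrpc: "2.0",
  id: 10,
  method: JRPC_METHODS.VERIFIER_LOOKUP,
  params: { verifier: "my-verifier", verifier_id: "user@example.com" }, // illustrative values
};
console.log(JSON.stringify(envelope));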
import { JRPCResponse } from "@toruslabs/constants";
import { Ecies } from "@toruslabs/eccrypto";
import { ec as EC } from "elliptic";
import { CommitmentRequestResult, EciesHex, KeyType, VerifierLookupResponse } from "../interfaces";
import { CommitmentRequestResult, EciesHex, GetORSetKeyResponse, KeyType, VerifierLookupResponse } from "../interfaces";
export declare function keccak256(a: Buffer): string;
export declare const generatePrivateKey: (ecCurve: EC, buf: typeof Buffer) => Buffer;
export declare const getKeyCurve: (keyType: KeyType) => EC;
export declare const normalizeKeysResult: (result: VerifierLookupResponse) => Pick<VerifierLookupResponse, "keys" | "is_new_key">;
export declare const normalizeKeysResult: (result: GetORSetKeyResponse) => Pick<GetORSetKeyResponse, "keys" | "is_new_key">;
export declare const normalizeLookUpResult: (result: VerifierLookupResponse) => Pick<VerifierLookupResponse, "keys">;
export declare const kCombinations: (s: number | number[], k: number) => number[][];
@@ -10,0 +11,0 @@ export declare const thresholdSame: <T>(arr: T[], t: number) => T | undefined;
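These two declarations carry the consensus logic used throughout this release: thresholdSame returns a value only when at least t entries agree on it, and kCombinations enumerates the index subsets tried during share reconstruction. A quick usage sketch with made-up responses:

import { kCombinations, thresholdSame } from "@toruslabs/torus.js";

// a value is accepted only when at least `t` entries agree on it
const pubkeys = ["0xabc", "0xabc", "0xdef", "0xabc", undefined];
console.log(thresholdSame(pubkeys, 3)); // "0xabc"
console.log(thresholdSame(pubkeys, 4)); // undefined

// all 3-element index subsets of 5 items: C(5,3) = 10 candidate combinations
console.log(kCombinations(5, 3).length); // 10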
import { INodePub, TORUS_NETWORK_TYPE } from "@toruslabs/constants";
import { ec } from "elliptic";
import { ImportedShare, KeyLookupResult, KeyType, TorusKey, VerifierParams } from "../interfaces";
import { ImportedShare, KeyLookupResult, KeyType, TorusKey, VerifierLookupResult, VerifierParams } from "../interfaces";
import { TorusUtilsExtraParams } from "../TorusUtilsExtraParams";
@@ -13,2 +13,8 @@ export declare const GetPubKeyOrKeyAssign: (params: {
}) => Promise<KeyLookupResult>;
export declare const VerifierLookupRequest: (params: {
endpoints: string[];
verifier: string;
verifierId: string;
keyType: KeyType;
}) => Promise<VerifierLookupResult>;
export declare function retrieveOrImportShare(params: {
@@ -31,4 +37,5 @@ legacyMetadataHost: string;
nodePubkeys: INodePub[];
extraParams: TorusUtilsExtraParams;
newImportedShares?: ImportedShare[];
extraParams: TorusUtilsExtraParams;
checkCommitment?: boolean;
}): Promise<TorusKey>;
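Given the VerifierLookupRequest declaration above, a caller can also run the lookup on its own to check whether a key has already been assigned, which is how the non-commitment import path uses it. A hedged sketch; the endpoints, verifier, and verifier id are placeholders, and it assumes an ESM context with top-level await:

import { VerifierLookupRequest } from "@toruslabs/torus.js";

const lookup = await VerifierLookupRequest({
  endpoints: ["https://node-1.example.com/jrpc", "https://node-2.example.com/jrpc"], // placeholders
  verifier: "my-verifier",
  verifierId: "user@example.com",
  keyType: "secp256k1",
});
// check errorResult before trusting keyResult
if (!lookup.errorResult && lookup.keyResult?.keys?.length) {
  console.log("existing key address:", lookup.keyResult.keys[0].address);
}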
@@ -62,3 +62,3 @@ import type { INodePub, TORUS_NETWORK_TYPE } from "@toruslabs/constants";
}
export interface VerifierLookupResponse {
export interface GetORSetKeyResponse {
keys: {
@@ -75,2 +75,12 @@ pub_key_X: string;
}
export interface VerifierLookupResponse {
keys: {
pub_key_X: string;
pub_key_Y: string;
signing_pub_key_X?: string;
signing_pub_key_Y?: string;
address: string;
}[];
server_time_offset?: string;
}
export interface CommitmentRequestResult {
@@ -95,8 +105,13 @@ signature: string;
export interface KeyLookupResult {
keyResult: Pick<VerifierLookupResponse, "keys" | "is_new_key">;
keyResult: Pick<GetORSetKeyResponse, "keys" | "is_new_key">;
nodeIndexes: number[];
serverTimeOffset: number;
errorResult: JRPCResponse<VerifierLookupResponse>["error"];
errorResult: JRPCResponse<GetORSetKeyResponse>["error"];
nonceResult?: GetOrSetNonceResult;
}
export interface VerifierLookupResult {
keyResult: Pick<VerifierLookupResponse, "keys">;
serverTimeOffset: number;
errorResult: JRPCResponse<VerifierLookupResponse>["error"];
}
export type EciesHex = {
@@ -255,2 +270,3 @@ [key in keyof Ecies]: string;
extraParams?: TorusUtilsExtraParams;
checkCommitment?: boolean;
}
@@ -266,2 +282,3 @@ export interface RetrieveSharesParams {
useDkg?: boolean;
checkCommitment?: boolean;
}
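Both parameter interfaces pick up the same optional checkCommitment flag, which the implementations shown earlier default to true. A sketch of just the two opt-in flags; the remaining required fields of RetrieveSharesParams are deliberately not reproduced here, and the interface is assumed to be re-exported from the package's root types:

import type { RetrieveSharesParams } from "@toruslabs/torus.js";

const flags: Pick<RetrieveSharesParams, "useDkg" | "checkCommitment"> = {
  useDkg: false,
  // opt out of the commitment round; when shares are imported and overriding
  // is not allowed, the share path then verifies key existence through
  // VerifierLookupRequest before sending the import request
  checkCommitment: false,
};
console.log(flags);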
{
"name": "@toruslabs/torus.js",
"version": "15.0.5",
"version": "15.1.0-0",
"description": "Handle communication with torus nodes",
@@ -5,0 +5,0 @@ "main": "dist/lib.cjs/index.js",
Sorry, the diff of this file is too big to display