@algolia/transporter
Comparing version 4.0.0-beta.9 to 4.0.0-beta.10
@@ -5,28 +5,97 @@ 'use strict'; | ||
// If a host is down, it will remain down for 5 mins. | ||
const DOWNTIME = 5 * 60 * 1000; | ||
function createHost(options) { | ||
const host = { | ||
function createMappedRequestOptions(requestOptions, timeout) { | ||
const options = requestOptions || {}; | ||
// eslint-disable-next-line functional/prefer-readonly-type | ||
const data = {}; | ||
Object.keys(options).forEach(key => { | ||
if (['timeout', 'headers', 'queryParameters', 'data', 'cacheable'].indexOf(key) === -1) { | ||
data[key] = options[key]; // eslint-disable-line functional/immutable-data | ||
} | ||
}); | ||
return { | ||
data, | ||
timeout: options.timeout || timeout, | ||
headers: options.headers || {}, | ||
queryParameters: options.queryParameters || {}, | ||
cacheable: options.cacheable, | ||
}; | ||
} | ||
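For reference, a minimal sketch of what `createMappedRequestOptions` above returns: known request-option keys stay at the top level, and any other key is folded into `data`. The `createIfNotExists` key, the header value, and the fallback timeout are illustrative assumptions, not part of the diff.

```js
const { createMappedRequestOptions } = require('@algolia/transporter');

// Unknown keys (here `createIfNotExists`) are moved into `data`; the known
// keys `timeout`, `headers`, `queryParameters` and `cacheable` stay as-is.
const mapped = createMappedRequestOptions(
  { headers: { 'x-algolia-user-id': 'user42' }, createIfNotExists: true },
  5 // fallback timeout used when the caller did not set one
);

console.log(mapped);
// {
//   data: { createIfNotExists: true },
//   timeout: 5,
//   headers: { 'x-algolia-user-id': 'user42' },
//   queryParameters: {},
//   cacheable: undefined
// }
```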
const CallEnum = { | ||
Read: 1, | ||
Write: 2, | ||
Any: 3, | ||
}; | ||
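`CallEnum` is a bit mask: `Any` is `Read | Write`, and the transporter later filters hosts with a bitwise AND (see the `read`/`write` methods further down). A small illustration with made-up host URLs:

```js
const CallEnum = { Read: 1, Write: 2, Any: 3 }; // Any === Read | Write

const hosts = [
  { url: 'example-dsn.algolia.net', accept: CallEnum.Read },
  { url: 'example.algolia.net', accept: CallEnum.Write },
  { url: 'example-1.algolianet.com', accept: CallEnum.Any },
];

// Same filter the transporter applies before a read call:
const readHosts = hosts.filter(host => (host.accept & CallEnum.Read) !== 0);
console.log(readHosts.map(host => host.url));
// [ 'example-dsn.algolia.net', 'example-1.algolianet.com' ]
```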
const HostStatusEnum = { | ||
Up: 1, | ||
Down: 2, | ||
Timeouted: 3, | ||
}; | ||
// By default, Algolia API clients have an expiration delay | ||
// of 5 mins. In the JavaScript client, we use 2 mins. | ||
const EXPIRATION_DELAY = 2 * 60 * 1000; | ||
function createStatefulHost(host, status = HostStatusEnum.Up) { | ||
return { | ||
...host, | ||
status, | ||
lastUpdate: Date.now(), | ||
}; | ||
} | ||
function isStatefulHostUp(host) { | ||
return host.status === HostStatusEnum.Up || Date.now() - host.lastUpdate > EXPIRATION_DELAY; | ||
} | ||
function isStatefulHostTimeouted(host) { | ||
return (host.status === HostStatusEnum.Timeouted && Date.now() - host.lastUpdate <= EXPIRATION_DELAY); | ||
} | ||
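A hedged sketch of how the host-state helpers above behave, using the exports listed at the bottom of this file; the host object and the elapsed times are made up for illustration.

```js
const {
  createStatefulHost,
  isStatefulHostUp,
  isStatefulHostTimeouted,
  HostStatusEnum,
} = require('@algolia/transporter');

const host = { protocol: 'https', url: 'example-1.algolianet.com', accept: 3 };

// A host flagged as Down right now is skipped...
const down = createStatefulHost(host, HostStatusEnum.Down);
console.log(isStatefulHostUp(down)); // false

// ...but once `lastUpdate` is older than the 2 minute EXPIRATION_DELAY,
// it is considered up again.
const expired = { ...down, lastUpdate: Date.now() - 3 * 60 * 1000 };
console.log(isStatefulHostUp(expired)); // true

// A recent timeout keeps the host retryable, but it is moved to the end
// of the host list by `createRetryableOptions` below.
const timedOut = createStatefulHost(host, HostStatusEnum.Timeouted);
console.log(isStatefulHostTimeouted(timedOut)); // true
```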
function createStatelessHost(options) { | ||
return { | ||
protocol: options.protocol || 'https', | ||
url: options.url, | ||
accept: options.accept, | ||
downDate: 0, | ||
up: true, | ||
setAsDown() { | ||
// eslint-disable-next-line functional/immutable-data | ||
host.downDate = Date.now(); | ||
// eslint-disable-next-line functional/immutable-data | ||
host.up = false; | ||
}, | ||
isUp() { | ||
if (!host.up && Date.now() - host.downDate > DOWNTIME) { | ||
// eslint-disable-next-line functional/immutable-data | ||
host.up = true; | ||
} | ||
return host.up; | ||
}, | ||
}; | ||
return host; | ||
} | ||
function createRetryableOptions(hostsCache, statelessHosts | ||
// eslint-disable-next-line functional/prefer-readonly-type | ||
) { | ||
return Promise.all(statelessHosts.map(statelessHost => { | ||
return hostsCache.get(statelessHost, () => { | ||
return Promise.resolve(createStatefulHost(statelessHost)); | ||
}); | ||
})).then(statefulHosts => { | ||
const hostsUp = statefulHosts.filter(host => isStatefulHostUp(host)); | ||
const hostsTimeouted = statefulHosts.filter(host => isStatefulHostTimeouted(host)); | ||
/** | ||
* Note that we put the hosts that previously timed out at the end of the list. | ||
*/ | ||
const hostsAvailable = [...hostsUp, ...hostsTimeouted]; | ||
const statelessHostsAvailable = hostsAvailable.length > 0 | ||
? hostsAvailable.map(host => createStatelessHost(host)) | ||
: statelessHosts; | ||
return { | ||
getTimeout(timeoutsCount, baseTimeout) { | ||
/** | ||
* Imagine that you have 4 hosts; the timeout multiplier increases | ||
* as follows: 1 (timed out) > 4 (timed out) > 5 (200). | ||
* | ||
* Note that on the very next request we start from the previous timeout: | ||
* | ||
* 6 (timed out) > 7 (timed out) > 8 ... | ||
* | ||
* This strategy may need to be reviewed, but it is the strategy used by | ||
* the current v3 version. | ||
*/ | ||
const timeoutMultiplier = hostsTimeouted.length === 0 && timeoutsCount === 0 | ||
? 1 | ||
: hostsTimeouted.length + 3 + timeoutsCount; | ||
return timeoutMultiplier * baseTimeout; | ||
}, | ||
statelessHosts: statelessHostsAvailable, | ||
}; | ||
}); | ||
} | ||
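To make the comment inside `getTimeout` concrete, here is a small stand-alone re-statement of the multiplier formula; the closure over `hostsTimeouted` is flattened into a parameter here, and the base timeout of 5 is an assumption.

```js
// multiplier = 1 when everything is healthy, otherwise
// hostsTimeouted.length + 3 + timeoutsCount
const getTimeout = (timedOutHosts, timeoutsCount, baseTimeout) =>
  (timedOutHosts === 0 && timeoutsCount === 0
    ? 1
    : timedOutHosts + 3 + timeoutsCount) * baseTimeout;

console.log(getTimeout(0, 0, 5)); // 5  -> all hosts healthy, first attempt
console.log(getTimeout(0, 1, 5)); // 20 -> one timeout already hit in this request
console.log(getTimeout(2, 0, 5)); // 25 -> two hosts timed out on earlier requests
console.log(getTimeout(2, 1, 5)); // 30 -> both combined
```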
const isNetworkError = ({ isTimedOut, status }) => { | ||
@@ -43,7 +112,4 @@ return !isTimedOut && ~~status === 0; | ||
}; | ||
const decision = (host, response, outcomes) => { | ||
const retryDecision = (response, outcomes) => { | ||
if (isRetryable(response)) { | ||
if (!response.isTimedOut) { | ||
host.setAsDown(); | ||
} | ||
return outcomes.onRetry(response); | ||
@@ -57,31 +123,25 @@ } | ||
function getAvailableHosts(hostsCache, statelessHosts | ||
/* eslint-disable functional/prefer-readonly-type */ | ||
) { | ||
return Promise.all(statelessHosts.map(host => hostsCache | ||
.get({ url: host.url }, () => Promise.resolve(host)) | ||
.then((hit) => { | ||
// eslint-disable-next-line functional/immutable-data | ||
return Object.assign(host, { | ||
downDate: hit.downDate, | ||
up: hit.up, | ||
}); | ||
}))).then(statefulHosts => statefulHosts.filter(host => host.isUp()).reverse()); | ||
} | ||
function execute(transporter, statelessHosts, request, requestOptions) { | ||
/* eslint-disable functional/prefer-readonly-type */ | ||
const stackTrace = []; | ||
// eslint-disable-next-line functional/no-let | ||
let timeoutRetries = 0; | ||
function retryableRequest(transporter, statelessHosts, request, requestOptions) { | ||
const stackTrace = []; // eslint-disable-line functional/prefer-readonly-type | ||
/** | ||
* First, we prepare the parts of the payload that do not depend on the host. | ||
*/ | ||
const data = serializeData(request, requestOptions); | ||
const headers = { ...transporter.headers, ...requestOptions.headers }; | ||
const method = request.method; | ||
const retry = (hosts) => { | ||
// eslint-disable-next-line functional/immutable-data | ||
const host = hosts.pop(); | ||
const queryParameters = { | ||
...transporter.queryParameters, | ||
...requestOptions.queryParameters, | ||
'x-algolia-agent': transporter.userAgent.value, | ||
}; | ||
let timeoutsCount = 0; // eslint-disable-line functional/no-let | ||
const retry = (hosts, // eslint-disable-line functional/prefer-readonly-type | ||
getTimeout) => { | ||
/** | ||
* We iterate over the hosts until there is no host left. | ||
*/ | ||
const host = hosts.pop(); // eslint-disable-line functional/immutable-data | ||
if (host === undefined) { | ||
throw createRetryError(stackTrace); | ||
} | ||
const timeoutAdjuster = timeoutRetries + 1; | ||
const payload = { | ||
@@ -91,9 +151,5 @@ data, | ||
method, | ||
url: serializeUrl(host, request.path, { | ||
...transporter.queryParameters, | ||
...requestOptions.queryParameters, | ||
'x-algolia-agent': transporter.userAgent.value, | ||
}), | ||
connectTimeout: timeoutAdjuster * transporter.timeouts.connect, | ||
socketTimeout: timeoutAdjuster * (requestOptions.timeout || 0), | ||
url: serializeUrl(host, request.path, queryParameters), | ||
connectTimeout: getTimeout(timeoutsCount, transporter.timeouts.connect), | ||
responseTimeout: getTimeout(timeoutsCount, requestOptions.timeout), | ||
}; | ||
@@ -108,13 +164,30 @@ const decisions = { | ||
triesLeft: hosts.length, | ||
timeoutRetries, | ||
}; | ||
// eslint-disable-next-line functional/immutable-data | ||
stackTrace.push(stackFrame); | ||
/** | ||
* The stack frame is pushed to the stack trace so we | ||
* have information about the failures once a | ||
* retry error is thrown. | ||
*/ | ||
stackTrace.push(stackFrame); // eslint-disable-line functional/immutable-data | ||
/** | ||
* If the response is a timeout, we increment the number of | ||
* timeouts so we can increase the timeout later. | ||
*/ | ||
if (response.isTimedOut) { | ||
timeoutRetries++; | ||
timeoutsCount++; // eslint-disable-line no-param-reassign | ||
} | ||
return Promise.all([ | ||
/** | ||
* Failures are individually sent to the logger, allowing | ||
* the end user to debug / store stack frames even | ||
* when a retry error does not happen. | ||
*/ | ||
transporter.logger.debug('Retryable failure', stackFrame), | ||
transporter.hostsCache.set({ url: host.url }, host), | ||
]).then(() => retry(hosts)); | ||
/** | ||
* We also store the state of the host in failure cases. If the host is | ||
* down, it will remain down for the next 2 minutes. In a timeout situation, | ||
* this host will be added to the end of the list of hosts on the next request. | ||
*/ | ||
transporter.hostsCache.set(host, createStatefulHost(host, response.isTimedOut ? HostStatusEnum.Timeouted : HostStatusEnum.Down)), | ||
]).then(() => retry(hosts, getTimeout)); | ||
}, | ||
@@ -126,6 +199,16 @@ onFail(response) { | ||
return transporter.requester.send(payload).then(response => { | ||
return decision(host, response, decisions); | ||
return retryDecision(response, decisions); | ||
}); | ||
}; | ||
return getAvailableHosts(transporter.hostsCache, statelessHosts).then(hosts => retry(hosts)); | ||
/** | ||
* Finally, we perform the request against each retryable host until we get a | ||
* non-retryable response. Some notes here: | ||
* | ||
* 1. The hosts are reversed so we can `pop` them later on, which is more performant. | ||
* 2. The retryable options also give us a timeout multiplier that is tailored | ||
* to the current context. | ||
*/ | ||
return createRetryableOptions(transporter.hostsCache, statelessHosts).then(options => { | ||
return retry([...options.statelessHosts].reverse(), options.getTimeout); | ||
}); | ||
} | ||
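The `reverse()` + `pop()` combination mentioned in the comment above is only about walking the hosts in priority order from a copied array without paying for `shift()`; a tiny illustration with placeholder host names:

```js
const statelessHosts = ['host-a', 'host-b', 'host-c']; // priority order

const stack = [...statelessHosts].reverse(); // ['host-c', 'host-b', 'host-a']
console.log(stack.pop()); // 'host-a' -> tried first
console.log(stack.pop()); // 'host-b' -> first retry
console.log(stack.pop()); // 'host-c' -> last resort, after which a RetryError is thrown
```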
@@ -158,25 +241,73 @@ | ||
// eslint-disable-next-line functional/immutable-data | ||
transporter.hosts = values.map(hostOptions => createHost(hostOptions)); | ||
transporter.hosts = values.map(hostOptions => createStatelessHost(hostOptions)); | ||
}, | ||
read(request, requestOptions) { | ||
const mappedRequestOptions = mapRequestOptions(requestOptions, transporter.timeouts.read); | ||
const key = { request, mappedRequestOptions }; | ||
const createRequest = () => { | ||
return execute(transporter, transporter.hosts.filter(host => (host.accept & CallEnum.Read) !== 0), request, mappedRequestOptions); | ||
/** | ||
* First, we compute the user request options. Keep in mind that, | ||
* using request options, the user is able to modify the entire | ||
* payload of the request, such as headers, query parameters, and others. | ||
*/ | ||
const mappedRequestOptions = createMappedRequestOptions(requestOptions, transporter.timeouts.read); | ||
const createRetryableRequest = () => { | ||
/** | ||
* Then, we prepare a factory function that contains the construction of | ||
* the retryable request. At this point, we do *not* perform the actual | ||
* request yet, but we want to have the factory ready. | ||
*/ | ||
return retryableRequest(transporter, transporter.hosts.filter(host => (host.accept & CallEnum.Read) !== 0), request, mappedRequestOptions); | ||
}; | ||
/** | ||
* Once we have the factory ready, we need to determine whether the | ||
* request is "cacheable" - i.e. whether it should be cached. Note that, | ||
* once again, the user can force this option. | ||
*/ | ||
const cacheable = mappedRequestOptions.cacheable !== undefined | ||
? mappedRequestOptions.cacheable | ||
: request.cacheable; | ||
/** | ||
* If it is not "cacheable", we immediately trigger the retryable request; | ||
* there is no need to check the cache implementations. | ||
*/ | ||
if (cacheable !== true) { | ||
return createRequest(); | ||
return createRetryableRequest(); | ||
} | ||
/** | ||
* If the request is "cacheable", we first need to compute the key to ask | ||
* the cache implementations whether this request is in progress or the | ||
* response already exists in the cache. | ||
*/ | ||
const key = { | ||
request, | ||
mappedRequestOptions, | ||
transporter: { | ||
queryParameters: transporter.queryParameters, | ||
headers: transporter.headers, | ||
}, | ||
}; | ||
/** | ||
* With the computed key, we first ask the responses cache | ||
* implementation if this request has been resolved before. | ||
*/ | ||
return transporter.responsesCache.get(key, () => { | ||
/** | ||
* If the request has never been resolved before, we ask whether there | ||
* is a request with the same key currently in progress. | ||
*/ | ||
return transporter.requestsCache.get(key, () => { | ||
return (transporter.requestsCache | ||
.set(key, createRequest()) | ||
/** | ||
* Finally, if there is no request in progress with the same key, | ||
* this `createRetryableRequest()` will actually trigger the | ||
* retryable request. | ||
*/ | ||
.set(key, createRetryableRequest()) | ||
.then(response => Promise.all([transporter.requestsCache.delete(key), response]), err => Promise.all([transporter.requestsCache.delete(key), Promise.reject(err)])) | ||
// @todo Maybe remove this alias, and understand why we need it. | ||
.then(promiseResults => promiseResults[1])); | ||
.then(([_, response]) => response)); | ||
}); | ||
}, { | ||
/** | ||
* Once we get the response back from the server, we | ||
* tell the responses cache to store the received response | ||
* so it can be used later. | ||
*/ | ||
miss: response => transporter.responsesCache.set(key, response), | ||
@@ -186,3 +317,7 @@ }); | ||
write(request, requestOptions) { | ||
return execute(transporter, transporter.hosts.filter(host => (host.accept & CallEnum.Write) !== 0), request, mapRequestOptions(requestOptions, transporter.timeouts.write)); | ||
/** | ||
* On write requests, no cache mechanisms are applied, and we | ||
* proxy the request immediately to the requester. | ||
*/ | ||
return retryableRequest(transporter, transporter.hosts.filter(host => (host.accept & CallEnum.Write) !== 0), request, createMappedRequestOptions(requestOptions, transporter.timeouts.write)); | ||
}, | ||
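The `read` method above combines two caches: `responsesCache` for results that were already resolved, and `requestsCache` to share a single in-flight promise between concurrent identical requests. The stand-alone sketch below shows only that deduplication pattern with a plain `Map`; it is not the `@algolia/cache-common` API, and the key and fake search function are made up.

```js
const requestsCache = new Map();

function dedupedRequest(key, createRetryableRequest) {
  if (requestsCache.has(key)) {
    return requestsCache.get(key); // an identical request is already in flight
  }
  const request = createRetryableRequest().finally(() => requestsCache.delete(key));
  requestsCache.set(key, request);
  return request;
}

// Both calls below resolve from a single underlying "network" call.
let networkCalls = 0;
const fakeSearch = () =>
  new Promise(resolve => setTimeout(() => resolve(++networkCalls), 10));

Promise.all([
  dedupedRequest('{"query":"phone"}', fakeSearch),
  dedupedRequest('{"query":"phone"}', fakeSearch),
]).then(results => console.log(results, 'network calls:', networkCalls));
// [ 1, 1 ] network calls: 1
```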
@@ -239,29 +374,2 @@ }; | ||
function mapRequestOptions(requestOptions, timeout) { | ||
const options = requestOptions || {}; | ||
// eslint-disable-next-line functional/prefer-readonly-type | ||
const data = {}; | ||
Object.keys(options).forEach(key => { | ||
if (['timeout', 'headers', 'queryParameters', 'data', 'cacheable'].indexOf(key) === -1) { | ||
data[key] = options[key]; // eslint-disable-line functional/immutable-data | ||
} | ||
}); | ||
return { | ||
data, | ||
timeout: options.timeout || timeout, | ||
headers: options.headers || {}, | ||
queryParameters: options.queryParameters || {}, | ||
cacheable: options.cacheable, | ||
}; | ||
} | ||
function popRequestOption(requestOptions, key, defaultValue) { | ||
if (requestOptions !== undefined && key in requestOptions) { | ||
const value = requestOptions[key]; | ||
// eslint-disable-next-line no-param-reassign, functional/immutable-data | ||
delete requestOptions[key]; | ||
return value; | ||
} | ||
return defaultValue; | ||
} | ||
// eslint-disable-next-line functional/prefer-readonly-type | ||
@@ -316,13 +424,10 @@ function encode(format, ...args) { | ||
const CallEnum = { | ||
Read: 1, | ||
Write: 2, | ||
Any: 3, | ||
}; | ||
exports.CallEnum = CallEnum; | ||
exports.HostStatusEnum = HostStatusEnum; | ||
exports.createApiError = createApiError; | ||
exports.createDeserializationError = createDeserializationError; | ||
exports.createHost = createHost; | ||
exports.createMappedRequestOptions = createMappedRequestOptions; | ||
exports.createRetryError = createRetryError; | ||
exports.createStatefulHost = createStatefulHost; | ||
exports.createStatelessHost = createStatelessHost; | ||
exports.createTransporter = createTransporter; | ||
@@ -332,6 +437,6 @@ exports.createUserAgent = createUserAgent; | ||
exports.deserializeSuccess = deserializeSuccess; | ||
exports.mapRequestOptions = mapRequestOptions; | ||
exports.popRequestOption = popRequestOption; | ||
exports.isStatefulHostTimeouted = isStatefulHostTimeouted; | ||
exports.isStatefulHostUp = isStatefulHostUp; | ||
exports.serializeData = serializeData; | ||
exports.serializeQueryParameters = serializeQueryParameters; | ||
exports.serializeUrl = serializeUrl; |
@@ -22,6 +22,10 @@ import { Cache } from '@algolia/cache-common'; | ||
export declare function createHost(options: HostOptions): Host; | ||
export declare function createMappedRequestOptions(requestOptions?: RequestOptions, timeout?: number): MappedRequestOptions; | ||
export declare function createRetryError(stackTrace: readonly StackFrame[]): RetryError; | ||
export declare function createStatefulHost(host: StatelessHost, status?: HostStatusType): StatefulHost; | ||
export declare function createStatelessHost(options: HostOptions): StatelessHost; | ||
export declare function createTransporter(options: TransporterOptions): Transporter; | ||
@@ -43,12 +47,2 @@ | ||
export declare type Host = { | ||
downDate: number; | ||
up: boolean; | ||
readonly protocol: string; | ||
readonly url: string; | ||
readonly accept: CallType; | ||
readonly setAsDown: () => void; | ||
readonly isUp: () => boolean; | ||
}; | ||
export declare type HostOptions = { | ||
@@ -60,2 +54,12 @@ readonly url: string; | ||
export declare const HostStatusEnum: { | ||
readonly [key: string]: HostStatusType; | ||
}; | ||
export declare type HostStatusType = 1 | 2 | 3; | ||
export declare function isStatefulHostTimeouted(host: StatefulHost): boolean; | ||
export declare function isStatefulHostUp(host: StatefulHost): boolean; | ||
export declare type MappedRequestOptions = { | ||
@@ -75,6 +79,2 @@ readonly cacheable: boolean | undefined; | ||
export declare function mapRequestOptions(requestOptions?: RequestOptions, timeout?: number): MappedRequestOptions; | ||
export declare function popRequestOption<TRequestOption = undefined>(requestOptions: RequestOptions | undefined, key: string, defaultValue?: TRequestOption): TRequestOption; | ||
export declare type QueryParameters = { | ||
@@ -93,12 +93,15 @@ readonly [key: string]: string; | ||
/** | ||
* If the given request should be stored within the cache. By default, only | ||
* [search] and [searchForFacetValues] requests are stored. | ||
* Whether the given request should persist in the cache. Keep in mind that | ||
* some methods may have this option enabled by default. | ||
*/ | ||
readonly cacheable?: boolean; | ||
/** | ||
* A specific timeout for the request. | ||
* Custom timeout for the request. Note that, in normal situations, | ||
* the given timeout will be applied, but the transporter layer may | ||
* increase this timeout if there is a need for it. | ||
*/ | ||
readonly timeout?: number; | ||
/** | ||
* A specific headers for the request. | ||
* Custom headers for the request. These headers are | ||
* going to be merged with the transporter headers. | ||
*/ | ||
@@ -109,3 +112,4 @@ readonly headers?: { | ||
/** | ||
* Specific query parameters for the request. | ||
* Custom query parameters for the request. These query parameters are | ||
* going to be merged with the transporter query parameters. | ||
*/ | ||
@@ -115,2 +119,6 @@ readonly queryParameters?: { | ||
}; | ||
/** | ||
* Additional request body values. They are only taken into | ||
* consideration in `POST` and `PUT` requests. | ||
*/ | ||
[key: string]: any; | ||
@@ -120,2 +128,6 @@ }; | ||
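Putting the `RequestOptions` fields documented above together, a hedged usage example, assuming the v4 `algoliasearch` client (whose search methods take these options as their last argument); the credentials, index name, and values are placeholders:

```js
const algoliasearch = require('algoliasearch');

const client = algoliasearch('YourApplicationID', 'YourSearchOnlyAPIKey');
const index = client.initIndex('products');

index
  .search('phone', {
    headers: { 'x-forwarded-for': '203.0.113.10' }, // merged with the transporter headers
    queryParameters: { analytics: 'false' },        // merged with the transporter query parameters
    timeout: 10,                                    // overrides the default read timeout
    cacheable: false,                               // opt out of the responses cache
    getRankingInfo: true,                           // any other key goes into the POST body
  })
  .then(({ hits }) => console.log(hits.length));
```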
export declare type RetryError = Error & { | ||
/** | ||
* Contains the report of stack frames from the | ||
* execution of a given request. | ||
*/ | ||
readonly stackTrace: readonly StackFrame[]; | ||
@@ -130,3 +142,3 @@ }; | ||
export declare function serializeUrl(host: Host, path: string, queryParameters: { | ||
export declare function serializeUrl(host: StatelessHost, path: string, queryParameters: { | ||
readonly [key: string]: string; | ||
@@ -138,7 +150,26 @@ }): string; | ||
readonly response: Response; | ||
readonly host: Host; | ||
readonly host: StatelessHost; | ||
readonly triesLeft: number; | ||
readonly timeoutRetries: number; | ||
}; | ||
export declare type StatefulHost = StatelessHost & { | ||
/** | ||
* Holds the last time this host failed, in milliseconds elapsed | ||
* since the UNIX epoch. This failure can be caused by a | ||
* timeout error or by the host not being available. | ||
*/ | ||
readonly lastUpdate: number; | ||
/** | ||
* Holds the host status. Note that, depending on the `lastUpdate`, | ||
* a host may be considered `Up` by the transporter layer. | ||
*/ | ||
readonly status: HostStatusType; | ||
}; | ||
export declare type StatelessHost = { | ||
readonly protocol: string; | ||
readonly url: string; | ||
readonly accept: CallType; | ||
}; | ||
export declare type Timeouts = { | ||
@@ -163,3 +194,3 @@ readonly connect: number; | ||
*/ | ||
hosts: readonly Host[]; | ||
hosts: readonly StatelessHost[]; | ||
readonly addHeaders: (headers: Headers) => void; | ||
@@ -166,0 +197,0 @@ readonly addQueryParameters: (queryParameters: QueryParameters) => void; |
@@ -1,27 +0,96 @@ | ||
// If a host is down, it will remain down for 5 mins. | ||
const DOWNTIME = 5 * 60 * 1000; | ||
function createHost(options) { | ||
const host = { | ||
function createMappedRequestOptions(requestOptions, timeout) { | ||
const options = requestOptions || {}; | ||
// eslint-disable-next-line functional/prefer-readonly-type | ||
const data = {}; | ||
Object.keys(options).forEach(key => { | ||
if (['timeout', 'headers', 'queryParameters', 'data', 'cacheable'].indexOf(key) === -1) { | ||
data[key] = options[key]; // eslint-disable-line functional/immutable-data | ||
} | ||
}); | ||
return { | ||
data, | ||
timeout: options.timeout || timeout, | ||
headers: options.headers || {}, | ||
queryParameters: options.queryParameters || {}, | ||
cacheable: options.cacheable, | ||
}; | ||
} | ||
const CallEnum = { | ||
Read: 1, | ||
Write: 2, | ||
Any: 3, | ||
}; | ||
const HostStatusEnum = { | ||
Up: 1, | ||
Down: 2, | ||
Timeouted: 3, | ||
}; | ||
// By default, Algolia API clients have an expiration delay | ||
// of 5 mins. In the JavaScript client, we use 2 mins. | ||
const EXPIRATION_DELAY = 2 * 60 * 1000; | ||
function createStatefulHost(host, status = HostStatusEnum.Up) { | ||
return { | ||
...host, | ||
status, | ||
lastUpdate: Date.now(), | ||
}; | ||
} | ||
function isStatefulHostUp(host) { | ||
return host.status === HostStatusEnum.Up || Date.now() - host.lastUpdate > EXPIRATION_DELAY; | ||
} | ||
function isStatefulHostTimeouted(host) { | ||
return (host.status === HostStatusEnum.Timeouted && Date.now() - host.lastUpdate <= EXPIRATION_DELAY); | ||
} | ||
function createStatelessHost(options) { | ||
return { | ||
protocol: options.protocol || 'https', | ||
url: options.url, | ||
accept: options.accept, | ||
downDate: 0, | ||
up: true, | ||
setAsDown() { | ||
// eslint-disable-next-line functional/immutable-data | ||
host.downDate = Date.now(); | ||
// eslint-disable-next-line functional/immutable-data | ||
host.up = false; | ||
}, | ||
isUp() { | ||
if (!host.up && Date.now() - host.downDate > DOWNTIME) { | ||
// eslint-disable-next-line functional/immutable-data | ||
host.up = true; | ||
} | ||
return host.up; | ||
}, | ||
}; | ||
return host; | ||
} | ||
function createRetryableOptions(hostsCache, statelessHosts | ||
// eslint-disable-next-line functional/prefer-readonly-type | ||
) { | ||
return Promise.all(statelessHosts.map(statelessHost => { | ||
return hostsCache.get(statelessHost, () => { | ||
return Promise.resolve(createStatefulHost(statelessHost)); | ||
}); | ||
})).then(statefulHosts => { | ||
const hostsUp = statefulHosts.filter(host => isStatefulHostUp(host)); | ||
const hostsTimeouted = statefulHosts.filter(host => isStatefulHostTimeouted(host)); | ||
/** | ||
* Note that we put the hosts that previously timed out at the end of the list. | ||
*/ | ||
const hostsAvailable = [...hostsUp, ...hostsTimeouted]; | ||
const statelessHostsAvailable = hostsAvailable.length > 0 | ||
? hostsAvailable.map(host => createStatelessHost(host)) | ||
: statelessHosts; | ||
return { | ||
getTimeout(timeoutsCount, baseTimeout) { | ||
/** | ||
* Imagine that you have 4 hosts; the timeout multiplier increases | ||
* as follows: 1 (timed out) > 4 (timed out) > 5 (200). | ||
* | ||
* Note that on the very next request we start from the previous timeout: | ||
* | ||
* 6 (timed out) > 7 (timed out) > 8 ... | ||
* | ||
* This strategy may need to be reviewed, but it is the strategy used by | ||
* the current v3 version. | ||
*/ | ||
const timeoutMultiplier = hostsTimeouted.length === 0 && timeoutsCount === 0 | ||
? 1 | ||
: hostsTimeouted.length + 3 + timeoutsCount; | ||
return timeoutMultiplier * baseTimeout; | ||
}, | ||
statelessHosts: statelessHostsAvailable, | ||
}; | ||
}); | ||
} | ||
const isNetworkError = ({ isTimedOut, status }) => { | ||
@@ -38,7 +107,4 @@ return !isTimedOut && ~~status === 0; | ||
}; | ||
const decision = (host, response, outcomes) => { | ||
const retryDecision = (response, outcomes) => { | ||
if (isRetryable(response)) { | ||
if (!response.isTimedOut) { | ||
host.setAsDown(); | ||
} | ||
return outcomes.onRetry(response); | ||
@@ -52,31 +118,25 @@ } | ||
function getAvailableHosts(hostsCache, statelessHosts | ||
/* eslint-disable functional/prefer-readonly-type */ | ||
) { | ||
return Promise.all(statelessHosts.map(host => hostsCache | ||
.get({ url: host.url }, () => Promise.resolve(host)) | ||
.then((hit) => { | ||
// eslint-disable-next-line functional/immutable-data | ||
return Object.assign(host, { | ||
downDate: hit.downDate, | ||
up: hit.up, | ||
}); | ||
}))).then(statefulHosts => statefulHosts.filter(host => host.isUp()).reverse()); | ||
} | ||
function execute(transporter, statelessHosts, request, requestOptions) { | ||
/* eslint-disable functional/prefer-readonly-type */ | ||
const stackTrace = []; | ||
// eslint-disable-next-line functional/no-let | ||
let timeoutRetries = 0; | ||
function retryableRequest(transporter, statelessHosts, request, requestOptions) { | ||
const stackTrace = []; // eslint-disable-line functional/prefer-readonly-type | ||
/** | ||
* First, we prepare the parts of the payload that do not depend on the host. | ||
*/ | ||
const data = serializeData(request, requestOptions); | ||
const headers = { ...transporter.headers, ...requestOptions.headers }; | ||
const method = request.method; | ||
const retry = (hosts) => { | ||
// eslint-disable-next-line functional/immutable-data | ||
const host = hosts.pop(); | ||
const queryParameters = { | ||
...transporter.queryParameters, | ||
...requestOptions.queryParameters, | ||
'x-algolia-agent': transporter.userAgent.value, | ||
}; | ||
let timeoutsCount = 0; // eslint-disable-line functional/no-let | ||
const retry = (hosts, // eslint-disable-line functional/prefer-readonly-type | ||
getTimeout) => { | ||
/** | ||
* We iterate over the hosts until there is no host left. | ||
*/ | ||
const host = hosts.pop(); // eslint-disable-line functional/immutable-data | ||
if (host === undefined) { | ||
throw createRetryError(stackTrace); | ||
} | ||
const timeoutAdjuster = timeoutRetries + 1; | ||
const payload = { | ||
@@ -86,9 +146,5 @@ data, | ||
method, | ||
url: serializeUrl(host, request.path, { | ||
...transporter.queryParameters, | ||
...requestOptions.queryParameters, | ||
'x-algolia-agent': transporter.userAgent.value, | ||
}), | ||
connectTimeout: timeoutAdjuster * transporter.timeouts.connect, | ||
socketTimeout: timeoutAdjuster * (requestOptions.timeout || 0), | ||
url: serializeUrl(host, request.path, queryParameters), | ||
connectTimeout: getTimeout(timeoutsCount, transporter.timeouts.connect), | ||
responseTimeout: getTimeout(timeoutsCount, requestOptions.timeout), | ||
}; | ||
@@ -103,13 +159,30 @@ const decisions = { | ||
triesLeft: hosts.length, | ||
timeoutRetries, | ||
}; | ||
// eslint-disable-next-line functional/immutable-data | ||
stackTrace.push(stackFrame); | ||
/** | ||
* The stack frame is pushed to the stack trace so we | ||
* have information about the failures once a | ||
* retry error is thrown. | ||
*/ | ||
stackTrace.push(stackFrame); // eslint-disable-line functional/immutable-data | ||
/** | ||
* If the response is a timeout, we increment the number of | ||
* timeouts so we can increase the timeout later. | ||
*/ | ||
if (response.isTimedOut) { | ||
timeoutRetries++; | ||
timeoutsCount++; // eslint-disable-line no-param-reassign | ||
} | ||
return Promise.all([ | ||
/** | ||
* Failures are individually sent to the logger, allowing | ||
* the end user to debug / store stack frames even | ||
* when a retry error does not happen. | ||
*/ | ||
transporter.logger.debug('Retryable failure', stackFrame), | ||
transporter.hostsCache.set({ url: host.url }, host), | ||
]).then(() => retry(hosts)); | ||
/** | ||
* We also store the state of the host in failure cases. If the host is | ||
* down, it will remain down for the next 2 minutes. In a timeout situation, | ||
* this host will be added to the end of the list of hosts on the next request. | ||
*/ | ||
transporter.hostsCache.set(host, createStatefulHost(host, response.isTimedOut ? HostStatusEnum.Timeouted : HostStatusEnum.Down)), | ||
]).then(() => retry(hosts, getTimeout)); | ||
}, | ||
@@ -121,6 +194,16 @@ onFail(response) { | ||
return transporter.requester.send(payload).then(response => { | ||
return decision(host, response, decisions); | ||
return retryDecision(response, decisions); | ||
}); | ||
}; | ||
return getAvailableHosts(transporter.hostsCache, statelessHosts).then(hosts => retry(hosts)); | ||
/** | ||
* Finally, we perform the request against each retryable host until we get a | ||
* non-retryable response. Some notes here: | ||
* | ||
* 1. The hosts are reversed so we can `pop` them later on, which is more performant. | ||
* 2. The retryable options also give us a timeout multiplier that is tailored | ||
* to the current context. | ||
*/ | ||
return createRetryableOptions(transporter.hostsCache, statelessHosts).then(options => { | ||
return retry([...options.statelessHosts].reverse(), options.getTimeout); | ||
}); | ||
} | ||
@@ -153,25 +236,73 @@ | ||
// eslint-disable-next-line functional/immutable-data | ||
transporter.hosts = values.map(hostOptions => createHost(hostOptions)); | ||
transporter.hosts = values.map(hostOptions => createStatelessHost(hostOptions)); | ||
}, | ||
read(request, requestOptions) { | ||
const mappedRequestOptions = mapRequestOptions(requestOptions, transporter.timeouts.read); | ||
const key = { request, mappedRequestOptions }; | ||
const createRequest = () => { | ||
return execute(transporter, transporter.hosts.filter(host => (host.accept & CallEnum.Read) !== 0), request, mappedRequestOptions); | ||
/** | ||
* First, we compute the user request options. Keep in mind that, | ||
* using request options, the user is able to modify the entire | ||
* payload of the request, such as headers, query parameters, and others. | ||
*/ | ||
const mappedRequestOptions = createMappedRequestOptions(requestOptions, transporter.timeouts.read); | ||
const createRetryableRequest = () => { | ||
/** | ||
* Then, we prepare a factory function that contains the construction of | ||
* the retryable request. At this point, we do *not* perform the actual | ||
* request yet, but we want to have the factory ready. | ||
*/ | ||
return retryableRequest(transporter, transporter.hosts.filter(host => (host.accept & CallEnum.Read) !== 0), request, mappedRequestOptions); | ||
}; | ||
/** | ||
* Once we have the factory ready, we need to determine whether the | ||
* request is "cacheable" - i.e. whether it should be cached. Note that, | ||
* once again, the user can force this option. | ||
*/ | ||
const cacheable = mappedRequestOptions.cacheable !== undefined | ||
? mappedRequestOptions.cacheable | ||
: request.cacheable; | ||
/** | ||
* If it is not "cacheable", we immediately trigger the retryable request; | ||
* there is no need to check the cache implementations. | ||
*/ | ||
if (cacheable !== true) { | ||
return createRequest(); | ||
return createRetryableRequest(); | ||
} | ||
/** | ||
* If the request is "cacheable", we first need to compute the key to ask | ||
* the cache implementations whether this request is in progress or the | ||
* response already exists in the cache. | ||
*/ | ||
const key = { | ||
request, | ||
mappedRequestOptions, | ||
transporter: { | ||
queryParameters: transporter.queryParameters, | ||
headers: transporter.headers, | ||
}, | ||
}; | ||
/** | ||
* With the computed key, we first ask the responses cache | ||
* implementation if this request has been resolved before. | ||
*/ | ||
return transporter.responsesCache.get(key, () => { | ||
/** | ||
* If the request has never been resolved before, we ask whether there | ||
* is a request with the same key currently in progress. | ||
*/ | ||
return transporter.requestsCache.get(key, () => { | ||
return (transporter.requestsCache | ||
.set(key, createRequest()) | ||
/** | ||
* Finally, if there is no request in progress with the same key, | ||
* this `createRetryableRequest()` will actually trigger the | ||
* retryable request. | ||
*/ | ||
.set(key, createRetryableRequest()) | ||
.then(response => Promise.all([transporter.requestsCache.delete(key), response]), err => Promise.all([transporter.requestsCache.delete(key), Promise.reject(err)])) | ||
// @todo Maybe remove this alias, and understand why we need it. | ||
.then(promiseResults => promiseResults[1])); | ||
.then(([_, response]) => response)); | ||
}); | ||
}, { | ||
/** | ||
* Once we get the response back from the server, we | ||
* tell the responses cache to store the received response | ||
* so it can be used later. | ||
*/ | ||
miss: response => transporter.responsesCache.set(key, response), | ||
@@ -181,3 +312,7 @@ }); | ||
write(request, requestOptions) { | ||
return execute(transporter, transporter.hosts.filter(host => (host.accept & CallEnum.Write) !== 0), request, mapRequestOptions(requestOptions, transporter.timeouts.write)); | ||
/** | ||
* On write requests, no cache mechanisms are applied, and we | ||
* proxy the request immediately to the requester. | ||
*/ | ||
return retryableRequest(transporter, transporter.hosts.filter(host => (host.accept & CallEnum.Write) !== 0), request, createMappedRequestOptions(requestOptions, transporter.timeouts.write)); | ||
}, | ||
@@ -234,29 +369,2 @@ }; | ||
function mapRequestOptions(requestOptions, timeout) { | ||
const options = requestOptions || {}; | ||
// eslint-disable-next-line functional/prefer-readonly-type | ||
const data = {}; | ||
Object.keys(options).forEach(key => { | ||
if (['timeout', 'headers', 'queryParameters', 'data', 'cacheable'].indexOf(key) === -1) { | ||
data[key] = options[key]; // eslint-disable-line functional/immutable-data | ||
} | ||
}); | ||
return { | ||
data, | ||
timeout: options.timeout || timeout, | ||
headers: options.headers || {}, | ||
queryParameters: options.queryParameters || {}, | ||
cacheable: options.cacheable, | ||
}; | ||
} | ||
function popRequestOption(requestOptions, key, defaultValue) { | ||
if (requestOptions !== undefined && key in requestOptions) { | ||
const value = requestOptions[key]; | ||
// eslint-disable-next-line no-param-reassign, functional/immutable-data | ||
delete requestOptions[key]; | ||
return value; | ||
} | ||
return defaultValue; | ||
} | ||
// eslint-disable-next-line functional/prefer-readonly-type | ||
@@ -311,8 +419,2 @@ function encode(format, ...args) { | ||
const CallEnum = { | ||
Read: 1, | ||
Write: 2, | ||
Any: 3, | ||
}; | ||
export { CallEnum, createApiError, createDeserializationError, createHost, createRetryError, createTransporter, createUserAgent, deserializeFailure, deserializeSuccess, mapRequestOptions, popRequestOption, serializeData, serializeQueryParameters, serializeUrl }; | ||
export { CallEnum, HostStatusEnum, createApiError, createDeserializationError, createMappedRequestOptions, createRetryError, createStatefulHost, createStatelessHost, createTransporter, createUserAgent, deserializeFailure, deserializeSuccess, isStatefulHostTimeouted, isStatefulHostUp, serializeData, serializeQueryParameters, serializeUrl }; |
{ | ||
"name": "@algolia/transporter", | ||
"version": "4.0.0-beta.9", | ||
"version": "4.0.0-beta.10", | ||
"private": false, | ||
@@ -16,6 +16,6 @@ "description": "Promise-based transporter layer with embedded retry strategy.", | ||
"dependencies": { | ||
"@algolia/cache-common": "4.0.0-beta.9", | ||
"@algolia/logger-common": "4.0.0-beta.9", | ||
"@algolia/requester-common": "4.0.0-beta.9" | ||
"@algolia/cache-common": "4.0.0-beta.10", | ||
"@algolia/logger-common": "4.0.0-beta.10", | ||
"@algolia/requester-common": "4.0.0-beta.10" | ||
} | ||
} |
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
+ Added@algolia/cache-common@4.0.0-beta.10(transitive)
+ Added@algolia/logger-common@4.0.0-beta.10(transitive)
+ Added@algolia/requester-common@4.0.0-beta.10(transitive)
- Removed@algolia/cache-common@4.0.0-beta.9(transitive)
- Removed@algolia/logger-common@4.0.0-beta.9(transitive)
- Removed@algolia/requester-common@4.0.0-beta.9(transitive)