
@parcel/core - npm package version comparison

Comparing versions 2.0.0-dev.1584 and 2.0.0-dev.1600


lib/requests/ConfigRequest.js

@@ -18,2 +18,9 @@ "use strict";

}
function _rust() {
const data = require("@parcel/rust");
_rust = function () {
return data;
};
return data;
}
var _serializer = require("../serializer.js");
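
The repeated _rust() helper (and the _featureFlags() one below) is Babel's lazy-import interop: the require call runs only the first time the helper is invoked, after which the helper overwrites itself with a getter that returns the cached module. A minimal sketch of the pattern, using a placeholder module name:

function _heavyDep() {
  const data = require('heavy-dep'); // 'heavy-dep' is illustrative; loaded on first call only
  _heavyDep = function () {
    return data; // every later call returns the cached module
  };
  return data;
}

This keeps modules like @parcel/rust off the critical path until a code path actually needs them.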

@@ -38,9 +45,2 @@ function _logger() {

var _assetUtils = require("../assetUtils");
function _rust() {
const data = require("@parcel/rust");
_rust = function () {
return data;
};
return data;
}
function _profiler() {

@@ -56,2 +56,9 @@ const data = require("@parcel/profiler");

var _buildCache = require("../buildCache");
function _featureFlags() {
const data = require("@parcel/feature-flags");
_featureFlags = function () {
return data;
};
return data;
}
function _getRequireWildcardCache(e) { if ("function" != typeof WeakMap) return null; var r = new WeakMap(), t = new WeakMap(); return (_getRequireWildcardCache = function (e) { return e ? t : r; })(e); }

@@ -115,3 +122,3 @@ function _interopRequireWildcard(e, r) { if (!r && e && e.__esModule) return e; if (null === e || "object" != typeof e && "function" != typeof e) return { default: e }; var t = _getRequireWildcardCache(r); if (t && t.has(e)) return t.get(e); var n = { __proto__: null }, a = Object.defineProperty && Object.getOwnPropertyDescriptor; for (var u in e) if ("default" !== u && Object.prototype.hasOwnProperty.call(e, u)) { var i = a ? Object.getOwnPropertyDescriptor(e, u) : null; i && (i.get || i.set) ? Object.defineProperty(n, u, i) : n[u] = e[u]; } return n.default = e, t && t.set(e, n), n; }

// If there are no invalidations, then no need to create a node.
if (invalidateOnFileChange.size === 0 && invalidateOnConfigKeyChange.length === 0 && invalidateOnFileCreate.length === 0 && invalidateOnOptionChange.size === 0 && !invalidateOnStartup && !invalidateOnBuild) {
if (invalidateOnFileChange.size === 0 && invalidateOnConfigKeyChange.length === 0 && invalidateOnFileCreate.length === 0 && invalidateOnOptionChange.size === 0 && invalidateOnEnvChange.size === 0 && !invalidateOnStartup && !invalidateOnBuild) {
return;

@@ -126,2 +133,14 @@ }

}) => {
if ((0, _featureFlags().getFeatureFlag)('parcelV3')) {
return (0, _rust().napiRunConfigRequest)({
id: configRequest.id,
invalidateOnBuild: configRequest.invalidateOnBuild,
invalidateOnConfigKeyChange: configRequest.invalidateOnConfigKeyChange,
invalidateOnFileCreate: configRequest.invalidateOnFileCreate,
invalidateOnEnvChange: Array.from(configRequest.invalidateOnEnvChange),
invalidateOnOptionChange: Array.from(configRequest.invalidateOnOptionChange),
invalidateOnStartup: configRequest.invalidateOnStartup,
invalidateOnFileChange: Array.from(configRequest.invalidateOnFileChange)
}, api, options);
}
for (let filePath of invalidateOnFileChange) {

@@ -128,0 +147,0 @@ api.invalidateOnFileUpdate(filePath);
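
When the parcelV3 feature flag is on, the whole config request is handed to the native implementation via napiRunConfigRequest from @parcel/rust; the Set-valued invalidation fields are converted with Array.from first, presumably because plain arrays are what the napi layer can receive. A minimal sketch of this gating shape, where jsRun and nativeRun are hypothetical stand-ins:

async function runConfigRequest(configRequest, api, options) {
  if (getFeatureFlag('parcelV3')) {
    // Sets are flattened to arrays before crossing into Rust.
    return nativeRun({
      ...configRequest,
      invalidateOnEnvChange: Array.from(configRequest.invalidateOnEnvChange),
      invalidateOnOptionChange: Array.from(configRequest.invalidateOnOptionChange),
      invalidateOnFileChange: Array.from(configRequest.invalidateOnFileChange),
    }, api, options);
  }
  return jsRun(configRequest, api, options); // the pre-existing JS path
}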

@@ -322,3 +322,3 @@ "use strict";

throw new (_diagnostic().default)({
diagnostic: errors.flatMap(e => e.diagnostics)
diagnostic: errors.flatMap(e => e.diagnostics ?? (0, _diagnostic().errorToDiagnostic)(e))
});

@@ -390,3 +390,12 @@ }

function validateConfigFile(config, relativePath) {
validateNotEmpty(config, relativePath);
try {
validateNotEmpty(config, relativePath);
} catch (e) {
throw new (_diagnostic().default)({
diagnostic: {
message: e.message,
origin: '@parcel/core'
}
});
}
_utils().validateSchema.diagnostic(_ParcelConfig.default, {

@@ -393,0 +402,0 @@ data: config,

@@ -76,2 +76,5 @@ "use strict";

};
class FSBailoutError extends Error {
name = 'FSBailoutError';
}
const FILE = 0;

@@ -137,2 +140,6 @@ const REQUEST = 1;

const keyFromOptionContentKey = contentKey => contentKey.slice('option:'.length);
// This constant is chosen by local profiling the time to serialise n nodes and tuning until an average time of ~50 ms per blob.
// The goal is to free up the event loop periodically to allow interruption by the user.
const NODES_PER_BLOB = 2 ** 14;
class RequestGraph extends _graph().ContentGraph {

@@ -151,2 +158,3 @@ invalidNodeIds = new Set();

configKeyNodes = new Map();
nodesPerBlob = NODES_PER_BLOB;
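
Turning the module constant into an instance field (nodesPerBlob, still defaulting to NODES_PER_BLOB) is what lets the new RequestTracker tests below shrink the blob size to 2 instead of allocating over 16,000 nodes. A node's chunk is plain integer division, so chunk i covers node IDs from i * nodesPerBlob up to (but not including) (i + 1) * nodesPerBlob:

const NODES_PER_BLOB = 2 ** 14; // 16384
const chunkForNode = (nodeId, nodesPerBlob = NODES_PER_BLOB) =>
  Math.floor(nodeId / nodesPerBlob);

chunkForNode(0);     // 0
chunkForNode(16383); // 0 (last node of chunk 0)
chunkForNode(16384); // 1 (first node of chunk 1)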

@@ -199,2 +207,3 @@ // $FlowFixMe[prop-missing]

}
this.removeCachedRequestChunkForNode(nodeId);
return nodeId;

@@ -504,3 +513,3 @@ }

});
throw new Error('Responding to file system events exceeded threshold, start with empty cache.');
throw new FSBailoutError('Responding to file system events exceeded threshold, start with empty cache.');
}

@@ -630,10 +639,6 @@ }

removeCachedRequestChunkForNode(nodeId) {
this.cachedRequestChunks.delete(Math.floor(nodeId / NODES_PER_BLOB));
this.cachedRequestChunks.delete(Math.floor(nodeId / this.nodesPerBlob));
}
}
// This constant is chosen by local profiling the time to serialise n nodes and tuning until an average time of ~50 ms per blob.
// The goal is to free up the event loop periodically to allow interruption by the user.
exports.RequestGraph = RequestGraph;
const NODES_PER_BLOB = 2 ** 14;
class RequestTracker {

@@ -921,6 +926,11 @@ stats = new Map();

}
for (let i = 0; i * NODES_PER_BLOB < cacheableNodes.length; i += 1) {
let nodeCountsPerBlob = [];
for (let i = 0; i * this.graph.nodesPerBlob < cacheableNodes.length; i += 1) {
let nodesStartIndex = i * this.graph.nodesPerBlob;
let nodesEndIndex = Math.min((i + 1) * this.graph.nodesPerBlob, cacheableNodes.length);
nodeCountsPerBlob.push(nodesEndIndex - nodesStartIndex);
if (!this.graph.hasCachedRequestChunk(i)) {
// We assume the request graph nodes are immutable and won't change
queue.add(() => serialiseAndSet(getRequestGraphNodeKey(i, cacheKey), cacheableNodes.slice(i * NODES_PER_BLOB, (i + 1) * NODES_PER_BLOB)).then(() => {
let nodesToCache = cacheableNodes.slice(nodesStartIndex, nodesEndIndex);
queue.add(() => serialiseAndSet(getRequestGraphNodeKey(i, cacheKey), nodesToCache).then(() => {
// Succeeded in writing to disk, save that we have completed this chunk

@@ -939,2 +949,3 @@ this.graph.setCachedRequestChunk(i);

...serialisedGraph,
nodeCountsPerBlob,
nodes: undefined
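
The write path now also records how many nodes went into each blob and stores that list (nodeCountsPerBlob) alongside the serialized graph. Because the end index is clamped with Math.min, the final blob may be short; for example, five cacheable nodes with nodesPerBlob = 2 produce three blobs:

let nodesPerBlob = 2;
let totalNodes = 5;
let nodeCountsPerBlob = [];
for (let i = 0; i * nodesPerBlob < totalNodes; i++) {
  let start = i * nodesPerBlob;
  let end = Math.min((i + 1) * nodesPerBlob, totalNodes);
  nodeCountsPerBlob.push(end - start); // chunk i holds nodes [start, end)
}
// nodeCountsPerBlob === [2, 2, 1]: chunks 0 and 1 are full, chunk 2 is partial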

@@ -972,7 +983,7 @@ });

cacheDir,
projectRoot,
watchDir,
watchBackend
}) {
const uniqueDirs = [...new Set([...watchIgnore, ...['.git', '.hg'], cacheDir])];
const ignore = uniqueDirs.map(dir => _path2().default.resolve(projectRoot, dir));
const ignore = uniqueDirs.map(dir => _path2().default.resolve(watchDir, dir));
return {

@@ -996,13 +1007,12 @@ ignore,

};
let i = 0;
let nodePromises = [];
while (await cache.hasLargeBlob(getRequestGraphNodeKey(i, cacheKey))) {
nodePromises.push(getAndDeserialize(getRequestGraphNodeKey(i, cacheKey)));
i += 1;
}
let serializedRequestGraph = await getAndDeserialize(requestGraphKey);
let nodePromises = serializedRequestGraph.nodeCountsPerBlob.map(async (nodesCount, i) => {
let nodes = await getAndDeserialize(getRequestGraphNodeKey(i, cacheKey));
_assert().default.equal(nodes.length, nodesCount, 'RequestTracker node chunk: invalid node count');
return nodes;
});
return {
requestGraph: RequestGraph.deserialize({
...serializedRequestGraph,
nodes: (await Promise.all(nodePromises)).flatMap(nodeChunk => nodeChunk)
nodes: (await Promise.all(nodePromises)).flat()
}),
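
On the read side, the stored nodeCountsPerBlob replaces the old probe loop that kept calling cache.hasLargeBlob until a chunk key was missing. The difference matters when chunk files from a previously larger graph are still on disk: the probe loop would read those stale chunks into the new graph, while the count-driven version reads exactly the recorded number of blobs and asserts each one's length (the 'should ignore stale node chunks from cache' test below exercises exactly this). A sketch, where readChunk is a hypothetical helper that deserializes one blob:

// nodeCountsPerBlob = [2, 2, 1] means exactly chunks 0..2 are read,
// even if a stale chunk 3 still exists in the cache.
let nodes = (
  await Promise.all(
    nodeCountsPerBlob.map(async (count, i) => {
      let chunk = await readChunk(i);
      assert.equal(chunk.length, count, 'invalid node count');
      return chunk;
    }),
  )
).flat();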

@@ -1019,34 +1029,38 @@ // This is used inside parcel query for `.inspectCache`

let requestGraphKey = `requestGraph-${cacheKey}`;
let timeout;
const snapshotPath = _path2().default.join(options.cacheDir, `snapshot-${cacheKey}` + '.txt');
if (await options.cache.hasLargeBlob(requestGraphKey)) {
let {
requestGraph
} = await readAndDeserializeRequestGraph(options.cache, requestGraphKey, cacheKey);
let opts = getWatcherOptions(options);
let snapshotPath = _path2().default.join(options.cacheDir, `snapshot-${cacheKey}` + '.txt');
let timeout = setTimeout(() => {
_logger().default.warn({
try {
let {
requestGraph
} = await readAndDeserializeRequestGraph(options.cache, requestGraphKey, cacheKey);
let opts = getWatcherOptions(options);
timeout = setTimeout(() => {
_logger().default.warn({
origin: '@parcel/core',
message: `Retrieving file system events since last build...\nThis can take upto a minute after branch changes or npm/yarn installs.`
});
}, 5000);
let startTime = Date.now();
let events = await options.inputFS.getEventsSince(options.watchDir, snapshotPath, opts);
clearTimeout(timeout);
_logger().default.verbose({
origin: '@parcel/core',
message: `Retrieving file system events since last build...\nThis can take upto a minute after branch changes or npm/yarn installs.`
message: `File system event count: ${events.length}`,
meta: {
trackableEvent: 'watcher_events_count',
watcherEventCount: events.length,
duration: Date.now() - startTime
}
});
}, 5000);
let startTime = Date.now();
let events = await options.inputFS.getEventsSince(options.watchDir, snapshotPath, opts);
clearTimeout(timeout);
_logger().default.verbose({
origin: '@parcel/core',
message: `File system event count: ${events.length}`,
meta: {
trackableEvent: 'watcher_events_count',
watcherEventCount: events.length,
duration: Date.now() - startTime
}
});
requestGraph.invalidateUnpredictableNodes();
requestGraph.invalidateOnBuildNodes();
requestGraph.invalidateEnvNodes(options.env);
requestGraph.invalidateOptionNodes(options);
try {
requestGraph.invalidateUnpredictableNodes();
requestGraph.invalidateOnBuildNodes();
requestGraph.invalidateEnvNodes(options.env);
requestGraph.invalidateOptionNodes(options);
await requestGraph.respondToFSEvents(options.unstableFileInvalidations || events, options, 10000);
return requestGraph;
} catch (e) {
// Prevent logging fs events took too long warning
clearTimeout(timeout);
logErrorOnBailout(options, snapshotPath, e);
// This error means respondToFSEvents timed out handling the invalidation events

@@ -1058,2 +1072,23 @@ // In this case we'll return a fresh RequestGraph

return new RequestGraph();
}
function logErrorOnBailout(options, snapshotPath, e) {
if (e.message && e.message.includes('invalid clockspec')) {
const snapshotContents = options.inputFS.readFileSync(snapshotPath, 'utf-8');
_logger().default.warn({
origin: '@parcel/core',
message: `Error reading clockspec from snapshot, building with clean cache.`,
meta: {
snapshotContents: snapshotContents,
trackableEvent: 'invalid_clockspec_error'
}
});
} else if (!(e instanceof FSBailoutError)) {
_logger().default.warn({
origin: '@parcel/core',
message: `Unexpected error loading cache from disk, building with clean cache.`,
meta: {
trackableEvent: 'cache_load_error'
}
});
}
}
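
Two structural points in this restructure. First, the timeout handle is hoisted above the try block so both the success path and the catch can clear it; previously an error thrown between setTimeout and clearTimeout would let the 'Retrieving file system events...' warning fire even though the build had already bailed out to a clean cache. In miniature (doWork and handleBailout are placeholders):

let timeout;
try {
  timeout = setTimeout(() => console.warn('still retrieving events...'), 5000);
  await doWork();        // any await in here may throw
  clearTimeout(timeout); // success path
} catch (e) {
  clearTimeout(timeout); // failure path: suppress the pending warning
  handleBailout(e);
}

Second, the dedicated FSBailoutError subclass exists so logErrorOnBailout can tell an expected bailout (respondToFSEvents exceeding its event threshold) apart from a genuinely unexpected cache-load failure via instanceof, logging the 'cache_load_error' event only for the latter.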
{
"name": "@parcel/core",
"version": "2.0.0-dev.1584+ee048fa0b",
"version": "2.0.0-dev.1600+931c4470d",
"license": "MIT",

@@ -28,17 +28,17 @@ "publishConfig": {

"@mischnic/json-sourcemap": "^0.1.0",
"@parcel/cache": "2.0.0-dev.1586+ee048fa0b",
"@parcel/diagnostic": "2.0.0-dev.1586+ee048fa0b",
"@parcel/events": "2.0.0-dev.1586+ee048fa0b",
"@parcel/feature-flags": "2.12.1-dev.3209+ee048fa0b",
"@parcel/fs": "2.0.0-dev.1586+ee048fa0b",
"@parcel/graph": "3.2.1-dev.3209+ee048fa0b",
"@parcel/logger": "2.0.0-dev.1586+ee048fa0b",
"@parcel/package-manager": "2.0.0-dev.1586+ee048fa0b",
"@parcel/plugin": "2.0.0-dev.1586+ee048fa0b",
"@parcel/profiler": "2.12.1-dev.3209+ee048fa0b",
"@parcel/rust": "2.12.1-dev.3209+ee048fa0b",
"@parcel/cache": "2.0.0-dev.1602+931c4470d",
"@parcel/diagnostic": "2.0.0-dev.1602+931c4470d",
"@parcel/events": "2.0.0-dev.1602+931c4470d",
"@parcel/feature-flags": "2.12.1-dev.3225+931c4470d",
"@parcel/fs": "2.0.0-dev.1602+931c4470d",
"@parcel/graph": "3.2.1-dev.3225+931c4470d",
"@parcel/logger": "2.0.0-dev.1602+931c4470d",
"@parcel/package-manager": "2.0.0-dev.1602+931c4470d",
"@parcel/plugin": "2.0.0-dev.1602+931c4470d",
"@parcel/profiler": "2.12.1-dev.3225+931c4470d",
"@parcel/rust": "2.12.1-dev.3225+931c4470d",
"@parcel/source-map": "^2.1.1",
"@parcel/types": "2.0.0-dev.1586+ee048fa0b",
"@parcel/utils": "2.0.0-dev.1586+ee048fa0b",
"@parcel/workers": "2.0.0-dev.1586+ee048fa0b",
"@parcel/types": "2.0.0-dev.1602+931c4470d",
"@parcel/utils": "2.0.0-dev.1602+931c4470d",
"@parcel/workers": "2.0.0-dev.1602+931c4470d",
"base-x": "^3.0.8",

@@ -61,3 +61,3 @@ "browserslist": "^4.6.6",

},
"gitHead": "ee048fa0b95364a320c040d012673766869f4c87"
"gitHead": "931c4470dbc2fea7094f67019a22486e3de7658b"
}

@@ -20,2 +20,3 @@ // @flow

import type {ProjectPath} from '../projectPath';
import {napiRunConfigRequest} from '@parcel/rust';

@@ -34,2 +35,3 @@ import {serializeRaw} from '../serializer.js';

import {createBuildCache} from '../buildCache';
import {getFeatureFlag} from '@parcel/feature-flags';

@@ -166,2 +168,3 @@ export type PluginWithLoadConfig = {

invalidateOnOptionChange.size === 0 &&
invalidateOnEnvChange.size === 0 &&
!invalidateOnStartup &&

@@ -177,2 +180,26 @@ !invalidateOnBuild

run: async ({api, options}) => {
if (getFeatureFlag('parcelV3')) {
return napiRunConfigRequest(
{
id: configRequest.id,
invalidateOnBuild: configRequest.invalidateOnBuild,
invalidateOnConfigKeyChange:
configRequest.invalidateOnConfigKeyChange,
invalidateOnFileCreate: configRequest.invalidateOnFileCreate,
invalidateOnEnvChange: Array.from(
configRequest.invalidateOnEnvChange,
),
invalidateOnOptionChange: Array.from(
configRequest.invalidateOnOptionChange,
),
invalidateOnStartup: configRequest.invalidateOnStartup,
invalidateOnFileChange: Array.from(
configRequest.invalidateOnFileChange,
),
},
api,
options,
);
}
for (let filePath of invalidateOnFileChange) {

@@ -179,0 +206,0 @@ api.invalidateOnFileUpdate(filePath);

@@ -30,2 +30,3 @@ // @flow strict-local

md,
errorToDiagnostic,
} from '@parcel/diagnostic';

@@ -471,3 +472,3 @@ import {parse} from 'json5';

throw new ThrowableDiagnostic({
diagnostic: errors.flatMap(e => e.diagnostics),
diagnostic: errors.flatMap(e => e.diagnostics ?? errorToDiagnostic(e)),
});

@@ -579,3 +580,12 @@ }

) {
validateNotEmpty(config, relativePath);
try {
validateNotEmpty(config, relativePath);
} catch (e) {
throw new ThrowableDiagnostic({
diagnostic: {
message: e.message,
origin: '@parcel/core',
},
});
}

@@ -582,0 +592,0 @@ validateSchema.diagnostic(

@@ -68,2 +68,6 @@ // @flow strict-local

class FSBailoutError extends Error {
name: string = 'FSBailoutError';
}
export type RequestGraphEdgeType = $Values<typeof requestGraphEdgeTypes>;

@@ -267,2 +271,6 @@

// This constant is chosen by local profiling the time to serialise n nodes and tuning until an average time of ~50 ms per blob.
// The goal is to free up the event loop periodically to allow interruption by the user.
const NODES_PER_BLOB = 2 ** 14;
export class RequestGraph extends ContentGraph<

@@ -284,2 +292,3 @@ RequestGraphNode,

configKeyNodes: Map<ProjectPath, Set<NodeId>> = new Map();
nodesPerBlob: number = NODES_PER_BLOB;

@@ -334,2 +343,4 @@ // $FlowFixMe[prop-missing]

this.removeCachedRequestChunkForNode(nodeId);
return nodeId;

@@ -862,3 +873,3 @@ }

});
throw new Error(
throw new FSBailoutError(
'Responding to file system events exceeded threshold, start with empty cache.',

@@ -1034,10 +1045,6 @@ );

removeCachedRequestChunkForNode(nodeId: number): void {
this.cachedRequestChunks.delete(Math.floor(nodeId / NODES_PER_BLOB));
this.cachedRequestChunks.delete(Math.floor(nodeId / this.nodesPerBlob));
}
}
// This constant is chosen by local profiling the time to serialise n nodes and tuning until an average time of ~50 ms per blob.
// The goal is to free up the event loop periodically to allow interruption by the user.
const NODES_PER_BLOB = 2 ** 14;
export default class RequestTracker {

@@ -1430,5 +1437,21 @@ graph: RequestGraph;

for (let i = 0; i * NODES_PER_BLOB < cacheableNodes.length; i += 1) {
let nodeCountsPerBlob = [];
for (
let i = 0;
i * this.graph.nodesPerBlob < cacheableNodes.length;
i += 1
) {
let nodesStartIndex = i * this.graph.nodesPerBlob;
let nodesEndIndex = Math.min(
(i + 1) * this.graph.nodesPerBlob,
cacheableNodes.length,
);
nodeCountsPerBlob.push(nodesEndIndex - nodesStartIndex);
if (!this.graph.hasCachedRequestChunk(i)) {
// We assume the request graph nodes are immutable and won't change
let nodesToCache = cacheableNodes.slice(nodesStartIndex, nodesEndIndex);
queue

@@ -1438,6 +1461,3 @@ .add(() =>

getRequestGraphNodeKey(i, cacheKey),
cacheableNodes.slice(
i * NODES_PER_BLOB,
(i + 1) * NODES_PER_BLOB,
),
nodesToCache,
).then(() => {

@@ -1460,2 +1480,3 @@ // Succeeded in writing to disk, save that we have completed this chunk

...serialisedGraph,
nodeCountsPerBlob,
nodes: undefined,

@@ -1495,3 +1516,3 @@ });

cacheDir,
projectRoot,
watchDir,
watchBackend,

@@ -1501,3 +1522,3 @@ }: ParcelOptions): WatcherOptions {

const uniqueDirs = [...new Set([...watchIgnore, ...vcsDirs, cacheDir])];
const ignore = uniqueDirs.map(dir => path.resolve(projectRoot, dir));
const ignore = uniqueDirs.map(dir => path.resolve(watchDir, dir));

@@ -1531,15 +1552,20 @@ return {ignore, backend: watchBackend};

let i = 0;
let nodePromises = [];
while (await cache.hasLargeBlob(getRequestGraphNodeKey(i, cacheKey))) {
nodePromises.push(getAndDeserialize(getRequestGraphNodeKey(i, cacheKey)));
i += 1;
}
let serializedRequestGraph = await getAndDeserialize(requestGraphKey);
let nodePromises = serializedRequestGraph.nodeCountsPerBlob.map(
async (nodesCount, i) => {
let nodes = await getAndDeserialize(getRequestGraphNodeKey(i, cacheKey));
invariant.equal(
nodes.length,
nodesCount,
'RequestTracker node chunk: invalid node count',
);
return nodes;
},
);
return {
requestGraph: RequestGraph.deserialize({
...serializedRequestGraph,
nodes: (await Promise.all(nodePromises)).flatMap(nodeChunk => nodeChunk),
nodes: (await Promise.all(nodePromises)).flat(),
}),

@@ -1558,44 +1584,44 @@ // This is used inside parcel query for `.inspectCache`

let requestGraphKey = `requestGraph-${cacheKey}`;
let timeout;
const snapshotKey = `snapshot-${cacheKey}`;
const snapshotPath = path.join(options.cacheDir, snapshotKey + '.txt');
if (await options.cache.hasLargeBlob(requestGraphKey)) {
let {requestGraph} = await readAndDeserializeRequestGraph(
options.cache,
requestGraphKey,
cacheKey,
);
try {
let {requestGraph} = await readAndDeserializeRequestGraph(
options.cache,
requestGraphKey,
cacheKey,
);
let opts = getWatcherOptions(options);
let snapshotKey = `snapshot-${cacheKey}`;
let snapshotPath = path.join(options.cacheDir, snapshotKey + '.txt');
let opts = getWatcherOptions(options);
let timeout = setTimeout(() => {
logger.warn({
timeout = setTimeout(() => {
logger.warn({
origin: '@parcel/core',
message: `Retrieving file system events since last build...\nThis can take upto a minute after branch changes or npm/yarn installs.`,
});
}, 5000);
let startTime = Date.now();
let events = await options.inputFS.getEventsSince(
options.watchDir,
snapshotPath,
opts,
);
clearTimeout(timeout);
logger.verbose({
origin: '@parcel/core',
message: `Retrieving file system events since last build...\nThis can take upto a minute after branch changes or npm/yarn installs.`,
message: `File system event count: ${events.length}`,
meta: {
trackableEvent: 'watcher_events_count',
watcherEventCount: events.length,
duration: Date.now() - startTime,
},
});
}, 5000);
let startTime = Date.now();
let events = await options.inputFS.getEventsSince(
options.watchDir,
snapshotPath,
opts,
);
clearTimeout(timeout);
logger.verbose({
origin: '@parcel/core',
message: `File system event count: ${events.length}`,
meta: {
trackableEvent: 'watcher_events_count',
watcherEventCount: events.length,
duration: Date.now() - startTime,
},
});
requestGraph.invalidateUnpredictableNodes();
requestGraph.invalidateOnBuildNodes();
requestGraph.invalidateEnvNodes(options.env);
requestGraph.invalidateOptionNodes(options);
requestGraph.invalidateUnpredictableNodes();
requestGraph.invalidateOnBuildNodes();
requestGraph.invalidateEnvNodes(options.env);
requestGraph.invalidateOptionNodes(options);
try {
await requestGraph.respondToFSEvents(

@@ -1608,2 +1634,5 @@ options.unstableFileInvalidations || events,

} catch (e) {
// Prevent logging fs events took too long warning
clearTimeout(timeout);
logErrorOnBailout(options, snapshotPath, e);
// This error means respondToFSEvents timed out handling the invalidation events

@@ -1617,1 +1646,29 @@ // In this case we'll return a fresh RequestGraph

}
function logErrorOnBailout(
options: ParcelOptions,
snapshotPath: string,
e: Error,
): void {
if (e.message && e.message.includes('invalid clockspec')) {
const snapshotContents = options.inputFS.readFileSync(
snapshotPath,
'utf-8',
);
logger.warn({
origin: '@parcel/core',
message: `Error reading clockspec from snapshot, building with clean cache.`,
meta: {
snapshotContents: snapshotContents,
trackableEvent: 'invalid_clockspec_error',
},
});
} else if (!(e instanceof FSBailoutError)) {
logger.warn({
origin: '@parcel/core',
message: `Unexpected error loading cache from disk, building with clean cache.`,
meta: {
trackableEvent: 'cache_load_error',
},
});
}
}

@@ -89,3 +89,3 @@ // @flow strict-local

describe('parcel.unstable_transform()', () => {
it('should transforms simple file', async () => {
it('should transform simple file', async () => {
let parcel = createParcel({workerFarm});

@@ -96,3 +96,3 @@ let res = await parcel.unstable_transform({

let code = await res[0].getCode();
assert(code.includes('exports.default = "test"'));
assert(code.includes(`exports.default = 'test'`));
});

@@ -108,5 +108,5 @@

assert(code.includes('require("./index.js")'));
assert(code.includes('new URL("index.js", "file:" + __filename);'));
assert(code.includes('import("index.js")'));
assert(code.includes(`require("./index.js")`));
assert(code.includes(`new URL("index.js", "file:" + __filename);`));
assert(code.includes(`import('index.js')`));
});

@@ -113,0 +113,0 @@ });

@@ -312,5 +312,8 @@ // @flow

it('should throw error on empty config file', () => {
assert.throws(() => {
validateConfigFile({}, '.parcelrc');
}, /.parcelrc can't be empty/);
assert.throws(
() => {
validateConfigFile({}, '.parcelrc');
},
{name: 'Error', message: ".parcelrc can't be empty"},
);
});
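
Passing an object as the second argument to assert.throws makes Node's assert check every listed property against the thrown error, so the rewritten test pins down both the error's name and its exact message, where the old form only required the regex to match somewhere in the message. In miniature:

const assert = require('assert');

assert.throws(
  () => {
    throw new Error(".parcelrc can't be empty");
  },
  // each property here must match the thrown error exactly
  {name: 'Error', message: ".parcelrc can't be empty"},
);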

@@ -317,0 +320,0 @@ });

@@ -310,2 +310,133 @@ // @flow strict-local

});
it('should ignore stale node chunks from cache', async () => {
let tracker = new RequestTracker({farm, options});
// Set the nodes per blob low so we can ensure multiple files without
// creating 17,000 nodes
tracker.graph.nodesPerBlob = 2;
tracker.graph.addNode({type: 0, id: 'some-file-node-1'});
tracker.graph.addNode({type: 0, id: 'some-file-node-2'});
tracker.graph.addNode({type: 0, id: 'some-file-node-3'});
tracker.graph.addNode({type: 0, id: 'some-file-node-4'});
tracker.graph.addNode({type: 0, id: 'some-file-node-5'});
await tracker.writeToCache();
// Create a new request tracker that shouldn't look at the old cache files
tracker = new RequestTracker({farm, options});
assert.equal(tracker.graph.nodes.length, 0);
tracker.graph.addNode({type: 0, id: 'some-file-node-1'});
await tracker.writeToCache();
// Init a request tracker that should only read the relevant cache files
tracker = await RequestTracker.init({farm, options});
assert.equal(tracker.graph.nodes.length, 1);
});
it('should init with multiple node chunks', async () => {
let tracker = new RequestTracker({farm, options});
// Set the nodes per blob low so we can ensure multiple files without
// creating 17,000 nodes
tracker.graph.nodesPerBlob = 2;
tracker.graph.addNode({type: 0, id: 'some-file-node-1'});
tracker.graph.addNode({type: 0, id: 'some-file-node-2'});
tracker.graph.addNode({type: 0, id: 'some-file-node-3'});
tracker.graph.addNode({type: 0, id: 'some-file-node-4'});
tracker.graph.addNode({type: 0, id: 'some-file-node-5'});
await tracker.writeToCache();
tracker = await RequestTracker.init({farm, options});
assert.equal(tracker.graph.nodes.length, 5);
});
it('should write new nodes to cache', async () => {
let tracker = new RequestTracker({farm, options});
tracker.graph.addNode({
type: 0,
id: 'test-file',
});
await tracker.writeToCache();
assert.equal(tracker.graph.nodes.length, 1);
tracker.graph.addNode({
type: 0,
id: 'test-file-2',
});
await tracker.writeToCache();
assert.equal(tracker.graph.nodes.length, 2);
// Create a new tracker from cache
tracker = await RequestTracker.init({farm, options});
await tracker.writeToCache();
assert.equal(tracker.graph.nodes.length, 2);
});
it('should write updated nodes to cache', async () => {
let tracker = new RequestTracker({farm, options});
let contentKey = 'abc';
await tracker.runRequest({
id: contentKey,
type: 7,
run: async ({api}: {api: RunAPI<string | void>, ...}) => {
let result = await Promise.resolve('a');
api.storeResult(result);
},
input: null,
});
assert.equal(await tracker.getRequestResult(contentKey), 'a');
await tracker.writeToCache();
await tracker.runRequest(
{
id: contentKey,
type: 7,
run: async ({api}: {api: RunAPI<string | void>, ...}) => {
let result = await Promise.resolve('b');
api.storeResult(result);
},
input: null,
},
{force: true},
);
assert.equal(await tracker.getRequestResult(contentKey), 'b');
await tracker.writeToCache();
// Create a new tracker from cache
tracker = await RequestTracker.init({farm, options});
assert.equal(await tracker.getRequestResult(contentKey), 'b');
});
it('should write invalidated nodes to cache', async () => {
let tracker = new RequestTracker({farm, options});
let contentKey = 'abc';
await tracker.runRequest({
id: contentKey,
type: 7,
run: () => {},
input: null,
});
let nodeId = tracker.graph.getNodeIdByContentKey(contentKey);
assert.equal(tracker.graph.getNode(nodeId)?.invalidateReason, 0);
await tracker.writeToCache();
tracker.graph.invalidateNode(nodeId, 1);
assert.equal(tracker.graph.getNode(nodeId)?.invalidateReason, 1);
await tracker.writeToCache();
// Create a new tracker from cache
tracker = await RequestTracker.init({farm, options});
assert.equal(tracker.graph.getNode(nodeId)?.invalidateReason, 1);
});
});

@@ -58,2 +58,3 @@ // @flow strict-local

configKeyInvalidation: false,
parcelV3: false,
dfsFasterRefactor: false,

@@ -60,0 +61,0 @@ },

