New Case Study: See how Anthropic automated 95% of dependency reviews with Socket. Learn More
Socket
Sign in · Demo · Install
Socket

@fluidframework/driver-utils

Package Overview
Dependencies
Maintainers
3
Versions
590
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@fluidframework/driver-utils - npm Package Compare versions

Comparing version 0.37.0 to 0.37.1

2

dist/packageVersion.d.ts

@@ -8,3 +8,3 @@ /*!

export declare const pkgName = "@fluidframework/driver-utils";
export declare const pkgVersion = "0.37.0";
export declare const pkgVersion = "0.37.1";
//# sourceMappingURL=packageVersion.d.ts.map

@@ -11,3 +11,3 @@ "use strict";

exports.pkgName = "@fluidframework/driver-utils";
exports.pkgVersion = "0.37.0";
exports.pkgVersion = "0.37.1";
//# sourceMappingURL=packageVersion.js.map

@@ -55,3 +55,3 @@ "use strict";

common_utils_1.assert(this.to !== undefined, 0x104 /* "undefined end point for parallel fetch" */);
common_utils_1.assert(this.nextToDeliver === this.to, 0x105 /* "unexpected end point for parallel fetch" */);
common_utils_1.assert(this.nextToDeliver >= this.to, 0x105 /* "unexpected end point for parallel fetch" */);
this.working = false;

@@ -71,2 +71,3 @@ this.endEvent.resolve();

this.results.delete(this.nextToDeliver);
common_utils_1.assert(value.length <= this.payloadSize, 0x1d9 /* "addRequestCore() should break into smaller chunks" */);
this.nextToDeliver += value.length;

@@ -77,3 +78,2 @@ this.responseCallback(value);

if (this.working) {
common_utils_1.assert(this.requestsInFlight !== 0 || this.results.size === 0, 0x106 /* "in unexpected state after dispatching results" */);
if (this.requestsInFlight === 0) {

@@ -165,4 +165,24 @@ // we should have dispatched everything, no matter whether we knew about the end or not.

if (this.working) {
if (payload.length !== 0) {
this.results.set(from, payload);
const fromOrig = from;
const length = payload.length;
let fullChunk = (requestedLength <= length); // we can possible get more than we asked.
if (length !== 0) {
// We can get more than we asked for!
// This can screw up logic in dispatch!
// So push only batch size, and keep the rest for later - if conditions are favorable, we
// will be able to use it. If not (parallel request overlapping these ops), it's easier to
// discard them and wait for another (overlapping) request to come in later.
if (requestedLength < length) {
// This is error in a sense that it's not expected and likely points bug in other layer.
// This layer copes with this situation just fine.
this.logger.sendTelemetryEvent({
eventName: "ParallelRequests_Over",
from,
to,
length,
});
}
const data = payload.splice(0, requestedLength);
this.results.set(from, data);
from += data.length;
}

@@ -182,4 +202,2 @@ else {

}
let fullChunk = (requestedLength <= payload.length); // we can possible get more than we asked.
from += payload.length;
if (!partial && !fullChunk) {

@@ -189,3 +207,2 @@ if (!this.knewTo) {

// The END
common_utils_1.assert(!this.knewTo, 0x111 /* "should not know futher boundary at end" */);
this.to = from;

@@ -201,7 +218,7 @@ }

// This layer will just keep trying until it gets full set.
this.logger.sendErrorEvent({
eventName: "ParallelRequestsPartial",
from,
this.logger.sendPerformanceEvent({
eventName: "ParallelRequests_Partial",
from: fromOrig,
to,
length: payload.length,
length,
});

@@ -211,2 +228,7 @@ }

// we can go after full chunk at the end if we received partial chunk, or more than asked
// Also if we got more than we asked to, we can actually use those ops!
if (payload.length !== 0) {
this.results.set(from, payload);
from += payload.length;
}
this.latestRequested = from;

@@ -213,0 +235,0 @@ fullChunk = true;

@@ -8,3 +8,3 @@ /*!

export declare const pkgName = "@fluidframework/driver-utils";
export declare const pkgVersion = "0.37.0";
export declare const pkgVersion = "0.37.1";
//# sourceMappingURL=packageVersion.d.ts.map

@@ -8,3 +8,3 @@ /*!

export const pkgName = "@fluidframework/driver-utils";
export const pkgVersion = "0.37.0";
export const pkgVersion = "0.37.1";
//# sourceMappingURL=packageVersion.js.map

@@ -52,3 +52,3 @@ /*!

assert(this.to !== undefined, 0x104 /* "undefined end point for parallel fetch" */);
assert(this.nextToDeliver === this.to, 0x105 /* "unexpected end point for parallel fetch" */);
assert(this.nextToDeliver >= this.to, 0x105 /* "unexpected end point for parallel fetch" */);
this.working = false;

@@ -68,2 +68,3 @@ this.endEvent.resolve();

this.results.delete(this.nextToDeliver);
assert(value.length <= this.payloadSize, 0x1d9 /* "addRequestCore() should break into smaller chunks" */);
this.nextToDeliver += value.length;

@@ -74,3 +75,2 @@ this.responseCallback(value);

if (this.working) {
assert(this.requestsInFlight !== 0 || this.results.size === 0, 0x106 /* "in unexpected state after dispatching results" */);
if (this.requestsInFlight === 0) {

@@ -162,4 +162,24 @@ // we should have dispatched everything, no matter whether we knew about the end or not.

if (this.working) {
if (payload.length !== 0) {
this.results.set(from, payload);
const fromOrig = from;
const length = payload.length;
let fullChunk = (requestedLength <= length); // we can possible get more than we asked.
if (length !== 0) {
// We can get more than we asked for!
// This can screw up logic in dispatch!
// So push only batch size, and keep the rest for later - if conditions are favorable, we
// will be able to use it. If not (parallel request overlapping these ops), it's easier to
// discard them and wait for another (overlapping) request to come in later.
if (requestedLength < length) {
// This is error in a sense that it's not expected and likely points bug in other layer.
// This layer copes with this situation just fine.
this.logger.sendTelemetryEvent({
eventName: "ParallelRequests_Over",
from,
to,
length,
});
}
const data = payload.splice(0, requestedLength);
this.results.set(from, data);
from += data.length;
}

@@ -179,4 +199,2 @@ else {

}
let fullChunk = (requestedLength <= payload.length); // we can possible get more than we asked.
from += payload.length;
if (!partial && !fullChunk) {

@@ -186,3 +204,2 @@ if (!this.knewTo) {

// The END
assert(!this.knewTo, 0x111 /* "should not know futher boundary at end" */);
this.to = from;

@@ -198,7 +215,7 @@ }

// This layer will just keep trying until it gets full set.
this.logger.sendErrorEvent({
eventName: "ParallelRequestsPartial",
from,
this.logger.sendPerformanceEvent({
eventName: "ParallelRequests_Partial",
from: fromOrig,
to,
length: payload.length,
length,
});

@@ -208,2 +225,7 @@ }

// we can go after full chunk at the end if we received partial chunk, or more than asked
// Also if we got more than we asked to, we can actually use those ops!
if (payload.length !== 0) {
this.results.set(from, payload);
from += payload.length;
}
this.latestRequested = from;

@@ -210,0 +232,0 @@ fullChunk = true;

{
"name": "@fluidframework/driver-utils",
"version": "0.37.0",
"version": "0.37.1",
"description": "Collection of utility functions for Fluid drivers",

@@ -59,8 +59,8 @@ "homepage": "https://fluidframework.com",

"@fluidframework/common-utils": "^0.29.0",
"@fluidframework/core-interfaces": "^0.37.0",
"@fluidframework/driver-definitions": "^0.37.0",
"@fluidframework/core-interfaces": "^0.37.1",
"@fluidframework/driver-definitions": "^0.37.1",
"@fluidframework/gitresources": "^0.1022.0",
"@fluidframework/protocol-base": "^0.1022.0",
"@fluidframework/protocol-definitions": "^0.1022.0",
"@fluidframework/telemetry-utils": "^0.37.0",
"@fluidframework/telemetry-utils": "^0.37.1",
"assert": "^2.0.0",

@@ -72,4 +72,4 @@ "uuid": "^8.3.1"

"@fluidframework/eslint-config-fluid": "^0.23.0",
"@fluidframework/mocha-test-setup": "^0.37.0",
"@fluidframework/runtime-utils": "^0.37.0",
"@fluidframework/mocha-test-setup": "^0.37.1",
"@fluidframework/runtime-utils": "^0.37.1",
"@microsoft/api-extractor": "^7.13.1",

@@ -76,0 +76,0 @@ "@types/assert": "^1.5.2",

@@ -9,2 +9,2 @@ /*!

export const pkgName = "@fluidframework/driver-utils";
export const pkgVersion = "0.37.0";
export const pkgVersion = "0.37.1";

@@ -65,3 +65,3 @@ /*!

assert(this.to !== undefined, 0x104 /* "undefined end point for parallel fetch" */);
assert(this.nextToDeliver === this.to, 0x105 /* "unexpected end point for parallel fetch" */);
assert(this.nextToDeliver >= this.to, 0x105 /* "unexpected end point for parallel fetch" */);
this.working = false;

@@ -83,2 +83,3 @@ this.endEvent.resolve();

this.results.delete(this.nextToDeliver);
assert(value.length <= this.payloadSize, 0x1d9 /* "addRequestCore() should break into smaller chunks" */);
this.nextToDeliver += value.length;

@@ -90,5 +91,2 @@ this.responseCallback(value);

if (this.working) {
assert(this.requestsInFlight !== 0 || this.results.size === 0,
0x106 /* "in unexpected state after dispatching results" */);
if (this.requestsInFlight === 0) {

@@ -199,4 +197,25 @@ // we should have dispatched everything, no matter whether we knew about the end or not.

if (this.working) {
if (payload.length !== 0) {
this.results.set(from, payload);
const fromOrig = from;
const length = payload.length;
let fullChunk = (requestedLength <= length); // we can possible get more than we asked.
if (length !== 0) {
// We can get more than we asked for!
// This can screw up logic in dispatch!
// So push only batch size, and keep the rest for later - if conditions are favorable, we
// will be able to use it. If not (parallel request overlapping these ops), it's easier to
// discard them and wait for another (overlapping) request to come in later.
if (requestedLength < length) {
// This is error in a sense that it's not expected and likely points bug in other layer.
// This layer copes with this situation just fine.
this.logger.sendTelemetryEvent({
eventName: "ParallelRequests_Over",
from,
to,
length,
});
}
const data = payload.splice(0, requestedLength);
this.results.set(from, data);
from += data.length;
} else {

@@ -217,5 +236,2 @@ // 1. empty (partial) chunks should not be returned by various caching / adapter layers -

let fullChunk = (requestedLength <= payload.length); // we can possible get more than we asked.
from += payload.length;
if (!partial && !fullChunk) {

@@ -225,3 +241,2 @@ if (!this.knewTo) {

// The END
assert(!this.knewTo, 0x111 /* "should not know futher boundary at end" */);
this.to = from;

@@ -237,7 +252,7 @@ }

// This layer will just keep trying until it gets full set.
this.logger.sendErrorEvent({
eventName: "ParallelRequestsPartial",
from,
this.logger.sendPerformanceEvent({
eventName: "ParallelRequests_Partial",
from: fromOrig,
to,
length: payload.length,
length,
});

@@ -248,2 +263,8 @@ }

// we can go after full chunk at the end if we received partial chunk, or more than asked
// Also if we got more than we asked to, we can actually use those ops!
if (payload.length !== 0) {
this.results.set(from, payload);
from += payload.length;
}
this.latestRequested = from;

@@ -250,0 +271,0 @@ fullChunk = true;

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Socket — SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc