Huge News! Announcing our $40M Series B led by Abstract Ventures. Learn More
Socket
Sign in · Demo · Install
Socket

@automerge/automerge-repo

Package Overview
Dependencies
Maintainers
4
Versions
69
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@automerge/automerge-repo - npm Package Compare versions

Comparing version 1.1.4 to 1.1.5

dist/helpers/tests/storage-adapter-tests.d.ts

6

dist/DocHandle.js

@@ -204,4 +204,6 @@ import * as A from "@automerge/automerge/next";

#statePromise(awaitStates) {
const awaitStatesArray = Array.isArray(awaitStates) ? awaitStates : [awaitStates];
return waitFor(this.#machine, s => awaitStatesArray.some((state) => s.matches(state)),
const awaitStatesArray = Array.isArray(awaitStates)
? awaitStates
: [awaitStates];
return waitFor(this.#machine, s => awaitStatesArray.some(state => s.matches(state)),
// use a longer delay here so as not to race with other delays

@@ -208,0 +210,0 @@ { timeout: this.#timeoutDelay * 2 });

@@ -14,3 +14,3 @@ import type { NetworkAdapterInterface } from "../../network/NetworkAdapterInterface.js";

*/
export declare function runAdapterTests(_setup: SetupFn, title?: string): void;
export declare function runNetworkAdapterTests(_setup: SetupFn, title?: string): void;
type Network = NetworkAdapterInterface | NetworkAdapterInterface[];

@@ -17,0 +17,0 @@ export type SetupFn = () => Promise<{

@@ -18,3 +18,3 @@ import assert from "assert";

*/
export function runAdapterTests(_setup, title) {
export function runNetworkAdapterTests(_setup, title) {
// Wrap the provided setup function

@@ -27,3 +27,3 @@ const setup = async () => {

};
describe(`Adapter acceptance tests ${title ? `(${title})` : ""}`, () => {
describe(`Network adapter acceptance tests ${title ? `(${title})` : ""}`, () => {
it("can sync 2 repos", async () => {

@@ -30,0 +30,0 @@ const doTest = async (a, b) => {

@@ -85,2 +85,10 @@ import { EventEmitter } from "eventemitter3";

storageId: () => Promise<StorageId | undefined>;
/**
* Waits for Repo to finish write changes to disk.
* @hidden this API is experimental and may change
* @param documents - if provided, only waits for the specified documents
* @param timeout - if provided, the maximum time to wait in milliseconds (rejects on timeout)
* @returns Promise<void>
*/
flush(documents?: DocumentId[], timeout?: number): Promise<void>;
}

@@ -87,0 +95,0 @@ export interface RepoConfig {

@@ -386,2 +386,26 @@ import { next as Automerge } from "@automerge/automerge";

};
/**
* Waits for the Repo to finish writing pending changes to disk.
* @hidden this API is experimental and may change
* @param documents - if provided, only waits for the specified documents
* @param timeout - if provided, the maximum time to wait in milliseconds (rejects on timeout)
* @returns Promise<void>
*/
async flush(documents, timeout) {
// Without a storage subsystem there is nothing to flush.
if (!this.storageSubsystem) {
return Promise.resolve();
}
// NOTE(review): a documentId not present in the cache yields `undefined`
// here, so `handle.docSync()` below would throw — presumably callers only
// pass ids of already-cached documents; verify against callers.
const handles = documents
? documents.map(id => this.#handleCache[id])
: Object.values(this.#handleCache);
return Promise.all(handles.map(async (handle) => {
const doc = handle.docSync();
// A handle with no materialized document has nothing to persist.
if (!doc) {
return;
}
return this.storageSubsystem.flush(handle.documentId, doc, timeout);
})).then(() => {
/* No-op. To return `void` and not `void[]` */
});
}
}

@@ -51,3 +51,8 @@ import * as A from "@automerge/automerge/next";

saveSyncState(documentId: DocumentId, storageId: StorageId, syncState: A.SyncState): Promise<void>;
/**
* Waiting for document state to be written to disk.
* @deprecated because it will be changed soon.
*/
flush(documentId: DocumentId, doc: A.Doc<unknown>, timeout?: number): Promise<void>;
}
//# sourceMappingURL=StorageSubsystem.d.ts.map

@@ -8,2 +8,3 @@ import * as A from "@automerge/automerge/next";

import * as Uuid from "uuid";
import { EventEmitter } from "eventemitter3";
/**

@@ -23,2 +24,3 @@ * The storage subsystem is responsible for saving and loading Automerge documents to and from

#log = debug(`automerge-repo:storage-subsystem`);
#saved = new EventEmitter();
constructor(storageAdapter) {

@@ -129,2 +131,3 @@ this.#storageAdapter = storageAdapter;

this.#storedHeads.set(documentId, A.getHeads(doc));
this.#saved.emit("saved");
}

@@ -192,2 +195,26 @@ /**

/**
* Waits for the given document's current state to be written to disk.
* @deprecated because it will be changed soon.
*/
async flush(documentId, doc, timeout) {
return new Promise((resolve, reject) => {
let timeoutId;
// NOTE(review): a falsy timeout (including 0) disables the timer entirely,
// leaving the promise to resolve only when the document is saved.
if (timeout) {
timeoutId = setTimeout(() => {
// Detach the listener before rejecting so it doesn't leak.
this.#saved.off("saved", checkIfSaved);
reject(new Error("Timed out waiting for save"));
}, timeout);
}
const checkIfSaved = () => {
// The document counts as flushed once there is nothing left to save.
if (!this.#shouldSave(documentId, doc)) {
this.#saved.off("saved", checkIfSaved);
// clearTimeout(undefined) is a harmless no-op when no timer was set.
clearTimeout(timeoutId);
resolve();
}
};
this.#saved.on("saved", checkIfSaved);
// Check synchronously in case the document is already saved, so we
// don't wait for a "saved" event that may never fire.
checkIfSaved();
});
}
/**
* Returns true if the document has changed since the last time it was saved.

@@ -194,0 +221,0 @@ */

@@ -155,3 +155,3 @@ import * as A from "@automerge/automerge/next";

beginSync(peerIds) {
const noPeersWithDocument = peerIds.every((peerId) => this.#peerDocumentStatuses[peerId] in ["unavailable", "wants"]);
const noPeersWithDocument = peerIds.every(peerId => this.#peerDocumentStatuses[peerId] in ["unavailable", "wants"]);
// At this point if we don't have anything in our storage, we need to use an empty doc to sync

@@ -158,0 +158,0 @@ // with; but we don't want to surface that state to the front end

{
"name": "@automerge/automerge-repo",
"version": "1.1.4",
"version": "1.1.5",
"description": "A repository object to manage a collection of automerge documents",

@@ -58,3 +58,3 @@ "repository": "https://github.com/automerge/automerge-repo/tree/master/packages/automerge-repo",

},
"gitHead": "e819a7ed6302ce34f372b119ea3e8bab052e3af6"
"gitHead": "64edfaea5e53e77cd9158fc1df52fea85801db71"
}

@@ -247,8 +247,10 @@ import * as A from "@automerge/automerge/next"

#statePromise(awaitStates: HandleState | HandleState[]) {
const awaitStatesArray = Array.isArray(awaitStates) ? awaitStates : [awaitStates]
const awaitStatesArray = Array.isArray(awaitStates)
? awaitStates
: [awaitStates]
return waitFor(
this.#machine,
s => awaitStatesArray.some((state) => s.matches(state)),
s => awaitStatesArray.some(state => s.matches(state)),
// use a longer delay here so as not to race with other delays
{timeout: this.#timeoutDelay * 2}
{ timeout: this.#timeoutDelay * 2 }
)

@@ -255,0 +257,0 @@ }

@@ -20,3 +20,3 @@ import assert from "assert"

*/
export function runAdapterTests(_setup: SetupFn, title?: string): void {
export function runNetworkAdapterTests(_setup: SetupFn, title?: string): void {
// Wrap the provided setup function

@@ -32,3 +32,5 @@ const setup = async () => {

describe(`Adapter acceptance tests ${title ? `(${title})` : ""}`, () => {
describe(`Network adapter acceptance tests ${
title ? `(${title})` : ""
}`, () => {
it("can sync 2 repos", async () => {

@@ -35,0 +37,0 @@ const doTest = async (

@@ -5,5 +5,5 @@ import { next as Automerge } from "@automerge/automerge"

import {
generateAutomergeUrl,
interpretAsDocumentId,
parseAutomergeUrl,
generateAutomergeUrl,
interpretAsDocumentId,
parseAutomergeUrl,
} from "./AutomergeUrl.js"

@@ -14,3 +14,6 @@ import { DocHandle, DocHandleEncodedChangePayload } from "./DocHandle.js"

import { throttle } from "./helpers/throttle.js"
import { NetworkAdapterInterface, type PeerMetadata } from "./network/NetworkAdapterInterface.js"
import {
NetworkAdapterInterface,
type PeerMetadata,
} from "./network/NetworkAdapterInterface.js"
import { NetworkSubsystem } from "./network/NetworkSubsystem.js"

@@ -507,2 +510,29 @@ import { RepoMessage } from "./network/messages.js"

}
/**
* Waits for the Repo to finish writing pending changes to disk.
* @hidden this API is experimental and may change
* @param documents - if provided, only waits for the specified documents
* @param timeout - if provided, the maximum time to wait in milliseconds (rejects on timeout)
* @returns Promise<void>
*/
async flush(documents?: DocumentId[], timeout?: number): Promise<void> {
// Without a storage subsystem there is nothing to flush.
if (!this.storageSubsystem) {
return Promise.resolve()
}
// NOTE(review): a documentId not present in the cache yields `undefined`
// here, so `handle.docSync()` below would throw — presumably callers only
// pass ids of already-cached documents; verify against callers.
const handles = documents
? documents.map(id => this.#handleCache[id])
: Object.values(this.#handleCache)
return Promise.all(
handles.map(async handle => {
const doc = handle.docSync()
// A handle with no materialized document has nothing to persist.
if (!doc) {
return
}
return this.storageSubsystem!.flush(handle.documentId, doc, timeout)
})
).then(() => {
/* No-op. To return `void` and not `void[]` */
})
}
}

@@ -509,0 +539,0 @@

@@ -11,2 +11,3 @@ import * as A from "@automerge/automerge/next"

import * as Uuid from "uuid"
import { EventEmitter } from "eventemitter3"

@@ -32,2 +33,4 @@ /**

#saved = new EventEmitter<{ saved: () => void }>()
constructor(storageAdapter: StorageAdapterInterface) {

@@ -161,2 +164,3 @@ this.#storageAdapter = storageAdapter

this.#storedHeads.set(documentId, A.getHeads(doc))
this.#saved.emit("saved")
}

@@ -252,2 +256,30 @@

/**
* Waits for the given document's current state to be written to disk.
* @deprecated because it will be changed soon.
*/
async flush(documentId: DocumentId, doc: A.Doc<unknown>, timeout?: number) {
return new Promise<void>((resolve, reject) => {
let timeoutId: NodeJS.Timeout
// NOTE(review): a falsy timeout (including 0) disables the timer entirely,
// leaving the promise to resolve only when the document is saved.
if (timeout) {
timeoutId = setTimeout(() => {
// Detach the listener before rejecting so it doesn't leak.
this.#saved.off("saved", checkIfSaved)
reject(new Error("Timed out waiting for save"))
}, timeout)
}
const checkIfSaved = () => {
// The document counts as flushed once there is nothing left to save.
if (!this.#shouldSave(documentId, doc)) {
this.#saved.off("saved", checkIfSaved)
// clearTimeout(undefined) is a harmless no-op when no timer was set.
clearTimeout(timeoutId)
resolve()
}
}
this.#saved.on("saved", checkIfSaved)
// Check synchronously in case the document is already saved, so we
// don't wait for a "saved" event that may never fire.
checkIfSaved()
})
}
/**
* Returns true if the document has changed since the last time it was saved.

@@ -254,0 +286,0 @@ */

@@ -231,3 +231,3 @@ import * as A from "@automerge/automerge/next"

const noPeersWithDocument = peerIds.every(
(peerId) => this.#peerDocumentStatuses[peerId] in ["unavailable", "wants"]
peerId => this.#peerDocumentStatuses[peerId] in ["unavailable", "wants"]
)

@@ -234,0 +234,0 @@

@@ -1,2 +0,2 @@

import { pause } from "../../src/helpers/pause.js";
import { pause } from "../../src/helpers/pause.js"
import { Message, NetworkAdapter, PeerId } from "../../src/index.js"

@@ -6,12 +6,12 @@

#startReady: boolean
#sendMessage?: SendMessageFn;
#sendMessage?: SendMessageFn
constructor(opts: Options = {startReady: true}) {
constructor(opts: Options = { startReady: true }) {
super()
this.#startReady = opts.startReady;
this.#sendMessage = opts.sendMessage;
this.#startReady = opts.startReady
this.#sendMessage = opts.sendMessage
}
connect(peerId: PeerId) {
this.peerId = peerId;
this.peerId = peerId
if (this.#startReady) {

@@ -25,32 +25,34 @@ this.emit("ready", { network: this })

peerCandidate(peerId: PeerId) {
this.emit('peer-candidate', { peerId, peerMetadata: {} });
this.emit("peer-candidate", { peerId, peerMetadata: {} })
}
override send(message: Message) {
this.#sendMessage?.(message);
this.#sendMessage?.(message)
}
receive(message: Message) {
this.emit('message', message);
this.emit("message", message)
}
static createConnectedPair({ latency = 10 }: { latency?: number} = {}) {
static createConnectedPair({ latency = 10 }: { latency?: number } = {}) {
const adapter1: DummyNetworkAdapter = new DummyNetworkAdapter({
startReady: true,
sendMessage: (message: Message) => pause(latency).then(() => adapter2.receive(message)),
});
sendMessage: (message: Message) =>
pause(latency).then(() => adapter2.receive(message)),
})
const adapter2: DummyNetworkAdapter = new DummyNetworkAdapter({
startReady: true,
sendMessage: (message: Message) => pause(latency).then(() => adapter1.receive(message)),
});
sendMessage: (message: Message) =>
pause(latency).then(() => adapter1.receive(message)),
})
return [adapter1, adapter2];
return [adapter1, adapter2]
}
}
type SendMessageFn = (message: Message) => void;
type SendMessageFn = (message: Message) => void
type Options = {
startReady?: boolean;
sendMessage?: SendMessageFn;
startReady?: boolean
sendMessage?: SendMessageFn
}

@@ -1,2 +0,6 @@

import { Chunk, StorageAdapterInterface, type StorageKey } from "../../src/index.js"
import {
Chunk,
StorageAdapterInterface,
type StorageKey,
} from "../../src/index.js"

@@ -3,0 +7,0 @@ export class DummyStorageAdapter implements StorageAdapterInterface {

@@ -379,4 +379,3 @@ import { next as A } from "@automerge/automerge"

// wait because storage id is not initialized immediately
await pause()
await repo.flush()

@@ -464,2 +463,118 @@ const initialKeys = storage.keys()

// Tests for Repo.flush(): the storage adapter blocks every save until
// resume() is called, so each test can observe exactly what has (and has
// not) been persisted at a given point.
describe("flush behaviour", () => {
const setup = () => {
// Resolvers for save() calls currently parked in the paused adapter.
let blockedSaves = []
// Releases every blocked save and resets the queue.
let resume = () => {
blockedSaves.forEach(resolve => resolve())
blockedSaves = []
}
const pausedStorage = new DummyStorageAdapter()
{
// Wrap save() so each call waits on a promise until resume() fires.
const originalSave = pausedStorage.save.bind(pausedStorage)
pausedStorage.save = async (...args) => {
await new Promise(resolve => {
// NOTE(review): debug logging left in — consider removing.
console.log("made a promise", ...args[0])
blockedSaves.push(resolve)
})
await pause(0)
// otherwise all the save promises resolve together
// which prevents testing flushing a single docID
console.log("resuming save", ...args[0])
return originalSave(...args)
}
}
const repo = new Repo({
storage: pausedStorage,
network: [],
})
// Create a pair of handles
const handle = repo.create<{ foo: string }>({ foo: "first" })
const handle2 = repo.create<{ foo: string }>({ foo: "second" })
return { resume, pausedStorage, repo, handle, handle2 }
}
it("should not be in a new repo yet because the storage is slow", async () => {
const { pausedStorage, repo, handle, handle2 } = setup()
expect((await handle.doc()).foo).toEqual("first")
expect((await handle2.doc()).foo).toEqual("second")
// Reload repo
const repo2 = new Repo({
storage: pausedStorage,
network: [],
})
// Could not find the document that is not yet saved because of slow storage.
const reloadedHandle = repo2.find<{ foo: string }>(handle.url)
// NOTE(review): chai-style `.to.deep.equal` mixed with jest-style
// `toEqual` used elsewhere in this suite — confirm both assertion APIs
// are actually available in this test environment.
expect(pausedStorage.keys()).to.deep.equal([])
expect(await reloadedHandle.doc()).toEqual(undefined)
})
it("should be visible to a new repo after flush()", async () => {
const { resume, pausedStorage, repo, handle, handle2 } = setup()
// Start the flush first, then unblock storage so it can complete.
const flushPromise = repo.flush()
resume()
await flushPromise
// Check that the data is now saved.
expect(pausedStorage.keys().length).toBeGreaterThan(0)
{
// Reload repo
const repo = new Repo({
storage: pausedStorage,
network: [],
})
expect(
(await repo.find<{ foo: string }>(handle.documentId).doc()).foo
).toEqual("first")
expect(
(await repo.find<{ foo: string }>(handle2.documentId).doc()).foo
).toEqual("second")
}
})
it("should only block on flushing requested documents", async () => {
const { resume, pausedStorage, repo, handle, handle2 } = setup()
const flushPromise = repo.flush([handle.documentId])
resume()
await flushPromise
// Check that the data is now saved.
expect(pausedStorage.keys().length).toBeGreaterThan(0)
{
// Reload repo
const repo = new Repo({
storage: pausedStorage,
network: [],
})
expect(
(await repo.find<{ foo: string }>(handle.documentId).doc()).foo
).toEqual("first")
// Really, it's okay if the second one is also flushed but I'm forcing the issue
// in the test storage engine above to make sure the behaviour is as documented
expect(
await repo.find<{ foo: string }>(handle2.documentId).doc()
).toEqual(undefined)
}
})
it("should time out with failure after a specified delay", async () => {
// Storage is never resumed here, so the flush can only end by timing out.
const { resume, pausedStorage, repo, handle, handle2 } = setup()
const flushPromise = repo.flush([handle.documentId], 10)
// NOTE(review): this rejection assertion is not awaited, so the test can
// finish before the expectation settles — should be `await expect(...)`.
expect(flushPromise).rejects.toThrowError("Timed out waiting for save")
// Nothing should have been persisted, since no save ever completed.
expect(pausedStorage.keys().length).toBe(0)
})
})
describe("with peers (linear network)", async () => {

@@ -466,0 +581,0 @@ it("n-peers connected in a line", async () => {

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Socket · SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc