ddeep-core

ddeep-core - npm Package Compare versions

Comparing version 1.0.8 to 1.0.9

CODE_OF_CONDUCT.md


ddeep.config.js
module.exports = {
    /* Set storage to true to enable persistent data storage */
    "storage": false,
    /* Set the port you want to run the peer on */
    "port": 9999,
    // set logs to false if you don't want to see real-time logs in your peer
    "logs": true,
    /*
        Set a list of IP addresses (of peers, servers, or websites) that are able to connect to this core
        this can help prevent cross-site connections to your core
    */
    "whitelist": [],
    /* Add your huggingFace token to be used with AI smart policies */
    "hf": null,
    /*
        Set a checkpoint interval timer in ms to make a recovery checkpoint of the database
        example: setting "checkpoint" to 60000 will make a recovery point every minute
        this works only with persistent storage enabled
    */
    "checkpoint": null,
    /*
        Set a reset_graph interval timer in ms to clear the core's cached graph
        example: setting "reset_graph" to 60000 will clear the graph data cache every minute
    */
    "reset_graph": null,
    /*
        Set a reset_listeners interval timer in ms to clear the core's listeners
        Listeners record all nodes being listened to with all peer's IDs listening to them
        and while a peer is removed from the listeners when it's disconnected,
        it's "strongly recommended" to use 'reset_listeners' to keep things clear and avoid possible issues
        you can disable this option by setting it to null or 0
    */
    "reset_listeners": 6000000,
}
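
For orientation, a minimal sketch of how a core might consume these options (field names follow the config above; the defaults mirror the entry file shown later in this diff):

// Sketch (not part of the package): reading ddeep.config.js with fallbacks.
var opt = require('./ddeep.config');
var port = opt.port || 9999;               // WebSocket/HTTP port
var storage = opt.storage || false;        // persistent storage on/off
var checkpoint = opt.checkpoint || false;  // recovery checkpoint interval (ms)
// a checkpoint timer is only honored when persistent storage is enabled
if (storage && checkpoint) { /* schedule recovery(checkpoint) here */ }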

@@ -1,30 +0,58 @@

/*
This file was modified, for license see https://github.com/amark/gun/blob/master/LICENSE.md
*/
function Dup() {
    const dup = { s: {} }; const opt = { max: 1000, age: 1000 * 9 };
    let cache = {};
    let tracked = new Set();
    dup.check = function(id) {
        if (cache[id]) {
            return cache[id];
        }
        const result = dup.s[id];
        cache[id] = result;
        return result;
    };
    dup.track = function(id) {
        if (tracked.has(id)) {
            return id;
        }
        tracked.add(id);
        dup.s[id] = (+new Date());
        if (!dup.to) {
            dup.to = setTimeout(function() {
                for (const [id, time] of Object.entries(dup.s)) {
                    if (opt.age > ((+new Date()) - Number(time))) {
                        continue;
                    }
                    tracked.delete(id);
                    delete dup.s[id];
                }
                dup.to = null;
            }, opt.age);
        }
        return id;
    };
    dup.destroy = function() {
        clearTimeout(dup.to);
        cache = {};
        tracked.clear();
    };
    return dup;
}
Dup.random = function() { return Math.random().toString(36).slice(-6) };
try {
    module.exports = Dup;
} catch (e) { };
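
A usage sketch of the deduplicator above (the require path is assumed from the diff's file layout):

// Sketch: tracking and checking message IDs with Dup.
var Dup = require('./dup');
var dup = Dup();
var id = Dup.random();          // e.g. "k3j9x2"
dup.track(id);                  // record the ID with a timestamp
console.log(!!dup.check(id));   // true: already seen, so the message is skipped
dup.destroy();                  // clear the timer, cache, and tracked set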
var PE = require('./peers/emitter'); // peers emitter
var store = require('./storage/store'); // read and write data to storage
var Dup = require('./dup'), dup = Dup(); // check and track data
var SCANNER = require('./policies/scanner2.ts'); // scan and process policies
var RFG = require('./storage/get_from_graph'); // read data from pooling graph
var listen = require('./peers/listen'); // add listeners to peers

@@ -17,17 +16,15 @@ type msg = {

var get = function (peer: any, msg: msg, graph: any, storage: true|false) {
    var soul:string = msg?.get["#"];
    var prop = msg?.get["."];
    if (prop) soul = `${soul}.${prop}`;
    try {
        var ack = RFG(msg.get, graph);
        if (ack) {
            SCANNER(soul, "get", ack, () => {
                listen(soul, peer);
                PE.emit('get', peer, {
PE.emit('get', peer, {

@@ -42,6 +39,6 @@ '#': dup.track(Dup.random()),

        if (!ack && storage){
            store.get(msg.get, (err:any, ack:any) => {
                SCANNER(soul, "get", ack, () => {
                    listen(soul, peer);
                    PE.emit('get', peer, {

@@ -57,6 +54,6 @@ '#': dup.track(Dup.random()),

    } catch (err) {}; // no need to do anything here...
};
module.exports = get;

@@ -1,67 +0,69 @@

/*
This file was not modified, for license see https://github.com/amark/gun/blob/master/LICENSE.md
*/
function HAM (machineState, incomingState, currentState, incomingValue, currentValue) {
    if (machineState < incomingState) { return { defer: true } }
    if (incomingState < currentState) { return { historical: true } }
    if (currentState < incomingState) { return { converge: true, incoming: true } }
    if (incomingState === currentState) {
        let res
        incomingValue = JSON.stringify(incomingValue) || ''
        currentValue = JSON.stringify(currentValue) || '';
        (incomingValue === currentValue)
            ? res = { state: true }
            : (incomingValue < currentValue)
                ? res = { converge: true, current: true }
                : (currentValue < incomingValue)
                    ? res = { converge: true, incoming: true }
                    : res = false
        if (res) { return res };
    }
    return { err: 'Invalid CRDT Data: ' + incomingValue + ' to ' + currentValue + ' at ' + incomingState + ' to ' + currentState }
}
HAM.mix = (change, graph) => {
    const machine = (+new Date()); let diff
    Object.keys(change).forEach((soul) => {
        const node = change[soul]
        Object.keys(node).forEach((key) => {
            const val = node[key]
            if (key === '_') { return }
            const state = node._['>'][key]
            const was = (graph[soul] || { _: { '>': {} } })._['>'][key] || -Infinity
            const known = (graph[soul] || {})[key]
            const ham = HAM(machine, state, was, val, known)
            if (!ham.incoming && ham.defer) {
                console.error('DEFER', key, val)
                return
            }
            (diff || (diff = {}))[soul] = diff[soul] || node
            graph[soul] = graph[soul] || node
            graph[soul][key] = diff[soul][key] = val
            graph[soul]._['>'][key] = diff[soul]._['>'][key] = state
        })
    })
    process.graph = diff
    return diff
}
try { module.exports = HAM } catch (e) { };
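
For orientation, a sketch of how the HAM resolution above behaves for a few illustrative states (the arguments are machineState, incomingState, currentState, incomingValue, currentValue):

// Sketch: HAM outcomes for a local clock at 100.
var HAM = require('./ham');
console.log(HAM(100, 90, 80, 'new', 'old'));  // { converge: true, incoming: true }
console.log(HAM(100, 120, 80, 'new', 'old')); // { defer: true } - incoming is from the future
console.log(HAM(100, 70, 80, 'new', 'old'));  // { historical: true } - incoming is older
console.log(HAM(100, 80, 80, 'x', 'x'));      // { state: true } - same state, same value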

@@ -10,43 +10,47 @@ var EVENTS = require('events');

try {
    process.PEERS[peer].socket.send(JSON.stringify(data));
} catch (err) {}; // we don't really need to do anything here.
});
PE.on('put', function (graph, data) {
    var listening_peers = [];
    var nodes = [];
    var props;
    var dynamic_graph;
    if (graph.includes('.')) {
        nodes = graph.split('.')[0].split('/');
        props = graph.split('.')[1];
    } else {
        nodes = graph.split('/');
    }
    nodes.forEach(node => {
        if (!dynamic_graph) {
            dynamic_graph = node;
        } else {
            dynamic_graph = `${dynamic_graph}/${node}`;
        }
        if (process.listeners[dynamic_graph]) {
            if (listening_peers.indexOf(process.listeners[dynamic_graph])) {
                listening_peers.push(...process.listeners[dynamic_graph]);
            }
        }
    });
    if (props) {
        dynamic_graph = `${dynamic_graph}.${props}`;
        if (process.listeners[dynamic_graph]) {
            listening_peers.push(...process.listeners[dynamic_graph]);
        }
    }
    listening_peers.forEach(peer => {
        try {
            process.PEERS[peer].socket.send(JSON.stringify(data));
        } catch (err) {};
    })

@@ -53,0 +57,0 @@ });
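
To illustrate the fan-out in the new 'put' handler: a write to the path users/123.name is delivered to peers listening on any prefix of that path. A sketch with illustrative listener IDs:

// Sketch: process.listeners maps graph paths to arrays of peer IDs.
process.listeners = {
    'users': ['peer:a'],
    'users/123': ['peer:b'],
    'users/123.name': ['peer:c']
};
// For graph = 'users/123.name', the handler builds the prefixes
// 'users' -> 'users/123' -> 'users/123.name' and collects
// listening_peers = ['peer:a', 'peer:b', 'peer:c'] before sending.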

@@ -1,7 +0,19 @@

function listen(graph, peer) {
    if (!peer || !graph) { return };
    if (!process.PEERS[peer]) { return };
    if (process.listeners[graph]) {
        if (process.listeners[graph].indexOf(peer) === -1) {
            process.listeners[graph].push(peer);
        }
    }
    else if (!process.listeners[graph]) {
        process.listeners[graph] = [peer];
    }
}
module.exports = listen;
import { HfInference } from "@huggingface/inference";
const opt = require("../../ddeep.config");

@@ -5,0 +5,0 @@ // Process AI text classification

@@ -0,13 +1,4 @@

require('./scanner2');
var _processPolicy = require("./processor");
// scan policies for a specific node path and operation and process valid policies

@@ -17,3 +8,2 @@ function scanPolicies (nodes:Array<string>, operation:string, policies:any, data:any, cb:Function) {

// Define the res that will change and be returned later
let res:any = true;
let processedPolicies:Array<string> = [];

@@ -72,3 +62,3 @@ var anyApplied:true|false = false;

var res = await _processPolicy(policy, data);

@@ -75,0 +65,0 @@ // Throw error if res is not a valid (true || false)

@@ -5,4 +5,3 @@ var PE = require('./peers/emitter'); // peers emitter

var HAM = require('./ham'); // conflict resolution algorithm
var SCANNER = require('./policies/scanner2.ts'); // scan and process policies

@@ -14,29 +13,41 @@ type putMsg = {

var put = function (msg: putMsg, graph: any, storage: true|false) {
    try {
        var soul: any;
        // var prop = msg.put[Object.keys(msg.put)[0]]._["."];
        // if (prop) soul = `${soul}.${prop}`;
        for (var key in msg.put) {
            var node = msg.put[key]._['#'];
            soul = node;
        }
        SCANNER(soul, "put", msg.put, () => {
            var change = HAM.mix(msg.put, graph);
            // if storage is enabled, save data and stream it
            if (storage) {
                store.put(change, function (err:any, ok:any) {
                    if (err) {
                        console.log(err.red);
                    }
                    PE.emit('put', soul, {
                        '#': dup.track(Dup.random()),
                        '@': msg['#'],
                        err: err,
                        ok: ok,
                        put: msg.put
                    });
                })
            }
            PE.emit('put', soul, {
                '#': dup.track(Dup.random()),
                '@': msg['#'],
                err: null,
                ok: 1,
                put: msg.put
            });
        });
    } catch (err) {}; // no need to do anything here...

@@ -43,0 +54,0 @@ }
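
For orientation, the rough shape of a put message this handler expects (the soul, state, and values are illustrative, following the gun wire format):

// Sketch: a put message merging { name: 'alice' } into node users/123.
var msg = {
    '#': 'msg-1',
    put: {
        'users/123': {
            _: { '#': 'users/123', '>': { name: 1700000000000 } },
            name: 'alice'
        }
    }
};
// put(msg, graph, true) scans policies, merges via HAM.mix, persists the
// diff when storage is enabled, and emits acks to listening peers.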

@@ -1,17 +0,41 @@

// just basics
require('colors');
var fs = require('fs');
// commands interface
var readline = require('readline'); // process inputs
var CP = require('../lib/commands/processor'); // Commands processor
// storage and data operations
var GET = require('./get');
var PUT = require('./put');
var DUP = require('./dup'), dup = DUP(); // check and track data
var recovery = require('./storage/checkpoint.ts'); // build recover checkpoints
// configurations
var opt = require('../ddeep.config'); // ddeep configurations
const { listeners } = require('process');
// create command interface inputs
var interface_prompt = readline.createInterface({
    input: process.stdin,
    output: process.stdout,
})
// Setup opt
let graph = {};
var port = opt.port || 9999;
var storage = opt.storage || false;
var checkpoint = opt.checkpoint || false;
var logs = opt.logs || false;
var graph_timer = opt.reset_graph || 0;
var listeners_timer = opt.reset_listeners || 0;
var whitelist = opt.whitelist || [];
// add options to the process
process.PEERS = {};
process.storage = storage;
process.port = port;
process.checkpoint = checkpoint;
process.listeners = {};
// Create the server
@@ -21,35 +45,103 @@ const fastify = require('fastify')();

// start recovery function if a checkpoint timer is in place and storage enabled
if (storage && checkpoint) {
    recovery(checkpoint);
}
// clear graph based on the reset_graph timer
if (Number(graph_timer) > 0) {
    clear_graph(graph_timer);
}
// clear listeners based on the reset_listeners timer
if (Number(listeners_timer) > 0) {
    clear_listeners(listeners_timer);
}
// register fastify server
fastify.register(async function (fastify_socket) {
    try {
        // read command interface entry and options
        fs.readFile('./lib/entry/ascii.txt', {}, (error, content) => {
            console.clear();
            if (error) {
                return;
            } else if (content) {
                content = content.toString();
                console.log("\n", `${content}`.blue, "\n");
            }
            console.log("port -> ".yellow, `${port}`.gray);
            console.log("storage -> ".yellow, `${storage}`.gray, "\n");
            receive_command();
        })
    } catch (err) {
        // I'm afraid some cloud hosting services would cause some issues,
        // but no need to do anything.
    };
    // handle simple http serving
    fastify_socket.get('/', (req, reply) => {
        reply.send(`open socket connections to /ddeep`);
    })
    // handle new socket connections
    fastify_socket.get('/ddeep', { websocket: true }, (peer, req) => {
        // get the IP address of the peer connecting to the core
        var peer_ip = req.socket.remoteAddress;
        // check if the IP address is in the whitelist to be able to connect
        if (whitelist.length > 0 && whitelist.indexOf(peer_ip) === -1) {
            peer.socket.send('ACCESS DENIED: you are not allowed to connect to this core...');
            peer.socket.close();
        }
        // push the new peer
        peer.listeners = [];
        var _id = 'peer:' + (Date.now() * Math.random()).toString(36);
        peer._id = _id;
        process.PEERS[_id] = peer;
        // handle messages
        peer.socket.on('message', (data) => {
            // parse message to JSON
            var msg = JSON.parse(data);
            // check the message's ID: return if already tracked, and track it if new
            if (dup.check(msg['#'])) { return };
            dup.track(msg['#']);
            // handle put data
            if (msg.put) {
                PUT(msg, graph, process.storage);
            }
            // handle get data
            else if (msg.get) {
                GET(peer._id, msg, graph, process.storage);
            }
        });
        // delete peer when connection is closed
        peer.socket.on('close', () => {
            try {
                delete process.PEERS[peer._id];
                peer.listeners.forEach(listener => {
                    delete process.listeners[listener][process.listeners[listener].indexOf(peer._id)];
                })
            } catch (err) {} // no need to do anything
        })
    });

@@ -59,45 +151,46 @@

// listen to config port using fastify
fastify.listen({ port }, err => {
    if (err) {
        console.error(err);
        process.exit(1);
    }
});
function receive_command () {
    interface_prompt.question('ddeep > ', async (command) => {
        if (command) {
            command = command.split(" ");
            await CP.emit(command[0], command);
        }
        receive_command();
    })
};
// clear graph every ms
function clear_graph (timer) {
    if (timer < 1000) {
        console.log('\nCancelling clear_graph as it is less than 1000ms and would cause issues\n'.red);
        return;
    }
    setTimeout( () => {
        graph = {};
        clear_graph(timer);
    }, timer);
}
// clear listeners every ms
function clear_listeners (timer) {
    if (timer < 1000) {
        console.log('\nCancelling clear_listeners as it is less than 1000ms and would cause issues\n'.red);
        return;
    }
    setTimeout( () => {
        process.listeners = {};
        clear_listeners(timer);
    }, timer);
}
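
A minimal client sketch for the server above (assuming the ws package; the message ID and soul are illustrative):

// Sketch: connecting to a local ddeep-core peer and requesting a node.
const WebSocket = require('ws');
const socket = new WebSocket('ws://localhost:9999/ddeep');
socket.on('open', () => {
    // gun-style get message; the core replies through the 'get' emitter
    socket.send(JSON.stringify({ '#': 'msg-1', get: { '#': 'users/123' } }));
});
socket.on('message', (data) => {
    console.log('ack:', JSON.parse(data));
});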

@@ -1,26 +0,22 @@

var get_from_graph = (lex, graph) => {
    var soul = lex['#'];
    var key = lex['.'];
    var node = graph[soul];
    var ack = {};
    if (!node) {return null};
    if (key) {
        var tmp = node[key];
        if (!tmp) {return null};
        (node = { _: node._ })[key] = tmp;
        tmp = node._['>'];
        (node._['>'] = {})[key] = tmp[key];
    }
    ack[soul] = node;
    return ack;
}
module.exports = get_from_graph;
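
A small sketch of the lookup above (the graph contents are illustrative):

// Sketch: reading a node, a single property, and a miss from the pooling graph.
var get_from_graph = require('./storage/get_from_graph');
var graph = {
    'users/123': { _: { '#': 'users/123', '>': { name: 1700000000000 } }, name: 'alice' }
};
get_from_graph({ '#': 'users/123' }, graph);              // -> { 'users/123': {...} }
get_from_graph({ '#': 'users/123', '.': 'name' }, graph); // node narrowed to 'name'
get_from_graph({ '#': 'missing' }, graph);                // -> null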

@@ -5,43 +5,41 @@ /*

; (function () {
function Radisk (opt) {
opt = opt || {}
opt.log = opt.log || console.log
opt.file = String(opt.file || 'ddeep_data')
const has = (Radisk.has || (Radisk.has = {}))[opt.file]
if (has) { return has }
opt.max = opt.max || (opt.memory ? (opt.memory * 999 * 999) : 300000000) * 0.3
opt.until = opt.until || opt.wait || 250
opt.batch = opt.batch || (10 * 1000)
opt.chunk = opt.chunk || (1024 * 1024 * 1) // 1MB
opt.code = opt.code || {}
opt.code.from = opt.code.from || '!'
opt.jsonify = true
function ename (t) { return encodeURIComponent(t).replace(/\*/g, '%2A') } // TODO: Hash this also, but allow migration!
function atomic (v) { return u !== v && (!v || typeof v !== 'object') }
const timediate = ('' + u === typeof setImmediate) ? setTimeout : setImmediate
const puff = setTimeout.turn || timediate; var u
const map = Radix.object
let ST = 0
if (!opt.store) {
return opt.log('ERROR: Radisk needs `opt.store` interface with `{get: fn, put: fn (, list: fn)}`!')
}
if (!opt.store.put) {
return opt.log('ERROR: Radisk needs `store.put` interface with `(file, data, cb)`!')
}
if (!opt.store.get) {
return opt.log('ERROR: Radisk needs `store.get` interface with `(file, cb)`!')
}
if (!opt.store.list) {
// opt.log("WARNING: `store.list` interface might be needed!");
}
if ('' + u !== typeof require) { require('./yson') }
const parse = JSON.parseAsync || function (t, cb, r) { let u; try { cb(u, JSON.parse(t, r)) } catch (e) { cb(e) } }
const json = JSON.stringifyAsync || function (v, cb, r, s) { let u; try { cb(u, JSON.stringify(v, r, s)) } catch (e) { cb(e) } }
/*
Any and all storage adapters should...

@@ -51,97 +49,96 @@ 1. Because writing to disk takes time, we should batch data to disk. This improves performance, and reduces potential disk corruption.

*/
const r = function (key, data, cb, tag, DBG) {
if (typeof data === 'function') {
const o = cb || {}
cb = data
r.read(key, cb, o, DBG || tag)
return
}
r.save(key, data, cb, tag, DBG)
}
r.save = function (key, data, cb, tag, DBG) {
const s = { key }; let tags; let f; let d; let q
s.find = function (file) {
let tmp
s.file = file || (file = opt.code.from)
DBG && (DBG = DBG[file] = DBG[file] || {})
DBG && (DBG.sf = DBG.sf || +new Date())
if (tmp = r.disk[file]) { s.mix(u, tmp); return }
r.parse(file, s.mix, u, DBG)
}
s.mix = function (err, disk) {
DBG && (DBG.sml = +new Date())
DBG && (DBG.sm = DBG.sm || +new Date())
if (s.err = err || s.err) { cb(err); return } // TODO: HANDLE BATCH EMIT
const file = s.file = (disk || '').file || s.file; let tmp
if (!disk && file !== opt.code.from) { // corrupt file?
r.find.bad(file) // remove from dir list
r.save(key, data, cb, tag) // try again
return
}
(disk = r.disk[file] || (r.disk[file] = disk || Radix())).file || (disk.file = file)
if (opt.compare) {
data = opt.compare(disk(key), data, key, file)
if (u === data) { cb(err, -1); return } // TODO: HANDLE BATCH EMIT
}
(s.disk = disk)(key, data)
if (tag) {
(tmp = (tmp = disk.tags || (disk.tags = {}))[tag] || (tmp[tag] = r.tags[tag] || (r.tags[tag] = {})))[file] || (tmp[file] = r.one[tag] || (r.one[tag] = cb))
cb = null
}
DBG && (DBG.st = DBG.st || +new Date())
// console.only.i && console.log('mix', disk.Q);
if (disk.Q) { cb && disk.Q.push(cb); return } disk.Q = (cb ? [cb] : [])
disk.to = setTimeout(s.write, opt.until)
}
s.write = function () {
DBG && (DBG.sto = DBG.sto || +new Date())
const file = f = s.file; const disk = d = s.disk
q = s.q = disk.Q
tags = s.tags = disk.tags
delete disk.Q
delete r.disk[file]
delete disk.tags
r.write(file, disk, s.ack, u, DBG)
}
s.ack = function (err, ok) {
DBG && (DBG.sa = DBG.sa || +new Date())
DBG && (DBG.sal = q.length)
let ack, tmp
// TODO!!!! CHANGE THIS INTO PUFF!!!!!!!!!!!!!!!!
for (const id in r.tags) {
if (!r.tags.hasOwnProperty(id)) { continue } const tag = r.tags[id]
if ((tmp = r.disk[f]) && (tmp = tmp.tags) && tmp[tag]) { continue }
ack = tag[f]
delete tag[f]
var ne; for (const k in tag) { if (tag.hasOwnProperty(k)) { ne = true; break } } // is not empty?
if (ne) { continue } // if(!obj_empty(tag)){ continue }
delete r.tags[tag]
ack && ack(err, ok)
}
!q && (q = '')
const l = q.length; let i = 0
// TODO: PERF: Why is acks so slow, what work do they do??? CHECK THIS!!
// TODO: PERF: Why is acks so slow, what work do they do??? CHECK THIS!!
// TODO: PERF: Why is acks so slow, what work do they do??? CHECK THIS!!
// TODO: PERF: Why is acks so slow, what work do they do??? CHECK THIS!!
// TODO: PERF: Why is acks so slow, what work do they do??? CHECK THIS!!
// TODO: PERF: Why is acks so slow, what work do they do??? CHECK THIS!!
// TODO: PERF: Why is acks so slow, what work do they do??? CHECK THIS!!
const S = +new Date()
for (; i < l; i++) { (ack = q[i]) && ack(err, ok) }
console.STAT && console.STAT(S, +new Date() - S, 'rad acks', ename(s.file))
console.STAT && console.STAT(S, q.length, 'rad acks #', ename(s.file))
}
cb || (cb = function (err, ok) { // test delete!
if (!err) { }
})
// console.only.i && console.log('save', key);
r.find(key, s.find)
}
r.disk = {}
r.one = {}
r.tags = {}
/*
Any storage engine at some point will have to do a read in order to write.

@@ -152,200 +149,200 @@ This is true of even systems that use an append only log, if they support updates.

*/
let RWC = 0
r.write = function (file, rad, cb, o, DBG) {
if (!rad) { cb('No radix!'); return }
o = (typeof o === 'object') ? o : { force: o }
const f = function Fractal () { }; let a; let b
f.text = ''
f.file = file = rad.file || (rad.file = file)
if (!file) { cb('What file?'); return }
f.write = function () {
const text = rad.raw = f.text
r.disk[file = rad.file || f.file || file] = rad
const S = +new Date()
DBG && (DBG.wd = S)
// console.only.i && console.log('add', file);
r.find.add(file, function add (err) {
DBG && (DBG.wa = +new Date())
if (err) { cb(err); return }
// console.only.i && console.log('disk', file, text);
opt.store.put(ename(file), text, function safe (err, ok) {
DBG && (DBG.wp = +new Date())
console.STAT && console.STAT(S, ST = +new Date() - S, 'wrote disk', JSON.stringify(file), ++RWC, 'total all writes.')
// console.only.i && console.log('done', err, ok || 1, cb);
cb(err, ok || 1)
if (!rad.Q) { delete r.disk[file] } // VERY IMPORTANT! Clean up memory, but not if there is already queued writes on it!
})
})
}
f.split = function () {
const S = +new Date()
DBG && (DBG.wf = S)
f.text = ''
if (!f.count) {
f.count = 0
Radix.map(rad, function count () { f.count++ }) // TODO: Perf? Any faster way to get total length?
}
DBG && (DBG.wfc = f.count)
f.limit = Math.ceil(f.count / 2)
const SC = f.count
f.count = 0
DBG && (DBG.wf1 = +new Date())
f.sub = Radix()
Radix.map(rad, f.slice, { reverse: 1 }) // IMPORTANT: DO THIS IN REVERSE, SO LAST HALF OF DATA MOVED TO NEW FILE BEFORE DROPPING FROM CURRENT FILE.
DBG && (DBG.wf2 = +new Date())
r.write(f.end, f.sub, f.both, o)
DBG && (DBG.wf3 = +new Date())
f.hub = Radix()
Radix.map(rad, f.stop)
DBG && (DBG.wf4 = +new Date())
r.write(rad.file, f.hub, f.both, o)
DBG && (DBG.wf5 = +new Date())
console.STAT && console.STAT(S, +new Date() - S, 'rad split', ename(rad.file), SC)
return true
}
f.slice = function (val, key) {
f.sub(f.end = key, val)
if (f.limit <= (++f.count)) { return true }
}
f.stop = function (val, key) {
if (key >= f.end) { return true }
f.hub(key, val)
}
f.both = function (err, ok) {
DBG && (DBG.wfd = +new Date())
if (b) { cb(err || b); return }
if (a) { cb(err, ok); return }
a = true
b = err
}
f.each = function (val, key, k, pre) {
if (u !== val) { f.count++ }
if (opt.max <= (val || '').length) { return cb('Data too big!'), true }
const enc = Radisk.encode(pre.length) + '#' + Radisk.encode(k) + (u === val ? '' : ':' + Radisk.encode(val)) + '\n'
if ((opt.chunk < f.text.length + enc.length) && (f.count > 1) && !o.force) {
return f.split()
}
f.text += enc
}
// console.only.i && console.log('writing');
if (opt.jsonify) { r.write.jsonify(f, rad, cb, o, DBG); return } // temporary testing idea
if (!Radix.map(rad, f.each, true)) { f.write() }
}
r.write.jsonify = function (f, rad, cb, o, DBG) {
let raw
const S = +new Date()
DBG && (DBG.w = S)
try {
raw = JSON.stringify(rad.$)
} catch (e) { cb('Cannot radisk!'); return }
DBG && (DBG.ws = +new Date())
console.STAT && console.STAT(S, +new Date() - S, 'rad stringified JSON')
if (opt.chunk < raw.length && !o.force) {
let c = 0
Radix.map(rad, function () {
if (c++) { return true } // more than 1 item
})
if (c > 1) {
return f.split()
}
}
f.text = raw
f.write()
}
r.range = function (tree, o) {
if (!tree || !o) { return }
if (u === o.start && u === o.end) { return tree }
if (atomic(tree)) { return tree }
const sub = Radix()
Radix.map(tree, function (v, k) { sub(k, v) }, o) // ONLY PLACE THAT TAKES TREE, maybe reduce API for better perf?
return sub('')
}
; (function () {
r.read = function (key, cb, o, DBG) {
o = o || {}
const g = { key }
g.find = function (file) {
let tmp
g.file = file || (file = opt.code.from)
DBG && (DBG = DBG[file] = DBG[file] || {})
DBG && (DBG.rf = DBG.rf || +new Date())
if (tmp = r.disk[g.file = file]) { g.check(u, tmp); return }
r.parse(file, g.check, u, DBG)
}
g.get = function (err, disk, info) {
DBG && (DBG.rgl = +new Date())
DBG && (DBG.rg = DBG.rg || +new Date())
if (g.err = err || g.err) { cb(err); return }
const file = g.file = (disk || '').file || g.file
if (!disk && file !== opt.code.from) { // corrupt file?
r.find.bad(file) // remove from dir list
r.read(key, cb, o) // try again
return
}
disk = r.disk[file] || (r.disk[file] = disk)
if (!disk) { cb(file === opt.code.from ? u : 'No file!'); return }
disk.file || (disk.file = file)
const data = r.range(disk(key), o)
DBG && (DBG.rr = +new Date())
o.unit = disk.unit
o.chunks = (o.chunks || 0) + 1
o.parsed = (o.parsed || 0) + ((info || '').parsed || (o.chunks * opt.chunk))
o.more = 1
o.next = u
Radix.map(r.list, function next (v, f) {
if (!v || file === f) { return }
o.next = f
return 1
}, o.reverse ? { reverse: 1, end: file } : { start: file })
DBG && (DBG.rl = +new Date())
if (!o.next) { o.more = 0 }
if (o.next) {
if (!o.reverse && ((key < o.next && o.next.indexOf(key) != 0) || (u !== o.end && (o.end || '\uffff') < o.next))) { o.more = 0 }
if (o.reverse && ((key > o.next && key.indexOf(o.next) != 0) || ((u !== o.start && (o.start || '') > o.next && file <= o.start)))) { o.more = 0 }
}
// console.log(5, process.memoryUsage().heapUsed);
if (!o.more) { cb(g.err, data, o); return }
if (data) { cb(g.err, data, o) }
if (o.parsed >= o.limit) { return }
const S = +new Date()
DBG && (DBG.rm = S)
const next = o.next
timediate(function () {
console.STAT && console.STAT(S, +new Date() - S, 'rad more')
r.parse(next, g.check)
}, 0)
}
g.check = function (err, disk, info) {
// console.log(4, process.memoryUsage().heapUsed);
g.get(err, disk, info)
if (!disk || disk.check) { return } disk.check = 1
const S = +new Date();
(info || (info = {})).file || (info.file = g.file)
Radix.map(disk, function (val, key) {
// assume in memory for now, since both write/read already call r.find which will init it.
r.find(key, function (file) {
if ((file || (file = opt.code.from)) === info.file) { return }
const id = ('' + Math.random()).slice(-3)
puff(function () {
r.save(key, val, function ack (err, ok) {
if (err) { r.save(key, val, ack); return } // ad infinitum???
// TODO: NOTE!!! Mislocated data could be because of a synchronous `put` from the `g.get(` other than perf shouldn't we do the check first before acking?
console.STAT && console.STAT('MISLOCATED DATA CORRECTED', id, ename(key), ename(info.file), ename(file))
})
}, 0)
})
})
console.STAT && console.STAT(S, +new Date() - S, 'rad check')
}
r.find(key || (o.reverse ? (o.end || '') : (o.start || '')), g.find)
}
function rev (a, b) { return b }
const revo = { reverse: true }
}())
; (function () {
/*
Let us start by assuming we are the only process that is

@@ -357,258 +354,257 @@ changing the directory or bucket. Not because we do not want

*/
let RPC = 0
const Q = {}; const s = String.fromCharCode(31)
r.parse = function (file, cb, raw, DBG) {
let q
if (!file) { return cb() }
if (q = Q[file]) { q.push(cb); return } q = Q[file] = [cb]
const p = function Parse () { }; const info = { file };
(p.disk = Radix()).file = file
p.read = function (err, data) {
let tmp
DBG && (DBG.rpg = +new Date())
console.STAT && console.STAT(S, +new Date() - S, 'read disk', JSON.stringify(file), ++RPC, 'total all parses.')
// console.log(2, process.memoryUsage().heapUsed);
if ((p.err = err) || (p.not = !data)) {
delete Q[file]
p.map(q, p.ack)
return
}
if (typeof data !== 'string') {
try {
if (opt.max <= data.length) {
p.err = 'Chunk too big!'
} else {
data = data.toString() // If it crashes, it crashes here. How!?? We check size first!
}
} catch (e) { p.err = e }
if (p.err) {
delete Q[file]
p.map(q, p.ack)
return
}
}
info.parsed = data.length
DBG && (DBG.rpl = info.parsed)
DBG && (DBG.rpa = q.length)
S = +new Date()
if (!(opt.jsonify || data[0] === '{')) {
p.radec(err, data)
return
}
parse(data, function (err, tree) {
// console.log(3, process.memoryUsage().heapUsed);
if (!err) {
delete Q[file]
p.disk.$ = tree
console.STAT && (ST = +new Date() - S) > 9 && console.STAT(S, ST, 'rad parsed JSON')
DBG && (DBG.rpd = +new Date())
p.map(q, p.ack) // hmmm, v8 profiler can't see into this cause of try/catch?
return
}
if (data[0] === '{') {
delete Q[file]
p.err = tmp || 'JSON error!'
p.map(q, p.ack)
return
}
p.radec(err, data)
})
}
p.map = function () { // switch to setTimeout.each now?
if (!q || !q.length) { return }
// var i = 0, l = q.length, ack;
const S = +new Date()
const err = p.err; const data = p.not ? u : p.disk
let i = 0; let ack; while (i < 9 && (ack = q[i++])) { ack(err, data, info) } // too much?
console.STAT && console.STAT(S, +new Date() - S, 'rad packs', ename(file))
console.STAT && console.STAT(S, i, 'rad packs #', ename(file))
if (!(q = q.slice(i)).length) { return }
puff(p.map, 0)
}
p.ack = function (cb) {
if (!cb) { return }
if (p.err || p.not) {
cb(p.err, u, info)
return
}
cb(u, p.disk, info)
}
p.radec = function (err, data) {
delete Q[file]
S = +new Date()
let tmp = p.split(data); let pre = []; let i; let k; let v
if (!tmp || tmp[1] !== 0) {
p.err = "File '" + file + "' does not have root radix! "
p.map(q, p.ack)
return
}
while (tmp) {
k = v = u
i = tmp[1]
tmp = p.split(tmp[2]) || ''
if (tmp[0] == '#') {
k = tmp[1]
pre = pre.slice(0, i)
if (i <= pre.length) {
pre.push(k)
}
}
tmp = p.split(tmp[2]) || ''
if (tmp[0] == '\n') { continue }
if (tmp[0] == '=' || tmp[0] == ':') { v = tmp[1] }
if (u !== k && u !== v) { p.disk(pre.join(''), v) }
tmp = p.split(tmp[2])
}
console.STAT && console.STAT(S, +new Date() - S, 'parsed RAD')
p.map(q, p.ack)
}
p.split = function (t) {
if (!t) { return }
const l = []; const o = {}; let i = -1; let a = ''; let b; let c
i = t.indexOf(s)
if (!t[i]) { return }
a = t.slice(0, i)
l[0] = a
l[1] = b = Radisk.decode(t.slice(i), o)
l[2] = t.slice(i + o.i)
return l
}
if (r.disk) { raw || (raw = (r.disk[file] || '').raw) }
var S = +new Date(); let SM; let SL
DBG && (DBG.rp = S)
if (raw) { return puff(function () { p.read(u, raw) }, 0) }
opt.store.get(ename(file), p.read)
// TODO: What if memory disk gets filled with updates, and we get an old one back?
}
}())
; (function () {
let dir; const f = String.fromCharCode(28); let Q
r.find = function (key, cb) {
if (!dir) {
if (Q) { Q.push([key, cb]); return } Q = [[key, cb]]
r.parse(f, init)
return
}
Radix.map(r.list = dir, function (val, key) {
if (!val) { return }
return cb(key) || true
}, { reverse: 1, end: key }) || cb(opt.code.from)
}
r.find.add = function (file, cb) {
const has = dir(file)
if (has || file === f) { cb(u, 1); return }
dir(file, 1)
cb.found = (cb.found || 0) + 1
r.write(f, dir, function (err, ok) {
if (err) { cb(err); return }
cb.found = (cb.found || 0) - 1
if (cb.found !== 0) { return }
cb(u, 1)
}, true)
}
r.find.bad = function (file, cb) {
dir(file, 0)
r.write(f, dir, cb || noop)
}
function init (err, disk) {
if (err) {
opt.log('list', err)
setTimeout(function () { r.parse(f, init) }, 1000)
return
}
if (disk) { drain(disk); return }
dir = dir || disk || Radix()
if (!opt.store.list) { drain(dir); return }
// import directory.
opt.store.list(function (file) {
if (!file) { drain(dir); return }
r.find.add(file, noop)
})
}
function drain (rad, tmp) {
dir = dir || rad
dir.file = f
var tmp2 = Q; Q = null
map(tmp2, function (arg) {
r.find(arg[0], arg[1])
})
}
}())
try { require('./radmigtmp')(r) } catch (e) { }
var noop = function () { }; let RAD; var u
Radisk.has[opt.file] = r
return r
}
; (function () {
const _ = String.fromCharCode(31); let u
Radisk.encode = function (d, o, s) {
s = s || _
let t = s; let tmp
if (typeof d === 'string') {
let i = d.indexOf(s)
while (i != -1) { t += s; i = d.indexOf(s, i + 1) }
return t + '"' + d + s
} else
if (d && d['#'] && Object.keys(d).length == 1) {
return t + '#' + tmp + t
} else
if (typeof d === 'number') {
return t + '+' + (d || 0) + t
} else
if (d === null) {
return t + ' ' + t
} else
if (d === true) {
return t + '+' + t
} else
if (d === false) {
return t + '-' + t
}// else
// if(binary){}
}
Radisk.decode = function (t, o, s) {
s = s || _
let d = ''; let i = -1; let n = 0; let c; let p
if (s !== t[0]) { return }
while (s === t[++i]) { ++n }
p = t[c = n] || true
while (--n >= 0) { i = t.indexOf(s, i + 1) }
if (i == -1) { i = t.length }
d = t.slice(c + 1, i)
if (o) { o.i = i + 1 }
if (p === '"') {
return d
} else
if (p === '#') {
return { '#': d }
} else
if (p === '+') {
if (d.length === 0) {
return true
}
return parseFloat(d)
} else
if (p === ' ') {
return null
} else
if (p === '-') {
return false
}
}
}())
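
A round-trip sketch of the separator-framed encoding above (values are framed with the unit separator, charCode 31):

// Sketch: Radisk.encode frames a value; Radisk.decode recovers it.
console.log(JSON.stringify(Radisk.encode('hi'))); // "\u001f\"hi\u001f"
console.log(Radisk.decode(Radisk.encode('hi')));  // 'hi'
console.log(Radisk.decode(Radisk.encode(42)));    // 42
console.log(Radisk.decode(Radisk.encode(true)));  // true
console.log(Radisk.decode(Radisk.encode(null)));  // null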
if (typeof window !== 'undefined') {
var Radix = window.Radix
window.Radisk = Radisk
} else {
var Radix = require('./radix')
try { module.exports = Radisk } catch (e) { }
}
Radisk.Radix = Radix
}())

@@ -5,121 +5,111 @@ /*

; (function () {
function Radix () {
const radix = function (key, val, t) {
radix.unit = 0;
(!t && u !== val) ? radix.last = ('' + key < radix.last) ? radix.last : '' + key && delete (radix.$ || {})[_] : null
t = t || radix.$ || (radix.$ = {})
if (!key && Object.keys(t).length) { return t }
key = '' + key
let i = 0; const l = key.length - 1; let k = key[i]; let at; let tmp
while (!(at = t[k]) && i < l) {
k += key[++i]
}
if (!at) {
if (!each(t, function (r, s) {
let ii = 0; let kk = ''
if ((s || '').length) {
while (s[ii] == key[ii]) {
kk += s[ii++]
}
}
if (kk) {
if (u === val) {
if (ii <= l) { return }
(tmp || (tmp = {}))[s.slice(ii)] = r
return r
}
const __ = {}
__[s.slice(ii)] = r
ii = key.slice(ii);
(ii === '') ? (__[''] = val) : ((__[ii] = {})[''] = val)
t[kk] = __
if (Radix.debug && '' + kk === 'undefined') { console.log(0, kk); debugger }
delete t[s]
return true
}
})) {
if (u === val) { return }
(t[k] || (t[k] = {}))[''] = val
if (Radix.debug && '' + k === 'undefined') { console.log(1, k); debugger }
}
if (u === val) {
return tmp
}
} else
if (i == l) {
if (u === val) { return (u === (tmp = at[''])) ? at : ((radix.unit = 1) && tmp) } // temporary help??
at[''] = val
} else {
if (u !== val) { delete at[_] };
return radix(key.slice(++i), val, at || (at = {}))
}
}
return radix
};
Radix.map = function rap (radix, cb, opt, pre) {
try {
pre = pre || [] // TODO: BUG: most out-of-memory crashes come from here.
const t = (typeof radix === 'function') ? radix.$ || {} : radix
//! opt && console.log("WHAT IS T?", JSON.stringify(t).length);
if (!t) { return }
if (typeof t === 'string') { if (Radix.debug) { throw ['BUG:', radix, cb, opt, pre] } return }
let keys = (t[_] || no).sort || (t[_] = (function $ () { $.sort = Object.keys(t).sort(); return $ }())).sort; let rev // ONLY 17% of ops are pre-sorted!
// var keys = Object.keys(t).sort();
opt = (opt === true) ? { branch: true } : (opt || {})
if (rev = opt.reverse) { keys = keys.slice(0).reverse() }
const start = opt.start; const end = opt.end; const END = '\uffff'
let i = 0; const l = keys.length
for (; i < l; i++) {
const key = keys[i]; const tree = t[key]; var tmp; var p; var pt
if (!tree || key === '' || _ === key || key === 'undefined') { continue }
p = pre.slice(0); p.push(key)
pt = p.join('')
if (u !== start && pt < (start || '').slice(0, pt.length)) { continue }
if (u !== end && (end || END) < pt) { continue }
if (rev) { // children must be checked first when going in reverse.
tmp = rap(tree, cb, opt, p)
if (u !== tmp) { return tmp }
}
if (u !== (tmp = tree[''])) {
let yes = 1
if (u !== start && pt < (start || '')) { yes = 0 }
if (u !== end && pt > (end || END)) { yes = 0 }
if (yes) {
tmp = cb(tmp, pt, key, pre)
if (u !== tmp) { return tmp }
}
} else
if (opt.branch) {
tmp = cb(u, pt, key, pre)
if (u !== tmp) { return tmp }
}
pre = p
if (!rev) {
tmp = rap(tree, cb, opt, pre)
if (u !== tmp) { return tmp }
}
pre.pop()
}
} catch (e) { console.error(e) }
}
Radix.map = function rap(radix, cb, opt, pre) {
try {
pre = pre || []; // TODO: BUG: most out-of-memory crashes come from here.
var t = ('function' == typeof radix) ? radix.$ || {} : radix;
//!opt && console.log("WHAT IS T?", JSON.stringify(t).length);
if (!t) { return }
if ('string' == typeof t) { if (Radix.debug) { throw ['BUG:', radix, cb, opt, pre] } return; }
var keys = (t[_] || no).sort || (t[_] = function $() { $.sort = Object.keys(t).sort(); return $ }()).sort, rev; // ONLY 17% of ops are pre-sorted!
//var keys = Object.keys(t).sort();
opt = (true === opt) ? { branch: true } : (opt || {});
if (rev = opt.reverse) { keys = keys.slice(0).reverse() }
var start = opt.start, end = opt.end, END = '\uffff';
var i = 0, l = keys.length;
for (; i < l; i++) {
var key = keys[i], tree = t[key], tmp, p, pt;
if (!tree || '' === key || _ === key || 'undefined' === key) { continue }
p = pre.slice(0); p.push(key);
pt = p.join('');
if (u !== start && pt < (start || '').slice(0, pt.length)) { continue }
if (u !== end && (end || END) < pt) { continue }
if (rev) { // children must be checked first when going in reverse.
tmp = rap(tree, cb, opt, p);
if (u !== tmp) { return tmp }
}
if (u !== (tmp = tree[''])) {
var yes = 1;
if (u !== start && pt < (start || '')) { yes = 0 }
if (u !== end && pt > (end || END)) { yes = 0 }
if (yes) {
tmp = cb(tmp, pt, key, pre);
if (u !== tmp) { return tmp }
}
} else
if (opt.branch) {
tmp = cb(u, pt, key, pre);
if (u !== tmp) { return tmp }
}
pre = p;
if (!rev) {
tmp = rap(tree, cb, opt, pre);
if (u !== tmp) { return tmp }
}
pre.pop();
}
} catch (e) { console.error(e); }
};
if (typeof window !== "undefined") {
window.Radix = Radix;
} else {
try { module.exports = Radix } catch (e) { }
}
var each = Radix.object = function (o, f, r) {
for (var k in o) {
if (!o.hasOwnProperty(k)) { continue }
if ((r = f(o[k], k)) !== u) { return r }
}
}, no = {}, u;
var _ = String.fromCharCode(24);
}());
try { module.exports = Radix } catch (e) { };
var each = Radix.object = function (o, f, r) {
for (const k in o) {
if (!o.hasOwnProperty(k)) { continue }
if ((r = f(o[k], k)) !== u) { return r }
}
}; var no = {}; let u
var _ = String.fromCharCode(24)
}())
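A quick sketch (not part of the diff) of how this radix tree is used: calling the returned function with a key and a value inserts, calling it with only a key reads, and `Radix.map` walks entries in sorted key order.

```javascript
const Radix = require('./radix'); // same path required by radisk in this diff

const tree = Radix();
tree('people/kais', 'pro');           // insert: shared prefixes are split internally
tree('people/kate', 'free');
console.log(tree('people/kais'));     // -> 'pro' (read: omit the value)

Radix.map(tree, function (val, key) { // walks in sorted key order
  console.log(key, val);              // 'people/kais' 'pro', then 'people/kate' 'free'
});
```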

@@ -1,38 +0,44 @@

/*
    Recover the data to a point of recovery. It's as simply as copying a directory
*/
var { program } = require("commander");
var colors = require("colors");
var fs = require("fs");
program
    .option('-p <point>')
    .option('-h');
program.parse();
var options = program.opts();
var point = options.p;
var help = options.h || options.help;
(help) ?
    console.log(`
    -p <point>    Recover data to a recovery checkpoint
    -h    Display this help message
    `)
: (!point) ? console.error("\n:(".red + ` Please set the point you want to recover to.\n\ncheck '/recovery' directory to see all available points of recovery and then pass '-p POINT'`)
: (point) ?
    fs.cp(`./recovery/${point}`, "./ddeep_data", { recursive: true }, (err) => {
        if (err) {
            console.error(err);
            return;
        }
        console.log({ success: `restored to checkpoint '${point}' successfully!` });
    })
: null;

// Recover the data to a point of recovery. It's as simple as copying a directory.
const { program } = require('commander');
require('colors');
const fs = require('fs');
// Define the command line options.
program
    .option('-p <point>', 'Recover data to a recovery checkpoint.')
    .option('-h', 'Display this help message.');
// Parse the command line options.
program.parse();
// Get the options.
const options = program.opts();
const point = options.p;
const help = options.h || options.help;
// If the help flag is set, display the help message and exit.
if (help) {
    console.log(`
    -p <point>    Recover data to a recovery checkpoint.
    -h    Display this help message.
    `);
    process.exit(0);
}
// If the point option is not set, display an error message and exit.
if (!point) {
    console.error('\n:('.red + ' Please set the point you want to recover to.\n\ncheck \'/recovery\' directory to see all available points of recovery and then pass \'-p POINT\'');
    process.exit(1);
}
// Copy the recovery point directory to the ddeep data directory.
fs.cp(`./recovery/${point}`, './ddeep_data', { recursive: true }, (err) => {
    if (err) {
        console.error(err);
        return;
    }
    // Log a success message.
    console.log({ success: `restored to checkpoint '${point}' successfully!` });
});
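As a usage sketch (the script filename and checkpoint name below are placeholders, not from the source): list the available recovery points, then pass one via `-p`:

```bash
ls ./recovery                 # see all available points of recovery
node recover.js -p MY_POINT   # restores ./recovery/MY_POINT into ./ddeep_data
```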
/*
This file was modified. for license see https://github.com/amark/gun/blob/master/LICENSE.md
*/
var Radix = require('./radix');
var Radisk = require('./radisk');
var fs = require('fs');
const Radix = require('./radix');
const Radisk = require('./radisk');
const fs = require('fs');
function Store(opt) {
function Store (opt) {
opt = opt || {}
opt.file = 'ddeep_data';
opt = opt || {};
opt.file = String(opt.file || 'ddeep_data');
const store = function Store () { }
var store = function Store() { };
store.put = function (file, data, cb) {
const random = Math.random().toString(36).slice(-3)
fs.writeFile(opt.file + '-' + random + '.tmp', data, function (err, ok) {
if (err) { return cb(err) }
fs.rename(opt.file + '-' + random + '.tmp', opt.file + '/' + file, cb)
})
}
store.put = function (file, data, cb) {
var random = Math.random().toString(36).slice(-3)
fs.writeFile(opt.file + '-' + random + '.tmp', data, function (err, ok) {
if (err) { return cb(err) }
fs.rename(opt.file + '-' + random + '.tmp', opt.file + '/' + file, cb);
});
};
store.get = function (file, cb) {
fs.readFile(opt.file + '/' + file, (err, data) => {
if (err) {
if ((err.code || '').toUpperCase() === 'ENOENT') {
return cb()
}
console.log('ERROR:', err)
}
if (data) { data = data.toString() }
cb(err, data)
})
}
store.get = function (file, cb) {
fs.readFile(opt.file + '/' + file, (err, data) => {
if (err) {
if ('ENOENT' === (err.code || '').toUpperCase()) {
return cb();
}
console.log("ERROR:", err)
}
if (data) { data = data.toString() }
cb(err, data);
});
};
store.list = function (cb, match) {
fs.readdir(opt.file, function (err, dir) {
dir.forEach(cb)
cb() // Stream interface requires a final call to know when to be done.
})
}
store.list = function (cb, match) {
fs.readdir(opt.file, function (err, dir) {
dir.forEach(cb);
cb(); // Stream interface requires a final call to know when to be done.
});
};
if (!fs.existsSync(opt.file)) { fs.mkdirSync(opt.file) }
return store;
if (!fs.existsSync(opt.file)) { fs.mkdirSync(opt.file) }
return store
}
var rad = Radisk({ store: Store() });
const rad = Radisk({ store: Store() })
var API = {};
const API = {}
API.put = function (graph, cb) {
if (!graph) { return }
var c = 0;
Object.keys(graph).forEach(function (soul) {
var node = graph[soul];
Object.keys(node).forEach(function (key) {
if ('_' == key) { return }
c++
var val = node[key], state = node._['>'][key];
rad(soul + '.' + key, JSON.stringify([val, state]), ack);
});
});
function ack(err, ok) {
c--;
if (ack.err) { return }
if (ack.err = err) {
cb(err || 'ERROR!');
return;
}
if (0 < c) { return }
cb(ack.err, 1);
}
if (!graph) { return }
let c = 0
Object.keys(graph).forEach(function (soul) {
const node = graph[soul]
Object.keys(node).forEach(function (key) {
if (key == '_') { return }
c++
const val = node[key]; const state = node._['>'][key]
rad(soul + '.' + key, JSON.stringify([val, state]), ack)
})
})
function ack (err, ok) {
c--
if (ack.err) { return }
if (ack.err = err) {
cb(err || 'ERROR!')
return
}
if (c > 0) { return }
cb(ack.err, 1)
}
}
API.get = function (lex, cb) {
if (!lex) { return }
var soul = lex['#'];
var key = lex['.'] || '';
var tmp = soul + '.' + key;
var node;
rad(tmp, function (err, val) {
var graph;
if (val) {
Radix.map(val, each);
if (!node) { each(val, key) }
graph = {};
graph[soul] = node;
}
cb(err, graph);
});
function each(val, key) {
var data = JSON.parse(val);
node = node || { _: { '#': soul, '>': {} } };
node[key] = data[0];
node._['>'][key] = data[1];
}
if (!lex) { return }
const soul = lex['#']
const key = lex['.'] || ''
const tmp = soul + '.' + key
let node
rad(tmp, function (err, val) {
let graph
if (val) {
Radix.map(val, each)
if (!node) { each(val, key) }
graph = {}
graph[soul] = node
}
cb(err, graph)
})
function each (val, key) {
const data = JSON.parse(val)
node = node || { _: { '#': soul, '>': {} } }
node[key] = data[0]
node._['>'][key] = data[1]
}
}
module.exports = API;
module.exports = API
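A minimal sketch (not part of the diff) of driving this storage API; the graph shape follows GUN's wire format, with node metadata under `_` and per-key states under `>`:

```javascript
const API = require('./store'); // hypothetical path for the module above

const graph = {
  'people/kais': {
    _: { '#': 'people/kais', '>': { name: Date.now() } }, // soul + state vector
    name: 'kais'
  }
};

API.put(graph, function (err) {
  if (err) { return console.error(err); }
  // '#' is the soul to read; '.' optionally narrows the read to one key.
  API.get({ '#': 'people/kais', '.': 'name' }, function (err, data) {
    console.log(err, data); // -> undefined { 'people/kais': { _: ..., name: 'kais' } }
  });
});
```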

@@ -5,239 +5,239 @@ /*

; (function () {
// JSON: JavaScript Object Notation
// YSON: Yielding javaScript Object Notation
var yson = {}, u, sI = setTimeout.turn || (typeof setImmediate != '' + u && setImmediate) || setTimeout;
// JSON: JavaScript Object Notation
// YSON: Yielding javaScript Object Notation
const yson = {}; let u; const sI = setTimeout.turn || (typeof setImmediate !== '' + u && setImmediate) || setTimeout
yson.parseAsync = function (text, done, revive, M) {
if ('string' != typeof text) { try { done(u, JSON.parse(text)) } catch (e) { done(e) } return }
var ctx = { i: 0, text: text, done: done, l: text.length, up: [] };
M = M || 1024 * 32;
parse();
function parse() {
//var S = +new Date;
var s = ctx.text;
var i = ctx.i, l = ctx.l, j = 0;
var w = ctx.w, b, tmp;
while (j++ < M) {
var c = s[i++];
if (i > l) {
ctx.end = true;
break;
yson.parseAsync = function (text, done, revive, M) {
if (typeof text !== 'string') { try { done(u, JSON.parse(text)) } catch (e) { done(e) } return }
const ctx = { i: 0, text, done, l: text.length, up: [] }
M = M || 1024 * 32
parse()
function parse () {
// var S = +new Date;
const s = ctx.text
let i = ctx.i; const l = ctx.l; let j = 0
let w = ctx.w; let b; let tmp
while (j++ < M) {
let c = s[i++]
if (i > l) {
ctx.end = true
break
}
if (w) {
i = s.indexOf('"', i - 1); c = s[i]
tmp = 0; while (s[i - (++tmp)] == '\\') { }; tmp = !(tmp % 2)// tmp = ('\\' == s[i-1]); // json is stupid
b = b || tmp
if (c == '"' && !tmp) {
w = u
tmp = ctx.s
if (ctx.a) {
tmp = s.slice(ctx.sl, i)
if (b || (1 + tmp.indexOf('\\'))) { tmp = JSON.parse('"' + tmp + '"') } // escape + unicode :( handling
if (ctx.at instanceof Array) {
ctx.at.push(ctx.s = tmp)
} else {
if (!ctx.at) { ctx.end = j = M; tmp = u }
(ctx.at || {})[ctx.s] = ctx.s = tmp
}
ctx.s = u
} else {
ctx.s = s.slice(ctx.sl, i)
if (b || (1 + ctx.s.indexOf('\\'))) { ctx.s = JSON.parse('"' + ctx.s + '"') } // escape + unicode :( handling
}
ctx.a = b = u
}
++i
} else {
switch (c) {
case '"':
ctx.sl = i
w = true
break
case ':':
ctx.ai = i
ctx.a = true
break
case ',':
if (ctx.a || ctx.at instanceof Array) {
if (tmp = s.slice(ctx.ai, i - 1)) {
if (u !== (tmp = value(tmp))) {
(ctx.at instanceof Array)
? ctx.at.push(tmp)
: ctx.at[ctx.s] = tmp
}
}
if (w) {
i = s.indexOf('"', i - 1); c = s[i];
tmp = 0; while ('\\' == s[i - (++tmp)]) { }; tmp = !(tmp % 2);//tmp = ('\\' == s[i-1]); // json is stupid
b = b || tmp;
if ('"' == c && !tmp) {
w = u;
tmp = ctx.s;
if (ctx.a) {
tmp = s.slice(ctx.sl, i);
if (b || (1 + tmp.indexOf('\\'))) { tmp = JSON.parse('"' + tmp + '"') } // escape + unicode :( handling
if (ctx.at instanceof Array) {
ctx.at.push(ctx.s = tmp);
} else {
if (!ctx.at) { ctx.end = j = M; tmp = u }
(ctx.at || {})[ctx.s] = ctx.s = tmp;
}
ctx.s = u;
} else {
ctx.s = s.slice(ctx.sl, i);
if (b || (1 + ctx.s.indexOf('\\'))) { ctx.s = JSON.parse('"' + ctx.s + '"'); } // escape + unicode :( handling
}
ctx.a = b = u;
}
ctx.a = u
if (ctx.at instanceof Array) {
ctx.a = true
ctx.ai = i
}
break
case '{':
ctx.up.push(ctx.at || (ctx.at = {}))
if (ctx.at instanceof Array) {
ctx.at.push(ctx.at = {})
} else
if (u !== (tmp = ctx.s)) {
ctx.at[tmp] = ctx.at = {}
}
ctx.a = u
break
case '}':
if (ctx.a) {
if (tmp = s.slice(ctx.ai, i - 1)) {
if (u !== (tmp = value(tmp))) {
if (ctx.at instanceof Array) {
ctx.at.push(tmp)
} else {
if (!ctx.at) { ctx.end = j = M; tmp = u }
(ctx.at || {})[ctx.s] = tmp
}
++i;
} else {
switch (c) {
case '"':
ctx.sl = i;
w = true;
break;
case ':':
ctx.ai = i;
ctx.a = true;
break;
case ',':
if (ctx.a || ctx.at instanceof Array) {
if (tmp = s.slice(ctx.ai, i - 1)) {
if (u !== (tmp = value(tmp))) {
(ctx.at instanceof Array) ? ctx.at.push(tmp)
: ctx.at[ctx.s] = tmp;
}
}
}
ctx.a = u;
if (ctx.at instanceof Array) {
ctx.a = true;
ctx.ai = i;
}
break;
case '{':
ctx.up.push(ctx.at || (ctx.at = {}));
if (ctx.at instanceof Array) {
ctx.at.push(ctx.at = {});
} else
if (u !== (tmp = ctx.s)) {
ctx.at[tmp] = ctx.at = {};
}
ctx.a = u;
break;
case '}':
if (ctx.a) {
if (tmp = s.slice(ctx.ai, i - 1)) {
if (u !== (tmp = value(tmp))) {
if (ctx.at instanceof Array) {
ctx.at.push(tmp);
} else {
if (!ctx.at) { ctx.end = j = M; tmp = u }
(ctx.at || {})[ctx.s] = tmp;
}
}
}
}
ctx.a = u;
ctx.at = ctx.up.pop();
break;
case '[':
if (u !== (tmp = ctx.s)) {
ctx.up.push(ctx.at);
ctx.at[tmp] = ctx.at = [];
} else
if (!ctx.at) {
ctx.up.push(ctx.at = []);
}
ctx.a = true;
ctx.ai = i;
break;
case ']':
if (ctx.a) {
if (tmp = s.slice(ctx.ai, i - 1)) {
if (u !== (tmp = value(tmp))) {
if (ctx.at instanceof Array) {
ctx.at.push(tmp);
} else {
ctx.at[ctx.s] = tmp;
}
}
}
}
ctx.a = u;
ctx.at = ctx.up.pop();
break;
}
}
}
ctx.a = u
ctx.at = ctx.up.pop()
break
case '[':
if (u !== (tmp = ctx.s)) {
ctx.up.push(ctx.at)
ctx.at[tmp] = ctx.at = []
} else
if (!ctx.at) {
ctx.up.push(ctx.at = [])
}
ctx.a = true
ctx.ai = i
break
case ']':
if (ctx.a) {
if (tmp = s.slice(ctx.ai, i - 1)) {
if (u !== (tmp = value(tmp))) {
if (ctx.at instanceof Array) {
ctx.at.push(tmp)
} else {
ctx.at[ctx.s] = tmp
}
}
}
}
ctx.s = u;
ctx.i = i;
ctx.w = w;
if (ctx.end) {
tmp = ctx.at;
if (u === tmp) {
try {
tmp = JSON.parse(text)
} catch (e) { return ctx.done(e) }
}
ctx.done(u, tmp);
} else {
sI(parse);
}
}
ctx.a = u
ctx.at = ctx.up.pop()
break
}
}
}
function value(s) {
var n = parseFloat(s);
if (!isNaN(n)) {
return n;
}
ctx.s = u
ctx.i = i
ctx.w = w
if (ctx.end) {
tmp = ctx.at
if (u === tmp) {
try {
tmp = JSON.parse(text)
} catch (e) { return ctx.done(e) }
}
s = s.trim();
if ('true' == s) {
return true;
}
if ('false' == s) {
return false;
}
if ('null' == s) {
return null;
}
ctx.done(u, tmp)
} else {
sI(parse)
}
}
}
function value (s) {
const n = parseFloat(s)
if (!isNaN(n)) {
return n
}
s = s.trim()
if (s == 'true') {
return true
}
if (s == 'false') {
return false
}
if (s == 'null') {
return null
}
}
yson.stringifyAsync = function (data, done, replacer, space, ctx) {
ctx = ctx || {};
ctx.text = ctx.text || "";
ctx.up = [ctx.at = { d: data }];
ctx.done = done;
ctx.i = 0;
var j = 0;
ify();
function ify() {
var at = ctx.at, data = at.d, add = '', tmp;
if (at.i && (at.i - at.j) > 0) { add += ',' }
if (u !== (tmp = at.k)) { add += JSON.stringify(tmp) + ':' } //'"'+tmp+'":' } // only if backslash
switch (typeof data) {
case 'boolean':
add += '' + data;
break;
case 'string':
add += JSON.stringify(data); //ctx.text += '"'+data+'"';//JSON.stringify(data); // only if backslash
break;
case 'number':
add += (isNaN(data) ? 'null' : data);
break;
case 'object':
if (!data) {
add += 'null';
break;
}
if (data instanceof Array) {
add += '[';
at = { i: -1, as: data, up: at, j: 0 };
at.l = data.length;
ctx.up.push(ctx.at = at);
break;
}
if ('function' != typeof (data || '').toJSON) {
add += '{';
at = { i: -1, ok: Object.keys(data).sort(), as: data, up: at, j: 0 };
at.l = at.ok.length;
ctx.up.push(ctx.at = at);
break;
}
if (tmp = data.toJSON()) {
add += tmp;
break;
}
// let this & below pass into default case...
case 'function':
if (at.as instanceof Array) {
add += 'null';
break;
}
default: // handle wrongly added leading `,` if previous item not JSON-able.
add = '';
at.j++;
}
ctx.text += add;
while (1 + at.i >= at.l) {
ctx.text += (at.ok ? '}' : ']');
at = ctx.at = at.up;
}
if (++at.i < at.l) {
if (tmp = at.ok) {
at.d = at.as[at.k = tmp[at.i]];
} else {
at.d = at.as[at.i];
}
if (++j < 9) { return ify() } else { j = 0 }
sI(ify);
return;
}
ctx.done(u, ctx.text);
yson.stringifyAsync = function (data, done, replacer, space, ctx) {
ctx = ctx || {}
ctx.text = ctx.text || ''
ctx.up = [ctx.at = { d: data }]
ctx.done = done
ctx.i = 0
let j = 0
ify()
function ify () {
let at = ctx.at; const data = at.d; let add = ''; let tmp
if (at.i && (at.i - at.j) > 0) { add += ',' }
if (u !== (tmp = at.k)) { add += JSON.stringify(tmp) + ':' } // '"'+tmp+'":' } // only if backslash
switch (typeof data) {
case 'boolean':
add += '' + data
break
case 'string':
add += JSON.stringify(data) // ctx.text += '"'+data+'"';//JSON.stringify(data); // only if backslash
break
case 'number':
add += (isNaN(data) ? 'null' : data)
break
case 'object':
if (!data) {
add += 'null'
break
}
if (data instanceof Array) {
add += '['
at = { i: -1, as: data, up: at, j: 0 }
at.l = data.length
ctx.up.push(ctx.at = at)
break
}
if (typeof (data || '').toJSON !== 'function') {
add += '{'
at = { i: -1, ok: Object.keys(data).sort(), as: data, up: at, j: 0 }
at.l = at.ok.length
ctx.up.push(ctx.at = at)
break
}
if (tmp = data.toJSON()) {
add += tmp
break
}
// let this & below pass into default case...
case 'function':
if (at.as instanceof Array) {
add += 'null'
break
}
default: // handle wrongly added leading `,` if previous item not JSON-able.
add = ''
at.j++
}
ctx.text += add
while (1 + at.i >= at.l) {
ctx.text += (at.ok ? '}' : ']')
at = ctx.at = at.up
}
if (++at.i < at.l) {
if (tmp = at.ok) {
at.d = at.as[at.k = tmp[at.i]]
} else {
at.d = at.as[at.i]
}
if (++j < 9) { return ify() } else { j = 0 }
sI(ify)
return
}
ctx.done(u, ctx.text)
}
if (typeof window != '' + u) { window.YSON = yson }
try { if (typeof module != '' + u) { module.exports = yson } } catch (e) { }
if (typeof JSON != '' + u) {
JSON.parseAsync = yson.parseAsync;
JSON.stringifyAsync = yson.stringifyAsync;
}
}());
}
if (typeof window !== '' + u) { window.YSON = yson }
try { if (typeof module !== '' + u) { module.exports = yson } } catch (e) { }
if (typeof JSON !== '' + u) {
JSON.parseAsync = yson.parseAsync
JSON.stringifyAsync = yson.stringifyAsync
}
}())
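A minimal usage sketch (not part of the diff): both calls are callback-based and yield to the event loop between chunks of work, which is the point of YSON over plain `JSON`:

```javascript
const YSON = require('./yson'); // hypothetical path for the module above

YSON.stringifyAsync({ people: { kais: { plan: 'pro' } } }, function (err, text) {
  if (err) { return console.error(err); }
  // text === '{"people":{"kais":{"plan":"pro"}}}'
  YSON.parseAsync(text, function (err, data) {
    console.log(err, data && data.people.kais.plan); // -> undefined 'pro'
  });
});
```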

@@ -1,2 +0,2 @@

// Add your own extensions here and use them in your code and policies using `ddeepExt.load()`
// Add your own extensions here and use them in your code and policies using `extensions.load()`
module.exports = [

@@ -6,2 +6,13 @@

];
];
/*
how to load extensions:
var extensions = require('./lib/ext/require');
extensions.load('extension_name');
(change the require path based on your file's location. this works with policies perfectly)
*/
{
"name": "ddeep-core",
"version": "1.0.8",
"version": "1.0.9",
"description": "Decentralized real-time peer-to-peer data network",

@@ -10,9 +10,15 @@ "author": {

},
"engines": {
"node": ">=16.13.0 || >=18.0.0"
},
"scripts": {
"build-start": "./node_modules/.bin/esbuild dev/serve.js --bundle --outfile=dist/build.js --platform=node && node ./dist/build.js",
"start": "node ./dist/build.js",
"init": "node ./init/init.js",
"build": "./node_modules/.bin/esbuild dev/serve.js --bundle --outfile=dist/build.js --platform=node"
"build": "./node_modules/.bin/esbuild dev/serve.js --bundle --outfile=dist/build.js --platform=node",
"bytenode-build": "bytenode --compile ./dist/build.js",
"bytenode-start": "bytenode ./dist/build.jsc"
},
"bin": {
"ddeep-init": "./init/init.js"
"ddeep-init": "./lib/init/init.js"
},

@@ -28,6 +34,7 @@ "main": "./dist/build.js",

"dependencies": {
"@fastify/static": "^6.11.2",
"@fastify/websocket": "^8.2.0",
"@huggingface/inference": "^2.6.1",
"bytenode": "^1.5.1",
"colors": "^1.4.0",
"directory-tree": "^3.5.1",
"esbuild": "^0.19.4",

@@ -34,0 +41,0 @@ "fastify": "^4.23.2"

@@ -0,13 +1,14 @@

var POLICY = require('./dev/policies/policy_builder');
var extensions = require("./lib/ext/require");
/*
ddeepExt gives you the ability to load extensions to your peer's code and policies.
extensions gives you the ability to load extensions into your peer's code and policies.
you can add your own extensions to the 'extensions.config.js' file or use built-in extensions.
Use ddeepExt.load(extension_name: string) to load an extension.
Use extensions.load(extension_name) to load and use an extension.
*/
var ddeepExt = require("./ext/require");
module.exports = [
// your policies goes here
// your policies go here
]
];

@@ -0,27 +1,44 @@

<div align="center">
<img align="center" src="https://opencollective-production.s3.us-west-1.amazonaws.com/account-long-description/dedb8939-f88e-4099-8d41-8680f0a67ef3/ddeep_big.png" height="200px" />
</div>
# ddeep-core
<br>
<p align="center">Decentralized real-time peer-to-peer data network</p>
![Static Badge](https://img.shields.io/badge/beta_release-current?style=flat&color=slateblue) [![Socket Badge](https://socket.dev/api/badge/npm/package/ddeep-core)](https://socket.dev/npm/package/ddeep-core) ![NPM Downloads](https://img.shields.io/npm/dm/ddeep-core) ![NPM License](https://img.shields.io/npm/l/ddeep-core) ![GitHub issues](https://img.shields.io/github/issues/kais-radwan/ddeep-core) ![npm dev dependency version (scoped)](https://img.shields.io/npm/dependency-version/ddeep-core/dev/typescript) ![npm (prod) dependency version (scoped)](https://img.shields.io/npm/dependency-version/ddeep-core/esbuild) ![Snyk](https://scrutinizer-ci.com/g/kais-radwan/ddeep-core/badges/build.png?b=main)
<div align="center">
> This is a beta version !
![Static Badge](https://img.shields.io/badge/alpha_release-current?style=flat-square&color=slateblue)
![NPM Downloads](https://img.shields.io/npm/dm/ddeep-core?style=flat-square)
![NPM License](https://img.shields.io/npm/l/ddeep-core?style=flat-square)
![GitHub issues](https://img.shields.io/github/issues/kais-radwan/ddeep-core?style=flat-square)
![typescript version](https://img.shields.io/npm/dependency-version/ddeep-core/dev/typescript?style=flat-square)
![esbuild version](https://img.shields.io/npm/dependency-version/ddeep-core/esbuild?style=flat-square)
![Scrutinizer build](https://img.shields.io/scrutinizer/build/g/kais-radwan/ddeep-core/main?style=flat-square)
![code quality](https://img.shields.io/scrutinizer/quality/g/kais-radwan/ddeep-core/main?style=flat-square)
[![Socket Badge](https://socket.dev/api/badge/npm/package/ddeep-core?style=flat-square)](https://socket.dev/npm/package/ddeep-core)
[![Static Badge](https://img.shields.io/badge/chat-on_matrix-lightgreen?style=flat-square)](https://matrix.to/#/@multineon:gitter.im)
🚀 Decentralized real-time peer-to-peer data network to run your database, peer, and relay all at once.
</div>
ddeep-core is used to run a back-end environment to save and sync decentralized graph data across peers, powered by flexible extensions, real-time connections, AI-powered policies, and more.
> This is an alpha version!
- Real-time peer-to-peer connections ⚡️
<img src="https://github.com/kais-radwan/ddeep-core/blob/development/dev/assets/core_terminal.png?raw=true" style="border-radius: 10px;" />
- AI-powered data policies and scopes 🔒
Decentralized real-time peer-to-peer data core, used to save and sync decentralized graph data across connected peers, with features like:
- Extensions, so you can add your own code and packages to ddeep 🧩
- Persistent data storage with recovery checkpoints
- Fast & Scalable by design ☄️
- Scoped and AI-powered data policies
- Full customizability and flexibility 🔧
- Real-time low latency connections
you can use ddeep-core as your full project's back-end, and soon you will be able to handle routing to build API functionalities 🔥
- Security features like IP whitelists, data policies, and smart listeners, so one device can't overload the core with connections
- Command-line interactive interface to manage your network
- `ddeep-core` works perfectly with Gun JS.
## Installation
We recommend you clone this repository and run `npm install`.
We recommend you clone this repository, run `npm install`, and then `npm start` to start ddeep-core.

@@ -40,49 +57,48 @@ Using npm, you can install ddeep-core globally:

- [ddeep-core](#ddeep-core)
- [Installation](#installation)
- [Table of contents](#table-of-contents)
- [Getting started](#getting-started)
- [node \& npm](#node--npm)
- [docker](#docker)
- [build](#build)
- [Beta](#beta)
- [Configurations](#configurations)
- [Policies](#policies)
- [Add policies](#add-policies)
- [Schema](#schema)
- [Check policies](#check-policies)
- [Usage](#usage)
- [Smart policies](#smart-policies)
- [HuggingFace token](#huggingface-token)
- [Usage](#usage-1)
- [`smart_check` extension](#smart_check-extension)
- [Extensions](#extensions)
- [Add extensions](#add-extensions)
- [Schema](#schema-1)
- [Write your extensions](#write-your-extensions)
- [Use your extensions](#use-your-extensions)
- [Restore checkpoints](#restore-checkpoints)
- [Infrastructure](#infrastructure)
- [Communications](#communications)
- [CRDT](#crdt)
- [Storage](#storage)
- [Policies](#policies-1)
- [Check policies](#check-policies-1)
- [AI-powered policies](#ai-powered-policies)
- [Decentralized world](#decentralized-world)
- [Development](#development)
- [`dev` directory](#dev-directory)
- [License](#license)
- [NOTICE](#notice)
- [Thanks](#thanks)
- [The idea of ddeep-core](#the-idea-of-ddeep-core)
- [Ddeep ecosystem](#ddeep-ecosystem)
- [Gun](#gun)
- [Contact us](#contact-us)
- [Installation](#installation)
- [Table of contents](#table-of-contents)
- [Getting started](#getting-started)
- [node \& npm](#node--npm)
- [docker](#docker)
- [build](#build)
- [Alpha](#alpha)
- [Configurations](#configurations)
- [Policies](#policies)
- [Add policies](#add-policies)
- [Schema](#schema)
- [Check policies](#check-policies)
- [Usage](#usage)
- [Smart policies](#smart-policies)
- [HuggingFace token](#huggingface-token)
- [Usage](#usage-1)
- [`smart_check` extension](#smart_check-extension)
- [Extensions](#extensions)
- [Add extensions](#add-extensions)
- [Schema](#schema-1)
- [Write your extensions](#write-your-extensions)
- [Use your extensions](#use-your-extensions)
- [Restore checkpoints](#restore-checkpoints)
- [Infrastructure](#infrastructure)
- [Communications](#communications)
- [CRDT](#crdt)
- [Storage](#storage)
- [Policies](#policies-1)
- [Check policies](#check-policies-1)
- [AI-powered policies](#ai-powered-policies)
- [Decentralized world](#decentralized-world)
- [Development](#development)
- [`dev` directory](#dev-directory)
- [License](#license)
- [NOTICE](#notice)
- [Thanks](#thanks)
- [The idea of ddeep-core](#the-idea-of-ddeep-core)
- [Ddeep ecosystem](#ddeep-ecosystem)
- [Gun](#gun)
- [Contact us](#contact-us)
## Getting started
### node & npm
### node & bytenode
To start your network using npm, just run:
To start your network using node, just run:

@@ -93,13 +109,17 @@ ```bash

or simply using node:
or using bytenode:
```bash
node ./dist/build.js
npm run bytenode-start
```
### docker
You can also run it in [Docker](docker.com), first build the docker image:
You can run ddeep-core in a [Docker](docker.com) container; you can pull the image from Docker Hub:
```bash
docker pull multineon/ddeep-core
```
or build it from the source (recommended):
```bash
docker build -t ddeep-core .
```
and now you can run a docker container:
and now you can run ddeep-core as a docker container:
```bash

@@ -116,2 +136,14 @@ docker run -d ddeep-core

or using bytenode:
```bash
npm run bytenode-build
```
or simply do this:
```bash
npm run build-start
```
to build your new configurations and start the server.
Currently, every time you make a change to your configurations, policies, extensions, or code, you need to build ddeep-core again; thanks to [esbuild](https://www.npmjs.com/package/esbuild), the build will usually be ready in under one second.

@@ -121,5 +153,5 @@

### Beta
### Alpha
This project is a beta as it's still in its early stage and there are a lot of more amazing ideas coming to it, but anyway for now we don't recommend using it in production.
This project is still in `alpha testing` and in its early stages, so we are testing for possible issues and bugs; if you face any issue, please contact us or report it on GitHub.

@@ -129,26 +161,50 @@ ## Configurations

> You need to build the code using `npm run build` after everytime you update your configurations. this won't be the case in future versions.
> You need to build the code using `npm run build` every time you update your configurations.
In the `ddeep.config.js` file you'll find comments explaining what every option does; this is an example of all the default options:
```javascript
module.exports = {
// Set storage to false to disable persistent data storage
"storage": true,
"storage": false,
// Set the port you want to run the peer on
"port": 9999,
"port": 9999,
// set logs to false if you don't want to see real-time logs in your peer
"logs": true,
"whitelist": [],
// Add your huggingFace token to be used with AI smart policies
"hf": null,
"hf": null,
// Set a checkpoint interval timer in ms to make a recovery checkpoint of the database
// example: setting "checkpoint" to 60000 will make a point of recovery every 1 minute
// this works only with persistent storage enabled
"checkpoint": null
"checkpoint": null,
"reset_graph": null,
"reset_listeners": 6000000
}
```
## Interactive interface
When you start `ddeep-core` it opens a command-line interface where you can manage your network. Let's see the available commands:
- `list peers`: lists all connected peers
- `list listeners`: lists graph listeners and the peers listening to them
- `peer PEER_ID`: shows a peer info
- `clear`: clears the terminal
- `clear peers`: clears all listening peers
- `clear graph`: clears the cached graph data
- `clear listeners`: clears all the listeners
- `info`: shows the configurations info
- `run CODE`: executes Node.js code inside the core's process
- `exec CODE`: executes a command in your operating system (tested with Linux)
## Policies

@@ -167,17 +223,12 @@

```javascript
{
name: string,
operations: ['get', 'put'],
graph: Array<string>
type: "check" || "smart",
check: Function // return true or false
}
POLICY(
type: 'check'|'smart',
operations: ['get', 'put'],
graph: string,
callback: Function // return true or false
)
```
There are two types of policies, check policies and smart policies; let's see how each of them works.
the `graph` property accept an array of nodes. if you apply a policy to `["people"]` node it's applied to all nodes under 'people', but if you apply a policy to `["people", "kais"]` the policy will only be applied to the node 'kais' under 'people', and so on.
The `graph` property accepts a string path of nodes the policy is applied to. If you apply a policy to `people`, it's applied to all nodes under `people`; if you apply a policy to `people/kais`, it's applied only to the node `kais` under `people`, and so on.

@@ -194,15 +245,9 @@ ### Check policies

module.exports = [
{
name: "policy1",
operations: ["put"],
node: ["people", "kais"],
type: "check",
check (data) {
return (data) ? true : false;
}
POLICY(
'check', ['put'], 'people/kais',
(data) => {
return (data.name) ? true : false;
}
)

@@ -212,7 +257,7 @@ ]

this policy will be applied to `put` operations to the node 'kais' inside the node 'people' and it checks if the data we are putting have a 'plan' property or not, if it does, the data operation will be granted and the data will be added otherwise the operation will be cancelled.
This policy is applied to `put` operations on the node `kais` under `people`, and it checks whether the data being put has a `name` property: if it does, the operation is granted and the data is added; otherwise the operation is cancelled.
the `data` argument passed to the check contains the data being putted if the operation is `put`, and the data is being `getted` is the operation is `get`.
The `data` argument passed to the checking function contains the data being written if the operation is `put`, and the data being read if the operation is `get`.
what matters is that the `check()` function has to return `true` or `false`, if returned `true` the opeartion will be processed, and if returned `false` the opeartion will be ignored.
What matters is that the checking function returns `true` or `false`: if it returns `true` the operation is processed, and if it returns `false` the operation is ignored.

@@ -222,5 +267,5 @@ for example this is also a valid `check()` policy function:

```javascript
check (data) {
if (data.plan === "pro") return true;
if (data.plan !== "pro") return false;
(data) => {
if (data.plan === "pro") {return true};
if (data.plan !== "pro") {return false};
}

@@ -246,22 +291,21 @@ ```

{
name: "smart policy",
operations: ["get", "put"],
graph: ["posts"],
type: "smart",
POLICY(
'smart', ['get', 'put'], 'posts',
(classes) => {
var smart_check = extensions.load('smart_check');
check: (classes) => {
var smartCheck = ddeepExt.load('smart_check');
return smart_check(classes, [
[ "anger", "<0.5", true ],
[ "anger", ">0.5", false ]
]);
return smartCheck(classes, [
[ "anger", "<0.5", true ],
[ "anger", ">0.5", false ]
]);
};
}
)
]
```
the policy above is applied to all nodes under "posts" and it blocks all data that contains angry inputs from being added or read.
the policy above is applied to all nodes under `posts` and it blocks all data that contains angry inputs from being added or read.

@@ -272,12 +316,12 @@ #### `smart_check` extension

the extension can be loaded using `ddeepExt.load` and it's imported to your policies by default, this is how `smart_check` is used:
The extension can be loaded using `extensions.load`, and it's imported into your policies by default; this is how `smart_check` is used:
```javascript
var smartCheck = ddeepExt.load('smart_check');
var smartCheck = extensions.load('smart_check');
return smartCheck(classes, [
[class:string, condition:string, return:true||false]
[class: string, condition: string, return: true|false]
])
```
- **Classes**: passed to policy's `check()` if the policy type is set to `smart`.
- **Classes**: passed to the policy's function when the policy type is set to `smart` (instead of the data passed in check policies).

@@ -331,12 +375,18 @@ - **Class**: have to be a valid class name.

Now you can use your extension in your policies or any other file using `ddeepExt.load(extension_name)`. example:
Now you can use your extension in your policies or any other file using `extensions.load(extension_name)`. Example:
```javascript
var get_object_keys = ddeepExt.load('object-keys');
var get_object_keys = extensions.load('object-keys');
```
`ddeepExt` is imported by default to your policies but if you want to use your extension in other files, you can require it:
`extensions` is imported by default into your policies, but if you want to use your extension in other files, you can require it:
```javascript
var ddeepExt = require('./ext/require');
var extensions = require('./lib/ext/require');
```
## Whitelisted IPs
In `ddeep.config.js` you can add a list of IP addresses (of peers, servers, or websites) that are allowed to connect to your core or server.
This can help you prevent cross-site connections to your core; if the list is empty, this option is ignored.
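For example, a sketch of a whitelist in `ddeep.config.js` (the addresses are placeholders):

```javascript
module.exports = {
    // only these addresses can connect; an empty list disables the check
    "whitelist": ["203.0.113.7", "198.51.100.23"],
    // ... the rest of your configurations
}
```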
## Restore checkpoints

@@ -406,2 +456,4 @@ If you are using persistent storage, you can setup a checkpoint in the `ddeep.config.js` so the system will create a restore checkpoint based on the options you give it. (more explained in the `ddeep.config.js` file itself).
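For instance (values illustrative), enabling storage with an hourly checkpoint would look like:

```javascript
module.exports = {
    "storage": true,        // checkpoints only work with persistent storage
    "checkpoint": 3600000,  // make a recovery checkpoint every hour
    // ... the rest of your configurations
}
```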

- [bytenode](https://github.com/bytenode/bytenode) for building a great bytecode compiler for NodeJS.
## The idea of ddeep-core

@@ -408,0 +460,0 @@

Sorry, the diff of this file is too big to display
