abstract-algorithm - npm Package Compare versions

Comparing version 0.1.6 to 0.1.7

examples/base.js


package.json
 {
   "name": "abstract-algorithm",
-  "version": "0.1.6",
+  "version": "0.1.7",
   "description": "Abstract-algorithm for optimal reduction of stratifiable lambda terms",
-  "main": "src/api.js",
+  "main": "src/abstract-algorithm.js",
   "scripts": {

@@ -26,4 +26,4 @@ "test": "node test.js"

   "dependencies": {
-    "lambda-calculus": "^1.0.3"
+    "lambda-calculus": "^1.0.4"
   }
 }
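
Since `main` is what a bare `require` resolves to, the entry-point switch is the user-visible part of this manifest diff: consumers now get the new wrapper module instead of the old `src/api.js`. A minimal illustration, assuming nothing beyond the manifest above:

```javascript
// Node resolves this through the "main" field of package.json;
// in 0.1.6 it loaded src/api.js, in 0.1.7 it loads src/abstract-algorithm.js.
const algo = require("abstract-algorithm");
```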

@@ -18,6 +18,12 @@ # Lamping's Abstract Algorithm

 ```javascript
-const A = require("abstract-algorithm");
-const term = "(s.z.(s (s z)) s.z.(s (s z)))"; // church-encoded exponentiation of 2^2
-const termNf = A.netToLam(A.reduceNet(A.lamToNet(term)));
-console.log(termNf);
+const algo = require("abstract-algorithm");
+// church-encoded exponentiation of 2^2
+const term = `
+  two: s.z.(s (s z))
+  (two two)
+`;
+// evaluates optimally
+console.log(algo(term));
 ```
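
Judging from the new `src/abstract-algorithm.js` further down in this compare, the exported function returns an object rather than a bare string, so the `console.log` above prints the normal form together with reduction statistics. A hedged sketch of reading both fields, with the shapes taken from the comments in that file:

```javascript
const algo = require("abstract-algorithm");

const result = algo(`
  two: s.z.(s (s z))
  (two two)
`);

// Per src/abstract-algorithm.js: {term: String, stats: {rewrites: Number, loops: Number}}
console.log(result.term);           // normal form as a λ-calculus string
console.log(result.stats.rewrites); // how many graph rewrites the reduction took
```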

@@ -27,4 +33,12 @@

-Here is a [crappy handwritten explanation](images/handwritten_example.jpg) of how it works. [Here](src/example.js) is a working example of how to use this lib to reduce untyped λ-terms. [Here](src/fast-reducer.js) is an inlined reducer (less pretty, but faster and with GC - the exported lib actually uses it), and [here](stuff/wasmReducer.js) is a poor attempt at making a WASM reducer (tip: it is not any faster, which is weird, because the [C](stuff/ic.c) version is 10x faster than the JS one). [Here](https://github.com/MaiaVictor/parallel_lambda_computer_tests) is an attempt to flatten the graph-reduction algorithm to a linear automaton, in such a way that it could, in theory, be implemented as a massively parallel ASIC where each 128-bit memory cell is actually a nano-CPU capable of updating its state by looking at its neighbors, in such a way that it causes the emergent global behavior of the memory to converge to the normal form of the initial graph.
-Note this only works with stratified terms, which roughly means that the algorithm breaks when you have duplications inside of duplications. To make it work with arbitrary terms, you'd need to augment the system with a complex piece of machinery known as an oracle. There are many ways to do it, but all of them ruin the performance. I personally prefer to call non-stratified terms defective and ignore them. It is not hard to avoid those terms; you just have to be cautious about the implicit duplication of arguments performed by beta-reduction (which is the root of all evil).
+Crappy handwritten examples:
+- [Numbers](images/handwritten_example.jpg?raw=true)
+- [Pairs](images/pairs_on_inets.jpg?raw=true)
+- [Either](images/either_on_inets.jpg?raw=true)
+[Here](src/example.js) is a working example of how to use this lib to reduce untyped λ-terms. [Here](src/fast-reducer.js) is an inlined reducer (less pretty, but faster and with GC - the exported lib actually uses it), and [here](stuff/wasmReducer.js) is a poor attempt at making a WASM reducer (tip: it is not any faster, which is weird, because the [C](stuff/ic.c) version is 10x faster than the JS one). [Here](https://github.com/MaiaVictor/parallel_lambda_computer_tests) is an attempt to flatten the graph-reduction algorithm to a linear automaton, in such a way that it could, in theory, be implemented as a massively parallel ASIC where each 128-bit memory cell is actually a nano-CPU capable of updating its state by looking at its neighbors, in such a way that it causes the emergent global behavior of the memory to converge to the normal form of the initial graph.
+Note this only works with [stratified terms](https://www.reddit.com/r/haskell/comments/6phxvb/cleaned_up_implementation_of_lampings_abstract/dkq57yx/?context=1), which roughly means that the algorithm breaks when you have duplications inside of duplications. To make it work with arbitrary terms, you'd need to augment the system with a complex piece of machinery known as an oracle. There are many ways to do it, but all of them ruin the performance. I personally prefer to call non-stratified terms defective and ignore them. It is not hard to avoid those terms; you just have to be cautious about the implicit duplication of arguments performed by beta-reduction (which is the root of all evil). Note that the subset of oracle-free terms [is still Turing complete](https://github.com/MaiaVictor/articles/tree/master/0000-oracle-free-terms-are-turing-complete).
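
To make the warning about implicit duplication concrete, here is a hypothetical illustration (not from the repo), written in the same term syntax the library accepts:

```javascript
const algo = require("abstract-algorithm");

// Beta-reducing (x.(x x) t) uses its argument twice, so t gets implicitly
// duplicated -- exactly the duplication the paragraph above warns about.
// Copying a term that performs no duplication of its own is harmless:
console.log(algo("(x.(x x) s.z.(s z))").term); // copies church-1; stratified

// "Duplications inside of duplications" can only arise when a duplicated
// term itself duplicates something, so that is the shape to be cautious of.
```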

@@ -1,120 +1,26 @@

-// Implements symmetric interaction combinators with infinite
-// node colors. By encoding a λ-calculus term to an interaction
-// net, this becomes Lamping's Abstract Algorithm.
+// Performs the abstract algorithm by converting λ-terms to
+// interaction combinators, reducing, then converting back.
-function Wire(node, port) {
-  return (node << 2) | port;
-}
+var E = require("./lambda-encoder.js");
+var L = require("lambda-calculus");
+var I = require("./interaction-combinators.js");
-function node(wire) {
-  return wire >>> 2;
-}
+// String -> Net
+// Converts a lambda-calculus string to an interaction net
+function lambdaToNet(lam) {
+  return E.encode(L.fromString(lam));
+}
-function port(wire) {
-  return wire & 3;
-}
+// Net -> String
+// Converts an interaction net to a lambda-calculus string
+function netToLambda(net) {
+  return L.toString(E.decode(net));
+}
-function flip(mem, w) {
-  return mem[w];
-}
+// String -> {term: String, stats: {rewrites: Number, loops: Number}}
+// Reduces a lambda-calculus string to normal form
+module.exports = function(lambda, debug) {
+  var net = I.reduce(lambdaToNet(lambda));
+  var term = netToLambda(net);
+  return {term: term, stats: net.stats};
+}
-function Node(mem, kind) {
-  var node = mem.length / 4;
-  mem.push(Wire(node, 0), Wire(node, 1), Wire(node, 2), kind << 2);
-  return node;
-}
-function kind(mem, node) {
-  return mem[node * 4 + 3] >>> 2;
-}
-function meta(mem, node) {
-  return (mem[node * 4 + 3] >>> 0) & 3;
-}
-function setMeta(mem, node, meta) {
-  return mem[node * 4 + 3] = mem[node * 4 + 3] & 0xFFFFFFFC | meta;
-}
-function link(mem, a, b) {
-  mem[a] = b;
-  mem[b] = a;
-}
-// This walks through the graph looking for redexes, following the logical flow
-// of information, in such a way that only redexes that interfere with the normal
-// form are reduced.
-function reduce(net) {
-  let visit = [net.ptr];
-  let prev, next, back;
-  while (visit.length > 0) {
-    prev = visit.pop();
-    next = flip(net.mem, prev);
-    prev = flip(net.mem, next);
-    if (meta(net.mem, node(prev)) === 3) {
-      continue;
-    }
-    if (port(prev) === 0) {
-      if (port(next) === 0 && node(next) !== node(prev)) {
-        back = flip(net.mem, Wire(node(next), meta(net.mem, node(next))));
-        rewrite(net.mem, node(next), node(prev));
-        visit.push(flip(net.mem, back));
-      } else {
-        setMeta(net.mem, node(prev), 3);
-        visit.push(flip(net.mem, Wire(node(prev), 2)));
-        visit.push(flip(net.mem, Wire(node(prev), 1)));
-      }
-    } else {
-      setMeta(net.mem, node(prev), port(prev));
-      visit.push(flip(net.mem, Wire(node(prev), 0)));
-    }
-  }
-  return net;
-}
-// This performs the reduction of redexes. It, thus, implements annihilation
-// and commutation, as described in Lafont's paper on interaction combinators.
-// It is the heart of the algorithm. In theory, the reduce() function above isn't
-// necessary; you could just store an array of redexes and keep applying this
-// function on them. You'd lose the lazy aspect of the algorithm, but you could,
-// in turn, perform reductions in parallel. There is an inherent tradeoff
-// between laziness and parallelization, because, by reducing nodes in parallel,
-// you inevitably reduce redexes which do not influence the normal form.
-function rewrite(mem, x, y) {
-  if (kind(mem, x) === kind(mem, y)) {
-    //  a b        a b
-    //   \ /        \ /
-    //    A -- B --> X
-    //   / \        / \
-    //  c d        c d
-    link(mem, flip(mem, Wire(x, 1)), flip(mem, Wire(y, 1)));
-    link(mem, flip(mem, Wire(x, 2)), flip(mem, Wire(y, 2)));
-  } else {
-    //  a d        a - B --- A - d
-    //   \ /            \   /
-    //    A -- B -->      X
-    //   / \            /   \
-    //  b c        b - B --- A - c
-    var x1 = Node(mem, kind(mem, x)), x2 = Node(mem, kind(mem, x));
-    var y1 = Node(mem, kind(mem, y)), y2 = Node(mem, kind(mem, y));
-    link(mem, flip(mem, Wire(y1, 0)), flip(mem, Wire(x, 1)));
-    link(mem, flip(mem, Wire(y2, 0)), flip(mem, Wire(x, 2)));
-    link(mem, flip(mem, Wire(x1, 0)), flip(mem, Wire(y, 1)));
-    link(mem, flip(mem, Wire(x2, 0)), flip(mem, Wire(y, 2)));
-    link(mem, flip(mem, Wire(x1, 1)), flip(mem, Wire(y1, 1)));
-    link(mem, flip(mem, Wire(x1, 2)), flip(mem, Wire(y2, 1)));
-    link(mem, flip(mem, Wire(x2, 1)), flip(mem, Wire(y1, 2)));
-    link(mem, flip(mem, Wire(x2, 2)), flip(mem, Wire(y2, 2)));
-  }
-}
-module.exports = {
-  reduce: reduce,
-  Node: Node,
-  link: link,
-  flip: flip,
-  kind: kind,
-  Wire: Wire,
-  node: node,
-  port: port
-};
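
For orientation, the removed combinator file's memory layout is worth restating on its own: each node occupies four consecutive words in a flat array (one wire per port, plus a word packing the node's kind with a 2-bit meta tag), and a wire is just `(node << 2) | port`. Below is a standalone sketch reconstructed from the deleted functions above; none of this is part of 0.1.7's public API:

```javascript
// A wire is an index into `mem`: the node number in the high bits,
// the port in the low 2 bits (0..2 are ports, slot 3 is the kind/meta word).
function Wire(node, port) { return (node << 2) | port; }
function node(wire)       { return wire >>> 2; }
function port(wire)       { return wire & 3; }

// A node occupies mem[4n .. 4n+3]: three ports, initially wired to
// themselves, and a fourth word storing the kind shifted past the meta bits.
function Node(mem, kind) {
  var n = mem.length / 4;
  mem.push(Wire(n, 0), Wire(n, 1), Wire(n, 2), kind << 2);
  return n;
}

// Linking writes each wire into the other's slot, so following a wire
// is a single array read: flip(mem, w) === mem[w].
function link(mem, a, b) { mem[a] = b; mem[b] = a; }

var mem = [];
var n = Node(mem, 1);
console.log(node(Wire(n, 2)) === n); // true: wires round-trip to their node
console.log(port(Wire(n, 2)) === 2); // true: and to their port
```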

@@ -1,5 +1,5 @@

-// Encode/decode λ-terms to/from interaction nets
+// Encode/decode λ-terms to/from interaction nets.
 var L = require("lambda-calculus");
-var A = require("./abstract-algorithm.js");
+var I = require("./interaction-combinators.js");

@@ -12,26 +12,26 @@ function encode(term) {

     case L.LAM:
-      var fun = A.Node(m,1);
-      var era = A.Node(m,0);
-      A.link(m, A.Wire(fun,1), A.Wire(era,0));
-      A.link(m, A.Wire(era,1), A.Wire(era,2));
+      var fun = I.Node(m,1);
+      var era = I.Node(m,0);
+      I.link(m, I.Wire(fun,1), I.Wire(era,0));
+      I.link(m, I.Wire(era,1), I.Wire(era,2));
       var bod = encode(term.body, [fun].concat(scope));
-      A.link(m, A.Wire(fun,2), bod);
-      return A.Wire(fun,0);
+      I.link(m, I.Wire(fun,2), bod);
+      return I.Wire(fun,0);
     case L.APP:
-      var app = A.Node(m,1);
+      var app = I.Node(m,1);
       var fun = encode(term.left, scope);
-      A.link(m, A.Wire(app,0), fun);
+      I.link(m, I.Wire(app,0), fun);
       var arg = encode(term.right, scope);
-      A.link(m, A.Wire(app,1), arg);
-      return A.Wire(app,2);
+      I.link(m, I.Wire(app,1), arg);
+      return I.Wire(app,2);
     case L.VAR:
       var lam = scope[term.index];
-      if (A.kind(m,A.node(A.flip(m,A.Wire(lam,1)))) === 0) {
-        return A.Wire(lam,1);
+      if (I.kind(m,I.node(I.flip(m,I.Wire(lam,1)))) === 0) {
+        return I.Wire(lam,1);
       } else {
-        var dup = A.Node(m, ++kind);
-        var arg = A.flip(m, A.Wire(lam,1));
-        A.link(m,A.Wire(dup,1), A.flip(m,A.Wire(lam,1)));
-        A.link(m,A.Wire(dup,0), A.Wire(lam,1));
-        return A.Wire(dup,2);
+        var dup = I.Node(m, ++kind);
+        var arg = I.flip(m, I.Wire(lam,1));
+        I.link(m,I.Wire(dup,1), I.flip(m,I.Wire(lam,1)));
+        I.link(m,I.Wire(dup,0), I.Wire(lam,1));
+        return I.Wire(dup,2);
       }

@@ -45,10 +45,10 @@ };

   return (function go(next, exit, depth){
-    var prev = A.flip(net.mem, next);
-    var prevPort = A.port(prev);
-    var prevNode = A.node(prev);
-    if (A.kind(net.mem, prevNode) === 1) {
+    var prev = I.flip(net.mem, next);
+    var prevPort = I.port(prev);
+    var prevNode = I.node(prev);
+    if (I.kind(net.mem, prevNode) === 1) {
       switch (prevPort) {
         case 0:
           nodeDepth[prevNode] = depth;
-          return L.Lam(go(A.Wire(prevNode,2), exit, depth + 1));
+          return L.Lam(go(I.Wire(prevNode,2), exit, depth + 1));
         case 1:

@@ -58,8 +58,8 @@ return L.Var(depth - nodeDepth[prevNode] - 1);

           return L.App(
-            go(A.Wire(prevNode,0), exit, depth),
-            go(A.Wire(prevNode,1), exit, depth));
+            go(I.Wire(prevNode,0), exit, depth),
+            go(I.Wire(prevNode,1), exit, depth));
       }
     } else {
       return go(
-        A.Wire(prevNode, prevPort > 0 ? 0 : exit.head),
+        I.Wire(prevNode, prevPort > 0 ? 0 : exit.head),
         prevPort > 0 ? {head: prevPort, tail: exit} : exit.tail,
         depth);
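
The last hunk is the fan-crossing logic of the decoder: when the walk enters a duplication node through port 1 or 2, it records that port on an `exit` stack and leaves through port 0; when it later re-enters through port 0, it pops the stack to exit through the matching branch. A minimal sketch of that stack discipline (hypothetical helper name; the real code inlines it in `go`):

```javascript
// Mirrors the `prevPort > 0 ? {head, tail} : exit.tail` logic above:
// entering a fan by an auxiliary port pushes it; leaving by the
// principal port pops it to select the same branch on the way out.
function crossFan(prevPort, exit) {
  return prevPort > 0
    ? { leaveBy: 0,         exit: { head: prevPort, tail: exit } } // push
    : { leaveBy: exit.head, exit: exit.tail };                     // pop
}
```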
