hyperdrive-http
Comparing version 3.5.0 to 4.0.0
cli.js (46 lines changed)
cli.js at 3.5.0:

#!/usr/bin/env node
var http = require('http')
var memdb = require('memdb')
var hypercore = require('hypercore')
var swarm = require('hyperdiscovery')
var hyperdriveHttp = require('.')

var core = hypercore(memdb())
var feed = core.createFeed(process.argv[2])
var onrequest = hyperdriveHttp(feed)
var server = http.createServer()

swarm(feed)
server.listen(8000)
server.on('request', onrequest)

console.log('Visit http://localhost:8000 to see feed/metadata')

cli.js at 4.0.0:

#!/usr/bin/env node
var fs = require('fs')
var http = require('http')
var path = require('path')
var ram = require('random-access-memory')
var hyperdrive = require('hyperdrive')
var discovery = require('hyperdiscovery')
var serve = require('.')

var key = process.argv[2]
var storage = ram
var port = 8080

if (!key) {
  console.log('key or path to a dat required')
  process.exit(1)
}

try {
  fs.stat(path.join(key, '.dat'), function (err, stat) {
    if (err) return start()
    storage = path.join(key, '.dat')
    key = null
    start()
  })
} catch (e) { start() }

function start () {
  var archive = hyperdrive(storage, key, {sparse: true})
  var server = http.createServer(serve(archive, {live: true}))
  server.listen(port)
  console.log(`Visit http://localhost:${port} to see archive`)
  if (key) {
    archive.ready(function () {
      discovery(archive, {live: true})
    })
  }
}
example.js

example.js at 3.5.0:

var http = require('http')
var path = require('path')
var memdb = require('memdb')
var hyperdrive = require('hyperdrive')
var raf = require('random-access-file')
var hyperdriveHttp = require('.')

var drive = hyperdrive(memdb())
var archive = drive.createArchive({
  file: function (name) {
    return raf(path.join(__dirname, name))
  }
})
archive.append('readme.md')
archive.append('package.json')
archive.append('index.js')

var onrequest = hyperdriveHttp(archive)
var server = http.createServer()
server.listen(8000)
server.on('request', onrequest)

console.info('Now listening on localhost:8000')
console.info('Visit in your browser to see metadata')

example.js at 4.0.0:

var http = require('http')
var fs = require('fs')
var ram = require('random-access-memory')
var hyperdrive = require('hyperdrive')
var serve = require('.')

var archive = hyperdrive(ram)
archive.writeFile('readme.md', fs.readFileSync('readme.md'))
archive.writeFile('package.json', fs.readFileSync('package.json'))
archive.writeFile('index.js', fs.readFileSync('index.js'))
archive.writeFile('foo/index.html', '<h1>INDEX PAGE YO</h1>')

var server = http.createServer(serve(archive, {exposeHeaders: true, live: true}))
server.listen(8000)
console.info('Now listening on http://localhost:8000')
index.js (245 lines changed)
index.js at 3.5.0 (156 lines):

var assert = require('assert')
var stream = require('stream')
var pump = require('pump')
var TimeoutStream = require('through-timeout')
var cbTimeout = require('callback-timeout')
var mime = require('mime')
var rangeParser = require('range-parser')
var ndjson = require('ndjson')
var encoding = require('dat-encoding')
var through2 = require('through2')
var debug = require('debug')('hyperhttp')

module.exports = function (getArchive, opts) {
  assert.ok(getArchive, 'hyperhttp: getArchive|archive required')

  var archive
  if (typeof (getArchive) !== 'function') {
    // Make a getArchive function to get the single archive by default
    archive = getArchive
    getArchive = function (datUrl, cb) {
      cb(null, archive)
    }
  }
  // Sanity check =)
  assert.equal(typeof getArchive, 'function', 'hyperhttp: getArchive must be function')

  if (!opts) opts = {}

  var that = onrequest
  that.parse = parse
  that.get = function (req, res, archive, opts) {
    if (archive) return serveFeedOrArchive(req, res, archive)
    var datUrl = parse(req)
    getArchive(datUrl, function (err, archive) {
      if (err) return onerror(err)
      serveFeedOrArchive(req, res, archive, datUrl)
    })
  }
  that.file = function (req, res, archive, filename) {
    if (archive) return serveFile(req, res, archive, filename)
    var datUrl = parse(req)
    getArchive(datUrl, function (err, archive) {
      if (err) return onerror(err)
      serveFile(req, res, archive, datUrl.filename)
    })
  }

  return that

  function onrequest (req, res) {
    var datUrl = parse(req)
    if (!datUrl) return onerror(404, res) // TODO: explain error in res
    getArchive(datUrl, function (err, archive) {
      if (err) return onerror(err, res) // TODO: explain error in res
      if (!archive) return onerror(404, res) // TODO: explain error in res
      if (datUrl.op === 'upload') {
        var ws = archive.createFileWriteStream('file')
        ws.on('finish', () => res.end(encoding.encode(archive.key)))
        pump(req, ws)
        return
      } else if (!datUrl.filename || !archive.metadata) {
        // serve archive or hypercore feed
        serveFeedOrArchive(req, res, archive, datUrl).pipe(res)
      } else {
        serveFile(req, res, archive, datUrl.filename)
      }
    })
  }

  function parse (req) {
    var segs = req.url.split('/').filter(Boolean)
    var key = archive
      ? encoding.encode(archive.key)
      : segs.shift()
    var filename = segs.join('/')
    var op = 'get'

    try {
      // check if we are serving archive at root
      key = key.replace(/\.changes$/, '')
      encoding.decode(key)
    } catch (e) {
      filename = segs.length ? [key].concat(segs).join('/') : key
      key = null
    }

    if (/\.changes$/.test(req.url)) {
      op = 'changes'
      if (filename) filename = filename.replace(/\.changes$/, '')
    } else if (req.method === 'POST') {
      op = 'upload'
    }

    var results = {
      key: key,
      filename: filename,
      op: op
    }
    debug('parse() results', results)
    return results
  }

  function serveFeedOrArchive (req, res, archive, urlOpts) {
    debug('serveFeedOrArchive', archive.key.toString('hex'))
    var opts = { live: urlOpts.op === 'changes' }
    var through = new stream.PassThrough()
    var src = archive.metadata ? archive.list(opts) : archive.createReadStream(opts)
    var timeout = TimeoutStream({
      objectMode: true,
      duration: 10000
    }, () => {
      onerror(404, res)
      src.destroy()
    })

    res.setHeader('Content-Type', 'application/json')
    if (archive.metadata) return pump(src, timeout, ndjson.serialize(), through)
    return pump(src, timeout, through2.obj(function (chunk, enc, cb) {
      cb(null, chunk.toString())
    }), ndjson.serialize(), through)
  }

  function serveFile (req, res, archive, filename) {
    debug('serveFile', archive.key.toString('hex'), 'filename', [filename])
    archive.get(filename, cbTimeout((err, entry) => {
      if (err && err.code === 'ETIMEDOUT') return onerror(404, res)
      if (err || !entry || entry.type !== 'file') return onerror(404, res)
      debug('serveFile, got entry', entry)

      var range = req.headers.range && rangeParser(entry.length, req.headers.range)[0]
      res.setHeader('Access-Ranges', 'bytes')
      res.setHeader('Content-Type', mime.lookup(filename))

      if (!range || range < 0) {
        res.setHeader('Content-Length', entry.length)
        if (req.method === 'HEAD') return res.end()
        debug('serveFile, returning file')
        return pump(archive.createFileReadStream(entry), res)
      } else {
        res.statusCode = 206
        res.setHeader('Content-Length', range.end - range.start + 1)
        res.setHeader('Content-Range', 'bytes ' + range.start + '-' + range.end + '/' + entry.length)
        if (req.method === 'HEAD') return res.end()
        return pump(archive.createFileReadStream(entry, {start: range.start, end: range.end + 1}), res)
      }
    }, 10000))
  }

  function onerror (status, res) {
    if (typeof status !== 'number') status = 404
    res.statusCode = status
    res.end()
  }
}

index.js at 4.0.0 (149 lines):

var toHTML = require('directory-index-html')
var pump = require('pump')
var mime = require('mime')
var range = require('range-parser')
var qs = require('querystring')
var pkg = require('./package')

module.exports = serve

function serve (archive, opts) {
  if (!opts) opts = {}

  return onrequest

  function onrequest (req, res) {
    var name = decodeURI(req.url.split('?')[0])
    var query = qs.parse(req.url.split('?')[1] || '')

    var wait = (query.wait && Number(query.wait.toString())) || 0
    var have = archive.metadata ? archive.metadata.length : -1

    if (wait <= have) return ready()
    waitFor(archive, wait, ready)

    function ready () {
      var arch = /^\d+$/.test(query.version) ? archive.checkout(Number(query.version)) : archive
      if (name[name.length - 1] === '/') ondirectory(arch, name, req, res, opts)
      else onfile(arch, name, req, res)
    }
  }
}

function onfile (archive, name, req, res) {
  archive.stat(name, function (err, st) {
    if (err) return onerror(res, 404, err)

    if (st.isDirectory()) {
      res.statusCode = 302
      res.setHeader('Location', name + '/')
      return
    }

    var r = req.headers.range && range(st.size, req.headers.range)[0]

    res.setHeader('Accept-Ranges', 'bytes')
    res.setHeader('Content-Type', mime.lookup(name))

    if (r) {
      res.statusCode = 206
      res.setHeader('Content-Range', 'bytes ' + r.start + '-' + r.end + '/' + st.size)
      res.setHeader('Content-Length', r.end - r.start + 1)
    } else {
      res.setHeader('Content-Length', st.size)
    }

    if (req.method === 'HEAD') return res.end()
    pump(archive.createReadStream(name, r), res)
  })
}

function ondirectory (archive, name, req, res, opts) {
  archive.stat(name + 'index.html', function (err) {
    if (err) return ondirectoryindex(archive, name, req, res, opts)
    onfile(archive, name + 'index.html', req, res)
  })
}

function ondirectoryindex (archive, name, req, res, opts) {
  list(archive, name, function (err, entries) {
    if (err) entries = []

    var wait = archive.metadata ? archive.metadata.length + 1 : 0
    var script = `
      function liveUpdate () {
        var xhr = new XMLHttpRequest()
        xhr.open("GET", "${name}?wait=${wait}", true)
        xhr.onload = function () {
          document.open()
          document.write(xhr.responseText)
          document.close()
        }
        xhr.onerror = function () {
          setTimeout(liveUpdate, 1000)
        }
        xhr.send(null)
      }

      liveUpdate()
    `

    var html = toHTML({directory: name, script: (!opts.live || archive._checkout) ? null : script}, entries)

    res.setHeader('Content-Type', 'text/html; charset=utf-8')
    res.setHeader('Content-Length', Buffer.byteLength(html))
    if (opts.exposeHeaders) {
      res.setHeader('Hyperdrive-Key', archive.key.toString('hex'))
      res.setHeader('Hyperdrive-Version', archive.version)
      res.setHeader('Hyperdrive-Http-Version', pkg.version)
    }
    res.end(html)
  })
}

function waitFor (archive, until, cb) { // this feels a bit hacky, TODO: make less complicated?
  archive.setMaxListeners(0)
  if (!archive.metadata) archive.once('ready', waitFor.bind(null, archive, until, cb))
  if (archive.metadata.length >= until) return cb()
  archive.metadata.setMaxListeners(0)
  archive.metadata.once('append', waitFor.bind(null, archive, until, cb))
}

function onerror (res, status, err) {
  res.statusCode = status
  res.end(err.stack)
}

function list (archive, name, cb) {
  archive.readdir(name, function (err, names) {
    if (err) return cb(err)

    var error = null
    var missing = names.length
    var entries = []

    if (!missing) return cb(null, [])
    for (var i = 0; i < names.length; i++) stat(name + names[i], names[i])

    function stat (name, base) {
      archive.stat(name, function (err, st) {
        if (err) error = err
        if (st) {
          entries.push({
            type: st.isDirectory() ? 'directory' : 'file',
            name: base,
            size: st.size,
            mtime: st.mtime
          })
        }
        if (--missing) return
        if (error) return cb(error)
        cb(null, entries.sort(sort))
      })
    }
  })
}

function sort (a, b) {
  return a.name.localeCompare(b.name)
}
package.json

 {
   "name": "hyperdrive-http",
-  "version": "3.5.0",
+  "version": "4.0.0",
   "description": "Handle Hyper[drive|core] HTTP Requests",
   "main": "index.js",
   "scripts": {
-    "test": "standard && tape test/*.js | tap-spec"
+    "test": "standard"
   },
@@ -17,11 +17,6 @@ "keywords": [
   "dependencies": {
-    "callback-timeout": "^3.0.0",
-    "dat-encoding": "^4.0.0",
-    "debug": "^2.6.1",
+    "directory-index-html": "^2.1.0",
     "mime": "^1.3.4",
-    "ndjson": "^1.4.3",
-    "pump": "^1.0.1",
-    "range-parser": "^1.2.0",
-    "through-timeout": "^1.0.0",
-    "through2": "^2.0.1"
+    "pump": "^1.0.2",
+    "range-parser": "^1.2.0"
   },
@@ -37,12 +32,7 @@ "repository": {
   "devDependencies": {
-    "collect-stream": "^1.2.1",
-    "hypercore": "^4.7.0",
-    "hyperdrive": "^7.0.0",
-    "memdb": "^1.3.1",
-    "random-access-file": "^1.2.0",
-    "request": "^2.73.0",
-    "standard": "^8.5.0",
-    "tap-spec": "^4.1.1",
-    "tape": "^4.6.0"
+    "hyperdiscovery": "^1.3.0",
+    "hyperdrive": "^8.0.0",
+    "random-access-memory": "^2.3.0",
+    "standard": "^9.0.2"
   }
 }
readme.md (105 lines changed)
 # Hyperdrive Http

-Serve a [hyperdrive](https://github.com/mafintosh/hyperdrive) archive or [hypercore](https://github.com/mafintosh/hypercore) feed over HTTP. For an example of use, see [dat.haus](https://github.com/juliangruber/dat.haus).
+Serve a [hyperdrive](https://github.com/mafintosh/hyperdrive) archive over HTTP. For an example of use, see [dat.haus](https://github.com/juliangruber/dat.haus).

@@ -19,49 +19,20 @@ [![Travis](https://api.travis-ci.org/joehand/hyperdrive-http.svg)](https://travis-ci.org/joehand/hyperdrive-http)

 To use hyperdrive-http you will need to:

 * Create your own http server
-* Setup your hyperdrive archive(s)
-* Connect to the swarm before serving archive
+* Setup your hyperdrive archive
+* For remote archives, connect to the swarm

-### API
+## API

 Hyperdrive works with many archives/feeds or a single archive.

-#### Multiple Archives
-
-If you have multiple archives, you will need to look the archive to return using the key.
-
-Initiate with an archive lookup function:
-
-`var onrequest = hyperdriveHttp(getArchive)`
-
-The archive lookup function may look like this:
-
-```js
-var getArchive = function (datInfo, cb) {
-  // datInfo = {
-  //   key: archive.key,
-  //   filename: filename.txt // If file is requested in URL
-  //   op: 'get' or 'changes'
-  // }
-
-  // Find the archive to return:
-  var archive = cache.get(datInfo.key)
-  if (!archive) {
-    archive = drive.createArchive(datInfo.key)
-    // Make sure you join the swarm before callback
-    sw.join(archive.discoveryKey)
-  }
-  cb(null, archive) // callback with your found archive
-}
-```
-
-#### Single Archive
-
-Hyperdrive-http works great with a single archive too. It exposes the metadata at the root path and files are available without using the key.
-
-Pass a single archive on initiation:
-
-`var onrequest = hyperdriveHttp(archive)`
-
-Now your archive metadata will be available at http://example.com/
-
-#### Hypercore Feed(s)
-
-You can also use a hypercore feed: `hyperdriveHttp(feed)` (or using a similar getArchive function)
+#### Options
+
+- `exposeHeaders` - If set to `true`, hyperdrive-http will add custom `Hyperdrive-` HTTP headers to directory listing requests (default: `false`):
+
+  ```http
+  Hyperdrive-Key: de2a51bbaf8a5545eff82c999f15e1fd29637b3f16db94633cb6e2e0c324f833
+  Hyperdrive-Version: 4
+  ```
+
+- `live` - If set to `true` will reload a directory listing if the archive receives updates.

 ### URL Format

@@ -71,50 +42,14 @@

-#### Multiple archives on one site
-
-* Get metadata for archive: `http://dat.haus/c5dbfe5521d8dddba683544ee4b1c7f6ce1c7b23bd387bd850397e4aaf9afbd9/`
-* Get file from archive: `http://dat.haus/c5dbfe5521d8dddba683544ee4b1c7f6ce1c7b23bd387bd850397e4aaf9afbd9/filename.pdf`
-* Upload file: `POST http://archive-example.com/` or `POST http://archive-example.com/c5dbfe5521d8dddba683544ee4b1c7f6ce1c7b23bd387bd850397e4aaf9afbd9`
-
-#### Single Archive Mode
-
-* Get metadata for archive: `http://archive-example.com/`
+* Get archive listing: `http://archive-example.com/`
 * Get file from archive: `http://archive-example.com/filename.pdf`
-* Upload file: `POST http://archive-example.com/`
-
-#### Hypercore Mode
-
-For hypercore feeds, the data is available with the same logic as above for a single or multiple feeds.
+
+If a directory in the archive contains an `index.html` page that file is returned instead of the directory listing.

-## Example
-
-```javascript
-var hyperdriveHttp = require('hyperdrive-http')
-
-var getArchive = function (datInfo, cb) {
-  // find the archive to serve
-  var discoveryKey = crypto.createHmac('sha256', Buffer(datInfo.key, 'hex')).update('hypercore').digest('hex')
-  var archive = cache.get(discoveryKey)
-  if (!archive) {
-    archive = drive.createArchive(datInfo.key)
-    // connect to swarm, if necessary
-    sw.join(archive.discoveryKey)
-  }
-  cb(null, archive) // callback with your found archive
-}
-
-var onrequest = hyperdriveHttp(getArchive)
-var server = http.createServer()
-server.listen(8000)
-server.on('request', onrequest)
-```
-
-Pass an archive lookup function for the first argument of `hyperdriveHttp`. The function is called with `datInfo` and a callback.
-
-```javascript
-datInfo = {
-  key: archive.key,
-  filename: someFile.txt,
-  op: 'get' // or 'changes'
-}
-```
+## CLI
+
+There is also a CLI that can be used for demo + testing. Pass it a dat link or a path to an existing dat folder:
+
+```
+node cli.js <dat-key>
+node cli.js /path/to/existing/dat
+```
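With `exposeHeaders: true`, the directory listing response carries the archive key and version in HTTP headers, so a client can pin a snapshot without parsing the HTML. A small sketch of reading them, assuming a server such as example.js is running on localhost:8000 with that option enabled:

```js
var http = require('http')

http.get({ host: 'localhost', port: 8000, path: '/' }, function (res) {
  // Node lowercases incoming header names
  console.log('key:', res.headers['hyperdrive-key'])
  console.log('version:', res.headers['hyperdrive-version'])
  console.log('served by hyperdrive-http', res.headers['hyperdrive-http-version'])
  res.resume() // discard the HTML body; only the headers matter here
})
```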
License Policy Violation: this package is not allowed per your license policy. Review the package's license to ensure compliance. Found 1 instance in 1 package.
Native code (supply chain risk): contains native code (e.g., compiled binaries or shared libraries). Including native code can obscure malicious behavior. Found 7 instances in 1 package.
Dependency changes:

+ Added directory-index-html@^2.1.0
+ Added directory-index-html@2.1.0 (transitive)
- Removed callback-timeout@^3.0.0
- Removed dat-encoding@^4.0.0
- Removed debug@^2.6.1
- Removed ndjson@^1.4.3
- Removed through-timeout@^1.0.0
- Removed through2@^2.0.1
- Removed callback-timeout@3.0.1 (transitive)
- Removed core-util-is@1.0.3 (transitive)
- Removed dat-encoding@4.0.2 (transitive)
- Removed debug@2.6.9 (transitive)
- Removed define-error@1.1.0 (transitive)
- Removed inherits@2.0.4 (transitive)
- Removed isarray@1.0.0 (transitive)
- Removed json-stringify-safe@5.0.1 (transitive)
- Removed minimist@1.2.8 (transitive)
- Removed ms@2.0.0 (transitive)
- Removed ndjson@1.5.0 (transitive)
- Removed process-nextick-args@2.0.1 (transitive)
- Removed readable-stream@2.3.8 (transitive)
- Removed safe-buffer@5.1.2, 5.2.1 (transitive)
- Removed split2@2.2.0 (transitive)
- Removed string_decoder@1.1.1 (transitive)
- Removed through-timeout@1.0.0 (transitive)
- Removed through2@2.0.5 (transitive)
- Removed util-deprecate@1.0.2 (transitive)
- Removed xtend@4.0.2 (transitive)
Updated pump@^1.0.2