localtunnel
Comparing version 0.0.1 to 0.0.2
client.js
@@ -5,40 +5,67 @@ // builtin
var request = require('request');
var EventEmitter = require('events').EventEmitter;
var argv = require('optimist')
.usage('Usage: $0 --port [num]')
.demand(['port'])
.options('host', {
default: 'http://localtunnel.me',
describe: 'upstream server providing forwarding'
})
.describe('port', 'internal http server port')
.argv;
// request upstream url and connection info
var request_url = function(params, cb) {
request(params, function(err, res, body) {
if (err) {
cb(err);
}
// local port
var local_port = argv.port;
cb(null, body);
});
};
// optionally override the upstream server
var upstream = url.parse(argv.host);
var connect = function(opt) {
var ev = new EventEmitter();
// query options
var opt = {
host: upstream.hostname,
port: upstream.port || 80,
path: '/',
json: true
};
// local port
var local_port = opt.port;
var base_uri = 'http://' + opt.host + ':' + opt.port + opt.path;
var base_uri = opt.host + '/';
var internal;
var upstream;
var prev_id;
// optionally override the upstream server
var upstream = url.parse(opt.host);
(function connect_proxy() {
opt.uri = base_uri + ((prev_id) ? prev_id : '?new');
// no subdomain at first, maybe use requested domain
var assigned_domain = opt.subdomain;
request(opt, function(err, res, body) {
// connect to upstream given connection parameters
var tunnel = function (remote_host, remote_port, max_conn) {
var count = 0;
// open 5 connections to the localtunnel server
// allows for resources to be served faster
for (var count = 0 ; count < max_conn ; ++count) {
var upstream = duplex(remote_host, remote_port, 'localhost', local_port);
upstream.once('end', function() {
// all upstream connections have been closed
if (--count <= 0) {
tunnel(remote_host, remote_port, max_conn);
}
});
upstream.on('error', function(err) {
console.error(err);
});
}
};
var params = {
path: '/',
json: true
};
// where to quest
params.uri = base_uri + ((assigned_domain) ? assigned_domain : '?new');
request_url(params, function(err, body) {
if (err) {
console.error('upstream not available: %s', err.message);
return process.exit(-1);
ev.emit('error', new Error('tunnel server not available: %s, retry 1s', err.message));
// retry interval for id request
return setTimeout(function() {
connect_proxy(opt);
}, 1000);
}
@@ -48,50 +75,61 @@
var port = body.port;
var host = opt.host;
var host = upstream.hostname;
// store the id so we can try to get the same one
prev_id = body.id;
assigned_domain = body.id;
console.log('your url is: %s', body.url);
tunnel(host, port, body.max_conn_count || 1);
// connect to remote tcp server
upstream = net.createConnection(port, host);
ev.emit('url', body.url);
});
// reconnect internal
connect_internal();
return ev;
};
upstream.on('end', function() {
console.log('> upstream connection terminated');
var duplex = function(remote_host, remote_port, local_host, local_port) {
var ev = new EventEmitter();
// sever connection to internal server
// on reconnect we will re-establish
internal.end();
// connect to remote tcp server
var upstream = net.createConnection(remote_port, remote_host);
var internal;
// when upstream connection is closed, close other associated connections
upstream.once('end', function() {
ev.emit('error', new Error('upstream connection terminated'));
// sever connection to internal server
// on reconnect we will re-establish
internal.end();
ev.emit('end');
});
upstream.on('error', function(err) {
ev.emit('error', err);
});
(function connect_internal() {
internal = net.createConnection(local_port, local_host);
internal.on('error', function() {
ev.emit('error', new Error('error connecting to local server. retrying in 1s'));
setTimeout(function() {
connect_proxy();
connect_internal();
}, 1000);
});
});
})();
function connect_internal() {
internal.on('end', function() {
ev.emit('error', new Error('disconnected from local server. retrying in 1s'));
setTimeout(function() {
connect_internal();
}, 1000);
});
internal = net.createConnection(local_port);
internal.on('error', function(err) {
console.log('error connecting to local server. retrying in 1s');
upstream.pipe(internal).pipe(upstream);
})();
setTimeout(function() {
connect_internal();
}, 1000);
});
return ev;
}
internal.on('end', function() {
console.log('disconnected from local server. retrying in 1s');
setTimeout(function() {
connect_internal();
}, 1000);
});
module.exports.connect = connect;
upstream.pipe(internal);
internal.pipe(upstream);
}
@@ -5,3 +5,3 @@ {
"description": "expose localhost to the world",
"version": "0.0.1",
"version": "0.0.2",
"repository": {
@@ -16,3 +16,5 @@ "type": "git",
},
"devDependencies": {},
"devDependencies": {
"mocha": "1.6.0"
},
"optionalDependencies": {},
@@ -22,5 +24,9 @@ "engines": {
},
"scripts": {
"test": "mocha --ui qunit -- test",
"start": "./bin/server"
},
"bin": {
"lt": "./bin/lt"
"lt": "./bin/client"
}
}
@@ -1,2 +0,2 @@
# localtunnel #
# localtunnel [![Build Status](https://secure.travis-ci.org/shtylman/localtunnel.png)](http://travis-ci.org/shtylman/localtunnel) #
@@ -24,1 +24,26 @@ localtunnel exposes your localhost to the world for easy testing and sharing! No need to mess with DNS or deploy just to have others test out your changes.
You can restart your local server all you want, ```lt``` is smart enough to detect this and reconnect once it is back.
## API ##
The localtunnel client is also usable through an API (test integration, automation, etc)
```javascript
var lt_client = require('localtunnel').client;
var client = lt_client.connect({
    // the localtunnel server
    host: 'http://localtunnel.com',
    // your local application port
    port: 12345
});
// when you are assigned a url
client.on('url', function(url) {
    // you can now make http requests to the url
    // they will be proxied to your local server on port [12345]
});
client.on('error', function(err) {
    // uh oh!
});
```
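As a usage sketch building on the README example above (not part of the package docs): once the `url` event fires, the tunnel can be exercised directly with Node's `http` module. The host and port values are the same placeholders used above, and an app is assumed to already be listening on the local port.

```javascript
var http = require('http');
var lt_client = require('localtunnel').client;

var client = lt_client.connect({
    host: 'http://localtunnel.com',
    // assumes an app is already listening on this local port
    port: 12345
});

client.on('url', function(url) {
    // requests to the public url are proxied back to localhost:12345
    http.get(url, function(res) {
        console.log('tunnel answered with status: %d', res.statusCode);
    });
});

client.on('error', function(err) {
    // the client retries on its own, so logging is usually enough here
    console.error(err);
});
```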
server.js
@@ -6,17 +6,3 @@
var url = require('url');
var FreeList = require('freelist').FreeList;
var argv = require('optimist')
.usage('Usage: $0 --port [num]')
.options('port', {
default: '80',
describe: 'listen on this port for outside requests'
})
.argv;
if (argv.help) {
require('optimist').showHelp();
process.exit();
}
// here be dragons
@@ -27,15 +13,8 @@ var HTTPParser = process.binding('http_parser').HTTPParser;
// vendor
var log = require('book');
var chars = 'abcdefghiklmnopqrstuvwxyz';
function rand_id() {
var randomstring = '';
for (var i=0; i<4; ++i) {
var rnum = Math.floor(Math.random() * chars.length);
randomstring += chars[rnum];
}
// local
var rand_id = require('./lib/rand_id');
return randomstring;
}
var server = http.createServer();
@@ -46,5 +25,3 @@
// id -> list of sockets waiting for a valid response
var wait_list = {};
// available parsers
var parsers = http.parsers;
@@ -58,14 +35,17 @@
var current = clients[socket.subdomain].current;
var response_socket = socket.respond_socket;
if (!response_socket) {
log.error('no response socket assigned for http response from backend');
return;
}
if (!current) {
log.error('no current for http response from backend');
// pass the response from our client back to the requesting socket
response_socket.write(d.slice(start, end));
if (socket.for_websocket) {
return;
}
// send the goodies
current.write(d.slice(start, end));
// invoke parsing so we know when all the goodies have been sent
var parser = current.out_parser;
// invoke parsing so we know when the response is complete
var parser = response_socket.out_parser;
parser.socket = socket;
@@ -75,3 +55,3 @@
if (ret instanceof Error) {
debug('parse error');
log.error(ret);
freeParser(parser, req);
@@ -107,7 +87,4 @@ socket.destroy(ret);
var for_client = false;
var client_id;
var request;
// parser handles incoming requests for the socket
// the request is what lets us know if we proxy or not
var parser = parsers.alloc();
@@ -117,2 +94,9 @@ parser.socket = socket;
function our_request(req) {
var res = new ServerResponse(req);
res.assignSocket(socket);
self.emit('request', req, res);
return;
}
// a full request is complete
@@ -123,42 +107,44 @@ // we wait for the response from the server
log.trace('request', req.url);
request = req;
for_client = false;
// default is that the data is not for the client
delete parser.sock;
delete parser.buffer;
delete parser.client;
var hostname = req.headers.host;
if (!hostname) {
log.trace('no hostname: %j', req.headers);
// normal processing if not proxy
var res = new ServerResponse(req);
return our_request(req);
}
// TODO(shtylman) skip favicon for now, it caused problems
if (req.url === '/favicon.ico') {
return;
}
var match = hostname.match(/^([a-z]{4})[.].*/);
if (!match) {
return our_request(req);
}
res.assignSocket(parser.socket);
self.emit('request', req, res);
return;
var client_id = match[1];
var client = clients[client_id];
// requesting a subdomain that doesn't exist
if (!client) {
return socket.end();
}
var match = hostname.match(/^([a-z]{4})[.].*/);
parser.client = client;
if (!match) {
// normal processing if not proxy
var res = new ServerResponse(req);
// assigned socket for the client
var sock = client.sockets.shift();
// TODO(shtylman) skip favicon for now, it caused problems
if (req.url === '/favicon.ico') {
return;
}
res.assignSocket(parser.socket);
self.emit('request', req, res);
// no free sockets, queue
if (!sock) {
parser.buffer = true;
return;
}
client_id = match[1];
for_client = true;
// for tcp proxying
parser.sock = sock;
// set who we will respond back to
sock.respond_socket = socket;
var out_parser = parsers.alloc();
@@ -168,4 +154,5 @@ out_parser.reinitialize(HTTPParser.RESPONSE);
// we have a response
out_parser.onIncoming = function(res) {
// we have completed a response
// the tcp socket is free again
out_parser.onIncoming = function (res) {
res.on('end', function() {
@@ -177,13 +164,26 @@ log.trace('done with response for: %s', req.url);
var next = wait_list[client_id].shift();
// unset the response
delete sock.respond_socket;
clients[client_id].current = next;
var next = client.waiting.shift();
if (!next) {
// return socket to available
client.sockets.push(sock);
return;
}
// write original bytes that we held cause client was busy
clients[client_id].write(next.queue);
// reuse avail socket for next connection
sock.respond_socket = next;
// needed to know when this response will be done
out_parser.reinitialize(HTTPParser.RESPONSE);
next.out_parser = out_parser;
// write original bytes we held cause we were busy
sock.write(next.queue);
// continue with other bytes
next.resume();
return;
});
@@ -196,2 +196,5 @@ };
socket.ondata = function(d, start, end) {
// run through request parser to determine if we should pass to tcp
// onIncoming will be run before this returns
var ret = parser.execute(d, start, end - start);
@@ -201,3 +204,3 @@
if (ret instanceof Error) {
debug('parse error');
log.error(ret);
socket.destroy(ret);
@@ -207,48 +210,64 @@ return;
// only write data if previous request to this client is done?
log.trace('%s %s', parser.incoming && parser.incoming.upgrade, for_client);
// websocket stuff
if (parser.incoming && parser.incoming.upgrade) {
log.trace('upgrade request');
// what if the subdomains are treated differently
// as individual channels to the backend if available?
// how can I do that?
parser.finish();
if (parser.incoming && parser.incoming.upgrade) {
// websocket shit
}
var hostname = parser.incoming.headers.host;
// wtf do you do with upgraded connections?
var match = hostname.match(/^([a-z]{4})[.].*/);
if (!match) {
return our_request(req);
}
// forward the data to the backend
if (for_client) {
var client_id = match[1];
var client = clients[client_id];
// requesting a subdomain that doesn't exist
if (!client) {
return;
var sock = client.sockets.shift();
sock.respond_socket = socket;
sock.for_websocket = true;
socket.ondata = function(d, start, end) {
sock.write(d.slice(start, end));
};
socket.end = function() {
log.trace('websocket end');
delete sock.respond_socket;
client.sockets.push(sock);
}
// if the client is already processing something
// then new connections need to go into pause mode
// and when they are revived, then they can send data along
if (client.current && client.current !== socket) {
log.trace('pausing', request.url);
// prevent new data from gathering for this connection
// we are waiting for a response to a previous request
socket.pause();
sock.write(d.slice(start, end));
var copy = Buffer(end - start);
d.copy(copy, 0, start, end);
socket.queue = copy;
return;
}
wait_list[client_id].push(socket);
// if no available socket, buffer the request for later
if (parser.buffer) {
return;
}
// pause any further data on this socket
socket.pause();
// this socket needs to receive responses
client.current = socket;
// copy the current data since we have already received it
var copy = Buffer(end - start);
d.copy(copy, 0, start, end);
socket.queue = copy;
// send through tcp tunnel
client.write(d.slice(start, end));
// add socket to queue
parser.client.waiting.push(socket);
return;
}
if (!parser.sock) {
return;
}
// assert, respond socket should be set
// send through tcp tunnel
// responses will go back to the respond_socket
parser.sock.write(d.slice(start, end));
};
@@ -260,3 +279,3 @@
if (ret instanceof Error) {
log.trace('parse error');
log.error(ret);
socket.destroy(ret);
@@ -288,4 +307,8 @@ return;
res.end();
return;
}
// at this point, the client is requesting a new tunnel setup
// either generate an id or use the one they requested
var match = req.url.match(/\/([a-z]{4})?/);
@@ -303,14 +326,14 @@
var id = requested_id || rand_id();
if (wait_list[id]) {
// new id
id = rand_id();
}
// generate new shit for client
if (wait_list[id]) {
wait_list[id].forEach(function(waiting) {
waiting.end();
});
}
// maximum number of tcp connections the client can setup
// each tcp channel allows for more parallel requests
var max_tcp_sockets = 4;
// sockets is a list of available sockets for the connection
// waiting is?
var client = clients[id] = {
sockets: [],
waiting: []
};
var client_server = net.createServer();
@@ -324,3 +347,8 @@ client_server.listen(function() {
res.writeHead(200, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ url: url, id: id, port: port }));
res.end(JSON.stringify({
url: url,
id: id,
port: port,
max_conn_count: max_tcp_sockets
}));
});
@@ -333,22 +361,43 @@
// no longer accepting connections for this id
client_server.on('close', function() {
delete clients[id];
});
var count = 0;
client_server.on('connection', function(socket) {
// who the info should route back to
socket.subdomain = id;
// no more socket connections allowed
if (count++ >= max_tcp_sockets) {
return socket.end();
}
log.trace('new connection for id: %s', id);
// multiplexes socket data out to clients
socket.ondata = socketOnData;
// no need to close the client server
clearTimeout(conn_timeout);
log.trace('new connection for id: %s', id);
clients[id] = socket;
wait_list[id] = [];
// add socket to pool for this id
var idx = client.sockets.push(socket) - 1;
socket.on('end', function() {
delete clients[id];
socket.on('close', function(had_error) {
count--;
client.sockets.splice(idx, 1);
// no more sockets for this ident
if (client.sockets.length === 0) {
delete clients[id];
}
});
// close will be emitted after this
socket.on('error', function(err) {
log.error(err);
});
});
client_server.on('err', function(err) {
client_server.on('error', function(err) {
log.error(err);
@@ -358,5 +407,3 @@ });
server.listen(argv.port, function() {
log.info('server listening on port: %d', server.address().port);
});
module.exports = server;
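With the optimist flag parsing removed and `module.exports = server;` added at the bottom, the 0.0.2 server can be started programmatically — a minimal sketch, assuming you are inside the package root (the `"start": "./bin/server"` script presumably handles the port argument in normal use):

```javascript
// hypothetical embedding of the exported server, e.g. in a test harness
var server = require('./server');

// pick any free port; the exported object is the http.Server created above
server.listen(8000, function() {
    console.log('localtunnel server listening on port: %d', server.address().port);
});
```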
Major refactor
Supply chain risk: Package has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include significant changes.