Socket
Socket
Sign in · Demo · Install

http2

Package Overview
Dependencies
0
Maintainers
1
Versions
44
Alerts
File Explorer

Advanced tools

Install Socket

Detect and block malicious and high-risk dependencies

Install

Comparing version 0.3.1 to 0.4.0

11

HISTORY.md
Version history
===============
### 0.4.0 (2013-09-09) ###
* Upgrade to the latest draft: [draft-ietf-httpbis-http2-06][draft-06]
* Support for HTTP trailers
* Support for TLS SNI (Server Name Indication)
* Improved stream scheduling algorithm
* [Blog post](http://gabor.molnar.es/blog/2013/09/09/gsoc-week-number-12/)
* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.4.0.tar.gz)
[draft-06]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-06
### 0.3.1 (2013-09-03) ###

@@ -5,0 +16,0 @@

948

lib/compressor.js

@@ -1,87 +0,223 @@

// HTTP/2 compression is implemented by two [Transform Stream][1] subclasses that operate in
// [object mode][2]: the Compressor and the Decompressor. These provide a layer between the
// [framer](framer.html) and the [connection handling component](connection.html) that
// generates/parses binary header data.
// The implementation of the [HTTP/2 Header Compression][http2-compression] spec is separated from
// the 'integration' part which handles HEADERS and PUSH_PROMISE frames. The compression itself is
// implemented in the first part of the file, and consists of three classes: `HeaderTable`,
// `HeaderSetDecompressor` and `HeaderSetCompressor`. The two latter classes are
// [Transform Stream][node-transform] subclasses that operate in [object mode][node-objectmode].
// These transform chunks of binary data into `[name, value]` pairs and vice versa, and store their
// state in `HeaderTable` instances.
//
// Compression functionality is separated from the integration part. The latter is implemented in
// the last part of the file, while the larger part of the file is an implementation of the [HTTP/2
// Header Compression][3] spec. Both Compressor and Decompressor store their compression related
// state in CompressionContext objects. It is always accessed using methods that guarantee that
// it remains in a valid state.
// The 'integration' part is also implemented by two [Transform Stream][node-transform] subclasses
// that operate in [object mode][node-objectmode]: the `Compressor` and the `Decompressor`. These
// provide a layer between the [framer](framer.html) and the
// [connection handling component](connection.html).
//
// [1]: http://nodejs.org/api/stream.html#stream_class_stream_transform
// [2]: http://nodejs.org/api/stream.html#stream_new_stream_readable_options
// [3]: http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-00
// [node-transform]: http://nodejs.org/api/stream.html#stream_class_stream_transform
// [node-objectmode]: http://nodejs.org/api/stream.html#stream_new_stream_readable_options
// [http2-compression]: http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-03
var Transform = require('stream').Transform;
exports.CompressionContext = CompressionContext;
exports.HeaderTable = HeaderTable;
exports.HeaderSetCompressor = HeaderSetCompressor;
exports.HeaderSetDecompressor = HeaderSetDecompressor;
exports.Compressor = Compressor;
exports.Decompressor = Decompressor;
exports.Compressor = Compressor;
// Compression Context
// ===================
var TransformStream = require('stream').Transform;
var assert = process.env.HTTP2_ASSERT ? require('assert') : function noop() {};
var util = require('util');
// A `CompressionContext` consists of the following tables:
// Header compression
// ==================
// The HeaderTable class
// ---------------------
// The [Header Table][headertable] is a component used to associate headers to index values. It is
// basically an ordered list of `[name, value]` pairs, so it's implemented as a subclass of `Array`.
// [headertable]: http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-03#section-3.1.2
// Builds a header table from an array of [name, value] pairs. The result is
// an ordinary Array augmented with bookkeeping fields (the "subclass Array by
// decoration" trick): entries are created with entryFromPair, the byte-size
// limit defaults to DEFAULT_HEADER_TABLE_LIMIT, and add() is copied onto the
// instance so it is callable on the decorated array.
function HeaderTable(log, table, limit) {
  var headerTable = table.map(entryFromPair);
  headerTable._log = log;
  headerTable._limit = limit || DEFAULT_HEADER_TABLE_LIMIT;
  headerTable._size = tableSize(headerTable);
  headerTable.add = HeaderTable.prototype.add;
  return headerTable;
}
// There are few more sets that are needed for the compression/decompression process that are all
// subsets of the Header Table, and are implemented as flags on header table entries:
//
// * Header Table (`this._table`) that is limited in size (`this._limit`)
// * Reference Set (`this._reference`)
// * Working Set (`this._working`)
// * [Reference Set][referenceset]: contains a group of headers used as a reference for the
// differential encoding of a new set of headers. (`reference` flag)
// * Emitted headers: the headers that are already emitted as part of the current decompression
// process (not part of the spec, `emitted` flag)
// * Headers to be kept: headers that should not be removed as the last step of the encoding process
// (not part of the spec, `keep` flag)
//
// Header Table and Reference Set entries are `[name, value]` pairs (where both are strings), while
// Working Set entries are objects with two properties: `index` (a number) and `pair` (a pair).
// [referenceset]: http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-03#section-3.1.3
// Turns a [name, value] pair into a header table entry: a copy of the pair
// carrying the three bookkeeping flags (reference / emitted / keep, all
// initially false) and a cached byte size used for table size accounting.
function entryFromPair(pair) {
  var tableEntry = pair.slice();
  tableEntry.reference = false;
  tableEntry.emitted = false;
  tableEntry.keep = false;
  tableEntry._size = size(tableEntry);
  return tableEntry;
}
// The encoder decides how to update the header table and as such can control how much memory is
// used by the header table. To limit the memory requirements on the decoder side, the header table
// size is bounded.
//
// There are only two methods that modifies the state of the tables: `reinitialize()` and
// `execute(command)`.
// * The default header table size limit is 4096 bytes.
// * The size of an entry is defined as follows: the size of an entry is the sum of its name's
// length in bytes, of its value's length in bytes and of 32 bytes.
// * The size of a header table is the sum of the size of its entries.
var DEFAULT_HEADER_TABLE_LIMIT = 4096;
function CompressionContext(log, table, limit) {
this._log = log;
this._table = table.slice();
this._limit = limit || DEFAULT_HEADER_TABLE_LIMIT;
this._reference = [];
this._working = [];
// Returns the size of a header table entry as defined by the compression
// spec: the UTF-8 byte length of its name plus that of its value plus a
// fixed 32-byte structural overhead.
// Uses Buffer.byteLength() to measure the encoded length directly instead
// of allocating a throwaway Buffer via the deprecated new Buffer(...)
// constructor; the measured string is the same concatenation as before,
// so the computed value is unchanged.
function size(entry) {
  return Buffer.byteLength(entry[0] + entry[1], 'utf8') + 32;
}
// The `equal(pair1, pair2)` static method decides if two headers are considered equal. Name
// comparison is case insensitive while value comparison is case sensitive.
// Decides whether two [name, value] pairs denote the same header.
// Header names compare case-insensitively; values compare case-sensitively.
CompressionContext.equal = function(pair1, pair2) {
  var sameName = pair1[0].toLowerCase() === pair2[0].toLowerCase();
  if (!sameName) {
    return false;
  }
  return pair1[1] === pair2[1];
};
// Computes the total size of a header table by summing the cached _size
// field of every entry.
function tableSize(table) {
  return table.reduce(function(total, entry) {
    return total + entry._size;
  }, 0);
}
// `getWorkingSet()` returns the current working set as an array of `[name, value]` pairs.
CompressionContext.prototype.getWorkingSet = function getWorkingSet() {
return this._working.map(function(entry) {
return entry.pair;
});
// The `add(index, entry)` can be used to [manage the header table][tablemgmt]:
// [tablemgmt]: http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-03#section-3.2.4
//
// * if `index` is `Infinity` it pushes the new `entry` at the end of the table
// * otherwise, it replaces the entry with the given `index` with the new `entry`
// * before doing such a modification, it has to be ensured that the header table size will stay
// lower than or equal to the header table size limit. To achieve this, repeatedly, the first
// entry of the header table is removed, until enough space is available for the modification.
// Adds `entry` to the table: `Infinity` appends; any other index replaces the
// entry at that position (after adjusting for evictions; a negative adjusted
// index prepends). Entries are evicted from the front until the size limit can
// accommodate the new entry. Returns the array of evicted entries so the
// caller can react to the drops.
HeaderTable.prototype.add = function(index, entry) {
// The size the table may be at most *before* adding the new entry.
var limit = this._limit - entry._size;
var droppedEntries = [];
// Evict the oldest entries (front of the table) until there is room.
while ((this._size > limit) && (this.length > 0)) {
var dropped = this.shift();
this._size -= dropped._size;
droppedEntries.push(dropped);
}
// If `limit` is negative the entry alone exceeds the table limit; the table
// is then left as evicted above and nothing is inserted.
if (this._size <= limit) {
// Shift the target position by the number of entries removed in front of it.
index -= droppedEntries.length;
if (index < 0) {
this.unshift(entry);
} else {
this.splice(index, 1, entry); // this is like push() if index is Infinity
}
this._size += entry._size;
}
return droppedEntries;
};
// `reinitialize()` must be called between parsing/generating header blocks.
CompressionContext.prototype.reinitialize = function reinitialize() {
var self = this;
// Initial header tables
// ---------------------
// * It first executes the steps needed to *end the processing of the previous block*.
// The new reference set of headers is computed by removing from the working set all the headers
// that are not present in the header table.
this._reference = this._working.filter(function(entry) {
return self._table.indexOf(entry.pair) !== -1;
}).map(function(entry) {
return entry.pair;
});
// ### [Initial request table][requesttable] ###
// [requesttable]: http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-03#appendix-B.1
// Initial header table contents for the *request* direction. Entries are
// [name, value] pairs; an empty-string value means the entry carries only a
// useful name. NOTE(review): ':method' is listed with lowercase 'get' here —
// confirm this matches the draft's Appendix B.1 casing.
HeaderTable.initialRequestTable = [
[ ':scheme' , 'http' ],
[ ':scheme' , 'https' ],
[ ':host' , '' ],
[ ':path' , '/' ],
[ ':method' , 'get' ],
[ 'accept' , '' ],
[ 'accept-charset' , '' ],
[ 'accept-encoding' , '' ],
[ 'accept-language' , '' ],
[ 'cookie' , '' ],
[ 'if-modified-since' , '' ],
[ 'user-agent' , '' ],
[ 'referer' , '' ],
[ 'authorization' , '' ],
[ 'allow' , '' ],
[ 'cache-control' , '' ],
[ 'connection' , '' ],
[ 'content-length' , '' ],
[ 'content-type' , '' ],
[ 'date' , '' ],
[ 'expect' , '' ],
[ 'from' , '' ],
[ 'if-match' , '' ],
[ 'if-none-match' , '' ],
[ 'if-range' , '' ],
[ 'if-unmodified-since' , '' ],
[ 'max-forwards' , '' ],
[ 'proxy-authorization' , '' ],
[ 'range' , '' ],
[ 'via' , '' ]
];
// * Then *prepares the processing of the next block*.
// The reference set of headers is interpreted into the working set of headers: for each header
// in the reference set, an entry is added to the working set, containing the header name, its
// value, and its current index in the header table.
this._working = this._reference.map(function(pair) {
var index = self._table.indexOf(pair);
return { index: index, pair: pair };
});
// ### [Initial response table][responsetable] ###
// [responsetable]: http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-03#appendix-B.2
// Initial header table contents for the *response* direction. Entries are
// [name, value] pairs; an empty-string value means the entry carries only a
// useful name.
HeaderTable.initialResponseTable = [
[ ':status' , '200' ],
[ 'age' , '' ],
[ 'cache-control' , '' ],
[ 'content-length' , '' ],
[ 'content-type' , '' ],
[ 'date' , '' ],
[ 'etag' , '' ],
[ 'expires' , '' ],
[ 'last-modified' , '' ],
[ 'server' , '' ],
[ 'set-cookie' , '' ],
[ 'vary' , '' ],
[ 'via' , '' ],
[ 'access-control-allow-origin' , '' ],
[ 'accept-ranges' , '' ],
[ 'allow' , '' ],
[ 'connection' , '' ],
[ 'content-disposition' , '' ],
[ 'content-encoding' , '' ],
[ 'content-language' , '' ],
[ 'content-location' , '' ],
[ 'content-range' , '' ],
[ 'link' , '' ],
[ 'location' , '' ],
[ 'proxy-authenticate' , '' ],
[ 'refresh' , '' ],
[ 'retry-after' , '' ],
[ 'strict-transport-security' , '' ],
[ 'transfer-encoding' , '' ],
[ 'www-authenticate' , '' ]
];
// The HeaderSetDecompressor class
// -------------------------------
// A `HeaderSetDecompressor` instance is a transform stream that can be used to *decompress a
// single header set*. Its input is a stream of binary data chunks and its output is a stream of
// `[name, value]` pairs.
//
// Currently, it is not a proper streaming decompressor implementation, since it buffers its input
// until the end of the stream, and then processes the whole header block at once.
// HeaderSetDecompressor is an object-mode Transform stream subclass.
util.inherits(HeaderSetDecompressor, TransformStream);
// Creates a decompressor for a single header set. `table` is the shared
// header table instance; incoming binary chunks are buffered in `_chunks`
// and processed together once the input stream ends.
// NOTE(review): the child logger is tagged component 'compressor' even
// though this is the decompressor — possibly an intentional shared tag;
// confirm against the logging conventions of the rest of the module.
function HeaderSetDecompressor(log, table) {
TransformStream.call(this, { objectMode: true });
this._log = log.child({ component: 'compressor' });
this._table = table;
this._chunks = [];
}
// `_transform` is the implementation of the [corresponding virtual function][_transform] of the
// TransformStream class. It collects the data chunks for later processing.
// [_transform]: http://nodejs.org/api/stream.html#stream_transform_transform_chunk_encoding_callback
// TransformStream hook: buffers each incoming binary chunk; the actual
// decompression of the whole header block happens later, in one pass.
HeaderSetDecompressor.prototype._transform = function _transform(data, enc, done) {
  this._chunks.push(data);
  done();
};
// `execute(command)` executes the given command ([header representation][1]): updates the Header
// Table and the Working Set.
// [1]: http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-00#section-3.3
// `execute(rep)` executes the given [header representation][representation].
// [representation]: http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-03#section-3.1.5
// The *JavaScript object representation* of a command:
// The *JavaScript object representation* of a header representation:
//

@@ -104,281 +240,218 @@ // {

// { name: 'A', value: 'Z', index: 123 } // substitution indexing
HeaderSetDecompressor.prototype._execute = function _execute(rep) {
this._log.trace({ key: rep.name, value: rep.value, index: rep.index },
'Executing header representation');
CompressionContext.prototype.execute = function execute(command) {
this._log.trace({ key: command.name, value: command.value, index: command.index },
'Executing a header representation');
var index, entry, pair;
var index, pair;
// * An _indexed representation_ corresponding to an entry _present_ in the reference set
// entails the following actions:
// * The entry is removed from the reference set.
// * An _indexed representation_ corresponding to an entry _not present_ in the reference set
// entails the following actions:
// * The header corresponding to the entry is emitted.
// * The entry is added to the reference set.
if (typeof rep.value === 'number') {
index = rep.value;
entry = this._table[index];
// * For an indexed representation, it checks whether the index is present in the working set.
// If true, the corresponding entry is removed from the working set. If several entries correspond
// to this encoded index, all these entries are removed from the working set. If the index is not
// present in the working set, it is used to retrieve the corresponding header from the Header
// Table, and a new entry is added to the working set representing this header.
if (typeof command.value === 'number') {
index = command.value;
var filtered = this._working.filter(function(entry) {
return entry.index !== index;
});
if (filtered.length === this._working.length) {
pair = this._table[index];
this._working.push({ index: index, pair: pair });
if (entry.reference) {
entry.reference = false;
} else {
this._working = filtered;
entry.reference = true;
entry.emitted = true;
pair = entry.slice();
this.push(pair);
}
}
// * For a literal representation, a new entry is added to the working set representing this
// header. If the literal representation specifies that the header is to be indexed, the header is
// added accordingly to the header table, and its index is included in the entry in the working
// set. Otherwise, the entry in the working set contains an undefined index.
// * A _literal representation_ that is _not added_ to the header table entails the following
// action:
// * The header is emitted.
// * A _literal representation_ that is _added_ to the header table entails the following further
// actions:
// * The header is added to the header table, at the location defined by the representation.
// * The new entry is added to the reference set.
else {
if (typeof command.name === 'number') {
pair = [this._table[command.name][0], command.value];
if (typeof rep.name === 'number') {
pair = [this._table[rep.name][0], rep.value];
} else {
pair = [command.name, command.value];
pair = [rep.name, rep.value];
}
if (command.index !== -1) {
if (command.index === Infinity) {
this._table.push(pair);
} else {
this._table.splice(command.index, 1, pair);
}
this._enforceSizeBound(); // TODO: The order of these two
index = this._table.indexOf(pair); // TODO: operation is not well defined!
index = rep.index;
if (index !== -1) {
entry = entryFromPair(pair);
entry.reference = true;
entry.emitted = true;
this._table.add(index, entry);
}
this._working.push({ index: index, pair: pair });
this.push(pair);
}
};
// `_isShadowed` determines the reachability of a given index in the Header Table. An entry in the
// Header Table is shadowed if there's an entry in the Working Set with the same ID.
CompressionContext.prototype._isShadowed = function(index) {
return this._working.some(function(entry) {
return entry.index === index;
});
};
// `_flush` is the implementation of the [corresponding virtual function][_flush] of the
// TransformStream class. The whole decompressing process is done in `_flush`. It gets called when
// the input stream is over.
// [_flush]: http://nodejs.org/api/stream.html#stream_transform_flush_callback
HeaderSetDecompressor.prototype._flush = function _flush(callback) {
var buffer = concat(this._chunks);
// `generateAddCommand` tries to find a compact command (header representation) for the given
// `[name, value]` pair that causes the decoder to add the given pair to the Working Set.
CompressionContext.prototype.generateAddCommand = function(pair) {
var equal = CompressionContext.equal.bind(null, pair);
var fullMatch = this._table.filter(equal);
if (fullMatch.length !== 0) {
var fullIndex = this._table.indexOf(fullMatch[0]);
if (!this._isShadowed(fullIndex)) {
return {
name: fullIndex,
value: fullIndex,
index: -1
};
}
// * processes the header representations
buffer.cursor = 0;
while (buffer.cursor < buffer.length) {
this._execute(HeaderSetDecompressor.header(buffer));
}
var name = pair[0].toLowerCase();
var nameMatch = this._table.filter(function(entry) {
return entry[0].toLowerCase() === name;
});
if (nameMatch.length !== 0) {
var nameIndex = this._table.indexOf(nameMatch[0]);
if (!this._isShadowed(nameIndex)) {
return {
name: nameIndex,
value: pair[1],
index: nameIndex
};
// * [emits the reference set](http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-03#section-3.2.2)
for (var index = 0; index < this._table.length; index++) {
var entry = this._table[index];
if (entry.reference && !entry.emitted) {
this.push(entry.slice());
}
entry.emitted = false;
}
return {
name: name,
value: pair[1],
index: Infinity
};
callback();
};
// `generateRemoveCommand` generates a command (an Indexed Header Representation) that causes the
// decoder to drop the given pair from the Working Set.
CompressionContext.prototype.generateRemoveCommand = function(pair) {
var match;
// The HeaderSetCompressor class
// -----------------------------
for (var i = 0; i < this._working.length; i++) {
if (this._working[i].pair === pair) {
match = this._working[i];
break;
}
}
return {
name: match.index,
value: match.index,
index: -1
};
};
// The header table size can be bounded so as to limit the memory requirements.
// The `_enforceSizeBound()` private method drops the entries that are over the limit
// (`this._limit`).
// A `HeaderSetCompressor` instance is a transform stream that can be used to *compress a single
// header set*. Its input is a stream of `[name, value]` pairs and its output is a stream of
// binary data chunks.
//
// The header table size is defined as the sum of the size of each entry of the table. The size
// of an entry is the sum of the length in bytes of its name, of value's length in bytes and of
// 32 bytes (for accounting for the entry structure overhead).
CompressionContext.prototype._enforceSizeBound = function() {
var table = this._table;
var size = 0;
for (var i = 0; i < table.length; i++) {
if (table[i].size === undefined) {
table[i].size = new Buffer(table[i][0] + table[i][1], 'utf8').length + 32;
}
size += table[i].size;
}
while (size > this._limit) {
var dropped = table.shift();
size -= dropped.size;
}
};
// [Decompression process](http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-00#section-3.4)
// =======================
// The decompression process is always done by a `Decompressor` object.
// It is a real streaming compressor, since it does not wait until the header set is complete.
//
// The compression related mutable state is stored in a contained `CompressionContext` object.
// The initial value of its Header Table depends on which side of the connection it is on.
function Decompressor(type, log) {
this._log = log.child({ component: 'decompressor' });
// The compression algorithm is (intentionally) not specified by the spec. Therefore, the current
// compression algorithm can probably be improved in the future.
var initialTable = (type === 'REQUEST') ? CompressionContext.initialRequestTable
: CompressionContext.initialResponseTable;
this._context = new CompressionContext(this._log, initialTable);
util.inherits(HeaderSetCompressor, TransformStream);
function HeaderSetCompressor(log, table) {
TransformStream.call(this, { objectMode: true });
this._initializeStream();
this._log = log.child({ component: 'compressor' });
this._table = table;
this.push = TransformStream.prototype.push.bind(this);
}
Decompressor.prototype = Object.create(Transform.prototype, { constructor: { value: Decompressor } });
// The `decompress` method takes a buffer, and returns the decoded header set.
//
// According to the spec, to ensure a correct decoding of a set of headers, the following steps or
// equivalent ones MUST be executed by the decoder.
Decompressor.prototype.decompress = function decompress(buffer) {
this._log.trace({ data: buffer }, 'Starting header decompression');
HeaderSetCompressor.prototype.send = function send(rep) {
this._log.trace({ key: rep.name, value: rep.value, index: rep.index },
'Emitting header representation');
// * First, upon starting the decoding of a new set of headers, the reference set of headers is
// interpreted into the working set of headers
this._context.reinitialize();
// * Then, the header representations are processed in their order of occurrence in the frame.
// The decoding process of the header representations are defined in the `execute(command)`
// method of the `CompressionContext` class.
buffer.cursor = 0;
while (buffer.cursor < buffer.length) {
this._context.execute(Decompressor.header(buffer));
if (!rep.chunks) {
rep.chunks = HeaderSetCompressor.header(rep);
}
rep.chunks.forEach(this.push);
};
// * When all the header representations have been processed, the working set contains all the
// headers of the set of headers.
var pairs = this._context.getWorkingSet();
// `_transform` is the implementation of the [corresponding virtual function][_transform] of the
// TransformStream class. It processes the input headers one by one:
// [_transform]: http://nodejs.org/api/stream.html#stream_transform_transform_chunk_encoding_callback
HeaderSetCompressor.prototype._transform = function _transform(pair, encoding, callback) {
var name = pair[0].toLowerCase();
var value = pair[1];
var entry, rep;
// * The working set entries are `[name, value]` pairs. As a last step, these are converted to the
// usual header set format used in node.js: `{ name1: value1, name2: [value2, value3], ... }`
var headers = {};
for (var i = 0; i < pairs.length; i++) {
var name = pairs[i][0];
var value = pairs[i][1];
if (name in headers) {
if (headers[name] instanceof Array) {
headers[name].push(value);
} else {
headers[name] = [headers[name], value];
// * tries to find full (name, value) or name match in the header table
var nameMatch = -1, fullMatch = -1;
for (var index = 0; index < this._table.length; index++) {
entry = this._table[index];
if (entry[0] === name) {
if (entry[1] === value) {
fullMatch = index;
break;
} else if (nameMatch === -1) {
nameMatch = index;
}
} else {
headers[name] = value;
}
}
this._log.trace({ headers: headers }, 'Header decompression is done');
return headers;
};
// Compression process
// ===================
// * if there's full match, it will be an indexed representation (or more than one) depending
// on its presence in the reference, the emitted and the keep set
if (fullMatch !== -1) {
rep = { name: fullMatch, value: fullMatch, index: -1 };
// The decompression process is always done by a `Compressor` object.
//
// The compression related mutable state is stored in a contained `CompressionContext` object.
// The initial value of its Header Table depends on which side of the connection it is on.
function Compressor(type, log) {
this._log = log.child({ component: 'compressor' });
if (!entry.reference) {
this.send(rep);
entry.reference = true;
entry.emitted = true;
}
var initialTable = (type === 'REQUEST') ? CompressionContext.initialRequestTable
: CompressionContext.initialResponseTable;
this._context = new CompressionContext(this._log, initialTable);
else if (entry.keep) {
this.send(rep);
this.send(rep);
this.send(rep);
this.send(rep);
entry.keep = false;
entry.emitted = true;
}
this._initializeStream();
}
Compressor.prototype = Object.create(Transform.prototype, { constructor: { value: Compressor } });
else if (entry.emitted) {
this.send(rep);
this.send(rep);
}
// The `compress` method takes a header set and returns an array of buffers containing the
// encoded binary data.
//
// The inverse of the decoding process goes follows:
Compressor.prototype.compress = function compress(headers) {
this._log.trace({ headers: headers }, 'Starting header compression');
var i;
// * First, the usual node.js header set format (`{ name1: value1, name2: [value2, value3], ... }`)
// has to be converted to `[name, value]` pairs.
var pairs = [];
for (var name in headers) {
var value = headers[name];
if (value instanceof Array) {
for (i = 0; i< value.length; i++) {
pairs.push([String(name), String(value[i])]);
}
} else {
pairs.push([String(name), String(value)]);
else {
entry.keep = true;
}
}
// * Before generating commands that make the working set equal to the generated pair set,
// the reference set and the working set has to be reinitialized.
this._context.reinitialize();
var working = this._context.getWorkingSet(), command, commands = [];
// * otherwise, it will be a literal representation (with a name index if there's a name match)
else {
entry = entryFromPair(pair);
entry.emitted = true;
// * The first commands remove the unneeded headers from the working set.
for (i = 0; i < working.length; i++) {
if (!pairs.some(CompressionContext.equal.bind(null, working[i]))) {
command = this._context.generateRemoveCommand(working[i]);
this._context.execute(command);
commands.push(command);
var insertIndex;
if (entry._size > this._table._limit / 2) {
insertIndex = -1;
} else if (nameMatch !== -1) {
insertIndex = nameMatch;
} else {
insertIndex = Infinity;
}
}
// * Then the headers that are not present in the working set yet are added.
for (i = 0; i < pairs.length; i++) {
if (!working.some(CompressionContext.equal.bind(null, pairs[i]))) {
command = this._context.generateAddCommand(pairs[i]);
this._context.execute(command);
commands.push(command);
if (insertIndex !== -1) {
entry.reference = true;
var droppedEntries = this._table.add(insertIndex, entry);
for (index = 0; index < droppedEntries.length; index++) {
var dropped = droppedEntries[index];
if (dropped.keep) {
rep = { name: index, value: index, index: -1 };
this.send(rep);
this.send(rep);
}
}
}
this.send({ name: (nameMatch !== -1) ? nameMatch : name, value: value, index: insertIndex });
}
// * The last step is the serialization of the generated commands.
var buffers = [];
for (i = 0; i < commands.length; i++) {
buffers.push(Compressor.header(commands[i]));
callback();
};
// `_flush` is the implementation of the [corresponding virtual function][_flush] of the
// TransformStream class. It gets called when there's no more header to compress. The final step:
// [_flush]: http://nodejs.org/api/stream.html#stream_transform_flush_callback
HeaderSetCompressor.prototype._flush = function _flush(callback) {
// * removing entries from the header set that are not marked to be kept or emitted
for (var index = 0; index < this._table.length; index++) {
var entry = this._table[index];
if (entry.reference && !entry.keep && !entry.emitted) {
this.send({ name: index, value: index, index: -1 });
entry.reference = false;
}
entry.keep = false;
entry.emitted = false;
}
var buffer = concat(Array.prototype.concat.apply([], buffers)); // [[bufs]] -> [bufs] -> buf
this._log.trace({ data: buffer }, 'Header compression is done');
return buffer;
callback();
};
// [Detailed Format](http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-00#section-4)
// =================
// [Detailed Format](http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-03#section-4)
// -----------------
// Integer representation
// ----------------------
// ### Integer representation ###
//

@@ -396,3 +469,3 @@ // The algorithm to represent an integer I is as follows:

Compressor.integer = function writeInteger(I, N) {
HeaderSetCompressor.integer = function writeInteger(I, N) {
var limit = Math.pow(2,N) - 1;

@@ -438,3 +511,3 @@ if (I < limit) {

Decompressor.integer = function readInteger(buffer, N) {
HeaderSetDecompressor.integer = function readInteger(buffer, N) {
var limit = Math.pow(2,N) - 1;

@@ -459,4 +532,3 @@

// String literal representation
// -----------------------------
// ### String literal representation ###
//

@@ -470,10 +542,10 @@ // Literal **strings** can represent header names or header values. They are encoded in two parts:

Compressor.string = function writeString(str) {
HeaderSetCompressor.string = function writeString(str) {
var encodedString = new Buffer(str, 'utf8');
var encodedLength = Compressor.integer(encodedString.length, 0);
var encodedLength = HeaderSetCompressor.integer(encodedString.length, 0);
return encodedLength.concat(encodedString);
};
Decompressor.string = function readString(buffer) {
var length = Decompressor.integer(buffer, 0);
HeaderSetDecompressor.string = function readString(buffer) {
var length = HeaderSetDecompressor.integer(buffer, 0);
var str = buffer.toString('utf8', buffer.cursor, buffer.cursor + length);

@@ -484,7 +556,6 @@ buffer.cursor += length;

// [Header representations](http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-00#section-4.3)
// -----------------------
// ### Header representations ###
// The JavaScript object representation is described near the
// `CompressionContext.prototype.execute()` method definition.
// `HeaderTable.prototype.execute()` method definition.
//

@@ -531,3 +602,3 @@ // **All binary header representations** start with a prefix signaling the representation type and

Compressor.header = function writeHeader(header) {
HeaderSetCompressor.header = function writeHeader(header) {
var representation, buffers = [];

@@ -546,17 +617,17 @@

if (representation === representations.indexed) {
buffers.push(Compressor.integer(header.value, representation.prefix));
buffers.push(HeaderSetCompressor.integer(header.value, representation.prefix));
} else {
if (typeof header.name === 'number') {
buffers.push(Compressor.integer(header.name + 1, representation.prefix));
buffers.push(HeaderSetCompressor.integer(header.name + 1, representation.prefix));
} else {
buffers.push(Compressor.integer(0, representation.prefix));
buffers.push(Compressor.string(header.name));
buffers.push(HeaderSetCompressor.integer(0, representation.prefix));
buffers.push(HeaderSetCompressor.string(header.name));
}
if (representation === representations.literalSubstitution) {
buffers.push(Compressor.integer(header.index, 0));
buffers.push(HeaderSetCompressor.integer(header.index, 0));
}
buffers.push(Compressor.string(header.value));
buffers.push(HeaderSetCompressor.string(header.value));
}

@@ -569,3 +640,3 @@

Decompressor.header = function readHeader(buffer) {
HeaderSetDecompressor.header = function readHeader(buffer) {
var representation, header = {};

@@ -587,13 +658,13 @@

if (representation === representations.indexed) {
header.value = header.name = Decompressor.integer(buffer, representation.prefix);
header.value = header.name = HeaderSetDecompressor.integer(buffer, representation.prefix);
header.index = -1;
} else {
header.name = Decompressor.integer(buffer, representation.prefix) - 1;
header.name = HeaderSetDecompressor.integer(buffer, representation.prefix) - 1;
if (header.name === -1) {
header.name = Decompressor.string(buffer);
header.name = HeaderSetDecompressor.string(buffer);
}
if (representation === representations.literalSubstitution) {
header.index = Decompressor.integer(buffer, 0);
header.index = HeaderSetDecompressor.integer(buffer, 0);
} else if (representation === representations.literalIncremental) {

@@ -605,3 +676,3 @@ header.index = Infinity;

header.value = Decompressor.string(buffer);
header.value = HeaderSetDecompressor.string(buffer);
}

@@ -612,7 +683,7 @@

// The compression layer
// =====================
// Integration with HTTP/2
// =======================
// This section describes the interaction between the compressor/decompressor and the rest of the
// HTTP/2 implementation. The Compressor and the Decompressor makes up a layer between the
// HTTP/2 implementation. The `Compressor` and the `Decompressor` makes up a layer between the
// [framer](framer.html) and the [connection handling component](connection.html). They let most

@@ -637,40 +708,82 @@ // frames pass through, except HEADERS and PUSH_PROMISE frames. They convert the frames between

// The Compressor class
// --------------------
// The Compressor transform stream is basically stateless.
Compressor.prototype._initializeStream = function _initializeStream() {
Transform.call(this, { objectMode: true });
util.inherits(Compressor, TransformStream);
function Compressor(type, log) {
TransformStream.call(this, { objectMode: true });
this._log = log.child({ component: 'compressor' });
assert((type === 'REQUEST') || (type === 'RESPONSE'));
var initialTable = (type === 'REQUEST') ? HeaderTable.initialRequestTable
: HeaderTable.initialResponseTable;
this._table = new HeaderTable(this._log, initialTable);
}
// `compress` takes a header set, and compresses it using a new `HeaderSetCompressor` stream
// instance. This means that from now on, the advantages of streaming header encoding are lost,
// but the API becomes simpler.
Compressor.prototype.compress = function compress(headers) {
var compressor = new HeaderSetCompressor(this._log, this._table);
for (var name in headers) {
var value = headers[name];
if (value instanceof Array) {
for (var i = 0; i< value.length; i++) {
compressor.write([String(name), String(value[i])]);
}
} else {
compressor.write([String(name), String(value)]);
}
}
compressor.end();
var chunk, chunks = [];
while (chunk = compressor.read()) {
chunks.push(chunk);
}
return concat(chunks);
};
// When a `frame` arrives
Compressor.prototype._transform = function _transform(frame, encoding, done) {
// When it receives a HEADERS or PUSH_PROMISE frame
// * and it is a HEADERS or PUSH_PROMISE frame
// * it generates a header block using the compress method
// * cuts the header block into `chunks` that are not larger than `MAX_HTTP_PAYLOAD_SIZE`
// * for each chunk, it pushes out a chunk frame that is identical to the original, except
// the `data` property which holds the given chunk, the type of the frame which is always
// CONTINUATION except for the first frame, and the END_HEADERS/END_PUSH_STREAM flag that
// marks the last frame and the END_STREAM flag which is always false before the end
if (frame.type === 'HEADERS' || frame.type === 'PUSH_PROMISE') {
// * it generates a header block using the compress method
var buffer = this.compress(frame.headers);
// * cuts the header block into `chunks` that are not larger than `MAX_HTTP_PAYLOAD_SIZE`
var chunks = cut(buffer, MAX_HTTP_PAYLOAD_SIZE);
// * for each chunk, it pushes out a chunk frame that is identical to the original, except
// the `data` property which holds the given chunk, the END_HEADERS/END_PUSH_STREAM flag that
// marks the last frame and the END_STREAM flag which is always false before the end
for (var i = 0; i < chunks.length; i++) {
var flags = shallowCopy(frame.flags);
if (i === chunks.length - 1) {
flags['END_' + frame.type] = true;
var chunkFrame;
var first = (i === 0);
var last = (i === chunks.length - 1);
if (first) {
chunkFrame = util._extend({}, frame);
chunkFrame.flags = util._extend({}, frame.flags);
chunkFrame.flags['END_' + frame.type] = last;
} else {
flags['END_' + frame.type] = false;
flags['END_STREAM'] = false;
chunkFrame = {
type: 'CONTINUATION',
flags: { END_HEADERS: last },
stream: frame.stream
};
}
if (chunkFrame.type !== 'PUSH_PROMISE') {
chunkFrame.flags.END_STREAM = last && frame.END_STREAM;
}
chunkFrame.data = chunks[i];
this.push({
type: frame.type,
flags: flags,
stream: frame.stream,
priority: frame.priority,
promised_stream: frame.promised_stream,
data: chunks[i]
});
this.push(chunkFrame);
}
}
// Otherwise, the frame is forwarded without taking any action
// * otherwise, the frame is forwarded without taking any action
else {

@@ -683,2 +796,5 @@ this.push(frame);

// The Decompressor class
// ----------------------
// The Decompressor is a stateful transform stream, since it has to collect multiple frames first,

@@ -690,8 +806,41 @@ // and the decoding comes after unifying the payload of those frames.

// and `this._stream` respectively.
Decompressor.prototype._initializeStream = function _initializeStream() {
Transform.call(this, { objectMode: true });
util.inherits(Decompressor, TransformStream);
function Decompressor(type, log) {
TransformStream.call(this, { objectMode: true });
this._log = log.child({ component: 'compressor' });
assert((type === 'REQUEST') || (type === 'RESPONSE'));
var initialTable = (type === 'REQUEST') ? HeaderTable.initialRequestTable
: HeaderTable.initialResponseTable;
this._table = new HeaderTable(this._log, initialTable);
this._inProgress = false;
this._type = undefined;
this._stream = undefined;
this._frames = undefined;
this._base = undefined;
}
// `decompress` takes a full header block, and decompresses it using a new `HeaderSetDecompressor`
// stream instance. This means that from now on, the advantages of streaming header decoding are
// lost, but the API becomes simpler.
Decompressor.prototype.decompress = function decompress(block) {
var decompressor = new HeaderSetDecompressor(this._log, this._table);
decompressor.end(block);
var headers = {};
var pair;
while (pair = decompressor.read()) {
var name = pair[0];
var value = pair[1];
if (name in headers) {
if (headers[name] instanceof Array) {
headers[name].push(value);
} else {
headers[name] = [headers[name], value];
}
} else {
headers[name] = value;
}
}
return headers;
};

@@ -702,5 +851,5 @@

// * and the collection process is already `_inProgress`, the frame is simply stored, except if
// it's an illegal frame
// it's an illegal frame
if (this._inProgress) {
if ((frame.type !== this._type) || (frame.stream !== this._stream)) {
if ((frame.type !== 'CONTINUATION') || (frame.stream !== this._base.stream)) {
this._log.error('A series of HEADER frames were not continuous');

@@ -714,7 +863,6 @@ this.emit('error', 'PROTOCOL_ERROR');

// * and the collection process is not `_inProgress`, but the new frame's type is HEADERS or
// PUSH_PROMISE, a new collection process begins
// PUSH_PROMISE, a new collection process begins
else if ((frame.type === 'HEADERS') || (frame.type === 'PUSH_PROMISE')) {
this._inProgress = true;
this._type = frame.type;
this._stream = frame.stream;
this._base = frame;
this._frames = [frame];

@@ -728,5 +876,5 @@ }

// When the frame signals that it's the last in the series, the header block chunks are
// concatenated, the headers are decompressed, and a new frame gets pushed out with the
// decompressed headers.
// * When the frame signals that it's the last in the series, the header block chunks are
// concatenated, the headers are decompressed, and a new frame gets pushed out with the
// decompressed headers.
if (this._inProgress && (frame.flags.END_HEADERS || frame.flags.END_PUSH_PROMISE)) {

@@ -743,10 +891,3 @@ var buffer = concat(this._frames.map(function(frame) {

}
this.push({
type: frame.type,
flags: frame.flags,
stream: frame.stream,
priority: frame.priority,
promised_stream: frame.promised_stream,
headers: headers
});
this.push(util._extend({ headers: headers }, this._base));
this._inProgress = false;

@@ -758,86 +899,4 @@ }

// [Initial header names](http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-00#appendix-A)
// ======================
CompressionContext.initialRequestTable = [
[ ':scheme' , 'http' ],
[ ':scheme' , 'https' ],
[ ':host' , '' ],
[ ':path' , '/' ],
[ ':method' , 'get' ],
[ 'accept' , '' ],
[ 'accept-charset' , '' ],
[ 'accept-encoding' , '' ],
[ 'accept-language' , '' ],
[ 'cookie' , '' ],
[ 'if-modified-since' , '' ],
[ 'keep-alive' , '' ],
[ 'user-agent' , '' ],
[ 'proxy-connection' , '' ],
[ 'referer' , '' ],
[ 'accept-datetime' , '' ],
[ 'authorization' , '' ],
[ 'allow' , '' ],
[ 'cache-control' , '' ],
[ 'connection' , '' ],
[ 'content-length' , '' ],
[ 'content-md5' , '' ],
[ 'content-type' , '' ],
[ 'date' , '' ],
[ 'expect' , '' ],
[ 'from' , '' ],
[ 'if-match' , '' ],
[ 'if-none-match' , '' ],
[ 'if-range' , '' ],
[ 'if-unmodified-since' , '' ],
[ 'max-forwards' , '' ],
[ 'pragma' , '' ],
[ 'proxy-authorization' , '' ],
[ 'range' , '' ],
[ 'te' , '' ],
[ 'upgrade' , '' ],
[ 'via' , '' ],
[ 'warning' , '' ]
];
CompressionContext.initialResponseTable = [
[ ':status' , '200' ],
[ 'age' , '' ],
[ 'cache-control' , '' ],
[ 'content-length' , '' ],
[ 'content-type' , '' ],
[ 'date' , '' ],
[ 'etag' , '' ],
[ 'expires' , '' ],
[ 'last-modified' , '' ],
[ 'server' , '' ],
[ 'set-cookie' , '' ],
[ 'vary' , '' ],
[ 'via' , '' ],
[ 'access-control-allow-origin' , '' ],
[ 'accept-ranges' , '' ],
[ 'allow' , '' ],
[ 'connection' , '' ],
[ 'content-disposition' , '' ],
[ 'content-encoding' , '' ],
[ 'content-language' , '' ],
[ 'content-location' , '' ],
[ 'content-md5' , '' ],
[ 'content-range' , '' ],
[ 'link' , '' ],
[ 'location' , '' ],
[ 'p3p' , '' ],
[ 'pragma' , '' ],
[ 'proxy-authenticate' , '' ],
[ 'refresh' , '' ],
[ 'retry-after' , '' ],
[ 'strict-transport-security' , '' ],
[ 'trailer' , '' ],
[ 'transfer-encoding' , '' ],
[ 'warning' , '' ],
[ 'www-authenticate' , '' ]
];
// Helper functions
// ----------------
// ================

@@ -870,10 +929,1 @@ // Concatenate an array of buffers into a new buffer

}
// Shallow copy inspired by underscore's [clone](http://underscorejs.org/#clone)
function shallowCopy(object) {
var clone = {};
for (var key in object) {
clone[key] = object[key];
}
return clone;
}

@@ -46,5 +46,2 @@ var assert = process.env.HTTP2_ASSERT ? require('assert') : function noop() {};

// * settings management
this._initializeSettingsManagement(settings);
// * lifecycle management

@@ -56,2 +53,5 @@ this._initializeLifecycleManagement();

// * settings management
this._initializeSettingsManagement(settings);
// * multiplexing

@@ -100,6 +100,6 @@ }

// * streams are stored in two data structures:
// * `_streamsIn` is an id -> stream map of the streams that are allowed to receive frames.
// * `_streamsOut` is the list of all streams ordered by priority.
this._streamsIn = [];
this._streamsOut = [];
// * `_streamIds` is an id -> stream map of the streams that are allowed to receive frames.
// * `_streamPriorities` is a priority -> [stream] map of stream that allowed to send frames.
this._streamIds = [];
this._streamPriorities = [];

@@ -111,3 +111,3 @@ // * The next outbound stream ID and the last inbound stream id

// * Calling `_writeControlFrame` when there's an incoming stream with 0 as stream ID
this._streamsIn[0] = { upstream: { write: this._writeControlFrame.bind(this) } };
this._streamIds[0] = { upstream: { write: this._writeControlFrame.bind(this) } };

@@ -118,9 +118,5 @@ // * By default, the number of concurrent outbound streams is not limited. The `_streamLimit` can

this._streamLimit = Infinity;
this.on('SETTINGS_MAX_CONCURRENT_STREAMS', this._updateStreamLimit);
this.on('RECEIVING_SETTINGS_MAX_CONCURRENT_STREAMS', this._updateStreamLimit);
};
Connection.prototype._getIdOf = function _getIdOf(stream) {
return this._streamsIn.indexOf(stream);
};
// `_writeControlFrame` is called when there's an incoming frame in the `_control` stream. It

@@ -157,7 +153,7 @@ // broadcasts the message by creating an event on it.

// 1. var stream = new Stream(this._log);
// 2. this._enableReceiving(stream, id);
// 2. this._enableSending(stream);
// 2. this._allocateId(stream, id);
// 2. this._allocatePriority(stream);
// Enabling receiving
Connection.prototype._enableReceiving = function _enableReceiving(stream, id) {
// Allocating an ID to a stream
Connection.prototype._allocateId = function _allocateId(stream, id) {
// * initiated stream without definite ID

@@ -182,7 +178,7 @@ if (id === undefined) {

assert(!(id in this._streamsIn));
assert(!(id in this._streamIds));
// * adding to `this._streamsIn`
this._log.trace({ s: stream, stream_id: id }, 'Enabling receiving for a stream.');
this._streamsIn[id] = stream;
// * adding to `this._streamIds`
this._log.trace({ s: stream, stream_id: id }, 'Allocation ID for stream.');
this._streamIds[id] = stream;
this.emit('new_stream', stream, id);

@@ -193,6 +189,6 @@

// Enabling sending
Connection.prototype._enableSending = function _enableSending(stream) {
this._log.trace({ s: stream }, 'Enabling sending for a stream.');
this._insert(stream);
// Allocating a priority to a stream, and managing priority changes
Connection.prototype._allocatePriority = function _allocatePriority(stream) {
this._log.trace({ s: stream }, 'Allocation priority for stream.');
this._insert(stream, stream._priority);
stream.on('priority', this._reprioritize.bind(this, stream));

@@ -203,19 +199,20 @@ stream.upstream.on('readable', this.read.bind(this, 0));

// `_insert(stream)` inserts `stream` in `_streamsOut` in a place determined by `stream._priority`
Connection.prototype._insert = function _insert(stream) {
var streams = this._streamsOut;
var index = 0;
while ((index < streams.length) && (streams[index]._priority <= stream._priority)) {
index += 1;
Connection.prototype._insert = function _insert(stream, priority) {
if (priority in this._streamPriorities) {
this._streamPriorities[priority].push(stream);
} else {
this._streamPriorities[priority] = [stream];
}
streams.splice(index, 0, stream);
};
// `_reprioritize(stream)` moves `stream` to the apprioriate place in `_streamsOut` (according to
// its `_priority`)
Connection.prototype._reprioritize = function _reprioritize(stream) {
var index = this._streamsOut.indexOf(stream);
Connection.prototype._reprioritize = function _reprioritize(stream, priority) {
var bucket = this._streamPriorities[stream._priority];
var index = bucket.indexOf(stream);
assert(index !== -1);
this._streamsOut.splice(index, 1);
this._insert(stream);
bucket.splice(index, 1);
if (bucket.length === 0) {
delete this._streamPriorities[stream._priority];
}
this._insert(stream, priority);
};

@@ -229,4 +226,4 @@

var stream = new Stream(this._log);
this._enableReceiving(stream, id);
this._enableSending(stream);
this._allocateId(stream, id);
this._allocatePriority(stream);
this.emit('stream', stream, id);

@@ -243,3 +240,3 @@

var stream = new Stream(this._log);
this._enableSending(stream);
this._allocatePriority(stream);

@@ -257,38 +254,60 @@ return stream;

// * Looping through the active streams in priority order and forwarding frames from streams
stream_loop:
for (var i = 0; i < this._streamsOut.length; i++) {
var stream = this._streamsOut[i];
var id = this._getIdOf(stream);
var frame;
while (frame = stream.upstream.read()) {
if (this._streamCount + frame.count_change > this._streamLimit) {
stream.upstream.unshift(frame);
continue stream_loop;
}
// * Looping through priority `bucket`s in priority order.
priority_loop:
for (var priority in this._streamPriorities) {
var bucket = this._streamPriorities[priority].slice();
if (id === -1) {
id = this._enableReceiving(stream);
// * Forwarding frames from buckets with round-robin scheduling.
// 1. pulling out frame
// 2. if there's no frame, remove this stream from `buckets`
// 3. if forwarding this frame would make `streamCount` greater than `streamLimit`, remove
// this stream
// 4. assigning an ID to the frame (allocating an ID to the stream if there isn't already)
// 5. if forwarding a PUSH_PROMISE, allocate ID to the promised stream
// 6. forwarding the frame, changing `streamCount` as appropriate
// 7. stepping to the next stream if there's still more frame needed in the output buffer
var index = 0;
while (bucket.length > 0) {
index = index % bucket.length;
var stream = bucket[index];
var frame = stream.upstream.read();
if (!frame) {
bucket.splice(index, 1);
}
frame.stream = id;
if (frame.type === 'PUSH_PROMISE') {
setImmediate(this._enableSending.bind(this, frame.promised_stream));
frame.promised_stream = this._enableReceiving(frame.promised_stream);
else if (this._streamCount + frame.count_change > this._streamLimit) {
stream.upstream.unshift(frame);
bucket.splice(index, 1);
}
this._log.trace({ s: stream, frame: frame }, 'Trying to forward outgoing frame');
var remainder = null;
var moreNeeded = this._push(frame, function(remainderFrame) {
stream.upstream.unshift(remainder = remainderFrame);
});
else {
var id = this._streamIds.indexOf(stream);
if (id === -1) {
frame.stream = this._allocateId(stream);
} else {
frame.stream = id;
}
if (!remainder) {
this._changeStreamCount(frame.count_change);
}
if (frame.type === 'PUSH_PROMISE') {
this._allocatePriority(frame.promised_stream);
frame.promised_stream = this._allocateId(frame.promised_stream);
}
if (moreNeeded === null) {
continue stream_loop;
} else if (moreNeeded === false) {
break stream_loop;
this._log.trace({ s: stream, frame: frame }, 'Trying to forward outgoing frame');
var remainder = null;
var moreNeeded = this._push(frame, function(remainderFrame) {
stream.upstream.unshift(remainder = remainderFrame);
});
if (!remainder) {
this._changeStreamCount(frame.count_change);
}
if (moreNeeded === null) {
bucket.splice(index, 1);
} else if (moreNeeded === false) {
break priority_loop;
} else {
index += 1;
}
}

@@ -307,3 +326,3 @@ }

// * gets the appropriate stream from the stream registry
var stream = this._streamsIn[frame.stream];
var stream = this._streamIds[frame.stream];

@@ -358,3 +377,3 @@ // * or creates one if it's not in `this.streams`

for (var name in frame.settings) {
this.emit(name, frame.settings[name]);
this.emit('RECEIVING_' + name, frame.settings[name]);
}

@@ -370,2 +389,5 @@ };

});
for (var name in settings) {
this.emit('SENDING_' + name, settings[name]);
}
};

@@ -467,9 +489,11 @@

stream.upstream.setInitialWindow(this._initialStreamWindowSize);
if (this._remoteFlowControlDisabled) {
stream.upstream.disableRemoteFlowControl();
}
});
this.on('SETTINGS_INITIAL_WINDOW_SIZE', this._setInitialStreamWindowSize);
this.on('SETTINGS_FLOW_CONTROL_OPTIONS', this._setStreamFlowControl);
this._streamsIn[0].upstream.setInitialWindow = function noop() {};
// Flow control for incoming frames is not yet supported, and is turned off in the initial
// SETTINGS frame.
this.on('RECEIVING_SETTINGS_INITIAL_WINDOW_SIZE', this._setInitialStreamWindowSize);
this.on('RECEIVING_SETTINGS_FLOW_CONTROL_OPTIONS', this._setLocalFlowControl);
this.on('SENDING_SETTINGS_FLOW_CONTROL_OPTIONS', this._setRemoteFlowControl);
this._streamIds[0].upstream.setInitialWindow = function noop() {};
this._streamIds[0].upstream.disableRemoteFlowControl = function noop() {};
};

@@ -491,3 +515,3 @@

this._initialStreamWindowSize = size;
this._streamsIn.forEach(function(stream) {
this._streamIds.forEach(function(stream) {
stream.upstream.setInitialWindow(size);

@@ -500,3 +524,3 @@ });

// for turning off flow control since it can not be turned on.
Connection.prototype._setStreamFlowControl = function _setStreamFlowControl(disable) {
Connection.prototype._setLocalFlowControl = function _setLocalFlowControl(disable) {
if (disable) {

@@ -510,1 +534,13 @@ this._increaseWindow(Infinity);

};
Connection.prototype._setRemoteFlowControl = function _setRemoteFlowControl(disable) {
if (disable) {
this.disableRemoteFlowControl();
this._streamIds.forEach(function(stream) {
stream.upstream.disableRemoteFlowControl();
});
} else if (this._remoteFlowControlDisabled) {
this._log.error('Trying to re-enable flow control after it was turned off.');
throw new Error('Trying to re-enable flow control after it was turned off.');
}
};

@@ -119,2 +119,4 @@ var assert = process.env.HTTP2_ASSERT ? require('assert') : function noop() {};

var MAX_HTTP_PAYLOAD_SIZE = 16383;
Endpoint.prototype._initializeDataFlow = function _initializeDataFlow(role, settings) {

@@ -132,4 +134,4 @@ var firstStreamId, compressorRole, decompressorRole;

this._serializer = new Serializer(this._log);
this._deserializer = new Deserializer(this._log);
this._serializer = new Serializer(this._log, MAX_HTTP_PAYLOAD_SIZE);
this._deserializer = new Deserializer(this._log, MAX_HTTP_PAYLOAD_SIZE);
this._compressor = new Compressor(compressorRole, this._log);

@@ -136,0 +138,0 @@ this._decompressor = new Decompressor(decompressorRole, this._log);

@@ -81,3 +81,3 @@ var assert = process.env.HTTP2_ASSERT ? require('assert') : function noop() {};

this._received = 0;
this._remoteFlowControlDisabled = true;
this._remoteFlowControlDisabled = false;
}

@@ -139,13 +139,6 @@ Flow.prototype = Object.create(Duplex.prototype, { constructor: { value: Flow } });

// Remote flow control is currently disabled by default, but in the future, it may be turned off
// using the `disableRemoteFlowControl` method.
// Must be called after sending a SETTINGS frame that turns off flow control on the remote side.
Flow.prototype.disableRemoteFlowControl = function disableRemoteFlowControl() {
this._log.debug('Turning off remote flow control');
this._remoteFlowControlDisabled = true;
this.push({
type: 'WINDOW_UPDATE',
stream: this._flowControlId,
flags: {
END_FLOW_CONTROL: true
}
});
};

@@ -152,0 +145,0 @@

@@ -25,4 +25,5 @@ // The framer consists of two [Transform Stream][1] subclasses that operate in [object mode][2]:

function Serializer(log) {
function Serializer(log, sizeLimit) {
this._log = log.child({ component: 'serializer' });
this._sizeLimit = sizeLimit || MAX_PAYLOAD_SIZE;
Transform.call(this, { objectMode: true });

@@ -44,2 +45,4 @@ }

assert(buffers[0].readUInt16BE(0) <= this._sizeLimit, 'Frame too large!');
for (var i = 0; i < buffers.length; i++) {

@@ -66,4 +69,5 @@ if (logData) {

function Deserializer(log) {
function Deserializer(log, sizeLimit) {
this._log = log.child({ component: 'deserializer' });
this._sizeLimit = sizeLimit || MAX_PAYLOAD_SIZE;
Transform.call(this, { objectMode: true });

@@ -112,3 +116,7 @@ this._next(COMMON_HEADER_SIZE);

Deserializer.commonHeader(this._buffer, this._frame);
this._next(this._frame.length);
if (this._frame.length <= this._sizeLimit) {
this._next(this._frame.length);
} else {
this.emit('error', 'FRAME_TOO_LARGE');
}
}

@@ -148,3 +156,3 @@

// Additional size limits can be set by specific application uses. HTTP limits the frame size to
// 16,383 octets. This limitation is enforced on the connection layer.
// 16,383 octets.
//

@@ -205,3 +213,3 @@ // 0 1 2 3

}
assert(size < MAX_PAYLOAD_SIZE, 'Too large frame: ' + size + ' bytes');
assert(size <= MAX_PAYLOAD_SIZE, 'Frame too large!');
headerBuffer.writeUInt16BE(size, 0);

@@ -425,8 +433,8 @@

//
// A SETTINGS frame is not required to include every defined setting; senders can include only those
// parameters for which it has accurate values and a need to convey. When multiple parameters are
// sent, they SHOULD be sent in order of numerically lowest ID to highest ID. A single SETTINGS
// frame MUST NOT contain multiple values for the same ID. If the receiver of a SETTINGS frame
// discovers multiple values for the same ID, it MUST ignore all values for that ID except the first
// one.
// Each setting in a SETTINGS frame replaces the existing value for that setting. Settings are
// processed in the order in which they appear, and a receiver of a SETTINGS frame does not need to
// maintain any state other than the current value of settings. Therefore, the value of a setting
// is the last value that is seen by a receiver. This permits the inclusion of the same settings
// multiple times in the same SETTINGS frame, though doing so does nothing other than waste
// connection capacity.

@@ -462,7 +470,8 @@ Serializer.SETTINGS = function writeSettings(frame, buffers) {

var setting = definedSettings[id];
var value = buffer.readUInt32BE(i*8 + 4);
if (!setting || (setting.name in frame.settings)) {
continue;
if (setting) {
var value = buffer.readUInt32BE(i*8 + 4);
frame.settings[setting.name] = setting.flag ? Boolean(value & 0x1) : value;
} else {
/* Unknown setting, ignoring */
}
frame.settings[setting.name] = setting.flag ? Boolean(value & 0x1) : value;
}

@@ -613,11 +622,7 @@ };

//
// The WINDOW_UPDATE frame defines the following flags:
//
// * END_FLOW_CONTROL (0x1):
// Bit 1 being set indicates that flow control for the identified stream
// or connection has been ended; subsequent frames do not need to be flow controlled.
// The WINDOW_UPDATE frame does not define any flags.
frameTypes[0x9] = 'WINDOW_UPDATE';
frameFlags.WINDOW_UPDATE = ['END_FLOW_CONTROL'];
frameFlags.WINDOW_UPDATE = [];

@@ -642,2 +647,32 @@ typeSpecificAttributes.WINDOW_UPDATE = ['window_size'];

// [CONTINUATION](http://http2.github.io/http2-spec/#CONTINUATION)
// ------------------------------------------------------------
//
// The CONTINUATION frame (type=0xA) is used to continue a sequence of header block fragments.
//
// The CONTINUATION frame defines the following flags:
//
// * END_STREAM (0x1):
// Bit 1 being set indicates that this frame is the last that the endpoint will send for the
// identified stream.
// * RESERVED (0x2):
// Bit 2 is reserved for future use.
// * END_HEADERS (0x4):
// The END_HEADERS bit indicates that this frame ends the sequence of header block fragments
// necessary to provide a complete set of headers.
frameTypes[0xA] = 'CONTINUATION';
frameFlags.CONTINUATION = ['END_STREAM', 'RESERVED', 'END_HEADERS'];
typeSpecificAttributes.CONTINUATION = ['headers', 'data'];
Serializer.CONTINUATION = function writeContinuation(frame, buffers) {
buffers.push(frame.data);
};
Deserializer.CONTINUATION = function readContinuation(buffer, frame) {
frame.data = buffer;
};
// [Error Codes](http://http2.github.io/http2-spec/#ErrorCodes)

@@ -644,0 +679,0 @@ // ------------------------------------------------------------

@@ -10,3 +10,4 @@ // Public API

//
// Additional and modified API elements:
// Additional and modified API elements
// ------------------------------------
//

@@ -73,27 +74,21 @@ // - **Class: http2.Server**

//
// API elements not yet implemented:
// API elements not yet implemented
// --------------------------------
//
// - **Class: http2.Server**
// - **Event: 'checkContinue'**
// - **server.maxHeadersCount**
//
// - **Class: http2.ServerResponse**
// - **response.writeContinue()**
// - **response.addTrailers(headers)**
// API elements that are not applicable to HTTP/2
// ----------------------------------------------
//
// - **Class: http2.ClientRequest**
// - **Event: 'continue'**
// The reason may be deprecation of certain HTTP/1.1 features, or that some API elements simply
// don't make sense when using HTTP/2. These will not be present when a request is done with HTTP/2,
// but will function normally when falling back to using HTTP/1.1.
//
// - **Class: http2.IncomingMessage**
// - **message.trailers**
//
// API elements that are not applicable to HTTP/2 (deprecated functionality, or for other reason).
// These will not be present when a request is done with HTTP/2, but will function normally when
// falling back to using HTTP/1.1
//
// - **Class: http2.Server**
// - **Event: 'checkContinue'**: not in the spec, yet (see [http-spec#18][expect-continue])
// - **Event: 'upgrade'**: upgrade is deprecated in HTTP/2
// - **Event: 'timeout'**: HTTP/2 sockets won't timeout because of application level keepalive
// (PING frames)
// - **Event: 'connect'**: CONNECT method is not supported in current draft of HTTP/2
// - **Event: 'connect'**: not in the spec, yet (see [http-spec#230][connect])
// - **server.setTimeout(msecs, callback)**

@@ -105,2 +100,3 @@ // - **server.timeout**

// - **Event: 'timeout'**
// - **response.writeContinue()**
// - **response.writeHead(statusCode, [reasonPhrase], [headers])**: reasonPhrase will always be

@@ -117,2 +113,3 @@ // ignored since [it's not supported in HTTP/2][3]

// - **Event: 'connect'**
// - **Event: 'continue'**
// - **request.setTimeout(timeout, [callback])**

@@ -129,2 +126,4 @@ // - **request.setNoDelay([noDelay])**

// [3]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-04#section-8.1.3
// [expect-continue]: https://github.com/http2/http2-spec/issues/18
// [connect]: https://github.com/http2/http2-spec/issues/230

@@ -149,5 +148,15 @@ // Common server and client side code

var deprecatedHeaders = [
'connection',
'host',
'keep-alive',
'proxy-connection',
'te',
'transfer-encoding',
'upgrade'
];
// The implemented version of the HTTP/2 specification is [draft 04][1].
// [1]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-04
var implementedVersion = 'HTTP-draft-04/2.0';
var implementedVersion = 'HTTP-draft-06/2.0';

@@ -194,2 +203,6 @@ // Logging

// * `this.headers` will store the regular headers (and none of the special colon headers)
this.headers = {};
this.trailers = undefined;
// * Other metadata is filled in when the headers arrive.

@@ -200,2 +213,35 @@ stream.once('headers', this._onHeaders.bind(this));

// [Request Header Fields](http://tools.ietf.org/html/draft-ietf-httpbis-http2-05#section-8.1.2.1)
// * `headers` argument: HTTP/2.0 request and response header fields carry information as a series
// of key-value pairs. This includes the target URI for the request, the status code for the
// response, as well as HTTP header fields.
IncomingMessage.prototype._onHeaders = function _onHeaders(headers) {
// * An HTTP/2.0 request or response MUST NOT include any of the following header fields:
// Connection, Host, Keep-Alive, Proxy-Connection, TE, Transfer-Encoding, and Upgrade. A server
// MUST treat the presence of any of these header fields as a stream error of type
// PROTOCOL_ERROR.
for (var i = 0; i < deprecatedHeaders.length; i++) {
var key = deprecatedHeaders[i];
if (key in headers) {
this._log.error({ key: key, value: headers[key] }, 'Deprecated header found');
this.stream.emit('error', 'PROTOCOL_ERROR');
return;
}
}
// * Store the _regular_ headers in `this.headers`
for (var name in headers) {
if (name[0] !== ':') {
this.headers[name] = headers[name];
}
}
// * The next header block, if any, will represent the trailers
this.stream.once('headers', this._onTrailers.bind(this));
};
IncomingMessage.prototype._onTrailers = function _onTrailers(trailers) {
this.trailers = trailers;
};
IncomingMessage.prototype.setTimeout = noop;

@@ -211,2 +257,3 @@

this._headers = {};
this._trailers = undefined;
this.headersSent = false;

@@ -228,2 +275,9 @@

if (this.stream) {
if (this._trailers) {
if (this.request) {
this.request.addTrailers(this._trailers);
} else {
this.stream.headers(this._trailers);
}
}
this.stream.end();

@@ -239,3 +293,7 @@ } else {

} else {
this._headers[name.toLowerCase()] = value;
name = name.toLowerCase();
if (deprecatedHeaders.indexOf(name) !== -1) {
throw new Error('Cannot set deprecated header: ' + name);
}
this._headers[name] = value;
}

@@ -256,2 +314,6 @@ };

OutgoingMessage.prototype.addTrailers = function addTrailers(trailers) {
this._trailers = trailers;
};
OutgoingMessage.prototype.setTimeout = noop;

@@ -289,3 +351,3 @@

this._server.on('secureConnection', function(socket) {
if (socket.npnProtocol === implementedVersion) {
if (socket.npnProtocol === implementedVersion && socket.servername) {
start(socket);

@@ -320,4 +382,6 @@ } else {

this._log.info({ e: endpoint, client: socket.remoteAddress + ':' + socket.remotePort },
'New incoming HTTP/2 connection');
this._log.info({ e: endpoint,
client: socket.remoteAddress + ':' + socket.remotePort,
SNI: socket.servername
}, 'New incoming HTTP/2 connection');

@@ -340,4 +404,6 @@ endpoint.pipe(socket).pipe(endpoint);

Server.prototype._fallback = function _fallback(socket) {
this._log.info({ client: socket.remoteAddress + ':' + socket.remotePort, protocol: socket.npnProtocol },
'Falling back to simple HTTPS');
this._log.info({ client: socket.remoteAddress + ':' + socket.remotePort,
protocol: socket.npnProtocol,
SNI: socket.servername
}, 'Falling back to simple HTTPS');

@@ -398,2 +464,9 @@ for (var i = 0; i < this._originalSocketListeners.length; i++) {

// `addContext` is used to add Server Name Indication contexts
Server.prototype.addContext = function addContext(hostname, credentials) {
if (this._mode === 'tls') {
this._server.addContext(hostname, credentials);
}
};
function createServer(options, requestListener) {

@@ -427,26 +500,2 @@ if (typeof options === 'function') {

IncomingRequest.prototype._onHeaders = function _onHeaders(headers) {
// * An HTTP/2.0 request MUST NOT include any of the following header fields: Connection, Host,
// Keep-Alive, Proxy-Connection, TE, Transfer-Encoding, and Upgrade. A server MUST treat the
// presence of any of these header fields as a stream error of type PROTOCOL_ERROR.
var deprecatedHeaders = [
'connection',
'host',
'keep-alive',
'proxy-connection',
'te',
'transfer-encoding',
'upgrade'
];
for (var i = 0; i < deprecatedHeaders.length; i++) {
var key = deprecatedHeaders[i];
if (key in headers) {
this._log.error({ key: key, value: headers[key] }, 'Deprecated header found');
this.stream.emit('error', 'PROTOCOL_ERROR');
return;
}
}
// `this.headers` will store the regular headers (and none of the special colon headers)
this.headers = {};
// * The ":method" header field includes the HTTP method

@@ -468,14 +517,10 @@ // * The ":scheme" header field includes the scheme portion of the target URI

};
for (var name in headers) {
if (name in mapping) {
var value = headers[name];
if ((typeof value !== 'string') || (value.length === 0)) {
this._log.error({ key: name, value: value }, 'Invalid header field');
this.stream.emit('error', 'PROTOCOL_ERROR');
return;
}
this[mapping[name]] = value;
} else {
this.headers[name] = headers[name];
for (var name in mapping) {
var value = headers[name];
if ((typeof value !== 'string') || (value.length === 0)) {
this._log.error({ key: name, value: value }, 'Invalid or missing special header field');
this.stream.emit('error', 'PROTOCOL_ERROR');
return;
}
this[mapping[name]] = value;
}

@@ -486,3 +531,6 @@

// * Signaling that the header arrived.
// * Handling regular headers.
IncomingMessage.prototype._onHeaders.call(this, headers);
// * Signaling that the headers arrived.
this._log.info({ method: this.method, scheme: this.scheme, host: this.host,

@@ -516,10 +564,9 @@ path: this.url, headers: this.headers }, 'Incoming request');

headers = headers || {};
for (var name in headers) {
this._headers[name.toLowerCase()] = headers[name];
this.setHeader(name, headers[name]);
}
headers = this._headers;
if (this.sendDate && !('date' in this._headers)) {
this._headers.date = (new Date()).toUTCString();
headers.date = (new Date()).toUTCString();
}

@@ -529,6 +576,5 @@

this._headers[':status'] = this.statusCode = statusCode;
headers[':status'] = this.statusCode = statusCode;
this.stream.headers(this._headers);
this.stream.headers(headers);
this.headersSent = true;

@@ -681,2 +727,3 @@ };

options.NPNProtocols = [implementedVersion, 'http/1.1', 'http/1.0'];
options.servername = options.host; // Server Name Indication
options.agent = this._httpsAgent;

@@ -771,3 +818,2 @@ var httpsRequest = https.request(options);

this.stream = undefined;
this.headersSent = true;
}

@@ -781,6 +827,6 @@ OutgoingRequest.prototype = Object.create(OutgoingMessage.prototype, { constructor: { value: OutgoingRequest } });

var headers = {};
for (var key in options.headers) {
headers[key] = options.headers[key];
this.setHeader(key, options.headers[key]);
}
var headers = this._headers;
delete headers.host;

@@ -800,2 +846,3 @@

this.stream.headers(headers);
this.headersSent = true;

@@ -892,8 +939,6 @@ this.emit('socket', this.stream);

// [Response Header Fields](http://tools.ietf.org/html/draft-ietf-httpbis-http2-05#section-8.1.2.2)
// * `headers` argument: HTTP/2.0 request and response header fields carry information as a series
// of key-value pairs. This includes the target URI for the request, the status code for the
// response, as well as HTTP header fields.
IncomingResponse.prototype._onHeaders = function _onHeaders(headers) {
// * HTTP/2.0 request and response header fields carry information as a series of key-value pairs.
// This includes the target URI for the request, the status code for the response, as well as
// HTTP header fields.
this.headers = headers;
// * A single ":status" header field is defined that carries the HTTP status code field. This

@@ -912,6 +957,8 @@ // header field MUST be included in all responses.

this.statusCode = statusCode;
delete headers[':status'];
// * Signaling that the header arrived.
this._log.info({ status: statusCode, headers: headers}, 'Incoming response');
// * Handling regular headers.
IncomingMessage.prototype._onHeaders.call(this, headers);
// * Signaling that the headers arrived.
this._log.info({ status: this.statusCode, headers: this.headers}, 'Incoming response');
this.emit('ready');

@@ -918,0 +965,0 @@ };

@@ -126,4 +126,4 @@ var assert = process.env.HTTP2_ASSERT ? require('assert') : function noop() {};

this._log.debug({ priority: priority }, 'Changing priority');
this.emit('priority', priority);
this._priority = priority;
this.emit('priority', priority);
}

@@ -130,0 +130,0 @@ };

{
"name": "http2",
"version": "0.3.1",
"version": "0.4.0",
"description": "An HTTP/2 client and server implementation",

@@ -5,0 +5,0 @@ "main": "lib/index.js",

node-http2
==========
An HTTP/2 server implementation for node.js, developed as a [Google Summer of Code
An HTTP/2 ([draft-ietf-httpbis-http2-06](http://tools.ietf.org/html/draft-ietf-httpbis-http2-06))
client and server implementation for node.js, developed as a [Google Summer of Code
project](https://google-melange.appspot.com/gsoc/project/google/gsoc2013/molnarg/5001).

@@ -124,8 +125,8 @@

To generate a code coverage report, run `npm test --coverage` (which runs very slowly, be patient).
Code coverage summary as of version 0.3.1:
Code coverage summary as of version 0.4.0:
```
Statements : 94.4% ( 1400/1483 )
Branches : 87.75% ( 523/596 )
Functions : 93.2% ( 192/206 )
Lines : 94.38% ( 1394/1477 )
Statements : 93.44% ( 1482/1586 )
Branches : 86.13% ( 559/649 )
Functions : 92.34% ( 193/209 )
Lines : 93.42% ( 1476/1580 )
```

@@ -132,0 +133,0 @@

@@ -5,3 +5,5 @@ var expect = require('chai').expect;

var compressor = require('../lib/compressor');
var CompressionContext = compressor.CompressionContext;
var HeaderTable = compressor.HeaderTable;
var HeaderSetCompressor = compressor.HeaderSetCompressor;
var HeaderSetDecompressor = compressor.HeaderSetDecompressor;
var Compressor = compressor.Compressor;

@@ -45,7 +47,7 @@ var Decompressor = compressor.Decompressor;

header: {
name: 12,
name: 11,
value: 'my-user-agent',
index: Infinity
},
buffer: new Buffer('4D' + '0D6D792D757365722D6167656E74', 'hex')
buffer: new Buffer('4C' + '0D6D792D757365722D6167656E74', 'hex')
}, {

@@ -60,14 +62,14 @@ header: {

header: {
name: 38,
value: 38,
name: 30,
value: 30,
index: -1
},
buffer: new Buffer('A6', 'hex')
buffer: new Buffer('9e', 'hex')
}, {
header: {
name: 40,
value: 40,
name: 32,
value: 32,
index: -1
},
buffer: new Buffer('A8', 'hex')
buffer: new Buffer('a0', 'hex')
}, {

@@ -77,19 +79,19 @@ header: {

value: '/my-example/resources/script.js',
index: 38
index: 30
},
buffer: new Buffer('0426' + '1F2F6D792D6578616D706C652F7265736F75726365732F7363726970742E6A73', 'hex')
buffer: new Buffer('041e' + '1F2F6D792D6578616D706C652F7265736F75726365732F7363726970742E6A73', 'hex')
}, {
header: {
name: 40,
name: 32,
value: 'second',
index: Infinity
},
buffer: new Buffer('5F0A' + '067365636F6E64', 'hex')
buffer: new Buffer('5F02' + '067365636F6E64', 'hex')
}, {
header: {
name: 40,
name: 32,
value: 'third',
index: -1
},
buffer: new Buffer('7F0A' + '057468697264', 'hex')
buffer: new Buffer('7F02' + '057468697264', 'hex')
}];

@@ -115,3 +117,3 @@

'user-agent': 'my-user-agent',
'x-my-header': ['second', 'third']
'x-my-header': ['third', 'second']
},

@@ -129,16 +131,6 @@ buffer: test_headers[7].buffer

describe('compressor.js', function() {
describe('CompressionContext', function() {
describe('static method .equal([name1, value1], [name2, value2])', function() {
var equal = CompressionContext.equal;
it('decides if the two headers are considered equal', function() {
expect(equal(['name', 'value'], ['name', 'value'])).to.be.equal(true);
expect(equal(['name', 'value'], ['nAmE', 'value'])).to.be.equal(true);
expect(equal(['NaMe', 'value'], ['nAmE', 'value'])).to.be.equal(true);
expect(equal(['name', 'VaLuE'], ['name', 'value'])).to.be.equal(false);
expect(equal(['NaMe', 'VaLuE'], ['name', 'value'])).to.be.equal(false);
});
});
describe('HeaderTable', function() {
});
describe('Compressor', function() {
describe('HeaderSetCompressor', function() {
describe('static method .integer(I, N)', function() {

@@ -148,3 +140,3 @@ it('should return an array of buffers that represent the N-prefix coded form of the integer I', function() {

var test = test_strings[i];
expect(util.concat(Compressor.string(test.string))).to.deep.equal(test.buffer);
expect(util.concat(HeaderSetCompressor.string(test.string))).to.deep.equal(test.buffer);
}

@@ -157,3 +149,3 @@ });

var test = test_strings[i];
expect(util.concat(Compressor.string(test.string))).to.deep.equal(test.buffer);
expect(util.concat(HeaderSetCompressor.string(test.string))).to.deep.equal(test.buffer);
}

@@ -166,3 +158,3 @@ });

var test = test_headers[i];
expect(util.concat(Compressor.header(test.header))).to.deep.equal(test.buffer);
expect(util.concat(HeaderSetCompressor.header(test.header))).to.deep.equal(test.buffer);
}

@@ -173,3 +165,3 @@ });

describe('Decompressor', function() {
describe('HeaderSetDecompressor', function() {
describe('static method .integer(buffer, N)', function() {

@@ -180,3 +172,3 @@ it('should return the parsed N-prefix coded number and increase the cursor property of buffer', function() {

test.buffer.cursor = 0;
expect(Decompressor.integer(test.buffer, test.N)).to.equal(test.I);
expect(HeaderSetDecompressor.integer(test.buffer, test.N)).to.equal(test.I);
expect(test.buffer.cursor).to.equal(test.buffer.length);

@@ -191,3 +183,3 @@ }

test.buffer.cursor = 0;
expect(Decompressor.string(test.buffer)).to.equal(test.string);
expect(HeaderSetDecompressor.string(test.buffer)).to.equal(test.string);
expect(test.buffer.cursor).to.equal(test.buffer.length);

@@ -202,3 +194,3 @@ }

test.buffer.cursor = 0;
expect(Decompressor.header(test.buffer)).to.deep.equal(test.header);
expect(HeaderSetDecompressor.header(test.buffer)).to.deep.equal(test.header);
expect(test.buffer.cursor).to.equal(test.buffer.length);

@@ -208,2 +200,4 @@ }

});
});
describe('Decompressor', function() {
describe('method decompress(buffer)', function() {

@@ -253,6 +247,4 @@ it('should return the parsed header set in { name1: value1, name2: [value2, value3], ... } format', function() {

var decompressed = decompressor.decompress(compressed);
expect(headers).to.deep.equal(decompressed);
expect(decompressed).to.deep.equal(headers);
expect(compressor._table).to.deep.equal(decompressor._table);
expect(compressor._reference).to.deep.equal(decompressor._reference);
expect(compressor._working).to.deep.equal(compressor._working);
}

@@ -259,0 +251,0 @@ });

@@ -12,14 +12,14 @@ var expect = require('chai').expect;

var MAX_PRIORITY = Math.pow(2, 31) - 1;
var MAX_RANDOM_PRIORITY = 10;
function randomPriority() {
return Math.floor(Math.random() * (MAX_PRIORITY + 1));
return Math.floor(Math.random() * (MAX_RANDOM_PRIORITY + 1));
}
function expectPriorityOrder(streams) {
var previousPriority = -1;
for (var j = 0; j < streams.length; j++) {
var priority = streams[j]._priority;
expect(priority).to.be.at.least(previousPriority);
previousPriority = priority;
}
function expectPriorityOrder(priorities) {
priorities.forEach(function(bucket, priority) {
bucket.forEach(function(stream) {
expect(stream._priority).to.be.equal(priority);
});
});
}

@@ -30,37 +30,35 @@

describe('method ._insert(stream)', function() {
it('should insert the stream in _streamsOut in a place determined by stream._priority', function() {
it('should insert the stream in _streamPriorities in a place determined by stream._priority', function() {
var streams = [];
var connection = Object.create(Connection.prototype, { _streamsOut: { value: streams }});
var connection = Object.create(Connection.prototype, { _streamPriorities: { value: streams }});
var streamCount = 10;
// Inserting streams with random priority
for (var i = 0; i < streamCount; i++) {
var stream = { _priority: randomPriority() };
connection._insert(stream);
connection._insert(stream, stream._priority);
expect(connection._streamPriorities[stream._priority]).to.include(stream);
}
// Resulting _streamsOut should be ordered by priority
expect(streams.length).to.equal(streamCount);
expectPriorityOrder(streams);
expectPriorityOrder(connection._streamPriorities);
});
});
describe('method ._reprioritize(stream)', function() {
it('should eject and then insert the stream in _streamsOut in a place determined by stream._priority', function() {
it('should eject and then insert the stream in _streamPriorities in a place determined by stream._priority', function() {
var streams = [];
var connection = Object.create(Connection.prototype, { _streamsOut: { value: streams }});
var connection = Object.create(Connection.prototype, { _streamPriorities: { value: streams }});
var streamCount = 10;
var oldPriority, newPriority, stream;
// Inserting streams with random priority
for (var i = 0; i < streamCount; i++) {
var stream = { _priority: randomPriority() };
connection._insert(stream);
oldPriority = randomPriority();
while ((newPriority = randomPriority()) === oldPriority);
stream = { _priority: oldPriority };
connection._insert(stream, oldPriority);
connection._reprioritize(stream, newPriority);
stream._priority = newPriority;
expect(connection._streamPriorities[newPriority]).to.include(stream);
expect(connection._streamPriorities[oldPriority] || []).to.not.include(stream);
}
// Reprioritizing stream
stream = streams[Math.floor(Math.random() * streamCount)];
stream._priority = randomPriority();
connection._reprioritize(stream);
// Resulting _streamsOut should be ordered by priority
expect(streams.length).to.equal(streamCount);
expectPriorityOrder(streams);

@@ -79,4 +77,4 @@ });

connection._setStreamFlowControl(true);
connection._setStreamFlowControl(false);
connection._setLocalFlowControl(true);
connection._setLocalFlowControl(false);
});

@@ -93,3 +91,3 @@ });

connection._setStreamFlowControl(true);
connection._setLocalFlowControl(true);
connection._setInitialStreamWindowSize(10);

@@ -102,4 +100,4 @@ });

connection._setStreamFlowControl(true);
connection._setStreamFlowControl(true);
connection._setLocalFlowControl(true);
connection._setLocalFlowControl(true);
});

@@ -111,3 +109,3 @@ });

connection._setStreamFlowControl(false);
connection._setLocalFlowControl(false);
});

@@ -114,0 +112,0 @@ });

@@ -168,11 +168,2 @@ var expect = require('chai').expect;

describe('disabling remote flow control', function() {
it('should work as expected', function(done) {
flow1.disableRemoteFlowControl();
setTimeout(function() {
expect(flow2._window).to.be.equal(Infinity);
done();
});
});
});
describe('sending a large data stream', function() {

@@ -179,0 +170,0 @@ it('should work as expected', function(done) {

@@ -17,3 +17,4 @@ var expect = require('chai').expect;

GOAWAY: ['last_stream', 'error'],
WINDOW_UPDATE: ['window_size']
WINDOW_UPDATE: ['window_size'],
CONTINUATION: ['data']
};

@@ -133,3 +134,3 @@

type: 'WINDOW_UPDATE',
flags: { END_FLOW_CONTROL: false },
flags: { },
stream: 10,

@@ -141,2 +142,13 @@ length: 4,

buffer: new Buffer('0004' + '09' + '00' + '0000000A' + '12345678', 'hex')
}, {
frame: {
type: 'CONTINUATION',
flags: { END_STREAM: false, RESERVED: false, END_HEADERS: true },
stream: 10,
length: 4,
data: new Buffer('12345678', 'hex')
},
// length + type + flags + stream + content
buffer: new Buffer('0004' + '0A' + '04' + '0000000A' + '12345678', 'hex')
}];

@@ -143,0 +155,0 @@

@@ -348,2 +348,34 @@ var expect = require('chai').expect;

});
describe('request and response with trailers', function() {
it('should work as expected', function(done) {
var path = '/x';
var message = 'Hello world';
var requestTrailers = { 'content-md5': 'x' };
var responseTrailers = { 'content-md5': 'y' };
var server = http2.createServer(options, function(request, response) {
expect(request.url).to.equal(path);
request.on('data', util.noop);
request.once('end', function() {
expect(request.trailers).to.deep.equal(requestTrailers);
response.write(message);
response.addTrailers(responseTrailers);
response.end();
});
});
server.listen(1241, function() {
var request = http2.request('https://localhost:1241' + path);
request.addTrailers(requestTrailers);
request.end();
request.on('response', function(response) {
response.on('data', util.noop);
response.once('end', function() {
expect(response.trailers).to.deep.equal(responseTrailers);
done();
});
});
});
});
});
describe('server push', function() {

@@ -350,0 +382,0 @@ it('should work as expected', function(done) {

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc