Socket
Socket
Sign in · Demo · Install

mux.js

Package Overview
Dependencies
Maintainers
6
Versions
103
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

mux.js - npm Package Compare versions

Comparing version 4.5.0 to 4.5.1

lib/mp4/frame-utils.js

27

lib/codecs/h264.js

@@ -19,2 +19,9 @@ 'use strict';

/*
* Scans a byte stream and triggers a data event with the NAL units found.
* @param {Object} data Event received from H264Stream
* @param {Uint8Array} data.data The h264 byte stream to be scanned
*
* @see H264Stream.push
*/
this.push = function(data) {

@@ -153,2 +160,13 @@ var swapBuffer;

/*
* Pushes a packet from a stream onto the NalByteStream
*
* @param {Object} packet - A packet received from a stream
* @param {Uint8Array} packet.data - The raw bytes of the packet
* @param {Number} packet.dts - Decode timestamp of the packet
* @param {Number} packet.pts - Presentation timestamp of the packet
* @param {Number} packet.trackId - The id of the h264 track this packet came from
* @param {('video'|'audio')} packet.type - The type of packet
*
*/
this.push = function(packet) {

@@ -165,2 +183,10 @@ if (packet.type !== 'video') {

/*
* Identify NAL unit types and pass on the NALU, trackId, presentation and decode timestamps
* for the NALUs to the next stream component.
* Also, preprocess caption and sequence parameter NALUs.
*
* @param {Uint8Array} data - A NAL unit identified by `NalByteStream.push`
* @see NalByteStream.push
*/
nalByteStream.on('data', function(data) {

@@ -198,2 +224,3 @@ var

}
// This triggers data on the H264Stream
self.trigger('data', event);

@@ -200,0 +227,0 @@ });

15

lib/m2ts/m2ts.js

@@ -40,2 +40,5 @@ /**

/**
* Split a stream of data into M2TS packets
**/
this.push = function(bytes) {

@@ -85,2 +88,5 @@ var

/**
* Passes identified M2TS packets to the TransportParseStream to be parsed
**/
this.flush = function() {

@@ -205,3 +211,3 @@ // If the buffer contains a whole packet when we are being flushed, emit it

/**
* Deliver a new MP2T packet to the stream.
* Deliver a new MP2T packet to the next stream in the pipeline.
*/

@@ -349,2 +355,5 @@ this.push = function(packet) {

},
/**
* Pass completely parsed PES packets to the next stream in the pipeline
**/
flushStream = function(stream, type, forceFlush) {

@@ -398,2 +407,6 @@ var

/**
* Identifies M2TS packet types and parses PES packets using metadata
* parsed from the PMT
**/
this.push = function(data) {

@@ -400,0 +413,0 @@ ({

375

lib/mp4/transmuxer.js

@@ -15,2 +15,4 @@ /**

var mp4 = require('./mp4-generator.js');
var frameUtils = require('./frame-utils');
var trackDecodeInfo = require('./track-decode-info');
var m2ts = require('../m2ts/m2ts.js');

@@ -47,28 +49,6 @@ var AdtsStream = require('../codecs/adts.js');

// Forward declarations for helpers defined throughout this module.
var isLikelyAacData;
var collectDtsInfo;
var clearDtsInfo;
var calculateTrackBaseMediaDecodeTime;
var arrayEquals;
var sumFrameByteLengths;

/**
 * Build a fresh sample entry populated with the default flag values.
 * A sample starts out as a non-sync (delta) frame of size zero; callers
 * overwrite `size` and the key-frame related flags as needed.
 * See ISO/IEC 14496-12:2012, section 8.6.4.3 for the field semantics.
 */
var createDefaultSample = function() {
  return {
    size: 0,
    flags: {
      isLeading: 0,
      dependsOn: 1,
      isDependedOn: 0,
      hasRedundancy: 0,
      degradationPriority: 0,
      isNonSyncSample: 1
    }
  };
};
isLikelyAacData = function(data) {

@@ -144,3 +124,3 @@ if ((data[0] === 'I'.charCodeAt(0)) &&

this.push = function(data) {
collectDtsInfo(track, data);
trackDecodeInfo.collectDtsInfo(track, data);

@@ -183,4 +163,4 @@ if (track) {

frames = this.trimAdtsFramesByEarliestDts_(adtsFrames);
track.baseMediaDecodeTime =
calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps);
track.baseMediaDecodeTime = trackDecodeInfo.calculateTrackBaseMediaDecodeTime(
track, options.keepOriginalTimestamps);

@@ -207,3 +187,3 @@ this.prefixWithSilence_(track, frames);

clearDtsInfo(track);
trackDecodeInfo.clearDtsInfo(track);

@@ -355,4 +335,12 @@ this.trigger('data', {track: track, boxes: boxes});

/**
* Constructs a ISO BMFF segment given H264 nalUnits
* @param {Object} nalUnit A data event representing a nalUnit
* @param {String} nalUnit.nalUnitType
* @param {Object} nalUnit.config Properties for a mp4 track
* @param {Uint8Array} nalUnit.data The nalUnit bytes
* @see lib/codecs/h264.js
**/
this.push = function(nalUnit) {
collectDtsInfo(track, nalUnit);
trackDecodeInfo.collectDtsInfo(track, nalUnit);

@@ -379,2 +367,6 @@ // record the track config

/**
* Pass constructed ISO BMFF track and boxes on to the
* next stream in the pipeline
**/
this.flush = function() {

@@ -408,4 +400,4 @@ var

// (group-of-pictures)
frames = this.groupNalsIntoFrames_(nalUnits);
gops = this.groupFramesIntoGops_(frames);
frames = frameUtils.groupNalsIntoFrames(nalUnits);
gops = frameUtils.groupFramesIntoGops(frames);

@@ -445,3 +437,3 @@ // If the first frame of this fragment is not a keyframe we have

// If we didn't find a candidate gop fall back to keyframe-pulling
gops = this.extendFirstKeyFrame_(gops);
gops = frameUtils.extendFirstKeyFrame(gops);
}

@@ -482,3 +474,3 @@ }

// when recalculated before sending off to CoalesceStream
clearDtsInfo(track);
trackDecodeInfo.clearDtsInfo(track);

@@ -488,13 +480,13 @@ gops = alignedGops;

collectDtsInfo(track, gops);
trackDecodeInfo.collectDtsInfo(track, gops);
// First, we have to build the index from byte locations to
// samples (that is, frames) in the video data
track.samples = this.generateSampleTable_(gops);
track.samples = frameUtils.generateSampleTable(gops);
// Concatenate the video data and construct the mdat
mdat = mp4.mdat(this.concatenateNalData_(gops));
mdat = mp4.mdat(frameUtils.concatenateNalData(gops));
track.baseMediaDecodeTime =
calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps);
track.baseMediaDecodeTime = trackDecodeInfo.calculateTrackBaseMediaDecodeTime(
track, options.keepOriginalTimestamps);

@@ -546,3 +538,3 @@ this.trigger('processedGopsInfo', gops.map(function(gop) {

this.resetStream_ = function() {
clearDtsInfo(track);
trackDecodeInfo.clearDtsInfo(track);

@@ -608,213 +600,2 @@ // reset config and pps because they may differ across segments

/**
 * If the fragment does not open on a key frame, drop the partial leading
 * GOP and stretch the first frame of the next GOP backwards so that it
 * covers the time period of the frames that were removed.
 *
 * @param {Array} gops - GOP list carrying aggregate byteLength/nalCount
 * @return {Array} the (possibly shortened) GOP list
 */
this.extendFirstKeyFrame_ = function(gops) {
  var droppedGop;

  // Nothing to do when we already start on a key frame or there is no
  // later GOP whose first frame could be extended
  if (gops[0][0].keyFrame || gops.length <= 1) {
    return gops;
  }

  // Remove the leading GOP and keep the aggregate counters in sync
  droppedGop = gops.shift();
  gops.byteLength -= droppedGop.byteLength;
  gops.nalCount -= droppedGop.nalCount;

  // The new first frame now also represents the dropped time range
  gops[0][0].dts = droppedGop.dts;
  gops[0][0].pts = droppedGop.pts;
  gops[0][0].duration += droppedGop.duration;

  return gops;
};
// Convert an array of nal units into an array of frames, each frame being
// composed of the nal units that make up that frame.
// Also keep track of cumulative data about the frame (duration, starting
// pts, byte length, etc.) as properties attached to the frame array.
this.groupNalsIntoFrames_ = function(nalUnits) {
  var
    idx,
    nal,
    frame = [],
    frameList = [];

  frame.byteLength = 0;

  for (idx = 0; idx < nalUnits.length; idx++) {
    nal = nalUnits[idx];

    // An 'aud'-type nal unit marks the start of a new frame
    if (nal.nalUnitType === 'access_unit_delimiter_rbsp') {
      // The very first nal unit is expected to be an AUD, so only emit
      // the frame under construction when it actually has contents
      if (frame.length) {
        frame.duration = nal.dts - frame.dts;
        frameList.push(frame);
      }
      frame = [nal];
      frame.byteLength = nal.data.byteLength;
      frame.pts = nal.pts;
      frame.dts = nal.dts;
      continue;
    }

    // Specifically flag key frames for ease of use later
    if (nal.nalUnitType === 'slice_layer_without_partitioning_rbsp_idr') {
      frame.keyFrame = true;
    }
    frame.duration = nal.dts - frame.dts;
    frame.byteLength += nal.data.byteLength;
    frame.push(nal);
  }

  // The final frame has no following AUD to measure its duration against;
  // borrow the previous frame's duration when ours is missing or invalid
  if (frameList.length &&
      (!frame.duration ||
       frame.duration <= 0)) {
    frame.duration = frameList[frameList.length - 1].duration;
  }

  // Push the final frame
  frameList.push(frame);
  return frameList;
};
// Convert an array of frames into an array of Gops, with each Gop being
// composed of the frames that make up that Gop.
// Also keep track of cumulative data about the Gop from the frames, such as
// the Gop duration, starting pts, etc. These aggregates live as extra
// properties on the returned arrays (byteLength, nalCount, duration, pts, dts).
this.groupFramesIntoGops_ = function(frames) {
  var
    i,
    currentFrame,
    currentGop = [],
    gops = [];

  // We must pre-set some of the values on the Gop since we
  // keep running totals of these values
  currentGop.byteLength = 0;
  currentGop.nalCount = 0;
  currentGop.duration = 0;
  currentGop.pts = frames[0].pts;
  currentGop.dts = frames[0].dts;

  // store some metadata about all the Gops
  gops.byteLength = 0;
  gops.nalCount = 0;
  gops.duration = 0;
  gops.pts = frames[0].pts;
  gops.dts = frames[0].dts;

  for (i = 0; i < frames.length; i++) {
    currentFrame = frames[i];

    if (currentFrame.keyFrame) {
      // Since the very first frame is expected to be a keyframe,
      // only push to the gops array when currentGop is not empty
      if (currentGop.length) {
        gops.push(currentGop);
        gops.byteLength += currentGop.byteLength;
        gops.nalCount += currentGop.nalCount;
        gops.duration += currentGop.duration;
      }

      // A keyframe starts a new Gop, seeded with this frame's stats
      currentGop = [currentFrame];
      currentGop.nalCount = currentFrame.length;
      currentGop.byteLength = currentFrame.byteLength;
      currentGop.pts = currentFrame.pts;
      currentGop.dts = currentFrame.dts;
      currentGop.duration = currentFrame.duration;
    } else {
      // A delta frame is folded into the Gop under construction
      currentGop.duration += currentFrame.duration;
      currentGop.nalCount += currentFrame.length;
      currentGop.byteLength += currentFrame.byteLength;
      currentGop.push(currentFrame);
    }
  }

  // If the trailing Gop ended up with no measurable duration, borrow the
  // duration of the previous Gop as a best estimate
  if (gops.length && currentGop.duration <= 0) {
    currentGop.duration = gops[gops.length - 1].duration;
  }

  // Fold the final Gop's totals into the aggregate counters
  gops.byteLength += currentGop.byteLength;
  gops.nalCount += currentGop.nalCount;
  gops.duration += currentGop.duration;

  // push the final Gop
  gops.push(currentGop);
  return gops;
};
/**
 * Generate the track's sample table (one entry per frame) from an array
 * of gops.
 *
 * @param {Array} gops - frames grouped into gops, carrying per-frame
 *   duration/pts/dts/byteLength metadata
 * @param {Number} [baseDataOffset] - byte offset the first sample starts
 *   at; defaults to zero
 * @return {Array} sample entries with size, duration, offset and flags
 */
this.generateSampleTable_ = function(gops, baseDataOffset) {
  var
    gopIdx, frameIdx,
    entry,
    gop,
    frame,
    offset = baseDataOffset || 0,
    table = [];

  for (gopIdx = 0; gopIdx < gops.length; gopIdx++) {
    gop = gops[gopIdx];

    for (frameIdx = 0; frameIdx < gop.length; frameIdx++) {
      frame = gop[frameIdx];

      entry = createDefaultSample();
      entry.dataOffset = offset;
      entry.compositionTimeOffset = frame.pts - frame.dts;
      entry.duration = frame.duration;
      // Space for a four-byte nal unit size prefix per nal unit
      entry.size = 4 * frame.length;
      entry.size += frame.byteLength;

      if (frame.keyFrame) {
        // mark sync samples so they can be distinguished later
        entry.flags.dependsOn = 2;
        entry.flags.isNonSyncSample = 0;
      }

      offset += entry.size;
      table.push(entry);
    }
  }
  return table;
};
/**
 * Generate the track's raw mdat payload from an array of gops: every nal
 * unit's bytes are concatenated into one Uint8Array, each prefixed with
 * its four-byte big-endian length.
 *
 * @param {Array} gops - gop list carrying aggregate byteLength/nalCount
 * @return {Uint8Array} the concatenated, length-prefixed nal unit data
 */
this.concatenateNalData_ = function(gops) {
  var
    gopIdx, frameIdx, nalIdx,
    gop,
    frame,
    nal,
    writeAt = 0,
    nalsByteLength = gops.byteLength,
    numberOfNals = gops.nalCount,
    // four extra bytes per nal unit for the length prefix
    totalByteLength = nalsByteLength + 4 * numberOfNals,
    data = new Uint8Array(totalByteLength),
    view = new DataView(data.buffer);

  // For each Gop..
  for (gopIdx = 0; gopIdx < gops.length; gopIdx++) {
    gop = gops[gopIdx];

    // For each Frame..
    for (frameIdx = 0; frameIdx < gop.length; frameIdx++) {
      frame = gop[frameIdx];

      // For each NAL..
      for (nalIdx = 0; nalIdx < frame.length; nalIdx++) {
        nal = frame[nalIdx];

        // write the length prefix, then the nal unit bytes themselves
        view.setUint32(writeAt, nal.data.byteLength);
        writeAt += 4;
        data.set(nal.data, writeAt);
        writeAt += nal.data.byteLength;
      }
    }
  }
  return data;
};
// trim gop list to the first gop found that has a matching pts with a gop in the list

@@ -948,96 +729,2 @@ // of gopsToAlignWith starting from the START of the list

/**
 * Store information about the start and end of the track and the
 * duration for each frame/sample we process, so the track's
 * baseMediaDecodeTime can be calculated later.
 *
 * @param {Object} track - track metadata; timelineStartInfo and the
 *   min/max segment timestamps are updated in place
 * @param {Object} data - a unit of media that may carry pts/dts values
 */
collectDtsInfo = function(track, data) {
  var pts = data.pts;
  var dts = data.dts;

  if (typeof pts === 'number') {
    // remember the very first pts ever seen on this timeline
    if (track.timelineStartInfo.pts === undefined) {
      track.timelineStartInfo.pts = pts;
    }

    track.minSegmentPts = track.minSegmentPts === undefined ?
      pts : Math.min(track.minSegmentPts, pts);
    track.maxSegmentPts = track.maxSegmentPts === undefined ?
      pts : Math.max(track.maxSegmentPts, pts);
  }

  if (typeof dts === 'number') {
    // remember the very first dts ever seen on this timeline
    if (track.timelineStartInfo.dts === undefined) {
      track.timelineStartInfo.dts = dts;
    }

    track.minSegmentDts = track.minSegmentDts === undefined ?
      dts : Math.min(track.minSegmentDts, dts);
    track.maxSegmentDts = track.maxSegmentDts === undefined ?
      dts : Math.max(track.maxSegmentDts, dts);
  }
};
/**
 * Clear the values used to calculate the baseMediaDecodeTime between
 * tracks.
 *
 * @param {Object} track - track metadata whose per-segment min/max
 *   pts/dts bookkeeping is removed in place
 */
clearDtsInfo = function(track) {
  ['minSegmentDts', 'maxSegmentDts', 'minSegmentPts', 'maxSegmentPts']
    .forEach(function(property) {
      delete track[property];
    });
};
/**
 * Calculate the track's baseMediaDecodeTime based on the earliest
 * DTS the transmuxer has ever seen and the minimum DTS for the
 * current track.
 *
 * @param {Object} track - track metadata configuration
 * @param {Boolean} keepOriginalTimestamps - if true, keep the timestamps
 *   in the source; false to adjust the first segment to start at 0.
 * @return {Number} the non-negative baseMediaDecodeTime, expressed in
 *   the track's own clock rate
 */
calculateTrackBaseMediaDecodeTime = function(track, keepOriginalTimestamps) {
  var earliestDts = track.minSegmentDts;

  // Optionally adjust the time so the first segment starts at zero.
  if (!keepOriginalTimestamps) {
    earliestDts -= track.timelineStartInfo.dts;
  }

  // track.timelineStartInfo.baseMediaDecodeTime is the location, in time,
  // where we want the start of the first segment to be placed; add to that
  // the distance this segment is from the very first
  var decodeTime = track.timelineStartInfo.baseMediaDecodeTime + earliestDts;

  // baseMediaDecodeTime must not become negative
  decodeTime = Math.max(0, decodeTime);

  if (track.type === 'audio') {
    // Audio has a different clock equal to the sampling_rate, so we need
    // to scale the PTS values into the clock rate of the track
    decodeTime = Math.floor(decodeTime * (track.samplerate / ONE_SECOND_IN_TS));
  }

  return decodeTime;
};
/**
* A Stream that can combine multiple streams (ie. audio & video)

@@ -1413,3 +1100,3 @@ * into a single output segment for MSE. Also supports audio-only

audioTrack.timelineStartInfo.pts = undefined;
clearDtsInfo(audioTrack);
trackDecodeInfo.clearDtsInfo(audioTrack);
audioTrack.timelineStartInfo.baseMediaDecodeTime = baseMediaDecodeTime;

@@ -1427,3 +1114,3 @@ if (pipeline.audioTimestampRolloverStream) {

videoTrack.timelineStartInfo.pts = undefined;
clearDtsInfo(videoTrack);
trackDecodeInfo.clearDtsInfo(videoTrack);
pipeline.captionStream.reset();

@@ -1430,0 +1117,0 @@ videoTrack.timelineStartInfo.baseMediaDecodeTime = baseMediaDecodeTime;

@@ -215,3 +215,3 @@ /**

// see http://ecmanaut.blogspot.com/2006/07/encoding-decoding-utf8-in-javascript.html
result.name = decodeURIComponent(global.escape(result.name));
result.name = decodeURIComponent(escape(result.name));

@@ -218,0 +218,0 @@ return result;

{
"name": "mux.js",
"version": "4.5.0",
"version": "4.5.1",
"description": "A collection of lightweight utilities for inspecting and manipulating video container formats.",

@@ -78,3 +78,3 @@ "repository": {

"karma-qunit": "^0.1.5",
"npm-run-all": "^1.4.0",
"npm-run-all": "^4.1.3",
"portscanner": "^1.0.0",

@@ -81,0 +81,0 @@ "qunitjs": "^1.0.0",

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc