Huge News!Announcing our $40M Series B led by Abstract Ventures.Learn More
Socket
Sign inDemoInstall
Socket

hark

Package Overview
Dependencies
Maintainers
1
Versions
17
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

hark - npm Package Compare versions

Comparing version 0.0.1 to 0.1.0

example/ihaveadream.mp3

202

example/demo.bundle.js
;(function(e,t,n){function i(n,s){if(!t[n]){if(!e[n]){var o=typeof require=="function"&&require;if(!s&&o)return o(n,!0);if(r)return r(n,!0);throw new Error("Cannot find module '"+n+"'")}var u=t[n]={exports:{}};e[n][0].call(u.exports,function(t){var r=e[n][1][t];return i(r?r:t)},u,u.exports)}return t[n].exports}var r=typeof require=="function"&&require;for(var s=0;s<n.length;s++)i(n[s]);return i})({1:[function(require,module,exports){
// NOTE(review): diff-extraction residue. This span renders the v0.0.1 ->
// v0.1.0 diff of the demo with removed and added lines interleaved and no
// +/- markers, so it is NOT valid JavaScript as shown. Preserved verbatim.
var hark = require('../hark.js')
var log = require('bows')('Demo');
localStorage.debug = true;
var getUserMedia = require('getusermedia')
// duplicate require of ../hark.js -- old and new import lines merged by the diff
var hark = require('../hark.js');
var bows = require('bows');
getUserMedia(function(err, stream) {
// `throw err` followed on the next line by `(` parses as the call
// `throw err(function(){...})` -- ASI does not break here
if (err) throw err
(function() {
//Audio Tag Demo
var stream = document.querySelector('audio');
var speechEvents = hark(stream);
var notification = document.querySelector('#mlkSpeaking');
var log = bows('MLK Demo');
speechEvents.on('speaking', function() {
// document.write lines and notification/log lines appear together here;
// they presumably belong to the old and new demo versions respectively -- unverifiable from this rendering
document.write('Speaking<br>');
log('speaking');
notification.style.display = 'block';
});
speechEvents.on('volume_change', function(volume, threshold) {
//log('volume change', volume, threshold);
});
speechEvents.on('stopped_speaking', function() {
document.write('Not Speaking<br>');
log('stopped_speaking');
notification.style.display = 'none';
});
});
})();
// browserify module-map plumbing between bundled modules
},{"../hark.js":2,"bows":3,"getusermedia":4}],4:[function(require,module,exports){
(function() {
//Microphone demo
var getUserMedia = require('getusermedia');
var attachmediastream = require('attachmediastream');
var notification = document.querySelector('#userSpeaking');
// NOTE(review): as rendered, `bows` and `hark` are used in this module
// without a require in this scope -- an artifact of the diff merge
var log = bows('Microphone Demo');
getUserMedia(function(err, stream) {
if (err) throw err
attachmediastream(document.querySelector('video'), stream);
var speechEvents = hark(stream);
speechEvents.on('speaking', function() {
notification.style.display = 'block';
log('speaking');
});
speechEvents.on('volume_change', function(volume, threshold) {
log(volume, threshold)
});
speechEvents.on('stopped_speaking', function() {
notification.style.display = 'none';
log('stopped_speaking');
});
});
})();
},{"../hark.js":2,"attachmediastream":3,"bows":4,"getusermedia":5}],3:[function(require,module,exports){
module.exports = function (element, stream, play) {
var autoPlay = (play === false) ? false : true;
if (autoPlay) element.autoplay = true;
// handle mozilla case
if (window.mozGetUserMedia) {
element.mozSrcObject = stream;
if (autoPlay) element.play();
} else {
if (typeof element.srcObject !== 'undefined') {
element.srcObject = stream;
} else if (typeof element.mozSrcObject !== 'undefined') {
element.mozSrcObject = stream;
} else if (typeof element.src !== 'undefined') {
element.src = URL.createObjectURL(stream);
} else {
return false;
}
}
return true;
};
},{}],5:[function(require,module,exports){
// getUserMedia helper by @HenrikJoreteg

@@ -55,49 +116,92 @@ var func = (navigator.getUserMedia ||

module.exports = function(stream) {
var speakingThreshold = -45;
var smoothing = 0.5;
var pollPeriod = 100;
function getMaxVolume (analyser, fftBins) {
var maxVolume = -Infinity;
analyser.getFloatFrequencyData(fftBins);
for(var i=0, ii=fftBins.length; i < ii; i++) {
if (fftBins[i] > maxVolume && fftBins[i] < 0) {
maxVolume = fftBins[i];
}
};
return maxVolume;
}
module.exports = function(stream, options) {
var harker = new WildEmitter();
//Config
var options = options || {},
smoothing = (options.smoothing || 0.5),
interval = (options.interval || 100),
threshold = options.threshold,
play = options.play;
//Setup Audio Context
var audioContext = new webkitAudioContext();
var sourceNode = audioContext.createMediaStreamSource(stream);
var analyser = audioContext.createAnalyser();
var fftBins = new Float32Array(analyser.fftSize);
var sourceNode, fftBins, analyser;
analyser = audioContext.createAnalyser();
analyser.fftSize = 512;
analyser.smoothingTimeConstant = smoothing;
fftBins = new Float32Array(analyser.fftSize);
if (stream.jquery) stream = stream[0];
if (stream instanceof HTMLAudioElement) {
//Audio Tag
sourceNode = audioContext.createMediaElementSource(stream);
if (typeof play === 'undefined') play = true;
threshold = threshold || -65;
} else {
//WebRTC Stream
sourceNode = audioContext.createMediaStreamSource(stream);
threshold = threshold || -45;
}
sourceNode.connect(analyser);
if (play) analyser.connect(audioContext.destination);
var emitter = new WildEmitter();
var speaking = false;
harker.speaking = false;
harker.setThreshold = function(t) {
threshold = t;
};
harker.setInterval = function(i) {
interval = i;
};
// Poll the analyser node to determine if speaking
// and emit events if changed
setInterval(function() {
var currentVolume = -Infinity;
analyser.getFloatFrequencyData(fftBins)
var looper = function() {
setTimeout(function() {
var currentVolume = getMaxVolume(analyser, fftBins);
for(var i=0, ii=fftBins.length; i < ii; i++) {
if (fftBins[i] > currentVolume && fftBins[i] < 0) {
currentVolume = fftBins[i];
}
};
harker.emit('volume_change', currentVolume, threshold);
if (currentVolume > speakingThreshold) {
if (!speaking) {
speaking = true;
emitter.emit('speaking');
if (currentVolume > threshold) {
if (!harker.speaking) {
harker.speaking = true;
harker.emit('speaking');
}
} else {
if (harker.speaking) {
harker.speaking = false;
harker.emit('stopped_speaking');
}
}
} else {
if (speaking) {
speaking = false;
emitter.emit('stopped_speaking');
}
}
}, pollPeriod);
return emitter;
looper();
}, interval);
};
looper();
return harker;
}
},{"wildemitter":5}],5:[function(require,module,exports){
},{"wildemitter":6}],6:[function(require,module,exports){
/*
WildEmitter.js is a slim little event emitter by @henrikjoreteg largely based
WildEmitter.js is a slim little event emitter by @henrikjoreteg largely based
on @visionmedia's Emitter from UI Kit.

@@ -110,10 +214,10 @@

emitter.on('*', function (eventName, other, event, payloads) {
});
emitter.on('somenamespace*', function (eventName, payloads) {
});
Please note that callbacks triggered by wildcard registered events also get
Please note that callbacks triggered by wildcard registered events also get
the event name as the first argument.

@@ -130,3 +234,3 @@ */

var hasGroup = (arguments.length === 3),
group = hasGroup ? arguments[1] : undefined,
group = hasGroup ? arguments[1] : undefined,
func = hasGroup ? arguments[2] : arguments[1];

@@ -143,3 +247,3 @@ func._groupName = group;

hasGroup = (arguments.length === 3),
group = hasGroup ? arguments[1] : undefined,
group = hasGroup ? arguments[1] : undefined,
func = hasGroup ? arguments[2] : arguments[1];

@@ -177,3 +281,3 @@ function on() {

i;
if (!callbacks) return this;

@@ -241,3 +345,3 @@

},{}],3:[function(require,module,exports){
},{}],4:[function(require,module,exports){
(function(window) {

@@ -278,3 +382,3 @@ var logger = require('andlog'),

},{"andlog":6}],6:[function(require,module,exports){
},{"andlog":7}],7:[function(require,module,exports){
// follow @HenrikJoreteg and @andyet if you like this ;)

@@ -281,0 +385,0 @@ (function () {

@@ -1,20 +0,56 @@

// NOTE(review): diff-extraction residue (same demo.js content as shown
// earlier in the bundle). Removed v0.0.1 lines and added v0.1.0 lines are
// interleaved with no markers, so this is NOT valid JavaScript as shown.
// Preserved verbatim.
var hark = require('../hark.js')
var log = require('bows')('Demo');
localStorage.debug = true;
var getUserMedia = require('getusermedia')
// duplicate require of ../hark.js -- old and new import lines merged by the diff
var hark = require('../hark.js');
var bows = require('bows');
getUserMedia(function(err, stream) {
// `throw err` followed on the next line by `(` parses as the call
// `throw err(function(){...})` -- ASI does not break here
if (err) throw err
(function() {
//Audio Tag Demo
var stream = document.querySelector('audio');
var speechEvents = hark(stream);
var notification = document.querySelector('#mlkSpeaking');
var log = bows('MLK Demo');
speechEvents.on('speaking', function() {
document.write('Speaking<br>');
log('speaking');
notification.style.display = 'block';
});
speechEvents.on('volume_change', function(volume, threshold) {
//log('volume change', volume, threshold);
});
speechEvents.on('stopped_speaking', function() {
document.write('Not Speaking<br>');
log('stopped_speaking');
notification.style.display = 'none';
});
});
})();
(function() {
//Microphone demo
var getUserMedia = require('getusermedia');
var attachmediastream = require('attachmediastream');
var notification = document.querySelector('#userSpeaking');
var log = bows('Microphone Demo');
getUserMedia(function(err, stream) {
if (err) throw err
attachmediastream(document.querySelector('video'), stream);
var speechEvents = hark(stream);
speechEvents.on('speaking', function() {
notification.style.display = 'block';
log('speaking');
});
speechEvents.on('volume_change', function(volume, threshold) {
log(volume, threshold)
});
speechEvents.on('stopped_speaking', function() {
notification.style.display = 'none';
log('stopped_speaking');
});
});
})();

@@ -5,44 +5,87 @@ (function(e){if("function"==typeof bootstrap)bootstrap("hark",e);else if("object"==typeof exports)module.exports=e();else if("function"==typeof define&&define.amd)define(e);else if("undefined"!=typeof ses){if(!ses.ok())return;ses.makeHark=e}else"undefined"!=typeof window?window.hark=e():global.hark=e()})(function(){var define,ses,bootstrap,module,exports;

module.exports = function(stream) {
var speakingThreshold = -45;
var smoothing = 0.5;
var pollPeriod = 100;
function getMaxVolume (analyser, fftBins) {
var maxVolume = -Infinity;
analyser.getFloatFrequencyData(fftBins);
for(var i=0, ii=fftBins.length; i < ii; i++) {
if (fftBins[i] > maxVolume && fftBins[i] < 0) {
maxVolume = fftBins[i];
}
};
return maxVolume;
}
module.exports = function(stream, options) {
var harker = new WildEmitter();
//Config
var options = options || {},
smoothing = (options.smoothing || 0.5),
interval = (options.interval || 100),
threshold = options.threshold,
play = options.play;
//Setup Audio Context
var audioContext = new webkitAudioContext();
var sourceNode = audioContext.createMediaStreamSource(stream);
var analyser = audioContext.createAnalyser();
var fftBins = new Float32Array(analyser.fftSize);
var sourceNode, fftBins, analyser;
analyser = audioContext.createAnalyser();
analyser.fftSize = 512;
analyser.smoothingTimeConstant = smoothing;
fftBins = new Float32Array(analyser.fftSize);
if (stream.jquery) stream = stream[0];
if (stream instanceof HTMLAudioElement) {
//Audio Tag
sourceNode = audioContext.createMediaElementSource(stream);
if (typeof play === 'undefined') play = true;
threshold = threshold || -65;
} else {
//WebRTC Stream
sourceNode = audioContext.createMediaStreamSource(stream);
threshold = threshold || -45;
}
sourceNode.connect(analyser);
if (play) analyser.connect(audioContext.destination);
var emitter = new WildEmitter();
var speaking = false;
harker.speaking = false;
harker.setThreshold = function(t) {
threshold = t;
};
harker.setInterval = function(i) {
interval = i;
};
// Poll the analyser node to determine if speaking
// and emit events if changed
setInterval(function() {
var currentVolume = -Infinity;
analyser.getFloatFrequencyData(fftBins)
var looper = function() {
setTimeout(function() {
var currentVolume = getMaxVolume(analyser, fftBins);
for(var i=0, ii=fftBins.length; i < ii; i++) {
if (fftBins[i] > currentVolume && fftBins[i] < 0) {
currentVolume = fftBins[i];
}
};
harker.emit('volume_change', currentVolume, threshold);
if (currentVolume > speakingThreshold) {
if (!speaking) {
speaking = true;
emitter.emit('speaking');
if (currentVolume > threshold) {
if (!harker.speaking) {
harker.speaking = true;
harker.emit('speaking');
}
} else {
if (harker.speaking) {
harker.speaking = false;
harker.emit('stopped_speaking');
}
}
} else {
if (speaking) {
speaking = false;
emitter.emit('stopped_speaking');
}
}
}, pollPeriod);
return emitter;
looper();
}, interval);
};
looper();
return harker;
}

@@ -52,3 +95,3 @@

/*
WildEmitter.js is a slim little event emitter by @henrikjoreteg largely based
WildEmitter.js is a slim little event emitter by @henrikjoreteg largely based
on @visionmedia's Emitter from UI Kit.

@@ -61,10 +104,10 @@

emitter.on('*', function (eventName, other, event, payloads) {
});
emitter.on('somenamespace*', function (eventName, payloads) {
});
Please note that callbacks triggered by wildcard registered events also get
Please note that callbacks triggered by wildcard registered events also get
the event name as the first argument.

@@ -81,3 +124,3 @@ */

var hasGroup = (arguments.length === 3),
group = hasGroup ? arguments[1] : undefined,
group = hasGroup ? arguments[1] : undefined,
func = hasGroup ? arguments[2] : arguments[1];

@@ -94,3 +137,3 @@ func._groupName = group;

hasGroup = (arguments.length === 3),
group = hasGroup ? arguments[1] : undefined,
group = hasGroup ? arguments[1] : undefined,
func = hasGroup ? arguments[2] : arguments[1];

@@ -128,3 +171,3 @@ function on() {

i;
if (!callbacks) return this;

@@ -131,0 +174,0 @@

var WildEmitter = require('wildemitter');
module.exports = function(stream) {
var speakingThreshold = -45;
var smoothing = 0.5;
var pollPeriod = 100;
function getMaxVolume (analyser, fftBins) {
var maxVolume = -Infinity;
analyser.getFloatFrequencyData(fftBins);
for(var i=0, ii=fftBins.length; i < ii; i++) {
if (fftBins[i] > maxVolume && fftBins[i] < 0) {
maxVolume = fftBins[i];
}
};
return maxVolume;
}
module.exports = function(stream, options) {
var harker = new WildEmitter();
//Config
var options = options || {},
smoothing = (options.smoothing || 0.5),
interval = (options.interval || 100),
threshold = options.threshold,
play = options.play;
//Setup Audio Context
var audioContext = new webkitAudioContext();
var sourceNode = audioContext.createMediaStreamSource(stream);
var analyser = audioContext.createAnalyser();
var fftBins = new Float32Array(analyser.fftSize);
var sourceNode, fftBins, analyser;
analyser = audioContext.createAnalyser();
analyser.fftSize = 512;
analyser.smoothingTimeConstant = smoothing;
fftBins = new Float32Array(analyser.fftSize);
if (stream.jquery) stream = stream[0];
if (stream instanceof HTMLAudioElement) {
//Audio Tag
sourceNode = audioContext.createMediaElementSource(stream);
if (typeof play === 'undefined') play = true;
threshold = threshold || -65;
} else {
//WebRTC Stream
sourceNode = audioContext.createMediaStreamSource(stream);
threshold = threshold || -45;
}
sourceNode.connect(analyser);
if (play) analyser.connect(audioContext.destination);
var emitter = new WildEmitter();
var speaking = false;
harker.speaking = false;
harker.setThreshold = function(t) {
threshold = t;
};
harker.setInterval = function(i) {
interval = i;
};
// Poll the analyser node to determine if speaking
// and emit events if changed
setInterval(function() {
var currentVolume = -Infinity;
analyser.getFloatFrequencyData(fftBins)
var looper = function() {
setTimeout(function() {
var currentVolume = getMaxVolume(analyser, fftBins);
for(var i=0, ii=fftBins.length; i < ii; i++) {
if (fftBins[i] > currentVolume && fftBins[i] < 0) {
currentVolume = fftBins[i];
}
};
harker.emit('volume_change', currentVolume, threshold);
if (currentVolume > speakingThreshold) {
if (!speaking) {
speaking = true;
emitter.emit('speaking');
if (currentVolume > threshold) {
if (!harker.speaking) {
harker.speaking = true;
harker.emit('speaking');
}
} else {
if (harker.speaking) {
harker.speaking = false;
harker.emit('stopped_speaking');
}
}
} else {
if (speaking) {
speaking = false;
emitter.emit('stopped_speaking');
}
}
}, pollPeriod);
return emitter;
looper();
}, interval);
};
looper();
return harker;
}
{
"name": "hark",
"version": "0.0.1",
"version": "0.1.0",
"description": "Converts an audio stream to speech events in the browser",
"main": "hark.js",
"scripts": { },
"scripts": {},
"repository": "https://github.com/latentflip/hark.git",

@@ -13,3 +13,4 @@ "author": "Philip Roberts phil@latentflip.com",

"browserify": "~2.22.0",
"bows": "~0.1.3"
"bows": "~0.1.3",
"attachmediastream": "0.0.1"
},

@@ -16,0 +17,0 @@ "dependencies": {

@@ -7,3 +7,3 @@ # Hark

## Usage:
## Example:

@@ -22,3 +22,4 @@ `npm install hark`

var speechEvents = hark(stream);
var options = {};
var speechEvents = hark(stream, options);

@@ -35,9 +36,46 @@ speechEvents.on('speaking', function() {

## How does hark work?
Hark uses the webaudio API to FFT (get the power of) the audio in the audio stream. If the power is above a threshold, it's determined to be speech.
## Usage
```javascript
var speech = hark(stream, options);
speech.on('speaking', function() {
console.log('Speaking!');
});
```
* Pass hark either a webrtc stream which has audio enabled, or an audio element, and an optional options hash (see below for options).
* hark returns an event emitter with the following events:
* `speaking` emitted when the stream appears to be speaking
* `stopped_speaking` emitted when the audio doesn't seem to be speaking
* `volume_change` emitted on every poll event by the event emitter with the current volume (in decibels) and the current threshold for speech
* The hark object also has the following methods to update the config of hark. Both of these options can be passed in on instantiation, but you may wish to alter them either for debug or fine tuning as your app runs.
* `setInterval(interval_in_ms)` change how frequently the analyser polls the audio stream to check for speaking
* `setThreshold(threshold_in_db)` change the minimum volume at which the audio will emit a `speaking` event
## Options
* `interval` (optional, default 100ms) how frequently the analyser polls the audio stream to check if speaking has started or stopped. This will also be the frequency of the `volume_change` events.
* `threshold` (optional, default -65db for audio tags, -45db for rtc streams) the volume at which `speaking`/`stopped\_speaking` events will be fired
* `play` (optional, default true for audio tags, false for webrtc streams) whether the audio stream should also be piped to the speakers, or just swallowed by the analyser. Typically for audio tags you would want to hear them, but for microphone based webrtc streams you may not to avoid feedback.
## Understanding dB/volume threshold
Fine tuning the volume threshold is the main configuration setting for how this module will behave. The levels of -65db and -45db for audio tags and rtc streams respectively have been chosen based on some basic experimentation on my setup, but you may wish to change them (and should if it improves your app).
**What is dB?** Decibels are how sound is measured. The loudest sounds on your system will be at 0dB, and silence in webaudio is -100dB. Speech seems to be between roughly -65dB and -45dB depending on the volume and type of source. If speaking events are being fired too frequently, you would make this number higher (i.e. towards 0). If they are not firing frequently enough (you are speaking loudly but no events are firing), make the number closer to -100dB.
## Demo:
Clone and open example/index.html
Clone and open example/index.html or [view it online](http://latentflip.com/hark/demo)
## Requirements:
Chrome with webrtc audio input flag enabled
Chrome 27+ currently

@@ -44,0 +82,0 @@ ## License

Sorry, the diff of this file is not supported yet

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc