
@chordbook/tuner - npm Package Compare versions

Comparing version 0.0.2 to 0.0.3


dist/index.d.ts

@@ -11,2 +11,4 @@ import { PitchDetector } from 'pitchy';

  maxFrequency?: number;
+ updateInterval?: number;
+ sampleRate?: number;
  onNote?: (note: Note) => void;

@@ -28,3 +30,2 @@ }

  analyser: AnalyserNode;
- processor: ScriptProcessorNode;
  detector: PitchDetector<Float32Array>;

@@ -31,0 +32,0 @@ config: TunerConfig;
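Read together, these hunks add two optional config fields (updateInterval, sampleRate) and drop the processor property from the returned tuner. A rough sketch of the 0.0.3 typings, assembled from the hunks above and from the object the bundle returns (the interface names and overall layout are assumptions, not copied from the package):

import { PitchDetector } from 'pitchy'

// Sketch only: field names come from the diff and the bundled defaults.
export interface TunerConfig {
  a4?: number
  clarityThreshold?: number
  minVolumeDecibels?: number
  bufferSize?: number
  smoothingTimeConstant?: number
  minFrequency?: number
  maxFrequency?: number
  updateInterval?: number // new in 0.0.3
  sampleRate?: number // new in 0.0.3
  onNote?: (note: Note) => void
}

// Shape of the value passed to onNote, taken from getNote's return value in the bundle.
export interface Note {
  frequency: number
  name: string
  value: number // MIDI note number
  cents: number
  octave: number
  clarity: number
}

// What createTuner returns; the processor (a ScriptProcessorNode) is gone in 0.0.3.
export interface Tuner {
  start: () => Promise<void>
  stop: () => Promise<void>
  context: AudioContext
  analyser: AnalyserNode
  detector: PitchDetector<Float32Array>
  config: TunerConfig
  getNote: (frequency: number, clarity: number) => Note
}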

dist/index.js

@@ -1,2 +0,2 @@

import{PitchDetector as q}from"pitchy";var p=69,C=["C","C\u266F","D","D\u266F","E","F","F\u266F","G","G\u266F","A","A\u266F","B"],g={a4:440,clarityThreshold:.95,minVolumeDecibels:-100,bufferSize:2048,smoothingTimeConstant:.9,minFrequency:73.42,maxFrequency:1084};function x(e={}){e={...g,...e};let c=q.forFloat32Array(e.bufferSize);c.minVolumeDecibels=e.minVolumeDecibels;let t=new AudioContext,i=t.createScriptProcessor(e.bufferSize,1,1);i.addEventListener("audioprocess",T);let h=new AnalyserNode(t,{fftSize:e.bufferSize,smoothingTimeConstant:e.smoothingTimeConstant}),m=t.createBiquadFilter();m.type="highpass",m.frequency.value=e.minFrequency;let l=t.createBiquadFilter();l.type="lowpass",l.frequency.value=e.maxFrequency;let a=[l,m,h,i,t.destination];for(var s=0;s<a.length-1;s++)a[s].connect(a[s+1]);let o,b;async function y(){o=await navigator.mediaDevices.getUserMedia({audio:!0}),b=t.createMediaStreamSource(o),b.connect(a[0])}function T(n){let d=n.inputBuffer.getChannelData(0),[r,u]=c.findPitch(d,t.sampleRate);u>e.clarityThreshold&&e.onNote?.(f(r,u))}function f(n,d){let r=Math.round(12*(Math.log(n/e.a4)/Math.log(2)))+p,u=e.a4*Math.pow(2,(r-p)/12),S=Math.floor(1200*Math.log(n/u)/Math.log(2)),F=C[r%12],M=Math.floor(r/12)-1;return{frequency:n,name:F,value:r,cents:S,octave:M,clarity:d}}async function v(){o.getTracks().forEach(n=>n.stop()),o.removeTrack(o.getAudioTracks()[0])}return{start:y,stop:v,context:t,analyser:h,processor:i,detector:c,config:e,getNote:f}}export{g as TunerDefaults,x as createTuner};
import{PitchDetector as M}from"pitchy";var y=69,D=["C","C\u266F","D","D\u266F","E","F","F\u266F","G","G\u266F","A","A\u266F","B"],S={a4:440,clarityThreshold:.9,minVolumeDecibels:-1e3,bufferSize:8192,smoothingTimeConstant:.8,minFrequency:27.5,maxFrequency:4186.01,updateInterval:50,sampleRate:44100};function x(e={}){e={...S,...e};let n=new AudioContext({sampleRate:e.sampleRate}),f=new BiquadFilterNode(n,{type:"highpass",frequency:e.minFrequency}),l=new BiquadFilterNode(n,{type:"lowpass",frequency:e.maxFrequency}),s=new AnalyserNode(n,{fftSize:e.bufferSize,smoothingTimeConstant:e.smoothingTimeConstant});l.connect(f).connect(s);let u=M.forFloat32Array(s.fftSize);u.minVolumeDecibels=e.minVolumeDecibels;let d=new Float32Array(u.inputLength),a,h,p=0;async function F(){a=await navigator.mediaDevices.getUserMedia({audio:!0}),h=n.createMediaStreamSource(a),h.connect(l),p=setInterval(T,e.updateInterval)}function T(){let{clarityThreshold:t,minFrequency:i,maxFrequency:r,onNote:m}=e;s.getFloatTimeDomainData(d);let[o,c]=u.findPitch(d,n.sampleRate);c>t&&o>i&&o<r&&m?.(b(o,c))}function b(t,i){let r=Math.round(12*(Math.log(t/e.a4)/Math.log(2)))+y,m=e.a4*Math.pow(2,(r-y)/12),o=Math.floor(1200*Math.log(t/m)/Math.log(2)),c=D[r%12],q=Math.floor(r/12)-1;return{frequency:t,name:c,value:r,cents:o,octave:q,clarity:i}}async function v(){clearInterval(p),a.getTracks().forEach(t=>t.stop()),a.removeTrack(a.getAudioTracks()[0])}return{start:F,stop:v,context:n,analyser:s,detector:u,config:e,getNote:b}}export{S as TunerDefaults,x as createTuner};
//# sourceMappingURL=index.js.map
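Aside from the new defaults and the switch away from ScriptProcessorNode (detailed in the source diff below), the pitch-to-note conversion is identical in both bundles. Deminified, with illustrative names in place of the single-letter minified ones, it amounts to:

// MIDI note number of A4; NOTES indexes note names within an octave.
const SEMITONE = 69
const NOTES = ['C', 'C♯', 'D', 'D♯', 'E', 'F', 'F♯', 'G', 'G♯', 'A', 'A♯', 'B']

function getNote(frequency: number, clarity: number, a4 = 440) {
  // Nearest equal-tempered note: 12 semitones per doubling of frequency.
  const value = Math.round(12 * (Math.log(frequency / a4) / Math.log(2))) + SEMITONE
  // Ideal frequency of that note, then the deviation from it in cents.
  const pitch = a4 * Math.pow(2, (value - SEMITONE) / 12)
  const cents = Math.floor(1200 * (Math.log(frequency / pitch) / Math.log(2)))
  return {
    frequency,
    name: NOTES[value % 12],
    value,
    cents,
    octave: Math.floor(value / 12) - 1,
    clarity,
  }
}

// Example: getNote(446, 0.97) reports A4, about 23 cents sharp:
// { frequency: 446, name: 'A', value: 69, cents: 23, octave: 4, clarity: 0.97 }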
package.json

  {
    "name": "@chordbook/tuner",
-   "version": "0.0.2",
+   "version": "0.0.3",
    "license": "GPL-3.0",

@@ -5,0 +5,0 @@ "type": "module",

README.md

@@ -33,2 +33,5 @@ # @chordbook/tuner

+ // The number of milliseconds between each pitch detection.
+ updateInterval: 50,
  // The frequency of middle A. Defaults to 440Hz.

@@ -38,18 +41,22 @@ a4: 440,

  // The minimum clarity threshold. Anything below this will be ignored
- clarityThreshold: 0.95,
+ clarityThreshold: 0.9,
- // The minimum volume threshold. -100 means 1/100th the volume of the loudest sound.
- minVolumeDecibels: -100,
+ // The minimum volume threshold. -1000 means 1/1000th the volume of the loudest sound.
+ minVolumeDecibels: -1000,
  // The minimum and maximum frequencies to detect. To reduce noise, everything else is
  // filtered out using a lowpass and highpass filter.
- minFrequency: 73.42, // D2, drop D
- maxFrequency: 1084.0, // C6, highest note on the guitar in front of me
+ minFrequency: 27.5, // A0, Lowest note on a piano
+ maxFrequency: 4186.01, // C8, Highest note on a piano
+ // The sample rate to use for the audio context.
+ // https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext/sampleRate
+ sampleRate: 44100,
  // The size of buffer to use for frequency analysis, which maps to the `fftSize`:
  // https://developer.mozilla.org/en-US/docs/Web/API/AnalyserNode/fftSize
- bufferSize: 2048,
+ bufferSize: 8192,
  // https://developer.mozilla.org/en-US/docs/Web/API/AnalyserNode/smoothingTimeConstant
- smoothingTimeConstant: 0.9,
+ smoothingTimeConstant: 0.8
  })

@@ -56,0 +63,0 @@
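In practice the API surface is unchanged; only the defaults and the two new options differ. A minimal consumer of 0.0.3 might look like this (the callback body and logging are illustrative; the option names and Note fields come from the diff above):

import { createTuner } from '@chordbook/tuner'

const tuner = createTuner({
  // New in 0.0.3: how often to poll for a pitch, in milliseconds.
  updateInterval: 50,
  onNote: note => {
    // e.g. "A4 +23 cents (clarity 0.97)"
    const sign = note.cents >= 0 ? '+' : ''
    console.log(`${note.name}${note.octave} ${sign}${note.cents} cents (clarity ${note.clarity.toFixed(2)})`)
  },
})

// start() requests microphone access and begins polling; stop() releases the stream.
await tuner.start()
// ...later
await tuner.stop()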

src/index.ts

@@ -14,2 +14,4 @@ import { PitchDetector } from 'pitchy'

  maxFrequency?: number
+ updateInterval?: number
+ sampleRate?: number
  onNote?: (note: Note) => void

@@ -20,8 +22,10 @@ }

  a4: 440,
- clarityThreshold: 0.95,
- minVolumeDecibels: -100,
- bufferSize: 2048,
- smoothingTimeConstant: 0.9,
- minFrequency: 73.42, // D2
- maxFrequency: 1084.0, // C6, highest note on the guitar in front of me
+ clarityThreshold: 0.9,
+ minVolumeDecibels: -1000,
+ bufferSize: 8192,
+ smoothingTimeConstant: 0.8,
+ minFrequency: 27.5, // A0, Lowest note on a piano
+ maxFrequency: 4186.01, // C8, Highest note on a piano
+ updateInterval: 50,
+ sampleRate: 44100, // Seems to work better than 48000 for some reason
  }

@@ -41,37 +45,15 @@

- const detector = PitchDetector.forFloat32Array(config.bufferSize!)
+ const context = new AudioContext({ sampleRate: config.sampleRate })
+ const highpass = new BiquadFilterNode(context, { type: "highpass", frequency: config.minFrequency })
+ const lowpass = new BiquadFilterNode(context, { type: "lowpass", frequency: config.maxFrequency })
+ const analyser = new AnalyserNode(context, { fftSize: config.bufferSize, smoothingTimeConstant: config.smoothingTimeConstant })
+ lowpass.connect(highpass).connect(analyser)
+ const detector = PitchDetector.forFloat32Array(analyser.fftSize)
  detector.minVolumeDecibels = config.minVolumeDecibels!;
+ const inputBuffer = new Float32Array(detector.inputLength)
- const context = new AudioContext()
- const processor = context.createScriptProcessor(config.bufferSize, 1, 1)
- processor.addEventListener('audioprocess', process)
- const analyser = new AnalyserNode(context, {
-   fftSize: config.bufferSize,
-   smoothingTimeConstant: config.smoothingTimeConstant
- })
- const highpass = context.createBiquadFilter();
- highpass.type = "highpass";
- highpass.frequency.value = config.minFrequency!;
- const lowpass = context.createBiquadFilter();
- lowpass.type = "lowpass";
- lowpass.frequency.value = config.maxFrequency!;
- const pipeline: AudioNode[] = [
-   lowpass,
-   highpass,
-   analyser,
-   processor,
-   context.destination
- ]
- for (var i = 0; i < pipeline.length - 1; i++) {
-   pipeline[i].connect(pipeline[i + 1])
- }
  let stream: MediaStream
  let source: MediaStreamAudioSourceNode
+ let interval = 0;

@@ -81,11 +63,14 @@ async function start () {

  source = context.createMediaStreamSource(stream)
- source.connect(pipeline[0])
+ source.connect(lowpass)
+ interval = setInterval(process, config.updateInterval!)
  }
- function process(event: AudioProcessingEvent) {
-   const data = event.inputBuffer.getChannelData(0)
-   const [frequency, clarity] = detector.findPitch(data, context.sampleRate)
+ function process() {
+   const { clarityThreshold, minFrequency, maxFrequency, onNote } = config
+   analyser.getFloatTimeDomainData(inputBuffer);
+   const [frequency, clarity] = detector.findPitch(inputBuffer, context.sampleRate)
-   if (clarity > config.clarityThreshold!) {
-     config.onNote?.(getNote(frequency, clarity))
+   if (clarity > clarityThreshold! && frequency > minFrequency! && frequency < maxFrequency!) {
+     onNote?.(getNote(frequency, clarity))
    }

@@ -104,2 +89,3 @@ }

  async function stop() {
+   clearInterval(interval)
    stream.getTracks().forEach(track => track.stop())

@@ -109,3 +95,3 @@ stream.removeTrack(stream.getAudioTracks()[0])

- return { start, stop, context, analyser, processor, detector, config, getNote }
+ return { start, stop, context, analyser, detector, config, getNote }
}
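The net effect of these hunks: 0.0.2 ran pitch detection inside the 'audioprocess' callback of a (deprecated) ScriptProcessorNode, while 0.0.3 polls the AnalyserNode on a timer, so the detection rate is governed by updateInterval rather than by buffer size, and results are additionally gated to the minFrequency/maxFrequency range. Stripped of the config plumbing, the new read path is roughly this (a sketch, not the package's code verbatim):

import { PitchDetector } from 'pitchy'

const context = new AudioContext({ sampleRate: 44100 })
const analyser = new AnalyserNode(context, { fftSize: 8192, smoothingTimeConstant: 0.8 })
const detector = PitchDetector.forFloat32Array(analyser.fftSize)
const buffer = new Float32Array(detector.inputLength)

// Poll the analyser on a timer instead of reacting to audio-processing events.
const interval = setInterval(() => {
  analyser.getFloatTimeDomainData(buffer) // snapshot of the most recent samples
  const [frequency, clarity] = detector.findPitch(buffer, context.sampleRate)
  if (clarity > 0.9 && frequency > 27.5 && frequency < 4186.01) {
    // hand the detected pitch to the caller (onNote in the real package)
  }
}, 50)

// Later, as stop() does above: clearInterval(interval) and stop the media stream tracks.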

