@magenta/music - npm package version comparison

Comparing version 1.1.8 to 1.1.9

.gitignore


es5/core/chords.d.ts

@@ -7,3 +7,3 @@ import * as tf from '@tensorflow/tfjs-core';

Diminished = 3,
Other = 4
Other = 4,
}

@@ -30,3 +30,3 @@ export declare class ChordSymbolException extends Error {

depth: number;
private index;
private index(chord);
encode(chord: string): tf.Tensor<tf.Rank.R1>;

@@ -36,3 +36,3 @@ }

depth: number;
private index;
private index(chord);
encode(chord: string): tf.Tensor<tf.Rank.R1>;

@@ -39,0 +39,0 @@ }

@@ -37,6 +37,6 @@ import * as tf from '@tensorflow/tfjs-core';

readonly numSegments: number;
abstract readonly depth: number;
abstract readonly endTensor: tf.Tensor1D;
abstract readonly NUM_SPLITS: number;
abstract readonly SEGMENTED_BY_TRACK: boolean;
readonly abstract depth: number;
readonly abstract endTensor: tf.Tensor1D;
readonly abstract NUM_SPLITS: number;
readonly abstract SEGMENTED_BY_TRACK: boolean;
abstract toTensor(noteSequence: INoteSequence): tf.Tensor2D;

@@ -127,6 +127,6 @@ abstract toNoteSequence(tensor: tf.Tensor2D, stepsPerQuarter: number): Promise<INoteSequence>;

constructor(args: MultitrackConverterArgs);
private trackToTensor;
private trackToTensor(track?);
toTensor(noteSequence: INoteSequence): tf.Tensor<tf.Rank.R2>;
private tokensToTrack;
private tokensToTrack(tokens);
toNoteSequence(oh: tf.Tensor2D, stepsPerQuarter?: number): Promise<NoteSequence>;
}

@@ -27,4 +27,4 @@ "use strict";

while (_) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
if (f = 1, y && (t = y[op[0] & 2 ? "return" : op[0] ? "throw" : "next"]) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [0, t.value];
switch (op[0]) {

@@ -362,4 +362,4 @@ case 0: case 1: t = op; break;

return __awaiter(this, void 0, void 0, function () {
var _a, _b, ohs, ns, bassNs, drumsNs;
var _this = this;
var ohs, ns, bassNs, drumsNs, _a, _b;
return __generator(this, function (_c) {

@@ -530,4 +530,4 @@ switch (_c.label) {

return __awaiter(this, void 0, void 0, function () {
var _this = this;
var noteSequence, tensors, tracks;
var _this = this;
return __generator(this, function (_a) {

@@ -557,5 +557,5 @@ switch (_a.label) {

tracks.forEach(function (track, instrument) {
var _a;
track.setNumSteps(_this.totalSteps);
(_a = noteSequence.notes).push.apply(_a, track.toNoteSequence(instrument).notes);
var _a;
});

@@ -562,0 +562,0 @@ return [2, noteSequence];

@@ -15,3 +15,3 @@ import { INoteSequence, NoteSequence } from '../protobuf/index';

setTempo(qpm: number): void;
private makeClickSequence;
private makeClickSequence(seq);
start(seq: INoteSequence, qpm?: number): Promise<void>;

@@ -26,3 +26,3 @@ stop(): void;

protected playNote(time: number, note: NoteSequence.INote): void;
private getSynth;
private getSynth(instrument, program?);
}

@@ -29,0 +29,0 @@ export declare class SoundFontPlayer extends BasePlayer {

@@ -27,4 +27,4 @@ "use strict";

while (_) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
if (f = 1, y && (t = y[op[0] & 2 ? "return" : op[0] ? "throw" : "next"]) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [0, t.value];
switch (op[0]) {

@@ -31,0 +31,0 @@ case 0: case 1: t = op; break;

@@ -28,5 +28,5 @@ export interface SampleInfo {

initialize(): Promise<void>;
private sampleInfoToName;
private sampleNameToURL;
private nearestVelocity;
private sampleInfoToName(sampleInfo);
private sampleNameToURL(name);
private nearestVelocity(velocity);
loadSamples(samples: SampleInfo[]): Promise<void>;

@@ -33,0 +33,0 @@ playNote(pitch: number, velocity: number, startTime: number, duration: number, output: any): void;

@@ -17,4 +17,4 @@ "use strict";

while (_) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
if (f = 1, y && (t = y[op[0] & 2 ? "return" : op[0] ? "throw" : "next"]) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [0, t.value];
switch (op[0]) {

@@ -103,4 +103,4 @@ case 0: case 1: t = op; break;

return __awaiter(this, void 0, void 0, function () {
var _this = this;
var nearestSampleNames, uniqueSampleNames, sampleNamesAndURLs;
var _this = this;
return __generator(this, function (_a) {

@@ -204,4 +204,4 @@ switch (_a.label) {

return __awaiter(this, void 0, void 0, function () {
var _this = this;
var instrumentSamples;
var _this = this;
return __generator(this, function (_a) {

@@ -208,0 +208,0 @@ switch (_a.label) {

@@ -33,3 +33,2 @@ "use strict";

AttentionWrapper.prototype.call = function (input, c, h, state) {
var _a;
var nextAttnInput = tf.concat([input, state.attention.as2D(1, -1)], 1);

@@ -60,2 +59,3 @@ var nextRnnInput = tf.add(tf.matMul(nextAttnInput, this.attnInputMatrix), this.attnInputBias.as2D(1, -1));

return { output: output, c: c, h: h, attentionState: { attention: attention, attentionState: attentionState } };
var _a;
};

@@ -62,0 +62,0 @@ return AttentionWrapper;

@@ -29,7 +29,12 @@ import * as aux_inputs from '../core/aux_inputs';

isInitialized(): boolean;
private instantiateFromSpec;
private instantiateFromSpec();
initialize(): Promise<void>;
dispose(): void;
continueSequence(sequence: INoteSequence, steps: number, temperature?: number, chordProgression?: string[]): Promise<INoteSequence>;
private sampleRnn;
continueSequenceAndReturnProbabilities(sequence: INoteSequence, steps: number, temperature?: number, chordProgression?: string[]): Promise<{
sequence: Promise<INoteSequence>;
probs: Float32Array[];
}>;
private continueSequenceImpl(sequence, steps, temperature?, chordProgression?, returnProbs?);
private sampleRnn(inputs, steps, temperature, controls?, auxInputs?, returnProbs?);
}
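
The headline API change in 1.1.9 is the new `continueSequenceAndReturnProbabilities` method declared above. A minimal usage sketch based only on that signature; the checkpoint URL is a hypothetical placeholder and the seed is assumed to be an already-quantized sequence:

```ts
import * as mm from '@magenta/music';

// Sketch, not from the package docs. Assumes `seed` is quantized
// (the implementation reads seed.quantizationInfo.stepsPerQuarter).
async function continueWithProbs(seed: mm.INoteSequence) {
  // Hypothetical checkpoint URL.
  const model = new mm.MusicRNN('https://example.com/checkpoints/melody_rnn');
  await model.initialize();

  // Per the declaration, the resolved value wraps a Promise<INoteSequence>
  // plus one probability vector (Float32Array) per generated step.
  const {sequence, probs} =
      await model.continueSequenceAndReturnProbabilities(seed, 32, 1.1);
  const continuation = await sequence;

  console.log((continuation.notes || []).length, 'notes,',
              probs.length, 'step distributions');
  model.dispose();
  return {continuation, probs};
}
```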

@@ -17,4 +17,4 @@ "use strict";

while (_) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
if (f = 1, y && (t = y[op[0] & 2 ? "return" : op[0] ? "throw" : "next"]) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [0, t.value];
switch (op[0]) {

@@ -70,4 +70,4 @@ case 0: case 1: t = op; break;

return __awaiter(this, void 0, void 0, function () {
var _this = this;
var vars, hasAttention, rnnPrefix, l, _loop_1, this_1, state_1;
var _this = this;
return __generator(this, function (_a) {

@@ -147,4 +147,24 @@ switch (_a.label) {

return __awaiter(this, void 0, void 0, function () {
var oh, result, _a, _b;
var result;
return __generator(this, function (_a) {
switch (_a.label) {
case 0: return [4, this.continueSequenceImpl(sequence, steps, temperature, chordProgression, false)];
case 1:
result = _a.sent();
return [2, result.sequence];
}
});
});
};
MusicRNN.prototype.continueSequenceAndReturnProbabilities = function (sequence, steps, temperature, chordProgression) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
return [2, this.continueSequenceImpl(sequence, steps, temperature, chordProgression, true)];
});
});
};
MusicRNN.prototype.continueSequenceImpl = function (sequence, steps, temperature, chordProgression, returnProbs) {
return __awaiter(this, void 0, void 0, function () {
var _this = this;
var oh, samplesAndProbs, result, probs, i, _a, _b;
return __generator(this, function (_c) {

@@ -176,11 +196,31 @@ switch (_c.label) {

undefined;
var samples = _this.sampleRnn(inputs, steps, temperature, controls, auxInputs);
return tf.stack(samples).as2D(samples.length, outputSize);
var rnnResult = _this.sampleRnn(inputs, steps, temperature, controls, auxInputs, returnProbs);
var samples = rnnResult.samples;
return {
samples: tf.stack(samples).as2D(samples.length, outputSize),
probs: rnnResult.probs
};
});
_b = (_a = this.dataConverter).toNoteSequence;
return [4, oh];
case 3:
result = _b.apply(_a, [_c.sent(), sequence.quantizationInfo.stepsPerQuarter]);
oh.dispose();
return [2, result];
samplesAndProbs = _c.sent();
result = this.dataConverter.toNoteSequence(samplesAndProbs.samples, sequence.quantizationInfo.stepsPerQuarter);
probs = [];
if (!returnProbs) return [3, 7];
i = 0;
_c.label = 4;
case 4:
if (!(i < samplesAndProbs.probs.length)) return [3, 7];
_b = (_a = probs).push;
return [4, samplesAndProbs.probs[i].data()];
case 5:
_b.apply(_a, [_c.sent()]);
samplesAndProbs.probs[i].dispose();
_c.label = 6;
case 6:
i++;
return [3, 4];
case 7:
oh.samples.dispose();
return [2, { sequence: result, probs: probs }];
}

@@ -190,4 +230,3 @@ });

};
MusicRNN.prototype.sampleRnn = function (inputs, steps, temperature, controls, auxInputs) {
var _a;
MusicRNN.prototype.sampleRnn = function (inputs, steps, temperature, controls, auxInputs, returnProbs) {
var length = inputs.shape[0];

@@ -205,2 +244,3 @@ var outputSize = inputs.shape[1];

var samples = [];
var probs = [];
var splitInputs = tf.split(inputs.toFloat(), length);

@@ -216,6 +256,15 @@ var splitControls = controls ? tf.split(controls, controls.shape[0]) : undefined;

var logits = lastOutput.matMul(this.lstmFcW).add(this.lstmFcB);
var sampledOutput = (temperature ?
tf.multinomial(logits.div(tf.scalar(temperature)), 1)
.as1D() :
logits.argMax(1).as1D());
var sampledOutput = void 0;
if (returnProbs || temperature) {
var theseProbs = temperature ?
tf.softmax(logits.div(tf.scalar(temperature))) :
tf.softmax(logits);
probs.push(theseProbs);
sampledOutput =
tf.multinomial(theseProbs, 1, undefined, true)
.as1D();
}
else {
sampledOutput = logits.argMax(1).as1D();
}
nextInput = tf.oneHot(sampledOutput, outputSize).toFloat();

@@ -248,3 +297,4 @@ samples.push(nextInput.as1D().toBool());

}
return samples;
return { samples: samples, probs: probs };
var _a;
};
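
The sampling change above is the core of the refactor: instead of feeding raw logits to `tf.multinomial`, the model now builds an explicit softmax distribution (temperature-scaled when a temperature is given) so the per-step probabilities can be retained and returned when `returnProbs` is set. A standalone sketch of the same pattern, written against the tfjs-core API already used in this file (`as1D`, `tf.multinomial` with `normalized = true`); it is an illustration, not the library's code:

```ts
import * as tf from '@tensorflow/tfjs-core';

// Sample one output index per row of `logits`, optionally temperature-scaled.
function sampleFromLogits(logits: tf.Tensor2D, temperature?: number): tf.Tensor1D {
  return tf.tidy(() => {
    if (temperature) {
      // Dividing logits by the temperature before softmax flattens (T > 1)
      // or sharpens (T < 1) the distribution; keeping `probs` around is what
      // lets the new API surface per-step probabilities to the caller.
      const probs = tf.softmax(logits.div(tf.scalar(temperature))) as tf.Tensor2D;
      // `normalized = true` tells multinomial the input is already a
      // probability distribution rather than raw logits.
      return tf.multinomial(probs, 1, undefined, true).as1D();
    }
    // No temperature: fall back to greedy argmax, as before.
    return logits.argMax(1).as1D();
  });
}
```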

@@ -251,0 +301,0 @@ return MusicRNN;

@@ -42,15 +42,15 @@ import * as tf from '@tensorflow/tfjs-core';

constructor(checkpointURL: string, spec?: MusicVAESpec);
private instantiateFromSpec;
private instantiateFromSpec();
dispose(): void;
private getLstmLayers;
private getLstmLayers(cellFormat, vars);
initialize(): Promise<void>;
isInitialized(): boolean;
interpolate(inputSequences: INoteSequence[], numInterps: number | number[], temperature?: number, chordProgression?: string[]): Promise<INoteSequence[]>;
private getSegmentLengths;
private encodeChordProgression;
private getSegmentLengths(inputTensors);
private encodeChordProgression(chordProgression);
encode(inputSequences: INoteSequence[], chordProgression?: string[]): Promise<tf.Tensor<tf.Rank.R2>>;
decode(z: tf.Tensor2D, temperature?: number, chordProgression?: string[], stepsPerQuarter?: number): Promise<INoteSequence[]>;
private getInterpolatedZs;
private getInterpolatedZs(z, numInterps);
sample(numSamples: number, temperature?: number, chordProgression?: string[], stepsPerQuarter?: number): Promise<INoteSequence[]>;
}
export { LayerVars, Encoder, Decoder, Nade, MusicVAE, };
export { LayerVars, Encoder, Decoder, Nade, MusicVAE };
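
The MusicVAE changes in this hunk appear to be declaration-emit churn (private members losing their parameter lists) plus a trailing-comma cleanup in the export list; the public methods shown are unchanged. For orientation, a minimal usage sketch against that public API; the checkpoint URL is a hypothetical placeholder:

```ts
import * as mm from '@magenta/music';

// Sketch only: sample from a MusicVAE checkpoint and interpolate between
// the results, using the signatures shown in the declaration above.
async function sampleAndInterpolate() {
  const vae = new mm.MusicVAE('https://example.com/checkpoints/my_music_vae');
  await vae.initialize();
  const samples = await vae.sample(2, 0.5);          // 2 samples at temperature 0.5
  const interps = await vae.interpolate(samples, 4); // 4 interpolated sequences
  vae.dispose();
  return interps;
}
```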

@@ -27,4 +27,4 @@ "use strict";

while (_) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
if (f = 1, y && (t = y[op[0] & 2 ? "return" : op[0] ? "throw" : "next"]) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [0, t.value];
switch (op[0]) {

@@ -204,3 +204,2 @@ case 0: case 1: t = op; break;

return tf.tidy(function () {
var _a;
var lstmCell = initLstmCells(z, _this.lstmCellVars, _this.zToInitStateVars);

@@ -236,2 +235,3 @@ var samples = [];

return tf.stack(samples, 1);
var _a;
});

@@ -258,3 +258,2 @@ };

return tf.tidy(function () {
var _a;
var lstmCell = initLstmCells(z, _this.lstmCellVars, _this.zToInitStateVars);

@@ -277,2 +276,3 @@ var samples = [];

return tf.concat(samples, 1);
var _a;
});

@@ -350,4 +350,4 @@ };

return __awaiter(this, void 0, void 0, function () {
var _this = this;
var LSTM_CELL_FORMAT, MUTLI_LSTM_CELL_FORMAT, CONDUCTOR_PREFIX, BIDI_LSTM_CELL, ENCODER_FORMAT, HIER_ENCODER_FORMAT, vars, encMu, fwLayers_1, bwLayers_1, baseEncoders, fwLayers, bwLayers, decVarPrefix, decVarPrefixes, i, baseDecoders, condLstmLayers, condZtoInitState;
var _this = this;
return __generator(this, function (_a) {

@@ -451,4 +451,4 @@ switch (_a.label) {

return __awaiter(this, void 0, void 0, function () {
var _this = this;
var inputZs, interpZs, outputSequenes;
var _this = this;
return __generator(this, function (_a) {

@@ -482,4 +482,4 @@ switch (_a.label) {

return __awaiter(this, void 0, void 0, function () {
var _this = this;
var numSteps, numSegments, isEndTensor, isEndArray, maxSegmentLength, segmentLengths, offset, fromIndex;
var _this = this;
return __generator(this, function (_a) {

@@ -533,4 +533,4 @@ switch (_a.label) {

return __awaiter(this, void 0, void 0, function () {
var _this = this;
var inputTensors, segmentLengths, _a, newInputTensors, z;
var _this = this;
return __generator(this, function (_b) {

@@ -585,4 +585,4 @@ switch (_b.label) {

return __awaiter(this, void 0, void 0, function () {
var _this = this;
var numSteps, ohSeqs, outputSequences, _i, ohSeqs_1, oh, _a, _b;
var _this = this;
return __generator(this, function (_c) {

@@ -678,4 +678,4 @@ switch (_c.label) {

return __awaiter(this, void 0, void 0, function () {
var _this = this;
var randZs, outputSequenes;
var _this = this;
return __generator(this, function (_a) {

@@ -682,0 +682,0 @@ switch (_a.label) {

{
"name": "@magenta/music",
"version": "1.1.8",
"version": "1.1.9",
"description": "Make music in the browser with machine learning.",

@@ -5,0 +5,0 @@ "main": "es5/index.js",

@@ -11,6 +11,6 @@ # @magenta/music

* [Example Applcations](#example-applications)
* [Supported Models](#supported-models)
* [Getting Started](#getting-started)
* [Model Checkpoints](#model-checkpoints)
- [Example Applications](#example-applications)
- [Supported Models](#supported-models)
- [Getting Started](#getting-started)
- [Model Checkpoints](#model-checkpoints)

@@ -21,6 +21,6 @@ ## Example Applications

* [Beat Blender](https://g.co/beatblender) by [Google Creative Lab](https://github.com/googlecreativelab)
* [Melody Mixer](https://g.co/melodymixer) by [Google Creative Lab](https://github.com/googlecreativelab)
* [Latent Loops](https://goo.gl/magenta/latent-loops) by [Google Pie Shop](https://github.com/teampieshop)
* [Neural Drum Machine](https://codepen.io/teropa/pen/RMGxOQ) by [Tero Parviainen](https://github.com/teropa)
- [Beat Blender](https://g.co/beatblender) by [Google Creative Lab](https://github.com/googlecreativelab)
- [Melody Mixer](https://g.co/melodymixer) by [Google Creative Lab](https://github.com/googlecreativelab)
- [Latent Loops](https://goo.gl/magenta/latent-loops) by [Google Pie Shop](https://github.com/teampieshop)
- [Neural Drum Machine](https://codepen.io/teropa/pen/RMGxOQ) by [Tero Parviainen](https://github.com/teropa)

@@ -92,5 +92,6 @@ ## Supported Models

model.initialize()
.then(() => model.sample(1))
.then((samples) => player.start(samples[0]));
model
.initialize()
.then(() => model.sample(1))
.then(samples => player.start(samples[0]));
```

@@ -141,10 +142,10 @@

"type": "MusicRNN",
"dataConverter": {
"type": "MelodyConverter",
"args": {
"minPitch": 48,
"maxPitch": 83
}
},
"chordEncoder": "PitchChordEncoder"
"dataConverter": {
"type": "MelodyConverter",
"args": {
"minPitch": 48,
"maxPitch": 83
}
},
"chordEncoder": "PitchChordEncoder"
}
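
The README hunk above only re-indents the sample `config.json` for a chord-conditioned MelodyRNN checkpoint. As a hedged illustration of how such a spec could be consumed, it might be passed directly to the constructor, mirroring the `constructor(checkpointURL, spec?)` shape shown for MusicVAE earlier in this diff; the URL and the `as any` cast are assumptions, since the spec typings are not part of this comparison:

```ts
import * as mm from '@magenta/music';

// Spec object matching the config.json shown above.
const spec = {
  type: 'MusicRNN',
  dataConverter: {
    type: 'MelodyConverter',
    args: {minPitch: 48, maxPitch: 83},
  },
  chordEncoder: 'PitchChordEncoder',
} as any;  // cast: the exact spec interface is not shown in this diff

// Hypothetical checkpoint URL hosting the weights for this spec.
const improvRnn = new mm.MusicRNN('https://example.com/checkpoints/chord_improv_rnn', spec);
```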

@@ -156,2 +157,3 @@ ```

<!-- links -->
[melody-rnn]: https://github.com/tensorflow/magenta/tree/master/magenta/models/melody_rnn

@@ -158,0 +160,0 @@ [drums-rnn]: https://github.com/tensorflow/magenta/tree/master/magenta/models/drums_rnn

