nodejs-polars - npm Package Compare versions

Comparing version 0.0.6 to 0.0.7

bin/dataframe.d.ts

@@ -6,5 +6,5 @@ /// <reference types="node" />

import { Series } from "./series";
import { Stream } from "stream";
import { Writable } from "stream";
import { DataType, JoinBaseOptions, JsDataFrame, WriteCsvOptions } from "./datatypes";
import { ColumnSelection, DownsampleRule, FillNullStrategy, ColumnsOrExpr, ValueOrArray, ExprOrString } from "./utils";
import { ColumnSelection, FillNullStrategy, ColumnsOrExpr, ValueOrArray, ExprOrString } from "./utils";
declare const inspect: unique symbol;

@@ -28,7 +28,2 @@ export interface DataFrame {

/**
* TODO
* @param func
*/
apply<U>(func: <T>(s: T) => U): DataFrame;
/**
* Very cheap deep clone.

@@ -70,46 +65,2 @@ */

/**
* Start a downsampling groupby operation.
* @param by - Column that will be used as key in the groupby operation. (This should be a datetime/date column.)
* @param rule - Units of the downscaling operation.
* @param n - Number of units (e.g. 5 "day", 15 "minute".)
* @example
* ```
* >>> df = pl.DataFrame(
* >>> {
* >>> "A": ["2020-01-01", "2020-01-02", "2020-01-03","2020-01-04","2020-01-05","2020-01-06"],
* >>> "B": [1.0, 8.0, 6.0, 2.0, 16.0, 10.0],
* >>> "C": [3.0, 6.0, 9.0, 2.0, 13.0, 8.0],
* >>> "D": [12.0, 5.0, 9.0, 2.0, 11.0, 2.0],
* >>> }
* >>> )
* >>> df['A'] = df['A'].str.strptime(pl.Date, "%Y-%m-%d")
* >>>
* >>> df.downsample("A", rule="day", n=3).agg(
* >>> {
* >>> "B": "max",
* >>> "C": "min",
* >>> "D": "last"
* >>> }
* >>> )
* shape: (3, 4)
* ┌──────────────┬───────┬───────┬────────┐
* │ A ┆ B_max ┆ C_min ┆ D_last │
* │ --- ┆ --- ┆ --- ┆ --- │
* │ date(days) ┆ f64 ┆ f64 ┆ f64 │
* ╞══════════════╪═══════╪═══════╪════════╡
* │ 2019-12-31 ┆ 8 ┆ 3 ┆ 5 │
* ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
* │ 2020-01-03 ┆ 16 ┆ 2 ┆ 11 │
* ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
* │ 2020-01-06 ┆ 10 ┆ 8 ┆ 2 │
* └──────────────┴───────┴───────┴────────┘
* ```
*/
downsample(opts: {
by: ColumnSelection;
rule: DownsampleRule;
n: number;
}): GroupBy;
downsample(by: ColumnSelection, rule: DownsampleRule, n: number): GroupBy;
/**
* __Remove column from DataFrame and return as new.__

@@ -1070,3 +1021,3 @@ * ___

* // using a file path
* >>> df.head(1).toCSV({dest: "./foo.csv"})
* >>> df.head(1).toCSV("./foo.csv")
* // foo.csv

@@ -1083,3 +1034,3 @@ * foo,bar,ham

* >>> });
* >>> df.head(1).toCSV({dest: writeStream, hasHeader: false})
* >>> df.head(1).toCSV(writeStream, {hasHeader: false})
* writeStream: '1,6,a'

@@ -1090,4 +1041,3 @@ * ```

toCSV(options: WriteCsvOptions): string;
toCSV(dest: string | Stream): void;
toCSV(dest: string | Stream, options: WriteCsvOptions): void;
toCSV(dest: string | Writable, options?: WriteCsvOptions): void;
toJS(): object;
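
A minimal sketch of the destination-first toCSV overloads above (assumes a DataFrame built with the `pl.DataFrame` constructor shown elsewhere in this diff; file names are hypothetical):

const pl = require("nodejs-polars");
const fs = require("fs");
const df = pl.DataFrame({ foo: [1], bar: [6], ham: ["a"] });

const csv = df.toCSV({ sep: "," });                                // options only -> returns the CSV as a string
df.toCSV("./foo.csv");                                             // string dest -> writes to that path
df.toCSV(fs.createWriteStream("./out.csv"), { hasHeader: false }); // any Writable -> rows are streamed out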

@@ -1101,3 +1051,3 @@ toJS(options: {

}): string;
toJSON(dest: string | Stream, options?: {
toJSON(dest: string | Writable, options?: {
orient: "row" | "col" | "literal";

@@ -1108,12 +1058,2 @@ }): void;

/**
* Upsample a DataFrame at a regular frequency.
* @param by - Column that will be used as key in the upsampling operation. (This should be a datetime column.)
* @param interval - Interval periods.
*/
upsample(opts: {
by: string;
interval: number;
}): DataFrame;
upsample(by: string, interval: number): DataFrame;
/**
* Aggregate the columns of this DataFrame to their variance value.

@@ -1120,0 +1060,0 @@ * @example

@@ -13,3 +13,2 @@ "use strict";

const expr_1 = require("./lazy/expr");
const error_1 = require("./error");
const series_1 = require("./series");

@@ -321,9 +320,10 @@ const stream_1 = require("stream");

options = { hasHeader: true, sep: ",", ...options };
if (dest instanceof stream_1.Stream.Writable) {
if (dest instanceof stream_1.Writable) {
unwrap("write_csv_stream", { writeStream: dest, ...options });
dest.end("");
}
else if (typeof dest === "string") {
unwrap("write_csv", { path: dest, ...options });
unwrap("write_csv_path", { path: dest, ...options });
}
else if (!dest || (dest.constructor.name === "Object" && !dest["dest"])) {
else {
let body = "";

@@ -337,7 +337,5 @@ const writeStream = new stream_1.Stream.Writable({

unwrap("write_csv_stream", { writeStream, ...options, ...dest });
writeStream.end("");
return body;
}
else {
throw new TypeError("unknown destination type, Supported types are 'string' and 'Stream.Writeable'");
}
},

@@ -377,3 +375,3 @@ toJS(options) {

else if (typeof dest === "string" && dest.length) {
unwrap("write_json", { path: dest });
unwrap("write_json_path", { path: dest });
}

@@ -392,3 +390,3 @@ else if (!dest) {

else {
throw new TypeError("unknown destination type, Supported types are 'string' and 'Stream.Writeable'");
throw new TypeError("unknown destination type, supported types are 'string' and 'Writable'");
}

@@ -409,7 +407,4 @@ },

var: noArgWrap("var"),
apply: () => { throw (0, error_1.todo)(); },
map: (fn) => map((0, exports.dfWrapper)(_df), fn),
pipe: (fn) => { throw (0, error_1.todo)(); },
row: (index) => unwrap("to_row", { idx: index }),
upsample: (index) => { throw (0, error_1.todo)(); },
vstack: (other) => wrap("vstack", { other: other._df }),

@@ -416,0 +411,0 @@ withColumn(column) {

@@ -1,3 +0,1 @@

/// <reference types="node" />
import { Stream } from "stream";
export declare type DtypeToPrimitive<T> = T extends DataType.Bool ? boolean : T extends DataType.Utf8 ? string : T extends DataType.Categorical ? string : T extends DataType.Datetime ? number | Date : T extends DataType.Date ? Date : T extends DataType.UInt64 ? bigint : T extends DataType.Int64 ? bigint : number;

@@ -36,3 +34,2 @@ export declare type PrimitiveToDtype<T> = T extends boolean ? DataType.Bool : T extends string ? DataType.Utf8 : T extends Date ? DataType.Datetime : T extends number ? DataType.Float64 : T extends bigint ? DataType.Int64 : T extends ArrayLike<any> ? DataType.List : DataType.Object;

endRows?: number;
file: string;
hasHeader: boolean;

@@ -69,3 +66,2 @@ ignoreErrors?: boolean;

export declare type WriteCsvOptions = {
dest?: string | Stream;
hasHeader?: boolean;

@@ -72,0 +68,0 @@ sep?: string;

import { Series } from "./series";
import { DataFrame } from "./dataframe";
declare type ConcatOptions = {
rechunk: boolean;
how?: "vertical";
rechunk?: boolean;
how?: "vertical" | "horizontal";
};

@@ -23,6 +23,29 @@ /**

export declare function repeat<V>(value: V, n: number, name?: string): Series<V>;
export declare function concat(item: Array<DataFrame>): DataFrame;
export declare function concat<T>(item: Array<Series<T>>): Series<T>;
export declare function concat(item: Array<DataFrame>, options: ConcatOptions): DataFrame;
export declare function concat<T>(item: Array<Series<T>>, options: ConcatOptions): Series<T>;
/**
* Aggregate all the Dataframes/Series in a List of DataFrames/Series to a single DataFrame/Series.
* @param items DataFrames/Series/LazyFrames to concatenate.
* @param options.rechunk rechunk the final DataFrame/Series.
* @param options.how Only used if the items are DataFrames. *Defaults to 'vertical'*
* - Vertical: Applies multiple `vstack` operations.
* - Horizontal: Stacks Series horizontally and fills with nulls if the lengths don't match.
*
* @example
* >>> const df1 = pl.DataFrame({"a": [1], "b": [3]})
* >>> const df2 = pl.DataFrame({"a": [2], "b": [4]})
* >>> pl.concat([df1, df2])
* shape: (2, 2)
* ┌─────┬─────┐
* │ a ┆ b │
* │ --- ┆ --- │
* │ i64 ┆ i64 │
* ╞═════╪═════╡
* │ 1 ┆ 3 │
* ├╌╌╌╌╌┼╌╌╌╌╌┤
* │ 2 ┆ 4 │
* └─────┴─────┘
*/
export declare function concat(items: Array<DataFrame>, options?: ConcatOptions): DataFrame;
export declare function concat<T>(items: Array<Series<T>>, options?: {
rechunk: boolean;
}): Series<T>;
export {};
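
A minimal sketch of the widened concat signature (column names and values are hypothetical; horizontal null-fill behavior per the doc comment above):

const pl = require("nodejs-polars");
const df1 = pl.DataFrame({ a: [1], b: [3] });
const df2 = pl.DataFrame({ c: [2, 4] });

pl.concat([df1, df1]);                        // how defaults to "vertical"
pl.concat([df1, df2], { how: "horizontal" }); // df1 is padded with nulls to match df2's length
pl.concat([pl.Series("s", [1]), pl.Series("s", [2])], { rechunk: true }); // Series accept rechunk only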

@@ -10,2 +10,3 @@ "use strict";

const series_1 = require("./series");
const dataframe_1 = require("./dataframe");
const polars_internal_1 = __importDefault(require("./internals/polars_internal"));

@@ -39,7 +40,10 @@ const utils_1 = require("./utils");

}
if (how !== "vertical") {
throw new Error("unsupported operation. only 'vertical' is supported at this time");
}
if ((0, utils_1.isDataFrameArray)(items)) {
const df = items.reduce((acc, curr) => acc.vstack(curr));
let df;
if (how === "vertical") {
df = items.reduce((acc, curr) => acc.vstack(curr));
}
else {
df = (0, dataframe_1.dfWrapper)(polars_internal_1.default.horizontalConcatDF({ items: items.map(i => i._df) }));
}
return rechunk ? df.rechunk() : df;

@@ -51,4 +55,4 @@ }

}
throw new Error("can only concat series and dataframes");
throw new TypeError("can only concat series and dataframes");
}
exports.concat = concat;

@@ -72,3 +72,4 @@ import * as series from "./series";

export import when = _when.when;
const version: string;
}
export = pl;

@@ -27,2 +27,3 @@ "use strict";

const cfg = __importStar(require("./cfg"));
const package_json_1 = require("../package.json");
const lazy_1 = require("./lazy");

@@ -89,3 +90,4 @@ var pl;

pl.when = lazy_1.when.when;
pl.version = package_json_1.version;
})(pl || (pl = {}));
module.exports = pl;
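
The index change wires the package version through from package.json, so (assuming the re-export shown above):

const pl = require("nodejs-polars");
console.log(pl.version); // "0.0.7"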

@@ -12,2 +12,5 @@ "use strict";

const jsTypeToPolarsType = (value) => {
if (value === null) {
return datatypes_1.DataType.Float64;
}
if (Array.isArray(value)) {

@@ -14,0 +17,0 @@ return (0, exports.jsTypeToPolarsType)(value[0]);
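
With `null` now mapped to Float64, a column whose first value is null should still get a usable dtype during construction (a sketch of the assumed effect):

const pl = require("nodejs-polars");
// jsTypeToPolarsType inspects the first element; it now falls back to
// Float64 on null instead of failing the dtype lookup.
const df = pl.DataFrame({ a: [null, 1.5, 2.0] });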

@@ -0,1 +1,2 @@

/// <reference types="node" />
import { ReadCsvOptions, ReadJsonOptions } from "./datatypes";

@@ -7,5 +8,6 @@ import { DataFrame } from "./dataframe";

* ___
* @param pathOrBody - path or buffer or string
* - path: Path to a file or a file like string. Any valid filepath can be used. Example: `file.csv`.
* - body: String or buffer to be read as a CSV
* @param options
* @param options.file - Path to a file or a file like string. Any valid filepath can be used. Example: `file.csv`.
* Any string containing the contents of a csv can also be used
* @param options.inferSchemaLength - Maximum number of lines to read to infer schema. If set to 0, all columns will be read as pl.Utf8.

@@ -38,5 +40,3 @@ * If set to `null`, a full table scan will be done (slow).

*/
export declare function readCSV(options: Partial<ReadCsvOptions>): DataFrame;
export declare function readCSV(path: string): DataFrame;
export declare function readCSV(path: string, options: Partial<ReadCsvOptions>): DataFrame;
export declare function readCSV(pathOrBody: string | Buffer, options?: Partial<ReadCsvOptions>): DataFrame;
/**

@@ -46,4 +46,6 @@ * __Read a JSON file or string into a DataFrame.__

* _Note: Currently only newline delimited JSON is supported_
* @param pathOrBody - path or buffer or string
* - path: Path to a file or a file like string. Any valid filepath can be used. Example: `file.json`.
* - body: String or buffer to be read as JSON
* @param options
* @param options.file - Path to a file, or a file like string
* @param options.inferSchemaLength - Maximum number of lines to read to infer schema. If set to 0, all columns will be read as pl.Utf8.

@@ -59,3 +61,3 @@ * If set to `null`, a full table scan will be done (slow).

* `
* > const df = pl.readJSON({file: jsonString})
* > const df = pl.readJSON(jsonString)
* > console.log(df)

@@ -74,5 +76,3 @@ * shape: (2, 3)

*/
export declare function readJSON(options: ReadJsonOptions): DataFrame;
export declare function readJSON(path: string): DataFrame;
export declare function readJSON(path: string, options: ReadJsonOptions): DataFrame;
export declare function readJSON(pathOrBody: string | Buffer, options?: Partial<ReadJsonOptions>): DataFrame;
/**

@@ -79,0 +79,0 @@ * Read into a DataFrame from a csv file.
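
The declarations replace the 0.0.6 options-object entry points with pathOrBody-first signatures for both readers; sketched usage (file names are hypothetical):

const pl = require("nodejs-polars");
const a = pl.readCSV("./data.csv");     // replaces pl.readCSV({file: "./data.csv"})
const b = pl.readCSV("a,b\n1,2");       // an inline CSV body still works
const c = pl.readJSON("./data.ndjson"); // newline delimited JSON from a path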

@@ -28,18 +28,116 @@ "use strict";

};
function readCSV(arg, options) {
function readCSVBuffer(buff, options) {
return (0, dataframe_1.dfWrapper)(polars_internal_1.default.df.readCSVBuffer({ ...readCsvDefaultOptions, ...options, buff }));
}
function readCSVPath(path, options) {
return (0, dataframe_1.dfWrapper)(polars_internal_1.default.df.readCSVPath({ ...readCsvDefaultOptions, ...options, path }));
}
function readJSONBuffer(buff, options) {
return (0, dataframe_1.dfWrapper)(polars_internal_1.default.df.readJSONBuffer({ ...readJsonDefaultOptions, ...options, buff }));
}
function readJSONPath(path, options) {
return (0, dataframe_1.dfWrapper)(polars_internal_1.default.df.readJSONPath({ ...readJsonDefaultOptions, ...options, path }));
}
/**
* __Read a CSV file or string into a Dataframe.__
* ___
* @param pathOrBody - path or buffer or string
* - path: Path to a file or a file like string. Any valid filepath can be used. Example: `file.csv`.
* - body: String or buffer to be read as a CSV
* @param options
* @param options.inferSchemaLength - Maximum number of lines to read to infer schema. If set to 0, all columns will be read as pl.Utf8.
* If set to `null`, a full table scan will be done (slow).
* @param options.batchSize - Number of lines to read into the buffer at once. Modify this to change performance.
* @param options.hasHeader - Indicate if the first row of the dataset is a header or not. If set to false, column names will be set to `column_x`,
* `x` being an enumeration over every column in the dataset.
* @param options.ignoreErrors - Try to keep reading lines if some lines yield errors.
* @param options.endRows - After `n` rows are read from the CSV, reading stops.
* During multi-threaded parsing, an upper bound of `n` rows
* cannot be guaranteed.
* @param options.startRows - Start reading after `startRows` rows.
* @param options.projection - Indices of columns to select. Note that column indices start at zero.
* @param options.sep - Character to use as the delimiter in the file.
* @param options.columns - Columns to select.
* @param options.rechunk - Make sure that all columns are contiguous in memory by aggregating the chunks into a single array.
* @param options.encoding - Allowed encodings: `utf8`, `utf8-lossy`. Lossy means that invalid utf8 values are replaced with `�` characters.
* @param options.numThreads - Number of threads to use in csv parsing. Defaults to the number of physical CPUs on your system.
* @param options.dtype - Overwrite the dtypes during inference.
* @param options.lowMemory - Reduce memory usage at the expense of performance.
* @param options.commentChar - Character that indicates the start of a comment line, for instance `#`.
* @param options.quotChar - Character that is used for csv quoting, default `"`. Set to null to turn off special handling and escaping of quotes.
* @param options.nullValues - Values to interpret as null values. You can provide:
* - `string` -> all values encountered equal to this string will be null
* - `Array<string>` -> a null value per column
* - `Record<string, string>` -> an object or map that maps column names to a null value string. Ex. `{"column_1": "0"}`
* @param options.parseDates - Whether to attempt to parse dates or not
* @returns DataFrame
*/
function readCSV(pathOrBody, options) {
const extensions = [".tsv", ".csv"];
if (typeof arg === "string") {
return readCSV({ ...options, file: arg, inline: !(0, utils_1.isPath)(arg, extensions) });
if (Buffer.isBuffer(pathOrBody)) {
return readCSVBuffer(pathOrBody, options);
}
options = { ...readCsvDefaultOptions, ...arg };
return (0, dataframe_1.dfWrapper)(polars_internal_1.default.df.read_csv(options));
if (typeof pathOrBody === "string") {
const inline = !(0, utils_1.isPath)(pathOrBody, extensions);
if (inline) {
return readCSVBuffer(Buffer.from(pathOrBody, "utf-8"), options);
}
else {
return readCSVPath(pathOrBody, options);
}
}
else {
throw new Error("must supply either a path or body");
}
}
exports.readCSV = readCSV;
function readJSON(arg, options) {
/**
* __Read a JSON file or string into a DataFrame.__
*
* _Note: Currently only newline delimited JSON is supported_
* @param pathOrBody - path or buffer or string
* - path: Path to a file or a file like string. Any valid filepath can be used. Example: `file.json`.
* - body: String or buffer to be read as JSON
* @param options
* @param options.inferSchemaLength - Maximum number of lines to read to infer schema. If set to 0, all columns will be read as pl.Utf8.
* If set to `null`, a full table scan will be done (slow).
* @param options.batchSize - Number of lines to read into the buffer at once. Modify this to change performance.
* @returns ({@link DataFrame})
* @example
* ```
* const jsonString = `
* {"a": 1, "b": "foo", "c": 3}
* {"a": 2, "b": "bar", "c": 6}
* `
* > const df = pl.readJSON(jsonString)
* > console.log(df)
* shape: (2, 3)
* ╭─────┬─────┬─────╮
* │ a ┆ b ┆ c │
* │ --- ┆ --- ┆ --- │
* │ i64 ┆ str ┆ i64 │
* ╞═════╪═════╪═════╡
* │ 1 ┆ foo ┆ 3 │
* ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
* │ 2 ┆ bar ┆ 6 │
* ╰─────┴─────┴─────╯
* ```
*/
function readJSON(pathOrBody, options) {
const extensions = [".ndjson", ".json", ".jsonl"];
if (typeof arg === "string") {
return readJSON({ ...options, file: arg, inline: !(0, utils_1.isPath)(arg, extensions) });
if (Buffer.isBuffer(pathOrBody)) {
return readJSONBuffer(pathOrBody, options);
}
options = { ...readJsonDefaultOptions, ...arg };
return (0, dataframe_1.dfWrapper)(polars_internal_1.default.df.read_json(options));
if (typeof pathOrBody === "string") {
const inline = !(0, utils_1.isPath)(pathOrBody, extensions);
if (inline) {
return readJSONBuffer(Buffer.from(pathOrBody, "utf-8"), options);
}
else {
return readJSONPath(pathOrBody, options);
}
}
else {
throw new Error("must supply either a path or body");
}
}

@@ -46,0 +144,0 @@ exports.readJSON = readJSON;
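
Note the dispatch rule in the new implementation: Buffers always take the inline branch, and strings are treated as paths only when `isPath` matches one of the listed extensions (a sketch of the assumed behavior):

const pl = require("nodejs-polars");
pl.readCSV(Buffer.from("a,b\n1,2")); // Buffer -> readCSVBuffer, always parsed inline
pl.readCSV("./data.tsv");            // .csv/.tsv suffix -> readCSVPath, read from disk
pl.readJSON('{"a": 1}\n{"a": 2}');   // no .json/.ndjson/.jsonl suffix -> parsed inline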

@@ -420,2 +420,12 @@ import { DataType } from "../datatypes";

explode(): Expr;
/**
* Extend the Series with given number of values.
* @param value The value to extend the Series with. This value may be null to fill with nulls.
* @param n The number of values to extend.
*/
extend(value: any, n: number): Expr;
extend(opt: {
value: any;
n: number;
}): Expr;
/** Fill nan value with a fill value */

@@ -422,0 +432,0 @@ fillNan(other: any): Expr;
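
A sketch of the new expression-level extend, showing both call forms from the declaration (assumes select accepts expressions, per the ExprOrString types imported in dataframe.d.ts):

const pl = require("nodejs-polars");
const df = pl.DataFrame({ a: [1, 2, 3] });
df.select(pl.col("a").extend(0, 2));                  // positional form: value, n
df.select(pl.col("a").extend({ value: null, n: 2 })); // options form; null pads with nulls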

@@ -252,2 +252,8 @@ "use strict";

explode: wrapNullArgs("explode"),
extend(o, n) {
if (n !== null && typeof n === "number") {
return wrap("extend", { value: o, n });
}
return wrap("extend", o);
},
fillNan: wrapExprArg("fillNan", true),

@@ -254,0 +260,0 @@ fillNull,

@@ -13,3 +13,3 @@ "use strict";

function col(col) {
if ((0, utils_1.isSeries)(col)) {
if (series_1.Series.isSeries(col)) {
col = col.toArray();

@@ -34,3 +34,3 @@ }

}
if ((0, utils_1.isSeries)(value)) {
if (series_1.Series.isSeries(value)) {
return (0, expr_1.Expr)(polars_internal_1.default.lit({ value: value._series }));

@@ -89,3 +89,3 @@ }

function count(column) {
if ((0, utils_1.isSeries)(column)) {
if (series_1.Series.isSeries(column)) {
return column.len();

@@ -118,3 +118,3 @@ }

function first(column) {
if ((0, utils_1.isSeries)(column)) {
if (series_1.Series.isSeries(column)) {
if (column.length) {

@@ -155,3 +155,3 @@ return column[0];

function head(column, n) {
if ((0, utils_1.isSeries)(column)) {
if (series_1.Series.isSeries(column)) {
return column.head(n);

@@ -165,3 +165,3 @@ }

function last(column) {
if ((0, utils_1.isSeries)(column)) {
if (series_1.Series.isSeries(column)) {
if (column.length) {

@@ -180,3 +180,3 @@ return column[-1];

function mean(column) {
if ((0, utils_1.isSeries)(column)) {
if (series_1.Series.isSeries(column)) {
return column.mean();

@@ -188,3 +188,3 @@ }

function median(column) {
if ((0, utils_1.isSeries)(column)) {
if (series_1.Series.isSeries(column)) {
return column.median();

@@ -196,3 +196,3 @@ }

function nUnique(column) {
if ((0, utils_1.isSeries)(column)) {
if (series_1.Series.isSeries(column)) {
return column.nUnique();

@@ -211,3 +211,3 @@ }

function quantile(column, q) {
if ((0, utils_1.isSeries)(column)) {
if (series_1.Series.isSeries(column)) {
return column.quantile(q);

@@ -235,3 +235,3 @@ }

function tail(column, n) {
if ((0, utils_1.isSeries)(column)) {
if (series_1.Series.isSeries(column)) {
return column.tail(n);

@@ -238,0 +238,0 @@ }
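
Each helper above swaps the removed utils.isSeries guard for a static method on Series; assuming that static is reachable from the top-level export, the check looks like:

const pl = require("nodejs-polars");
pl.Series.isSeries(pl.Series("a", [1])); // true: wraps a native _series handle
pl.Series.isSeries([1, 2, 3]);           // false: plain arrays are not Series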

@@ -341,2 +341,12 @@ import { DataType, DtypeToPrimitive, Optional } from "./datatypes";

/**
* Extend the Series with given number of values.
* @param value The value to extend the Series with. This value may be null to fill with nulls.
* @param n The number of values to extend.
*/
extend(value: any, n: number): Series<T>;
extend(opt: {
value: any;
n: number;
}): Series<T>;
/**
* __Fill null values with a filling strategy.__

@@ -343,0 +353,0 @@ * ___
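
The same extend lands on eager Series; a minimal sketch of both overloads:

const pl = require("nodejs-polars");
const s = pl.Series("a", [1, 2, 3]);
s.extend(null, 2);            // appends two nulls
s.extend({ value: 9, n: 1 }); // appends a single 9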

@@ -236,2 +236,8 @@ "use strict";

explode: noArgWrap("explode"),
extend(o, n) {
if (n !== null && typeof n === "number") {
return wrap("extend", { value: o, n });
}
return wrap("extend", o);
},
fillNull(strategy) {

@@ -238,0 +244,0 @@ return typeof strategy === "string" ?

@@ -23,8 +23,5 @@ import { Expr } from "./lazy/expr";

export declare const range: (start: number, end: number) => number[];
export declare const isDataFrame: (ty: any) => ty is DataFrame;
export declare const isDataFrameArray: (ty: any) => ty is DataFrame[];
export declare const isSeries: <T>(ty: any) => ty is Series<T>;
export declare const isSeriesArray: <T>(ty: any) => ty is Series<T>[];
export declare const isExpr: (ty: any) => ty is Expr;
export declare const isExprArray: (ty: any) => ty is Expr[];
export declare const regexToString: (r: string | RegExp) => string;

@@ -6,3 +6,3 @@ "use strict";

Object.defineProperty(exports, "__esModule", { value: true });
exports.regexToString = exports.isExprArray = exports.isExpr = exports.isSeriesArray = exports.isSeries = exports.isDataFrameArray = exports.isDataFrame = exports.range = exports.isPath = exports.selectionToExprList = exports.columnOrColumnsStrict = exports.columnOrColumns = void 0;
exports.regexToString = exports.isExprArray = exports.isSeriesArray = exports.isDataFrameArray = exports.range = exports.isPath = exports.selectionToExprList = exports.columnOrColumnsStrict = exports.columnOrColumns = void 0;
const expr_1 = require("./lazy/expr");

@@ -35,12 +35,6 @@ const path_1 = __importDefault(require("path"));

exports.range = range;
const isDataFrame = (ty) => (0, types_1.isExternal)(ty?._df);
exports.isDataFrame = isDataFrame;
const isDataFrameArray = (ty) => Array.isArray(ty) && (0, types_1.isExternal)(ty[0]?._df);
exports.isDataFrameArray = isDataFrameArray;
const isSeries = (ty) => (0, types_1.isExternal)(ty._series);
exports.isSeries = isSeries;
const isSeriesArray = (ty) => Array.isArray(ty) && (0, types_1.isExternal)(ty[0]?._series);
exports.isSeriesArray = isSeriesArray;
const isExpr = (ty) => (0, types_1.isExternal)(ty?._expr);
exports.isExpr = isExpr;
const isExprArray = (ty) => Array.isArray(ty) && (0, types_1.isExternal)(ty[0]?._expr);

@@ -47,0 +41,0 @@ exports.isExprArray = isExprArray;

bin/package.json

{
"name": "nodejs-polars",
"version": "0.0.6",
"version": "0.0.7",
"repository": "https://github.com/pola-rs/polars.git",

@@ -44,3 +44,3 @@ "license": "SEE LICENSE IN LICENSE",

"build:debug": "napi build --platform",
"build:ts": "tsc -p tsconfig.build.json",
"build:ts": " rm -rf bin; tsc -p tsconfig.build.json; mv bin/polars/* bin",
"format:rs": "cargo fmt",

@@ -72,2 +72,3 @@ "format:source": "prettier --config ./package.json --write './**/*.{js,ts}'",

"prettier": "^2.4.1",
"source-map-support": "^0.5.21",
"ts-jest": "^27.1.0",

@@ -96,15 +97,19 @@ "ts-node": "^10.4.0",

},
"packageManager": "yarn@3.1.1",
"workspaces": [
"benches"
],
"optionalDependencies": {
"nodejs-polars-win32-x64-msvc": "0.0.6",
"nodejs-polars-darwin-x64": "0.0.6",
"nodejs-polars-linux-x64-gnu": "0.0.6",
"nodejs-polars-win32-ia32-msvc": "0.0.6",
"nodejs-polars-linux-arm64-gnu": "0.0.6",
"nodejs-polars-linux-arm-gnueabihf": "0.0.6",
"nodejs-polars-darwin-arm64": "0.0.6",
"nodejs-polars-android-arm64": "0.0.6",
"nodejs-polars-linux-x64-musl": "0.0.6",
"nodejs-polars-linux-arm64-musl": "0.0.6",
"nodejs-polars-win32-arm64-msvc": "0.0.6"
"nodejs-polars-win32-x64-msvc": "0.0.7",
"nodejs-polars-darwin-x64": "0.0.7",
"nodejs-polars-linux-x64-gnu": "0.0.7",
"nodejs-polars-win32-ia32-msvc": "0.0.7",
"nodejs-polars-linux-arm64-gnu": "0.0.7",
"nodejs-polars-linux-arm-gnueabihf": "0.0.7",
"nodejs-polars-darwin-arm64": "0.0.7",
"nodejs-polars-android-arm64": "0.0.7",
"nodejs-polars-linux-x64-musl": "0.0.7",
"nodejs-polars-linux-arm64-musl": "0.0.7",
"nodejs-polars-win32-arm64-msvc": "0.0.7"
}
}