import { IIndex } from './index';
import { ISeries, SelectorWithIndexFn, PredicateFn, ComparerFn, SelectorFn, AggregateFn, Zip2Fn, Zip3Fn, ZipNFn, CallbackFn, JoinFn, GapFillFn } from './series';
import { ISerializedDataFrame } from "@data-forge/serialization";
/**
 * An object whose fields specify the data for named columns.
 */
export interface IColumnSpec {
    [index: string]: Iterator<any> | Iterable<any> | ISeries<any, any>;
}
/**
 * Specifies the format per column when converting columns to strings.
 */
export interface IFormatSpec {
    [index: string]: string;
}
/**
 * A function that aggregates a series.
 *
 * NOTE(review): the `= any` defaults keep references that omit type arguments
 * compiling; callers may still supply all three parameters explicitly.
 */
export declare type SeriesAggregatorFn<IndexT = any, ValueT = any, OutputT = any> = (values: ISeries<IndexT, ValueT>) => OutputT;
/**
 * Specification that can produce multiple output columns from a single input column of a dataframe.
 */
export interface IColumnAggregatorSpec {
    [outputColumnName: string]: SeriesAggregatorFn<any, any, any>;
}
/**
 * Specification that can aggregate multiple input columns in a dataframe to produce multiple output columns.
 */
export interface IMultiColumnAggregatorSpec {
    [inputColumnName: string]: SeriesAggregatorFn<any, any, any> | IColumnAggregatorSpec;
}
/**
 * Defines the configuration for a new column.
 */
export interface IColumnConfig {
    /**
     * The name of the new column.
     */
    name: string;
    /**
     * The series of values for the column.
     */
    series: Iterable<any> | ISeries<any, any>;
}
/**
 * Options for CSV output.
 *
 * The options object is passed directly to [PapaParse.unparse](https://www.papaparse.com/docs#unparse), please see [PapaParse docs for additional options](https://www.papaparse.com/docs#unparse-config-default).
 */
export interface ICSVOutputOptions {
    /**
     * Enable or disable output of the CSV header line.
     * Defaults to true.
     */
    header?: boolean;
}
/**
 * Used to configure a dataframe.
 */
export interface IDataFrameConfig<IndexT = any, ValueT = any> {
    /**
     * Values to put in the dataframe.
     * This should be an array or iterable of JavaScript objects.
     * Each element in the array contains fields that match the columns of the dataframe.
     */
    values?: Iterator<ValueT> | Iterable<ValueT>;
    /**
     * CSV style rows to put in the dataframe.
     * An array of arrays. Each element in the top level array is a row of data.
     * Each row of data contains field values in column order.
     */
    rows?: Iterator<any[]> | Iterable<any[]>;
    /**
     * The index for the dataframe.
     * If omitted the index will default to a 0-based index.
     */
    index?: Iterator<IndexT> | Iterable<IndexT>;
    /**
     * Array or iterable of index,value pairs to put in the dataframe.
     * If index and values are not separately specified they can be extracted
     * from the pairs.
     */
    pairs?: Iterator<[IndexT, ValueT]> | Iterable<[IndexT, ValueT]>;
    /**
     * Array or iterable of column names that are in the dataframe.
     * The order matters. This array specifies the ordering of columns which
     * is important when rendering tables or writing out CSV data files.
     * If this is omitted column names will automatically be determined
     * from the fields of the first row/value in the dataframe.
     */
    columnNames?: Iterator<string> | Iterable<string>;
    /**
     * Set to true when the dataframe has been baked into memory
     * and does not need to be lazily evaluated.
     */
    baked?: boolean;
    /**
     * Set to true to consider all rows/values in the dataframe when
     * determining the column names. Otherwise only the first row is considered.
     * You should use this if you have irregular fields in the objects that
     * make up the rows/values of the dataframe.
     */
    considerAllRows?: boolean;
    /**
     * Explicitly specify data for named columns to put in the dataframe.
     */
    columns?: Iterator<IColumnConfig> | Iterable<IColumnConfig> | IColumnSpec;
    /**
     * Explicitly set this value if you want columnNames to be case sensitive.
     * Default behaviour is to treat column names as case insensitive.
     */
    caseSensitive?: boolean;
}
/**
 * Represents a named column in a dataframe.
 */
export interface IColumn {
    /**
     * The name of the column.
     */
    name: string;
    /**
     * The data type of the column.
     */
    type: string;
    /**
     * The data series from the column.
     */
    series: ISeries<any, any>;
}
/**
 * An object whose fields specify data for named columns or user-defined generator functions that generate the data for the columns.
 */
export interface IColumnGenSpec {
    [index: string]: ISeries<any, any> | SeriesSelectorFn<any, any, any>;
}
/**
 * A string-to-string mapping that specifies how to rename columns.
 */
export interface IColumnRenameSpec {
    [index: string]: string;
}
/**
 * Specifies columns to transform and the user-defined selector function that does the transformation.
 */
export interface IColumnTransformSpec {
    [columnName: string]: SelectorWithIndexFn<any, any>;
}
/**
 * Specifies columns that should be aggregated and a user-defined aggregator function to do the aggregation.
 */
export interface IColumnAggregateSpec {
    [index: string]: AggregateFn<any, any>;
}
/**
 * A selector function that can select a series from a dataframe.
 */
export declare type SeriesSelectorFn<IndexT = any, DataFrameValueT = any, SeriesValueT = any> = (dataFrame: IDataFrame<IndexT, DataFrameValueT>) => ISeries<IndexT, SeriesValueT>;
/**
 * A function that produces a dataframe configuration, used for lazy construction of a dataframe.
 */
export declare type DataFrameConfigFn<IndexT = any, ValueT = any> = () => IDataFrameConfig<IndexT, ValueT>;
/**
 * Represents the frequency of a type in a series or dataframe.
 */
export interface ITypeFrequency {
    /**
     * Name of the column containing the value.
     */
    Column: string;
    /**
     * The name of the type.
     */
    Type: string;
    /**
     * The frequency of the type's appearance in the series or dataframe.
     */
    Frequency: number;
}
/**
 * Represents the frequency of a value in a series or dataframe.
 */
export interface IValueFrequency {
    /**
     * Name of the column containing the value.
     */
    Column: string;
    /**
     * The value.
     */
    Value: any;
    /**
     * The frequency of the value's appearance in the series or dataframe.
     */
    Frequency: number;
}
/**
 * Interface that represents a dataframe.
 * A dataframe contains an indexed sequence of data records.
 * Think of it as a spreadsheet or CSV file in memory.
 *
 * Each data record contains multiple named fields, the value of each field represents one row in a column of data.
 * Each column of data is a named {@link Series}.
 * You can think of a dataframe as a collection of named data series.
* * @typeparam IndexT The type to use for the index. * @typeparam ValueT The type to use for each row/data record. */ export interface IDataFrame extends Iterable { /** * Get an iterator to enumerate the rows of the dataframe. * Enumerating the iterator forces lazy evaluation to complete. * This function is automatically called by `for...of`. * * @return An iterator for the rows in the dataframe. * * @example *
     *
     * for (const row of df) {
     *     // ... do something with the row ...
     * }
     * 
*/ [Symbol.iterator](): Iterator; /** * Get the names of the columns in the dataframe. * * @return Returns an array of the column names in the dataframe. * * @example *
     *
     * console.log(df.getColumnNames());
     * 
*/ getColumnNames(): string[]; /** * Retreive the collection of all columns in the dataframe. * * @return Returns a {@link Series} containing the names of the columns in the dataframe. * * @example *
     *
     * for (const column in df.getColumns()) {
     *      console.log("Column name: ");
     *      console.log(column.name);
     *
     *      console.log("Data:");
     *      console.log(column.series.toArray());
     * }
     * 
*/ getColumns(): ISeries; /** * Returns true if the dataframe is case sensitive or false if case insensitive. * * @return true if the dataframe is case sensitive, otherwise false. */ isCaseSensitive(): boolean; /** * Cast the value of the dataframe to a new type. * This operation has no effect but to retype the rows that the dataframe contains. * * @return The same dataframe, but with the type changed. * * @example *
     *
     * const castDf = df.cast();
     * 
*/ cast(): IDataFrame; /** * Get the index for the dataframe. * * @return The {@link Index} for the dataframe. * * @example *
     *
     * const index = df.getIndex();
     * 
*/ getIndex(): IIndex; /** * Set a named column as the {@link Index} of the dataframe. * * @param columnName Name of the column to use as the new {@link Index} of the returned dataframe. * * @return Returns a new dataframe with the values of the specified column as the new {@link Index}. * * @example *
     *
     * const indexedDf = df.setIndex("SomeColumn");
     * 
*/ setIndex(columnName: string): IDataFrame; /** * Apply a new {@link Index} to the dataframe. * * @param newIndex The new array or iterable to be the new {@link Index} of the dataframe. Can also be a selector to choose the {@link Index} for each row in the dataframe. * * @return Returns a new dataframe with the specified {@link Index} attached. * * @example *
     *
     * const indexedDf = df.withIndex([10, 20, 30]);
     * 
* * @example *
     *
     * const indexedDf = df.withIndex(df.getSeries("SomeColumn"));
     * 
* * @example *
     *
     * const indexedDf = df.withIndex(row => row.SomeColumn);
     * 
* * @example *
     *
     * const indexedDf = df.withIndex(row => row.SomeColumn + 20);
     * 
*/ withIndex(newIndex: Iterable | SelectorFn): IDataFrame; /** * Resets the {@link Index} of the dataframe back to the default zero-based sequential integer index. * * @return Returns a new dataframe with the {@link Index} reset to the default zero-based index. * * @example *
     *
     * const dfWithResetIndex = df.resetIndex();
     * 
*/ resetIndex(): IDataFrame; /** * Extract a {@link Series} from a named column in the dataframe. * * @param columnName Specifies the name of the column that contains the {@link Series} to retreive. * * @return Returns the {@link Series} extracted from the named column in the dataframe. * * @example *
     *
     * const series = df.getSeries("SomeColumn");
     * 
*/ getSeries(columnName: string): ISeries; /** * Determine if the dataframe contains a {@link Series} the specified named column. * * @param columnName Name of the column to check for. * * @return Returns true if the dataframe contains the requested {@link Series}, otherwise returns false. * * @example *
     *
     * if (df.hasSeries("SomeColumn")) {
     *      // ... the dataframe contains a series with the specified column name ...
     * }
     * 
*/ hasSeries(columnName: string): boolean; /** * Verify the existence of a name column and extracts the {@link Series} for it. * Throws an exception if the requested column doesn't exist. * * @param columnName Name of the column to extract. * * @return Returns the {@link Series} for the column if it exists, otherwise it throws an exception. * * @example *
     *
     * try {
     *      const series = df.expectSeries("SomeColumn");
     *      // ... do something with the series ...
     * }
     * catch (err) {
     *      // ... the dataframe doesn't contain the column "SomeColumn" ...
     * }
     * 
*/ expectSeries(columnName: string): ISeries; /** * Create a new dataframe with a replaced or additional column specified by the passed-in series. * * @param columnNameOrSpec The name of the column to add or replace or a {@link IColumnGenSpec} that defines the columns to add. * @param series When columnNameOrSpec is a string that identifies the column to add, this specifies the {@link Series} to add to the dataframe or a function that produces a series (given a dataframe). * * @return Returns a new dataframe replacing or adding a particular named column. * * @example *
     *
     * const modifiedDf = df.withSeries("ANewColumn", new Series([1, 2, 3]));
     * 
* * @example *
     *
     * const modifiedDf = df.withSeries("ANewColumn", df =>
     *      df.getSeries("SourceData").select(aTransformation)
     * );
     * 
* * @example *
     *
     * const modifiedDf = df.withSeries({
     *      ANewColumn: new Series([1, 2, 3]),
     *      SomeOtherColumn: new Series([10, 20, 30])
     * });
     * 
     *
     * @example
     * 
     *
     * const modifiedDf = df.withSeries({
     *      ANewColumn: df => df.getSeries("SourceData").select(aTransformation)
     * });
     * 
     */
    withSeries(columnNameOrSpec: string | IColumnGenSpec, series?: ISeries | SeriesSelectorFn): IDataFrame;
    /**
     * Merge one or more dataframes into this single dataframe.
     * Rows are merged by index.
     * Same named columns in subsequent dataframes override columns in earlier dataframes.
     *
     * @param otherDataFrames... One or more dataframes to merge into this dataframe.
     *
     * @returns The merged data frame.
     *
     * @example
     * 
     *
     * const mergedDF = df1.merge(df2);
     * 
* *
     *
     * const mergedDF = df1.merge(df2, df3, etc);
     * 
*/ merge(...otherDataFrames: IDataFrame[]): IDataFrame; /** * Add a series to the dataframe, but only if it doesn't already exist. * * @param columnNameOrSpec The name of the series to add or a {@link IColumnGenSpec} that specifies the columns to add. * @param series If columnNameOrSpec is a string that specifies the name of the series to add, this specifies the actual {@link Series} to add or a selector that generates the series given the dataframe. * * @return Returns a new dataframe with the specified series added, if the series didn't already exist. Otherwise if the requested series already exists the same dataframe is returned. * * @example *
     *
     * const updatedDf = df.ensureSeries("ANewColumn", new Series([1, 2, 3]));
     * 
* * @example *
     *
     * const updatedDf = df.ensureSeries("ANewColumn", df =>
     *      df.getSeries("AnExistingSeries").select(aTransformation)
     * );
     * 
* * @example *
     *
     * const modifiedDf = df.ensureSeries({
     *      ANewColumn: new Series([1, 2, 3]),
     *      SomeOtherColumn: new Series([10, 20, 30])
     * });
     * 
     *
     * @example
     * 
     *
     * const modifiedDf = df.ensureSeries({
     *      ANewColumn: df => df.getSeries("SourceData").select(aTransformation)
     * });
     * 
     */
    ensureSeries(columnNameOrSpec: string | IColumnGenSpec, series?: ISeries | SeriesSelectorFn): IDataFrame;
    /**
     * Create a new dataframe with just a subset of columns.
     *
     * @param columnNames Array of column names to include in the new dataframe.
     *
     * @return Returns a dataframe with a subset of columns from the original dataframe.
     *
     * @example
     * 
     * const subsetDf = df.subset(["ColumnA", "ColumnB"]);
     * 
*/ subset(columnNames: string[]): IDataFrame; /** * Create a new dataframe with the requested column or columns dropped. * * @param columnOrColumns Specifies the column name (a string) or columns (array of strings) to drop. * * @return Returns a new dataframe with a particular named column or columns removed. * * @example *
     * const modifiedDf = df.dropSeries("SomeColumn");
     * 
* * @example *
     * const modifiedDf = df.dropSeries(["ColumnA", "ColumnB"]);
     * 
*/ dropSeries(columnOrColumns: string | string[]): IDataFrame; /** * Create a new dataframe with columns reordered. * New column names create new columns (with undefined values), omitting existing column names causes those columns to be dropped. * * @param columnNames Specifies the new order for columns. * * @return Returns a new dataframe with columns reordered according to the order of the array of column names that is passed in. * * @example *
     * const reorderedDf = df.reorderSeries(["FirstColumn", "SecondColumn", "etc"]);
     * 
*/ reorderSeries(columnNames: string[]): IDataFrame; /** * Bring the column(s) with specified name(s) to the front of the column order, making it (or them) the first column(s) in the output dataframe. * * @param columnOrColumns Specifies the column or columns to bring to the front. * * @return Returns a new dataframe with 1 or more columns bought to the front of the column ordering. * * @example *
     * const modifiedDf = df.bringToFront("NewFirstColumn");
     * 
* * @example *
     * const modifiedDf = df.bringToFront(["NewFirstColumn", "NewSecondColumn"]);
     * 
*/ bringToFront(columnOrColumns: string | string[]): IDataFrame; /** * Bring the column(s) with specified name(s) to the back of the column order, making it (or them) the last column(s) in the output dataframe. * * @param columnOrColumns Specifies the column or columns to bring to the back. * * @return Returns a new dataframe with 1 or more columns bought to the back of the column ordering. * * @example *
     * const modifiedDf = df.bringToBack("NewLastColumn");
     * 
* * @example *
     * const modifiedDf = df.bringToBack(["NewSecondLastColumn", "NewLastColumn"]);
     * 
*/ bringToBack(columnOrColumns: string | string[]): IDataFrame; /** * Create a new dataframe with 1 or more columns renamed. * * @param newColumnNames A column rename spec - a JavaScript hash that maps existing column names to new column names. * * @return Returns a new dataframe with specified columns renamed. * * @example *
     *
     * const renamedDf = df.renameSeries({ OldColumnName: NewColumnName });
     * 
* * @example *
     *
     * const renamedDf = df.renameSeries({
     *      Column1: ColumnA,
     *      Column2: ColumnB
     * });
     * 
*/ renameSeries(newColumnNames: IColumnRenameSpec): IDataFrame; /** * Extract rows from the dataframe as an array. * Each element of the array is one row of the dataframe represented as * a JavaScript object with the fields as the dataframe's columns. * This forces lazy evaluation to complete. * * @return Returns an array of the rows contained within the dataframe. * * @example *
    * const values = df.toArray();
    * 
*/ toArray(): ValueT[]; /** * Retreive the index, row pairs from the dataframe as an array. * Each pair is [index, row]. * This forces lazy evaluation to complete. * * @return Returns an array of pairs that contains the dataframe's rows. Each pair is a two element array that contains an index and a row. * * @example *
     * const pairs = df.toPairs();
     * 
*/ toPairs(): ([IndexT, ValueT])[]; /** * Convert the dataframe to a JavaScript object. * * @param keySelector User-defined selector function that selects keys for the resulting object. * @param valueSelector User-defined selector function that selects values for the resulting object. * * @return Returns a JavaScript object generated from the dataframe by applying the key and value selector functions. * * @example *
     *
     * const someObject = df.toObject(
     *      row => row.SomeColumn, // Specify the column to use for field names in the output object.
     *      row => row.SomeOtherColumn // Specify the column to use as the value for each field.
     * );
     * 
*/ toObject(keySelector: (value: ValueT) => KeyT, valueSelector: (value: ValueT) => FieldT): OutT; /** * Bake the data frame to an array of rows where each row is an array of values in column order. * * @return Returns an array of rows. Each row is an array of values in column order. * * @example *
     * const rows = df.toRows();
     * 
*/ toRows(): any[][]; /** * Transforms an input dataframe, generating a new dataframe. * The transformer function is called for each element of the input and the collection of outputs creates the generated datafarme. * * `select` is an alias for {@link DataFrame.map}. * * This is the same concept as the JavaScript function `Array.map` but maps over a dataframe rather than an array. * * @param transformer A user-defined transformer function that transforms each element from the input to generate the output. * * @return Returns a new dataframe generated by calling the transformer function over each element of the input. * * @example *
     *
     * function transformer (input) {
     *      const output = {
     *          // ... construct output from input ...
     *      };
     *
     *      return output;
     * }
     *
     * const transformed = dataframe.select(transformer);
     * console.log(transformed.toString());
     * 
*/ select(transformer: SelectorWithIndexFn): IDataFrame; /** * Transforms an input dataframe, generating a new dataframe. * The transformer function is called for each element of the input and the collection of outputs creates the generated datafarme. * * This is the same concept as the JavaScript function `Array.map` but maps over a dataframe rather than an array. * * @param transformer A user-defined transformer function that transforms each element from the input to generate the output. * * @return Returns a new dataframe generated by calling the transformer function over each element of the input. * * @example *
     *
     * function transformer (input) {
     *      const output = {
     *          // ... construct output from input ...
     *      };
     *
     *      return output;
     * }
     *
     * const transformed = dataframe.map(transformer);
     * console.log(transformed.toString());
     * 
*/ map(transformer: SelectorWithIndexFn): IDataFrame; /** * Transforms and flattens an input dataframe, generating a new dataframe. * The transformer function is called for each value in the input dataframe and produces an array that is then flattened into the generated dataframe. * * `selectMany` is an alias for {@link DataFrame.flatMap}. * * This is the same concept as the JavaScript function `Array.flatMap` but maps over a dataframe rather than an array. * * @param transformer A user-defined function that transforms each value into an array that is then flattened into the generated dataframe. * * @return Returns a new dataframe generated by calling the transformer function over each element of the input. * * @example *
     *
     * function transformer (input) {
     *      const output = [];
     *      while (someCondition) {
     *          // ... generate zero or more outputs from a single input ...
     *          output.push(... some generated value ...);
     *      }
     *      return output;
     * }
     *
     * const transformed = dataframe.selectMany(transformer);
     * console.log(transformed.toString());
     * 
*/ selectMany(transformer: SelectorWithIndexFn>): IDataFrame; /** * Transforms and flattens an input dataframe, generating a new dataframe. * The transformer function is called for each value in the input dataframe and produces an array that is then flattened into the generated dataframe. * * This is the same concept as the JavaScript function `Array.flatMap` but maps over a dataframe rather than an array. * * @param transformer A user-defined function that transforms each value into an array that is then flattened into the generated dataframe. * * @return Returns a new dataframe generated by calling the transformer function over each element of the input. * * @example *
     *
     * function transformer (input) {
     *      const output = [];
     *      while (someCondition) {
     *          // ... generate zero or more outputs from a single input ...
     *          output.push(... some generated value ...);
     *      }
     *      return output;
     * }
     *
     * const transformed = dataframe.flatMap(transformer);
     * console.log(transformed.toString());
     * 
*/ flatMap(transformer: SelectorWithIndexFn>): IDataFrame; /** * Transform one or more columns. * * This is equivalent to extracting a {@link Series} with {@link getSeries}, then transforming it with {@link Series.select}, * and finally plugging it back in as the same column using {@link withSeries}. * * @param columnSelectors Object with field names for each column to be transformed. Each field specifies a selector function that transforms that column. * * @return Returns a new dataframe with 1 or more columns transformed. * * @example *
     *
     * const modifiedDf = df.transformSeries({
     *      AColumnToTransform: columnValue => transformRow(columnValue)
     * });
     * 
* * @example *
     *
     * const modifiedDf = df.transformSeries({
     *      ColumnA: columnValue => transformColumnA(columnValue),
     *      ColumnB: columnValue => transformColumnB(columnValue)
     * });
     * 
*/ transformSeries(columnSelectors: IColumnTransformSpec): IDataFrame; /** * Generate new columns based on existing rows. * * This is equivalent to calling {@link select} to transform the original dataframe to a new dataframe with different column, * then using {@link withSeries} to merge each the of both the new and original dataframes. * * @param generator Generator function that transforms each row to produce 1 or more new columns. * Or use a column spec that has fields for each column, the fields specify a generate function that produces the value for each new column. * * @return Returns a new dataframe with 1 or more new columns. * * @example *
     *
     * function produceNewColumns (inputRow) {
     *      const newColumns = {
     *          // ... specify new columns and their values based on the input row ...
     *      };
     *
     *      return newColumns;
     * };
     *
     * const dfWithNewSeries = df.generateSeries(row => produceNewColumns(row));
     * 
* * @example *
     *
     * const dfWithNewSeries = df.generateSeries({
     *      NewColumnA: row => produceNewColumnA(row),
     *      NewColumnB: row => produceNewColumnB(row),
     * })
     * 
*/ generateSeries(generator: SelectorWithIndexFn | IColumnTransformSpec): IDataFrame; /** * Converts (deflates) a dataframe to a {@link Series}. * * @param selector Optional user-defined selector function that transforms each row to produce the series. * * @return Returns a series that was created from the original dataframe. * * @example *
     *
     * const series = df.deflate(); // Deflate to a series of object.
     * 
* * @example *
     *
     * const series = df.deflate(row => row.SomeColumn); // Extract a particular column.
     * 
*/ deflate(selector?: SelectorWithIndexFn): ISeries; /** * Inflate a named {@link Series} in the dataframe to 1 or more new series in the new dataframe. * * This is the equivalent of extracting the series using {@link getSeries}, transforming them with {@link Series.select} * and then running {@link Series.inflate} to create a new dataframe, then merging each column of the new dataframe * into the original dataframe using {@link withSeries}. * * @param columnName Name of the series to inflate. * @param selector Optional selector function that transforms each value in the column to new columns. If not specified it is expected that each value in the column is an object whose fields define the new column names. * * @return Returns a new dataframe with a column inflated to 1 or more new columns. * * @example *
     *
     * function newColumnGenerator (row) {
     *      const newColumns = {
     *          // ... create 1 field per new column ...
     *      };
     *
     *      return row;
     * }
     *
     * const dfWithNewSeries = df.inflateSeries("SomeColumn", newColumnGenerator);
     * 
*/ inflateSeries(columnName: string, selector?: SelectorWithIndexFn): IDataFrame; /** * Partition a dataframe into a {@link Series} of *data windows*. * Each value in the new series is a chunk of data from the original dataframe. * * @param period The number of rows to include in each data window. * * @return Returns a new series, each value of which is a chunk (data window) of the original dataframe. * * @example *
     *
     * const windows = df.window(2); // Get rows in pairs.
     * const pctIncrease = windows.select(pair => (pair.last().SalesAmount - pair.first().SalesAmount) / pair.first().SalesAmount);
     * console.log(pctIncrease.toString());
     * 
* * @example *
     *
     * const salesDf = ... // Daily sales data.
     * const weeklySales = salesDf.window(7); // Partition up into weekly data sets.
     * console.log(weeklySales.toString());
     * 
*/ window(period: number): ISeries>; /** * Partition a dataframe into a {@link Series} of *rolling data windows*. * Each value in the new series is a rolling chunk of data from the original dataframe. * * @param period The number of data rows to include in each data window. * * @return Returns a new series, each value of which is a rolling chunk of the original dataframe. * * @example *
     *
     * const salesDf = ... // Daily sales data.
     * const rollingWeeklySales = salesDf.rollingWindow(7); // Get rolling window over weekly sales data.
     * console.log(rollingWeeklySales.toString());
     * 
*/ rollingWindow(period: number): ISeries>; /** * Partition a dataframe into a {@link Series} of variable-length *data windows* * where the divisions between the data chunks are * defined by a user-provided *comparer* function. * * @param comparer Function that compares two adjacent data rows and returns true if they should be in the same window. * * @return Returns a new series, each value of which is a chunk of data from the original dataframe. * * @example *
     *
     * function rowComparer (rowA, rowB) {
     *      if (... rowA should be in the same data window as rowB ...) {
     *          return true;
     *      }
     *      else {
     *          return false;
     *      }
     * };
     *
     * const variableWindows = df.variableWindow(rowComparer);
     */
    variableWindow(comparer: ComparerFn): ISeries>;
    /**
     * Eliminates adjacent duplicate rows.
     *
     * For each group of adjacent values that are equivalent only returns the last index/row for the group,
     * thus adjacent equivalent rows are collapsed down to the last row.
     *
     * @param selector Optional selector function to determine the value used to compare for equivalence.
     *
     * @return Returns a new dataframe with groups of adjacent duplicate rows collapsed to a single row per group.
     *
     * @example
     * 
     *
     * const dfWithDuplicateRowsRemoved = df.sequentialDistinct(row => row.ColumnA);
     * 
*/ sequentialDistinct(selector?: SelectorFn): IDataFrame; /** * Aggregate the rows in the dataframe to a single result. * * `aggregate` is similar to {@link DataFrame.reduce} but the parameters are reversed. * Please use {@link DataFrame.reduce} in preference to `aggregate`. * * @param seed Optional seed value for producing the aggregation. * @param selector Function that takes the seed and then each row in the dataframe and produces the aggregate value. * * @return Returns a new value that has been aggregated from the dataframe using the 'selector' function. * * @example *
     *
     * const dailySalesDf = ... daily sales figures for the past month ...
     * const totalSalesForthisMonth = dailySalesDf.aggregate(
     *      0, // Seed - the starting value.
     *      (accumulator, row) => accumulator + row.SalesAmount // Aggregation function.
     * );
     * 
* * @example *
     *
     * const totalSalesAllTime = 500; // We'll seed the aggregation with this value.
     * const dailySalesDf = ... daily sales figures for the past month ...
     * const updatedTotalSalesAllTime = dailySalesDf.aggregate(
     *      totalSalesAllTime,
     *      (accumulator, row) => accumulator + row.SalesAmount
     * );
     * 
* * @example *
     *
     * var salesDataSummary = salesDataDf.aggregate({
     *      TotalSales: df => df.count(),
     *      AveragePrice: df => df.deflate(row => row.Price).average(),
     *      TotalRevenue: df => df.deflate(row => row.Revenue).sum(),
     * });
     * 
*/ aggregate(seedOrSelector: AggregateFn | ToT | IColumnAggregateSpec, selector?: AggregateFn): ToT; /** * Reduces the values in the dataframe to a single result. * * This is the same concept as the JavaScript function `Array.reduce` but reduces a dataframe rather than an array. * @param reducer Function that takes the seed and then each value in the dataframe and produces the reduced value. * @param seed Optional initial value, if not specifed the first value in the dataframe is used as the initial value. * * @return Returns a value that has been reduced from the input dataframe by passing each element through the reducer function. * * @example *
     *
     * const dailyRecords = ... daily records for the past month ...
     * const totalSales = dailyRecords.reduce(
     *      (accumulator, row) => accumulator + row.salesAmount, // Reducer function.
     *      0  // Seed value, the starting value.
     * );
     * 
* * @example *
     *
     * const previousSales = 500; // We'll seed the reduction with this value.
     * const dailyRecords = ... daily records for the past month ...
     * const updatedSales = dailyRecords.reduce(
     *      (accumulator, row) => accumulator + row.salesAmount,
     *      previousSales
     * );
     * 
*/ reduce(reducer: AggregateFn, seed?: ToT): ToT; /** * Skip a number of rows in the dataframe. * * @param numValues Number of rows to skip. * * @return Returns a new dataframe with the specified number of rows skipped. * * @example *
     *
     * const dfWithRowsSkipped = df.skip(10); // Skip 10 rows in the original dataframe.
     * 
*/ skip(numValues: number): IDataFrame; /** * Skips values in the dataframe while a condition evaluates to true or truthy. * * @param predicate Returns true/truthy to continue to skip rows in the original dataframe. * * @return Returns a new dataframe with all initial sequential rows removed while the predicate returned true/truthy. * * @example *
     *
     * const dfWithRowsSkipped = df.skipWhile(row => row.CustomerName === "Fred"); // Skip initial customers named Fred.
     * 
*/ skipWhile(predicate: PredicateFn): IDataFrame; /** * Skips values in the dataframe until a condition evaluates to true or truthy. * * @param predicate Return true/truthy to stop skipping rows in the original dataframe. * * @return Returns a new dataframe with all initial sequential rows removed until the predicate returned true/truthy. * * @example *
     *
     * const dfWithRowsSkipped = df.skipUntil(row => row.CustomerName === "Fred"); // Skip initial customers until we find Fred.
     * 
*/ skipUntil(predicate: PredicateFn): IDataFrame; /** * Take a number of rows in the dataframe. * * @param numRows Number of rows to take. * * @return Returns a new dataframe with only the specified number of rows taken from the original dataframe. * * @example *
     *
     * const dfWithRowsTaken = df.take(15); // Take only the first 15 rows from the original dataframe.
     * 
*/ take(numRows: number): IDataFrame; /** * Takes values from the dataframe while a condition evaluates to true or truthy. * * @param predicate Returns true/truthy to continue to take rows from the original dataframe. * * @return Returns a new dataframe with only the initial sequential rows that were taken while the predicate returned true/truthy. * * @example *
     *
     * const dfWithRowsTaken = df.takeWhile(row => row.CustomerName === "Fred"); // Take only initial customers named Fred.
     * 
*/ takeWhile(predicate: PredicateFn): IDataFrame; /** * Takes values from the dataframe until a condition evaluates to true or truthy. * * @param predicate Return true/truthy to stop taking rows in the original dataframe. * * @return Returns a new dataframe with only the initial sequential rows taken until the predicate returned true/truthy. * * @example *
     *
     * const dfWithRowsTaken = df.takeUntil(row => row.CustomerName === "Fred"); // Take all initial customers until we find Fred.
     * 
*/ takeUntil(predicate: PredicateFn): IDataFrame; /** * Count the number of rows in the dataframe * * @return Returns the count of all rows. * * @example *
     *
     * const numRows = df.count();
     * 
*/ count(): number; /** * Get the first row of the dataframe. * * @return Returns the first row of the dataframe. * * @example *
     *
     * const firstRow = df.first();
     * 
*/ first(): ValueT; /** * Get the last row of the dataframe. * * @return Returns the last row of the dataframe. * * @example *
     *
     * const lastRow = df.last();
     * 
*/ last(): ValueT; /** * Get the row, if there is one, with the specified index. * * @param index Index for which to retrieve the row. * * @return Returns the row from the specified index in the dataframe or undefined if there is no such index present in the dataframe. * * @example *
     *
     * const row = df.at(5); // Get the row at index 5 (with a default 0-based index).
     * 
* * @example *
     *
     * const date = ... some date ...
     * // Retrieve the row with the specified date from a time-series dataframe (assuming a date index has been applied).
     * const row = df.at(date);
     * 
*/ at(index: IndexT): ValueT | undefined; /** * Get X rows from the start of the dataframe. * Pass in a negative value to get all rows at the head except for X rows at the tail. * * @param numValues Number of rows to take. * * @return Returns a new dataframe that has only the specified number of rows taken from the start of the original dataframe. * * @example *
     *
     * const sample = df.head(10); // Take a sample of 10 rows from the start of the dataframe.
     * 
*/ head(numValues: number): IDataFrame; /** * Get X rows from the end of the dataframe. * Pass in a negative value to get all rows at the tail except X rows at the head. * * @param numValues Number of rows to take. * * @return Returns a new dataframe that has only the specified number of rows taken from the end of the original dataframe. * * @example *
     *
     * const sample = df.tail(12); // Take a sample of 12 rows from the end of the dataframe.
     * 
*/ tail(numValues: number): IDataFrame; /** * Filter the dataframe through a user-defined predicate function. * * `where` is an alias for {@link DataFrame.filter}. * * This is the same concept as the JavaScript function `Array.filter` but filters a dataframe rather than an array. * * @param predicate Predicate function to filter values from the dataframe. Returns true/truthy to keep elements, or false/falsy to omit elements. * * @return Returns a new dataframe containing only the values from the original dataframe that matched the predicate. * * @example *
     *
     * // Filter so we only have sales figures greater than 100.
     * const filtered = dataframe.where(row => row.salesFigure > 100);
     * console.log(filtered.toArray());
     * 
*/ where(predicate: PredicateFn): IDataFrame; /** * Filter the dataframe through a user-defined predicate function. * * This is the same concept as the JavaScript function `Array.filter` but filters a dataframe rather than an array. * * @param predicate Predicate function to filter values from the dataframe. Returns true/truthy to keep elements, or false/falsy to omit elements. * * @return Returns a new dataframe containing only the values from the original dataframe that matched the predicate. * * @example *
     *
     * // Filter so we only have sales figures greater than 100.
     * const filtered = dataframe.filter(row => row.salesFigure > 100);
     * console.log(filtered.toArray());
     * 
*/ filter(predicate: PredicateFn): IDataFrame; /** * Invoke a callback function for each row in the dataframe. * * @param callback The callback function to invoke for each row. * * @return Returns the original dataframe with no modifications. * * @example *
     *
     * df.forEach(row => {
     *      // ... do something with the row ...
     * });
     * 
*/ forEach(callback: CallbackFn): IDataFrame; /** * Evaluates a predicate function for every row in the dataframe to determine * if some condition is true/truthy for **all** rows in the dataframe. * * @param predicate Predicate function that receives each row. It should return true/truthy for a match, otherwise false/falsy. * * @return Returns true if the predicate has returned true or truthy for every row in the dataframe, otherwise returns false. Returns false for an empty dataframe. * * @example *
     *
     * const everyoneIsNamedFred = df.all(row => row.CustomerName === "Fred"); // Check if all customers are named Fred.
     * 
*/ all(predicate: PredicateFn): boolean; /** * Evaluates a predicate function for every row in the dataframe to determine * if some condition is true/truthy for **any** of rows in the dataframe. * * If no predicate is specified then it simply checks if the dataframe contains more than zero rows. * * @param predicate Optional predicate function that receives each row. It should return true/truthy for a match, otherwise false/falsy. * * @return Returns true if the predicate has returned truthy for any row in the sequence, otherwise returns false. * If no predicate is passed it returns true if the dataframe contains any rows at all. * Returns false for an empty dataframe. * * @example *
     *
     * const anyFreds = df.any(row => row.CustomerName === "Fred"); // Do we have any customers named Fred?
     * 
* * @example *
     *
     * const anyCustomers = df.any(); // Do we have any customers at all?
     * 
*/ any(predicate?: PredicateFn): boolean; /** * Evaluates a predicate function for every row in the dataframe to determine * if some condition is true/truthy for **none** of rows in the dataframe. * * If no predicate is specified then it simply checks if the dataframe contains zero rows. * * @param predicate Optional predicate function that receives each row. It should return true/truthy for a match, otherwise false/falsy. * * @return Returns true if the predicate has returned truthy for zero rows in the dataframe, otherwise returns false. Returns false for an empty dataframe. * * @example *
     *
     * const noFreds = df.none(row => row.CustomerName === "Fred"); // Do we have zero customers named Fred?
     * 
* * @example *
     *
     * const noCustomers = df.none(); // Do we have zero customers?
     * 
*/ none(predicate?: PredicateFn): boolean; /** * Gets a new dataframe containing all rows starting at and after the specified index value. * * @param indexValue The index value at which to start the new dataframe. * * @return Returns a new dataframe containing all rows starting at and after the specified index value. * * @example *
 *
 * const df = new DataFrame({
 *      index: [0, 1, 2, 3], // This is the default index.
 *      values: [10, 20, 30, 40],
 * });
 *
 * const lastHalf = df.startAt(2);
 * expect(lastHalf.toArray()).to.eql([30, 40]);
 * 
* * @example *
 *
 * const timeSeriesDf = ... a dataframe indexed by date/time ...
 *
 * // Get all rows starting at (or after) a particular date.
 * const allRowsFromStartDate = df.startAt(new Date(2016, 5, 4));
 * 
*/ startAt(indexValue: IndexT): IDataFrame; /** * Gets a new dataframe containing all rows up until and including the specified index value (inclusive). * * @param indexValue The index value at which to end the new dataframe. * * @return Returns a new dataframe containing all rows up until and including the specified index value. * * @example *
     *
     * const df = new DataFrame({
     *      index: [0, 1, 2, 3], // This is the default index.
     *      values: [10, 20, 30, 40],
     * });
     *
     * const firstHalf = df.endAt(1);
     * expect(firstHalf.toArray()).to.eql([10, 20]);
     * 
* * @example *
     *
     * const timeSeriesDf = ... a dataframe indexed by date/time ...
     *
     * // Get all rows ending at a particular date.
     * const allRowsUpToAndIncludingTheExactEndDate = df.endAt(new Date(2016, 5, 4));
     * 
*/ endAt(indexValue: IndexT): IDataFrame; /** * Gets a new dataframe containing all rows up to the specified index value (exclusive). * * @param indexValue The index value at which to end the new dataframe. * * @return Returns a new dataframe containing all rows up to (but not including) the specified index value. * * @example *
     *
     * const df = new DataFrame({
     *      index: [0, 1, 2, 3], // This is the default index.
     *      values: [10, 20, 30, 40],
     * });
     *
     * const firstHalf = df.before(2);
     * expect(firstHalf.toArray()).to.eql([10, 20]);
     * 
* * @example *
     *
     * const timeSeriesDf = ... a dataframe indexed by date/time ...
     *
     * // Get all rows before the specified date.
     * const allRowsBeforeEndDate = df.before(new Date(2016, 5, 4));
     * 
*/ before(indexValue: IndexT): IDataFrame; /** * Gets a new dataframe containing all rows after the specified index value (exclusive). * * @param indexValue The index value after which to start the new dataframe. * * @return Returns a new dataframe containing all rows after the specified index value. * * @example *
     *
     * const df = new DataFrame({
     *      index: [0, 1, 2, 3], // This is the default index.
     *      values: [10, 20, 30, 40],
     * });
     *
     * const lastHalf = df.after(1);
     * expect(lastHalf.toArray()).to.eql([30, 40]);
     * 
* * @example *
     *
     * const timeSeriesDf = ... a dataframe indexed by date/time ...
     *
     * // Get all rows after the specified date.
     * const allRowsAfterStartDate = df.after(new Date(2016, 5, 4));
     * 
*/ after(indexValue: IndexT): IDataFrame; /** * Gets a new dataframe containing all rows between the specified index values (inclusive). * * @param startIndexValue The index at which to start the new dataframe. * @param endIndexValue The index at which to end the new dataframe. * * @return Returns a new dataframe containing all values between the specified index values (inclusive). * * @example *
     *
     * const df = new DataFrame({
     *      index: [0, 1, 2, 3, 4, 6], // This is the default index.
     *      values: [10, 20, 30, 40, 50, 60],
     * });
     *
     * const middleSection = df.between(1, 4);
     * expect(middleSection.toArray()).to.eql([20, 30, 40, 50]);
     * 
* * @example *
     *
     * const timeSeriesDf = ... a dataframe indexed by date/time ...
     *
     * // Get all rows between the start and end dates (inclusive).
     * const allRowsBetweenDates = df.between(new Date(2016, 5, 4), new Date(2016, 5, 22));
     * 
*/ between(startIndexValue: IndexT, endIndexValue: IndexT): IDataFrame; /** * Format the dataframe for display as a string. * This forces lazy evaluation to complete. * * @return Generates and returns a string representation of the dataframe or dataframe. * * @example *
     *
     * console.log(df.toString());
     * 
*/ toString(): string; /** * Parse a column with string values and convert it to a column with int values. * * @param columnNameOrNames Specifies the column name or array of column names to parse. * * @return Returns a new dataframe with a particular named column parsed as ints. * * @example *
     *
     * const withParsedColumn = df.parseInts("MyIntColumn");
     * 
* * @example *
     *
     * const withParsedColumns = df.parseInts(["MyIntColumnA", "MyIntColumnB"]);
     * 
*/ parseInts(columnNameOrNames: string | string[]): IDataFrame; /** * Parse a column with string values and convert it to a column with float values. * * @param columnNameOrNames Specifies the column name or array of column names to parse. * * @return Returns a new dataframe with a particular named column parsed as floats. * * @example *
     *
     * const withParsedColumn = df.parseFloats("MyFloatColumn");
     * 
* * @example *
     *
     * const withParsedColumns = df.parseFloats(["MyFloatColumnA", "MyFloatColumnB"]);
     * 
*/ parseFloats(columnNameOrNames: string | string[]): IDataFrame; /** * Parse a column with string values and convert it to a column with date values. * * @param columnNameOrNames -Specifies the column name or array of column names to parse. * @param formatString Optional formatting string for dates. * * @return Returns a new dataframe with a particular named column parsed as dates. * * @example *
     *
     * const withParsedColumn = df.parseDates("MyDateColumn");
     * 
* * @example *
     *
     * const withParsedColumns = df.parseDates(["MyDateColumnA", "MyDateColumnB"]);
     * 
*/ parseDates(columnNameOrNames: string | string[], formatString?: string): IDataFrame; /** * Convert a column of values of different types to a column of string values. * * @param columnNames Specifies the column name or array of column names to convert to strings. Can also be a format spec that specifies which columns to convert and what their format should be. * @param formatString Optional formatting string for dates. * * Numeral.js is used for number formatting. * http://numeraljs.com/ * * Moment is used for date formatting. * https://momentjs.com/docs/#/parsing/string-format/ * * @return Returns a new dataframe with a particular named column convert to strings. * * @example *
     *
     * const withStringColumn = df.toStrings("MyDateColumn", "YYYY-MM-DD");
     * 
* * @example *
     *
     * const withStringColumn = df.toStrings("MyFloatColumn", "0.00");
     * 
*/ toStrings(columnNames: string | string[] | IFormatSpec, formatString?: string): IDataFrame; /** * Produces a new dataframe with all string values truncated to the requested maximum length. * * @param maxLength The maximum length of the string values after truncation. * * @return Returns a new dataframe with all strings truncated to the specified maximum length. * * @example *
     *
     * // Truncate all string columns to 100 characters maximum.
     * const truncatedDf = df.truncateStrings(100);
     * 
*/ truncateStrings(maxLength: number): IDataFrame; /** * Produces a new dataframe with all number values rounded to the specified number of places. * * @param numDecimalPlaces The number of decimal places, defaults to 2. * * @returns Returns a new dataframe with all number values rounded to the specified number of places. * * @example *
     *
     * const df = ... your data frame ...
     * const rounded = df.round(); // Round numbers to two decimal places.
     * 
* * @example *
     *
     * const df = ... your data frame ...
     * const rounded = df.round(3); // Round numbers to three decimal places.
     * 
*/ round(numDecimalPlaces?: number): IDataFrame; /** * Forces lazy evaluation to complete and 'bakes' the dataframe into memory. * * @return Returns a dataframe that has been 'baked', all lazy evaluation has completed. * * @example *
     *
     * const bakedDf = df.bake();
     * 
*/ bake(): IDataFrame; /** * Gets a new dataframe in reverse order. * * @return Returns a new dataframe that is the reverse of the original. * * @example *
     *
     * const reversed = df.reverse();
     * 
*/ reverse(): IDataFrame; /** * Returns only the set of rows in the dataframe that are distinct according to some criteria. * This can be used to remove duplicate rows from the dataframe. * * @param selector User-defined selector function that specifies the criteria used to make comparisons for duplicate rows. * Note that the selector determines the object used for the comparison. If the selector returns a new instance of an array or a * javascript object, distinct will always include all rows since the object instances are different even if the members are the same. * * @return Returns a dataframe containing only unique values as determined by the 'selector' function. * * @example *
     *
     * // Remove duplicate rows by customer id. Will return only a single row per customer.
     * const distinctCustomers = salesDf.distinct(sale => sale.CustomerId);
     * 
* * @example * *
     * // Remove duplicate rows across multiple columns
     * const safeJoinChar = '$';
     * const distinctCustomers = salesDf.distinct(sale => [sale.CustomerId, sale.MonthOfYear].join(safeJoinChar));
     * 
*/ distinct(selector?: SelectorFn): IDataFrame; /** * Collects rows in the dataframe into a {@link Series} of groups according to a user-defined selector function. * * @param selector User-defined selector function that specifies the criteria to group by. * * @return Returns a {@link Series} of groups. Each group is a dataframe with rows that have been grouped by the 'selector' function. * * @example *
     *
     * const salesDf = ... product sales ...
     * const salesByProduct = salesDf.groupBy(sale => sale.ProductId);
     * for (const productSalesGroup of salesByProduct) {
     *      // ... do something with each product group ...
     *      const productId = productSalesGroup.first().ProductId;
     *      const totalSalesForProduct = productSalesGroup.deflate(sale => sale.Amount).sum();
     *      console.log(totalSalesForProduct);
     * }
     * 
*/ groupBy(selector: SelectorWithIndexFn): ISeries>; /** * Collects values in the series into a new series of groups based on if the values are the same or according to a user-defined selector function. * * @param selector Optional selector that specifies the criteria for grouping. * * @return Returns a {@link Series} of groups. Each group is a dataframe with rows that are the same or have been grouped by the 'selector' function. * * @example *
     *
     * // Some ultra simple stock trading strategy backtesting...
     * const dailyStockPriceDf = ... daily stock price for a company ...
     * const priceGroups = dailyStockPriceDf.groupSequentialBy(day => day.close > day.movingAverage);
     * for (const priceGroup of priceGroups) {
     *      // ... do something with each stock price group ...
     *
     *      const firstDay = priceGroup.first();
     *      if (firstDay.close > firstDay.movingAverage) {
     *          // This group of days has the stock price above its moving average.
     *          // ... maybe enter a long trade here ...
     *      }
     *      else {
     *          // This group of days has the stock price below its moving average.
     *          // ... maybe enter a short trade here ...
     *      }
     * }
     * 
*/ groupSequentialBy(selector?: SelectorFn): ISeries>; /** * Concatenate multiple other dataframes onto this dataframe. * * @param dataframes Multiple arguments. Each can be either a dataframe or an array of dataframes. * * @return Returns a single dataframe concatenated from multiple input dataframes. * * @example *
     *
     * const concatenated = a.concat(b);
     * 
* * @example *
     *
     * const concatenated = a.concat(b, c);
     * 
* * @example *
     *
     * const concatenated = a.concat([b, c]);
     * 
* * @example *
     *
     * const concatenated = a.concat(b, [c, d]);
     * 
* * @example *
     *
     * const otherDfs = [... array of dataframes...];
     * const concatenated = a.concat(otherDfs);
     * 
*/ concat(...dataframes: (IDataFrame[] | IDataFrame)[]): IDataFrame; /** * Merge together multiple dataframes to create a new dataframe. * Preserves the index of the first dataframe. * * @param s2, s3, s4, s4 Multiple dataframes to zip. * @param zipper User-defined zipper function that merges rows. It produces rows for the new dataframe based-on rows from the input dataframes. * * @return Returns a single dataframe merged from multiple input dataframes. * * @example *
    *
    * function produceNewRow (rowA, rowB) {
    *       const outputRow = {
    *           ValueA: rowA.Value,
    *           ValueB: rowB.Value,
    *       };
    *       return outputRow;
    * }
    *
    * const dfA = new DataFrame([ { Value: 10 }, { Value: 20 }, { Value: 30 }]);
    * const dfB = new DataFrame([ { Value: 100 }, { Value: 200 }, { Value: 300 }]);
    * const zippedDf = dfA.zip(dfB, produceNewRow);
    * 
*/ /* NOTE(review): the 4-dataframe zip overload takes Zip3Fn — presumably a Zip4Fn was intended, but only Zip2Fn/Zip3Fn/ZipNFn are imported in this file's header; confirm against the original data-forge typings (generic parameters appear to have been stripped from this file). */ zip(s2: IDataFrame, zipper: Zip2Fn): IDataFrame; zip(s2: IDataFrame, s3: IDataFrame, zipper: Zip3Fn): IDataFrame; zip(s2: IDataFrame, s3: IDataFrame, s4: IDataFrame, zipper: Zip3Fn): IDataFrame; zip(...args: any[]): IDataFrame; /** * Sorts the dataframe in ascending order by a value defined by the user-defined selector function. * * @param selector User-defined selector function that selects the value to sort by. * * @return Returns a new dataframe that has been ordered according to the value chosen by the selector function. * * @example *
     *
     * // Order sales by amount from least to most.
     * const orderedDf = salesDf.orderBy(sale => sale.Amount);
     * 
*/ orderBy(selector: SelectorWithIndexFn): IOrderedDataFrame; /** * Sorts the dataframe in descending order by a value defined by the user-defined selector function. * * @param selector User-defined selector function that selects the value to sort by. * * @return Returns a new dataframe that has been ordered according to the value chosen by the selector function. * * @example *
     *
     * // Order sales by amount from most to least
     * const orderedDf = salesDf.orderByDescending(sale => sale.Amount);
     * 
*/ orderByDescending(selector: SelectorWithIndexFn): IOrderedDataFrame; /** * Creates a new dataframe by merging two input dataframes. * The resulting dataframe contains the union of rows from the two input dataframes. * These are the unique combination of rows in both dataframe. * This is basically a concatenation and then elimination of duplicates. * * @param other The other dataframes to merge. * @param selector Optional user-defined selector function that selects the value to compare to determine distinctness. * * @return Returns the union of the two dataframes. * * @example *
     *
     * const dfA = ...
     * const dfB = ...
     * const merged = dfA.union(dfB);
     * 
* * @example *
     *
     * // Merge two sets of customer records that may contain the same
     * // customer record in each set. This is basically a concatenation
     * // of the dataframes and then an elimination of any duplicate records
     * // that result.
     * const customerRecordsA = ...
     * const customerRecordsB = ...
     * const mergedCustomerRecords = customerRecordsA.union(
     *      customerRecordsB,
     *      customerRecord => customerRecord.CustomerId
     * );
     * 
* * * @example *
     *
     * // Note that you can achieve the exact same result as the previous
     * // example by doing a {@link DataFrame.concat} and {@link DataFrame.distinct}
     * // of the dataframes and then an elimination of any duplicate records
     * // that result.
     * const customerRecordsA = ...
     * const customerRecordsB = ...
     * const mergedCustomerRecords = customerRecordsA
     *      .concat(customerRecordsB)
     *      .distinct(customerRecord => customerRecord.CustomerId);
     * 
* */ union(other: IDataFrame, selector?: SelectorFn): IDataFrame; /** * Creates a new dataframe by merging two input dataframes. * The resulting dataframe contains the intersection of rows from the two input dataframes. * These are only the rows that appear in both dataframes. * * @param inner The inner dataframe to merge (the dataframe you call the function on is the 'outer' dataframe). * @param outerSelector Optional user-defined selector function that selects the key from the outer dataframe that is used to match the two dataframes. * @param innerSelector Optional user-defined selector function that selects the key from the inner dataframe that is used to match the two dataframes. * * @return Returns a new dataframe that contains the intersection of rows from the two input dataframes. * * @example *
     *
     * const dfA = ...
     * const dfB = ...
     * const mergedDf = dfA.intersection(dfB);
     * 
* * @example *
     *
     * // Merge two sets of customer records to find only the
     * // customers that appears in both.
     * const customerRecordsA = ...
     * const customerRecordsB = ...
     * const intersectionOfCustomerRecords = customerRecordsA.intersection(
     *      customerRecordsB,
     *      customerRecord => customerRecord.CustomerId
     * );
     * 
* */ intersection(inner: IDataFrame, outerSelector?: SelectorFn, innerSelector?: SelectorFn): IDataFrame; /** * Creates a new dataframe by merging two input dataframes. * The resulting dataframe contains only the rows from the 1st dataframe that don't appear in the 2nd dataframe. * This is essentially subtracting the rows from the 2nd dataframe from the 1st and creating a new dataframe with the remaining rows. * * @param inner The inner dataframe to merge (the dataframe you call the function on is the 'outer' dataframe). * @param outerSelector Optional user-defined selector function that selects the key from the outer dataframe that is used to match the two dataframes. * @param innerSelector Optional user-defined selector function that selects the key from the inner dataframe that is used to match the two dataframes. * * @return Returns a new dataframe that contains only the rows from the 1st dataframe that don't appear in the 2nd dataframe. * * @example *
     *
     * const dfA = ...
     * const dfB = ...
     * const remainingDf = dfA.except(dfB);
     * 
* * @example *
     *
     * // Find the list of customers haven't bought anything recently.
     * const allCustomers = ... list of all customers ...
     * const recentCustomers = ... list of customers who have purchased recently ...
     * const remainingCustomers = allCustomers.except(
     *      recentCustomers,
     *      customerRecord => customerRecord.CustomerId
     * );
     * 
*/ except(inner: IDataFrame, outerSelector?: SelectorFn, innerSelector?: SelectorFn): IDataFrame; /** * Creates a new dataframe by merging two input dataframes. * The resulting dataframe contains only those rows that have matching keys in both input dataframes. * * @param inner The 'inner' dataframe to join (the dataframe you are callling the function on is the 'outer' dataframe). * @param outerKeySelector User-defined selector function that chooses the join key from the outer dataframe. * @param innerKeySelector User-defined selector function that chooses the join key from the inner dataframe. * @param resultSelector User-defined function that merges outer and inner values. * * @return Returns the new merged dataframe. * * @example *
      *
      * // Join together two sets of customers to find those
      * // that have bought both product A and product B.
      * const customerWhoBoughtProductA = ...
      * const customerWhoBoughtProductB = ...
      * const customersWhoBoughtBothProductsDf = customerWhoBoughtProductA.join(
      *          customerWhoBoughtProductB,
      *          customerA => customerA.CustomerId, // Join key.
      *          customerB => customerB.CustomerId, // Join key.
      *          (customerA, customerB) => {
      *              return {
      *                  // ... merge the results ...
      *              };
      *          }
      *      );
      * 
*/ join(inner: IDataFrame, outerKeySelector: SelectorFn, innerKeySelector: SelectorFn, resultSelector: JoinFn): IDataFrame; /** * Creates a new dataframe by merging two input dataframes. * The resulting dataframe contains only those rows that are only present in one or the other of the dataframes, or both. * * @param inner The 'inner' dataframe to join (the dataframe you are callling the function on is the 'outer' dataframe). * @param outerKeySelector User-defined selector function that chooses the join key from the outer dataframe. * @param innerKeySelector User-defined selector function that chooses the join key from the inner dataframe. * @param resultSelector User-defined function that merges outer and inner values. * * Implementation from here: * * http://blogs.geniuscode.net/RyanDHatch/?p=116 * * @return Returns the new merged dataframe. * * @example *
     *
     * // Join together two sets of customers to find those
     * // that have bought either product A or product B, or both.
     * const customerWhoBoughtProductA = ...
     * const customerWhoBoughtProductB = ...
     * const customersWhoBoughtEitherProductButNotBothDf = customerWhoBoughtProductA.joinOuter(
     *          customerWhoBoughtProductB,
     *          customerA => customerA.CustomerId, // Join key.
     *          customerB => customerB.CustomerId, // Join key.
     *          (customerA, customerB) => {
     *              return {
     *                  // ... merge the results ...
     *              };
     *          }
     *      );
     * 
*/ joinOuter(inner: IDataFrame, outerKeySelector: SelectorFn, innerKeySelector: SelectorFn, resultSelector: JoinFn): IDataFrame; /** * Creates a new dataframe by merging two input dataframes. * The resulting dataframe contains only those rows that are present either in both dataframes or only in the outer (left) dataframe. * * @param inner The 'inner' dataframe to join (the dataframe you are callling the function on is the 'outer' dataframe). * @param outerKeySelector User-defined selector function that chooses the join key from the outer dataframe. * @param innerKeySelector User-defined selector function that chooses the join key from the inner dataframe. * @param resultSelector User-defined function that merges outer and inner values. * * Implementation from here: * * http://blogs.geniuscode.net/RyanDHatch/?p=116 * * @return Returns the new merged dataframe. * * @example *
     *
     * // Join together two sets of customers to find those
     * // that have bought either just product A or both product A and product B.
     * const customerWhoBoughtProductA = ...
     * const customerWhoBoughtProductB = ...
     * const boughtJustAorAandB = customerWhoBoughtProductA.joinOuterLeft(
     *          customerWhoBoughtProductB,
     *          customerA => customerA.CustomerId, // Join key.
     *          customerB => customerB.CustomerId, // Join key.
     *          (customerA, customerB) => {
     *              return {
     *                  // ... merge the results ...
     *              };
     *          }
     *      );
     * 
*/ joinOuterLeft(inner: IDataFrame, outerKeySelector: SelectorFn, innerKeySelector: SelectorFn, resultSelector: JoinFn): IDataFrame; /** * Creates a new dataframe by merging two input dataframes. * The resulting dataframe contains only those rows that are present either in both dataframes or only in the inner (right) dataframe. * * @param inner The 'inner' dataframe to join (the dataframe you are calling the function on is the 'outer' dataframe). * @param outerKeySelector User-defined selector function that chooses the join key from the outer dataframe. * @param innerKeySelector User-defined selector function that chooses the join key from the inner dataframe. * @param resultSelector User-defined function that merges outer and inner values. * * Implementation from here: * * http://blogs.geniuscode.net/RyanDHatch/?p=116 * * @return Returns the new merged dataframe. * * @example *
     *
     * // Join together two sets of customers to find those
     * // that have bought either just product B or both product A and product B.
     * const customerWhoBoughtProductA = ...
     * const customerWhoBoughtProductB = ...
     * const boughtJustAorAandB = customerWhoBoughtProductA.joinOuterRight(
     *          customerWhoBoughtProductB,
     *          customerA => customerA.CustomerId, // Join key.
     *          customerB => customerB.CustomerId, // Join key.
     *          (customerA, customerB) => {
     *              return {
     *                  // ... merge the results ...
     *              };
     *          }
     *      );
     * 
*/ joinOuterRight(inner: IDataFrame, outerKeySelector: SelectorFn, innerKeySelector: SelectorFn, resultSelector: JoinFn): IDataFrame; /** * Produces a summary of the dataframe. * * @param spec Optional parameter that specifies which columns to aggregate and how to aggregate them. Leave this out to produce a default summary of all columns. * * @returns An object with fields that summarize the values in the dataframe. * * @example *
     *
     * const summary = df.summarize();
     * console.log(summary);
     * 
* * @example *
     *
     * const summary = df.summarize({ // Summarize using pre-defined functions.
     *      Column1: Series.sum,
     *      Column2: Series.average,
     *      Column3: Series.count,
     * });
     * console.log(summary);
     * 
* * @example *
     *
     * const summary = df.summarize({ // Summarize using custom functions.
     *      Column1: series => series.sum(),
     *      Column2: series => series.std(),
     *      ColumnN: whateverFunctionYouWant,
     * });
     * console.log(summary);
     * 
* * @example *
     *
     * const summary = df.summarize({ // Multiple output fields per column.
     *      Column1: {
     *          OutputField1: Series.sum,
     *          OutputField2: Series.average,
     *      },
     *      Column2: {
     *          OutputField3: series => series.sum(),
     *          OutputFieldN: whateverFunctionYouWant,
     *      },
     * });
     * console.log(summary);
     * 
*/ summarize(spec?: IMultiColumnAggregatorSpec): OutputValueT; /** * Reshape (or pivot) a dataframe based on column values. * This is a powerful function that combines grouping, aggregation and sorting. * * @param columnOrColumns Column name whose values make the new DataFrame's columns. * @param valueColumnNameOrSpec Column name or column spec that defines the columns whose values should be aggregated. * @param aggregator Optional function used to aggregate pivotted vales. * * @return Returns a new dataframe that has been pivoted based on a particular column's values. * * @example *
     *
     * // Simplest example.
     * // Group by the values in 'PivotColumn'.
     * // The column 'ValueColumn' is aggregated for each group and this becomes the
     * // values in the output column.
     * const pivottedDf = df.pivot("PivotColumn", "ValueColumn", values => values.average());
     * 
* * @example *
     *
     * // Multiple input column example.
     * // Similar to the previous example except now we are aggregating multiple input columns.
     * // Each group has the average computed for 'ValueColumnA' and the sum for 'ValueColumnB'.
     * const pivottedDf = df.pivot("PivotColumn", {
     *      ValueColumnA: aValues => aValues.average(),
     *      ValueColumnB:  bValues => bValues.sum(),
     * });
     * 
* * @example *
     *
     * // Multiple output column example.
     * // Similar to the previous example except now we are aggregating multiple outputs for each input column.
     * // This example produces an output dataframe with columns OutputColumnA, B, C and D.
     * // OutputColumnA/B are the sum and average of ValueColumnA across each group as defined by PivotColumn.
     * // OutputColumnC/D are the sum and average of ValueColumnB across each group as defined by PivotColumn.
     * const pivottedDf = df.pivot("PivotColumn", {
     *      ValueColumnA: {
     *          OutputColumnA: aValues => aValues.sum(),
     *          OutputColumnB: aValues => aValues.average(),
     *      },
     *      ValueColumnB: {
     *          OutputColumnC: bValues => bValues.sum(),
     *          OutputColumnD: bValues => bValues.average(),
     *      },
     * });
     * 
* * @example *
     *
     * // Full multi-column example.
     * // Similar to the previous example, but now we are pivotting on multiple columns.
     * // We now group by 'PivotColumnA' and then by 'PivotColumnB', effectively creating a
     * // multi-level nested group.
     * const pivottedDf = df.pivot(["PivotColumnA", "PivotColumnB" ], {
     *      ValueColumnA: aValues => aValues.average(),
     *      ValueColumnB:  bValues => bValues.sum(),
     * });
     * 
* * @example *
     *
     * // To help understand the pivot function, let's expand it out and look at what it does internally.
     * // Take the simplest example:
     * const pivottedDf = df.pivot("PivotColumn", "ValueColumn", values => values.average());
     *
     * // If we expand out the internals of the pivot function, it will look something like this:
     * const pivottedDf = df.groupBy(row => row.PivotColumn)
     *          .select(group => ({
     *              PivotColumn: group.first().PivotColumn,
     *              ValueColumn: group.deflate(row => row.ValueColumn).average()
     *          }))
     *          .orderBy(row  => row.PivotColumn);
     *
     * // You can see that pivoting a dataframe is the same as grouping, aggregating and sorting it.
     * // Does pivoting seem simpler now?
     *
     * // It gets more complicated than that of course, because the pivot function supports multi-level nested
     * // grouping and aggregation of multiple columns. So a full expansion of the pivot function is rather complex.
     * 
*/ pivot(columnOrColumns: string | Iterable, valueColumnNameOrSpec: string | IMultiColumnAggregatorSpec, aggregator?: (values: ISeries) => any): IDataFrame; /** * Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. * This is a powerful function that combines grouping, aggregation and sorting. * * @param idColumnOrColumns Column(s) to use as identifier variables. * @param valueColumnOrColumns Column(s) to unpivot. * * @return Returns a new dataframe that has been unpivoted based on a particular column's values. * * @example *
     *
     * // Use column in 'idColumnOrColumns' as the identity column.
     * // The column name passed in 'valueColumnOrColumns' forms the 'variable' column
     * // and the values are used to populate the 'value' column of the new dataframe.
     * const moltenDf = df.melt("A", "B");
     * 
* * @example *
     *
     * // Multiple value columns example.
     * // Similar to the previous example except now the variable column will constitute
     * // of multiple values.
     * const moltenDf = df.melt("A", ["B", "C"]);
     * 
* * @example *
     *
     * // Multiple identity and value columns example.
     * const moltenDf = df.melt(["A", "B"], ["C", "D"]);
     * 
*/ melt(idColumnOrColumns: string | Iterable, valueColumnOrColumns: string | Iterable): IDataFrame; /** * Insert a pair at the start of the dataframe. * Doesn't modify the original dataframe! The returned dataframe is entirely new and contains rows from the original dataframe plus the inserted pair. * * @param pair The index/value pair to insert. * * @return Returns a new dataframe with the specified pair inserted. * * @example *
     *
     * const newIndex = ... index of the new row ...
     * const newRow = ... the new data row to insert ...
     * const insertedDf = df.insertPair([newIndex, newRow]);
     * 
*/ insertPair(pair: [IndexT, ValueT]): IDataFrame; /** * Append a pair to the end of a dataframe. * Doesn't modify the original dataframe! The returned dataframe is entirely new and contains rows from the original dataframe plus the appended pair. * * @param pair The index/value pair to append. * * @return Returns a new dataframe with the specified pair appended. * * @example *
     *
     * const newIndex = ... index of the new row ...
     * const newRow = ... the new data row to append ...
     * const appendedDf = df.appendPair([newIndex, newRow]);
     * 
*/ appendPair(pair: [IndexT, ValueT]): IDataFrame; /** * Removes rows from the dataframe by index. */ remove(index: IndexT): IDataFrame; /** * Fill gaps in a dataframe. * * @param comparer User-defined comparer function that is passed pairA and pairB, two consecutive rows, return truthy if there is a gap between the rows, or falsey if there is no gap. * @param generator User-defined generator function that is passed pairA and pairB, two consecutive rows, returns an array of pairs that fills the gap between the rows. * * @return Returns a new dataframe with gaps filled in. * * @example *
     *
     *   var sequenceWithGaps = ...
     *
     *  // Predicate that determines if there is a gap.
     *  var gapExists = (pairA, pairB) => {
     *      // Returns true if there is a gap.
     *      return true;
     *  };
     *
     *  // Generator function that produces new rows to fill the gap.
     *  var gapFiller = (pairA, pairB) => {
     *      // Create an array of index, value pairs that fill the gaps between pairA and pairB.
     *      return [
     *          newPair1,
     *          newPair2,
     *          newPair3,
     *      ];
     *  };
     *
     *  var sequenceWithoutGaps = sequenceWithGaps.fillGaps(gapExists, gapFiller);
     * 
*/ fillGaps(comparer: ComparerFn<[IndexT, ValueT], [IndexT, ValueT]>, generator: GapFillFn<[IndexT, ValueT], [IndexT, ValueT]>): IDataFrame; /** * Returns the specified default dataframe if the input dataframe is empty. * * @param defaultSequence Default dataframe to return if the input dataframe is empty. * * @return Returns 'defaultSequence' if the input dataframe is empty. * * @example *
     *
     * const emptyDataFrame = new DataFrame();
     * const defaultDataFrame = new DataFrame([ { A: 1 }, { A: 2 }, { A: 3 } ]);
     * expect(emptyDataFrame.defaultIfEmpty(defaultDataFrame)).to.eql(defaultDataFrame);
     * 
* * @example *
     *
     * const nonEmptyDataFrame = new DataFrame([ { A: 100 }]);
     * const defaultDataFrame = new DataFrame([ { A: 1 }, { A: 2 }, { A: 3 } ]);
     * expect(nonEmptyDataFrame.defaultIfEmpty(defaultDataFrame)).to.eql(nonEmptyDataFrame);
     * 
*/ defaultIfEmpty(defaultSequence: ValueT[] | IDataFrame): IDataFrame; /** * Detect the frequency of the types of the values in the dataframe. * This is a good way to understand the shape of your data. * * @return Returns a dataframe with rows that conform to {@link ITypeFrequency} that describes the data types contained in the original dataframe. * * @example *
     *
     * const df = dataForge.readFileSync("./my-data.json").parseJSON();
     * const dataTypes = df.detectTypes();
     * console.log(dataTypes.toString());
     * 
*/ detectTypes(): IDataFrame; /** * Detect the frequency of the values in the dataframe. * This is a good way to understand the shape of your data. * * @return Returns a dataframe with rows that conform to {@link IValueFrequency} that describes the values contained in the original dataframe. * * @example *
     *
     * const df = dataForge.readFileSync("./my-data.json").parseJSON();
     * const dataValues = df.detectValues();
     * console.log(dataValues.toString());
     * 
*/ detectValues(): IDataFrame; /** * Serialize the dataframe to the JSON data format. * * @return Returns a string in the JSON data format that represents the dataframe. * * @example *
     *
     * const jsonData = df.toJSON();
     * console.log(jsonData);
     * 
*/ toJSON(): string; /** * Serialize the dataframe to the JSON5 data format. * * @return Returns a string in the JSON5 data format that represents the dataframe. * * @example *
     *
     * const jsonData = df.toJSON5();
     * console.log(jsonData);
     * 
*/ toJSON5(): string; /** * Serialize the dataframe to the CSV data format. * * @param options Options for CSV output. The options object is passed directly to [PapaParse.unparse](https://www.papaparse.com/docs#unparse), please see [PapaParse docs for additional options](https://www.papaparse.com/docs#unparse-config-default). * * @return Returns a string in the CSV data format that represents the dataframe. * * @example *
     *
     * const csvData = df.toCSV();
     * console.log(csvData);
     * 
* * @example *
     *
     * const csvData = df.toCSV({ header: false });
     * console.log(csvData);
     * 
*/ toCSV(options?: ICSVOutputOptions): string; /** * Serialize the dataframe to HTML. * * @return Returns a string in HTML format that represents the dataframe. */ toHTML(): string; /** * Serialize the dataframe to an ordinary JavaScript data structure. * The resulting data structure is suitable for further serialization to JSON and can be used to * transmit a DataFrame and its internal structure over the wire. * Use the {@link deserialize} function to later reconstitute the serialized dataframe. * * @return Returns a JavaScript data structure conforming to {@link ISerializedDataFrame} that represents the dataframe and its internal structure. * * @example *
     *
     * const jsDataStructure = df.serialize();
     * const jsonData = JSON.stringify(jsDataStructure);
     * console.log(jsonData);
     * const deserializedJsDataStructure = JSON.parse(jsonData);
     * const deserializedDf = DataFrame.deserialize(deserializedJsDataStructure); // Reconstituted.
     * 
*/ serialize(): any; } /** * Interface to a dataframe that has been sorted. */ export interface IOrderedDataFrame extends IDataFrame { /** * Applies additional sorting (ascending) to an already sorted dataframe. * * @param selector User-defined selector that selects the additional value to sort by. * * @return Returns a new dataframe that has been additionally sorted by the value chosen by the selector function. * * @example *
     *
     * // Order sales by salesperson and then by amount (from least to most).
     * const orderedDf = salesDf.orderBy(sale => sale.SalesPerson).thenBy(sale => sale.Amount);
     * 
*/ thenBy(selector: SelectorWithIndexFn): IOrderedDataFrame; /** * Applies additional sorting (descending) to an already sorted dataframe. * * @param selector User-defined selector that selects the additional value to sort by. * * @return Returns a new dataframe that has been additionally sorted by the value chosen by the selector function. * * @example *
     *
     * // Order sales by salesperson and then by amount (from most to least).
     * const orderedDf = salesDf.orderBy(sale => sale.SalesPerson).thenByDescending(sale => sale.Amount);
     * 
*/ thenByDescending(selector: SelectorWithIndexFn): IOrderedDataFrame; } /** * Class that represents a dataframe. * A dataframe contains an indexed sequence of data records. * Think of it as a spreadsheet or CSV file in memory. * * Each data record contains multiple named fields, the value of each field represents one row in a column of data. * Each column of data is a named {@link Series}. * You can think of a dataframe as a collection of named data series. * * @typeparam IndexT The type to use for the index. * @typeparam ValueT The type to use for each row/data record. */ export declare class DataFrame implements IDataFrame { private configFn; private content; private indexedContent; private static readonly defaultCountIterable; private static readonly defaultEmptyIterable; private static initFromIterator(iterator); private static initFromIterable(arr); private static initEmpty(); private static initColumnNames(inputColumnNames, isCaseSensitive?); private static isIterator(input); private static isIterable(input); private static checkIterable(input, fieldName); private static initFromConfig(config); /** * Create a dataframe. * * @param config This can be an array, a configuration object or a function that lazily produces a configuration object. * * It can be an array that specifies the data records that the dataframe contains. * * It can be a {@link IDataFrameConfig} that defines the data and configuration of the dataframe. * * Or it can be a function that lazily produces a {@link IDataFrameConfig}. * * @example *
     *
     * const df = new DataFrame();
     * 
* * @example *
     *
     * const df = new DataFrame([ { A: 10 }, { A: 20 }, { A: 30 }, { A: 40 }]);
     * 
* * @example *
     *
     * const df = new DataFrame({ index: [1, 2, 3, 4], values: [ { A: 10 }, { A: 20 }, { A: 30 }, { A: 40 }] });
     * 
* * @example *
     *
     * const lazyInit = () => ({ index: [1, 2, 3, 4], values: [ { A: 10 }, { A: 20 }, { A: 30 }, { A: 40 }] });
     * const df = new DataFrame(lazyInit);
     * 
*/ constructor(config?: Iterator | Iterable | IDataFrameConfig | DataFrameConfigFn | IDataFrame | ISeries); private lazyInit(); private getContent(); private getRowByIndex(index); /** * Get an iterator to enumerate the rows of the dataframe. * Enumerating the iterator forces lazy evaluation to complete. * This function is automatically called by `for...of`. * * @return An iterator for the dataframe. * * @example *
     *
     * for (const row of df) {
     *     // ... do something with the row ...
     * }
     * 
*/ [Symbol.iterator](): Iterator; /** * Get the names of the columns in the dataframe. * * @return Returns an array of the column names in the dataframe. * * @example *
     *
     * console.log(df.getColumnNames());
     * 
*/ getColumnNames(): string[]; /** * Retreive the collection of all columns in the dataframe. * * @return Returns a {@link Series} containing the names of the columns in the dataframe. * * @example *
     *
     * for (const column of df.getColumns()) {
     *      console.log("Column name: ");
     *      console.log(column.name);
     *
     *      console.log("Data:");
     *      console.log(column.series.toArray());
     * }
     * 
*/ getColumns(): ISeries; /** * Returns true if the dataframe is case sensitive or false if case insensitive. * * @return true if the dataframe is case sensitive, otherwise false. */ isCaseSensitive(): boolean; /** * Cast the value of the dataframe to a new type. * This operation has no effect but to retype the value that the dataframe contains. * * @return The same dataframe, but with the type changed. * * @example *
     *
     * const castDf = df.cast();
     * 
*/ cast(): IDataFrame; /** * Get the index for the dataframe. * * @return The {@link Index} for the dataframe. * * @example *
     *
     * const index = df.getIndex();
     * 
*/ getIndex(): IIndex; /** * Set a named column as the {@link Index} of the dataframe. * * @param columnName Name of the column to use as the new {@link Index} of the returned dataframe. * * @return Returns a new dataframe with the values of the specified column as the new {@link Index}. * * @example *
     *
     * const indexedDf = df.setIndex("SomeColumn");
     * 
*/ setIndex(columnName: string): IDataFrame; /** * Apply a new {@link Index} to the dataframe. * * @param newIndex The new array or iterable to be the new {@link Index} of the dataframe. Can also be a selector to choose the {@link Index} for each row in the dataframe. * * @return Returns a new dataframe or dataframe with the specified {@link Index} attached. * * @example *
     *
     * const indexedDf = df.withIndex([10, 20, 30]);
     * 
* * @example *
     *
     * const indexedDf = df.withIndex(df.getSeries("SomeColumn"));
     * 
* * @example *
     *
     * const indexedDf = df.withIndex(row => row.SomeColumn);
     * 
* * @example *
     *
     * const indexedDf = df.withIndex(row => row.SomeColumn + 20);
     * 
*/ withIndex(newIndex: Iterable | SelectorFn): IDataFrame; /** * Resets the {@link Index} of the dataframe back to the default zero-based sequential integer index. * * @return Returns a new dataframe with the {@link Index} reset to the default zero-based index. * * @example *
     *
     * const dfWithResetIndex = df.resetIndex();
     * 
*/ resetIndex(): IDataFrame; /** * Extract a {@link Series} from a named column in the dataframe. * * @param columnName Specifies the name of the column that contains the {@link Series} to retreive. * * @return Returns the {@link Series} extracted from the named column in the dataframe. * * @example *
     *
     * const series = df.getSeries("SomeColumn");
     * 
*/ getSeries(columnName: string): ISeries; /** * Determine if the dataframe contains a {@link Series} the specified named column. * * @param columnName Name of the column to check for. * * @return Returns true if the dataframe contains the requested {@link Series}, otherwise returns false. * * @example *
     *
     * if (df.hasSeries("SomeColumn")) {
     *      // ... the dataframe contains a series with the specified column name ...
     * }
     * 
*/ hasSeries(columnName: string): boolean; /** * Verify the existence of a name column and extracts the {@link Series} for it. * Throws an exception if the requested column doesn't exist. * * @param columnName Name of the column to extract. * * @return Returns the {@link Series} for the column if it exists, otherwise it throws an exception. * * @example *
     *
     * try {
     *      const series = df.expectSeries("SomeColumn");
     *      // ... do something with the series ...
     * }
     * catch (err) {
     *      // ... the dataframe doesn't contain the column "SomeColumn" ...
     * }
     * 
*/ expectSeries(columnName: string): ISeries; /** * Create a new dataframe with a replaced or additional column specified by the passed-in series. * * @param columnNameOrSpec The name of the column to add or replace or a {@link IColumnGenSpec} that defines the columns to add. * @param series When columnNameOrSpec is a string that identifies the column to add, this specifies the {@link Series} to add to the dataframe or a function that produces a series (given a dataframe). * * @return Returns a new dataframe replacing or adding a particular named column. * * @example *
     *
     * const modifiedDf = df.withSeries("ANewColumn", new Series([1, 2, 3]));
     * 
* * @example *
     *
     * const modifiedDf = df.withSeries("ANewColumn", df =>
     *      df.getSeries("SourceData").select(aTransformation)
     * );
     * 
* * @example *
     *
     * const modifiedDf = df.withSeries({
     *      ANewColumn: new Series([1, 2, 3]),
     *      SomeOtherColumn: new Series([10, 20, 30])
     * });
     * 
     *
     * @example
     * 
     *
     * const modifiedDf = df.withSeries({
     *      ANewColumn: df => df.getSeries("SourceData").select(aTransformation))
     * });
     * 
     */
    withSeries(columnNameOrSpec: string | IColumnGenSpec, series?: ISeries | SeriesSelectorFn): IDataFrame;
    /**
     * Merge multiple dataframes into a single dataframe.
     * Rows are merged by index.
     * Same named columns in subsequent dataframes override columns earlier dataframes.
     *
     * @param dataFrames An array or series of dataframes to merge.
     *
     * @returns The merged data frame.
     *
     * @example
     * 
     *
     * const mergedDF = DataFrame.merge([df1, df2, etc]);
     * 
*/ static merge(dataFrames: Iterable>): IDataFrame; /** * Merge one or more dataframes into this dataframe. * Rows are merged by index. * Same named columns in subsequent dataframes override columns in earlier dataframes. * * @param otherDataFrames... One or more dataframes to merge into this dataframe. * * @returns The merged data frame. * * @example *
     *
     * const mergedDF = df1.merge(df2);
     * 
* *
     *
     * const mergedDF = df1.merge(df2, df3, etc);
     * 
*/ merge(...otherDataFrames: IDataFrame[]): IDataFrame; /** * Add a series to the dataframe, but only if it doesn't already exist. * * @param columnNameOrSpec The name of the series to add or a {@link IColumnGenSpec} that specifies the columns to add. * @param series If columnNameOrSpec is a string that specifies the name of the series to add, this specifies the actual {@link Series} to add or a selector that generates the series given the dataframe. * * @return Returns a new dataframe with the specified series added, if the series didn't already exist. Otherwise if the requested series already exists the same dataframe is returned. * * @example *
     *
     * const updatedDf = df.ensureSeries("ANewColumn", new Series([1, 2, 3]));
     * 
* * @example *
     *
     * const updatedDf = df.ensureSeries("ANewColumn", df =>
     *      df.getSeries("AnExistingSeries").select(aTransformation)
     * );
     * 
* * @example *
     *
     * const modifiedDf = df.ensureSeries({
     *      ANewColumn: new Series([1, 2, 3]),
     *      SomeOtherColumn: new Series([10, 20, 30])
     * });
     * 
     *
     * @example
     * 
     *
     * const modifiedDf = df.ensureSeries({
     *      ANewColumn: df => df.getSeries("SourceData").select(aTransformation))
     * });
     * 
     */
    ensureSeries(columnNameOrSpec: string | IColumnGenSpec, series?: ISeries | SeriesSelectorFn): IDataFrame;
    /**
     * Create a new dataframe with just a subset of columns.
     *
     * @param columnNames Array of column names to include in the new dataframe.
     *
     * @return Returns a dataframe with a subset of columns from the original dataframe.
     *
     * @example
     * 
     * const subsetDf = df.subset(["ColumnA", "ColumnB"]);
     * 
*/ subset(columnNames: string[]): IDataFrame; /** * Create a new dataframe with the requested column or columns dropped. * * @param columnOrColumns Specifies the column name (a string) or columns (array of strings) to drop. * * @return Returns a new dataframe with a particular named column or columns removed. * * @example *
     * const modifiedDf = df.dropSeries("SomeColumn");
     * 
* * @example *
     * const modifiedDf = df.dropSeries(["ColumnA", "ColumnB"]);
     * 
*/ dropSeries(columnOrColumns: string | string[]): IDataFrame; /** * Create a new dataframe with columns reordered. * New column names create new columns (with undefined values), omitting existing column names causes those columns to be dropped. * * @param columnNames Specifies the new order for columns. * * @return Returns a new dataframe with columns reordered according to the order of the array of column names that is passed in. * * @example *
     * const reorderedDf = df.reorderSeries(["FirstColumn", "SecondColumn", "etc"]);
     * 
*/ reorderSeries(columnNames: string[]): IDataFrame; /** * Bring the column(s) with specified name(s) to the front of the column order, making it (or them) the first column(s) in the output dataframe. * * @param columnOrColumns Specifies the column or columns to bring to the front. * * @return Returns a new dataframe with 1 or more columns bought to the front of the column ordering. * * @example *
     * const modifiedDf = df.bringToFront("NewFirstColumn");
     * 
* * @example *
     * const modifiedDf = df.bringToFront(["NewFirstColumn", "NewSecondColumn"]);
     * 
*/ bringToFront(columnOrColumns: string | string[]): IDataFrame; /** * Bring the column(s) with specified name(s) to the back of the column order, making it (or them) the last column(s) in the output dataframe. * * @param columnOrColumns Specifies the column or columns to bring to the back. * * @return Returns a new dataframe with 1 or more columns bought to the back of the column ordering. * * @example *
     * const modifiedDf = df.bringToBack("NewLastColumn");
     * 
* * @example *
     * const modifiedDf = df.bringToBack(["NewSecondLastColumn", "NewLastColumn"]);
     * 
*/ bringToBack(columnOrColumns: string | string[]): IDataFrame; /** * Create a new dataframe with 1 or more columns renamed. * * @param newColumnNames A column rename spec - a JavaScript hash that maps existing column names to new column names. * * @return Returns a new dataframe with specified columns renamed. * * @example *
     *
     * const renamedDf = df.renameSeries({ OldColumnName: "NewColumnName" });
     * 
* * @example *
     *
     * const renamedDf = df.renameSeries({
     *      Column1: "ColumnA",
     *      Column2: "ColumnB"
     * });
     * 
*/ renameSeries(newColumnNames: IColumnRenameSpec): IDataFrame; /** * Extract values from the dataframe as an array. * This forces lazy evaluation to complete. * * @return Returns an array of the values contained within the dataframe. * * @example *
    * const values = df.toArray();
    * 
*/ toArray(): ValueT[]; /** * Retreive the index and values pairs from the dataframe as an array. * Each pair is [index, value]. * This forces lazy evaluation to complete. * * @return Returns an array of pairs that contains the dataframe content. Each pair is a two element array that contains an index and a value. * * @example *
     * const pairs = df.toPairs();
     * 
*/ toPairs(): ([IndexT, ValueT])[]; /** * Convert the dataframe to a JavaScript object. * * @param keySelector Function that selects keys for the resulting object. * @param valueSelector Function that selects values for the resulting object. * * @return Returns a JavaScript object generated from the dataframe by applying the key and value selector functions. * * @example *
     *
     * const someObject = df.toObject(
     *      row => row.SomeColumn, // Specify the column to use for fields in the object.
     *      row => row.SomeOtherColumn // Specify the column to use as the value for each field.
     * );
     * 
*/ toObject(keySelector: (value: ValueT) => KeyT, valueSelector: (value: ValueT) => FieldT): OutT; /** * Bake the data frame to an array of rows were each rows is an array of values in column order. * * @return Returns an array of rows. Each row is an array of values in column order. * * @example *
     * const rows = df.toRows();
     * 
*/ toRows(): any[][]; /** * Transforms an input dataframe, generating a new dataframe. * The transformer function is called for each element of the input and the collection of outputs creates the generated datafarme. * * `select` is an alias for {@link DataFrame.map}. * * This is the same concept as the JavaScript function `Array.map` but maps over a dataframe rather than an array. * * @param transformer A user-defined transformer function that transforms each element from the input to generate the output. * * @return Returns a new dataframe generated by calling the transformer function over each element of the input. * * @example *
     *
     * function transformer (input) {
     *      const output = {
     *          // ... construct output from input ...
     *      };
     *
     *      return output;
     * }
     *
     * const transformed = dataframe.select(transformer);
     * console.log(transformed.toString());
     * 
*/ select(transformer: SelectorWithIndexFn): IDataFrame; /** * Transforms an input dataframe, generating a new dataframe. * The transformer function is called for each element of the input and the collection of outputs creates the generated datafarme. * * This is the same concept as the JavaScript function `Array.map` but maps over a dataframe rather than an array. * * @param transformer A user-defined transformer function that transforms each element from the input to generate the output. * * @return Returns a new dataframe generated by calling the transformer function over each element of the input. * * @example *
     *
     * function transformer (input) {
     *      const output = {
     *          // ... construct output from input ...
     *      };
     *
     *      return output;
     * }
     *
     * const transformed = dataframe.map(transformer);
     * console.log(transformed.toString());
     * 
*/ map(transformer: SelectorWithIndexFn): IDataFrame; /** * Transforms and flattens an input dataframe, generating a new dataframe. * The transformer function is called for each value in the input dataframe and produces an array that is then flattened into the generated dataframe. * * `selectMany` is an alias for {@link DataFrame.flatMap}. * * This is the same concept as the JavaScript function `Array.flatMap` but maps over a dataframe rather than an array. * * @param transformer A user-defined function that transforms each value into an array that is then flattened into the generated dataframe. * * @return Returns a new dataframe generated by calling the transformer function over each element of the input. * * @example *
     *
     * function transformer (input) {
     *      const output = [];
     *      while (someCondition) {
     *          // ... generate zero or more outputs from a single input ...
     *          output.push(... some generated value ...);
     *      }
     *      return output;
     * }
     *
     * const transformed = dataframe.selectMany(transformer);
     * console.log(transformed.toString());
     * 
*/ selectMany(transformer: SelectorWithIndexFn>): IDataFrame; /** * Transforms and flattens an input dataframe, generating a new dataframe. * The transformer function is called for each value in the input dataframe and produces an array that is then flattened into the generated dataframe. * * This is the same concept as the JavaScript function `Array.flatMap` but maps over a dataframe rather than an array. * * @param transformer A user-defined function that transforms each value into an array that is then flattened into the generated dataframe. * * @return Returns a new dataframe generated by calling the transformer function over each element of the input. * * @example *
     *
     * function transformer (input) {
     *      const output = [];
     *      while (someCondition) {
     *          // ... generate zero or more outputs from a single input ...
     *          output.push(... some generated value ...);
     *      }
     *      return output;
     * }
     *
     * const transformed = dataframe.flatMap(transformer);
     * console.log(transformed.toString());
     * 
*/ flatMap(transformer: SelectorWithIndexFn>): IDataFrame; /** * Transform one or more columns. * * This is equivalent to extracting a {@link Series} with {@link getSeries}, then transforming it with {@link Series.select}, * and finally plugging it back in as the same column using {@link withSeries}. * * @param columnSelectors Object with field names for each column to be transformed. Each field specifies a selector function that transforms that column. * * @return Returns a new dataframe with 1 or more columns transformed. * * @example *
     *
     * const modifiedDf = df.transformSeries({
     *      AColumnToTransform: columnValue => transformRow(columnValue)
     * });
     * 
* * @example *
     *
     * const modifiedDf = df.transformSeries({
     *      ColumnA: columnValue => transformColumnA(columnValue),
     *      ColumnB: columnValue => transformColumnB(columnValue)
     * });
     * 
*/ transformSeries(columnSelectors: IColumnTransformSpec): IDataFrame; /** * Generate new columns based on existing rows. * * This is equivalent to calling {@link select} to transform the original dataframe to a new dataframe with different column, * then using {@link withSeries} to merge each the of both the new and original dataframes. * * @param generator Generator function that transforms each row to produce 1 or more new columns. * Or use a column spec that has fields for each column, the fields specify a generate function that produces the value for each new column. * * @return Returns a new dataframe with 1 or more new columns. * * @example *
     *
     * function produceNewColumns (inputRow) {
     *      const newColumns = {
     *          // ... specify new columns and their values based on the input row ...
     *      };
     *
     *      return newColumns;
     * };
     *
     * const dfWithNewSeries = df.generateSeries(row => produceNewColumns(row));
     * 
* * @example *
     *
     * const dfWithNewSeries = df.generateSeries({
     *      NewColumnA: row => produceNewColumnA(row),
     *      NewColumnB: row => produceNewColumnB(row),
     * })
     * 
*/ generateSeries(generator: SelectorWithIndexFn | IColumnTransformSpec): IDataFrame; /** * Converts (deflates) a dataframe to a {@link Series}. * * @param selector Optional selector function that transforms each row to produce the series. * * @return Returns a series that was created from the deflated from the original dataframe. * * @example *
     *
     * const series = df.deflate(); // Deflate to a series of object.
     * 
* * @example *
     *
     * const series = df.deflate(row => row.SomeColumn); // Extract a particular column.
     * 
*/ deflate(selector?: SelectorWithIndexFn): ISeries; /** * Inflate a named {@link Series} in the dataframe to 1 or more new series in the new dataframe. * * This is the equivalent of extracting the series using {@link getSeries}, transforming them with {@link Series.select} * and then running {@link Series.inflate} to create a new dataframe, then merging each column of the new dataframe * into the original dataframe using {@link withSeries}. * * @param columnName Name of the series to inflate. * @param selector Optional selector function that transforms each value in the column to new columns. If not specified it is expected that each value in the column is an object whose fields define the new column names. * * @return Returns a new dataframe with a column inflated to 1 or more new columns. * * @example *
     *
     * function newColumnGenerator (row) {
     *      const newColumns = {
     *          // ... create 1 field per new column ...
     *      };
     *
     *      return row;
     * }
     *
     * const dfWithNewSeries = df.inflateSeries("SomeColumn", newColumnGenerator);
     * 
*/ inflateSeries(columnName: string, selector?: SelectorWithIndexFn): IDataFrame; /** * Partition a dataframe into a {@link Series} of *data windows*. * Each value in the new series is a rolling chunk of data from the original dataframe. * * @param period The number of data rows to include in each data window. * * @return Returns a new series, each value of which is a chunk of the original dataframe. * * @example *
     *
     * const windows = df.window(2); // Get values in pairs.
     * const pctIncrease = windows.select(pair => (pair.last() - pair.first()) / pair.first());
     * console.log(pctIncrease.toString());
     * 
* * @example *
     *
     * const salesDf = ... // Daily sales data.
     * const weeklySales = salesDf.window(7); // Partition up into weekly data sets.
     * console.log(weeklySales.toString());
     * 
*/ window(period: number): ISeries>; /** * Partition a dataframe into a {@link Series} of *rolling data windows*. * Each value in the new series is a rolling chunk of data from the original dataframe. * * @param period The number of data rows to include in each data window. * * @return Returns a new series, each value of which is a rolling chunk of the original dataframe. * * @example *
     *
     * const salesDf = ... // Daily sales data.
     * const rollingWeeklySales = salesDf.rollingWindow(7); // Get rolling window over weekly sales data.
     * console.log(rollingWeeklySales.toString());
     * 
*/ rollingWindow(period: number): ISeries>; /** * Partition a dataframe into a {@link Series} of variable-length *data windows* * where the divisions between the data chunks are * defined by a user-provided *comparer* function. * * @param comparer Function that compares two adjacent data rows and returns true if they should be in the same window. * * @return Returns a new series, each value of which is a chunk of data from the original dataframe. * * @example *
     *
     * function rowComparer (rowA, rowB) {
     *      if (... rowA should be in the same data window as rowB ...) {
     *          return true;
     *      }
     *      else {
     *          return false;
     *      }
     * };
     *
     * const variableWindows = df.variableWindow(rowComparer);
     */
    variableWindow(comparer: ComparerFn): ISeries>;
    /**
     * Eliminates adjacent duplicate rows.
     *
     * For each group of adjacent rows that are equivalent only returns the last index/row for the group,
     * thus adjacent equivalent rows are collapsed down to the last row.
     *
     * @param selector Optional selector function to determine the value used to compare for equivalence.
     *
     * @return Returns a new dataframe with groups of adjacent duplicate rows collapsed to a single row per group.
     *
     * @example
     * <pre>
     *
     * const dfWithDuplicateRowsRemoved = df.sequentialDistinct(row => row.ColumnA);
     * </pre>
     */
    sequentialDistinct<ToT = ValueT>(selector?: SelectorFn<ValueT, ToT>): IDataFrame<IndexT, ValueT>;

    /**
     * Aggregate the rows in the dataframe to a single result.
     *
     * `aggregate` is similar to {@link DataFrame.reduce} but the parameters are reversed.
     * Please use {@link DataFrame.reduce} in preference to `aggregate`.
     *
     * @param seedOrSelector Optional seed value for producing the aggregation, or the aggregation function / column spec.
     * @param selector Function that takes the seed and then each row in the dataframe and produces the aggregated value.
     *
     * @return Returns a new value that has been aggregated from the dataframe using the 'selector' function.
     *
     * @example
     * <pre>
     *
     * const dailySalesDf = ... daily sales figures for the past month ...
     * const totalSalesForThisMonth = dailySalesDf.aggregate(
     *      0, // Seed - the starting value.
     *      (accumulator, row) => accumulator + row.SalesAmount // Aggregation function.
     * );
     * </pre>
     *
     * @example
     * <pre>
     *
     * const totalSalesAllTime = 500; // We'll seed the aggregation with this value.
     * const dailySalesDf = ... daily sales figures for the past month ...
     * const updatedTotalSalesAllTime = dailySalesDf.aggregate(
     *      totalSalesAllTime,
     *      (accumulator, row) => accumulator + row.SalesAmount
     * );
     * </pre>
     *
     * @example
     * <pre>
     *
     * var salesDataSummary = salesDataDf.aggregate({
     *      TotalSales: df => df.count(),
     *      AveragePrice: df => df.deflate(row => row.Price).average(),
     *      TotalRevenue: df => df.deflate(row => row.Revenue).sum(),
     * });
     * </pre>
*/ aggregate(seedOrSelector: AggregateFn | ToT | IColumnAggregateSpec, selector?: AggregateFn): ToT; /** * Reduces the values in the dataframe to a single result. * * This is the same concept as the JavaScript function `Array.reduce` but reduces a dataframe rather than an array. * @param reducer Function that takes the seed and then each value in the dataframe and produces the reduced value. * @param seed Optional initial value, if not specifed the first value in the dataframe is used as the initial value. * * @return Returns a value that has been reduced from the input dataframe by passing each element through the reducer function. * * @example *
     *
     * const dailyRecords = ... daily records for the past month ...
     * const totalSales = dailyRecords.reduce(
     *      (accumulator, row) => accumulator + row.salesAmount, // Reducer function.
     *      0  // Seed value, the starting value.
     * );
     * 
* * @example *
     *
     * const previousSales = 500; // We'll seed the reduction with this value.
     * const dailyRecords = ... daily records for the past month ...
     * const updatedSales = dailyRecords.reduce(
     *      (accumulator, row) => accumulator + row.salesAmount,
     *      previousSales
     * );
     * 
*/ reduce(reducer: AggregateFn, seed?: ToT): ToT; /** * Skip a number of rows in the dataframe. * * @param numValues Number of rows to skip. * * @return Returns a new dataframe with the specified number of rows skipped. * * @example *
     *
     * const dfWithRowsSkipped = df.skip(10); // Skip 10 rows in the original dataframe.
     * 
*/ skip(numValues: number): IDataFrame; /** * Skips rows in the dataframe while a condition evaluates to true or truthy. * * @param predicate Returns true/truthy to continue to skip rows in the original dataframe. * * @return Returns a new dataframe with all initial sequential rows removed while the predicate returned true/truthy. * * @example *
     *
     * const dfWithRowsSkipped = df.skipWhile(row => row.CustomerName === "Fred"); // Skip initial customers named Fred.
     * 
*/ skipWhile(predicate: PredicateFn): IDataFrame; /** * Skips rows in the dataframe untils a condition evaluates to true or truthy. * * @param predicate Return true/truthy to stop skipping rows in the original dataframe. * * @return Returns a new dataframe with all initial sequential rows removed until the predicate returned true/truthy. * * @example *
     *
     * const dfWithRowsSkipped = df.skipUntil(row => row.CustomerName === "Fred"); // Skip initial customers until we find Fred.
     * 
*/ skipUntil(predicate: PredicateFn): IDataFrame; /** * Take a number of rows from the dataframe. * * @param numValues Number of rows to take. * * @return Returns a new dataframe with only the specified number of rows taken from the original dataframe. * * @example *
     *
     * const dfWithRowsTaken = df.take(15); // Take only the first 15 rows from the original dataframe.
     * 
*/ take(numRows: number): IDataFrame; /** * Takes rows from the dataframe while a condition evaluates to true or truthy. * * @param predicate Returns true/truthy to continue to take rows from the original dataframe. * * @return Returns a new dataframe with only the initial sequential rows that were taken while the predicate returned true/truthy. * * @example *
     *
     * const dfWithRowsTaken = df.takeWhile(row => row.CustomerName === "Fred"); // Take only initial customers named Fred.
     * 
*/ takeWhile(predicate: PredicateFn): IDataFrame; /** * Takes rows from the dataframe untils a condition evaluates to true or truthy. * * @param predicate Return true/truthy to stop taking rows in the original dataframe. * * @return Returns a new dataframe with only the initial sequential rows taken until the predicate returned true/truthy. * * @example *
     *
     * const dfWithRowsTaken = df.takeUntil(row => row.CustomerName === "Fred"); // Take all initial customers until we find Fred.
     * 
*/ takeUntil(predicate: PredicateFn): IDataFrame; /** * Count the number of rows in the dataframe * * @return Returns the count of all rows. * * @example *
     *
     * const numRows = df.count();
     * 
*/ count(): number; /** * Get the first row of the dataframe. * * @return Returns the first row of the dataframe. * * @example *
     *
     * const firstRow = df.first();
     * 
*/ first(): ValueT; /** * Get the last row of the dataframe. * * @return Returns the last row of the dataframe. * * @example *
     *
     * const lastRow = df.last();
     * 
*/ last(): ValueT; /** * Get the row, if there is one, with the specified index. * * @param index Index to for which to retreive the row. * * @return Returns the row from the specified index in the dataframe or undefined if there is no such index in the present in the dataframe. * * @example *
     *
     * const row = df.at(5); // Get the row at index 5 (with a default 0-based index).
     * 
* * @example *
     *
     * const date = ... some date ...
     * // Retreive the row with specified date from a time-series dataframe (assuming date indexed has been applied).
     * const row = df.at(date);
     * 
*/ at(index: IndexT): ValueT | undefined; /** * Get X rows from the start of the dataframe. * Pass in a negative value to get all rows at the head except for X rows at the tail. * * @param numValues Number of rows to take. * * @return Returns a new dataframe that has only the specified number of rows taken from the start of the original dataframe. * * @examples *
     *
     * const sample = df.head(10); // Take a sample of 10 rows from the start of the dataframe.
     * 
*/ head(numValues: number): IDataFrame; /** * Get X rows from the end of the dataframe. * Pass in a negative value to get all rows at the tail except X rows at the head. * * @param numValues Number of rows to take. * * @return Returns a new dataframe that has only the specified number of rows taken from the end of the original dataframe. * * @examples *
     *
     * const sample = df.tail(12); // Take a sample of 12 rows from the end of the dataframe.
     * 
*/ tail(numValues: number): IDataFrame; /** * Filter the dataframe through a user-defined predicate function. * * `where` is an alias for {@link DataFrame.filter}. * * This is the same concept as the JavaScript function `Array.filter` but filters a dataframe rather than an array. * * @param predicate Predicate function to filter values from the dataframe. Returns true/truthy to keep elements, or false/falsy to omit elements. * * @return Returns a new dataframe containing only the values from the original dataframe that matched the predicate. * * @example *
     *
     * // Filter so we only have sales figures greater than 100.
     * const filtered = dataframe.where(row => row.salesFigure > 100);
     * console.log(filtered.toArray());
     * 
*/ where(predicate: PredicateFn): IDataFrame; /** * Filter the dataframe through a user-defined predicate function. * * This is the same concept as the JavaScript function `Array.filter` but filters a dataframe rather than an array. * * @param predicate Predicate function to filter values from the dataframe. Returns true/truthy to keep elements, or false/falsy to omit elements. * * @return Returns a new dataframe containing only the values from the original dataframe that matched the predicate. * * @example *
     *
     * // Filter so we only have sales figures greater than 100.
     * const filtered = dataframe.filter(row => row.salesFigure > 100);
     * console.log(filtered.toArray());
     * 
*/ filter(predicate: PredicateFn): IDataFrame; /** * Invoke a callback function for each row in the dataframe. * * @param callback The calback function to invoke for each row. * * @return Returns the original dataframe with no modifications. * * @example *
     *
     * df.forEach(row => {
     *      // ... do something with the row ...
     * });
     * 
*/ forEach(callback: CallbackFn): IDataFrame; /** * Evaluates a predicate function for every row in the dataframe to determine * if some condition is true/truthy for **all** rows in the dataframe. * * @param predicate Predicate function that receives each row. It should returns true/truthy for a match, otherwise false/falsy. * * @return Returns true if the predicate has returned true or truthy for every row in the dataframe, otherwise returns false. Returns false for an empty dataframe. * * @example *
     *
     * const everyoneIsNamedFred = df.all(row => row.CustomerName === "Fred"); // Check if all customers are named Fred.
     * 
*/ all(predicate: PredicateFn): boolean; /** * Evaluates a predicate function for every row in the dataframe to determine * if some condition is true/truthy for **any** of rows in the dataframe. * * If no predicate is specified then it simply checks if the dataframe contains more than zero rows. * * @param predicate Optional predicate function that receives each row. It should return true/truthy for a match, otherwise false/falsy. * * @return Returns true if the predicate has returned truthy for any row in the dataframe, otherwise returns false. * If no predicate is passed it returns true if the dataframe contains any rows at all. * Returns false for an empty dataframe. * * @example *
     *
     * const anyFreds = df.any(row => row.CustomerName === "Fred"); // Do we have any customers named Fred?
     * 
* * @example *
     *
     * const anyCustomers = df.any(); // Do we have any customers at all?
     * 
*/ any(predicate?: PredicateFn): boolean; /** * Evaluates a predicate function for every row in the dataframe to determine * if some condition is true/truthy for **none** of rows in the dataframe. * * If no predicate is specified then it simply checks if the dataframe contains zero rows. * * @param predicate Optional predicate function that receives each row. It should return true/truthy for a match, otherwise false/falsy. * * @return Returns true if the predicate has returned truthy for zero rows in the dataframe, otherwise returns false. Returns false for an empty dataframe. * * @example *
     *
     * const noFreds = df.none(row => row.CustomerName === "Fred"); // Do we have zero customers named Fred?
     * 
* * @example *
     *
     * const noCustomers = df.none(); // Do we have zero customers?
     * 
*/ none(predicate?: PredicateFn): boolean; /** * Gets a new dataframe containing all rows starting at or after the specified index value. * * @param indexValue The index value at which to start the new dataframe. * * @return Returns a new dataframe containing all rows starting at or after the specified index value. * * @example *
     *
     * const df = new DataFrame({
     *      index: [0, 1, 2, 3], // This is the default index.
     *      values: [10, 20, 30, 40],
     * });
     *
     * const lastHalf = df.startAt(2);
     * expect(lastHalf.toArray()).to.eql([30, 40]);
     * 
* * @example *
     *
     * const timeSeriesDf = ... a dataframe indexed by date/time ...
     *
     * // Get all rows starting at (or after) a particular date.
     * const result = timeSeriesDf.startAt(new Date(2016, 5, 4));
     * 
*/ startAt(indexValue: IndexT): IDataFrame; /** * Gets a new dataframe containing all rows up until and including the specified index value (inclusive). * * @param indexValue The index value at which to end the new dataframe. * * @return Returns a new dataframe containing all rows up until and including the specified index value. * * @example *
     *
     * const df = new DataFrame({
     *      index: [0, 1, 2, 3], // This is the default index.
     *      values: [10, 20, 30, 40],
     * });
     *
     * const firstHalf = df.endAt(1);
     * expect(firstHalf.toArray()).to.eql([10, 20]);
     * 
* * @example *
     *
     * const timeSeriesDf = ... a dataframe indexed by date/time ...
     *
     * // Get all rows ending at a particular date.
     * const result = timeSeriesDf.endAt(new Date(2016, 5, 4));
     * 
*/ endAt(indexValue: IndexT): IDataFrame; /** * Gets a new dataframe containing all rows up to the specified index value (exclusive). * * @param indexValue The index value at which to end the new dataframe. * * @return Returns a new dataframe containing all rows up to (but not including) the specified index value. * * @example *
     *
     * const df = new DataFrame({
     *      index: [0, 1, 2, 3], // This is the default index.
     *      values: [10, 20, 30, 40],
     * });
     *
     * const firstHalf = df.before(2);
     * expect(firstHalf.toArray()).to.eql([10, 20]);
     * 
* * @example *
     *
     * const timeSeriesDf = ... a dataframe indexed by date/time ...
     *
     * // Get all rows before the specified date.
     * const result = timeSeriesDf.before(new Date(2016, 5, 4));
     * 
*/ before(indexValue: IndexT): IDataFrame; /** * Gets a new dataframe containing all rows after the specified index value (exclusive). * * @param indexValue The index value after which to start the new dataframe. * * @return Returns a new dataframe containing all rows after the specified index value. * * @example *
     *
     * const df = new DataFrame({
     *      index: [0, 1, 2, 3], // This is the default index.
     *      values: [10, 20, 30, 40],
     * });
     *
     * const lastHalf = df.before(1);
     * expect(lastHalf.toArray()).to.eql([30, 40]);
     * 
* * @example *
     *
     * const timeSeriesDf = ... a dataframe indexed by date/time ...
     *
     * // Get all rows after the specified date.
     * const result = timeSeriesDf.after(new Date(2016, 5, 4));
     * 
*/ after(indexValue: IndexT): IDataFrame; /** * Gets a new dataframe containing all rows between the specified index values (inclusive). * * @param startIndexValue The index at which to start the new dataframe. * @param endIndexValue The index at which to end the new dataframe. * * @return Returns a new dataframe containing all values between the specified index values (inclusive). * * @example *
     *
     * const df = new DataFrame({
     *      index: [0, 1, 2, 3, 4, 6], // This is the default index.
     *      values: [10, 20, 30, 40, 50, 60],
     * });
     *
     * const middleSection = df.between(1, 4);
     * expect(middleSection.toArray()).to.eql([20, 30, 40, 50]);
     * 
* * @example *
     *
     * const timeSeriesDf = ... a dataframe indexed by date/time ...
     *
     * // Get all rows between the start and end dates (inclusive).
     * const result = timeSeriesDf.after(new Date(2016, 5, 4), new Date(2016, 5, 22));
     * 
*/ between(startIndexValue: IndexT, endIndexValue: IndexT): IDataFrame; /** * Format the dataframe for display as a string. * This forces lazy evaluation to complete. * * @return Generates and returns a string representation of the dataframe. * * @example *
     *
     * console.log(df.toString());
     * 
*/ toString(): string; /** * Parse a column with string values and convert it to a column with int values. * * @param columnNameOrNames Specifies the column name or array of column names to parse. * * @return Returns a new dataframe with values of particular named column(s) parsed from strings to ints. * * @example *
     *
     * const parsed = df.parseInts("MyIntColumn");
     * 
* * @example *
     *
     * const parsed = df.parseInts(["MyIntColumnA", "MyIntColumnA"]);
     * 
*/ parseInts(columnNameOrNames: string | string[]): IDataFrame; /** * Parse a column with string values and convert it to a column with float values. * * @param columnNameOrNames Specifies the column name or array of column names to parse. * * @return Returns a new dataframe with values of particular named column(s) parsed from strings to floats. * * @example *
     *
     * const parsed = df.parseFloats("MyFloatColumn");
     * 
* * @example *
     *
     * const parsed = df.parseFloats(["MyFloatColumnA", "MyFloatColumnA"]);
     * 
*/ parseFloats(columnNameOrNames: string | string[]): IDataFrame; /** * Parse a column with string values and convert it to a column with date values. * * @param columnNameOrNames Specifies the column name or array of column names to parse. * @param formatString Optional formatting string for dates. * * Moment is used for date parsing. * https://momentjs.com * * @return Returns a new dataframe with values of particular named column(s) parsed from strings to dates. * * @example *
     *
     * const parsed = df.parseDates("MyDateColumn");
     * 
* * @example *
     *
     * const parsed = df.parseDates(["MyDateColumnA", "MyDateColumnA"]);
     * 
*/ parseDates(columnNameOrNames: string | string[], formatString?: string): IDataFrame; /** * Convert a column of values of different types to a column of string values. * * @param columnNames Specifies the column name or array of column names to convert to strings. Can also be a format spec that specifies which columns to convert and what their format should be. * @param formatString Optional formatting string for dates. * * Numeral.js is used for number formatting. * http://numeraljs.com/ * * Moment is used for date formatting. * https://momentjs.com/docs/#/parsing/string-format/ * * @return Returns a new dataframe with a particular named column converted from values to strings. * * @example *
     *
     * const result = df.toStrings("MyDateColumn", "YYYY-MM-DD");
     * 
* * @example *
     *
     * const result = df.toStrings("MyFloatColumn", "0.00");
     * 
*/ toStrings(columnNames: string | string[] | IFormatSpec, formatString?: string): IDataFrame; /** * Produces a new dataframe with all string values truncated to the requested maximum length. * * @param maxLength The maximum length of the string values after truncation. * * @return Returns a new dataframe with all strings truncated to the specified maximum length. * * @example *
     *
     * // Truncate all string columns to 100 characters maximum.
     * const truncatedDf = df.truncateString(100);
     * 
*/ truncateStrings(maxLength: number): IDataFrame; /** * Produces a new dataframe with all number values rounded to the specified number of places. * * @param numDecimalPlaces The number of decimal places, defaults to 2. * * @returns Returns a new dataframe with all number values rounded to the specified number of places. * * @example *
     *
     * const df = ... your data frame ...
     * const rounded = df.round(); // Round numbers to two decimal places.
     * 
* * @example *
     *
     * const df = ... your data frame ...
     * const rounded = df.round(3); // Round numbers to three decimal places.
     * 
*/ round(numDecimalPlaces?: number): IDataFrame; /** * Forces lazy evaluation to complete and 'bakes' the dataframe into memory. * * @return Returns a dataframe that has been 'baked', all lazy evaluation has completed. * * @example *
     *
     * const baked = df.bake();
     * 
*/ bake(): IDataFrame; /** * Gets a new dataframe in reverse order. * * @return Returns a new dataframe that is the reverse of the input. * * @example *
     *
     * const reversedDf = df.reverse();
     * 
*/ reverse(): IDataFrame; /** * Returns only the set of rows in the dataframe that are distinct according to some criteria. * This can be used to remove duplicate rows from the dataframe. * * @param selector User-defined selector function that specifies the criteria used to make comparisons for duplicate rows. * Note that the selector determines the object used for the comparison. If the selector returns a new instance of an array or a * javascript object, distinct will always include all rows since the object instances are different even if the members are the same. * * @return Returns a dataframe containing only unique values as determined by the 'selector' function. * * @example *
     *
     * // Remove duplicate rows by customer id. Will return only a single row per customer.
     * const distinctCustomers = salesDf.distinct(sale => sale.CustomerId);
     * 
* * @example * *
     * // Remove duplicate rows across mutliple columns
     * const safeJoinChar = '$';
     * const distinctCustomers = salesDf.distinct(sale => [sale.CustomerId, sale.MonthOfYear].join(safeJoinChar));
     * 
*/ distinct(selector?: SelectorFn): IDataFrame; /** * Collects rows in the dataframe into a series of groups according to the user-defined selector function that defines the group for each row. * * @param selector User-defined selector function that defines the value to group by. * * @return Returns a {@link Series} of groups. Each group is a dataframe with values that have been grouped by the 'selector' function. * * @example *
     *
     * const salesDf = ... product sales ...
     * const salesByProduct = salesDf.groupBy(sale => sale.ProductId);
     * for (const productSalesGroup of salesByProduct) {
     *      // ... do something with each product group ...
     *      const productId = productSalesGroup.first().ProductId;
     *      const totalSalesForProduct = productSalesGroup.deflate(sale => sale.Amount).sum();
     *      console.log(totalSalesForProduct);
     * }
     * 
*/ groupBy(selector: SelectorWithIndexFn): ISeries>; /** * Collects rows in the dataframe into a series of groups according to a user-defined selector function that identifies adjacent rows that should be in the same group. * * @param selector Optional selector that defines the value to group by. * * @return Returns a {@link Series} of groups. Each group is a dataframe with values that have been grouped by the 'selector' function. * * @example *
     *
     * // Some ultra simple stock trading strategy backtesting...
     * const dailyStockPriceDf = ... daily stock price for a company ...
     * const priceGroups  = dailyStockPriceDf.groupSequentialBy(day => day.close > day.movingAverage);
     * for (const priceGroup of priceGroups) {
     *      // ... do something with each stock price group ...
     *
     *      const firstDay = priceGroup.first();
     *      if (firstDay.close > firstDay.movingAverage) {
     *          // This group of days has the stock price above its moving average.
     *          // ... maybe enter a long trade here ...
     *      }
     *      else {
     *          // This group of days has the stock price below its moving average.
     *          // ... maybe enter a short trade here ...
     *      }
     * }
     * 
*/ groupSequentialBy(selector?: SelectorFn): ISeries>; /** * Concatenate multiple dataframes into a single dataframe. * * @param dataframes Array of dataframes to concatenate. * * @return Returns a single dataframe concatenated from multiple input dataframes. * * @example *
     *
     * const df1 = ...
     * const df2 = ...
     * const df3 = ...
     * const concatenatedDf = DataFrame.concat([df1, df2, df3]);
     * 
* * @example *
     *
     * const dfs = [... array of dataframes...];
     * const concatenatedDf = DataFrame.concat(dfs);
     * 
*/ static concat(dataframes: IDataFrame[]): IDataFrame; /** * Concatenate multiple other dataframes onto this dataframe. * * @param dataframes Multiple arguments. Each can be either a dataframe or an array of dataframes. * * @return Returns a single dataframes concatenated from multiple input dataframes. * * @example *
     *
     * const concatenatedDf = dfA.concat(dfB);
     * 
* * @example *
     *
     * const concatenatedDf = dfA.concat(dfB, dfC);
     * 
* * @example *
     *
     * const concatenatedDf = dfA.concat([dfB, dfC]);
     * 
* * @example *
     *
     * const concatenatedDf = dfA.concat(dfB, [dfC, dfD]);
     * 
* * @example *
     *
     * const otherDfs = [... array of dataframes...];
     * const concatenatedDf = dfA.concat(otherDfs);
     * 
*/ concat(...dataframes: (IDataFrame[] | IDataFrame)[]): IDataFrame; /** * Zip (or merge) together multiple dataframes to create a new dataframe. * Preserves the index of the first dataframe. * * @param input An iterable of datafames to be zipped. * @param zipper User-defined zipper function that merges rows. It produces rows for the new dataframe based-on rows from the input dataframes. * * @return Returns a single dataframe zipped (or merged) from multiple input dataframes. * * @example *
    *
    * function produceNewRow (inputRows) {
    *       const outputRow = {
    *           // Produce output row based on the contents of the input rows.
    *       };
    *       return outputRow;
    * }
    *
    * const inputDfs = [... array of input dataframes ...];
    * const zippedDf = DataFrame.zip(inputDfs, produceNewRow);
    *
    * 
* * @example *
    *
    * function produceNewRow (inputRows) {
    *       const outputRow = {
    *           // Produce output row based on the contents of the input rows.
    *       };
    *       return outputRow;
    * }
    *
    * const dfA = new DataFrame([ { Value: 10 }, { Value: 20 }, { Value: 30 }]);
    * const dfB = new DataFrame([ { Value: 100 }, { Value: 200 }, { Value: 300 }]);
    * const zippedDf = DataFrame.zip([dfA, dfB], produceNewRow);
    * 
*/ static zip(dataframes: Iterable>, zipper: ZipNFn): IDataFrame; /** * Zip (or merge) together multiple dataframes to create a new dataframe. * Preserves the index of the first dataframe. * * @param s2, s3, s4 Multiple dataframes to zip. * @param zipper User-defined zipper function that merges rows. It produces rows for the new dataframe based-on rows from the input dataframes. * * @return Returns a single dataframe zipped (or merged) from multiple input dataframes. * * @example *
    *
    * function produceNewRow (rowA, rowB) {
    *       const outputRow = {
    *           ValueA: rowA.Value,
    *           ValueB: rowB.Value,
    *       };
    *       return outputRow;
    * }
    *
    * const dfA = new DataFrame([ { Value: 10 }, { Value: 20 }, { Value: 30 }]);
    * const dfB = new DataFrame([ { Value: 100 }, { Value: 200 }, { Value: 300 }]);
    * const zippedDf = dfA.zip(dfB, produceNewRow);
    * 
*/ zip(s2: IDataFrame, zipper: Zip2Fn): IDataFrame; zip(s2: IDataFrame, s3: IDataFrame, zipper: Zip3Fn): IDataFrame; zip(s2: IDataFrame, s3: IDataFrame, s4: IDataFrame, zipper: Zip3Fn): IDataFrame; /** * Sorts the dataframe in ascending order by a value defined by the user-defined selector function. * * @param selector User-defined selector function that selects the value to sort by. * * @return Returns a new dataframe that has been ordered accorrding to the value chosen by the selector function. * * @example *
     *
     * // Order sales by amount from least to most.
     * const orderedDf = salesDf.orderBy(sale => sale.Amount);
     * 
*/ orderBy(selector: SelectorWithIndexFn): IOrderedDataFrame; /** * Sorts the dataframe in descending order by a value defined by the user-defined selector function. * * @param selector User-defined selector function that selects the value to sort by. * * @return Returns a new dataframe that has been ordered accorrding to the value chosen by the selector function. * * @example *
     *
     * // Order sales by amount from most to least
     * const orderedDf = salesDf.orderByDescending(sale => sale.Amount);
     * 
*/ orderByDescending(selector: SelectorWithIndexFn): IOrderedDataFrame; /** * Creates a new dataframe by merging two input dataframes. * The resulting dataframe contains the union of rows from the two input dataframes. * These are the unique combination of rows in both dataframe. * This is basically a concatenation and then elimination of duplicates. * * @param other The other dataframes to merge. * @param selector Optional user-defined selector function that selects the value to compare to determine distinctness. * * @return Returns the union of the two dataframes. * * @example *
     *
     * const dfA = ...
     * const dfB = ...
     * const merged = dfA.union(dfB);
     * 
* * @example *
     *
     * // Merge two sets of customer records that may contain the same
     * // customer record in each set. This is basically a concatenation
     * // of the dataframes and then an elimination of any duplicate records
     * // that result.
     * const customerRecordsA = ...
     * const customerRecordsB = ...
     * const mergedCustomerRecords = customerRecordsA.union(
     *      customerRecordsB,
     *      customerRecord => customerRecord.CustomerId
     * );
     * 
* * * @example *
     *
     * // Note that you can achieve the exact same result as the previous
     * // example by doing a {@link DataFrame.concat} and {@link DataFrame.distinct}
     * // of the dataframes and then an elimination of any duplicate records
     * // that result.
     * const customerRecordsA = ...
     * const customerRecordsB = ...
     * const mergedCustomerRecords = customerRecordsA
     *      .concat(customerRecordsB)
     *      .distinct(customerRecord => customerRecord.CustomerId);
     * 
* */ union(other: IDataFrame, selector?: SelectorFn): IDataFrame; /** * Creates a new dataframe by merging two input dataframes. * The resulting dataframe contains the intersection of rows from the two input dataframes. * These are only the rows that appear in both dataframes. * * @param inner The inner dataframe to merge (the dataframe you call the function on is the 'outer' dataframe). * @param outerSelector Optional user-defined selector function that selects the key from the outer dataframe that is used to match the two dataframes. * @param innerSelector Optional user-defined selector function that selects the key from the inner dataframe that is used to match the two dataframes. * * @return Returns a new dataframe that contains the intersection of rows from the two input dataframes. * * @example *
     *
     * const dfA = ...
     * const dfB = ...
     * const mergedDf = dfA.intersection(dfB);
     * 
* * @example *
     *
     * // Merge two sets of customer records to find only the
     * // customers that appears in both.
     * const customerRecordsA = ...
     * const customerRecordsB = ...
     * const intersectionOfCustomerRecords = customerRecordsA.intersection(
     *      customerRecordsB,
     *      customerRecord => customerRecord.CustomerId
     * );
     * 
* */ intersection(inner: IDataFrame, outerSelector?: SelectorFn, innerSelector?: SelectorFn): IDataFrame; /** * Creates a new dataframe by merging two input dataframes. * The resulting dataframe contains only the rows from the 1st dataframe that don't appear in the 2nd dataframe. * This is essentially subtracting the rows from the 2nd dataframe from the 1st and creating a new dataframe with the remaining rows. * * @param inner The inner dataframe to merge (the dataframe you call the function on is the 'outer' dataframe). * @param outerSelector Optional user-defined selector function that selects the key from the outer dataframe that is used to match the two dataframes. * @param innerSelector Optional user-defined selector function that selects the key from the inner dataframe that is used to match the two dataframes. * * @return Returns a new dataframe that contains only the rows from the 1st dataframe that don't appear in the 2nd dataframe. * * @example *
     *
     * const dfA = ...
     * const dfB = ...
     * const remainingDf = dfA.except(dfB);
     * 
* * @example *
     *
     * // Find the list of customers haven't bought anything recently.
     * const allCustomers = ... list of all customers ...
     * const recentCustomers = ... list of customers who have purchased recently ...
     * const remainingCustomers = allCustomers.except(
     *      recentCustomers,
     *      customerRecord => customerRecord.CustomerId
     * );
     * 
*/ except(inner: IDataFrame, outerSelector?: SelectorFn, innerSelector?: SelectorFn): IDataFrame; /** * Creates a new dataframe by merging two input dataframes. * The resulting dataframe contains only those rows that have matching keys in both input dataframes. * * @param inner The 'inner' dataframe to join (the dataframe you are callling the function on is the 'outer' dataframe). * @param outerKeySelector User-defined selector function that chooses the join key from the outer dataframe. * @param innerKeySelector User-defined selector function that chooses the join key from the inner dataframe. * @param resultSelector User-defined function that merges outer and inner values. * * @return Returns the new merged dataframe. * * @example *
      *
      * // Join together two sets of customers to find those
      * // that have bought both product A and product B.
      * const customerWhoBoughtProductA = ...
      * const customerWhoBoughtProductB = ...
      * const customersWhoBoughtBothProductsDf = customerWhoBoughtProductA.join(
      *          customerWhoBoughtProductB,
      *          customerA => customerA.CustomerId, // Join key.
      *          customerB => customerB.CustomerId, // Join key.
      *          (customerA, customerB) => {
      *              return {
      *                  // ... merge the results ...
      *              };
      *          }
      *      );
      * 
*/ join(inner: IDataFrame, outerKeySelector: SelectorFn, innerKeySelector: SelectorFn, resultSelector: JoinFn): IDataFrame; /** * Creates a new dataframe by merging two input dataframes. * The resulting dataframe contains only those rows that are only present in one or the other of the dataframes, or both. * * @param inner The 'inner' dataframe to join (the dataframe you are callling the function on is the 'outer' dataframe). * @param outerKeySelector User-defined selector function that chooses the join key from the outer dataframe. * @param innerKeySelector User-defined selector function that chooses the join key from the inner dataframe. * @param resultSelector User-defined function that merges outer and inner values. * * Implementation from here: * * http://blogs.geniuscode.net/RyanDHatch/?p=116 * * @return Returns the new merged dataframe. * * @example *
     *
     * // Join together two sets of customers to find those
     * // that have bought either product A or product B, or both.
     * const customerWhoBoughtProductA = ...
     * const customerWhoBoughtProductB = ...
     * const customersWhoBoughtEitherProductButNotBothDf = customerWhoBoughtProductA.joinOuter(
     *          customerWhoBoughtProductB,
     *          customerA => customerA.CustomerId, // Join key.
     *          customerB => customerB.CustomerId, // Join key.
     *          (customerA, customerB) => {
     *              return {
     *                  // ... merge the results ...
     *              };
     *          }
     *      );
     * 
*/ joinOuter(inner: IDataFrame, outerKeySelector: SelectorFn, innerKeySelector: SelectorFn, resultSelector: JoinFn): IDataFrame; /** * Creates a new dataframe by merging two input dataframes. * The resulting dataframe contains only those rows that present either in both dataframes or only in the outer (left) dataframe. * * @param inner The 'inner' dataframe to join (the dataframe you are callling the function on is the 'outer' dataframe). * @param outerKeySelector User-defined selector function that chooses the join key from the outer dataframe. * @param innerKeySelector User-defined selector function that chooses the join key from the inner dataframe. * @param resultSelector User-defined function that merges outer and inner values. * * Implementation from here: * * http://blogs.geniuscode.net/RyanDHatch/?p=116 * * @return Returns the new merged dataframe. * * @example *
     *
     * // Join together two sets of customers to find those
     * // that have bought either just product A or both product A and product B.
     * const customerWhoBoughtProductA = ...
     * const customerWhoBoughtProductB = ...
     * const boughtJustAorAandB = customerWhoBoughtProductA.joinOuterLeft(
     *          customerWhoBoughtProductB,
     *          customerA => customerA.CustomerId, // Join key.
     *          customerB => customerB.CustomerId, // Join key.
     *          (customerA, customerB) => {
     *              return {
     *                  // ... merge the results ...
     *              };
     *          }
     *      );
     * 
*/ joinOuterLeft(inner: IDataFrame, outerKeySelector: SelectorFn, innerKeySelector: SelectorFn, resultSelector: JoinFn): IDataFrame; /** * Creates a new dataframe by merging two input dataframes. * The resulting dataframe contains only those rows that present either in both dataframes or only in the inner (right) dataframe. * * @param inner The 'inner' dataframe to join (the dataframe you are callling the function on is the 'outer' dataframe). * @param outerKeySelector User-defined selector function that chooses the join key from the outer dataframe. * @param innerKeySelector User-defined selector function that chooses the join key from the inner dataframe. * @param resultSelector User-defined function that merges outer and inner values. * * Implementation from here: * * http://blogs.geniuscode.net/RyanDHatch/?p=116 * * @return Returns the new merged dataframe. * * @example *
     *
     * // Join together two sets of customers to find those
     * // that have bought either just product B or both product A and product B.
     * const customerWhoBoughtProductA = ...
     * const customerWhoBoughtProductB = ...
     * const boughtJustAorAandB = customerWhoBoughtProductA.joinOuterRight(
     *          customerWhoBoughtProductB,
     *          customerA => customerA.CustomerId, // Join key.
     *          customerB => customerB.CustomerId, // Join key.
     *          (customerA, customerB) => {
     *              return {
     *                  // ... merge the results ...
     *              };
     *          }
     *      );
     * 
*/ joinOuterRight(inner: IDataFrame, outerKeySelector: SelectorFn, innerKeySelector: SelectorFn, resultSelector: JoinFn): IDataFrame; /** * Produces a summary of dataframe. * * @param spec Optional parameter that specifies which columns to aggregate and how to aggregate them. Leave this out to produce a default summary of all columns. * * @returns A object with fields that summary the values in the dataframe. * * @example *
     *
     * const summary = df.summarize();
     * console.log(summary);
     * 
* * @example *
     *
     * const summary = df.summarize({ // Summarize using pre-defined functions.
     *      Column1: Series.sum,
     *      Column2: Series.average,
     *      Column3: Series.count,
     * });
     * console.log(summary);
     * 
* * @example *
     *
     * const summary = df.summarize({ // Summarize using custom functions.
     *      Column1: series => series.sum(),
     *      Column2: series => series.std(),
     *      ColumnN: whateverFunctionYouWant,
     * });
     * console.log(summary);
     * 
* * @example *
     *
     * const summary = df.summarize({ // Multiple output fields per column.
     *      Column1: {
     *          OutputField1: Series.sum,
     *          OutputField2: Series.average,
     *      },
     *      Column2: {
     *          OutputField3: series => series.sum(),
     *          OutputFieldN: whateverFunctionYouWant,
     *      },
     * });
     * console.log(summary);
     * 
*/ summarize(spec?: IMultiColumnAggregatorSpec): OutputValueT; /** * Reshape (or pivot) a dataframe based on column values. * This is a powerful function that combines grouping, aggregation and sorting. * * @param columnOrColumns Column name whose values make the new DataFrame's columns. * @param valueColumnNameOrSpec Column name or column spec that defines the columns whose values should be aggregated. * @param aggregator Optional function used to aggregate pivoted values. * * @return Returns a new dataframe that has been pivoted based on a particular column's values. * * @example *
     *
     * // Simplest example.
     * // Group by the values in 'PivotColumn'.
     * // The unique set of values in 'PivotColumn' becomes the columns in the resulting dataframe.
     * // The column 'ValueColumn' is aggregated for each group and this becomes the
     * // values in the new output column.
     * const pivottedDf = df.pivot("PivotColumn", "ValueColumn", values => values.average());
     * 
* * @example *
     *
     * // Multiple input column example.
     * // Similar to the previous example except now we are aggregating multiple input columns.
     * // Each group has the average computed for 'ValueColumnA' and the sum for 'ValueColumnB'.
     * const pivottedDf = df.pivot("PivotColumn", {
     *      ValueColumnA: aValues => aValues.average(),
     *      ValueColumnB:  bValues => bValues.sum(),
     * });
     * 
* * @example *
     *
     * // Multiple output column example.
     * // Similar to the previous example except now we are doing multiple aggregations for each input column.
     * // The example produces an output dataframe with columns OutputColumnA, B, C and D.
     * // OutputColumnA/B are the sum and average of ValueColumnA across each group as defined by PivotColumn.
     * // OutputColumnC/D are the sum and average of ValueColumnB across each group as defined by PivotColumn.
     * const pivottedDf = df.pivot("PivotColumn", {
     *      ValueColumnA: {
     *          OutputColumnA: aValues => aValues.sum(),
     *          OutputColumnB: aValues => aValues.average(),
     *      },
     *      ValueColumnB: {
     *          OutputColumnC: bValues => bValues.sum(),
     *          OutputColumnD: bValues => bValues.average(),
     *      },
     * });
     * 
* * @example *
     *
     * // Full multi-column example.
     * // Similar to the previous example now we are pivotting on multiple columns.
     * // We now group by the 'PivotColumnA' and then by 'PivotColumnB', effectively creating a
     * // multi-level group.
     * const pivottedDf = df.pivot(["PivotColumnA", "PivotColumnB" ], {
     *      ValueColumnA: aValues => aValues.average(),
     *      ValueColumnB:  bValues => bValues.sum(),
     * });
     * 
* * @example *
     *
     * // To help understand the pivot function, let's look at what it does internally.
     * // Take the simplest example:
     * const pivottedDf = df.pivot("PivotColumn", "ValueColumn", values => values.average());
     *
     * // If we expand out the internals of the pivot function, it will look something like this:
     * const pivottedDf = df.groupBy(row => row.PivotColumn)
     *          .select(group => ({
     *              PivotColumn: group.deflate(row => row.ValueColumn).average()
     *          }))
     *          .orderBy(row  => row.PivotColumn);
     *
     * // You can see that pivoting a dataframe is the same as grouping, aggregating and sorting it.
     * // Does pivoting seem simpler now?
     *
     * // It gets more complicated than that of course, because the pivot function supports multi-level nested
     * // grouping and aggregation of multiple columns. So a full expansion of the pivot function is rather complex.
     * 
*/ pivot(columnOrColumns: string | Iterable, valueColumnNameOrSpec: string | IMultiColumnAggregatorSpec, aggregator?: (values: ISeries) => any): IDataFrame; /** * Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. * This is a powerful function that combines grouping, aggregation and sorting. * * @param idColumnOrColumns Column(s) to use as identifier variables. * @param valueColumnOrColumns Column(s) to unpivot. * * @return Returns a new dataframe that has been unpivoted based on a particular column's values. * * @example *
     *
     * // Use column in 'idColumnOrColumns' as the identity column.
     * // The column name passed in 'valueColumnOrColumns' forms the 'variable' column
     * // and the values are used to populate the 'value' column of the new dataframe.
     * const moltenDf = df.melt("A", "B");
     * 
* * @example *
     *
     * // Multiple value columns example.
     * // Similar to the previous example except now the variable column will constitute
     * // of multiple values.
     * const moltenDf = df.melt("A", ["B", "C"]);
     * 
* * @example *
     *
     * // Multiple identity and value columns example.
     * const moltenDf = df.melt(["A", "B"], ["C", "D"]);
     * 
*/ melt(idColumnOrColumns: string | Iterable, valueColumnOrColumns: string | Iterable): IDataFrame; /** * Insert a pair at the start of the dataframe. * Doesn't modify the original dataframe! The returned dataframe is entirely new and contains rows from the original dataframe plus the inserted pair. * * @param pair The pair to insert. * * @return Returns a new dataframe with the specified pair inserted. * * @example *
     *
     * const newIndex = ... index of the new row ...
     * const newRow = ... the new data row to insert ...
     * const insertedDf = df.insertPair([newIndex, newRow]);
     * 
*/ insertPair(pair: [IndexT, ValueT]): IDataFrame; /** * Append a pair to the end of a dataframe. * Doesn't modify the original dataframe! The returned dataframe is entirely new and contains rows from the original dataframe plus the appended pair. * * @param pair - The pair to append. * * @return Returns a new dataframe with the specified pair appended. * * @example *
     *
     * const newIndex = ... index of the new row ...
     * const newRow = ... the new data row to append ...
     * const appendedDf = df.appendPair([newIndex, newRow]);
     * 
*/ appendPair(pair: [IndexT, ValueT]): IDataFrame; /** * Removes rows from the dataframe by index. */ remove(index: IndexT): IDataFrame; /** * Fill gaps in a dataframe. * * @param comparer User-defined comparer function that is passed pairA and pairB, two consecutive rows, return truthy if there is a gap between the rows, or falsey if there is no gap. * @param generator User-defined generator function that is passed pairA and pairB, two consecutive rows, returns an array of pairs that fills the gap between the rows. * * @return Returns a new dataframe with gaps filled in. * * @example *
     *
     *   var sequenceWithGaps = ...
     *
     *  // Predicate that determines if there is a gap.
     *  var gapExists = (pairA, pairB) => {
     *      // Returns true if there is a gap.
     *      return true;
     *  };
     *
     *  // Generator function that produces new rows to fill the gap.
     *  var gapFiller = (pairA, pairB) => {
     *      // Create an array of index, value pairs that fill the gaps between pairA and pairB.
     *      return [
     *          newPair1,
     *          newPair2,
     *          newPair3,
     *      ];
     *  };
     *
     *  var sequenceWithoutGaps = sequenceWithGaps.fillGaps(gapExists, gapFiller);
     * 
*/ fillGaps(comparer: ComparerFn<[IndexT, ValueT], [IndexT, ValueT]>, generator: GapFillFn<[IndexT, ValueT], [IndexT, ValueT]>): IDataFrame; /** * Returns the specified default dataframe if the dataframe is empty. * * @param defaultDataFrame Default dataframe to return if the dataframe is empty. * * @return Returns 'defaultDataFrame' if the dataframe is empty. * * @example *
     *
     * const emptyDataFrame = new DataFrame();
     * const defaultDataFrame = new DataFrame([ { A: 1 }, { A: 2 }, { A: 3 } ]);
     * expect(emptyDataFrame.defaultIfEmpty(defaultDataFrame)).to.eql(defaultDataFrame);
     * 
* * @example *
     *
     * const nonEmptyDataFrame = new DataFrame([ { A: 100 }]);
     * const defaultDataFrame = new DataFrame([ { A: 1 }, { A: 2 }, { A: 3 } ]);
     * expect(nonEmptyDataFrame.defaultIfEmpty(defaultDataFrame)).to.eql(nonEmptyDataFrame);
     * 
*/ defaultIfEmpty(defaultDataFrame: ValueT[] | IDataFrame): IDataFrame; /** * Detect the frequency of the types of the values in the dataframe. * This is a good way to understand the shape of your data. * * @return Returns a dataframe with rows that conform to {@link ITypeFrequency} that describes the data types contained in the original dataframe. * * @example *
     *
     * const df = dataForge.readFileSync("./my-data.json").parseJSON();
     * const dataTypes = df.detectTypes();
     * console.log(dataTypes.toString());
     * 
*/ detectTypes(): IDataFrame; /** * Detect the frequency of the values in the dataframe. * This is a good way to understand the shape of your data. * * @return Returns a dataframe with rows that conform to {@link IValueFrequency} that describes the values contained in the dataframe. * * @example *
     *
     * const df = dataForge.readFileSync("./my-data.json").parseJSON();
     * const dataValues = df.detectValues();
     * console.log(dataValues.toString());
     * 
*/ detectValues(): IDataFrame; /** * Serialize the dataframe to the JSON data format. * * @return Returns a string in the JSON data format that represents the dataframe. * * @example *
     *
     * const jsonData = df.toJSON();
     * console.log(jsonData);
     * 
*/ toJSON(): string; /** * Serialize the dataframe to the JSON5 data format. * * @return Returns a string in the JSON5 data format that represents the dataframe. * * @example *
     *
     * const jsonData = df.toJSON5();
     * console.log(jsonData);
     * 
*/ toJSON5(): string; /** * Serialize the dataframe to the CSV data format. * * @param options Options for CSV output. The options object is passed directly to [PapaParse.unparse](https://www.papaparse.com/docs#unparse), please see [PapaParse docs for additional options](https://www.papaparse.com/docs#unparse-config-default). * * @return Returns a string in the CSV data format that represents the dataframe. * * @example *
     *
     * const csvData = df.toCSV();
     * console.log(csvData);
     * 
* * @example *
     *
     * const csvData = df.toCSV({ header: false });
     * console.log(csvData);
     * 
*/ toCSV(options?: ICSVOutputOptions): string; /** * Serialize the dataframe to HTML. * * @return Returns a string in HTML format that represents the dataframe. */ toHTML(): string; /** * Serialize the dataframe to an ordinary JavaScript data structure. * The resulting data structure is suitable for further serialization to JSON and can be used to * transmit a DataFrame and its internal structure over the wire. * Use the {@link deserialize} function to later reconstitute the serialized dataframe. * * @return Returns a JavaScript data structure conforming to {@link ISerializedDataFrame} that represents the dataframe and its internal structure. * * @example *
     *
     * const jsDataStructure = df.serialize();
     * const jsonData = JSON.stringify(jsDataStructure);
     * console.log(jsonData);
     * const deserializedJsDataStructure = JSON.parse(jsonData);
     * const deserializedDf = DataFrame.deserialize(deserializedJsDataStructure); // Reconstituted.
     * 
*/ serialize(): ISerializedDataFrame; /** * Deserialize the dataframe from an ordinary JavaScript data structure. * Can reconstitute a dataframe that previously serialized with the {@link serialize} function. * This can rebuilds the dataframe with the exact same internal structure after it has been transmitted over the wire. * * @param input The serialize JavaScript data structure for the dataframe. * * @return Returns the deserialized/reconstituted dataframe. * * @example *
     *
     * const jsDataStructure = df.serialize();
     * const jsonData = JSON.stringify(jsDataStructure);
     * console.log(jsonData);
     * const deserializedJsDataStructure = JSON.parse(jsonData);
     * const deserializedDf = DataFrame.deserialize(deserializedJsDataStructure); // Reconstituted.
     * 
*/ static deserialize(input: ISerializedDataFrame): IDataFrame; /*** * Allows the dataframe to be queried to confirm that it is actually a dataframe. * Used from JavaScript to tell the difference between a Series and a DataFrame. * * @return Returns the string "dataframe". */ getTypeCode(): string; }