Initial Commit
nodered/rootfs/data/node_modules/influx/lib/src/backoff/backoff.d.ts (15 lines, generated, vendored, normal file)
@@ -0,0 +1,15 @@
export interface IBackoffStrategy {
    /**
     * GetDelay returns the amount of delay of the current backoff.
     */
    getDelay(): number;
    /**
     * Next is called when a failure occurs on a host to
     * return the next backoff amount.
     */
    next(): IBackoffStrategy;
    /**
     * Returns a strategy with a reset backoff counter.
     */
    reset(): IBackoffStrategy;
}
nodered/rootfs/data/node_modules/influx/lib/src/backoff/backoff.js (2 lines, generated, vendored, normal file)
@@ -0,0 +1,2 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
nodered/rootfs/data/node_modules/influx/lib/src/backoff/exponential.d.ts (48 lines, generated, vendored, normal file)
@@ -0,0 +1,48 @@
import { IBackoffStrategy } from './backoff';
/**
 * IExponentialOptions are passed into the ExponentialBackoff constructor. The
 * backoff equation is, in general, min(max, initial ^ n), where `n` is
 * an incremented backoff factor. The result of the equation is a delay
 * given in milliseconds.
 *
 */
export interface IExponentialOptions {
    /**
     * The initial delay passed to the equation.
     */
    initial: number;
    /**
     * Random factor to subtract from the `n` count.
     */
    random: number;
    /**
     * Max is the maximum value of the delay.
     */
    max: number;
}
/**
 * Exponential Backoff
 * @see https://en.wikipedia.org/wiki/Exponential_backoff
 */
export declare class ExponentialBackoff implements IBackoffStrategy {
    protected options: IExponentialOptions;
    private _counter;
    /**
     * Creates a new exponential backoff strategy.
     * @see https://en.wikipedia.org/wiki/Exponential_backoff
     * @param options
     */
    constructor(options: IExponentialOptions);
    /**
     * @inheritDoc
     */
    getDelay(): number;
    /**
     * @inheritDoc
     */
    next(): IBackoffStrategy;
    /**
     * @inheritDoc
     */
    reset(): IBackoffStrategy;
}
nodered/rootfs/data/node_modules/influx/lib/src/backoff/exponential.js (39 lines, generated, vendored, normal file)
@@ -0,0 +1,39 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
/**
 * Exponential Backoff
 * @see https://en.wikipedia.org/wiki/Exponential_backoff
 */
class ExponentialBackoff {
    /**
     * Creates a new exponential backoff strategy.
     * @see https://en.wikipedia.org/wiki/Exponential_backoff
     * @param options
     */
    constructor(options) {
        this.options = options;
        this._counter = 0;
    }
    /**
     * @inheritDoc
     */
    getDelay() {
        const count = this._counter - Math.round(Math.random() * this.options.random); // Tslint:disable-line
        return Math.min(this.options.max, this.options.initial * Math.pow(2, Math.max(count, 0)));
    }
    /**
     * @inheritDoc
     */
    next() {
        const next = new ExponentialBackoff(this.options);
        next._counter = this._counter + 1;
        return next;
    }
    /**
     * @inheritDoc
     */
    reset() {
        return new ExponentialBackoff(this.options);
    }
}
exports.ExponentialBackoff = ExponentialBackoff;
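Editor's note: a minimal usage sketch of the vendored ExponentialBackoff above, not part of the commit; the require path and option values are illustrative assumptions.

const { ExponentialBackoff } = require('influx/lib/src/backoff/exponential');

// Start at 50 ms, cap at 10 s, and jitter the attempt counter by up to 1.
let strategy = new ExponentialBackoff({ initial: 50, max: 10000, random: 1 });

for (let attempt = 0; attempt < 5; attempt += 1) {
    console.log(`attempt ${attempt}: wait ~${strategy.getDelay()} ms`);
    strategy = strategy.next(); // each failure yields a new strategy with an incremented counter (roughly doubling the delay)
}

strategy = strategy.reset(); // a success returns a strategy with the counter cleared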
nodered/rootfs/data/node_modules/influx/lib/src/builder.d.ts (254 lines, generated, vendored, normal file)
@@ -0,0 +1,254 @@
|
||||
export interface IStringable {
|
||||
toString(): string;
|
||||
}
|
||||
export interface IBaseExpression<T> {
|
||||
/**
|
||||
* Inserts a tag name in the expression.
|
||||
*/
|
||||
tag(name: string): T;
|
||||
/**
|
||||
* Inserts a field name in the expression.
|
||||
*/
|
||||
field(name: string): T;
|
||||
/**
|
||||
* Chains on a value to the expression. An error will be thrown if the
|
||||
* value is a type we can't represent in InfluxQL, primarily `null` or
|
||||
* `undefined.`
|
||||
*/
|
||||
value(value: any): T;
|
||||
}
|
||||
export interface IExpressionHead extends IBaseExpression<IBinaryOp> {
|
||||
}
|
||||
export interface IExpressionTail extends IBaseExpression<IExpressionHead> {
|
||||
}
|
||||
export interface IBinaryOp {
|
||||
/**
|
||||
* Adds an 'AND' operator
|
||||
*/
|
||||
and: IExpressionTail;
|
||||
/**
|
||||
* Adds an 'OR' operator
|
||||
*/
|
||||
or: IExpressionTail;
|
||||
/**
|
||||
* Adds a '+' addition symbol
|
||||
*/
|
||||
plus: IExpressionTail;
|
||||
/**
|
||||
* Adds a '*' multiplication symbol
|
||||
*/
|
||||
times: IExpressionTail;
|
||||
/**
|
||||
* Adds a '-' subtraction symbol
|
||||
*/
|
||||
minus: IExpressionTail;
|
||||
/**
|
||||
* Adds a '/' division symbol
|
||||
*/
|
||||
div: IExpressionTail;
|
||||
/**
|
||||
* Adds a '=' symbol
|
||||
*/
|
||||
equals: IExpressionTail;
|
||||
/**
|
||||
* Adds a '=~' comparator to select entries matching a regex.
|
||||
*/
|
||||
matches: IExpressionTail;
|
||||
/**
|
||||
* Adds a '!~' comparator to select entries not matching a regex.
|
||||
*/
|
||||
doesntMatch: IExpressionTail;
|
||||
/**
|
||||
* Adds a '!=' comparator to select entries not equaling a certain value.
|
||||
*/
|
||||
notEqual: IExpressionTail;
|
||||
/**
|
||||
* Adds a '>' symbol
|
||||
*/
|
||||
gt: IExpressionTail;
|
||||
/**
|
||||
* Adds a '>=' symbol
|
||||
*/
|
||||
gte: IExpressionTail;
|
||||
/**
|
||||
* Adds a '<' symbol
|
||||
*/
|
||||
lt: IExpressionTail;
|
||||
/**
|
||||
* Adds a '<=' symbol
|
||||
*/
|
||||
lte: IExpressionTail;
|
||||
}
|
||||
/**
|
||||
* Expression is used to build filtering expressions, like those used in WHERE
|
||||
* clauses. It can be used for fluent and safe building of queries using
|
||||
* untrusted input.
|
||||
*
|
||||
* @example
|
||||
* e => e
|
||||
* .field('host').equals.value('ares.peet.io')
|
||||
* .or
|
||||
* .field('host').matches(/example\.com$/)
|
||||
* .or
|
||||
* .expr(e => e
|
||||
* .field('country').equals.value('US')
|
||||
* .and
|
||||
* .field('state').equals.value('WA'));
|
||||
*
|
||||
* // Generates:
|
||||
* // "host" = 'ares.peet.io' OR "host" ~= /example\.com$/ OR \
|
||||
* // ("county" = 'US' AND "state" = 'WA')
|
||||
*/
|
||||
export declare class Expression implements IExpressionHead, IExpressionTail, IBinaryOp {
|
||||
private _query;
|
||||
/**
|
||||
* Inserts a tag reference into the expression; the name will be
|
||||
* automatically escaped.
|
||||
* @param name
|
||||
* @return
|
||||
*/
|
||||
tag(name: string): this;
|
||||
/**
|
||||
* Inserts a field reference into the expression; the name will be
|
||||
* automatically escaped.
|
||||
* @param name
|
||||
* @return
|
||||
*/
|
||||
field(name: string): this;
|
||||
/**
|
||||
* Inserts a subexpression; invokes the function with a new expression
|
||||
* that can be chained on.
|
||||
* @param fn
|
||||
* @return
|
||||
* @example
|
||||
* e.field('a').equals.value('b')
|
||||
* .or.expr(e =>
|
||||
* e.field('b').equals.value('b')
|
||||
* .and.field('a').equals.value('c'))
|
||||
* .toString()
|
||||
* // "a" = 'b' OR ("b" = 'b' AND "a" = 'c')
|
||||
*/
|
||||
exp(fn: (e: Expression) => Expression): this;
|
||||
/**
|
||||
* Value chains on a value to the expression.
|
||||
*
|
||||
* - Numbers will be inserted verbatim
|
||||
* - Strings will be escaped and inserted
|
||||
* - Booleans will be inserted correctly
|
||||
* - Dates will be formatted and inserted correctly, including INanoDates.
|
||||
* - Regular expressions will be inserted correctly, however an error will
|
||||
* be thrown if they contain flags, as regex flags do not work in Influx
|
||||
* - Otherwise we'll try to call `.toString()` on the value, throwing
|
||||
* if we cannot do so.
|
||||
*
|
||||
* @param value
|
||||
* @return
|
||||
*/
|
||||
value(value: any): this;
|
||||
/**
|
||||
* Chains on an AND clause to the expression.
|
||||
*/
|
||||
readonly and: this;
|
||||
/**
|
||||
* Chains on an OR clause to the expression.
|
||||
*/
|
||||
readonly or: this;
|
||||
/**
|
||||
* Chains on a `+` operator to the expression.
|
||||
*/
|
||||
readonly plus: this;
|
||||
/**
|
||||
* Chains on a `*` operator to the expression.
|
||||
*/
|
||||
readonly times: this;
|
||||
/**
|
||||
* Chains on a `-` operator to the expression.
|
||||
*/
|
||||
readonly minus: this;
|
||||
/**
|
||||
* Chains on a `/` operator to the expression.
|
||||
*/
|
||||
readonly div: this;
|
||||
/**
|
||||
* Chains on a `=` conditional to the expression.
|
||||
*/
|
||||
readonly equals: this;
|
||||
/**
|
||||
* Chains on a `=~` conditional to the expression to match regexes.
|
||||
*/
|
||||
readonly matches: this;
|
||||
/**
|
||||
* Chains on a `!~` conditional to the expression to exclude regex matches.
|
||||
*/
|
||||
readonly doesntMatch: this;
|
||||
/**
|
||||
* Chains on a `!=` conditional to the expression.
|
||||
*/
|
||||
readonly notEqual: this;
|
||||
/**
|
||||
* Chains on a `>` conditional to the expression.
|
||||
*/
|
||||
readonly gt: this;
|
||||
/**
|
||||
* Chains on a `>=` conditional to the expression.
|
||||
*/
|
||||
readonly gte: this;
|
||||
/**
|
||||
* Chains on a `<` conditional to the expression.
|
||||
*/
|
||||
readonly lt: this;
|
||||
/**
|
||||
* Chains on a `<=` conditional to the expression.
|
||||
*/
|
||||
readonly lte: this;
|
||||
/**
|
||||
* Converts the expression into its InfluxQL representation.
|
||||
* @return
|
||||
*/
|
||||
toString(): string;
|
||||
}
|
||||
/**
|
||||
* Measurement creates a reference to a particular measurement. You can
|
||||
* reference it solely by its name, but you can also specify the retention
|
||||
* policy and database it lives under.
|
||||
*
|
||||
* @example
|
||||
* m.name('my_measurement') // "my_measurement"
|
||||
* m.name('my_measurement').policy('one_day') // "one_day"."my_measurement"
|
||||
* m.name('my_measurement').policy('one_day').db('mydb') // "mydb"."one_day"."my_measurement"
|
||||
*/
|
||||
export declare class Measurement {
|
||||
private _parts;
|
||||
/**
|
||||
* Sets the measurement name.
|
||||
* @param name
|
||||
* @return
|
||||
*/
|
||||
name(name: string): this;
|
||||
/**
|
||||
* Sets the retention policy name.
|
||||
* @param retentionPolicy
|
||||
* @return
|
||||
*/
|
||||
policy(retentionPolicy: string): this;
|
||||
/**
|
||||
* Sets the database name.
|
||||
* @param db
|
||||
* @return
|
||||
*/
|
||||
db(db: string): this;
|
||||
/**
|
||||
* Converts the measurement into its InfluxQL representation.
|
||||
* @return
|
||||
* @throws {Error} if a measurement name is not provided
|
||||
*/
|
||||
toString(): string;
|
||||
}
|
||||
export declare type measurement = {
|
||||
measurement: string | ((m: Measurement) => IStringable);
|
||||
};
|
||||
export declare type where = {
|
||||
where: string | ((e: IExpressionHead) => IStringable);
|
||||
};
|
||||
export declare function parseMeasurement(q: measurement): string;
|
||||
export declare function parseWhere(q: where): string;
|
||||
nodered/rootfs/data/node_modules/influx/lib/src/builder.js (295 lines, generated, vendored, normal file)
@@ -0,0 +1,295 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const grammar_1 = require("./grammar");
|
||||
function regexHasFlags(re) {
|
||||
if (typeof re.flags !== 'undefined') {
|
||||
return re.flags.length > 0;
|
||||
}
|
||||
return !re.toString().endsWith('/');
|
||||
}
|
||||
/**
|
||||
* Expression is used to build filtering expressions, like those used in WHERE
|
||||
* clauses. It can be used for fluent and safe building of queries using
|
||||
* untrusted input.
|
||||
*
|
||||
* @example
|
||||
* e => e
|
||||
* .field('host').equals.value('ares.peet.io')
|
||||
* .or
|
||||
* .field('host').matches(/example\.com$/)
|
||||
* .or
|
||||
* .expr(e => e
|
||||
* .field('country').equals.value('US')
|
||||
* .and
|
||||
* .field('state').equals.value('WA'));
|
||||
*
|
||||
* // Generates:
|
||||
* // "host" = 'ares.peet.io' OR "host" ~= /example\.com$/ OR \
|
||||
* // ("county" = 'US' AND "state" = 'WA')
|
||||
*/
|
||||
class Expression {
|
||||
constructor() {
|
||||
this._query = [];
|
||||
}
|
||||
/**
|
||||
* Inserts a tag reference into the expression; the name will be
|
||||
* automatically escaped.
|
||||
* @param name
|
||||
* @return
|
||||
*/
|
||||
tag(name) {
|
||||
this.field(name);
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Inserts a field reference into the expression; the name will be
|
||||
* automatically escaped.
|
||||
* @param name
|
||||
* @return
|
||||
*/
|
||||
field(name) {
|
||||
this._query.push(grammar_1.escape.quoted(name));
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Inserts a subexpression; invokes the function with a new expression
|
||||
* that can be chained on.
|
||||
* @param fn
|
||||
* @return
|
||||
* @example
|
||||
* e.field('a').equals.value('b')
|
||||
* .or.expr(e =>
|
||||
* e.field('b').equals.value('b')
|
||||
* .and.field('a').equals.value('c'))
|
||||
* .toString()
|
||||
* // "a" = 'b' OR ("b" = 'b' AND "a" = 'c')
|
||||
*/
|
||||
exp(fn) {
|
||||
this._query.push('(' + fn(new Expression()).toString() + ')');
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Value chains on a value to the expression.
|
||||
*
|
||||
* - Numbers will be inserted verbatim
|
||||
* - Strings will be escaped and inserted
|
||||
* - Booleans will be inserted correctly
|
||||
* - Dates will be formatted and inserted correctly, including INanoDates.
|
||||
* - Regular expressions will be inserted correctly, however an error will
|
||||
* be thrown if they contain flags, as regex flags do not work in Influx
|
||||
* - Otherwise we'll try to call `.toString()` on the value, throwing
|
||||
* if we cannot do so.
|
||||
*
|
||||
* @param value
|
||||
* @return
|
||||
*/
|
||||
value(value) {
|
||||
switch (typeof value) {
|
||||
case 'number':
|
||||
this._query.push(value.toString());
|
||||
return this;
|
||||
case 'string':
|
||||
this._query.push(grammar_1.escape.stringLit(value));
|
||||
return this;
|
||||
case 'boolean':
|
||||
this._query.push(value ? 'TRUE' : 'FALSE');
|
||||
return this;
|
||||
default:
|
||||
if (value instanceof Date) {
|
||||
this._query.push(grammar_1.formatDate(value));
|
||||
return this;
|
||||
}
|
||||
if (value instanceof RegExp) {
|
||||
if (regexHasFlags(value)) {
|
||||
throw new Error('Attempted to query using a regex with flags, ' +
|
||||
'but Influx doesn\'t support flags in queries.');
|
||||
}
|
||||
this._query.push('/' + value.source + '/');
|
||||
return this;
|
||||
}
|
||||
if (value && typeof value.toString === 'function') {
|
||||
this._query.push(value.toString());
|
||||
return this;
|
||||
}
|
||||
throw new Error('node-influx doesn\'t know how to encode the provided value into a ' +
|
||||
'query. If you think this is a bug, open an issue here: https://git.io/influx-err');
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Chains on an AND clause to the expression.
|
||||
*/
|
||||
get and() {
|
||||
this._query.push('AND');
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Chains on an OR clause to the expression.
|
||||
*/
|
||||
get or() {
|
||||
this._query.push('OR');
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Chains on a `+` operator to the expression.
|
||||
*/
|
||||
get plus() {
|
||||
this._query.push('+');
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Chains on a `*` operator to the expression.
|
||||
*/
|
||||
get times() {
|
||||
this._query.push('*');
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Chains on a `-` operator to the expression.
|
||||
*/
|
||||
get minus() {
|
||||
this._query.push('-');
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Chains on a `/` operator to the expression.
|
||||
*/
|
||||
get div() {
|
||||
this._query.push('/');
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Chains on a `=` conditional to the expression.
|
||||
*/
|
||||
get equals() {
|
||||
this._query.push('=');
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Chains on a `=~` conditional to the expression to match regexes.
|
||||
*/
|
||||
get matches() {
|
||||
this._query.push('=~');
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Chains on a `!~` conditional to the expression to exclude regex matches.
|
||||
*/
|
||||
get doesntMatch() {
|
||||
this._query.push('!~');
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Chains on a `!=` conditional to the expression.
|
||||
*/
|
||||
get notEqual() {
|
||||
this._query.push('!=');
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Chains on a `>` conditional to the expression.
|
||||
*/
|
||||
get gt() {
|
||||
this._query.push('>');
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Chains on a `>=` conditional to the expression.
|
||||
*/
|
||||
get gte() {
|
||||
this._query.push('>=');
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Chains on a `<` conditional to the expression.
|
||||
*/
|
||||
get lt() {
|
||||
this._query.push('<');
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Chains on a `<=` conditional to the expression.
|
||||
*/
|
||||
get lte() {
|
||||
this._query.push('<=');
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Converts the expression into its InfluxQL representation.
|
||||
* @return
|
||||
*/
|
||||
toString() {
|
||||
return this._query.join(' ');
|
||||
}
|
||||
}
|
||||
exports.Expression = Expression;
|
||||
/**
|
||||
* Measurement creates a reference to a particular measurement. You can
|
||||
* reference it solely by its name, but you can also specify the retention
|
||||
* policy and database it lives under.
|
||||
*
|
||||
* @example
|
||||
* m.name('my_measurement') // "my_measurement"
|
||||
* m.name('my_measurement').policy('one_day') // "one_day"."my_measurement"
|
||||
* m.name('my_measurement').policy('one_day').db('mydb') // "mydb"."one_day"."my_measurement"
|
||||
*/
|
||||
class Measurement {
|
||||
constructor() {
|
||||
this._parts = [null, null, null];
|
||||
}
|
||||
/**
|
||||
* Sets the measurement name.
|
||||
* @param name
|
||||
* @return
|
||||
*/
|
||||
name(name) {
|
||||
this._parts[2] = name;
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Sets the retention policy name.
|
||||
* @param retentionPolicy
|
||||
* @return
|
||||
*/
|
||||
policy(retentionPolicy) {
|
||||
this._parts[1] = retentionPolicy;
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Sets the database name.
|
||||
* @param db
|
||||
* @return
|
||||
*/
|
||||
db(db) {
|
||||
this._parts[0] = db;
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Converts the measurement into its InfluxQL representation.
|
||||
* @return
|
||||
* @throws {Error} if a measurement name is not provided
|
||||
*/
|
||||
toString() {
|
||||
if (!this._parts[2]) {
|
||||
throw new Error(`You must specify a measurement name to query! Got \`${this._parts[2]}\``);
|
||||
}
|
||||
return this._parts
|
||||
.filter(p => Boolean(p))
|
||||
.map(p => grammar_1.escape.quoted(p))
|
||||
.join('.');
|
||||
}
|
||||
}
|
||||
exports.Measurement = Measurement;
|
||||
function parseMeasurement(q) {
|
||||
if (typeof q.measurement === 'function') {
|
||||
return q.measurement(new Measurement()).toString();
|
||||
}
|
||||
return q.measurement;
|
||||
}
|
||||
exports.parseMeasurement = parseMeasurement;
|
||||
function parseWhere(q) {
|
||||
if (typeof q.where === 'function') {
|
||||
return q.where(new Expression()).toString();
|
||||
}
|
||||
return q.where;
|
||||
}
|
||||
exports.parseWhere = parseWhere;
|
||||
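Editor's note: a short sketch of the query builder above in use, not part of the commit; the require path is an assumption.

const { parseWhere, parseMeasurement } = require('influx/lib/src/builder');

// Build a WHERE clause from untrusted input; identifiers and literals are escaped.
const where = parseWhere({
    where: e => e.field('host').equals.value('ares.peet.io').and.field('state').equals.value('WA'),
});
// => "host" = 'ares.peet.io' AND "state" = 'WA'

// Reference a measurement under a specific retention policy and database.
const from = parseMeasurement({
    measurement: m => m.name('response_times').policy('autogen').db('mydb'),
});
// => "mydb"."autogen"."response_times"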
nodered/rootfs/data/node_modules/influx/lib/src/grammar/ds.d.ts (45 lines, generated, vendored, normal file)
@@ -0,0 +1,45 @@
|
||||
/**
|
||||
* FieldType is an enumeration of InfluxDB field data types.
|
||||
* @typedef {Number} FieldType
|
||||
* @example
|
||||
* import { FieldType } from 'influx'; // or const FieldType = require('influx').FieldType
|
||||
*
|
||||
* const schema = {
|
||||
* measurement: 'my_measurement',
|
||||
* fields: {
|
||||
* my_int: FieldType.INTEGER,
|
||||
* my_float: FieldType.FLOAT,
|
||||
* my_string: FieldType.STRING,
|
||||
* my_boolean: FieldType.BOOLEAN,
|
||||
* }
|
||||
* }
|
||||
*/
|
||||
export declare enum FieldType {
|
||||
FLOAT = 0,
|
||||
INTEGER = 1,
|
||||
STRING = 2,
|
||||
BOOLEAN = 3
|
||||
}
|
||||
export declare function isNumeric(value: string): boolean;
|
||||
/**
|
||||
* You can provide Raw values to Influx methods to prevent it from escaping
|
||||
* your provided string.
|
||||
* @class
|
||||
* @example
|
||||
* influx.createDatabase(new Influx.Raw('This won\'t be escaped!'));
|
||||
*/
|
||||
export declare class Raw {
|
||||
private value;
|
||||
/**
|
||||
* Wraps a string so that it is not escaped in Influx queries.
|
||||
* @param value
|
||||
* @example
|
||||
* influx.createDatabase(new Influx.Raw('This won\'t be escaped!'));
|
||||
*/
|
||||
constructor(value: string);
|
||||
/**
|
||||
* Returns the wrapped string.
|
||||
* @return
|
||||
*/
|
||||
getValue(): string;
|
||||
}
|
||||
nodered/rootfs/data/node_modules/influx/lib/src/grammar/ds.js (55 lines, generated, vendored, normal file)
@@ -0,0 +1,55 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
/**
|
||||
* FieldType is an enumeration of InfluxDB field data types.
|
||||
* @typedef {Number} FieldType
|
||||
* @example
|
||||
* import { FieldType } from 'influx'; // or const FieldType = require('influx').FieldType
|
||||
*
|
||||
* const schema = {
|
||||
* measurement: 'my_measurement',
|
||||
* fields: {
|
||||
* my_int: FieldType.INTEGER,
|
||||
* my_float: FieldType.FLOAT,
|
||||
* my_string: FieldType.STRING,
|
||||
* my_boolean: FieldType.BOOLEAN,
|
||||
* }
|
||||
* }
|
||||
*/
|
||||
var FieldType;
|
||||
(function (FieldType) {
|
||||
FieldType[FieldType["FLOAT"] = 0] = "FLOAT";
|
||||
FieldType[FieldType["INTEGER"] = 1] = "INTEGER";
|
||||
FieldType[FieldType["STRING"] = 2] = "STRING";
|
||||
FieldType[FieldType["BOOLEAN"] = 3] = "BOOLEAN";
|
||||
})(FieldType = exports.FieldType || (exports.FieldType = {}));
|
||||
function isNumeric(value) {
|
||||
return !Number.isNaN(Number(value));
|
||||
}
|
||||
exports.isNumeric = isNumeric;
|
||||
/**
|
||||
* You can provide Raw values to Influx methods to prevent it from escaping
|
||||
* your provided string.
|
||||
* @class
|
||||
* @example
|
||||
* influx.createDatabase(new Influx.Raw('This won\'t be escaped!'));
|
||||
*/
|
||||
class Raw {
|
||||
/**
|
||||
* Wraps a string so that it is not escaped in Influx queries.
|
||||
* @param value
|
||||
* @example
|
||||
* influx.createDatabase(new Influx.Raw('This won\'t be escaped!'));
|
||||
*/
|
||||
constructor(value) {
|
||||
this.value = value;
|
||||
}
|
||||
/**
|
||||
* Returns the wrapped string.
|
||||
* @return
|
||||
*/
|
||||
getValue() {
|
||||
return this.value;
|
||||
}
|
||||
}
|
||||
exports.Raw = Raw;
|
||||
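Editor's note: a quick sketch of FieldType, isNumeric, and Raw from the module above, not part of the commit; the require path is an assumption.

const { FieldType, Raw, isNumeric } = require('influx/lib/src/grammar/ds');

console.log(FieldType.INTEGER);      // => 1
console.log(isNumeric('42.5'));      // => true
console.log(isNumeric('forty-two')); // => false

// Raw wraps a string so that the escapers below pass it through untouched.
const raw = new Raw('"already quoted"');
console.log(raw.getValue());         // => "already quoted"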
nodered/rootfs/data/node_modules/influx/lib/src/grammar/escape.d.ts (37 lines, generated, vendored, normal file)
@@ -0,0 +1,37 @@
|
||||
/**
|
||||
* TagEscaper escapes tag keys, tag values, and field keys.
|
||||
* @type {Object}
|
||||
* @property {function(s: string): string } quoted Escapes and wraps quoted
|
||||
* values, such as database names.
|
||||
* @property {function(s: string): string } stringLit Escapes and
|
||||
* wraps string literals.
|
||||
* @property {function(s: string): string } measurement Escapes measurement
|
||||
* names on the line protocol.
|
||||
* @property {function(s: string): string } tag Escapes tag keys, tag values,
|
||||
* and field keys on the line protocol.
|
||||
*
|
||||
* @example
|
||||
* console.log(escape.quoted('my_"db')); // => "my_\"db"
|
||||
* console.log(escape.stringLit('hello\'world')); // => 'hello\'world'
|
||||
*
|
||||
* console.log(escape.measurement('my measurement')); // => my\ measurement
|
||||
* console.log(escape.tag('my tag=')); // => my\ tag\=
|
||||
*/
|
||||
export declare const escape: {
|
||||
/**
|
||||
* Measurement escapes measurement names.
|
||||
*/
|
||||
measurement: (val: string) => string;
|
||||
/**
|
||||
* Quoted escapes quoted values, such as database names.
|
||||
*/
|
||||
quoted: (val: string) => string;
|
||||
/**
|
||||
* StringLitEscaper escapes single quotes in string literals.
|
||||
*/
|
||||
stringLit: (val: string) => string;
|
||||
/**
|
||||
* TagEscaper escapes tag keys, tag values, and field keys.
|
||||
*/
|
||||
tag: (val: string) => string;
|
||||
};
|
||||
nodered/rootfs/data/node_modules/influx/lib/src/grammar/escape.js (106 lines, generated, vendored, normal file)
@@ -0,0 +1,106 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const ds_1 = require("./ds");
|
||||
const reEscape = /[-|\\{()[\]^$+*?.]/g;
|
||||
/**
|
||||
* The Escaper escapes the special characters in the provided list
|
||||
* with backslashes. Much of the code here is inspired by that in the
|
||||
* sqlstring package found here: https://github.com/mysqljs/sqlstring
|
||||
*
|
||||
* Instances of the Escaper are derived from the documentation of escape
|
||||
* sequences found here: https://aka.ms/co1m4k
|
||||
*
|
||||
* sqlstring is made available under the following license:
|
||||
*
|
||||
* Copyright (c) 2012 Felix Geisendörfer (felix@debuggable.com) and contributors
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
class Escaper {
|
||||
constructor(chars, wrap = '', escaper = '\\') {
|
||||
this.wrap = wrap;
|
||||
this.escaper = escaper;
|
||||
const patterns = chars.join('').replace(reEscape, '\\$&');
|
||||
this._re = new RegExp('[' + patterns + ']', 'g');
|
||||
}
|
||||
/**
|
||||
* Escape replaces occurrences of special characters within the target
|
||||
* string with the necessary escape codes.
|
||||
*/
|
||||
escape(val) {
|
||||
if (val instanceof ds_1.Raw) {
|
||||
return val.getValue();
|
||||
}
|
||||
this._re.lastIndex = 0;
|
||||
let chunkIndex = this._re.lastIndex;
|
||||
let escapedVal = '';
|
||||
let match = this._re.exec(val);
|
||||
while (match) {
|
||||
escapedVal += val.slice(chunkIndex, match.index) + this.escaper + match[0];
|
||||
chunkIndex = this._re.lastIndex;
|
||||
match = this._re.exec(val);
|
||||
}
|
||||
if (chunkIndex === 0) {
|
||||
return this.wrap + val + this.wrap;
|
||||
}
|
||||
if (chunkIndex < val.length) {
|
||||
return this.wrap + escapedVal + val.slice(chunkIndex) + this.wrap;
|
||||
}
|
||||
return this.wrap + escapedVal + this.wrap;
|
||||
}
|
||||
}
|
||||
const bindEsc = (e) => e.escape.bind(e);
|
||||
/**
|
||||
* TagEscaper escapes tag keys, tag values, and field keys.
|
||||
* @type {Object}
|
||||
* @property {function(s: string): string } quoted Escapes and wraps quoted
|
||||
* values, such as database names.
|
||||
* @property {function(s: string): string } stringLit Escapes and
|
||||
* wraps string literals.
|
||||
* @property {function(s: string): string } measurement Escapes measurement
|
||||
* names on the line protocol.
|
||||
* @property {function(s: string): string } tag Escapes tag keys, tag values,
|
||||
* and field keys on the line protocol.
|
||||
*
|
||||
* @example
|
||||
* console.log(escape.quoted('my_"db')); // => "my_\"db"
|
||||
* console.log(escape.stringLit('hello\'world')); // => 'hello\'world'
|
||||
*
|
||||
* console.log(escape.measurement('my measurement')); // => my\ measurement
|
||||
* console.log(escape.tag('my tag=')); // => my\ tag\=
|
||||
*/
|
||||
exports.escape = {
|
||||
/**
|
||||
* Measurement escapes measurement names.
|
||||
*/
|
||||
measurement: bindEsc(new Escaper([',', ' '])),
|
||||
/**
|
||||
* Quoted escapes quoted values, such as database names.
|
||||
*/
|
||||
quoted: bindEsc(new Escaper(['"', '\\\\'], '"')),
|
||||
/**
|
||||
* StringLitEscaper escapes single quotes in string literals.
|
||||
*/
|
||||
stringLit: bindEsc(new Escaper(['\''], '\'')),
|
||||
/**
|
||||
* TagEscaper escapes tag keys, tag values, and field keys.
|
||||
*/
|
||||
tag: bindEsc(new Escaper([',', '=', ' ']))
|
||||
};
|
||||
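Editor's note: the examples from the doc comment above, runnable as-is; not part of the commit, and the require path is an assumption.

const { escape } = require('influx/lib/src/grammar/escape');

console.log(escape.quoted('my_"db'));              // => "my_\"db"
console.log(escape.stringLit("hello'world"));      // => 'hello\'world'
console.log(escape.measurement('my measurement')); // => my\ measurement
console.log(escape.tag('my tag='));                // => my\ tag\=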
nodered/rootfs/data/node_modules/influx/lib/src/grammar/index.d.ts (3 lines, generated, vendored, normal file)
@@ -0,0 +1,3 @@
export * from './escape';
export * from './ds';
export * from './times';
nodered/rootfs/data/node_modules/influx/lib/src/grammar/index.js (8 lines, generated, vendored, normal file)
@@ -0,0 +1,8 @@
"use strict";
function __export(m) {
    for (var p in m) if (!exports.hasOwnProperty(p)) exports[p] = m[p];
}
Object.defineProperty(exports, "__esModule", { value: true });
__export(require("./escape"));
__export(require("./ds"));
__export(require("./times"));
nodered/rootfs/data/node_modules/influx/lib/src/grammar/times.d.ts (73 lines, generated, vendored, normal file)
@@ -0,0 +1,73 @@
|
||||
export interface INanoDate extends Date {
|
||||
/**
|
||||
* Returns the unix nanoseconds timestamp as a string.
|
||||
*/
|
||||
getNanoTime(): string;
|
||||
/**
|
||||
* Formats the date as an ISO RFC3339 timestamp with nanosecond precision.
|
||||
*/
|
||||
toNanoISOString(): string;
|
||||
}
|
||||
export declare type TimePrecision = 'n' | 'u' | 'ms' | 's' | 'm' | 'h';
|
||||
/**
|
||||
* Precision is a map of available Influx time precisions.
|
||||
* @type {Object.<String, String>}
|
||||
* @example
|
||||
* console.log(Precision.Hours); // => 'h'
|
||||
* console.log(Precision.Minutes); // => 'm'
|
||||
* console.log(Precision.Seconds); // => 's'
|
||||
* console.log(Precision.Milliseconds); // => 'ms'
|
||||
* console.log(Precision.Microseconds); // => 'u'
|
||||
* console.log(Precision.Nanoseconds); // => 'n'
|
||||
*/
|
||||
export declare const Precision: Readonly<{
|
||||
Hours: string;
|
||||
Microseconds: string;
|
||||
Milliseconds: string;
|
||||
Minutes: string;
|
||||
Nanoseconds: string;
|
||||
Seconds: string;
|
||||
}>;
|
||||
/**
|
||||
* Converts a nanosecond unix timestamp to an INanoDate for node-influx. The
|
||||
* timestamp is provided as a string to prevent precision loss.
|
||||
*
|
||||
* Please see [A Moment for Times](https://node-influx.github.io/manual/
|
||||
* usage.html#a-moment-for-times) for a more complete and eloquent explanation
|
||||
* of time handling in this module.
|
||||
*
|
||||
* @param timestamp
|
||||
* @example
|
||||
* const date = toNanoDate('1475985480231035600')
|
||||
*
|
||||
* // You can use the returned Date as a normal date:
|
||||
* expect(date.getTime()).to.equal(1475985480231);
|
||||
*
|
||||
* // We decorate it with two additional methods to read
|
||||
* // nanosecond-precision results:
|
||||
* expect(date.getNanoTime()).to.equal('1475985480231035600');
|
||||
* expect(date.toNanoISOString()).to.equal('2016-10-09T03:58:00.231035600Z');
|
||||
*/
|
||||
export declare function toNanoDate(timestamp: string): INanoDate;
|
||||
/**
|
||||
* FormatDate converts the Date instance to Influx's date query format.
|
||||
* @private
|
||||
*/
|
||||
export declare function formatDate(date: Date): string;
|
||||
/**
|
||||
* Converts a Date instance to a timestamp with the specified time precision.
|
||||
* @private
|
||||
*/
|
||||
export declare function dateToTime(date: Date | INanoDate, precision: TimePrecision): string;
|
||||
/**
|
||||
* Converts an ISO-formatted date or unix timestamp to a Date instance. If
|
||||
* the precision is finer than 'ms' the returned value will be an INanoDate.
|
||||
* @private
|
||||
*/
|
||||
export declare function isoOrTimeToDate(stamp: string | number, precision?: TimePrecision): INanoDate;
|
||||
/**
|
||||
* Converts a timestamp to a string with the correct precision. Assumes
|
||||
* that raw number and string instances are already in the correct precision.
|
||||
* @private
|
||||
*/
|
||||
export declare function castTimestamp(timestamp: string | number | Date, precision: TimePrecision): string;
|
||||
nodered/rootfs/data/node_modules/influx/lib/src/grammar/times.js (328 lines, generated, vendored, normal file)
@@ -0,0 +1,328 @@
|
||||
"use strict";
|
||||
/* eslint-disable no-fallthrough */
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const ds_1 = require("./ds");
|
||||
/**
|
||||
* Just a quick overview of what's going on in this file. It's a bit of a mess.
|
||||
* Influx uses three time formats:
|
||||
* - ISO times with nanoseconds when querying where an epoch is not provided
|
||||
* - Unix timestamps when querying with an epoch (specifying the precision
|
||||
* in the given time unit)
|
||||
* - Its own time format for time literals.
|
||||
*
|
||||
* To complicate matters, Influx operates on nanosecond precisions
|
||||
* by default, but we can't represent nanosecond timestamps in
|
||||
* JavaScript numbers as they're 64 bit uints.
|
||||
*
|
||||
* As a result we have several utilities to convert between these different
|
||||
* formats. When precision is required, we represent nanosecond timestamps
|
||||
* as strings and wrap default dates in the INanoDate interface which
|
||||
* lets the consumer read and write these more precise timestamps.
|
||||
*
|
||||
* Representing the timestamps as strings is definitely not a pure way to go
|
||||
* about it, but importing an arbitrary-precision integer library adds
|
||||
* bloat and is a massive hit to throughput. The operations we do do
|
||||
* are pretty trivial, so we stick with manipulating strings
|
||||
* and make sure to wash our hands when we're done.
|
||||
*
|
||||
* Vocabulary:
|
||||
* Unix timestamp = 'timestamp', abbreviated as 'time'
|
||||
* ISO timestamp = 'ISO time', abbreviated as 'ISO'
|
||||
* Influx timestamp = 'Influx time', abbreviated as 'Influx'
|
||||
*/
|
||||
function leftPad(str, length, pad = '0') {
|
||||
if (typeof str === 'number') {
|
||||
str = String(str);
|
||||
}
|
||||
while (str.length < length) {
|
||||
str = pad + str;
|
||||
}
|
||||
return str;
|
||||
}
|
||||
function rightPad(str, length, pad = '0') {
|
||||
if (typeof str === 'number') {
|
||||
str = String(str);
|
||||
}
|
||||
while (str.length < length) {
|
||||
str += pad;
|
||||
}
|
||||
return str;
|
||||
}
|
||||
/**
|
||||
* Precision is a map of available Influx time precisions.
|
||||
* @type {Object.<String, String>}
|
||||
* @example
|
||||
* console.log(Precision.Hours); // => 'h'
|
||||
* console.log(Precision.Minutes); // => 'm'
|
||||
* console.log(Precision.Seconds); // => 's'
|
||||
* console.log(Precision.Milliseconds); // => 'ms'
|
||||
* console.log(Precision.Microseconds); // => 'u'
|
||||
* console.log(Precision.Nanoseconds); // => 'n'
|
||||
*/
|
||||
exports.Precision = Object.freeze({
|
||||
// Tslint:disable-line
|
||||
Hours: 'h',
|
||||
Microseconds: 'u',
|
||||
Milliseconds: 'ms',
|
||||
Minutes: 'm',
|
||||
Nanoseconds: 'n',
|
||||
Seconds: 's'
|
||||
});
|
||||
class MillisecondDateManipulator {
|
||||
format(date) {
|
||||
return ('"' +
|
||||
leftPad(date.getUTCFullYear(), 2) +
|
||||
'-' +
|
||||
leftPad(date.getUTCMonth() + 1, 2) +
|
||||
'-' +
|
||||
leftPad(date.getUTCDate(), 2) +
|
||||
' ' +
|
||||
leftPad(date.getUTCHours(), 2) +
|
||||
':' +
|
||||
leftPad(date.getUTCMinutes(), 2) +
|
||||
':' +
|
||||
leftPad(date.getUTCSeconds(), 2) +
|
||||
'.' +
|
||||
leftPad(date.getUTCMilliseconds(), 3) +
|
||||
'"');
|
||||
}
|
||||
toTime(date, precision) {
|
||||
let ms = date.getTime();
|
||||
switch (precision) {
|
||||
case 'n':
|
||||
ms *= 1000;
|
||||
case 'u':
|
||||
ms *= 1000;
|
||||
case 'ms':
|
||||
return String(ms);
|
||||
case 'h':
|
||||
ms /= 60;
|
||||
case 'm':
|
||||
ms /= 60;
|
||||
case 's':
|
||||
ms /= 1000;
|
||||
return String(Math.floor(ms));
|
||||
default:
|
||||
throw new Error(`Unknown precision '${precision}'!`);
|
||||
}
|
||||
}
|
||||
isoToDate(timestamp) {
|
||||
return new Date(timestamp);
|
||||
}
|
||||
timetoDate(timestamp, precision) {
|
||||
switch (precision) {
|
||||
case 'n':
|
||||
timestamp /= 1000;
|
||||
case 'u':
|
||||
timestamp /= 1000;
|
||||
case 'ms':
|
||||
return new Date(timestamp);
|
||||
case 'h':
|
||||
timestamp *= 60;
|
||||
case 'm':
|
||||
timestamp *= 60;
|
||||
case 's':
|
||||
timestamp *= 1000;
|
||||
return new Date(timestamp);
|
||||
default:
|
||||
throw new Error(`Unknown precision '${precision}'!`);
|
||||
}
|
||||
}
|
||||
}
|
||||
const nsPer = {
|
||||
ms: Math.pow(10, 6),
|
||||
s: Math.pow(10, 9)
|
||||
};
|
||||
function nanoIsoToTime(iso) {
|
||||
let [secondsStr, decimalStr] = iso.split('.');
|
||||
if (decimalStr === undefined) {
|
||||
decimalStr = '000000000';
|
||||
}
|
||||
else {
|
||||
decimalStr = rightPad(decimalStr.slice(0, -1), 9);
|
||||
secondsStr += 'Z';
|
||||
}
|
||||
const seconds = Math.floor(new Date(secondsStr).getTime() / 1000);
|
||||
return `${seconds}${decimalStr}`;
|
||||
}
|
||||
const nanoDateMethods = {
|
||||
getNanoTimeFromISO() {
|
||||
if (!this._cachedNanoISO) {
|
||||
this._cachedNanoTime = nanoIsoToTime(this._nanoISO);
|
||||
}
|
||||
return this._cachedNanoTime;
|
||||
},
|
||||
toNanoISOStringFromISO() {
|
||||
if (!this._cachedNanoISO) {
|
||||
this._cachedNanoTime = nanoIsoToTime(this._nanoISO);
|
||||
}
|
||||
const base = this.toISOString().slice(0, -4); // Slice off the `123Z` milliseconds
|
||||
return `${base}${this._cachedNanoTime.slice(-9)}Z`;
|
||||
},
|
||||
getNanoTimeFromStamp() {
|
||||
return this._nanoTime;
|
||||
},
|
||||
toNanoISOStringFromStamp() {
|
||||
const base = this.toISOString().slice(0, -4); // Slice off the `123Z` milliseconds
|
||||
return `${base}${this._nanoTime.slice(-9)}Z`;
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Converts a nanosecond unix timestamp to an INanoDate for node-influx. The
|
||||
* timestamp is provided as a string to prevent precision loss.
|
||||
*
|
||||
* Please see [A Moment for Times](https://node-influx.github.io/manual/
|
||||
* usage.html#a-moment-for-times) for a more complete and eloquent explanation
|
||||
* of time handling in this module.
|
||||
*
|
||||
* @param timestamp
|
||||
* @example
|
||||
* const date = toNanoDate('1475985480231035600')
|
||||
*
|
||||
* // You can use the returned Date as a normal date:
|
||||
* expect(date.getTime()).to.equal(1475985480231);
|
||||
*
|
||||
* // We decorate it with two additional methods to read
|
||||
* // nanosecond-precision results:
|
||||
* expect(date.getNanoTime()).to.equal('1475985480231035600');
|
||||
* expect(date.toNanoISOString()).to.equal('2016-10-09T03:58:00.231035600Z');
|
||||
*/
|
||||
function toNanoDate(timestamp) {
|
||||
const date = new Date(Math.floor(Number(timestamp) / nsPer.ms));
|
||||
date._nanoTime = timestamp;
|
||||
date.getNanoTime = nanoDateMethods.getNanoTimeFromStamp;
|
||||
date.toNanoISOString = nanoDateMethods.toNanoISOStringFromStamp;
|
||||
return date;
|
||||
}
|
||||
exports.toNanoDate = toNanoDate;
|
||||
function asNanoDate(date) {
|
||||
const d = date;
|
||||
if (d.getNanoTime) {
|
||||
return d;
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
class NanosecondsDateManipulator {
|
||||
format(date) {
|
||||
return ('"' +
|
||||
leftPad(date.getUTCFullYear(), 2) +
|
||||
'-' +
|
||||
leftPad(date.getUTCMonth() + 1, 2) +
|
||||
'-' +
|
||||
leftPad(date.getUTCDate(), 2) +
|
||||
' ' +
|
||||
leftPad(date.getUTCHours(), 2) +
|
||||
':' +
|
||||
leftPad(date.getUTCMinutes(), 2) +
|
||||
':' +
|
||||
leftPad(date.getUTCSeconds(), 2) +
|
||||
'.' +
|
||||
date.getNanoTime().slice(-9) +
|
||||
'"');
|
||||
}
|
||||
toTime(date, precision) {
|
||||
let ms = date.getTime();
|
||||
switch (precision) {
|
||||
case 'u':
|
||||
return date.getNanoTime().slice(0, -3);
|
||||
case 'n':
|
||||
return date.getNanoTime();
|
||||
case 'h':
|
||||
ms /= 60;
|
||||
case 'm':
|
||||
ms /= 60;
|
||||
case 's':
|
||||
ms /= 1000;
|
||||
case 'ms':
|
||||
return String(Math.floor(ms));
|
||||
default:
|
||||
throw new Error(`Unknown precision '${precision}'!`);
|
||||
}
|
||||
}
|
||||
isoToDate(timestamp) {
|
||||
const date = new Date(timestamp);
|
||||
date._nanoISO = timestamp;
|
||||
date.getNanoTime = nanoDateMethods.getNanoTimeFromISO;
|
||||
date.toNanoISOString = nanoDateMethods.toNanoISOStringFromISO;
|
||||
return date;
|
||||
}
|
||||
timetoDate(timestamp, precision) {
|
||||
switch (precision) {
|
||||
case 'h':
|
||||
timestamp *= 60;
|
||||
case 'm':
|
||||
timestamp *= 60;
|
||||
case 's':
|
||||
timestamp *= 1000;
|
||||
case 'ms':
|
||||
timestamp *= 1000;
|
||||
case 'u':
|
||||
timestamp *= 1000;
|
||||
case 'n': {
|
||||
const date = new Date(timestamp / nsPer.ms);
|
||||
date._nanoTime = String(timestamp);
|
||||
date.getNanoTime = nanoDateMethods.getNanoTimeFromStamp;
|
||||
date.toNanoISOString = nanoDateMethods.toNanoISOStringFromStamp;
|
||||
return date;
|
||||
}
|
||||
default:
|
||||
throw new Error(`Unknown precision '${precision}'!`);
|
||||
}
|
||||
}
|
||||
}
|
||||
const milliManipulator = new MillisecondDateManipulator();
|
||||
const nanoManipulator = new NanosecondsDateManipulator();
|
||||
/**
|
||||
* FormatDate converts the Date instance to Influx's date query format.
|
||||
* @private
|
||||
*/
|
||||
function formatDate(date) {
|
||||
const nano = asNanoDate(date);
|
||||
if (nano) {
|
||||
return nanoManipulator.format(nano);
|
||||
}
|
||||
return milliManipulator.format(date);
|
||||
}
|
||||
exports.formatDate = formatDate;
|
||||
/**
|
||||
* Converts a Date instance to a timestamp with the specified time precision.
|
||||
* @private
|
||||
*/
|
||||
function dateToTime(date, precision) {
|
||||
const nano = asNanoDate(date);
|
||||
if (nano) {
|
||||
return nanoManipulator.toTime(nano, precision);
|
||||
}
|
||||
return milliManipulator.toTime(date, precision);
|
||||
}
|
||||
exports.dateToTime = dateToTime;
|
||||
/**
|
||||
* Converts an ISO-formatted date or unix timestamp to a Date instance. If
|
||||
* the precision is finer than 'ms' the returned value will be an INanoDate.
|
||||
* @private
|
||||
*/
|
||||
function isoOrTimeToDate(stamp, precision = 'n') {
|
||||
if (typeof stamp === 'string') {
|
||||
return nanoManipulator.isoToDate(stamp);
|
||||
}
|
||||
return nanoManipulator.timetoDate(stamp, precision);
|
||||
}
|
||||
exports.isoOrTimeToDate = isoOrTimeToDate;
|
||||
/**
|
||||
* Converts a timestamp to a string with the correct precision. Assumes
|
||||
* that raw number and string instances are already in the correct precision.
|
||||
* @private
|
||||
*/
|
||||
function castTimestamp(timestamp, precision) {
|
||||
if (typeof timestamp === 'string') {
|
||||
if (!ds_1.isNumeric(timestamp)) {
|
||||
throw new Error(`Expected numeric value for, timestamp, but got '${timestamp}'!`);
|
||||
}
|
||||
return timestamp;
|
||||
}
|
||||
if (typeof timestamp === 'number') {
|
||||
return String(timestamp);
|
||||
}
|
||||
return dateToTime(timestamp, precision);
|
||||
}
|
||||
exports.castTimestamp = castTimestamp;
|
||||
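Editor's note: a brief sketch of the nanosecond date helpers above, mirroring the doc comment; not part of the commit, and the require path is an assumption.

const { toNanoDate, Precision } = require('influx/lib/src/grammar/times');

const date = toNanoDate('1475985480231035600');

console.log(date.getTime());         // => 1475985480231 (usable as a normal Date)
console.log(date.getNanoTime());     // => '1475985480231035600'
console.log(date.toNanoISOString()); // => '2016-10-09T03:58:00.231035600Z'
console.log(Precision.Nanoseconds);  // => 'n'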
nodered/rootfs/data/node_modules/influx/lib/src/host.d.ts (26 lines, generated, vendored, normal file)
@@ -0,0 +1,26 @@
/// <reference types="node" />
import { RequestOptions } from 'https';
import * as urlModule from 'url';
import { IBackoffStrategy } from './backoff/backoff';
export declare class Host {
    private backoff;
    readonly options: RequestOptions;
    readonly url: urlModule.Url;
    /**
     * Creates a new Host instance.
     * @param url
     * @param backoff
     */
    constructor(url: string, backoff: IBackoffStrategy, options: RequestOptions);
    /**
     * Marks a failure on the host and returns the length of time it
     * should be removed from the pool
     * @return removal time in milliseconds
     */
    fail(): number;
    /**
     * Should be called when a successful operation is run against the host.
     * It resets the host's backoff strategy.
     */
    success(): void;
}
nodered/rootfs/data/node_modules/influx/lib/src/host.js (33 lines, generated, vendored, normal file)
@@ -0,0 +1,33 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const urlModule = require("url");
class Host {
    /**
     * Creates a new Host instance.
     * @param url
     * @param backoff
     */
    constructor(url, backoff, options) {
        this.backoff = backoff;
        this.options = options;
        this.url = urlModule.parse(url);
    }
    /**
     * Marks a failure on the host and returns the length of time it
     * should be removed from the pool
     * @return removal time in milliseconds
     */
    fail() {
        const value = this.backoff.getDelay();
        this.backoff = this.backoff.next();
        return value;
    }
    /**
     * Should be called when a successful operation is run against the host.
     * It resets the host's backoff strategy.
     */
    success() {
        this.backoff = this.backoff.reset();
    }
}
exports.Host = Host;
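Editor's note: a minimal sketch of how Host pairs with a backoff strategy; not part of the commit, and the require paths, URL, and option values are assumptions.

const { Host } = require('influx/lib/src/host');
const { ExponentialBackoff } = require('influx/lib/src/backoff/exponential');

const host = new Host(
    'http://127.0.0.1:8086',
    new ExponentialBackoff({ initial: 300, max: 10000, random: 1 }),
    {},
);

console.log(host.fail()); // => milliseconds the pool should keep this host benched
host.success();           // a successful request resets the backoff strategy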
nodered/rootfs/data/node_modules/influx/lib/src/index.d.ts (736 lines, generated, vendored, normal file)
@@ -0,0 +1,736 @@
|
||||
/// <reference types="node" />
|
||||
import { RequestOptions } from 'https';
|
||||
import * as b from './builder';
|
||||
import * as grammar from './grammar';
|
||||
import { IPingStats, IPoolOptions } from './pool';
|
||||
import { IResults } from './results';
|
||||
import { ISchemaOptions } from './schema';
|
||||
export * from './builder';
|
||||
export { INanoDate, FieldType, Precision, Raw, TimePrecision, escape, toNanoDate } from './grammar';
|
||||
export { ISchemaOptions } from './schema';
|
||||
export { IPingStats, IPoolOptions } from './pool';
|
||||
export { IResults, IResponse, ResultError } from './results';
|
||||
export interface IHostConfig {
|
||||
/**
|
||||
* Influx host to connect to, defaults to 127.0.0.1.
|
||||
*/
|
||||
host?: string;
|
||||
/**
|
||||
* Influx port to connect to, defaults to 8086.
|
||||
*/
|
||||
port?: number;
|
||||
/**
|
||||
* Path for Influx within the host, defaults to ''.
|
||||
* May be used if Influx is behind a reverse proxy or load balancer.
|
||||
*/
|
||||
path?: string;
|
||||
/**
|
||||
* Protocol to connect over, defaults to 'http'.
|
||||
*/
|
||||
protocol?: 'http' | 'https';
|
||||
/**
|
||||
* Optional request option overrides.
|
||||
*/
|
||||
options?: RequestOptions;
|
||||
}
|
||||
export interface ISingleHostConfig extends IHostConfig {
|
||||
/**
|
||||
* Username for connecting to the database. Defaults to 'root'.
|
||||
*/
|
||||
username?: string;
|
||||
/**
|
||||
* Password for connecting to the database. Defaults to 'root'.
|
||||
*/
|
||||
password?: string;
|
||||
/**
|
||||
* Default database to write information to.
|
||||
*/
|
||||
database?: string;
|
||||
/**
|
||||
* Settings for the connection pool.
|
||||
*/
|
||||
pool?: IPoolOptions;
|
||||
/**
|
||||
* A list of schema for measurements in the database.
|
||||
*/
|
||||
schema?: ISchemaOptions[];
|
||||
}
|
||||
export interface IClusterConfig {
|
||||
/**
|
||||
* Username for connecting to the database. Defaults to 'root'.
|
||||
*/
|
||||
username?: string;
|
||||
/**
|
||||
* Password for connecting to the database. Defaults to 'root'.
|
||||
*/
|
||||
password?: string;
|
||||
/**
|
||||
* Default database to write information to.
|
||||
*/
|
||||
database?: string;
|
||||
/**
|
||||
* A list of cluster hosts to connect to.
|
||||
*/
|
||||
hosts: IHostConfig[];
|
||||
/**
|
||||
* Settings for the connection pool.
|
||||
*/
|
||||
pool?: IPoolOptions;
|
||||
/**
|
||||
* A list of schema for measurements in the database.
|
||||
*/
|
||||
schema?: ISchemaOptions[];
|
||||
}
|
||||
export interface IPoint {
|
||||
/**
|
||||
* Measurement is the Influx measurement name.
|
||||
*/
|
||||
measurement?: string;
|
||||
/**
|
||||
* Tags is the list of tag values to insert.
|
||||
*/
|
||||
tags?: {
|
||||
[name: string]: string;
|
||||
};
|
||||
/**
|
||||
* Fields is the list of field values to insert.
|
||||
*/
|
||||
fields?: {
|
||||
[name: string]: any;
|
||||
};
|
||||
/**
|
||||
* Timestamp tags this measurement with a date. This can be a Date object,
|
||||
* in which case we'll adjust it to the desired precision, or a numeric
|
||||
* string or number, in which case it gets passed directly to Influx.
|
||||
*/
|
||||
timestamp?: Date | string | number;
|
||||
}
|
||||
export interface IWriteOptions {
|
||||
/**
|
||||
* Precision at which the points are written, defaults to nanoseconds 'n'.
|
||||
*/
|
||||
precision?: grammar.TimePrecision;
|
||||
/**
|
||||
* Retention policy to write the points under, defaults to the DEFAULT
|
||||
* database policy.
|
||||
*/
|
||||
retentionPolicy?: string;
|
||||
/**
|
||||
* Database under which to write the points. This is required if a default
|
||||
* database is not provided in Influx.
|
||||
*/
|
||||
database?: string;
|
||||
}
|
||||
export interface IQueryOptions {
|
||||
/**
|
||||
* Defines the precision at which to query points. When left blank, it will
|
||||
* query in nanosecond precision.
|
||||
*/
|
||||
precision?: grammar.TimePrecision;
|
||||
/**
|
||||
* Retention policy to query from, defaults to the DEFAULT
|
||||
* database policy.
|
||||
*/
|
||||
retentionPolicy?: string;
|
||||
/**
|
||||
* Database under which to query the points. This is required if a default
|
||||
* database is not provided in Influx.
|
||||
*/
|
||||
database?: string;
|
||||
}
|
||||
/**
|
||||
* IRetentionOptions are passed into the {@link
|
||||
* InfluxDB#createRetentionPolicy} and {@link InfluxDB#alterRetentionPolicy}.
|
||||
* See the [Downsampling and Retention page](https://docs.influxdata.com/
|
||||
* influxdb/v1.0/guides/downsampling_and_retention/) on the Influx docs for
|
||||
* more information.
|
||||
*/
|
||||
export interface IRetentionOptions {
|
||||
database?: string;
|
||||
duration: string;
|
||||
replication: number;
|
||||
isDefault?: boolean;
|
||||
}
|
||||
/**
|
||||
* InfluxDB is the public interface to run queries against your database.
|
||||
* This is a 'driver-level' module, not a full-fledged ORM or ODM; you run
|
||||
* queries directly by calling methods on this class.
|
||||
*
|
||||
* Please check out some of [the tutorials](https://node-influx.github.io/manual/tutorial.html)
|
||||
* if you want help getting started!
|
||||
*
|
||||
* @example
|
||||
* const Influx = require('influx');
|
||||
* const influx = new Influx.InfluxDB({
|
||||
* host: 'localhost',
|
||||
* database: 'express_response_db',
|
||||
* schema: [
|
||||
* {
|
||||
* measurement: 'response_times',
|
||||
* fields: {
|
||||
* path: Influx.FieldType.STRING,
|
||||
* duration: Influx.FieldType.INTEGER
|
||||
* },
|
||||
* tags: [
|
||||
* 'host'
|
||||
* ]
|
||||
* }
|
||||
* ]
|
||||
* })
|
||||
*
|
||||
* @example
|
||||
* // Connect over HTTPS
|
||||
* const Influx = require('influx');
|
||||
* const influx = new Influx.InfluxDB({
|
||||
* host: 'myinfluxdbhost',
|
||||
* port: 443,
|
||||
* protocol: 'https',
|
||||
* database: 'express_response_db',
|
||||
* schema: [
|
||||
* {
|
||||
* measurement: 'response_times',
|
||||
* fields: {
|
||||
* path: Influx.FieldType.STRING,
|
||||
* duration: Influx.FieldType.INTEGER
|
||||
* },
|
||||
* tags: [
|
||||
* 'host'
|
||||
* ]
|
||||
* }
|
||||
* ]
|
||||
* })
|
||||
*
|
||||
* influx.writePoints([
|
||||
* {
|
||||
* measurement: 'response_times',
|
||||
* tags: { host: os.hostname() },
|
||||
* fields: { duration, path: req.path },
|
||||
* }
|
||||
* ]).then(() => {
|
||||
* return influx.query(`
|
||||
* select * from response_times
|
||||
* where host = ${Influx.escape.stringLit(os.hostname())}
|
||||
* order by time desc
|
||||
* limit 10
|
||||
* `)
|
||||
* }).then(rows => {
|
||||
* rows.forEach(row => console.log(`A request to ${row.path} took ${row.duration}ms`))
|
||||
* })
|
||||
*/
|
||||
export declare class InfluxDB {
|
||||
/**
|
||||
* Connect pool for making requests.
|
||||
* @private
|
||||
*/
|
||||
private _pool;
|
||||
/**
|
||||
* Config options for Influx.
|
||||
* @private
|
||||
*/
|
||||
private _options;
|
||||
/**
|
||||
* Map of Schema instances defining measurements in Influx.
|
||||
* @private
|
||||
*/
|
||||
private _schema;
|
||||
constructor(options: ISingleHostConfig);
|
||||
/**
|
||||
* Connect to an InfluxDB cluster by specifying a
|
||||
* set of connection options.
|
||||
*/
|
||||
constructor(options: IClusterConfig);
|
||||
/**
|
||||
* Connect to an InfluxDB instance using a configuration URL.
|
||||
* @example
|
||||
* new InfluxDB('http://user:password@host:8086/database')
|
||||
*/
|
||||
constructor(url: string);
|
||||
/**
|
||||
* Connects to a local, default Influx instance.
|
||||
*/
|
||||
constructor();
|
||||
/**
|
||||
* Adds specified schema for better fields coercing.
|
||||
*
|
||||
* @param {ISchemaOptions} schema
|
||||
* @memberof InfluxDB
|
||||
*/
|
||||
addSchema(schema: ISchemaOptions): void;
|
||||
/**
|
||||
* Creates a new database with the provided name.
|
||||
* @param databaseName
|
||||
* @return
|
||||
* @example
|
||||
* influx.createDatabase('mydb')
|
||||
*/
|
||||
createDatabase(databaseName: string): Promise<void>;
|
||||
/**
|
||||
* Deletes a database with the provided name.
|
||||
* @param databaseName
|
||||
* @return
|
||||
* @example
|
||||
* influx.dropDatabase('mydb')
|
||||
*/
|
||||
dropDatabase(databaseName: string): Promise<void>;
|
||||
/**
|
||||
* Returns array of database names. Requires cluster admin privileges.
|
||||
* @returns a list of database names
|
||||
* @example
|
||||
* influx.getDatabaseNames().then(names =>
|
||||
* console.log('My database names are: ' + names.join(', ')));
|
||||
*/
|
||||
getDatabaseNames(): Promise<string[]>;
|
||||
/**
|
||||
* Returns array of measurements.
|
||||
* @returns a list of measurement names
|
||||
* @param [database] the database the measurement lives in, optional
|
||||
* if a default database is provided.
|
||||
* @example
|
||||
* influx.getMeasurements().then(names =>
|
||||
* console.log('My measurement names are: ' + names.join(', ')));
|
||||
*/
|
||||
getMeasurements(database?: string): Promise<string[]>;
|
||||
/**
|
||||
* Returns a list of all series within the target measurement, or from the
|
||||
* entire database if a measurement isn't provided.
|
||||
* @param [options]
|
||||
* @param [options.measurement] if provided, we'll only get series
|
||||
* from within that measurement.
|
||||
* @param [options.database] the database the series lives in,
|
||||
* optional if a default database is provided.
|
||||
* @returns a list of series names
|
||||
* @example
|
||||
* influx.getSeries().then(names => {
|
||||
* console.log('My series names in my_measurement are: ' + names.join(', '))
|
||||
* })
|
||||
*
|
||||
* influx.getSeries({
|
||||
* measurement: 'my_measurement',
|
||||
* database: 'my_db'
|
||||
* }).then(names => {
|
||||
* console.log('My series names in my_measurement are: ' + names.join(', '))
|
||||
* })
|
||||
*/
|
||||
getSeries(options?: {
|
||||
measurement?: string;
|
||||
database?: string;
|
||||
}): Promise<string[]>;
|
||||
/**
|
||||
* Removes a measurement from the database.
|
||||
* @param measurement
|
||||
* @param [database] the database the measurement lives in, optional
|
||||
* if a default database is provided.
|
||||
* @return
|
||||
* @example
|
||||
* influx.dropMeasurement('my_measurement')
|
||||
*/
|
||||
dropMeasurement(measurement: string, database?: string): Promise<void>;
|
||||
/**
|
||||
 * Removes one or more series from InfluxDB.
|
||||
*
|
||||
* @returns
|
||||
* @example
|
||||
 * // The following pairs of queries are equivalent: you can choose either to
|
||||
 * // use our builder or pass in a string directly. The builder takes care
|
||||
* // of escaping and most syntax handling for you.
|
||||
*
|
||||
* influx.dropSeries({ where: e => e.tag('cpu').equals.value('cpu8') })
|
||||
* influx.dropSeries({ where: '"cpu" = \'cpu8\'' })
|
||||
* // DROP SERIES WHERE "cpu" = 'cpu8'
|
||||
*
|
||||
* influx.dropSeries({ measurement: m => m.name('cpu').policy('autogen') })
|
||||
* influx.dropSeries({ measurement: '"cpu"."autogen"' })
|
||||
* // DROP SERIES FROM "autogen"."cpu"
|
||||
*
|
||||
* influx.dropSeries({
|
||||
* measurement: m => m.name('cpu').policy('autogen'),
|
||||
* where: e => e.tag('cpu').equals.value('cpu8'),
|
||||
* database: 'my_db'
|
||||
* })
|
||||
* // DROP SERIES FROM "autogen"."cpu" WHERE "cpu" = 'cpu8'
|
||||
*/
|
||||
dropSeries(options: b.measurement | b.where | {
|
||||
database: string;
|
||||
}): Promise<void>;
|
||||
/**
|
||||
* Returns a list of users on the Influx database.
|
||||
* @return
|
||||
* @example
|
||||
* influx.getUsers().then(users => {
|
||||
* users.forEach(user => {
|
||||
* if (user.admin) {
|
||||
* console.log(user.user, 'is an admin!')
|
||||
* } else {
|
||||
* console.log(user.user, 'is not an admin!')
|
||||
* }
|
||||
* })
|
||||
* })
|
||||
*/
|
||||
getUsers(): Promise<IResults<{
|
||||
user: string;
|
||||
admin: boolean;
|
||||
}>>;
|
||||
/**
|
||||
* Creates a new InfluxDB user.
|
||||
* @param username
|
||||
* @param password
|
||||
* @param [admin=false] If true, the user will be given all
|
||||
* privileges on all databases.
|
||||
* @return
|
||||
* @example
|
||||
* influx.createUser('connor', 'pa55w0rd', true) // make 'connor' an admin
|
||||
*
|
||||
* // make non-admins:
|
||||
* influx.createUser('not_admin', 'pa55w0rd')
|
||||
*/
|
||||
createUser(username: string, password: string, admin?: boolean): Promise<void>;
|
||||
/**
|
||||
* Sets a password for an Influx user.
|
||||
* @param username
|
||||
* @param password
|
||||
* @return
|
||||
* @example
|
||||
* influx.setPassword('connor', 'pa55w0rd')
|
||||
*/
|
||||
setPassword(username: string, password: string): Promise<void>;
|
||||
/**
|
||||
* Grants a privilege to a specified user.
|
||||
* @param username
|
||||
* @param privilege Should be one of 'READ' or 'WRITE'
|
||||
* @param [database] If not provided, uses the default database.
|
||||
* @return
|
||||
* @example
|
||||
* influx.grantPrivilege('connor', 'READ', 'my_db') // grants read access on my_db to connor
|
||||
*/
|
||||
grantPrivilege(username: string, privilege: 'READ' | 'WRITE', database?: string): Promise<void>;
|
||||
/**
|
||||
* Removes a privilege from a specified user.
|
||||
* @param username
|
||||
* @param privilege Should be one of 'READ' or 'WRITE'
|
||||
* @param [database] If not provided, uses the default database.
|
||||
* @return
|
||||
* @example
|
||||
* influx.revokePrivilege('connor', 'READ', 'my_db') // removes read access on my_db from connor
|
||||
*/
|
||||
revokePrivilege(username: string, privilege: 'READ' | 'WRITE', database?: string): Promise<void>;
|
||||
/**
|
||||
* Grants admin privileges to a specified user.
|
||||
* @param username
|
||||
* @return
|
||||
* @example
|
||||
* influx.grantAdminPrivilege('connor')
|
||||
*/
|
||||
grantAdminPrivilege(username: string): Promise<void>;
|
||||
/**
|
||||
 * Removes an admin privilege from a specified user.
|
||||
* @param username
|
||||
* @return
|
||||
* @example
|
||||
* influx.revokeAdminPrivilege('connor')
|
||||
*/
|
||||
revokeAdminPrivilege(username: string): Promise<void>;
|
||||
/**
|
||||
* Removes a user from the database.
|
||||
* @param username
|
||||
* @return
|
||||
* @example
|
||||
* influx.dropUser('connor')
|
||||
*/
|
||||
dropUser(username: string): Promise<void>;
|
||||
/**
|
||||
* Creates a continuous query in a database
|
||||
* @param name The query name, for later reference
|
||||
* @param query The body of the query to run
|
||||
* @param [database] If not provided, uses the default database.
|
||||
* @param [resample] If provided, adds resample policy
|
||||
* @return
|
||||
* @example
|
||||
* influx.createContinuousQuery('downsample_cpu_1h', `
|
||||
* SELECT MEAN(cpu) INTO "7d"."perf"
|
||||
* FROM "1d"."perf" GROUP BY time(1m)
|
||||
* `, undefined, 'RESAMPLE FOR 7m')
|
||||
*/
|
||||
createContinuousQuery(name: string, query: string, database?: string, resample?: string): Promise<void>;
|
||||
/**
|
||||
 * Returns a list of continuous queries in the database.
|
||||
* @param [database] If not provided, uses the default database.
|
||||
* @return
|
||||
* @example
|
||||
* influx.showContinousQueries()
|
||||
*/
|
||||
showContinousQueries(database?: string): Promise<IResults<{
|
||||
name: string;
|
||||
query: string;
|
||||
}>>;
|
||||
/**
|
||||
 * Drops a continuous query from a database
|
||||
* @param name The query name
|
||||
* @param [database] If not provided, uses the default database.
|
||||
* @return
|
||||
* @example
|
||||
* influx.dropContinuousQuery('downsample_cpu_1h')
|
||||
*/
|
||||
dropContinuousQuery(name: string, database?: string): Promise<void>;
|
||||
/**
|
||||
* Creates a new retention policy on a database. You can read more about
|
||||
* [Downsampling and Retention](https://docs.influxdata.com/influxdb/v1.0/
|
||||
* guides/downsampling_and_retention/) on the InfluxDB website.
|
||||
*
|
||||
* @param name The retention policy name
|
||||
* @param options
|
||||
* @param [options.database] Database to create the policy on,
|
||||
* uses the default database if not provided.
|
||||
* @param options.duration How long data in the retention policy
|
||||
* should be stored for, should be in a format like `7d`. See details
|
||||
* [here](https://docs.influxdata.com/influxdb/v1.0/query_language/spec/#durations)
|
||||
* @param options.replication How many servers data in the series
|
||||
* should be replicated to.
|
||||
* @param [options.isDefault] Whether the retention policy should
|
||||
* be the default policy on the database.
|
||||
* @return
|
||||
* @example
|
||||
* influx.createRetentionPolicy('7d', {
|
||||
* duration: '7d',
|
||||
* replication: 1
|
||||
* })
|
||||
*/
|
||||
createRetentionPolicy(name: string, options: IRetentionOptions): Promise<void>;
|
||||
/**
|
||||
* Alters an existing retention policy on a database.
|
||||
*
|
||||
* @param name The retention policy name
|
||||
* @param options
|
||||
* @param [options.database] Database to create the policy on,
|
||||
* uses the default database if not provided.
|
||||
* @param options.duration How long data in the retention policy
|
||||
* should be stored for, should be in a format like `7d`. See details
|
||||
* [here](https://docs.influxdata.com/influxdb/v1.0/query_language/spec/#durations)
|
||||
* @param options.replication How many servers data in the series
|
||||
* should be replicated to.
|
||||
 * @param [options.isDefault] Whether the retention policy should
|
||||
* be the default policy on the database.
|
||||
* @return
|
||||
* @example
|
||||
* influx.alterRetentionPolicy('7d', {
|
||||
* duration: '7d',
|
||||
* replication: 1,
|
||||
 * isDefault: true
|
||||
* })
|
||||
*/
|
||||
alterRetentionPolicy(name: string, options: IRetentionOptions): Promise<void>;
|
||||
/**
|
||||
* Deletes a retention policy and associated data. Note that the data will
|
||||
* not be immediately destroyed, and will hang around until Influx's
|
||||
* bi-hourly cron.
|
||||
*
|
||||
* @param name The retention policy name
|
||||
* @param [database] Database name that the policy lives in,
|
||||
* uses the default database if not provided.
|
||||
* @return
|
||||
* @example
|
||||
* influx.dropRetentionPolicy('7d')
|
||||
*/
|
||||
dropRetentionPolicy(name: string, database?: string): Promise<void>;
|
||||
/**
|
||||
* Shows retention policies on the database
|
||||
*
|
||||
* @param [database] The database to list policies on, uses the
|
||||
* default database if not provided.
|
||||
* @return
|
||||
* @example
|
||||
* influx.showRetentionPolicies().then(policies => {
|
||||
* expect(policies.slice()).to.deep.equal([
|
||||
* {
|
||||
* name: 'autogen',
|
||||
* duration: '0s',
|
||||
* shardGroupDuration: '168h0m0s',
|
||||
* replicaN: 1,
|
||||
* default: true,
|
||||
* },
|
||||
* {
|
||||
* name: '7d',
|
||||
* duration: '168h0m0s',
|
||||
* shardGroupDuration: '24h0m0s',
|
||||
* replicaN: 1,
|
||||
* default: false,
|
||||
* },
|
||||
* ])
|
||||
* })
|
||||
*/
|
||||
showRetentionPolicies(database?: string): Promise<IResults<{
|
||||
default: boolean;
|
||||
duration: string;
|
||||
name: string;
|
||||
replicaN: number;
|
||||
shardGroupDuration: string;
|
||||
}>>;
|
||||
/**
|
||||
* Shows shards on the database
|
||||
*
|
||||
* @param [database] The database to list policies on, uses the
|
||||
* default database if not provided.
|
||||
* @return
|
||||
* @example
|
||||
* influx.showShards().then(shards => {
|
||||
* expect(shards.slice()).to.deep.equal([
|
||||
* {
|
||||
 * id: 1,
|
||||
* database: 'database',
|
||||
* retention_policy: 'autogen',
|
||||
* shard_group: 1,
|
||||
* start_time: '2019-05-06T00:00:00Z',
|
||||
* end_time: '2019-05-13T00:00:00Z',
|
||||
* expiry_time: '2019-05-13T00:00:00Z',
|
||||
* owners: null,
|
||||
* },
|
||||
* ])
|
||||
* })
|
||||
*/
|
||||
showShards(database?: string): Promise<Array<{
|
||||
id: number;
|
||||
database: string;
|
||||
retention_policy: string;
|
||||
shard_group: number;
|
||||
start_time: string;
|
||||
end_time: string;
|
||||
expiry_time: string;
|
||||
owners: string;
|
||||
}>>;
|
||||
/**
|
||||
* Drops a shard with the provided number.
|
||||
* @param shard_id
|
||||
* @return
|
||||
* @example
|
||||
* influx.dropShard(3)
|
||||
*/
|
||||
dropShard(shard_id: number): Promise<void>;
|
||||
/**
|
||||
* WritePoints sends a list of points together in a batch to InfluxDB. In
|
||||
* each point you must specify the measurement name to write into as well
|
||||
* as a list of tag and field values. Optionally, you can specify the
|
||||
* time to tag that point at, defaulting to the current time.
|
||||
*
|
||||
* If you defined a schema for the measurement in the options you passed
|
||||
* to `new Influx(options)`, we'll use that to make sure that types get
|
||||
* cast correctly and that there are no extraneous fields or columns.
|
||||
*
|
||||
* For best performance, it's recommended that you batch your data into
|
||||
* sets of a couple thousand records before writing it. In the future we'll
|
||||
* have some utilities within node-influx to make this easier.
|
||||
*
|
||||
* ---
|
||||
*
|
||||
* A note when using manually-specified times and precisions: by default
|
||||
* we write using the `ms` precision since that's what JavaScript gives us.
|
||||
* You can adjust this. However, there is some special behaviour if you
|
||||
* manually specify a timestamp in your points:
|
||||
* - if you specify the timestamp as a Date object, we'll convert it to
|
||||
* milliseconds and manipulate it as needed to get the right precision
|
||||
 * - if you provide an INanoDate as returned from {@link toNanoDate} or the
|
||||
* results from an Influx query, we'll be able to pull the precise
|
||||
* nanosecond timestamp and manipulate it to get the right precision
|
||||
* - if you provide a string or number as the timestamp, we'll pass it
|
||||
* straight into Influx.
|
||||
*
|
||||
* Please see the IPoint and IWriteOptions types for a
|
||||
* full list of possible options.
|
||||
*
|
||||
* @param points
|
||||
* @param [options]
|
||||
* @return
|
||||
* @example
|
||||
* // write a point into the default database with
|
||||
* // the default retention policy.
|
||||
* influx.writePoints([
|
||||
* {
|
||||
* measurement: 'perf',
|
||||
* tags: { host: 'box1.example.com' },
|
||||
* fields: { cpu: getCpuUsage(), mem: getMemUsage() },
|
||||
* }
|
||||
* ])
|
||||
*
|
||||
* // you can manually specify the database,
|
||||
* // retention policy, and time precision:
|
||||
* influx.writePoints([
|
||||
* {
|
||||
* measurement: 'perf',
|
||||
* tags: { host: 'box1.example.com' },
|
||||
* fields: { cpu: getCpuUsage(), mem: getMemUsage() },
|
||||
* timestamp: getLastRecordedTime(),
|
||||
* }
|
||||
* ], {
|
||||
* database: 'my_db',
|
||||
* retentionPolicy: '1d',
|
||||
* precision: 's'
|
||||
* })
|
||||
*/
|
||||
writePoints(points: IPoint[], options?: IWriteOptions): Promise<void>;
|
||||
/**
|
||||
* WriteMeasurement functions similarly to {@link InfluxDB#writePoints}, but
|
||||
* it automatically fills in the `measurement` value for all points for you.
|
||||
*
|
||||
* @param measurement
|
||||
* @param points
|
||||
* @param [options]
|
||||
* @return
|
||||
* @example
|
||||
* influx.writeMeasurement('perf', [
|
||||
* {
|
||||
* tags: { host: 'box1.example.com' },
|
||||
* fields: { cpu: getCpuUsage(), mem: getMemUsage() },
|
||||
* }
|
||||
* ])
|
||||
*/
|
||||
writeMeasurement(measurement: string, points: IPoint[], options?: IWriteOptions): Promise<void>;
|
||||
query<T>(query: string[], options?: IQueryOptions): Promise<Array<IResults<T>>>;
|
||||
query<T>(query: string, options?: IQueryOptions): Promise<IResults<T>>;
|
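/*
 * Illustrative sketch (not part of the generated declarations): the two `query`
 * overloads above mean the result shape follows the input shape. A single query
 * string resolves to one IResults<T>, while an array of queries resolves to an
 * array of IResults<T>, one entry per statement. A hedged usage sketch, assuming
 * a hypothetical `response_times` measurement with `path` and `duration` fields:
 *
 *   interface ResponseTimeRow { path: string; duration: number; }
 *
 *   // Single query -> single result set
 *   const rows = await influx.query<ResponseTimeRow>('select * from response_times limit 10');
 *
 *   // Array of queries -> array of result sets, in the same order
 *   const [slow, fast] = await influx.query<ResponseTimeRow>([
 *     'select * from response_times where duration > 1000',
 *     'select * from response_times where duration <= 1000',
 *   ]);
 */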
||||
/**
|
||||
* QueryRaw functions similarly to .query() but it does no fancy
|
||||
* transformations on the returned data; it calls `JSON.parse` and returns
|
||||
* those results verbatim.
|
||||
*
|
||||
* @param query
|
||||
* @param [options]
|
||||
* @return
|
||||
* @example
|
||||
* influx.queryRaw('select * from perf').then(rawData => {
|
||||
* console.log(rawData)
|
||||
* })
|
||||
*/
|
||||
queryRaw(query: string | string[], options?: IQueryOptions): Promise<any>;
|
||||
/**
|
||||
* Pings all available hosts, collecting online status and version info.
|
||||
* @param timeout Given in milliseconds
|
||||
* @return
|
||||
* @example
|
||||
* influx.ping(5000).then(hosts => {
|
||||
* hosts.forEach(host => {
|
||||
* if (host.online) {
|
||||
* console.log(`${host.url.host} responded in ${host.rtt}ms running ${host.version})`)
|
||||
* } else {
|
||||
* console.log(`${host.url.host} is offline :(`)
|
||||
* }
|
||||
* })
|
||||
* })
|
||||
*/
|
||||
ping(timeout: number): Promise<IPingStats[]>;
|
||||
/**
|
||||
 * Returns the default database that queries operate on. It throws if called
|
||||
* when a default database isn't set.
|
||||
* @private
|
||||
*/
|
||||
private _defaultDB;
|
||||
/**
|
||||
* Creates options to be passed into the pool to query databases.
|
||||
* @private
|
||||
*/
|
||||
private _getQueryOpts;
|
||||
/**
|
||||
* Creates specified measurement schema
|
||||
*
|
||||
* @private
|
||||
* @param {ISchemaOptions} schema
|
||||
* @memberof InfluxDB
|
||||
*/
|
||||
private _createSchema;
|
||||
}
|
||||
982
nodered/rootfs/data/node_modules/influx/lib/src/index.js
generated
vendored
Normal file
@@ -0,0 +1,982 @@
|
||||
"use strict";
|
||||
/* eslint-disable @typescript-eslint/unified-signatures */
|
||||
/* eslint-disable no-dupe-class-members */
|
||||
/* eslint-disable no-prototype-builtins */
|
||||
function __export(m) {
|
||||
for (var p in m) if (!exports.hasOwnProperty(p)) exports[p] = m[p];
|
||||
}
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const url = require("url");
|
||||
const b = require("./builder");
|
||||
const grammar = require("./grammar");
|
||||
const pool_1 = require("./pool");
|
||||
const results_1 = require("./results");
|
||||
const schema_1 = require("./schema");
|
||||
const defaultHost = Object.freeze({
|
||||
host: '127.0.0.1',
|
||||
port: 8086,
|
||||
path: '',
|
||||
protocol: 'http'
|
||||
});
|
||||
const defaultOptions = Object.freeze({
|
||||
database: null,
|
||||
hosts: [],
|
||||
password: 'root',
|
||||
schema: [],
|
||||
username: 'root'
|
||||
});
|
||||
__export(require("./builder"));
|
||||
var grammar_1 = require("./grammar");
|
||||
exports.FieldType = grammar_1.FieldType;
|
||||
exports.Precision = grammar_1.Precision;
|
||||
exports.Raw = grammar_1.Raw;
|
||||
exports.escape = grammar_1.escape;
|
||||
exports.toNanoDate = grammar_1.toNanoDate;
|
||||
var results_2 = require("./results");
|
||||
exports.ResultError = results_2.ResultError;
|
||||
/**
|
||||
 * Parses the URL out into an IClusterConfig object
|
||||
*/
|
||||
function parseOptionsUrl(addr) {
|
||||
const parsed = url.parse(addr);
|
||||
const options = {
|
||||
host: parsed.hostname,
|
||||
port: Number(parsed.port),
|
||||
protocol: parsed.protocol.slice(0, -1)
|
||||
};
|
||||
if (parsed.auth) {
|
||||
[options.username, options.password] = parsed.auth.split(':');
|
||||
}
|
||||
if (parsed.pathname.length > 1) {
|
||||
options.database = parsed.pathname.slice(1);
|
||||
}
|
||||
return options;
|
||||
}
|
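/*
 * Illustrative sketch (not part of the library source): given the parsing above, a DSN
 * such as 'https://connor:pa55w0rd@db.example.com:8086/my_db' would yield roughly:
 *
 *   {
 *     host: 'db.example.com',
 *     port: 8086,
 *     protocol: 'https',
 *     username: 'connor',
 *     password: 'pa55w0rd',
 *     database: 'my_db'
 *   }
 *
 * The names in the DSN are placeholders; only the field mapping comes from the code above.
 */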
||||
/**
|
||||
* Works similarly to Object.assign, but only overwrites
|
||||
* properties that resolve to undefined.
|
||||
*/
|
||||
function defaults(target, ...srcs) {
|
||||
srcs.forEach(src => {
|
||||
Object.keys(src).forEach((key) => {
|
||||
if (target[key] === undefined) {
|
||||
target[key] = src[key];
|
||||
}
|
||||
});
|
||||
});
|
||||
return target;
|
||||
}
|
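/*
 * Illustrative sketch (not part of the library source): unlike Object.assign, defaults()
 * only fills in keys that are still undefined on the target, so explicit values win:
 *
 *   defaults({ port: 9999, path: undefined }, { port: 8086, path: '', protocol: 'http' });
 *   // => { port: 9999, path: '', protocol: 'http' }
 */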
||||
/**
|
||||
* InfluxDB is the public interface to run queries against your database.
|
||||
 * This is a 'driver-level' module, not a full-fledged ORM or ODM; you run
|
||||
* queries directly by calling methods on this class.
|
||||
*
|
||||
* Please check out some of [the tutorials](https://node-influx.github.io/manual/tutorial.html)
|
||||
* if you want help getting started!
|
||||
*
|
||||
* @example
|
||||
* const Influx = require('influx');
|
||||
* const influx = new Influx.InfluxDB({
|
||||
* host: 'localhost',
|
||||
* database: 'express_response_db',
|
||||
* schema: [
|
||||
* {
|
||||
* measurement: 'response_times',
|
||||
* fields: {
|
||||
* path: Influx.FieldType.STRING,
|
||||
* duration: Influx.FieldType.INTEGER
|
||||
* },
|
||||
* tags: [
|
||||
* 'host'
|
||||
* ]
|
||||
* }
|
||||
* ]
|
||||
* })
|
||||
*
|
||||
* @example
|
||||
* // Connect over HTTPS
|
||||
* const Influx = require('influx');
|
||||
* const influx = new Influx.InfluxDB({
|
||||
* host: 'myinfluxdbhost',
|
||||
* port: 443,
|
||||
 * protocol: 'https',
|
||||
* database: 'express_response_db',
|
||||
* schema: [
|
||||
* {
|
||||
* measurement: 'response_times',
|
||||
* fields: {
|
||||
* path: Influx.FieldType.STRING,
|
||||
* duration: Influx.FieldType.INTEGER
|
||||
* },
|
||||
* tags: [
|
||||
* 'host'
|
||||
* ]
|
||||
* }
|
||||
* ]
|
||||
* })
|
||||
*
|
||||
* influx.writePoints([
|
||||
* {
|
||||
* measurement: 'response_times',
|
||||
* tags: { host: os.hostname() },
|
||||
* fields: { duration, path: req.path },
|
||||
* }
|
||||
* ]).then(() => {
|
||||
* return influx.query(`
|
||||
* select * from response_times
|
||||
* where host = ${Influx.escape.stringLit(os.hostname())}
|
||||
* order by time desc
|
||||
* limit 10
|
||||
* `)
|
||||
* }).then(rows => {
|
||||
* rows.forEach(row => console.log(`A request to ${row.path} took ${row.duration}ms`))
|
||||
* })
|
||||
*/
|
||||
class InfluxDB {
|
||||
/**
|
||||
* Connect to a single InfluxDB instance by specifying
|
||||
* a set of connection options.
|
||||
* @param [options='http://root:root@127.0.0.1:8086']
|
||||
*
|
||||
* @example
|
||||
* const Influx = require('influx')
|
||||
*
|
||||
* // Connect to a single host with a DSN:
|
||||
* const influx = new Influx.InfluxDB('http://user:password@host:8086/database')
|
||||
*
|
||||
* @example
|
||||
* const Influx = require('influx')
|
||||
*
|
||||
* // Connect to a single host with a full set of config details and
|
||||
* // a custom schema
|
||||
* const client = new Influx.InfluxDB({
|
||||
* database: 'my_db',
|
||||
* host: 'localhost',
|
||||
* port: 8086,
|
||||
* username: 'connor',
|
||||
* password: 'pa$$w0rd',
|
||||
* schema: [
|
||||
* {
|
||||
* measurement: 'perf',
|
||||
* fields: {
|
||||
* memory_usage: Influx.FieldType.INTEGER,
|
||||
* cpu_usage: Influx.FieldType.FLOAT,
|
||||
* is_online: Influx.FieldType.BOOLEAN
|
||||
 * },
|
||||
* tags: [
|
||||
* 'hostname'
|
||||
* ]
|
||||
* }
|
||||
* ]
|
||||
* })
|
||||
*
|
||||
* @example
|
||||
* const Influx = require('influx')
|
||||
*
|
||||
* // Use a pool of several host connections and balance queries across them:
|
||||
* const client = new Influx.InfluxDB({
|
||||
* database: 'my_db',
|
||||
* username: 'connor',
|
||||
* password: 'pa$$w0rd',
|
||||
* hosts: [
|
||||
* { host: 'db1.example.com' },
|
||||
* { host: 'db2.example.com' },
|
||||
* ],
|
||||
* schema: [
|
||||
* {
|
||||
* measurement: 'perf',
|
||||
* fields: {
|
||||
* memory_usage: Influx.FieldType.INTEGER,
|
||||
* cpu_usage: Influx.FieldType.FLOAT,
|
||||
* is_online: Influx.FieldType.BOOLEAN
|
||||
 * },
|
||||
* tags: [
|
||||
* 'hostname'
|
||||
* ]
|
||||
* }
|
||||
* ]
|
||||
* })
|
||||
*
|
||||
*/
|
||||
constructor(options) {
|
||||
/**
|
||||
* Map of Schema instances defining measurements in Influx.
|
||||
* @private
|
||||
*/
|
||||
this._schema = Object.create(null);
|
||||
// Figure out how to parse whatever we were passed in into a IClusterConfig.
|
||||
if (typeof options === 'string') {
|
||||
// Plain URI => ISingleHostConfig
|
||||
options = parseOptionsUrl(options);
|
||||
}
|
||||
else if (!options) {
|
||||
options = defaultHost;
|
||||
}
|
||||
if (!options.hasOwnProperty('hosts')) {
|
||||
// ISingleHostConfig => IClusterConfig
|
||||
options = {
|
||||
database: options.database,
|
||||
hosts: [options],
|
||||
password: options.password,
|
||||
pool: options.pool,
|
||||
schema: options.schema,
|
||||
username: options.username
|
||||
};
|
||||
}
|
||||
const resolved = options;
|
||||
resolved.hosts = resolved.hosts.map(host => {
|
||||
return defaults({
|
||||
host: host.host,
|
||||
port: host.port,
|
||||
path: host.path,
|
||||
protocol: host.protocol,
|
||||
options: host.options
|
||||
}, defaultHost);
|
||||
});
|
||||
this._pool = new pool_1.Pool(resolved.pool);
|
||||
this._options = defaults(resolved, defaultOptions);
|
||||
resolved.hosts.forEach(host => {
|
||||
this._pool.addHost(`${host.protocol}://${host.host}:${host.port}${host.path}`, host.options);
|
||||
});
|
||||
this._options.schema.forEach(schema => this._createSchema(schema));
|
||||
}
|
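/*
 * Illustrative sketch (not part of the library source): the normalization above means the
 * three accepted constructor inputs converge on the same cluster-style shape. For example,
 * a hypothetical single-host call
 *
 *   new InfluxDB({ host: 'db.example.com', database: 'my_db' })
 *
 * is first wrapped as { database: 'my_db', hosts: [{ host: 'db.example.com', ... }], ... },
 * then each host entry is back-filled from defaultHost (port 8086, empty path, 'http'),
 * and the remaining top-level options are back-filled from defaultOptions
 * (username/password 'root', empty schema list).
 */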
||||
/**
|
||||
 * Adds the specified schema for better field coercion.
|
||||
*
|
||||
* @param {ISchemaOptions} schema
|
||||
* @memberof InfluxDB
|
||||
*/
|
||||
addSchema(schema) {
|
||||
this._createSchema(schema);
|
||||
}
|
||||
/**
|
||||
* Creates a new database with the provided name.
|
||||
* @param databaseName
|
||||
* @return
|
||||
* @example
|
||||
* influx.createDatabase('mydb')
|
||||
*/
|
||||
createDatabase(databaseName) {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
q: `create database ${grammar.escape.quoted(databaseName)}`
|
||||
}, 'POST'))
|
||||
.then(results_1.assertNoErrors)
|
||||
.then(() => undefined);
|
||||
}
|
||||
/**
|
||||
* Deletes a database with the provided name.
|
||||
* @param databaseName
|
||||
* @return
|
||||
* @example
|
||||
* influx.dropDatabase('mydb')
|
||||
*/
|
||||
dropDatabase(databaseName) {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
q: `drop database ${grammar.escape.quoted(databaseName)}`
|
||||
}, 'POST'))
|
||||
.then(results_1.assertNoErrors)
|
||||
.then(() => undefined);
|
||||
}
|
||||
/**
|
||||
* Returns array of database names. Requires cluster admin privileges.
|
||||
* @returns a list of database names
|
||||
* @example
|
||||
* influx.getDatabaseNames().then(names =>
|
||||
* console.log('My database names are: ' + names.join(', ')));
|
||||
*/
|
||||
getDatabaseNames() {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({ q: 'show databases' }))
|
||||
.then(res => results_1.parseSingle(res).map(r => r.name));
|
||||
}
|
||||
/**
|
||||
* Returns array of measurements.
|
||||
* @returns a list of measurement names
|
||||
* @param [database] the database the measurement lives in, optional
|
||||
* if a default database is provided.
|
||||
* @example
|
||||
* influx.getMeasurements().then(names =>
|
||||
* console.log('My measurement names are: ' + names.join(', ')));
|
||||
*/
|
||||
getMeasurements(database = this._defaultDB()) {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
db: database,
|
||||
q: 'show measurements'
|
||||
}))
|
||||
.then(res => results_1.parseSingle(res).map(r => r.name));
|
||||
}
|
||||
/**
|
||||
* Returns a list of all series within the target measurement, or from the
|
||||
* entire database if a measurement isn't provided.
|
||||
* @param [options]
|
||||
* @param [options.measurement] if provided, we'll only get series
|
||||
* from within that measurement.
|
||||
* @param [options.database] the database the series lives in,
|
||||
* optional if a default database is provided.
|
||||
* @returns a list of series names
|
||||
* @example
|
||||
* influx.getSeries().then(names => {
|
||||
* console.log('My series names in my_measurement are: ' + names.join(', '))
|
||||
* })
|
||||
*
|
||||
* influx.getSeries({
|
||||
* measurement: 'my_measurement',
|
||||
* database: 'my_db'
|
||||
* }).then(names => {
|
||||
* console.log('My series names in my_measurement are: ' + names.join(', '))
|
||||
* })
|
||||
*/
|
||||
getSeries(options = {}) {
|
||||
const { database = this._defaultDB(), measurement } = options;
|
||||
let query = 'show series';
|
||||
if (measurement) {
|
||||
query += ` from ${grammar.escape.quoted(measurement)}`;
|
||||
}
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
db: database,
|
||||
q: query
|
||||
}))
|
||||
.then(res => results_1.parseSingle(res).map(r => r.key));
|
||||
}
|
||||
/**
|
||||
* Removes a measurement from the database.
|
||||
* @param measurement
|
||||
* @param [database] the database the measurement lives in, optional
|
||||
* if a default database is provided.
|
||||
* @return
|
||||
* @example
|
||||
* influx.dropMeasurement('my_measurement')
|
||||
*/
|
||||
dropMeasurement(measurement, database = this._defaultDB()) {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
db: database,
|
||||
q: `drop measurement ${grammar.escape.quoted(measurement)}`
|
||||
}, 'POST'))
|
||||
.then(results_1.assertNoErrors)
|
||||
.then(() => undefined);
|
||||
}
|
||||
/**
|
||||
 * Removes one or more series from InfluxDB.
|
||||
*
|
||||
* @returns
|
||||
* @example
|
||||
 * // The following pairs of queries are equivalent: you can choose either to
|
||||
 * // use our builder or pass in a string directly. The builder takes care
|
||||
* // of escaping and most syntax handling for you.
|
||||
*
|
||||
* influx.dropSeries({ where: e => e.tag('cpu').equals.value('cpu8') })
|
||||
* influx.dropSeries({ where: '"cpu" = \'cpu8\'' })
|
||||
* // DROP SERIES WHERE "cpu" = 'cpu8'
|
||||
*
|
||||
* influx.dropSeries({ measurement: m => m.name('cpu').policy('autogen') })
|
||||
* influx.dropSeries({ measurement: '"cpu"."autogen"' })
|
||||
* // DROP SERIES FROM "autogen"."cpu"
|
||||
*
|
||||
* influx.dropSeries({
|
||||
* measurement: m => m.name('cpu').policy('autogen'),
|
||||
* where: e => e.tag('cpu').equals.value('cpu8'),
|
||||
* database: 'my_db'
|
||||
* })
|
||||
* // DROP SERIES FROM "autogen"."cpu" WHERE "cpu" = 'cpu8'
|
||||
*/
|
||||
dropSeries(options) {
|
||||
const db = 'database' in options ? options.database : this._defaultDB();
|
||||
let q = 'drop series';
|
||||
if ('measurement' in options) {
|
||||
q += ' from ' + b.parseMeasurement(options);
|
||||
}
|
||||
if ('where' in options) {
|
||||
q += ' where ' + b.parseWhere(options);
|
||||
}
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({ db, q }, 'POST'))
|
||||
.then(results_1.assertNoErrors)
|
||||
.then(() => undefined);
|
||||
}
|
||||
/**
|
||||
* Returns a list of users on the Influx database.
|
||||
* @return
|
||||
* @example
|
||||
* influx.getUsers().then(users => {
|
||||
* users.forEach(user => {
|
||||
* if (user.admin) {
|
||||
* console.log(user.user, 'is an admin!')
|
||||
* } else {
|
||||
* console.log(user.user, 'is not an admin!')
|
||||
* }
|
||||
* })
|
||||
* })
|
||||
*/
|
||||
getUsers() {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({ q: 'show users' }))
|
||||
.then(result => results_1.parseSingle(result));
|
||||
}
|
||||
/**
|
||||
* Creates a new InfluxDB user.
|
||||
* @param username
|
||||
* @param password
|
||||
* @param [admin=false] If true, the user will be given all
|
||||
* privileges on all databases.
|
||||
* @return
|
||||
* @example
|
||||
* influx.createUser('connor', 'pa55w0rd', true) // make 'connor' an admin
|
||||
*
|
||||
* // make non-admins:
|
||||
* influx.createUser('not_admin', 'pa55w0rd')
|
||||
*/
|
||||
createUser(username, password, admin = false) {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
q: `create user ${grammar.escape.quoted(username)} with password ` +
|
||||
grammar.escape.stringLit(password) +
|
||||
(admin ? ' with all privileges' : '')
|
||||
}, 'POST'))
|
||||
.then(results_1.assertNoErrors)
|
||||
.then(() => undefined);
|
||||
}
|
||||
/**
|
||||
* Sets a password for an Influx user.
|
||||
* @param username
|
||||
* @param password
|
||||
* @return
|
||||
* @example
|
||||
* influx.setPassword('connor', 'pa55w0rd')
|
||||
*/
|
||||
setPassword(username, password) {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
q: `set password for ${grammar.escape.quoted(username)} = ` +
|
||||
grammar.escape.stringLit(password)
|
||||
}, 'POST'))
|
||||
.then(results_1.assertNoErrors)
|
||||
.then(() => undefined);
|
||||
}
|
||||
/**
|
||||
* Grants a privilege to a specified user.
|
||||
* @param username
|
||||
* @param privilege Should be one of 'READ' or 'WRITE'
|
||||
* @param [database] If not provided, uses the default database.
|
||||
* @return
|
||||
* @example
|
||||
* influx.grantPrivilege('connor', 'READ', 'my_db') // grants read access on my_db to connor
|
||||
*/
|
||||
grantPrivilege(username, privilege, database = this._defaultDB()) {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
q: `grant ${privilege} on ${grammar.escape.quoted(database)} ` +
|
||||
`to ${grammar.escape.quoted(username)}`
|
||||
}, 'POST'))
|
||||
.then(results_1.assertNoErrors)
|
||||
.then(() => undefined);
|
||||
}
|
||||
/**
|
||||
* Removes a privilege from a specified user.
|
||||
* @param username
|
||||
* @param privilege Should be one of 'READ' or 'WRITE'
|
||||
* @param [database] If not provided, uses the default database.
|
||||
* @return
|
||||
* @example
|
||||
* influx.revokePrivilege('connor', 'READ', 'my_db') // removes read access on my_db from connor
|
||||
*/
|
||||
revokePrivilege(username, privilege, database = this._defaultDB()) {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
q: `revoke ${privilege} on ${grammar.escape.quoted(database)} from ` +
|
||||
grammar.escape.quoted(username)
|
||||
}, 'POST'))
|
||||
.then(results_1.assertNoErrors)
|
||||
.then(() => undefined);
|
||||
}
|
||||
/**
|
||||
* Grants admin privileges to a specified user.
|
||||
* @param username
|
||||
* @return
|
||||
* @example
|
||||
* influx.grantAdminPrivilege('connor')
|
||||
*/
|
||||
grantAdminPrivilege(username) {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
q: `grant all to ${grammar.escape.quoted(username)}`
|
||||
}, 'POST'))
|
||||
.then(results_1.assertNoErrors)
|
||||
.then(() => undefined);
|
||||
}
|
||||
/**
|
||||
 * Removes an admin privilege from a specified user.
|
||||
* @param username
|
||||
* @return
|
||||
* @example
|
||||
* influx.revokeAdminPrivilege('connor')
|
||||
*/
|
||||
revokeAdminPrivilege(username) {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
q: `revoke all from ${grammar.escape.quoted(username)}`
|
||||
}, 'POST'))
|
||||
.then(results_1.assertNoErrors)
|
||||
.then(() => undefined);
|
||||
}
|
||||
/**
|
||||
* Removes a user from the database.
|
||||
* @param username
|
||||
* @return
|
||||
* @example
|
||||
* influx.dropUser('connor')
|
||||
*/
|
||||
dropUser(username) {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
q: `drop user ${grammar.escape.quoted(username)}`
|
||||
}, 'POST'))
|
||||
.then(results_1.assertNoErrors)
|
||||
.then(() => undefined);
|
||||
}
|
||||
/**
|
||||
* Creates a continuous query in a database
|
||||
* @param name The query name, for later reference
|
||||
* @param query The body of the query to run
|
||||
* @param [database] If not provided, uses the default database.
|
||||
* @param [resample] If provided, adds resample policy
|
||||
* @return
|
||||
* @example
|
||||
* influx.createContinuousQuery('downsample_cpu_1h', `
|
||||
* SELECT MEAN(cpu) INTO "7d"."perf"
|
||||
* FROM "1d"."perf" GROUP BY time(1m)
|
||||
* `, undefined, 'RESAMPLE FOR 7m')
|
||||
*/
|
||||
createContinuousQuery(name, query, database = this._defaultDB(), resample = '') {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
q: `create continuous query ${grammar.escape.quoted(name)}` +
|
||||
` on ${grammar.escape.quoted(database)} ${resample} begin ${query} end`
|
||||
}, 'POST'))
|
||||
.then(results_1.assertNoErrors)
|
||||
.then(() => undefined);
|
||||
}
|
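/*
 * Illustrative sketch (not part of the library source): with the template above, a call
 * like the documented example
 *
 *   influx.createContinuousQuery('downsample_cpu_1h',
 *     'SELECT MEAN(cpu) INTO "7d"."perf" FROM "1d"."perf" GROUP BY time(1m)',
 *     'my_db', 'RESAMPLE FOR 7m')
 *
 * sends roughly this InfluxQL to the /query endpoint as a POST:
 *
 *   create continuous query "downsample_cpu_1h" on "my_db" RESAMPLE FOR 7m
 *     begin SELECT MEAN(cpu) INTO "7d"."perf" FROM "1d"."perf" GROUP BY time(1m) end
 *
 * 'my_db' here is a placeholder database name.
 */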
||||
/**
|
||||
 * Returns a list of continuous queries in the database.
|
||||
* @param [database] If not provided, uses the default database.
|
||||
* @return
|
||||
* @example
|
||||
* influx.showContinousQueries()
|
||||
*/
|
||||
showContinousQueries(database = this._defaultDB()) {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
db: database,
|
||||
q: 'show continuous queries'
|
||||
}))
|
||||
.then(result => results_1.parseSingle(result));
|
||||
}
|
||||
/**
|
||||
 * Drops a continuous query from a database
|
||||
* @param name The query name
|
||||
* @param [database] If not provided, uses the default database.
|
||||
* @return
|
||||
* @example
|
||||
* influx.dropContinuousQuery('downsample_cpu_1h')
|
||||
*/
|
||||
dropContinuousQuery(name, database = this._defaultDB()) {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
q: `drop continuous query ${grammar.escape.quoted(name)}` +
|
||||
` on ${grammar.escape.quoted(database)}`
|
||||
}, 'POST'))
|
||||
.then(results_1.assertNoErrors)
|
||||
.then(() => undefined);
|
||||
}
|
||||
/**
|
||||
* Creates a new retention policy on a database. You can read more about
|
||||
* [Downsampling and Retention](https://docs.influxdata.com/influxdb/v1.0/
|
||||
* guides/downsampling_and_retention/) on the InfluxDB website.
|
||||
*
|
||||
* @param name The retention policy name
|
||||
* @param options
|
||||
* @param [options.database] Database to create the policy on,
|
||||
* uses the default database if not provided.
|
||||
* @param options.duration How long data in the retention policy
|
||||
* should be stored for, should be in a format like `7d`. See details
|
||||
* [here](https://docs.influxdata.com/influxdb/v1.0/query_language/spec/#durations)
|
||||
* @param options.replication How many servers data in the series
|
||||
* should be replicated to.
|
||||
* @param [options.isDefault] Whether the retention policy should
|
||||
* be the default policy on the database.
|
||||
* @return
|
||||
* @example
|
||||
* influx.createRetentionPolicy('7d', {
|
||||
* duration: '7d',
|
||||
* replication: 1
|
||||
* })
|
||||
*/
|
||||
createRetentionPolicy(name, options) {
|
||||
const q = `create retention policy ${grammar.escape.quoted(name)} on ` +
|
||||
grammar.escape.quoted(options.database || this._defaultDB()) +
|
||||
` duration ${options.duration} replication ${options.replication}` +
|
||||
(options.isDefault ? ' default' : '');
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({ q }, 'POST'))
|
||||
.then(results_1.assertNoErrors)
|
||||
.then(() => undefined);
|
||||
}
|
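/*
 * Illustrative sketch (not part of the library source): with the query built above, the
 * documented example influx.createRetentionPolicy('7d', { duration: '7d', replication: 1 })
 * issues roughly
 *
 *   create retention policy "7d" on "<default database>" duration 7d replication 1
 *
 * with ' default' appended only when options.isDefault is truthy.
 */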
||||
/**
|
||||
* Alters an existing retention policy on a database.
|
||||
*
|
||||
* @param name The retention policy name
|
||||
* @param options
|
||||
* @param [options.database] Database to create the policy on,
|
||||
* uses the default database if not provided.
|
||||
* @param options.duration How long data in the retention policy
|
||||
* should be stored for, should be in a format like `7d`. See details
|
||||
* [here](https://docs.influxdata.com/influxdb/v1.0/query_language/spec/#durations)
|
||||
* @param options.replication How many servers data in the series
|
||||
* should be replicated to.
|
||||
 * @param [options.isDefault] Whether the retention policy should
|
||||
* be the default policy on the database.
|
||||
* @return
|
||||
* @example
|
||||
* influx.alterRetentionPolicy('7d', {
|
||||
* duration: '7d',
|
||||
* replication: 1,
|
||||
 * isDefault: true
|
||||
* })
|
||||
*/
|
||||
alterRetentionPolicy(name, options) {
|
||||
const q = `alter retention policy ${grammar.escape.quoted(name)} on ` +
|
||||
grammar.escape.quoted(options.database || this._defaultDB()) +
|
||||
` duration ${options.duration} replication ${options.replication}` +
|
||||
(options.isDefault ? ' default' : '');
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({ q }, 'POST'))
|
||||
.then(results_1.assertNoErrors)
|
||||
.then(() => undefined);
|
||||
}
|
||||
/**
|
||||
* Deletes a retention policy and associated data. Note that the data will
|
||||
* not be immediately destroyed, and will hang around until Influx's
|
||||
* bi-hourly cron.
|
||||
*
|
||||
* @param name The retention policy name
|
||||
* @param [database] Database name that the policy lives in,
|
||||
* uses the default database if not provided.
|
||||
* @return
|
||||
* @example
|
||||
* influx.dropRetentionPolicy('7d')
|
||||
*/
|
||||
dropRetentionPolicy(name, database = this._defaultDB()) {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
q: `drop retention policy ${grammar.escape.quoted(name)} ` +
|
||||
`on ${grammar.escape.quoted(database)}`
|
||||
}, 'POST'))
|
||||
.then(results_1.assertNoErrors)
|
||||
.then(() => undefined);
|
||||
}
|
||||
/**
|
||||
* Shows retention policies on the database
|
||||
*
|
||||
* @param [database] The database to list policies on, uses the
|
||||
* default database if not provided.
|
||||
* @return
|
||||
* @example
|
||||
* influx.showRetentionPolicies().then(policies => {
|
||||
* expect(policies.slice()).to.deep.equal([
|
||||
* {
|
||||
* name: 'autogen',
|
||||
* duration: '0s',
|
||||
* shardGroupDuration: '168h0m0s',
|
||||
* replicaN: 1,
|
||||
* default: true,
|
||||
* },
|
||||
* {
|
||||
* name: '7d',
|
||||
* duration: '168h0m0s',
|
||||
* shardGroupDuration: '24h0m0s',
|
||||
* replicaN: 1,
|
||||
* default: false,
|
||||
* },
|
||||
* ])
|
||||
* })
|
||||
*/
|
||||
showRetentionPolicies(database = this._defaultDB()) {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
q: `show retention policies on ${grammar.escape.quoted(database)}`
|
||||
}, 'GET'))
|
||||
.then(result => results_1.parseSingle(result));
|
||||
}
|
||||
/**
|
||||
* Shows shards on the database
|
||||
*
|
||||
* @param [database] The database to list policies on, uses the
|
||||
* default database if not provided.
|
||||
* @return
|
||||
* @example
|
||||
* influx.showShards().then(shards => {
|
||||
* expect(shards.slice()).to.deep.equal([
|
||||
* {
|
||||
 * id: 1,
|
||||
* database: 'database',
|
||||
* retention_policy: 'autogen',
|
||||
* shard_group: 1,
|
||||
* start_time: '2019-05-06T00:00:00Z',
|
||||
* end_time: '2019-05-13T00:00:00Z',
|
||||
* expiry_time: '2019-05-13T00:00:00Z',
|
||||
* owners: null,
|
||||
* },
|
||||
* ])
|
||||
* })
|
||||
*/
|
||||
showShards(database = this._defaultDB()) {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
q: 'show shards '
|
||||
}, 'GET'))
|
||||
.then(result => results_1.parseSingle(result).filter(function (i) {
|
||||
return i.database === database;
|
||||
}));
|
||||
}
|
||||
/**
|
||||
* Drops a shard with the provided number.
|
||||
* @param shard_id
|
||||
* @return
|
||||
* @example
|
||||
* influx.dropShard(3)
|
||||
*/
|
||||
dropShard(shard_id) {
|
||||
return this._pool
|
||||
.json(this._getQueryOpts({
|
||||
q: `drop shard ${shard_id}`
|
||||
}, 'POST'))
|
||||
.then(results_1.assertNoErrors)
|
||||
.then(() => undefined);
|
||||
}
|
||||
/**
|
||||
* WritePoints sends a list of points together in a batch to InfluxDB. In
|
||||
* each point you must specify the measurement name to write into as well
|
||||
* as a list of tag and field values. Optionally, you can specify the
|
||||
* time to tag that point at, defaulting to the current time.
|
||||
*
|
||||
* If you defined a schema for the measurement in the options you passed
|
||||
* to `new Influx(options)`, we'll use that to make sure that types get
|
||||
* cast correctly and that there are no extraneous fields or columns.
|
||||
*
|
||||
* For best performance, it's recommended that you batch your data into
|
||||
* sets of a couple thousand records before writing it. In the future we'll
|
||||
* have some utilities within node-influx to make this easier.
|
||||
*
|
||||
* ---
|
||||
*
|
||||
* A note when using manually-specified times and precisions: by default
|
||||
* we write using the `ms` precision since that's what JavaScript gives us.
|
||||
* You can adjust this. However, there is some special behaviour if you
|
||||
* manually specify a timestamp in your points:
|
||||
* - if you specify the timestamp as a Date object, we'll convert it to
|
||||
* milliseconds and manipulate it as needed to get the right precision
|
||||
 * - if you provide an INanoDate as returned from {@link toNanoDate} or the
|
||||
* results from an Influx query, we'll be able to pull the precise
|
||||
* nanosecond timestamp and manipulate it to get the right precision
|
||||
* - if you provide a string or number as the timestamp, we'll pass it
|
||||
* straight into Influx.
|
||||
*
|
||||
* Please see the IPoint and IWriteOptions types for a
|
||||
* full list of possible options.
|
||||
*
|
||||
* @param points
|
||||
* @param [options]
|
||||
* @return
|
||||
* @example
|
||||
* // write a point into the default database with
|
||||
* // the default retention policy.
|
||||
* influx.writePoints([
|
||||
* {
|
||||
* measurement: 'perf',
|
||||
* tags: { host: 'box1.example.com' },
|
||||
* fields: { cpu: getCpuUsage(), mem: getMemUsage() },
|
||||
* }
|
||||
* ])
|
||||
*
|
||||
* // you can manually specify the database,
|
||||
* // retention policy, and time precision:
|
||||
* influx.writePoints([
|
||||
* {
|
||||
* measurement: 'perf',
|
||||
* tags: { host: 'box1.example.com' },
|
||||
* fields: { cpu: getCpuUsage(), mem: getMemUsage() },
|
||||
* timestamp: getLastRecordedTime(),
|
||||
* }
|
||||
* ], {
|
||||
* database: 'my_db',
|
||||
* retentionPolicy: '1d',
|
||||
* precision: 's'
|
||||
* })
|
||||
*/
|
||||
writePoints(points, options = {}) {
|
||||
const { database = this._defaultDB(), precision = 'n', retentionPolicy } = options;
|
||||
let payload = '';
|
||||
points.forEach(point => {
|
||||
const { fields = {}, tags = {}, measurement, timestamp } = point;
|
||||
const schema = this._schema[database] && this._schema[database][measurement];
|
||||
const fieldsPairs = schema ? schema.coerceFields(fields) : schema_1.coerceBadly(fields);
|
||||
const tagsNames = schema ? schema.checkTags(tags) : Object.keys(tags);
|
||||
payload += (payload.length > 0 ? '\n' : '') + measurement;
|
||||
for (let tagsName of tagsNames) {
|
||||
payload += ',' + grammar.escape.tag(tagsName) + '=' + grammar.escape.tag(tags[tagsName]);
|
||||
}
|
||||
for (let i = 0; i < fieldsPairs.length; i += 1) {
|
||||
payload +=
|
||||
(i === 0 ? ' ' : ',') + grammar.escape.tag(fieldsPairs[i][0]) + '=' + fieldsPairs[i][1];
|
||||
}
|
||||
if (timestamp !== undefined) {
|
||||
payload += ' ' + grammar.castTimestamp(timestamp, precision);
|
||||
}
|
||||
});
|
||||
return this._pool.discard({
|
||||
body: payload,
|
||||
method: 'POST',
|
||||
path: '/write',
|
||||
query: { db: database,
|
||||
p: this._options.password,
|
||||
precision,
|
||||
rp: retentionPolicy,
|
||||
u: this._options.username }
|
||||
});
|
||||
}
|
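/*
 * Illustrative sketch (not part of the library source): the loop above serializes each
 * point into InfluxDB line protocol before POSTing it to /write. A hypothetical point
 *
 *   { measurement: 'perf', tags: { host: 'box1.example.com' },
 *     fields: { cpu: 0.52, mem: 2048 }, timestamp: someDate }
 *
 * becomes one payload line of roughly the form
 *
 *   perf,host=box1.example.com cpu=0.52,mem=2048 <timestamp cast to the chosen precision>
 *
 * with tag keys/values and field keys escaped via grammar.escape.tag, and multiple points
 * joined by newlines.
 */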
||||
/**
|
||||
* WriteMeasurement functions similarly to {@link InfluxDB#writePoints}, but
|
||||
* it automatically fills in the `measurement` value for all points for you.
|
||||
*
|
||||
* @param measurement
|
||||
* @param points
|
||||
* @param [options]
|
||||
* @return
|
||||
* @example
|
||||
* influx.writeMeasurement('perf', [
|
||||
* {
|
||||
* tags: { host: 'box1.example.com' },
|
||||
* fields: { cpu: getCpuUsage(), mem: getMemUsage() },
|
||||
* }
|
||||
* ])
|
||||
*/
|
||||
writeMeasurement(measurement, points, options = {}) {
|
||||
points = points.map(p => (Object.assign({ measurement }, p)));
|
||||
return this.writePoints(points, options);
|
||||
}
|
||||
/**
|
||||
* .query() runs a query (or list of queries), and returns the results in a
|
||||
* friendly format, {@link IResults}. If you run multiple queries, an array of results
|
||||
* will be returned, otherwise a single result (array of objects) will be returned.
|
||||
*
|
||||
* @param query
|
||||
* @param [options]
|
||||
* @return result(s)
|
||||
* @example
|
||||
* influx.query('select * from perf').then(results => {
|
||||
* console.log(results)
|
||||
* })
|
||||
*/
|
||||
query(query, options = {}) {
|
||||
if (Array.isArray(query)) {
|
||||
query = query.join(';');
|
||||
}
|
||||
// If the consumer asked explicitly for nanosecond precision parsing,
|
||||
// remove that to cause Influx to give us ISO dates that
|
||||
// we can parse correctly.
|
||||
if (options.precision === 'n') {
|
||||
options = Object.assign({}, options); // Avoid mutating
|
||||
delete options.precision;
|
||||
}
|
||||
return this.queryRaw(query, options).then(res => results_1.parse(res, options.precision));
|
||||
}
|
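/*
 * Illustrative sketch (not part of the library source): multiple queries are joined with
 * semicolons into a single request, and the parsed results come back one per statement:
 *
 *   influx.query([
 *     'select * from perf limit 1',
 *     'select * from response_times limit 1'
 *   ]).then(([perfRows, responseRows]) => {
 *     // perfRows and responseRows are the result sets for the two statements, in order
 *   })
 *
 * The measurement names are placeholders.
 */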
||||
/**
|
||||
* QueryRaw functions similarly to .query() but it does no fancy
|
||||
* transformations on the returned data; it calls `JSON.parse` and returns
|
||||
* those results verbatim.
|
||||
*
|
||||
* @param query
|
||||
* @param [options]
|
||||
* @return
|
||||
* @example
|
||||
* influx.queryRaw('select * from perf').then(rawData => {
|
||||
* console.log(rawData)
|
||||
* })
|
||||
*/
|
||||
queryRaw(query, options = {}) {
|
||||
const { database = this._defaultDB(), retentionPolicy } = options;
|
||||
if (query instanceof Array) {
|
||||
query = query.join(';');
|
||||
}
|
||||
return this._pool.json(this._getQueryOpts({
|
||||
db: database,
|
||||
epoch: options.precision,
|
||||
q: query,
|
||||
rp: retentionPolicy
|
||||
}));
|
||||
}
|
||||
/**
|
||||
* Pings all available hosts, collecting online status and version info.
|
||||
* @param timeout Given in milliseconds
|
||||
* @return
|
||||
* @example
|
||||
* influx.ping(5000).then(hosts => {
|
||||
* hosts.forEach(host => {
|
||||
* if (host.online) {
|
||||
* console.log(`${host.url.host} responded in ${host.rtt}ms running ${host.version})`)
|
||||
* } else {
|
||||
* console.log(`${host.url.host} is offline :(`)
|
||||
* }
|
||||
* })
|
||||
* })
|
||||
*/
|
||||
ping(timeout) {
|
||||
return this._pool.ping(timeout);
|
||||
}
|
||||
/**
|
||||
 * Returns the default database that queries operate on. It throws if called
|
||||
* when a default database isn't set.
|
||||
* @private
|
||||
*/
|
||||
_defaultDB() {
|
||||
if (!this._options.database) {
|
||||
throw new Error('Attempted to run an influx query without a default' +
|
||||
' database specified or an explicit database provided.');
|
||||
}
|
||||
return this._options.database;
|
||||
}
|
||||
/**
|
||||
* Creates options to be passed into the pool to query databases.
|
||||
* @private
|
||||
*/
|
||||
_getQueryOpts(params, method = 'GET') {
|
||||
return {
|
||||
method,
|
||||
path: '/query',
|
||||
query: Object.assign({ p: this._options.password, u: this._options.username }, params)
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Creates specified measurement schema
|
||||
*
|
||||
* @private
|
||||
* @param {ISchemaOptions} schema
|
||||
* @memberof InfluxDB
|
||||
*/
|
||||
_createSchema(schema) {
|
||||
schema.database = schema.database || this._options.database;
|
||||
if (!schema.database) {
|
||||
throw new Error(`Schema ${schema.measurement} doesn't have a database specified,` +
|
||||
' and no default database is provided!');
|
||||
}
|
||||
if (!this._schema[schema.database]) {
|
||||
this._schema[schema.database] = Object.create(null);
|
||||
}
|
||||
this._schema[schema.database][schema.measurement] = new schema_1.Schema(schema);
|
||||
}
|
||||
}
|
||||
exports.InfluxDB = InfluxDB;
|
||||
149
nodered/rootfs/data/node_modules/influx/lib/src/pool.d.ts
generated
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
/// <reference types="node" />
|
||||
import { IBackoffStrategy } from './backoff/backoff';
|
||||
import { Host } from './host';
|
||||
import * as http from 'http';
|
||||
import * as https from 'https';
|
||||
import * as urlModule from 'url';
|
||||
export interface IPoolOptions {
|
||||
/**
|
||||
* Number of times we should retry running a query
|
||||
* before calling back with an error.
|
||||
*/
|
||||
maxRetries?: number;
|
||||
/**
|
||||
* The length of time after which HTTP requests will error
|
||||
* if they do not receive a response.
|
||||
*/
|
||||
requestTimeout?: number;
|
||||
/**
|
||||
* Options to configure the backoff policy for the pool. Defaults
|
||||
* to using exponential backoff.
|
||||
*/
|
||||
backoff?: IBackoffStrategy;
|
||||
}
|
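/*
 * Illustrative sketch (not part of the generated declarations): these options reach the
 * Pool via the `pool` key of the InfluxDB configuration, and anything omitted falls back
 * to the defaults set in pool.js (2 retries, a 30 second request timeout, exponential
 * backoff). For example:
 *
 *   new InfluxDB({
 *     host: 'db.example.com',
 *     database: 'my_db',
 *     pool: { maxRetries: 5, requestTimeout: 10 * 1000 }
 *   })
 *
 * The host and database names are placeholders.
 */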
||||
export interface IPoolRequestOptions {
|
||||
/**
|
||||
* Request method.
|
||||
*/
|
||||
method: 'GET' | 'POST';
|
||||
/**
|
||||
* Path to hit on the database server, must begin with a leading slash.
|
||||
*/
|
||||
path: string;
|
||||
/**
|
||||
* Query string to be appended to the request path.
|
||||
*/
|
||||
query?: any;
|
||||
/**
|
||||
* Request body to include.
|
||||
*/
|
||||
body?: string;
|
||||
/**
|
||||
* For internal use only, a counter of the number of times we've retried
|
||||
* running this request.
|
||||
*/
|
||||
retries?: number;
|
||||
}
|
||||
/**
|
||||
 * A ServiceNotAvailableError is returned as an error from requests that
|
||||
* result in a > 500 error code.
|
||||
*/
|
||||
export declare class ServiceNotAvailableError extends Error {
|
||||
constructor(message: string);
|
||||
}
|
||||
/**
|
||||
 * A RequestError is returned as an error from requests that
|
||||
* result in a 300 <= error code <= 500.
|
||||
*/
|
||||
export declare class RequestError extends Error {
|
||||
req: http.ClientRequest;
|
||||
res: http.IncomingMessage;
|
||||
constructor(req: http.ClientRequest, res: http.IncomingMessage, body: string);
|
||||
static Create(req: http.ClientRequest, res: http.IncomingMessage, callback: (e: RequestError) => void): void;
|
||||
}
|
||||
export interface IPingStats {
|
||||
url: urlModule.Url;
|
||||
res: http.IncomingMessage;
|
||||
online: boolean;
|
||||
rtt: number;
|
||||
version: string;
|
||||
}
|
||||
/**
|
||||
*
|
||||
 * The Pool maintains a list of available Influx hosts and dispatches requests
|
||||
 * to them. If there are errors connecting to a host, it will disable that
|
||||
* host for a period of time.
|
||||
*/
|
||||
export declare class Pool {
|
||||
private _options;
|
||||
private _index;
|
||||
private _timeout;
|
||||
private _hostsAvailable;
|
||||
private _hostsDisabled;
|
||||
/**
|
||||
* Creates a new Pool instance.
|
||||
* @param {IPoolOptions} options
|
||||
*/
|
||||
constructor(options: IPoolOptions);
|
||||
/**
|
||||
* Returns a list of currently active hosts.
|
||||
* @return {Host[]}
|
||||
*/
|
||||
getHostsAvailable(): Host[];
|
||||
/**
|
||||
* Returns a list of hosts that are currently disabled due to network
|
||||
* errors.
|
||||
* @return {Host[]}
|
||||
*/
|
||||
getHostsDisabled(): Host[];
|
||||
/**
|
||||
* Inserts a new host to the pool.
|
||||
*/
|
||||
addHost(url: string, options?: https.RequestOptions): Host;
|
||||
/**
|
||||
 * Returns true if there's any host available to be queried.
|
||||
* @return {Boolean}
|
||||
*/
|
||||
hostIsAvailable(): boolean;
|
||||
/**
|
||||
* Makes a request and calls back with the response, parsed as JSON.
|
||||
* An error is returned on a non-2xx status code or on a parsing exception.
|
||||
*/
|
||||
json(options: IPoolRequestOptions): Promise<any>;
|
||||
/**
|
||||
* Makes a request and resolves with the plain text response,
|
||||
* if possible. An error is raised on a non-2xx status code.
|
||||
*/
|
||||
text(options: IPoolRequestOptions): Promise<string>;
|
||||
/**
|
||||
* Makes a request and discards any response body it receives.
|
||||
* An error is returned on a non-2xx status code.
|
||||
*/
|
||||
discard(options: IPoolRequestOptions): Promise<void>;
|
||||
/**
|
||||
* Ping sends out a request to all available Influx servers, reporting on
|
||||
* their response time and version number.
|
||||
*/
|
||||
ping(timeout: number, path?: string): Promise<IPingStats[]>;
|
||||
/**
|
||||
* Makes a request and calls back with the IncomingMessage stream,
|
||||
* if possible. An error is returned on a non-2xx status code.
|
||||
*/
|
||||
stream(options: IPoolRequestOptions, callback: (err: Error, res: http.IncomingMessage) => void): void;
|
||||
/**
|
||||
* Returns the next available host for querying.
|
||||
* @return {Host}
|
||||
*/
|
||||
private _getHost;
|
||||
/**
|
||||
* Re-enables the provided host, returning it to the pool to query.
|
||||
* @param {Host} host
|
||||
*/
|
||||
private _enableHost;
|
||||
/**
|
||||
* Disables the provided host, removing it from the query pool. It will be
|
||||
* re-enabled after a backoff interval
|
||||
*/
|
||||
private _disableHost;
|
||||
private _handleRequestError;
|
||||
}
|
||||
310
nodered/rootfs/data/node_modules/influx/lib/src/pool.js
generated
vendored
Normal file
@@ -0,0 +1,310 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const exponential_1 = require("./backoff/exponential");
|
||||
const host_1 = require("./host");
|
||||
const http = require("http");
|
||||
const https = require("https");
|
||||
const querystring = require("querystring");
|
||||
/**
|
||||
* Status codes that will cause a host to be marked as 'failed' if we get
|
||||
* them from a request to Influx.
|
||||
* @type {Array}
|
||||
*/
|
||||
const resubmitErrorCodes = [
|
||||
'ETIMEDOUT',
|
||||
'ESOCKETTIMEDOUT',
|
||||
'ECONNRESET',
|
||||
'ECONNREFUSED',
|
||||
'EHOSTUNREACH'
|
||||
];
|
||||
/**
|
||||
 * A ServiceNotAvailableError is returned as an error from requests that
|
||||
* result in a > 500 error code.
|
||||
*/
|
||||
class ServiceNotAvailableError extends Error {
|
||||
constructor(message) {
|
||||
super();
|
||||
this.message = message;
|
||||
Object.setPrototypeOf(this, ServiceNotAvailableError.prototype);
|
||||
}
|
||||
}
|
||||
exports.ServiceNotAvailableError = ServiceNotAvailableError;
|
||||
/**
|
||||
* An RequestError is returned as an error from requests that
|
||||
* result in a 300 <= error code <= 500.
|
||||
*/
|
||||
class RequestError extends Error {
|
||||
constructor(req, res, body) {
|
||||
super();
|
||||
this.req = req;
|
||||
this.res = res;
|
||||
this.message = `A ${res.statusCode} ${res.statusMessage} error occurred: ${body}`;
|
||||
Object.setPrototypeOf(this, RequestError.prototype);
|
||||
}
|
||||
static Create(req, res, callback) {
|
||||
let body = '';
|
||||
res.on('data', str => {
|
||||
body += str.toString();
|
||||
});
|
||||
res.on('end', () => callback(new RequestError(req, res, body)));
|
||||
}
|
||||
}
|
||||
exports.RequestError = RequestError;
|
||||
/**
|
||||
* Creates a function generation that returns a wrapper which only allows
|
||||
* through the first call of any function that it generated.
|
||||
*/
|
||||
function doOnce() {
|
||||
let handled = false;
|
||||
return fn => {
|
||||
return arg => {
|
||||
if (handled) {
|
||||
return;
|
||||
}
|
||||
handled = true;
|
||||
fn(arg);
|
||||
};
|
||||
};
|
||||
}
|
||||
function setToArray(itemSet) {
|
||||
const output = [];
|
||||
itemSet.forEach(value => {
|
||||
output.push(value);
|
||||
});
|
||||
return output;
|
||||
}
|
||||
const request = (options, callback) => {
|
||||
if (options.protocol === 'https:') {
|
||||
return https.request(options, callback);
|
||||
}
|
||||
return http.request(options, callback);
|
||||
};
|
||||
/**
|
||||
*
|
||||
* The Pool maintains a list available Influx hosts and dispatches requests
|
||||
* to them. If there are errors connecting to hosts, it will disable that
|
||||
* host for a period of time.
|
||||
*/
|
||||
class Pool {
|
||||
/**
|
||||
* Creates a new Pool instance.
|
||||
* @param {IPoolOptions} options
|
||||
*/
|
||||
constructor(options) {
|
||||
this._options = Object.assign({ backoff: new exponential_1.ExponentialBackoff({
|
||||
initial: 300,
|
||||
max: 10 * 1000,
|
||||
random: 1
|
||||
}), maxRetries: 2, requestTimeout: 30 * 1000 }, options);
|
||||
this._index = 0;
|
||||
this._hostsAvailable = new Set();
|
||||
this._hostsDisabled = new Set();
|
||||
this._timeout = this._options.requestTimeout;
|
||||
}
|
||||
/**
|
||||
* Returns a list of currently active hosts.
|
||||
* @return {Host[]}
|
||||
*/
|
||||
getHostsAvailable() {
|
||||
return setToArray(this._hostsAvailable);
|
||||
}
|
||||
/**
|
||||
* Returns a list of hosts that are currently disabled due to network
|
||||
* errors.
|
||||
* @return {Host[]}
|
||||
*/
|
||||
getHostsDisabled() {
|
||||
return setToArray(this._hostsDisabled);
|
||||
}
|
||||
/**
|
||||
* Inserts a new host to the pool.
|
||||
*/
|
||||
addHost(url, options = {}) {
|
||||
const host = new host_1.Host(url, this._options.backoff.reset(), options);
|
||||
this._hostsAvailable.add(host);
|
||||
return host;
|
||||
}
|
||||
/**
|
||||
* Returns true if there's any host available to by queried.
|
||||
* @return {Boolean}
|
||||
*/
|
||||
hostIsAvailable() {
|
||||
return this._hostsAvailable.size > 0;
|
||||
}
|
||||
/**
|
||||
* Makes a request and calls back with the response, parsed as JSON.
|
||||
* An error is returned on a non-2xx status code or on a parsing exception.
|
||||
*/
|
||||
json(options) {
|
||||
return this.text(options).then(res => JSON.parse(res));
|
||||
}
|
||||
/**
|
||||
* Makes a request and resolves with the plain text response,
|
||||
* if possible. An error is raised on a non-2xx status code.
|
||||
*/
|
||||
text(options) {
|
||||
return new Promise((resolve, reject) => {
|
||||
this.stream(options, (err, res) => {
|
||||
if (err) {
|
||||
return reject(err);
|
||||
}
|
||||
let output = '';
|
||||
res.on('data', str => {
|
||||
output += str.toString();
|
||||
});
|
||||
res.on('end', () => resolve(output));
|
||||
});
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Makes a request and discards any response body it receives.
|
||||
* An error is returned on a non-2xx status code.
|
||||
*/
|
||||
discard(options) {
|
||||
return new Promise((resolve, reject) => {
|
||||
this.stream(options, (err, res) => {
|
||||
if (err) {
|
||||
return reject(err);
|
||||
}
|
||||
res.on('data', () => {
|
||||
/* ignore */
|
||||
});
|
||||
res.on('end', () => resolve());
|
||||
});
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Ping sends out a request to all available Influx servers, reporting on
|
||||
* their response time and version number.
|
||||
*/
|
||||
ping(timeout, path = '/ping') {
|
||||
const todo = [];
|
||||
setToArray(this._hostsAvailable)
|
||||
.concat(setToArray(this._hostsDisabled))
|
||||
.forEach(host => {
|
||||
const start = Date.now();
|
||||
const url = host.url;
|
||||
const once = doOnce();
|
||||
return todo.push(new Promise(resolve => {
|
||||
const req = request(Object.assign({ hostname: url.hostname, method: 'GET', path, port: Number(url.port), protocol: url.protocol, timeout }, host.options), once((res) => {
|
||||
resolve({
|
||||
url,
|
||||
res: res.resume(),
|
||||
online: res.statusCode < 300,
|
||||
rtt: Date.now() - start,
|
||||
version: res.headers['x-influxdb-version']
|
||||
});
|
||||
}));
|
||||
const fail = once(() => {
|
||||
req.abort();
|
||||
resolve({
|
||||
online: false,
|
||||
res: null,
|
||||
rtt: Infinity,
|
||||
url,
|
||||
version: null
|
||||
});
|
||||
});
|
||||
// Support older Nodes and polyfills which don't allow .timeout() in
|
||||
// the request options, wrapped in a conditional for even worse
|
||||
// polyfills. See: https://github.com/node-influx/node-influx/issues/221
|
||||
if (typeof req.setTimeout === 'function') {
|
||||
req.setTimeout(timeout, () => {
|
||||
fail.call(fail, arguments);
|
||||
}); // Tslint:disable-line
|
||||
}
|
||||
req.on('timeout', fail);
|
||||
req.on('error', fail);
|
||||
req.end();
|
||||
}));
|
||||
});
|
||||
return Promise.all(todo);
|
||||
}
|
||||
/**
|
||||
* Makes a request and calls back with the IncomingMessage stream,
|
||||
* if possible. An error is returned on a non-2xx status code.
|
||||
*/
|
||||
stream(options, callback) {
|
||||
if (!this.hostIsAvailable()) {
|
||||
return callback(new ServiceNotAvailableError('No host available'), null);
|
||||
}
|
||||
const once = doOnce();
|
||||
const host = this._getHost();
|
||||
let path = host.url.pathname === '/' ? '' : host.url.pathname;
|
||||
path += options.path;
|
||||
if (options.query) {
|
||||
path += '?' + querystring.stringify(options.query);
|
||||
}
|
||||
const req = request(Object.assign({ headers: { 'content-length': options.body ? Buffer.from(options.body).length : 0 }, hostname: host.url.hostname, method: options.method, path, port: Number(host.url.port), protocol: host.url.protocol, timeout: this._timeout }, host.options), once((res) => {
|
||||
if (res.statusCode >= 500) {
|
||||
return this._handleRequestError(new ServiceNotAvailableError(res.statusMessage), host, options, callback);
|
||||
}
|
||||
if (res.statusCode >= 300) {
|
||||
return RequestError.Create(req, res, err => callback(err, res)); // eslint-disable-line new-cap
|
||||
}
|
||||
host.success();
|
||||
return callback(undefined, res);
|
||||
}));
|
||||
// Handle network or HTTP parsing errors:
|
||||
req.on('error', once((err) => {
|
||||
this._handleRequestError(err, host, options, callback);
|
||||
}));
|
||||
// Handle timeouts:
|
||||
req.on('timeout', once(() => {
|
||||
req.abort();
|
||||
this._handleRequestError(new ServiceNotAvailableError('Request timed out'), host, options, callback);
|
||||
}));
|
||||
// Support older Nodes and polyfills which don't allow .timeout() in the
|
||||
// request options, wrapped in a conditional for even worse polyfills. See:
|
||||
// https://github.com/node-influx/node-influx/issues/221
|
||||
if (typeof req.setTimeout === 'function') {
|
||||
req.setTimeout(host.options.timeout || this._timeout); // Tslint:disable-line
|
||||
}
|
||||
// Write out the body:
|
||||
if (options.body) {
|
||||
req.write(options.body);
|
||||
}
|
||||
req.end();
|
||||
}
|
||||
/**
|
||||
* Returns the next available host for querying.
|
||||
* @return {Host}
|
||||
*/
|
||||
_getHost() {
|
||||
const available = setToArray(this._hostsAvailable);
|
||||
const host = available[this._index];
|
||||
this._index = (this._index + 1) % available.length;
|
||||
return host;
|
||||
}
|
||||
/**
|
||||
* Re-enables the provided host, returning it to the pool to query.
|
||||
* @param {Host} host
|
||||
*/
|
||||
_enableHost(host) {
|
||||
this._hostsDisabled.delete(host);
|
||||
this._hostsAvailable.add(host);
|
||||
}
|
||||
/**
|
||||
* Disables the provided host, removing it from the query pool. It will be
|
||||
* re-enabled after a backoff interval
|
||||
*/
|
||||
_disableHost(host) {
|
||||
this._hostsAvailable.delete(host);
|
||||
this._hostsDisabled.add(host);
|
||||
this._index %= Math.max(1, this._hostsAvailable.size);
|
||||
setTimeout(() => this._enableHost(host), host.fail());
|
||||
}
|
||||
_handleRequestError(err, host, options, callback) {
|
||||
if (!(err instanceof ServiceNotAvailableError) && !resubmitErrorCodes.includes(err.code)) {
|
||||
return callback(err, null);
|
||||
}
|
||||
this._disableHost(host);
|
||||
const retries = options.retries || 0;
|
||||
if (retries < this._options.maxRetries && this.hostIsAvailable()) {
|
||||
options.retries = retries + 1;
|
||||
return this.stream(options, callback);
|
||||
}
|
||||
callback(err, null);
|
||||
}
|
||||
}
|
||||
exports.Pool = Pool;
|
||||
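The doOnce helper above is not exported, but the pattern it implements is what keeps the response, 'error' and 'timeout' callbacks from racing each other: every wrapper produced by a single doOnce() call shares one flag, so only the first wrapper to fire actually runs. A standalone sketch of the same idea, for illustration only:

// Standalone sketch of the doOnce pattern used internally by pool.js.
function doOnce(): <T>(fn: (arg: T) => void) => (arg: T) => void {
  let handled = false;
  return fn => arg => {
    if (handled) {
      return; // another wrapper from the same doOnce() already fired
    }
    handled = true;
    fn(arg);
  };
}

const once = doOnce();
const onSuccess = once((msg: string) => console.log('success:', msg));
const onError = once((msg: string) => console.log('error:', msg));

onSuccess('response received'); // logs
onError('socket timeout');      // ignored: the shared flag is already set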
118
nodered/rootfs/data/node_modules/influx/lib/src/results.d.ts
generated
vendored
Normal file
@@ -0,0 +1,118 @@
import { TimePrecision } from './grammar';
/**
 * A ResultError is thrown when a query generates errorful results from Influx.
 */
export declare class ResultError extends Error {
constructor(message: string);
}
/**
 * InfluxResults describes the result structure received from InfluxDB.
 *
 * NOTE: if you're poking around in Influx, use curl, not the `json` formatter
 * provided by the CLI. As of 1.0 this formatter changes the result structure
 * and it will confuse you, as it did me ;)
 */
export interface IResponse {
results: IResultEntry[];
}
export interface IResultEntry {
series?: IResponseSeries[];
error?: string;
}
export declare type Tags = {
[name: string]: string;
};
export declare type Row = any;
export interface IResponseSeries {
name?: string;
columns: string[];
tags?: Tags;
values?: Row[];
}
/**
 * IResults is a user-friendly results table parsed from raw Influx responses.
 */
export interface IResults<T> extends Array<T> {
/**
 * Group looks for and returns the first group in the results
 * that matches the provided tags.
 *
 * If you've used lodash or underscore, we do something quite similar to
 * their object matching: for every row in the results, if it contains tag
 * values matching the requested object, we return it.
 *
 * @param matcher
 * @return
 * @example
 * // Matching tag sets in queries:
 * influx.query('select * from perf group by host').then(results => {
 *   expect(results.group({ host: 'ares.peet.io'})).to.deep.equal([
 *     { host: 'ares.peet.io', cpu: 0.12, mem: 2435 },
 *     { host: 'ares.peet.io', cpu: 0.10, mem: 2451 },
 *     // ...
 *   ])
 *
 *   expect(results.group({ host: 'box1.example.com'})).to.deep.equal([
 *     { host: 'box1.example.com', cpu: 0.54, mem: 8420 },
 *     // ...
 *   ])
 * })
 */
group(matcher: Tags): T[];
/**
 * Returns the data grouped into nested arrays, similarly to how it was
 * returned from Influx originally.
 *
 * @returns
 * @example
 * influx.query('select * from perf group by host').then(results => {
 *   expect(results.groups()).to.deep.equal([
 *     {
 *       name: 'perf',
 *       tags: { host: 'ares.peet.io' },
 *       rows: [
 *         { host: 'ares.peet.io', cpu: 0.12, mem: 2435 },
 *         { host: 'ares.peet.io', cpu: 0.10, mem: 2451 },
 *         // ...
 *       ]
 *     }
 *     {
 *       name: 'perf',
 *       tags: { host: 'box1.example.com' },
 *       rows: [
 *         { host: 'box1.example.com', cpu: 0.54, mem: 8420 },
 *         // ...
 *       ]
 *     }
 *   ])
 * })
 */
groups(): Array<{
name: string;
tags: Tags;
rows: T[];
}>;
}
/**
 * Checks if there are any errors in the IResponse and, if so, it throws them.
 * @private
 * @throws {ResultError}
 */
export declare function assertNoErrors(res: IResponse): IResponse;
/**
 * Parses a response into a result or a list of results.
 * There are three situations we cover here:
 *  1. A single query without groups, like `select * from myseries`
 *  2. A single query with groups, generated with a `group by` statement
 *     which groups by series *tags*; grouping by times is case (1)
 *  3. Multiple queries of types 1 and 2
 * @private
 */
export declare function parse<T>(res: IResponse, precision?: TimePrecision): Array<IResults<T>> | IResults<T>;
/**
 * ParseSingle asserts that the response contains a single result,
 * and returns that result.
 * @throws {Error} if the number of results is not exactly one
 * @private
 */
export declare function parseSingle<T>(res: IResponse, precision?: TimePrecision): IResults<T>;
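Assuming a concrete row type, the IResults interface above can be consumed like an ordinary array with two extra grouping helpers. The IPerfRow type and the field names in this sketch are illustrative assumptions, not part of the declaration file:

// Illustrative only: IPerfRow is an assumed row shape.
import { IResults, Tags } from './results';

interface IPerfRow {
  time: Date;
  host: string;
  cpu: number;
  mem: number;
}

function summarize(results: IResults<IPerfRow>, matcher: Tags): void {
  // IResults<T> extends Array<T>, so ordinary array methods work...
  const avgCpu = results.reduce((sum, row) => sum + row.cpu, 0) / Math.max(1, results.length);
  // ...and group() narrows to the rows matching one tag set.
  const matching = results.group(matcher);
  console.log(avgCpu, matching.length, results.groups().map(g => g.tags));
}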
130
nodered/rootfs/data/node_modules/influx/lib/src/results.js
generated
vendored
Normal file
@@ -0,0 +1,130 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const grammar_1 = require("./grammar");
/**
 * A ResultError is thrown when a query generates errorful results from Influx.
 */
class ResultError extends Error {
constructor(message) {
super();
this.message = `Error from InfluxDB: ${message}`;
}
}
exports.ResultError = ResultError;
function groupMethod(matcher) {
// We do a tiny bit of 'custom' deep equality checking here, taking
// advantage of the fact that the tag keys are consistent across all
// series results. This lets us match groupings much more efficiently,
// ~6000x faster than the fastest vanilla equality checker (lodash)
// when operating on large (~100,000 grouping) sets.
const srcKeys = this.groupsTagsKeys;
const dstKeys = Object.keys(matcher);
if (srcKeys.length === 0 || srcKeys.length !== dstKeys.length) {
return [];
}
L: for (let row of this.groupRows) { // eslint-disable-line no-labels
for (let src of srcKeys) {
if (row.tags[src] !== matcher[src]) {
continue L; // eslint-disable-line no-labels
}
}
return row.rows;
}
return [];
}
function groupsMethod() {
return this.groupRows;
}
/**
 * Inner parsing function which unpacks the series into a table and attaches
 * methods to the array. This is quite optimized and a bit of a mess to read,
 * but it's all fairly easy procedural logic.
 *
 * We do this instead of subclassing Array since subclassing has some
 * undesirable side-effects. For example, calling .slice() on the array
 * makes it impossible to preserve groups as would be necessary if it's
 * subclassed.
 */
function parseInner(series = [], precision) {
const results = [];
results.groupsTagsKeys = series.length && series[0].tags ? Object.keys(series[0].tags) : [];
const tags = results.groupsTagsKeys;
let nextGroup = [];
results.groupRows = new Array(series.length); // Tslint:disable-line
for (let i = 0; i < series.length; i += 1) {
const { columns = [], values = [] } = series[i];
for (let value of values) {
const obj = {};
for (let j = 0; j < columns.length; j += 1) {
if (columns[j] === 'time') {
obj.time = grammar_1.isoOrTimeToDate(value[j], precision);
}
else {
obj[columns[j]] = value[j];
}
}
for (let tag of tags) {
obj[tag] = series[i].tags[tag];
}
results.push(obj);
nextGroup.push(obj);
}
results.groupRows[i] = {
name: series[i].name,
rows: nextGroup,
tags: series[i].tags || {}
};
nextGroup = [];
}
results.group = groupMethod;
results.groups = groupsMethod;
return results;
}
/**
 * Checks if there are any errors in the IResponse and, if so, it throws them.
 * @private
 * @throws {ResultError}
 */
function assertNoErrors(res) {
for (let result of res.results) {
const { error } = result;
if (error) {
throw new ResultError(error);
}
}
return res;
}
exports.assertNoErrors = assertNoErrors;
/**
 * Parses a response into a result or a list of results.
 * There are three situations we cover here:
 *  1. A single query without groups, like `select * from myseries`
 *  2. A single query with groups, generated with a `group by` statement
 *     which groups by series *tags*; grouping by times is case (1)
 *  3. Multiple queries of types 1 and 2
 * @private
 */
function parse(res, precision) {
assertNoErrors(res);
if (res.results.length === 1) {
// Cases 1 and 2: a single query result
return parseInner(res.results[0].series, precision);
}
return res.results.map(result => parseInner(result.series, precision));
}
exports.parse = parse;
/**
 * ParseSingle asserts that the response contains a single result,
 * and returns that result.
 * @throws {Error} if the number of results is not exactly one
 * @private
 */
function parseSingle(res, precision) {
assertNoErrors(res);
if (res.results.length !== 1) {
throw new Error('node-influx expected the results length to equal 1, but ' +
`it was ${res.results.length}. Please report this here: https://git.io/influx-err`);
}
return parseInner(res.results[0].series, precision);
}
exports.parseSingle = parseSingle;
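A small, self-contained sketch of what the parser above produces. The response object is hand-built to mirror the IResponse shape rather than fetched from a real server, the measurement and tag values are placeholders, and the time column is omitted so the example does not depend on ./grammar's date handling:

import { IResponse, IResults, parse } from './results';

interface IPerfRow {
  host: string;
  cpu: number;
  mem: number;
}

// Hand-built response mimicking a single `group by host` statement.
const res: IResponse = {
  results: [{
    series: [{
      name: 'perf',
      tags: { host: 'ares.peet.io' },
      columns: ['cpu', 'mem'],
      values: [[0.12, 2435], [0.10, 2451]]
    }]
  }]
};

// With exactly one result entry, parse() returns a single IResults<T>.
const rows = parse<IPerfRow>(res) as IResults<IPerfRow>;

console.log(rows.length);                                   // 2: one object per value row
console.log(rows[0]);                                       // { cpu: 0.12, mem: 2435, host: 'ares.peet.io' }
console.log(rows.group({ host: 'ares.peet.io' }).length);   // 2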
58
nodered/rootfs/data/node_modules/influx/lib/src/schema.d.ts
generated
vendored
Normal file
@@ -0,0 +1,58 @@
import { FieldType } from './grammar';
export interface ISchemaOptions {
/**
 * The measurement name this schema is describing.
 */
measurement: string;
/**
 * The database the measurement lives under. Uses the default database
 * if none is provided.
 */
database?: string;
/**
 * Columns is the map of column type definitions on the database.
 */
fields: {
[column: string]: FieldType;
};
/**
 * A list of schema tag names.
 */
tags: string[];
}
export declare type FieldMap = {
[name: string]: string | number | boolean;
};
/**
 * The Schema provides information and utilities for an InfluxDB measurement.
 * @private
 */
export declare class Schema {
private options;
private _fieldNames;
private _tagHash;
constructor(options: ISchemaOptions);
/**
 * CoerceFields converts a map of field values to strings which
 * can be injected into the line protocol without further escaping.
 * The output is given in [key, value] pairs.
 */
coerceFields(fields: FieldMap): Array<[string, string]>;
/**
 * Throws an error if the tags include values other than
 * what was specified in the schema. It returns a list of tag names.
 */
checkTags(tags: {
[tag: string]: string;
}): string[];
/**
 * Returns the 'db'.'measurement'[.'field'] referencing the current schema.
 */
private _ref;
}
/**
 * Coerces the field map to a set of writable values, a la coerceFields,
 * using native guesses based on the field datatypes.
 * @private
 */
export declare function coerceBadly(fields: FieldMap): Array<[string, string]>;
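A hypothetical ISchemaOptions literal, to make the shape above concrete. The database, measurement, field and tag names are placeholders:

import { FieldType } from './grammar';
import { ISchemaOptions } from './schema';

// Placeholder schema for a hypothetical `perf` measurement.
export const perfSchema: ISchemaOptions = {
  database: 'telemetry',   // optional; omit to use the client's default database
  measurement: 'perf',
  fields: {
    cpu: FieldType.FLOAT,
    mem: FieldType.INTEGER,
    online: FieldType.BOOLEAN,
    note: FieldType.STRING,
  },
  tags: ['host', 'region'],
};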
117
nodered/rootfs/data/node_modules/influx/lib/src/schema.js
generated
vendored
Normal file
@@ -0,0 +1,117 @@
"use strict";
/* eslint-disable @typescript-eslint/require-array-sort-compare */
/* eslint-disable no-prototype-builtins */
Object.defineProperty(exports, "__esModule", { value: true });
const grammar_1 = require("./grammar");
/**
 * The Schema provides information and utilities for an InfluxDB measurement.
 * @private
 */
class Schema {
constructor(options) {
this.options = options;
this._tagHash = {};
// FieldNames are sorted for performance: when coerceFields is run the
// fields will be added to the output in order.
this._fieldNames = Object.keys(options.fields).sort();
options.tags.forEach(tag => {
this._tagHash[tag] = true;
});
}
/**
 * CoerceFields converts a map of field values to strings which
 * can be injected into the line protocol without further escaping.
 * The output is given in [key, value] pairs.
 */
coerceFields(fields) {
let consumed = 0;
const output = [];
this._fieldNames.forEach(field => {
if (!fields.hasOwnProperty(field)) {
return;
}
const value = fields[field];
const typ = typeof value;
consumed += 1;
if (value === null || value === undefined) {
return;
}
let coerced;
switch (this.options.fields[field]) {
case grammar_1.FieldType.STRING:
coerced = grammar_1.escape.quoted(String(value));
break;
case grammar_1.FieldType.INTEGER:
if (typ !== 'number' && !grammar_1.isNumeric(String(value))) {
throw new Error(`Expected numeric value for ${this._ref(field)}, but got '${value}'!`);
}
coerced = String(Math.floor(value)) + 'i';
break;
case grammar_1.FieldType.FLOAT:
if (typ !== 'number' && !grammar_1.isNumeric(String(value))) {
throw new Error(`Expected numeric value for ${this._ref(field)}, but got '${value}'!`);
}
coerced = String(value);
break;
case grammar_1.FieldType.BOOLEAN:
if (typ !== 'boolean') {
throw new Error(`Expected boolean value for ${this._ref(field)}, but got a ${typ}!`);
}
coerced = value ? 'T' : 'F';
break;
default:
throw new Error(`Unknown field type ${this.options.fields[field]} for ${field} in ` +
`${this._ref()}. Please ensure that your configuration is correct.`);
}
output.push([field, coerced]);
});
const keys = Object.keys(fields);
if (consumed !== keys.length) {
const extraneous = keys.filter(f => !this._fieldNames.includes(f));
throw new Error('Extraneous fields detected for writing InfluxDB point in ' +
`${this._ref()}: \`${extraneous.join('`, `')}\`.`);
}
return output;
}
/**
 * Throws an error if the tags include values other than
 * what was specified in the schema. It returns a list of tag names.
 */
checkTags(tags) {
const names = Object.keys(tags);
const extraneous = names.filter(tag => !this._tagHash[tag]);
if (extraneous.length > 0) {
throw new Error('Extraneous tags detected for writing InfluxDB point in ' +
`${this._ref()}: \`${extraneous.join('`, `')}\`.`);
}
return names;
}
/**
 * Returns the 'db'.'measurement'[.'field'] referencing the current schema.
 */
_ref(field) {
let out = this.options.database + '.' + this.options.measurement;
if (field) {
out += '.' + field;
}
return out;
}
}
exports.Schema = Schema;
/**
 * Coerces the field map to a set of writable values, a la coerceFields,
 * using native guesses based on the field datatypes.
 * @private
 */
function coerceBadly(fields) {
return Object.keys(fields)
.sort()
.map(field => {
const value = fields[field];
if (typeof value === 'string') {
return [field, grammar_1.escape.quoted(value)];
}
return [field, String(value)];
});
}
exports.coerceBadly = coerceBadly;
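Continuing the placeholder schema from the declaration file above, a sketch of how coerceFields and checkTags behave; the measurement, field and tag names are assumptions, and the expected outputs in the comments follow directly from the switch statement shown:

import { FieldType } from './grammar';
import { Schema } from './schema';

// Placeholder schema; names are illustrative.
const schema = new Schema({
  database: 'telemetry',
  measurement: 'perf',
  fields: { cpu: FieldType.FLOAT, mem: FieldType.INTEGER, online: FieldType.BOOLEAN },
  tags: ['host'],
});

console.log(schema.coerceFields({ cpu: 0.5, mem: 2048, online: true }));
// => [['cpu', '0.5'], ['mem', '2048i'], ['online', 'T']]

console.log(schema.checkTags({ host: 'ares.peet.io' }));
// => ['host']

// A field or tag not declared in the schema throws an "Extraneous ..." error
// naming the offending keys, e.g.:
// schema.coerceFields({ cpu: 0.5, disk: 99 }); // throws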