first commit
node_modules/ioredis/built/cluster/ClusterOptions.d.ts (generated, vendored, Normal file, 172 lines added)
@@ -0,0 +1,172 @@
/// <reference types="node" />
import { SrvRecord } from "dns";
import { RedisOptions } from "../redis/RedisOptions";
import { CommanderOptions } from "../utils/Commander";
import { NodeRole } from "./util";
export declare type DNSResolveSrvFunction = (hostname: string, callback: (err: NodeJS.ErrnoException | null | undefined, records?: SrvRecord[]) => void) => void;
export declare type DNSLookupFunction = (hostname: string, callback: (err: NodeJS.ErrnoException | null | undefined, address: string, family?: number) => void) => void;
export declare type NatMapFunction = (key: string) => {
    host: string;
    port: number;
} | null;
export declare type NatMap = {
    [key: string]: {
        host: string;
        port: number;
    };
} | NatMapFunction;
/**
 * Options for Cluster constructor
 */
export interface ClusterOptions extends CommanderOptions {
    /**
     * See "Quick Start" section.
     *
     * @default (times) => Math.min(100 + times * 2, 2000)
     */
    clusterRetryStrategy?: (times: number, reason?: Error) => number | void | null;
    /**
     * See Redis class.
     *
     * @default true
     */
    enableOfflineQueue?: boolean;
    /**
     * When enabled, ioredis only emits "ready" event when `CLUSTER INFO`
     * command reporting the cluster is ready for handling commands.
     *
     * @default true
     */
    enableReadyCheck?: boolean;
    /**
     * Scale reads to the node with the specified role.
     *
     * @default "master"
     */
    scaleReads?: NodeRole | Function;
    /**
     * When a MOVED or ASK error is received, client will redirect the
     * command to another node.
     * This option limits the max redirections allowed to send a command.
     *
     * @default 16
     */
    maxRedirections?: number;
    /**
     * When an error is received when sending a command (e.g.
     * "Connection is closed." when the target Redis node is down), client will retry
     * if `retryDelayOnFailover` is valid delay time (in ms).
     *
     * @default 100
     */
    retryDelayOnFailover?: number;
    /**
     * When a CLUSTERDOWN error is received, client will retry
     * if `retryDelayOnClusterDown` is valid delay time (in ms).
     *
     * @default 100
     */
    retryDelayOnClusterDown?: number;
    /**
     * When a TRYAGAIN error is received, client will retry
     * if `retryDelayOnTryAgain` is valid delay time (in ms).
     *
     * @default 100
     */
    retryDelayOnTryAgain?: number;
    /**
     * By default, this value is 0, which means when a `MOVED` error is received,
     * the client will resend the command instantly to the node returned together with
     * the `MOVED` error. However, sometimes it takes time for a cluster to become
     * state stabilized after a failover, so adding a delay before resending can
     * prevent a ping pong effect.
     *
     * @default 0
     */
    retryDelayOnMoved?: number;
    /**
     * The milliseconds before a timeout occurs while refreshing
     * slots from the cluster.
     *
     * @default 1000
     */
    slotsRefreshTimeout?: number;
    /**
     * The milliseconds between every automatic slots refresh.
     *
     * @default 5000
     */
    slotsRefreshInterval?: number;
    /**
     * Use sharded subscribers instead of a single subscriber.
     *
     * If sharded subscribers are used, then one additional subscriber connection per master node
     * is established. If you don't plan to use SPUBLISH/SSUBSCRIBE, then this should be disabled.
     *
     * @default false
     */
    shardedSubscribers?: boolean;
    /**
     * Passed to the constructor of `Redis`
     *
     * @default null
     */
    redisOptions?: Omit<RedisOptions, "port" | "host" | "path" | "sentinels" | "retryStrategy" | "enableOfflineQueue" | "readOnly">;
    /**
     * By default, When a new Cluster instance is created,
     * it will connect to the Redis cluster automatically.
     * If you want to keep the instance disconnected until the first command is called,
     * set this option to `true`.
     *
     * @default false
     */
    lazyConnect?: boolean;
    /**
     * Discover nodes using SRV records
     *
     * @default false
     */
    useSRVRecords?: boolean;
    /**
     * SRV records will be resolved via this function.
     *
     * You may provide a custom `resolveSrv` function when you want to customize
     * the cache behavior of the default function.
     *
     * @default require('dns').resolveSrv
     */
    resolveSrv?: DNSResolveSrvFunction;
    /**
     * Hostnames will be resolved to IP addresses via this function.
     * This is needed when the addresses of startup nodes are hostnames instead
     * of IPs.
     *
     * You may provide a custom `lookup` function when you want to customize
     * the cache behavior of the default function.
     *
     * @default require('dns').lookup
     */
    dnsLookup?: DNSLookupFunction;
    natMap?: NatMap;
    /**
     * See Redis class.
     *
     * @default false
     */
    enableAutoPipelining?: boolean;
    /**
     * See Redis class.
     *
     * @default []
     */
    autoPipeliningIgnoredCommands?: string[];
    /**
     * Custom LUA commands
     */
    scripts?: Record<string, {
        lua: string;
        numberOfKeys?: number;
        readOnly?: boolean;
    }>;
}
export declare const DEFAULT_CLUSTER_OPTIONS: ClusterOptions;
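For orientation, here is a minimal sketch of how a consumer would pass a few of the options declared above to the Cluster constructor. The node address and password are placeholders, not values taken from this commit.

import { Cluster } from "ioredis";

const cluster = new Cluster([{ host: "127.0.0.1", port: 7000 }], {
  scaleReads: "slave",          // route read-only commands to replicas
  maxRedirections: 16,          // matches the documented default
  retryDelayOnFailover: 100,
  shardedSubscribers: true,     // opt in to the sharded Pub/Sub support added in this vendored build
  redisOptions: { password: "placeholder-password" },
});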
node_modules/ioredis/built/cluster/ClusterOptions.js (generated, vendored, Normal file, 22 lines added)
@@ -0,0 +1,22 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.DEFAULT_CLUSTER_OPTIONS = void 0;
const dns_1 = require("dns");
exports.DEFAULT_CLUSTER_OPTIONS = {
    clusterRetryStrategy: (times) => Math.min(100 + times * 2, 2000),
    enableOfflineQueue: true,
    enableReadyCheck: true,
    scaleReads: "master",
    maxRedirections: 16,
    retryDelayOnMoved: 0,
    retryDelayOnFailover: 100,
    retryDelayOnClusterDown: 100,
    retryDelayOnTryAgain: 100,
    slotsRefreshTimeout: 1000,
    useSRVRecords: false,
    resolveSrv: dns_1.resolveSrv,
    dnsLookup: dns_1.lookup,
    enableAutoPipelining: false,
    autoPipeliningIgnoredCommands: [],
    shardedSubscribers: false,
};
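The default clusterRetryStrategy above grows linearly with the attempt count and caps at two seconds; a quick sketch of the delays it yields:

const clusterRetryStrategy = (times: number) => Math.min(100 + times * 2, 2000);

console.log(clusterRetryStrategy(1));    // 102 ms before the first retry
console.log(clusterRetryStrategy(100));  // 300 ms
console.log(clusterRetryStrategy(1000)); // 2000 ms (capped)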
node_modules/ioredis/built/cluster/ClusterSubscriber.d.ts (generated, vendored, Normal file, 29 lines added)
@@ -0,0 +1,29 @@
/// <reference types="node" />
import { EventEmitter } from "events";
import ConnectionPool from "./ConnectionPool";
export default class ClusterSubscriber {
    private connectionPool;
    private emitter;
    private isSharded;
    private started;
    private subscriber;
    private lastActiveSubscriber;
    private slotRange;
    constructor(connectionPool: ConnectionPool, emitter: EventEmitter, isSharded?: boolean);
    getInstance(): any;
    /**
     * Associate this subscriber to a specific slot range.
     *
     * Returns the range or an empty array if the slot range couldn't be associated.
     *
     * BTW: This is more for debugging and testing purposes.
     *
     * @param range
     */
    associateSlotRange(range: number[]): number[];
    start(): void;
    stop(): void;
    isStarted(): boolean;
    private onSubscriberEnd;
    private selectSubscriber;
}
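ClusterSubscriber is internal; applications reach it through the ordinary Pub/Sub methods on the cluster client, which relay the events this class forwards. A minimal sketch (the address and channel name are placeholders):

import { Cluster } from "ioredis";

const cluster = new Cluster([{ host: "127.0.0.1", port: 7000 }]);

cluster.on("message", (channel, message) => {
  console.log(`message on ${channel}: ${message}`);
});
cluster.subscribe("news").catch(console.error);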
node_modules/ioredis/built/cluster/ClusterSubscriber.js (generated, vendored, Normal file, 223 lines added)
@@ -0,0 +1,223 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const util_1 = require("./util");
|
||||
const utils_1 = require("../utils");
|
||||
const Redis_1 = require("../Redis");
|
||||
const debug = (0, utils_1.Debug)("cluster:subscriber");
|
||||
class ClusterSubscriber {
|
||||
constructor(connectionPool, emitter, isSharded = false) {
|
||||
this.connectionPool = connectionPool;
|
||||
this.emitter = emitter;
|
||||
this.isSharded = isSharded;
|
||||
this.started = false;
|
||||
//There is only one connection for the entire pool
|
||||
this.subscriber = null;
|
||||
//The slot range for which this subscriber is responsible
|
||||
this.slotRange = [];
|
||||
this.onSubscriberEnd = () => {
|
||||
if (!this.started) {
|
||||
debug("subscriber has disconnected, but ClusterSubscriber is not started, so not reconnecting.");
|
||||
return;
|
||||
}
|
||||
// If the subscriber closes whilst it's still the active connection,
|
||||
// we might as well try to connecting to a new node if possible to
|
||||
// minimise the number of missed publishes.
|
||||
debug("subscriber has disconnected, selecting a new one...");
|
||||
this.selectSubscriber();
|
||||
};
|
||||
// If the current node we're using as the subscriber disappears
|
||||
// from the node pool for some reason, we will select a new one
|
||||
// to connect to.
|
||||
// Note that this event is only triggered if the connection to
|
||||
// the node has been used; cluster subscriptions are setup with
|
||||
// lazyConnect = true. It's possible for the subscriber node to
|
||||
// disappear without this method being called!
|
||||
// See https://github.com/luin/ioredis/pull/1589
|
||||
this.connectionPool.on("-node", (_, key) => {
|
||||
if (!this.started || !this.subscriber) {
|
||||
return;
|
||||
}
|
||||
if ((0, util_1.getNodeKey)(this.subscriber.options) === key) {
|
||||
debug("subscriber has left, selecting a new one...");
|
||||
this.selectSubscriber();
|
||||
}
|
||||
});
|
||||
this.connectionPool.on("+node", () => {
|
||||
if (!this.started || this.subscriber) {
|
||||
return;
|
||||
}
|
||||
debug("a new node is discovered and there is no subscriber, selecting a new one...");
|
||||
this.selectSubscriber();
|
||||
});
|
||||
}
|
||||
getInstance() {
|
||||
return this.subscriber;
|
||||
}
|
||||
/**
|
||||
* Associate this subscriber to a specific slot range.
|
||||
*
|
||||
* Returns the range or an empty array if the slot range couldn't be associated.
|
||||
*
|
||||
* BTW: This is more for debugging and testing purposes.
|
||||
*
|
||||
* @param range
|
||||
*/
|
||||
associateSlotRange(range) {
|
||||
if (this.isSharded) {
|
||||
this.slotRange = range;
|
||||
}
|
||||
return this.slotRange;
|
||||
}
|
||||
start() {
|
||||
this.started = true;
|
||||
this.selectSubscriber();
|
||||
debug("started");
|
||||
}
|
||||
stop() {
|
||||
this.started = false;
|
||||
if (this.subscriber) {
|
||||
this.subscriber.disconnect();
|
||||
this.subscriber = null;
|
||||
}
|
||||
}
|
||||
isStarted() {
|
||||
return this.started;
|
||||
}
|
||||
selectSubscriber() {
|
||||
const lastActiveSubscriber = this.lastActiveSubscriber;
|
||||
// Disconnect the previous subscriber even if there
|
||||
// will not be a new one.
|
||||
if (lastActiveSubscriber) {
|
||||
lastActiveSubscriber.off("end", this.onSubscriberEnd);
|
||||
lastActiveSubscriber.disconnect();
|
||||
}
|
||||
if (this.subscriber) {
|
||||
this.subscriber.off("end", this.onSubscriberEnd);
|
||||
this.subscriber.disconnect();
|
||||
}
|
||||
const sampleNode = (0, utils_1.sample)(this.connectionPool.getNodes());
|
||||
if (!sampleNode) {
|
||||
debug("selecting subscriber failed since there is no node discovered in the cluster yet");
|
||||
this.subscriber = null;
|
||||
return;
|
||||
}
|
||||
const { options } = sampleNode;
|
||||
debug("selected a subscriber %s:%s", options.host, options.port);
|
||||
/*
|
||||
* Create a specialized Redis connection for the subscription.
|
||||
* Note that auto reconnection is enabled here.
|
||||
*
|
||||
* `enableReadyCheck` is also enabled because although subscription is allowed
|
||||
* while redis is loading data from the disk, we can check if the password
|
||||
* provided for the subscriber is correct, and if not, the current subscriber
|
||||
* will be disconnected and a new subscriber will be selected.
|
||||
*/
|
||||
let connectionPrefix = "subscriber";
|
||||
if (this.isSharded)
|
||||
connectionPrefix = "ssubscriber";
|
||||
this.subscriber = new Redis_1.default({
|
||||
port: options.port,
|
||||
host: options.host,
|
||||
username: options.username,
|
||||
password: options.password,
|
||||
enableReadyCheck: true,
|
||||
connectionName: (0, util_1.getConnectionName)(connectionPrefix, options.connectionName),
|
||||
lazyConnect: true,
|
||||
tls: options.tls,
|
||||
// Don't try to reconnect the subscriber connection. If the connection fails
|
||||
// we will get an end event (handled below), at which point we'll pick a new
|
||||
// node from the pool and try to connect to that as the subscriber connection.
|
||||
retryStrategy: null,
|
||||
});
|
||||
// Ignore the errors since they're handled in the connection pool.
|
||||
this.subscriber.on("error", utils_1.noop);
|
||||
this.subscriber.on("moved", () => {
|
||||
this.emitter.emit("forceRefresh");
|
||||
});
|
||||
// The node we lost connection to may not come back up in a
|
||||
// reasonable amount of time (e.g. a slave that's taken down
|
||||
// for maintainence), we could potentially miss many published
|
||||
// messages so we should reconnect as quickly as possible, to
|
||||
// a different node if needed.
|
||||
this.subscriber.once("end", this.onSubscriberEnd);
|
||||
// Re-subscribe previous channels
|
||||
const previousChannels = { subscribe: [], psubscribe: [], ssubscribe: [] };
|
||||
if (lastActiveSubscriber) {
|
||||
const condition = lastActiveSubscriber.condition || lastActiveSubscriber.prevCondition;
|
||||
if (condition && condition.subscriber) {
|
||||
previousChannels.subscribe = condition.subscriber.channels("subscribe");
|
||||
previousChannels.psubscribe =
|
||||
condition.subscriber.channels("psubscribe");
|
||||
previousChannels.ssubscribe =
|
||||
condition.subscriber.channels("ssubscribe");
|
||||
}
|
||||
}
|
||||
if (previousChannels.subscribe.length ||
|
||||
previousChannels.psubscribe.length ||
|
||||
previousChannels.ssubscribe.length) {
|
||||
let pending = 0;
|
||||
for (const type of ["subscribe", "psubscribe", "ssubscribe"]) {
|
||||
const channels = previousChannels[type];
|
||||
if (channels.length == 0) {
|
||||
continue;
|
||||
}
|
||||
debug("%s %d channels", type, channels.length);
|
||||
if (type === "ssubscribe") {
|
||||
for (const channel of channels) {
|
||||
pending += 1;
|
||||
this.subscriber[type](channel)
|
||||
.then(() => {
|
||||
if (!--pending) {
|
||||
this.lastActiveSubscriber = this.subscriber;
|
||||
}
|
||||
})
|
||||
.catch(() => {
|
||||
// TODO: should probably disconnect the subscriber and try again.
|
||||
debug("failed to ssubscribe to channel: %s", channel);
|
||||
});
|
||||
}
|
||||
}
|
||||
else {
|
||||
pending += 1;
|
||||
this.subscriber[type](channels)
|
||||
.then(() => {
|
||||
if (!--pending) {
|
||||
this.lastActiveSubscriber = this.subscriber;
|
||||
}
|
||||
})
|
||||
.catch(() => {
|
||||
// TODO: should probably disconnect the subscriber and try again.
|
||||
debug("failed to %s %d channels", type, channels.length);
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
this.lastActiveSubscriber = this.subscriber;
|
||||
}
|
||||
for (const event of [
|
||||
"message",
|
||||
"messageBuffer",
|
||||
]) {
|
||||
this.subscriber.on(event, (arg1, arg2) => {
|
||||
this.emitter.emit(event, arg1, arg2);
|
||||
});
|
||||
}
|
||||
for (const event of ["pmessage", "pmessageBuffer"]) {
|
||||
this.subscriber.on(event, (arg1, arg2, arg3) => {
|
||||
this.emitter.emit(event, arg1, arg2, arg3);
|
||||
});
|
||||
}
|
||||
if (this.isSharded == true) {
|
||||
for (const event of [
|
||||
"smessage",
|
||||
"smessageBuffer",
|
||||
]) {
|
||||
this.subscriber.on(event, (arg1, arg2) => {
|
||||
this.emitter.emit(event, arg1, arg2);
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
exports.default = ClusterSubscriber;
|
||||
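When constructed with isSharded set to true, the subscriber above also forwards smessage/smessageBuffer events, which is what the sharded Pub/Sub path relies on. A sketch of the corresponding client-level calls, assuming this build exposes ssubscribe/spublish command methods and that shardedSubscribers is enabled (channel names are placeholders; the hash tag keeps them in one slot):

import { Cluster } from "ioredis";

const cluster = new Cluster([{ host: "127.0.0.1", port: 7000 }], {
  shardedSubscribers: true,
});

cluster.on("smessage", (channel, message) => {
  console.log(`sharded message on ${channel}: ${message}`);
});
cluster.ssubscribe("orders:{eu}").catch(console.error);

// Publisher side: SPUBLISH is routed by the channel's hash slot.
cluster.spublish("orders:{eu}", "hello").catch(console.error);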
node_modules/ioredis/built/cluster/ClusterSubscriberGroup.d.ts (generated, vendored, Normal file, 86 lines added)
@@ -0,0 +1,86 @@
/// <reference types="node" />
import ClusterSubscriber from "./ClusterSubscriber";
import Cluster from "./index";
/**
 * Redis differs between "normal" and sharded PubSub. If using the "normal" PubSub feature, exactly one
 * ClusterSubscriber exists per cluster instance. This works because the Redis cluster bus forwards m
 * messages between shards. However, this has scalability limitations, which is the reason why the sharded
 * PubSub feature was added to Redis. With sharded PubSub, each shard is responsible for its own messages.
 * Given that, we need at least one ClusterSubscriber per master endpoint/node.
 *
 * This class leverages the previously exising ClusterSubscriber by adding support for multiple such subscribers
 * in alignment to the master nodes of the cluster. The ClusterSubscriber class was extended in a non-breaking way
 * to support this feature.
 */
export default class ClusterSubscriberGroup {
    private cluster;
    private shardedSubscribers;
    private clusterSlots;
    private subscriberToSlotsIndex;
    private channels;
    /**
     * Register callbacks
     *
     * @param cluster
     */
    constructor(cluster: Cluster, refreshSlotsCacheCallback: () => void);
    /**
     * Get the responsible subscriber.
     *
     * Returns null if no subscriber was found
     *
     * @param slot
     */
    getResponsibleSubscriber(slot: number): ClusterSubscriber;
    /**
     * Adds a channel for which this subscriber group is responsible
     *
     * @param channels
     */
    addChannels(channels: (string | Buffer)[]): number;
    /**
     * Removes channels for which the subscriber group is responsible by optionally unsubscribing
     * @param channels
     */
    removeChannels(channels: (string | Buffer)[]): number;
    /**
     * Disconnect all subscribers
     */
    stop(): void;
    /**
     * Start all not yet started subscribers
     */
    start(): void;
    /**
     * Add a subscriber to the group of subscribers
     *
     * @param redis
     */
    private _addSubscriber;
    /**
     * Removes a subscriber from the group
     * @param redis
     */
    private _removeSubscriber;
    /**
     * Refreshes the subscriber-related slot ranges
     *
     * Returns false if no refresh was needed
     *
     * @param cluster
     */
    private _refreshSlots;
    /**
     * Resubscribes to the previous channels
     *
     * @private
     */
    private _resubscribe;
    /**
     * Deep equality of the cluster slots objects
     *
     * @param other
     * @private
     */
    private _slotsAreEqual;
}
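Because each subscriber in the group owns one master's slot range, the channels passed to a single addChannels call must hash to the same slot. A sketch of checking that up front with the same cluster-key-slot package the implementation depends on (default-import style assumed; channel names are placeholders):

import calculateSlot from "cluster-key-slot";

// The {eu} hash tag forces both channels into the same slot.
const channels = ["orders:{eu}:created", "orders:{eu}:paid"];
const slots = channels.map((c) => calculateSlot(c));
const sameSlot = slots.every((s) => s === slots[0]);
console.log(sameSlot ? `all channels map to slot ${slots[0]}` : "channels span multiple slots");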
node_modules/ioredis/built/cluster/ClusterSubscriberGroup.js (generated, vendored, Normal file, 227 lines added)
@@ -0,0 +1,227 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const utils_1 = require("../utils");
|
||||
const ClusterSubscriber_1 = require("./ClusterSubscriber");
|
||||
const ConnectionPool_1 = require("./ConnectionPool");
|
||||
const util_1 = require("./util");
|
||||
const calculateSlot = require("cluster-key-slot");
|
||||
const debug = (0, utils_1.Debug)("cluster:subscriberGroup");
|
||||
/**
|
||||
* Redis differs between "normal" and sharded PubSub. If using the "normal" PubSub feature, exactly one
|
||||
* ClusterSubscriber exists per cluster instance. This works because the Redis cluster bus forwards m
|
||||
* messages between shards. However, this has scalability limitations, which is the reason why the sharded
|
||||
* PubSub feature was added to Redis. With sharded PubSub, each shard is responsible for its own messages.
|
||||
* Given that, we need at least one ClusterSubscriber per master endpoint/node.
|
||||
*
|
||||
* This class leverages the previously exising ClusterSubscriber by adding support for multiple such subscribers
|
||||
* in alignment to the master nodes of the cluster. The ClusterSubscriber class was extended in a non-breaking way
|
||||
* to support this feature.
|
||||
*/
|
||||
class ClusterSubscriberGroup {
|
||||
/**
|
||||
* Register callbacks
|
||||
*
|
||||
* @param cluster
|
||||
*/
|
||||
constructor(cluster, refreshSlotsCacheCallback) {
|
||||
this.cluster = cluster;
|
||||
this.shardedSubscribers = new Map();
|
||||
this.clusterSlots = [];
|
||||
//Simple [min, max] slot ranges aren't enough because you can migrate single slots
|
||||
this.subscriberToSlotsIndex = new Map();
|
||||
this.channels = new Map();
|
||||
cluster.on("+node", (redis) => {
|
||||
this._addSubscriber(redis);
|
||||
});
|
||||
cluster.on("-node", (redis) => {
|
||||
this._removeSubscriber(redis);
|
||||
});
|
||||
cluster.on("refresh", () => {
|
||||
this._refreshSlots(cluster);
|
||||
});
|
||||
cluster.on("forceRefresh", () => {
|
||||
refreshSlotsCacheCallback();
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Get the responsible subscriber.
|
||||
*
|
||||
* Returns null if no subscriber was found
|
||||
*
|
||||
* @param slot
|
||||
*/
|
||||
getResponsibleSubscriber(slot) {
|
||||
const nodeKey = this.clusterSlots[slot][0];
|
||||
return this.shardedSubscribers.get(nodeKey);
|
||||
}
|
||||
/**
|
||||
* Adds a channel for which this subscriber group is responsible
|
||||
*
|
||||
* @param channels
|
||||
*/
|
||||
addChannels(channels) {
|
||||
const slot = calculateSlot(channels[0]);
|
||||
//Check if the all channels belong to the same slot and otherwise reject the operation
|
||||
channels.forEach((c) => {
|
||||
if (calculateSlot(c) != slot)
|
||||
return -1;
|
||||
});
|
||||
const currChannels = this.channels.get(slot);
|
||||
if (!currChannels) {
|
||||
this.channels.set(slot, channels);
|
||||
}
|
||||
else {
|
||||
this.channels.set(slot, currChannels.concat(channels));
|
||||
}
|
||||
return [...this.channels.values()].flatMap(v => v).length;
|
||||
}
|
||||
/**
|
||||
* Removes channels for which the subscriber group is responsible by optionally unsubscribing
|
||||
* @param channels
|
||||
*/
|
||||
removeChannels(channels) {
|
||||
const slot = calculateSlot(channels[0]);
|
||||
//Check if the all channels belong to the same slot and otherwise reject the operation
|
||||
channels.forEach((c) => {
|
||||
if (calculateSlot(c) != slot)
|
||||
return -1;
|
||||
});
|
||||
const slotChannels = this.channels.get(slot);
|
||||
if (slotChannels) {
|
||||
const updatedChannels = slotChannels.filter(c => !channels.includes(c));
|
||||
this.channels.set(slot, updatedChannels);
|
||||
}
|
||||
return [...this.channels.values()].flatMap(v => v).length;
|
||||
}
|
||||
/**
|
||||
* Disconnect all subscribers
|
||||
*/
|
||||
stop() {
|
||||
for (const s of this.shardedSubscribers.values()) {
|
||||
s.stop();
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Start all not yet started subscribers
|
||||
*/
|
||||
start() {
|
||||
for (const s of this.shardedSubscribers.values()) {
|
||||
if (!s.isStarted()) {
|
||||
s.start();
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Add a subscriber to the group of subscribers
|
||||
*
|
||||
* @param redis
|
||||
*/
|
||||
_addSubscriber(redis) {
|
||||
const pool = new ConnectionPool_1.default(redis.options);
|
||||
if (pool.addMasterNode(redis)) {
|
||||
const sub = new ClusterSubscriber_1.default(pool, this.cluster, true);
|
||||
const nodeKey = (0, util_1.getNodeKey)(redis.options);
|
||||
this.shardedSubscribers.set(nodeKey, sub);
|
||||
sub.start();
|
||||
// We need to attempt to resubscribe them in case the new node serves their slot
|
||||
this._resubscribe();
|
||||
this.cluster.emit("+subscriber");
|
||||
return sub;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
/**
|
||||
* Removes a subscriber from the group
|
||||
* @param redis
|
||||
*/
|
||||
_removeSubscriber(redis) {
|
||||
const nodeKey = (0, util_1.getNodeKey)(redis.options);
|
||||
const sub = this.shardedSubscribers.get(nodeKey);
|
||||
if (sub) {
|
||||
sub.stop();
|
||||
this.shardedSubscribers.delete(nodeKey);
|
||||
// Even though the subscriber to this node is going down, we might have another subscriber
|
||||
// handling the same slots, so we need to attempt to subscribe the orphaned channels
|
||||
this._resubscribe();
|
||||
this.cluster.emit("-subscriber");
|
||||
}
|
||||
return this.shardedSubscribers;
|
||||
}
|
||||
/**
|
||||
* Refreshes the subscriber-related slot ranges
|
||||
*
|
||||
* Returns false if no refresh was needed
|
||||
*
|
||||
* @param cluster
|
||||
*/
|
||||
_refreshSlots(cluster) {
|
||||
//If there was an actual change, then reassign the slot ranges
|
||||
if (this._slotsAreEqual(cluster.slots)) {
|
||||
debug("Nothing to refresh because the new cluster map is equal to the previous one.");
|
||||
}
|
||||
else {
|
||||
debug("Refreshing the slots of the subscriber group.");
|
||||
//Rebuild the slots index
|
||||
this.subscriberToSlotsIndex = new Map();
|
||||
for (let slot = 0; slot < cluster.slots.length; slot++) {
|
||||
const node = cluster.slots[slot][0];
|
||||
if (!this.subscriberToSlotsIndex.has(node)) {
|
||||
this.subscriberToSlotsIndex.set(node, []);
|
||||
}
|
||||
this.subscriberToSlotsIndex.get(node).push(Number(slot));
|
||||
}
|
||||
//Update the subscribers from the index
|
||||
this._resubscribe();
|
||||
//Update the cached slots map
|
||||
this.clusterSlots = JSON.parse(JSON.stringify(cluster.slots));
|
||||
this.cluster.emit("subscribersReady");
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
/**
|
||||
* Resubscribes to the previous channels
|
||||
*
|
||||
* @private
|
||||
*/
|
||||
_resubscribe() {
|
||||
if (this.shardedSubscribers) {
|
||||
this.shardedSubscribers.forEach((s, nodeKey) => {
|
||||
const subscriberSlots = this.subscriberToSlotsIndex.get(nodeKey);
|
||||
if (subscriberSlots) {
|
||||
//More for debugging purposes
|
||||
s.associateSlotRange(subscriberSlots);
|
||||
//Resubscribe on the underlying connection
|
||||
subscriberSlots.forEach((ss) => {
|
||||
//Might return null if being disconnected
|
||||
const redis = s.getInstance();
|
||||
const channels = this.channels.get(ss);
|
||||
if (channels && channels.length > 0) {
|
||||
//Try to subscribe now
|
||||
if (redis) {
|
||||
redis.ssubscribe(channels);
|
||||
//If the instance isn't ready yet, then register the re-subscription for later
|
||||
redis.on("ready", () => {
|
||||
redis.ssubscribe(channels);
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Deep equality of the cluster slots objects
|
||||
*
|
||||
* @param other
|
||||
* @private
|
||||
*/
|
||||
_slotsAreEqual(other) {
|
||||
if (this.clusterSlots === undefined)
|
||||
return false;
|
||||
else
|
||||
return JSON.stringify(this.clusterSlots) === JSON.stringify(other);
|
||||
}
|
||||
}
|
||||
exports.default = ClusterSubscriberGroup;
|
||||
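The group reports its lifecycle through events emitted on the Cluster instance, as the code above shows ("+subscriber", "-subscriber", "subscribersReady"). A small diagnostic sketch (address is a placeholder):

import { Cluster } from "ioredis";

const cluster = new Cluster([{ host: "127.0.0.1", port: 7000 }], {
  shardedSubscribers: true,
});

cluster.on("+subscriber", () => console.log("sharded subscriber added"));
cluster.on("-subscriber", () => console.log("sharded subscriber removed"));
cluster.on("subscribersReady", () => console.log("subscriber slot assignments refreshed"));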
node_modules/ioredis/built/cluster/ConnectionPool.d.ts (generated, vendored, Normal file, 37 lines added)
@@ -0,0 +1,37 @@
/// <reference types="node" />
import { EventEmitter } from "events";
import { RedisOptions, NodeKey, NodeRole } from "./util";
import Redis from "../Redis";
export default class ConnectionPool extends EventEmitter {
    private redisOptions;
    private nodes;
    private specifiedOptions;
    constructor(redisOptions: any);
    getNodes(role?: NodeRole): Redis[];
    getInstanceByKey(key: NodeKey): Redis;
    getSampleInstance(role: NodeRole): Redis;
    /**
     * Add a master node to the pool
     * @param node
     */
    addMasterNode(node: RedisOptions): boolean;
    /**
     * Creates a Redis connection instance from the node options
     * @param node
     * @param readOnly
     */
    createRedisFromOptions(node: RedisOptions, readOnly: boolean): Redis;
    /**
     * Find or create a connection to the node
     */
    findOrCreate(node: RedisOptions, readOnly?: boolean): Redis;
    /**
     * Reset the pool with a set of nodes.
     * The old node will be removed.
     */
    reset(nodes: RedisOptions[]): void;
    /**
     * Remove a node from the pool.
     */
    private removeNode;
}
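The pool is what Cluster#nodes(role) delegates to, so the roles declared above surface directly in the public API. A sketch that pings every master node of an already-created cluster instance:

import { Cluster } from "ioredis";

async function pingMasters(cluster: Cluster): Promise<void> {
  const masters = cluster.nodes("master"); // backed by ConnectionPool.getNodes("master")
  await Promise.all(masters.map((node) => node.ping()));
  console.log(`pinged ${masters.length} master node(s)`);
}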
node_modules/ioredis/built/cluster/ConnectionPool.js (generated, vendored, Normal file, 154 lines added)
@@ -0,0 +1,154 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const events_1 = require("events");
|
||||
const utils_1 = require("../utils");
|
||||
const util_1 = require("./util");
|
||||
const Redis_1 = require("../Redis");
|
||||
const debug = (0, utils_1.Debug)("cluster:connectionPool");
|
||||
class ConnectionPool extends events_1.EventEmitter {
|
||||
constructor(redisOptions) {
|
||||
super();
|
||||
this.redisOptions = redisOptions;
|
||||
// master + slave = all
|
||||
this.nodes = {
|
||||
all: {},
|
||||
master: {},
|
||||
slave: {},
|
||||
};
|
||||
this.specifiedOptions = {};
|
||||
}
|
||||
getNodes(role = "all") {
|
||||
const nodes = this.nodes[role];
|
||||
return Object.keys(nodes).map((key) => nodes[key]);
|
||||
}
|
||||
getInstanceByKey(key) {
|
||||
return this.nodes.all[key];
|
||||
}
|
||||
getSampleInstance(role) {
|
||||
const keys = Object.keys(this.nodes[role]);
|
||||
const sampleKey = (0, utils_1.sample)(keys);
|
||||
return this.nodes[role][sampleKey];
|
||||
}
|
||||
/**
|
||||
* Add a master node to the pool
|
||||
* @param node
|
||||
*/
|
||||
addMasterNode(node) {
|
||||
const key = (0, util_1.getNodeKey)(node.options);
|
||||
const redis = this.createRedisFromOptions(node, node.options.readOnly);
|
||||
//Master nodes aren't read-only
|
||||
if (!node.options.readOnly) {
|
||||
this.nodes.all[key] = redis;
|
||||
this.nodes.master[key] = redis;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
/**
|
||||
* Creates a Redis connection instance from the node options
|
||||
* @param node
|
||||
* @param readOnly
|
||||
*/
|
||||
createRedisFromOptions(node, readOnly) {
|
||||
const redis = new Redis_1.default((0, utils_1.defaults)({
|
||||
// Never try to reconnect when a node is lose,
|
||||
// instead, waiting for a `MOVED` error and
|
||||
// fetch the slots again.
|
||||
retryStrategy: null,
|
||||
// Offline queue should be enabled so that
|
||||
// we don't need to wait for the `ready` event
|
||||
// before sending commands to the node.
|
||||
enableOfflineQueue: true,
|
||||
readOnly: readOnly,
|
||||
}, node, this.redisOptions, { lazyConnect: true }));
|
||||
return redis;
|
||||
}
|
||||
/**
|
||||
* Find or create a connection to the node
|
||||
*/
|
||||
findOrCreate(node, readOnly = false) {
|
||||
const key = (0, util_1.getNodeKey)(node);
|
||||
readOnly = Boolean(readOnly);
|
||||
if (this.specifiedOptions[key]) {
|
||||
Object.assign(node, this.specifiedOptions[key]);
|
||||
}
|
||||
else {
|
||||
this.specifiedOptions[key] = node;
|
||||
}
|
||||
let redis;
|
||||
if (this.nodes.all[key]) {
|
||||
redis = this.nodes.all[key];
|
||||
if (redis.options.readOnly !== readOnly) {
|
||||
redis.options.readOnly = readOnly;
|
||||
debug("Change role of %s to %s", key, readOnly ? "slave" : "master");
|
||||
redis[readOnly ? "readonly" : "readwrite"]().catch(utils_1.noop);
|
||||
if (readOnly) {
|
||||
delete this.nodes.master[key];
|
||||
this.nodes.slave[key] = redis;
|
||||
}
|
||||
else {
|
||||
delete this.nodes.slave[key];
|
||||
this.nodes.master[key] = redis;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
debug("Connecting to %s as %s", key, readOnly ? "slave" : "master");
|
||||
redis = this.createRedisFromOptions(node, readOnly);
|
||||
this.nodes.all[key] = redis;
|
||||
this.nodes[readOnly ? "slave" : "master"][key] = redis;
|
||||
redis.once("end", () => {
|
||||
this.removeNode(key);
|
||||
this.emit("-node", redis, key);
|
||||
if (!Object.keys(this.nodes.all).length) {
|
||||
this.emit("drain");
|
||||
}
|
||||
});
|
||||
this.emit("+node", redis, key);
|
||||
redis.on("error", function (error) {
|
||||
this.emit("nodeError", error, key);
|
||||
});
|
||||
}
|
||||
return redis;
|
||||
}
|
||||
/**
|
||||
* Reset the pool with a set of nodes.
|
||||
* The old node will be removed.
|
||||
*/
|
||||
reset(nodes) {
|
||||
debug("Reset with %O", nodes);
|
||||
const newNodes = {};
|
||||
nodes.forEach((node) => {
|
||||
const key = (0, util_1.getNodeKey)(node);
|
||||
// Don't override the existing (master) node
|
||||
// when the current one is slave.
|
||||
if (!(node.readOnly && newNodes[key])) {
|
||||
newNodes[key] = node;
|
||||
}
|
||||
});
|
||||
Object.keys(this.nodes.all).forEach((key) => {
|
||||
if (!newNodes[key]) {
|
||||
debug("Disconnect %s because the node does not hold any slot", key);
|
||||
this.nodes.all[key].disconnect();
|
||||
this.removeNode(key);
|
||||
}
|
||||
});
|
||||
Object.keys(newNodes).forEach((key) => {
|
||||
const node = newNodes[key];
|
||||
this.findOrCreate(node, node.readOnly);
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Remove a node from the pool.
|
||||
*/
|
||||
removeNode(key) {
|
||||
const { nodes } = this;
|
||||
if (nodes.all[key]) {
|
||||
debug("Remove %s from the pool", key);
|
||||
delete nodes.all[key];
|
||||
}
|
||||
delete nodes.master[key];
|
||||
delete nodes.slave[key];
|
||||
}
|
||||
}
|
||||
exports.default = ConnectionPool;
|
||||
node_modules/ioredis/built/cluster/DelayQueue.d.ts (generated, vendored, Normal file, 20 lines added)
@@ -0,0 +1,20 @@
export interface DelayQueueOptions {
    callback?: Function;
    timeout: number;
}
/**
 * Queue that runs items after specified duration
 */
export default class DelayQueue {
    private queues;
    private timeouts;
    /**
     * Add a new item to the queue
     *
     * @param bucket bucket name
     * @param item function that will run later
     * @param options
     */
    push(bucket: string, item: Function, options: DelayQueueOptions): void;
    private execute;
}
node_modules/ioredis/built/cluster/DelayQueue.js (generated, vendored, Normal file, 53 lines added)
@@ -0,0 +1,53 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const utils_1 = require("../utils");
const Deque = require("denque");
const debug = (0, utils_1.Debug)("delayqueue");
/**
 * Queue that runs items after specified duration
 */
class DelayQueue {
    constructor() {
        this.queues = {};
        this.timeouts = {};
    }
    /**
     * Add a new item to the queue
     *
     * @param bucket bucket name
     * @param item function that will run later
     * @param options
     */
    push(bucket, item, options) {
        const callback = options.callback || process.nextTick;
        if (!this.queues[bucket]) {
            this.queues[bucket] = new Deque();
        }
        const queue = this.queues[bucket];
        queue.push(item);
        if (!this.timeouts[bucket]) {
            this.timeouts[bucket] = setTimeout(() => {
                callback(() => {
                    this.timeouts[bucket] = null;
                    this.execute(bucket);
                });
            }, options.timeout);
        }
    }
    execute(bucket) {
        const queue = this.queues[bucket];
        if (!queue) {
            return;
        }
        const { length } = queue;
        if (!length) {
            return;
        }
        debug("send %d commands in %s queue", length, bucket);
        this.queues[bucket] = null;
        while (queue.length > 0) {
            queue.shift()();
        }
    }
}
exports.default = DelayQueue;
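DelayQueue is what the cluster client uses to batch delayed retries (the retryDelayOn* options feed its timeout). A standalone sketch of the push/drain behaviour; the deep import path assumes this vendored build allows requiring its internal files directly:

import DelayQueue from "ioredis/built/cluster/DelayQueue";

const queue = new DelayQueue();

// Both items land in the same "failover" bucket, so a single timer drains them together.
queue.push("failover", () => console.log("first retried command"), { timeout: 100 });
queue.push("failover", () => console.log("second retried command"), { timeout: 100 });
// Roughly 100 ms later both callbacks run in insertion order.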
node_modules/ioredis/built/cluster/index.d.ts (generated, vendored, Normal file, 161 lines added)
@@ -0,0 +1,161 @@
/// <reference types="node" />
import { EventEmitter } from "events";
import Command from "../Command";
import Redis from "../Redis";
import ScanStream from "../ScanStream";
import { Transaction } from "../transaction";
import { Callback, ScanStreamOptions, WriteableStream } from "../types";
import Commander from "../utils/Commander";
import { ClusterOptions } from "./ClusterOptions";
import { NodeKey, NodeRole } from "./util";
export declare type ClusterNode = string | number | {
    host?: string | undefined;
    port?: number | undefined;
};
declare type ClusterStatus = "end" | "close" | "wait" | "connecting" | "connect" | "ready" | "reconnecting" | "disconnecting";
/**
 * Client for the official Redis Cluster
 */
declare class Cluster extends Commander {
    options: ClusterOptions;
    slots: NodeKey[][];
    status: ClusterStatus;
    /**
     * @ignore
     */
    _groupsIds: {
        [key: string]: number;
    };
    /**
     * @ignore
     */
    _groupsBySlot: number[];
    /**
     * @ignore
     */
    isCluster: boolean;
    private startupNodes;
    private connectionPool;
    private manuallyClosing;
    private retryAttempts;
    private delayQueue;
    private offlineQueue;
    private subscriber;
    private shardedSubscribers;
    private slotsTimer;
    private reconnectTimeout;
    private isRefreshing;
    private _refreshSlotsCacheCallbacks;
    private _autoPipelines;
    private _runningAutoPipelines;
    private _readyDelayedCallbacks;
    /**
     * Every time Cluster#connect() is called, this value will be
     * auto-incrementing. The purpose of this value is used for
     * discarding previous connect attampts when creating a new
     * connection.
     */
    private connectionEpoch;
    /**
     * Creates an instance of Cluster.
     */
    constructor(startupNodes: ClusterNode[], options?: ClusterOptions);
    /**
     * Connect to a cluster
     */
    connect(): Promise<void>;
    /**
     * Disconnect from every node in the cluster.
     */
    disconnect(reconnect?: boolean): void;
    /**
     * Quit the cluster gracefully.
     */
    quit(callback?: Callback<"OK">): Promise<"OK">;
    /**
     * Create a new instance with the same startup nodes and options as the current one.
     *
     * @example
     * ```js
     * var cluster = new Redis.Cluster([{ host: "127.0.0.1", port: "30001" }]);
     * var anotherCluster = cluster.duplicate();
     * ```
     */
    duplicate(overrideStartupNodes?: any[], overrideOptions?: {}): Cluster;
    /**
     * Get nodes with the specified role
     */
    nodes(role?: NodeRole): Redis[];
    /**
     * This is needed in order not to install a listener for each auto pipeline
     *
     * @ignore
     */
    delayUntilReady(callback: Callback): void;
    /**
     * Get the number of commands queued in automatic pipelines.
     *
     * This is not available (and returns 0) until the cluster is connected and slots information have been received.
     */
    get autoPipelineQueueSize(): number;
    /**
     * Refresh the slot cache
     *
     * @ignore
     */
    refreshSlotsCache(callback?: Callback<void>): void;
    /**
     * @ignore
     */
    sendCommand(command: Command, stream?: WriteableStream, node?: any): unknown;
    sscanStream(key: string, options?: ScanStreamOptions): ScanStream;
    sscanBufferStream(key: string, options?: ScanStreamOptions): ScanStream;
    hscanStream(key: string, options?: ScanStreamOptions): ScanStream;
    hscanBufferStream(key: string, options?: ScanStreamOptions): ScanStream;
    zscanStream(key: string, options?: ScanStreamOptions): ScanStream;
    zscanBufferStream(key: string, options?: ScanStreamOptions): ScanStream;
    /**
     * @ignore
     */
    handleError(error: Error, ttl: {
        value?: any;
    }, handlers: any): void;
    private resetOfflineQueue;
    private clearNodesRefreshInterval;
    private resetNodesRefreshInterval;
    /**
     * Change cluster instance's status
     */
    private setStatus;
    /**
     * Called when closed to check whether a reconnection should be made
     */
    private handleCloseEvent;
    /**
     * Flush offline queue with error.
     */
    private flushQueue;
    private executeOfflineCommands;
    private natMapper;
    private getInfoFromNode;
    private invokeReadyDelayedCallbacks;
    /**
     * Check whether Cluster is able to process commands
     */
    private readyCheck;
    private resolveSrv;
    private dnsLookup;
    /**
     * Normalize startup nodes, and resolving hostnames to IPs.
     *
     * This process happens every time when #connect() is called since
     * #startupNodes and DNS records may chanage.
     */
    private resolveStartupNodeHostnames;
    private createScanStream;
}
interface Cluster extends EventEmitter {
}
interface Cluster extends Transaction {
}
export default Cluster;
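The scan-stream helpers declared above each return a ScanStream, a readable object stream that emits batches of elements. A sketch for sscanStream (the address and key name are placeholders):

import { Cluster } from "ioredis";

const cluster = new Cluster([{ host: "127.0.0.1", port: 7000 }]);

const stream = cluster.sscanStream("myset", { count: 100 });
stream.on("data", (members: string[]) => {
  console.log("batch of set members:", members);
});
stream.on("end", () => console.log("scan complete"));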
node_modules/ioredis/built/cluster/index.js (generated, vendored, Normal file, 863 lines added)
@@ -0,0 +1,863 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const commands_1 = require("@ioredis/commands");
|
||||
const events_1 = require("events");
|
||||
const redis_errors_1 = require("redis-errors");
|
||||
const standard_as_callback_1 = require("standard-as-callback");
|
||||
const Command_1 = require("../Command");
|
||||
const ClusterAllFailedError_1 = require("../errors/ClusterAllFailedError");
|
||||
const Redis_1 = require("../Redis");
|
||||
const ScanStream_1 = require("../ScanStream");
|
||||
const transaction_1 = require("../transaction");
|
||||
const utils_1 = require("../utils");
|
||||
const applyMixin_1 = require("../utils/applyMixin");
|
||||
const Commander_1 = require("../utils/Commander");
|
||||
const ClusterOptions_1 = require("./ClusterOptions");
|
||||
const ClusterSubscriber_1 = require("./ClusterSubscriber");
|
||||
const ConnectionPool_1 = require("./ConnectionPool");
|
||||
const DelayQueue_1 = require("./DelayQueue");
|
||||
const util_1 = require("./util");
|
||||
const Deque = require("denque");
|
||||
const ClusterSubscriberGroup_1 = require("./ClusterSubscriberGroup");
|
||||
const debug = (0, utils_1.Debug)("cluster");
|
||||
const REJECT_OVERWRITTEN_COMMANDS = new WeakSet();
|
||||
/**
|
||||
* Client for the official Redis Cluster
|
||||
*/
|
||||
class Cluster extends Commander_1.default {
|
||||
/**
|
||||
* Creates an instance of Cluster.
|
||||
*/
|
||||
//TODO: Add an option that enables or disables sharded PubSub
|
||||
constructor(startupNodes, options = {}) {
|
||||
super();
|
||||
this.slots = [];
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
this._groupsIds = {};
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
this._groupsBySlot = Array(16384);
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
this.isCluster = true;
|
||||
this.retryAttempts = 0;
|
||||
this.delayQueue = new DelayQueue_1.default();
|
||||
this.offlineQueue = new Deque();
|
||||
this.isRefreshing = false;
|
||||
this._refreshSlotsCacheCallbacks = [];
|
||||
this._autoPipelines = new Map();
|
||||
this._runningAutoPipelines = new Set();
|
||||
this._readyDelayedCallbacks = [];
|
||||
/**
|
||||
* Every time Cluster#connect() is called, this value will be
|
||||
* auto-incrementing. The purpose of this value is used for
|
||||
* discarding previous connect attampts when creating a new
|
||||
* connection.
|
||||
*/
|
||||
this.connectionEpoch = 0;
|
||||
events_1.EventEmitter.call(this);
|
||||
this.startupNodes = startupNodes;
|
||||
this.options = (0, utils_1.defaults)({}, options, ClusterOptions_1.DEFAULT_CLUSTER_OPTIONS, this.options);
|
||||
if (this.options.shardedSubscribers == true)
|
||||
this.shardedSubscribers = new ClusterSubscriberGroup_1.default(this, this.refreshSlotsCache.bind(this));
|
||||
if (this.options.redisOptions &&
|
||||
this.options.redisOptions.keyPrefix &&
|
||||
!this.options.keyPrefix) {
|
||||
this.options.keyPrefix = this.options.redisOptions.keyPrefix;
|
||||
}
|
||||
// validate options
|
||||
if (typeof this.options.scaleReads !== "function" &&
|
||||
["all", "master", "slave"].indexOf(this.options.scaleReads) === -1) {
|
||||
throw new Error('Invalid option scaleReads "' +
|
||||
this.options.scaleReads +
|
||||
'". Expected "all", "master", "slave" or a custom function');
|
||||
}
|
||||
this.connectionPool = new ConnectionPool_1.default(this.options.redisOptions);
|
||||
this.connectionPool.on("-node", (redis, key) => {
|
||||
this.emit("-node", redis);
|
||||
});
|
||||
this.connectionPool.on("+node", (redis) => {
|
||||
this.emit("+node", redis);
|
||||
});
|
||||
this.connectionPool.on("drain", () => {
|
||||
this.setStatus("close");
|
||||
});
|
||||
this.connectionPool.on("nodeError", (error, key) => {
|
||||
this.emit("node error", error, key);
|
||||
});
|
||||
this.subscriber = new ClusterSubscriber_1.default(this.connectionPool, this);
|
||||
if (this.options.scripts) {
|
||||
Object.entries(this.options.scripts).forEach(([name, definition]) => {
|
||||
this.defineCommand(name, definition);
|
||||
});
|
||||
}
|
||||
if (this.options.lazyConnect) {
|
||||
this.setStatus("wait");
|
||||
}
|
||||
else {
|
||||
this.connect().catch((err) => {
|
||||
debug("connecting failed: %s", err);
|
||||
});
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Connect to a cluster
|
||||
*/
|
||||
connect() {
|
||||
return new Promise((resolve, reject) => {
|
||||
if (this.status === "connecting" ||
|
||||
this.status === "connect" ||
|
||||
this.status === "ready") {
|
||||
reject(new Error("Redis is already connecting/connected"));
|
||||
return;
|
||||
}
|
||||
const epoch = ++this.connectionEpoch;
|
||||
this.setStatus("connecting");
|
||||
this.resolveStartupNodeHostnames()
|
||||
.then((nodes) => {
|
||||
if (this.connectionEpoch !== epoch) {
|
||||
debug("discard connecting after resolving startup nodes because epoch not match: %d != %d", epoch, this.connectionEpoch);
|
||||
reject(new redis_errors_1.RedisError("Connection is discarded because a new connection is made"));
|
||||
return;
|
||||
}
|
||||
if (this.status !== "connecting") {
|
||||
debug("discard connecting after resolving startup nodes because the status changed to %s", this.status);
|
||||
reject(new redis_errors_1.RedisError("Connection is aborted"));
|
||||
return;
|
||||
}
|
||||
this.connectionPool.reset(nodes);
|
||||
const readyHandler = () => {
|
||||
this.setStatus("ready");
|
||||
this.retryAttempts = 0;
|
||||
this.executeOfflineCommands();
|
||||
this.resetNodesRefreshInterval();
|
||||
resolve();
|
||||
};
|
||||
let closeListener = undefined;
|
||||
const refreshListener = () => {
|
||||
this.invokeReadyDelayedCallbacks(undefined);
|
||||
this.removeListener("close", closeListener);
|
||||
this.manuallyClosing = false;
|
||||
this.setStatus("connect");
|
||||
if (this.options.enableReadyCheck) {
|
||||
this.readyCheck((err, fail) => {
|
||||
if (err || fail) {
|
||||
debug("Ready check failed (%s). Reconnecting...", err || fail);
|
||||
if (this.status === "connect") {
|
||||
this.disconnect(true);
|
||||
}
|
||||
}
|
||||
else {
|
||||
readyHandler();
|
||||
}
|
||||
});
|
||||
}
|
||||
else {
|
||||
readyHandler();
|
||||
}
|
||||
};
|
||||
closeListener = () => {
|
||||
const error = new Error("None of startup nodes is available");
|
||||
this.removeListener("refresh", refreshListener);
|
||||
this.invokeReadyDelayedCallbacks(error);
|
||||
reject(error);
|
||||
};
|
||||
this.once("refresh", refreshListener);
|
||||
this.once("close", closeListener);
|
||||
this.once("close", this.handleCloseEvent.bind(this));
|
||||
this.refreshSlotsCache((err) => {
|
||||
if (err && err.message === ClusterAllFailedError_1.default.defaultMessage) {
|
||||
Redis_1.default.prototype.silentEmit.call(this, "error", err);
|
||||
this.connectionPool.reset([]);
|
||||
}
|
||||
});
|
||||
this.subscriber.start();
|
||||
if (this.options.shardedSubscribers) {
|
||||
this.shardedSubscribers.start();
|
||||
}
|
||||
})
|
||||
.catch((err) => {
|
||||
this.setStatus("close");
|
||||
this.handleCloseEvent(err);
|
||||
this.invokeReadyDelayedCallbacks(err);
|
||||
reject(err);
|
||||
});
|
||||
});
|
||||
}
|
||||
/**
|
||||
* Disconnect from every node in the cluster.
|
||||
*/
|
||||
disconnect(reconnect = false) {
|
||||
const status = this.status;
|
||||
this.setStatus("disconnecting");
|
||||
if (!reconnect) {
|
||||
this.manuallyClosing = true;
|
||||
}
|
||||
if (this.reconnectTimeout && !reconnect) {
|
||||
clearTimeout(this.reconnectTimeout);
|
||||
this.reconnectTimeout = null;
|
||||
debug("Canceled reconnecting attempts");
|
||||
}
|
||||
this.clearNodesRefreshInterval();
|
||||
this.subscriber.stop();
|
||||
if (this.options.shardedSubscribers) {
|
||||
this.shardedSubscribers.stop();
|
||||
}
|
||||
if (status === "wait") {
|
||||
this.setStatus("close");
|
||||
this.handleCloseEvent();
|
||||
}
|
||||
else {
|
||||
this.connectionPool.reset([]);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Quit the cluster gracefully.
|
||||
*/
|
||||
quit(callback) {
|
||||
const status = this.status;
|
||||
this.setStatus("disconnecting");
|
||||
this.manuallyClosing = true;
|
||||
if (this.reconnectTimeout) {
|
||||
clearTimeout(this.reconnectTimeout);
|
||||
this.reconnectTimeout = null;
|
||||
}
|
||||
this.clearNodesRefreshInterval();
|
||||
this.subscriber.stop();
|
||||
if (this.options.shardedSubscribers) {
|
||||
this.shardedSubscribers.stop();
|
||||
}
|
||||
if (status === "wait") {
|
||||
const ret = (0, standard_as_callback_1.default)(Promise.resolve("OK"), callback);
|
||||
// use setImmediate to make sure "close" event
|
||||
// being emitted after quit() is returned
|
||||
setImmediate(function () {
|
||||
this.setStatus("close");
|
||||
this.handleCloseEvent();
|
||||
}.bind(this));
|
||||
return ret;
|
||||
}
|
||||
return (0, standard_as_callback_1.default)(Promise.all(this.nodes().map((node) => node.quit().catch((err) => {
|
||||
// Ignore the error caused by disconnecting since
|
||||
// we're disconnecting...
|
||||
if (err.message === utils_1.CONNECTION_CLOSED_ERROR_MSG) {
|
||||
return "OK";
|
||||
}
|
||||
throw err;
|
||||
}))).then(() => "OK"), callback);
|
||||
}
|
||||
/**
|
||||
* Create a new instance with the same startup nodes and options as the current one.
|
||||
*
|
||||
* @example
|
||||
* ```js
|
||||
* var cluster = new Redis.Cluster([{ host: "127.0.0.1", port: "30001" }]);
|
||||
* var anotherCluster = cluster.duplicate();
|
||||
* ```
|
||||
*/
|
||||
duplicate(overrideStartupNodes = [], overrideOptions = {}) {
|
||||
const startupNodes = overrideStartupNodes.length > 0
|
||||
? overrideStartupNodes
|
||||
: this.startupNodes.slice(0);
|
||||
const options = Object.assign({}, this.options, overrideOptions);
|
||||
return new Cluster(startupNodes, options);
|
||||
}
|
||||
/**
|
||||
* Get nodes with the specified role
|
||||
*/
|
||||
nodes(role = "all") {
|
||||
if (role !== "all" && role !== "master" && role !== "slave") {
|
||||
throw new Error('Invalid role "' + role + '". Expected "all", "master" or "slave"');
|
||||
}
|
||||
return this.connectionPool.getNodes(role);
|
||||
}
|
||||
/**
|
||||
* This is needed in order not to install a listener for each auto pipeline
|
||||
*
|
||||
* @ignore
|
||||
*/
|
||||
delayUntilReady(callback) {
|
||||
this._readyDelayedCallbacks.push(callback);
|
||||
}
|
||||
/**
|
||||
* Get the number of commands queued in automatic pipelines.
|
||||
*
|
||||
* This is not available (and returns 0) until the cluster is connected and slots information have been received.
|
||||
*/
|
||||
get autoPipelineQueueSize() {
|
||||
let queued = 0;
|
||||
for (const pipeline of this._autoPipelines.values()) {
|
||||
queued += pipeline.length;
|
||||
}
|
||||
return queued;
|
||||
}
|
||||
/**
|
||||
* Refresh the slot cache
|
||||
*
|
||||
* @ignore
|
||||
*/
|
||||
refreshSlotsCache(callback) {
|
||||
if (callback) {
|
||||
this._refreshSlotsCacheCallbacks.push(callback);
|
||||
}
|
||||
if (this.isRefreshing) {
|
||||
return;
|
||||
}
|
||||
this.isRefreshing = true;
|
||||
const _this = this;
|
||||
const wrapper = (error) => {
|
||||
this.isRefreshing = false;
|
||||
for (const callback of this._refreshSlotsCacheCallbacks) {
|
||||
callback(error);
|
||||
}
|
||||
this._refreshSlotsCacheCallbacks = [];
|
||||
};
|
||||
const nodes = (0, utils_1.shuffle)(this.connectionPool.getNodes());
|
||||
let lastNodeError = null;
|
||||
function tryNode(index) {
|
||||
if (index === nodes.length) {
|
||||
const error = new ClusterAllFailedError_1.default(ClusterAllFailedError_1.default.defaultMessage, lastNodeError);
|
||||
return wrapper(error);
|
||||
}
|
||||
const node = nodes[index];
|
||||
const key = `${node.options.host}:${node.options.port}`;
|
||||
debug("getting slot cache from %s", key);
|
||||
_this.getInfoFromNode(node, function (err) {
|
||||
switch (_this.status) {
|
||||
case "close":
|
||||
case "end":
|
||||
return wrapper(new Error("Cluster is disconnected."));
|
||||
case "disconnecting":
|
||||
return wrapper(new Error("Cluster is disconnecting."));
|
||||
}
|
||||
if (err) {
|
||||
_this.emit("node error", err, key);
|
||||
lastNodeError = err;
|
||||
tryNode(index + 1);
|
||||
}
|
||||
else {
|
||||
_this.emit("refresh");
|
||||
wrapper();
|
||||
}
|
||||
});
|
||||
}
|
||||
tryNode(0);
|
||||
}
|
||||
/**
|
||||
* @ignore
|
||||
*/
|
||||
sendCommand(command, stream, node) {
|
||||
if (this.status === "wait") {
|
||||
this.connect().catch(utils_1.noop);
|
||||
}
|
||||
if (this.status === "end") {
|
||||
command.reject(new Error(utils_1.CONNECTION_CLOSED_ERROR_MSG));
|
||||
return command.promise;
|
||||
}
|
||||
let to = this.options.scaleReads;
|
||||
if (to !== "master") {
|
||||
const isCommandReadOnly = command.isReadOnly ||
|
||||
((0, commands_1.exists)(command.name) && (0, commands_1.hasFlag)(command.name, "readonly"));
|
||||
if (!isCommandReadOnly) {
|
||||
to = "master";
|
||||
}
|
||||
}
        let targetSlot = node ? node.slot : command.getSlot();
        const ttl = {};
        const _this = this;
        if (!node && !REJECT_OVERWRITTEN_COMMANDS.has(command)) {
            REJECT_OVERWRITTEN_COMMANDS.add(command);
            const reject = command.reject;
            command.reject = function (err) {
                const partialTry = tryConnection.bind(null, true);
                _this.handleError(err, ttl, {
                    moved: function (slot, key) {
                        debug("command %s is moved to %s", command.name, key);
                        targetSlot = Number(slot);
                        if (_this.slots[slot]) {
                            _this.slots[slot][0] = key;
                        }
                        else {
                            _this.slots[slot] = [key];
                        }
                        _this._groupsBySlot[slot] =
                            _this._groupsIds[_this.slots[slot].join(";")];
                        _this.connectionPool.findOrCreate(_this.natMapper(key));
                        tryConnection();
                        debug("refreshing slot caches... (triggered by MOVED error)");
                        _this.refreshSlotsCache();
                    },
                    ask: function (slot, key) {
                        debug("command %s is required to ask %s:%s", command.name, key);
                        const mapped = _this.natMapper(key);
                        _this.connectionPool.findOrCreate(mapped);
                        tryConnection(false, `${mapped.host}:${mapped.port}`);
                    },
                    tryagain: partialTry,
                    clusterDown: partialTry,
                    connectionClosed: partialTry,
                    maxRedirections: function (redirectionError) {
                        reject.call(command, redirectionError);
                    },
                    defaults: function () {
                        reject.call(command, err);
                    },
                });
            };
        }
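        // Note: the wrapped `reject` above turns cluster redirection errors into
        // routing actions via handleError(): MOVED updates the local slot table and
        // retries (plus a background slot refresh), ASK retries once against the
        // indicated node, and TRYAGAIN / CLUSTERDOWN / connection-closed errors are
        // retried through the delay queue until `maxRedirections` is exhausted.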
        tryConnection();
        function tryConnection(random, asking) {
            if (_this.status === "end") {
                command.reject(new redis_errors_1.AbortError("Cluster is ended."));
                return;
            }
            let redis;
            if (_this.status === "ready" || command.name === "cluster") {
                if (node && node.redis) {
                    redis = node.redis;
                }
                else if (Command_1.default.checkFlag("ENTER_SUBSCRIBER_MODE", command.name) ||
                    Command_1.default.checkFlag("EXIT_SUBSCRIBER_MODE", command.name)) {
                    if (_this.options.shardedSubscribers == true &&
                        (command.name == "ssubscribe" || command.name == "sunsubscribe")) {
                        const sub = _this.shardedSubscribers.getResponsibleSubscriber(targetSlot);
                        let status = -1;
                        if (command.name == "ssubscribe")
                            status = _this.shardedSubscribers.addChannels(command.getKeys());
                        if (command.name == "sunsubscribe")
                            status = _this.shardedSubscribers.removeChannels(command.getKeys());
                        if (status !== -1) {
                            redis = sub.getInstance();
                        }
                        else {
                            command.reject(new redis_errors_1.AbortError("Can't add or remove the given channels. Are they in the same slot?"));
                        }
                    }
                    else {
                        redis = _this.subscriber.getInstance();
                    }
                    if (!redis) {
                        command.reject(new redis_errors_1.AbortError("No subscriber for the cluster"));
                        return;
                    }
                }
                else {
                    if (!random) {
                        if (typeof targetSlot === "number" && _this.slots[targetSlot]) {
                            const nodeKeys = _this.slots[targetSlot];
                            if (typeof to === "function") {
                                const nodes = nodeKeys.map(function (key) {
                                    return _this.connectionPool.getInstanceByKey(key);
                                });
                                redis = to(nodes, command);
                                if (Array.isArray(redis)) {
                                    redis = (0, utils_1.sample)(redis);
                                }
                                if (!redis) {
                                    redis = nodes[0];
                                }
                            }
                            else {
                                let key;
                                if (to === "all") {
                                    key = (0, utils_1.sample)(nodeKeys);
                                }
                                else if (to === "slave" && nodeKeys.length > 1) {
                                    key = (0, utils_1.sample)(nodeKeys, 1);
                                }
                                else {
                                    key = nodeKeys[0];
                                }
                                redis = _this.connectionPool.getInstanceByKey(key);
                            }
                        }
                        if (asking) {
                            redis = _this.connectionPool.getInstanceByKey(asking);
                            redis.asking();
                        }
                    }
                    if (!redis) {
                        redis =
                            (typeof to === "function"
                                ? null
                                : _this.connectionPool.getSampleInstance(to)) ||
                                _this.connectionPool.getSampleInstance("all");
                    }
                }
                if (node && !node.redis) {
                    node.redis = redis;
                }
            }
            if (redis) {
                redis.sendCommand(command, stream);
            }
            else if (_this.options.enableOfflineQueue) {
                _this.offlineQueue.push({
                    command: command,
                    stream: stream,
                    node: node,
                });
            }
            else {
                command.reject(new Error("Cluster isn't ready and enableOfflineQueue options is false"));
            }
        }
        return command.promise;
    }
    sscanStream(key, options) {
        return this.createScanStream("sscan", { key, options });
    }
    sscanBufferStream(key, options) {
        return this.createScanStream("sscanBuffer", { key, options });
    }
    hscanStream(key, options) {
        return this.createScanStream("hscan", { key, options });
    }
    hscanBufferStream(key, options) {
        return this.createScanStream("hscanBuffer", { key, options });
    }
    zscanStream(key, options) {
        return this.createScanStream("zscan", { key, options });
    }
    zscanBufferStream(key, options) {
        return this.createScanStream("zscanBuffer", { key, options });
    }
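    // Illustrative sketch (not part of the built file): the scan streams emit a
    // batch of results per SCAN cursor iteration. The key and pattern below are
    // assumptions.
    //
    //   const stream = cluster.sscanStream("myset", { match: "user:*", count: 100 });
    //   stream.on("data", (members) => console.log(members)); // array per iteration
    //   stream.on("end", () => console.log("done"));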
    /**
     * @ignore
     */
    handleError(error, ttl, handlers) {
        if (typeof ttl.value === "undefined") {
            ttl.value = this.options.maxRedirections;
        }
        else {
            ttl.value -= 1;
        }
        if (ttl.value <= 0) {
            handlers.maxRedirections(new Error("Too many Cluster redirections. Last error: " + error));
            return;
        }
        const errv = error.message.split(" ");
        if (errv[0] === "MOVED") {
            const timeout = this.options.retryDelayOnMoved;
            if (timeout && typeof timeout === "number") {
                this.delayQueue.push("moved", handlers.moved.bind(null, errv[1], errv[2]), { timeout });
            }
            else {
                handlers.moved(errv[1], errv[2]);
            }
        }
        else if (errv[0] === "ASK") {
            handlers.ask(errv[1], errv[2]);
        }
        else if (errv[0] === "TRYAGAIN") {
            this.delayQueue.push("tryagain", handlers.tryagain, {
                timeout: this.options.retryDelayOnTryAgain,
            });
        }
        else if (errv[0] === "CLUSTERDOWN" &&
            this.options.retryDelayOnClusterDown > 0) {
            this.delayQueue.push("clusterdown", handlers.connectionClosed, {
                timeout: this.options.retryDelayOnClusterDown,
                callback: this.refreshSlotsCache.bind(this),
            });
        }
        else if (error.message === utils_1.CONNECTION_CLOSED_ERROR_MSG &&
            this.options.retryDelayOnFailover > 0 &&
            this.status === "ready") {
            this.delayQueue.push("failover", handlers.connectionClosed, {
                timeout: this.options.retryDelayOnFailover,
                callback: this.refreshSlotsCache.bind(this),
            });
        }
        else {
            handlers.defaults();
        }
    }
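    // Note: redirection errors are parsed positionally from the reply text, e.g.
    // "MOVED 3999 127.0.0.1:6381" or "ASK 3999 127.0.0.1:6381" -> errv[1] is the
    // hash slot and errv[2] is the "host:port" of the node that should handle it.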
    resetOfflineQueue() {
        this.offlineQueue = new Deque();
    }
    clearNodesRefreshInterval() {
        if (this.slotsTimer) {
            clearTimeout(this.slotsTimer);
            this.slotsTimer = null;
        }
    }
    resetNodesRefreshInterval() {
        if (this.slotsTimer || !this.options.slotsRefreshInterval) {
            return;
        }
        const nextRound = () => {
            this.slotsTimer = setTimeout(() => {
                debug('refreshing slot caches... (triggered by "slotsRefreshInterval" option)');
                this.refreshSlotsCache(() => {
                    nextRound();
                });
            }, this.options.slotsRefreshInterval);
        };
        nextRound();
    }
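    // Illustrative sketch (not part of the built file): periodic slot refreshing is
    // opt-in via `slotsRefreshInterval` (milliseconds); the interval value below is
    // an assumption.
    //
    //   const cluster = new Cluster(startupNodes, { slotsRefreshInterval: 30000 });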
    /**
     * Change cluster instance's status
     */
    setStatus(status) {
        debug("status: %s -> %s", this.status || "[empty]", status);
        this.status = status;
        process.nextTick(() => {
            this.emit(status);
        });
    }
    /**
     * Called when closed to check whether a reconnection should be made
     */
    handleCloseEvent(reason) {
        if (reason) {
            debug("closed because %s", reason);
        }
        let retryDelay;
        if (!this.manuallyClosing &&
            typeof this.options.clusterRetryStrategy === "function") {
            retryDelay = this.options.clusterRetryStrategy.call(this, ++this.retryAttempts, reason);
        }
        if (typeof retryDelay === "number") {
            this.setStatus("reconnecting");
            this.reconnectTimeout = setTimeout(() => {
                this.reconnectTimeout = null;
                debug("Cluster is disconnected. Retrying after %dms", retryDelay);
                this.connect().catch(function (err) {
                    debug("Got error %s when reconnecting. Ignoring...", err);
                });
            }, retryDelay);
        }
        else {
            this.setStatus("end");
            this.flushQueue(new Error("None of startup nodes is available"));
        }
    }
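    // Illustrative sketch (not part of the built file): `clusterRetryStrategy`
    // receives the attempt count (and the close reason) and returns the delay in
    // milliseconds, or a non-number to stop reconnecting and end the cluster.
    //
    //   const cluster = new Cluster(startupNodes, {
    //     clusterRetryStrategy: (times) => Math.min(100 + times * 2, 2000),
    //   });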
    /**
     * Flush offline queue with error.
     */
    flushQueue(error) {
        let item;
        while ((item = this.offlineQueue.shift())) {
            item.command.reject(error);
        }
    }
    executeOfflineCommands() {
        if (this.offlineQueue.length) {
            debug("send %d commands in offline queue", this.offlineQueue.length);
            const offlineQueue = this.offlineQueue;
            this.resetOfflineQueue();
            let item;
            while ((item = offlineQueue.shift())) {
                this.sendCommand(item.command, item.stream, item.node);
            }
        }
    }
    natMapper(nodeKey) {
        const key = typeof nodeKey === "string"
            ? nodeKey
            : `${nodeKey.host}:${nodeKey.port}`;
        let mapped = null;
        if (this.options.natMap && typeof this.options.natMap === "function") {
            mapped = this.options.natMap(key);
        }
        else if (this.options.natMap && typeof this.options.natMap === "object") {
            mapped = this.options.natMap[key];
        }
        if (mapped) {
            debug("NAT mapping %s -> %O", key, mapped);
            return Object.assign({}, mapped);
        }
        return typeof nodeKey === "string"
            ? (0, util_1.nodeKeyToRedisOptions)(nodeKey)
            : nodeKey;
    }
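    // Illustrative sketch (not part of the built file): `natMap` rewrites the
    // internal "host:port" advertised by the cluster into externally reachable
    // addresses (e.g. Docker / NAT setups). The addresses below are assumptions.
    //
    //   const cluster = new Cluster([{ host: "203.0.113.5", port: 7000 }], {
    //     natMap: {
    //       "10.0.0.1:6379": { host: "203.0.113.5", port: 7000 },
    //       "10.0.0.2:6379": { host: "203.0.113.5", port: 7001 },
    //     },
    //   });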
    getInfoFromNode(redis, callback) {
        if (!redis) {
            return callback(new Error("Node is disconnected"));
        }
        // Use a duplication of the connection to avoid
        // timeouts when the connection is in the blocking
        // mode (e.g. waiting for BLPOP).
        const duplicatedConnection = redis.duplicate({
            enableOfflineQueue: true,
            enableReadyCheck: false,
            retryStrategy: null,
            connectionName: (0, util_1.getConnectionName)("refresher", this.options.redisOptions && this.options.redisOptions.connectionName),
        });
        // Ignore error events since we will handle
        // exceptions for the CLUSTER SLOTS command.
        duplicatedConnection.on("error", utils_1.noop);
        duplicatedConnection.cluster("SLOTS", (0, utils_1.timeout)((err, result) => {
            duplicatedConnection.disconnect();
            if (err) {
                debug("error encountered running CLUSTER.SLOTS: %s", err);
                return callback(err);
            }
            if (this.status === "disconnecting" ||
                this.status === "close" ||
                this.status === "end") {
                debug("ignore CLUSTER.SLOTS results (count: %d) since cluster status is %s", result.length, this.status);
                callback();
                return;
            }
            const nodes = [];
            debug("cluster slots result count: %d", result.length);
            for (let i = 0; i < result.length; ++i) {
                const items = result[i];
                const slotRangeStart = items[0];
                const slotRangeEnd = items[1];
                const keys = [];
                for (let j = 2; j < items.length; j++) {
                    if (!items[j][0]) {
                        continue;
                    }
                    const node = this.natMapper({
                        host: items[j][0],
                        port: items[j][1],
                    });
                    node.readOnly = j !== 2;
                    nodes.push(node);
                    keys.push(node.host + ":" + node.port);
                }
                debug("cluster slots result [%d]: slots %d~%d served by %s", i, slotRangeStart, slotRangeEnd, keys);
                for (let slot = slotRangeStart; slot <= slotRangeEnd; slot++) {
                    this.slots[slot] = keys;
                }
            }
            // Assign to each node keys a numeric value to make autopipeline comparison faster.
            this._groupsIds = Object.create(null);
            let j = 0;
            for (let i = 0; i < 16384; i++) {
                const target = (this.slots[i] || []).join(";");
                if (!target.length) {
                    this._groupsBySlot[i] = undefined;
                    continue;
                }
                if (!this._groupsIds[target]) {
                    this._groupsIds[target] = ++j;
                }
                this._groupsBySlot[i] = this._groupsIds[target];
            }
            this.connectionPool.reset(nodes);
            callback();
        }, this.options.slotsRefreshTimeout));
    }
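    // Note: each CLUSTER SLOTS entry is parsed as
    //   [slotRangeStart, slotRangeEnd, [masterHost, masterPort, ...], [replicaHost, replicaPort, ...], ...]
    // The first "host:port" of a range is treated as the master (readOnly = false),
    // every following entry as a replica, and the resulting key list fills
    // this.slots for the whole slot range.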
    invokeReadyDelayedCallbacks(err) {
        for (const c of this._readyDelayedCallbacks) {
            process.nextTick(c, err);
        }
        this._readyDelayedCallbacks = [];
    }
    /**
     * Check whether Cluster is able to process commands
     */
    readyCheck(callback) {
        this.cluster("INFO", (err, res) => {
            if (err) {
                return callback(err);
            }
            if (typeof res !== "string") {
                return callback();
            }
            let state;
            const lines = res.split("\r\n");
            for (let i = 0; i < lines.length; ++i) {
                const parts = lines[i].split(":");
                if (parts[0] === "cluster_state") {
                    state = parts[1];
                    break;
                }
            }
            if (state === "fail") {
                debug("cluster state not ok (%s)", state);
                callback(null, state);
            }
            else {
                callback();
            }
        });
    }
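    // Note: readyCheck() scans the CLUSTER INFO text for the "cluster_state" field,
    // e.g. a line such as "cluster_state:ok"; only the value "fail" is reported back
    // to the caller as a not-ready state.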
    resolveSrv(hostname) {
        return new Promise((resolve, reject) => {
            this.options.resolveSrv(hostname, (err, records) => {
                if (err) {
                    return reject(err);
                }
                const self = this, groupedRecords = (0, util_1.groupSrvRecords)(records), sortedKeys = Object.keys(groupedRecords).sort((a, b) => parseInt(a) - parseInt(b));
                function tryFirstOne(err) {
                    if (!sortedKeys.length) {
                        return reject(err);
                    }
                    const key = sortedKeys[0], group = groupedRecords[key], record = (0, util_1.weightSrvRecords)(group);
                    if (!group.records.length) {
                        sortedKeys.shift();
                    }
                    self.dnsLookup(record.name).then((host) => resolve({
                        host,
                        port: record.port,
                    }), tryFirstOne);
                }
                tryFirstOne();
            });
        });
    }
    dnsLookup(hostname) {
        return new Promise((resolve, reject) => {
            this.options.dnsLookup(hostname, (err, address) => {
                if (err) {
                    debug("failed to resolve hostname %s to IP: %s", hostname, err.message);
                    reject(err);
                }
                else {
                    debug("resolved hostname %s to IP %s", hostname, address);
                    resolve(address);
                }
            });
        });
    }
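    // Illustrative sketch (not part of the built file): custom resolvers can be
    // injected through the `dnsLookup` / `resolveSrv` options, and `useSRVRecords`
    // switches startup-node resolution to SRV records. The hostname is an assumption.
    //
    //   const dns = require("dns");
    //   const cluster = new Cluster([{ host: "redis.example.internal", port: 6379 }], {
    //     useSRVRecords: true,
    //     resolveSrv: dns.resolveSrv,
    //     dnsLookup: (hostname, cb) => dns.lookup(hostname, cb),
    //   });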
    /**
     * Normalize startup nodes, and resolve hostnames to IPs.
     *
     * This process happens every time when #connect() is called since
     * #startupNodes and DNS records may change.
     */
    async resolveStartupNodeHostnames() {
        if (!Array.isArray(this.startupNodes) || this.startupNodes.length === 0) {
            throw new Error("`startupNodes` should contain at least one node.");
        }
        const startupNodes = (0, util_1.normalizeNodeOptions)(this.startupNodes);
        const hostnames = (0, util_1.getUniqueHostnamesFromOptions)(startupNodes);
        if (hostnames.length === 0) {
            return startupNodes;
        }
        const configs = await Promise.all(hostnames.map((this.options.useSRVRecords ? this.resolveSrv : this.dnsLookup).bind(this)));
        const hostnameToConfig = (0, utils_1.zipMap)(hostnames, configs);
        return startupNodes.map((node) => {
            const config = hostnameToConfig.get(node.host);
            if (!config) {
                return node;
            }
            if (this.options.useSRVRecords) {
                return Object.assign({}, node, config);
            }
            return Object.assign({}, node, { host: config });
        });
    }
    createScanStream(command, { key, options = {} }) {
        return new ScanStream_1.default({
            objectMode: true,
            key: key,
            redis: this,
            command: command,
            ...options,
        });
    }
}
(0, applyMixin_1.default)(Cluster, events_1.EventEmitter);
(0, transaction_1.addTransactionSupport)(Cluster.prototype);
exports.default = Cluster;
25
node_modules/ioredis/built/cluster/util.d.ts
generated
vendored
Normal file
@@ -0,0 +1,25 @@
/// <reference types="node" />
import { SrvRecord } from "dns";
export declare type NodeKey = string;
export declare type NodeRole = "master" | "slave" | "all";
export interface RedisOptions {
    port: number;
    host: string;
    username?: string;
    password?: string;
    [key: string]: any;
}
export interface SrvRecordsGroup {
    totalWeight: number;
    records: SrvRecord[];
}
export interface GroupedSrvRecords {
    [key: number]: SrvRecordsGroup;
}
export declare function getNodeKey(node: RedisOptions): NodeKey;
export declare function nodeKeyToRedisOptions(nodeKey: NodeKey): RedisOptions;
export declare function normalizeNodeOptions(nodes: Array<string | number | object>): RedisOptions[];
export declare function getUniqueHostnamesFromOptions(nodes: RedisOptions[]): string[];
export declare function groupSrvRecords(records: SrvRecord[]): GroupedSrvRecords;
export declare function weightSrvRecords(recordsGroup: SrvRecordsGroup): SrvRecord;
export declare function getConnectionName(component: any, nodeConnectionName: any): string;
100
node_modules/ioredis/built/cluster/util.js
generated
vendored
Normal file
@@ -0,0 +1,100 @@
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getConnectionName = exports.weightSrvRecords = exports.groupSrvRecords = exports.getUniqueHostnamesFromOptions = exports.normalizeNodeOptions = exports.nodeKeyToRedisOptions = exports.getNodeKey = void 0;
|
||||
const utils_1 = require("../utils");
|
||||
const net_1 = require("net");
|
||||
function getNodeKey(node) {
|
||||
node.port = node.port || 6379;
|
||||
node.host = node.host || "127.0.0.1";
|
||||
return node.host + ":" + node.port;
|
||||
}
|
||||
exports.getNodeKey = getNodeKey;
|
||||
function nodeKeyToRedisOptions(nodeKey) {
|
||||
const portIndex = nodeKey.lastIndexOf(":");
|
||||
if (portIndex === -1) {
|
||||
throw new Error(`Invalid node key ${nodeKey}`);
|
||||
}
|
||||
return {
|
||||
host: nodeKey.slice(0, portIndex),
|
||||
port: Number(nodeKey.slice(portIndex + 1)),
|
||||
};
|
||||
}
|
||||
exports.nodeKeyToRedisOptions = nodeKeyToRedisOptions;
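// Illustrative sketch (not part of the built file); the addresses are examples:
//   getNodeKey({ host: "10.0.0.1", port: 6380 })  -> "10.0.0.1:6380"
//   getNodeKey({})                                -> "127.0.0.1:6379" (defaults applied)
//   nodeKeyToRedisOptions("10.0.0.1:6380")        -> { host: "10.0.0.1", port: 6380 }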
function normalizeNodeOptions(nodes) {
    return nodes.map((node) => {
        const options = {};
        if (typeof node === "object") {
            Object.assign(options, node);
        }
        else if (typeof node === "string") {
            Object.assign(options, (0, utils_1.parseURL)(node));
        }
        else if (typeof node === "number") {
            options.port = node;
        }
        else {
            throw new Error("Invalid argument " + node);
        }
        if (typeof options.port === "string") {
            options.port = parseInt(options.port, 10);
        }
        // Cluster mode only supports db 0
        delete options.db;
        if (!options.port) {
            options.port = 6379;
        }
        if (!options.host) {
            options.host = "127.0.0.1";
        }
        return (0, utils_1.resolveTLSProfile)(options);
    });
}
exports.normalizeNodeOptions = normalizeNodeOptions;
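// Illustrative sketch (not part of the built file): startup nodes may be given as
// option objects, URLs, or bare port numbers; missing fields default to
// 127.0.0.1:6379 and `db` is dropped since cluster mode only uses db 0.
// The inputs below are assumptions.
//   normalizeNodeOptions(["redis://10.0.0.1:7000", 7001, { host: "10.0.0.2" }])
//   // -> [{ host: "10.0.0.1", port: 7000, ... }, { host: "127.0.0.1", port: 7001, ... },
//   //     { host: "10.0.0.2", port: 6379, ... }]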
function getUniqueHostnamesFromOptions(nodes) {
    const uniqueHostsMap = {};
    nodes.forEach((node) => {
        uniqueHostsMap[node.host] = true;
    });
    return Object.keys(uniqueHostsMap).filter((host) => !(0, net_1.isIP)(host));
}
exports.getUniqueHostnamesFromOptions = getUniqueHostnamesFromOptions;
function groupSrvRecords(records) {
    const recordsByPriority = {};
    for (const record of records) {
        if (!recordsByPriority.hasOwnProperty(record.priority)) {
            recordsByPriority[record.priority] = {
                totalWeight: record.weight,
                records: [record],
            };
        }
        else {
            recordsByPriority[record.priority].totalWeight += record.weight;
            recordsByPriority[record.priority].records.push(record);
        }
    }
    return recordsByPriority;
}
exports.groupSrvRecords = groupSrvRecords;
function weightSrvRecords(recordsGroup) {
    if (recordsGroup.records.length === 1) {
        recordsGroup.totalWeight = 0;
        return recordsGroup.records.shift();
    }
    // + `recordsGroup.records.length` to support `weight` 0
    const random = Math.floor(Math.random() * (recordsGroup.totalWeight + recordsGroup.records.length));
    let total = 0;
    for (const [i, record] of recordsGroup.records.entries()) {
        total += 1 + record.weight;
        if (total > random) {
            recordsGroup.totalWeight -= record.weight;
            recordsGroup.records.splice(i, 1);
            return record;
        }
    }
}
exports.weightSrvRecords = weightSrvRecords;
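// Note: groupSrvRecords() buckets SRV records by priority, and weightSrvRecords()
// draws one record from a bucket with probability proportional to its weight
// (each record gets +1 so zero-weight records can still be picked); the chosen
// record is removed from the group so repeated calls walk through the bucket
// without repeats.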
function getConnectionName(component, nodeConnectionName) {
    const prefix = `ioredis-cluster(${component})`;
    return nodeConnectionName ? `${prefix}:${nodeConnectionName}` : prefix;
}
exports.getConnectionName = getConnectionName;