diff --git a/.env b/.env index 91d193035..5ea7f421d 100644 --- a/.env +++ b/.env @@ -216,3 +216,18 @@ STACKS_EVENTS_DIR=./events # SNP_REDIS_URL=redis://127.0.0.1:6379 # Only specify `SNP_REDIS_STREAM_KEY_PREFIX` if `REDIS_STREAM_KEY_PREFIX` is configured on the SNP server. # SNP_REDIS_STREAM_KEY_PREFIX= + +# If enabled this service will notify Redis whenever the Stacks index advances i.e. whenever a new block is confirmed. +# High Availability Redis is supported via Sentinels, Cluster or a simple Redis connection URL. +# REDIS_NOTIFIER_ENABLED=1 +# REDIS_QUEUE=index-progress +# REDIS_URL=127.0.0.1:6379 +# REDIS_SENTINELS= +# REDIS_SENTINEL_MASTER= +# REDIS_SENTINEL_PASSWORD= +# REDIS_SENTINEL_AUTH_PASSWORD= +# REDIS_CLUSTER_NODES= +# REDIS_CLUSTER_PASSWORD= +# REDIS_CONNECTION_TIMEOUT=10000 +# REDIS_COMMAND_TIMEOUT=5000 +# REDIS_MAX_RETRIES=20 diff --git a/package-lock.json b/package-lock.json index 16db2165e..d147b6d98 100644 --- a/package-lock.json +++ b/package-lock.json @@ -46,6 +46,7 @@ "fastify": "4.29.1", "fastify-metrics": "11.0.0", "getopts": "2.3.0", + "ioredis": "5.6.1", "jsonc-parser": "3.0.0", "jsonrpc-lite": "2.2.0", "lru-cache": "6.0.0", @@ -1624,6 +1625,12 @@ "deprecated": "Use @eslint/object-schema instead", "dev": true }, + "node_modules/@ioredis/commands": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@ioredis/commands/-/commands-1.2.0.tgz", + "integrity": "sha512-Sx1pU8EM64o2BrqNpEO1CNLtKQwyhuXuqyfH7oGKCk+1a33d2r5saW8zNwm3j6BTExtjrv2BxTgzzkMwts6vGg==", + "license": "MIT" + }, "node_modules/@isaacs/cliui": { "version": "8.0.2", "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", @@ -7439,6 +7446,15 @@ "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", "integrity": "sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==" }, + "node_modules/denque": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/denque/-/denque-2.1.0.tgz", + 
"integrity": "sha512-HVQE3AAb/pxF8fQAoiqpvg9i3evqug3hoiwakOyZAwJm+6vZehbkYXZ0l4JxS+I3QxM97v5aaRNhj8v5oBhekw==", + "license": "Apache-2.0", + "engines": { + "node": ">=0.10" + } + }, "node_modules/depd": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", @@ -10339,6 +10355,30 @@ "node": ">= 0.4" } }, + "node_modules/ioredis": { + "version": "5.6.1", + "resolved": "https://registry.npmjs.org/ioredis/-/ioredis-5.6.1.tgz", + "integrity": "sha512-UxC0Yv1Y4WRJiGQxQkP0hfdL0/5/6YvdfOOClRgJ0qppSarkhneSa6UvkMkms0AkdGimSH3Ikqm+6mkMmX7vGA==", + "license": "MIT", + "dependencies": { + "@ioredis/commands": "^1.1.1", + "cluster-key-slot": "^1.1.0", + "debug": "^4.3.4", + "denque": "^2.1.0", + "lodash.defaults": "^4.2.0", + "lodash.isarguments": "^3.1.0", + "redis-errors": "^1.2.0", + "redis-parser": "^3.0.0", + "standard-as-callback": "^2.1.0" + }, + "engines": { + "node": ">=12.22.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/ioredis" + } + }, "node_modules/ip-address": { "version": "9.0.5", "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-9.0.5.tgz", @@ -12760,6 +12800,18 @@ "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", "integrity": "sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ==" }, + "node_modules/lodash.defaults": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/lodash.defaults/-/lodash.defaults-4.2.0.tgz", + "integrity": "sha512-qjxPLHd3r5DnsdGacqOMU6pb/avJzdh9tFX2ymgoZE27BmjXrNy/y4LoaiTeAb+O3gL8AfpJGtqfX/ae2leYYQ==", + "license": "MIT" + }, + "node_modules/lodash.isarguments": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/lodash.isarguments/-/lodash.isarguments-3.1.0.tgz", + "integrity": "sha512-chi4NHZlZqZD18a0imDHnZPrDeBbTtVN7GXMwuGdRH9qotxAjYs3aVLKc7zNOG9eddR5Ksd8rvFEBc9SsggPpg==", + "license": "MIT" + }, "node_modules/lodash.isplainobject": { "version": 
"4.0.6", "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", @@ -15468,6 +15520,27 @@ "@redis/time-series": "1.1.0" } }, + "node_modules/redis-errors": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/redis-errors/-/redis-errors-1.2.0.tgz", + "integrity": "sha512-1qny3OExCf0UvUV/5wpYKf2YwPcOqXzkwKKSmKHiE6ZMQs5heeE/c8eXK+PNllPvmjgAbfnsbpkGZWy8cBpn9w==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/redis-parser": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/redis-parser/-/redis-parser-3.0.0.tgz", + "integrity": "sha512-DJnGAeenTdpMEH6uAJRK/uiyEIH9WVsUmoLwzudwGJUwZPp80PDBWPHXSAGNPwNvIXAbe7MSUB1zQFugFml66A==", + "license": "MIT", + "dependencies": { + "redis-errors": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/redoc": { "version": "2.4.0", "resolved": "https://registry.npmjs.org/redoc/-/redoc-2.4.0.tgz", @@ -16645,6 +16718,12 @@ "resolved": "https://registry.npmjs.org/@types/node/-/node-16.18.104.tgz", "integrity": "sha512-OF3keVCbfPlkzxnnDBUZJn1RiCJzKeadjiW0xTEb0G1SUJ5gDVb3qnzZr2T4uIFvsbKJbXy1v2DN7e2zaEY7jQ==" }, + "node_modules/standard-as-callback": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/standard-as-callback/-/standard-as-callback-2.1.0.tgz", + "integrity": "sha512-qoRRSyROncaz1z0mvYqIE4lCd9p2R90i6GxW3uZv5ucSu8tU7B5HXUP1gG8pVZsYNVaXjk8ClXHPttLyxAL48A==", + "license": "MIT" + }, "node_modules/statuses": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", diff --git a/package.json b/package.json index 2a413788a..513a991e8 100644 --- a/package.json +++ b/package.json @@ -126,6 +126,7 @@ "fastify": "4.29.1", "fastify-metrics": "11.0.0", "getopts": "2.3.0", + "ioredis": "5.6.1", "jsonc-parser": "3.0.0", "jsonrpc-lite": "2.2.0", "lru-cache": "6.0.0", diff --git a/src/datastore/common.ts b/src/datastore/common.ts index 1f127e47e..9ddba7656 100644 --- 
a/src/datastore/common.ts +++ b/src/datastore/common.ts @@ -1111,6 +1111,7 @@ export interface DbPoxCycleSignerStacker { } interface ReOrgEntities { + blockHeaders: { index_block_hash: string; block_height: number }[]; blocks: number; microblockHashes: string[]; microblocks: number; diff --git a/src/datastore/helpers.ts b/src/datastore/helpers.ts index 08d0230c7..20cbeb042 100644 --- a/src/datastore/helpers.ts +++ b/src/datastore/helpers.ts @@ -1315,6 +1315,7 @@ export function markBlockUpdateDataAsNonCanonical(data: DataStoreBlockUpdateData export function newReOrgUpdatedEntities(): ReOrgUpdatedEntities { return { markedCanonical: { + blockHeaders: [], blocks: 0, microblockHashes: [], microblocks: 0, @@ -1336,6 +1337,7 @@ export function newReOrgUpdatedEntities(): ReOrgUpdatedEntities { poxCycles: 0, }, markedNonCanonical: { + blockHeaders: [], blocks: 0, microblockHashes: [], microblocks: 0, diff --git a/src/datastore/pg-write-store.ts b/src/datastore/pg-write-store.ts index 83d1ec201..f95ee8be0 100644 --- a/src/datastore/pg-write-store.ts +++ b/src/datastore/pg-write-store.ts @@ -95,6 +95,7 @@ import { } from '@hirosystems/api-toolkit'; import { PgServer, getConnectionArgs, getConnectionConfig } from './connection'; import { BigNumber } from 'bignumber.js'; +import { RedisNotifier } from './redis-notifier'; const MIGRATIONS_TABLE = 'pgmigrations'; const INSERT_BATCH_SIZE = 500; @@ -130,6 +131,7 @@ type TransactionHeader = { */ export class PgWriteStore extends PgStore { readonly isEventReplay: boolean; + protected readonly redisNotifier: RedisNotifier | undefined = undefined; protected isIbdBlockHeightReached = false; private metrics: | { @@ -141,10 +143,12 @@ export class PgWriteStore extends PgStore { constructor( sql: PgSqlClient, notifier: PgNotifier | undefined = undefined, - isEventReplay: boolean = false + isEventReplay: boolean = false, + redisNotifier: RedisNotifier | undefined = undefined ) { super(sql, notifier); this.isEventReplay = isEventReplay; + 
this.redisNotifier = redisNotifier; if (isProdEnv) { this.metrics = { blockHeight: new prom.Gauge({ @@ -163,11 +167,13 @@ export class PgWriteStore extends PgStore { usageName, skipMigrations = false, withNotifier = true, + withRedisNotifier = false, isEventReplay = false, }: { usageName: string; skipMigrations?: boolean; withNotifier?: boolean; + withRedisNotifier?: boolean; isEventReplay?: boolean; }): Promise { const sql = await connectPostgres({ @@ -190,7 +196,8 @@ export class PgWriteStore extends PgStore { }); } const notifier = withNotifier ? await PgNotifier.create(usageName) : undefined; - const store = new PgWriteStore(sql, notifier, isEventReplay); + const redisNotifier = withRedisNotifier ? new RedisNotifier() : undefined; + const store = new PgWriteStore(sql, notifier, isEventReplay, redisNotifier); await store.connectPgNotifier(); return store; } @@ -229,11 +236,13 @@ export class PgWriteStore extends PgStore { async update(data: DataStoreBlockUpdateData): Promise { let garbageCollectedMempoolTxs: string[] = []; let newTxData: DataStoreTxEventData[] = []; + let reorg: ReOrgUpdatedEntities = newReOrgUpdatedEntities(); + let isCanonical = true; await this.sqlWriteTransaction(async sql => { const chainTip = await this.getChainTip(sql); - const reorg = await this.handleReorg(sql, data.block, chainTip.block_height); - const isCanonical = data.block.block_height > chainTip.block_height; + reorg = await this.handleReorg(sql, data.block, chainTip.block_height); + isCanonical = data.block.block_height > chainTip.block_height; if (!isCanonical) { markBlockUpdateDataAsNonCanonical(data); } else { @@ -396,6 +405,9 @@ export class PgWriteStore extends PgStore { } } }); + if (isCanonical) { + await this.redisNotifier?.notify(reorg, data.block.index_block_hash, data.block.block_height); + } // Do we have an IBD height defined in ENV? If so, check if this block update reached it. const ibdHeight = getIbdBlockHeight(); this.isIbdBlockHeightReached = ibdHeight ? 
data.block.block_height > ibdHeight : true; @@ -3548,6 +3560,13 @@ export class PgWriteStore extends PgStore { return result; } + /** + * Recursively restore previously orphaned blocks to canonical. + * @param sql - The SQL client + * @param indexBlockHash - The index block hash that we will restore first + * @param updatedEntities - The updated entities + * @returns The updated entities + */ async restoreOrphanedChain( sql: PgSqlClient, indexBlockHash: string, @@ -3568,6 +3587,10 @@ export class PgWriteStore extends PgStore { throw new Error(`Found multiple non-canonical parents for index_hash ${indexBlockHash}`); } updatedEntities.markedCanonical.blocks++; + updatedEntities.markedCanonical.blockHeaders.unshift({ + index_block_hash: restoredBlockResult[0].index_block_hash, + block_height: restoredBlockResult[0].block_height, + }); // Orphan the now conflicting block at the same height const orphanedBlockResult = await sql` @@ -3606,6 +3629,10 @@ export class PgWriteStore extends PgStore { } updatedEntities.markedNonCanonical.blocks++; + updatedEntities.markedNonCanonical.blockHeaders.unshift({ + index_block_hash: orphanedBlockResult[0].index_block_hash, + block_height: orphanedBlockResult[0].block_height, + }); const markNonCanonicalResult = await this.markEntitiesCanonical( sql, orphanedBlockResult[0].index_block_hash, @@ -3662,6 +3689,8 @@ export class PgWriteStore extends PgStore { markCanonicalResult.txsMarkedCanonical ); updatedEntities.prunedMempoolTxs += prunedMempoolTxs.removedTxs.length; + + // Do we have a parent that is non-canonical? If so, restore it recursively. 
const parentResult = await sql<{ index_block_hash: string }[]>` SELECT index_block_hash FROM blocks @@ -4019,6 +4048,7 @@ export class PgWriteStore extends PgStore { if (this._debounceMempoolStat.debounce) { clearTimeout(this._debounceMempoolStat.debounce); } + await this.redisNotifier?.close(); await super.close(args); } } diff --git a/src/datastore/redis-notifier.ts b/src/datastore/redis-notifier.ts new file mode 100644 index 000000000..a34f06789 --- /dev/null +++ b/src/datastore/redis-notifier.ts @@ -0,0 +1,121 @@ +import Redis, { Cluster, RedisOptions } from 'ioredis'; +import { ReOrgUpdatedEntities } from './common'; +import { ChainID } from '@stacks/transactions'; +import { getApiConfiguredChainID } from '../helpers'; +import { logger } from '@hirosystems/api-toolkit'; + +/** + * Notifies Chainhooks of the progress of the Stacks index via a message sent to a Redis queue. This + * message will contain a block header for each new canonical block as well as headers for those + * that need to be rolled back from a re-org. + */ +export class RedisNotifier { + private readonly redis: Redis | Cluster; + private readonly chainId: ChainID; + private readonly queue: string; + + constructor() { + this.redis = this.newRedisConnection(); + this.chainId = getApiConfiguredChainID(); + this.queue = process.env.REDIS_QUEUE ?? 'chainhooks:stacks:index-progress'; + logger.info(`RedisNotifier initialized for queue ${this.queue}`); + } + + /** + * Broadcast index progress message to the Redis queue. + * @param reOrg - The re-org updated entities, if any + * @param indexBlockHash - Block hash of the newest canonical block + * @param blockHeight - Block height of the newest canonical block + */ + async notify(reOrg: ReOrgUpdatedEntities, indexBlockHash: string, blockHeight: number) { + const message = { + id: `stacks-${blockHeight}-${indexBlockHash}-${Date.now()}`, + payload: { + chain: 'stacks', + network: this.chainId === ChainID.Mainnet ? 
'mainnet' : 'testnet', + apply_blocks: [ + ...reOrg.markedCanonical.blockHeaders.map(block => ({ + hash: block.index_block_hash, + index: block.block_height, + })), + { + hash: indexBlockHash, + index: blockHeight, + }, + ], + rollback_blocks: reOrg.markedNonCanonical.blockHeaders.map(block => ({ + hash: block.index_block_hash, + index: block.block_height, + })), + }, + }; + logger.debug(message, 'RedisNotifier broadcasting index progress message'); + await this.redis.rpush(this.queue, JSON.stringify(message)); + } + + async close() { + await this.redis.quit(); + } + + /** + * Create a new Redis connection based on the environment variables. This will auto-select a + * single connection, cluster or sentinel. + */ + private newRedisConnection(): Redis | Cluster { + const baseOptions: RedisOptions = { + retryStrategy: times => Math.min(times * 50, 2000), + maxRetriesPerRequest: parseInt(process.env.REDIS_MAX_RETRIES ?? '20'), + connectTimeout: parseInt(process.env.REDIS_CONNECTION_TIMEOUT ?? '10000'), + commandTimeout: parseInt(process.env.REDIS_COMMAND_TIMEOUT ?? 
'5000'), + lazyConnect: true, + }; + + // Single Redis instance with URL + if (process.env.REDIS_URL) { + logger.info(`RedisNotifier connecting to redis at ${process.env.REDIS_URL}`); + return new Redis(process.env.REDIS_URL, baseOptions); + } + + // Redis Cluster configuration + if (process.env.REDIS_CLUSTER_NODES && process.env.REDIS_CLUSTER_NODES.length > 0) { + let isSRVRecord = false; + const clusterNodesArray = process.env.REDIS_CLUSTER_NODES.split(','); + if (clusterNodesArray.length === 1) { + isSRVRecord = true; + } + logger.info( + `RedisNotifier connecting to redis cluster at ${process.env.REDIS_CLUSTER_NODES}` + ); + return new Redis.Cluster(clusterNodesArray, { + ...baseOptions, + redisOptions: { + ...baseOptions, + password: process.env.REDIS_CLUSTER_PASSWORD, + }, + useSRVRecords: isSRVRecord, + clusterRetryStrategy: times => Math.min(times * 50, 2000), + }); + } + + // Redis Sentinel configuration + if (process.env.REDIS_SENTINELS) { + const sentinels = process.env.REDIS_SENTINELS.split(','); + logger.info(`RedisNotifier connecting to redis sentinel at ${process.env.REDIS_SENTINELS}`); + return new Redis({ + ...baseOptions, + sentinels: sentinels.map(sentinel => { + const [host, port] = sentinel.split(':'); + return { host, port: parseInt(port) }; + }), + name: process.env.REDIS_SENTINEL_MASTER, + password: process.env.REDIS_SENTINEL_PASSWORD, + sentinelPassword: process.env.REDIS_SENTINEL_AUTH_PASSWORD, + sentinelRetryStrategy: times => Math.min(times * 50, 2000), + }); + } + + throw new Error( + 'Redis configuration required. 
Please set REDIS_URL, REDIS_SENTINELS, or REDIS_CLUSTER_NODES' + ); + } +} diff --git a/src/index.ts b/src/index.ts index 0d4636f3b..5cfc5293e 100644 --- a/src/index.ts +++ b/src/index.ts @@ -130,6 +130,7 @@ async function init(): Promise { dbWriteStore = await PgWriteStore.connect({ usageName: `write-datastore-${apiMode}`, skipMigrations: apiMode === StacksApiMode.readOnly, + withRedisNotifier: parseBoolean(process.env['REDIS_NOTIFIER_ENABLED']) ?? false, }); registerMempoolPromStats(dbWriteStore.eventEmitter); } diff --git a/tests/api/datastore.test.ts b/tests/api/datastore.test.ts index ef70fdd58..909a8d495 100644 --- a/tests/api/datastore.test.ts +++ b/tests/api/datastore.test.ts @@ -3772,6 +3772,12 @@ describe('postgres datastore', () => { const reorgResult = await db.handleReorg(client, block5, 0); const expectedReorgResult: ReOrgUpdatedEntities = { markedCanonical: { + blockHeaders: [ + { block_height: 1, index_block_hash: '0xaa' }, + { block_height: 2, index_block_hash: '0xbb' }, + { block_height: 3, index_block_hash: '0xcc' }, + { block_height: 4, index_block_hash: '0xdd' }, + ], blocks: 4, microblocks: 0, microblockHashes: [], @@ -3793,6 +3799,7 @@ describe('postgres datastore', () => { poxSigners: 0, }, markedNonCanonical: { + blockHeaders: [{ block_height: 3, index_block_hash: '0xccbb' }], blocks: 1, microblocks: 0, microblockHashes: [], diff --git a/tests/api/redis-notifier.test.ts b/tests/api/redis-notifier.test.ts new file mode 100644 index 000000000..0236574ba --- /dev/null +++ b/tests/api/redis-notifier.test.ts @@ -0,0 +1,122 @@ +const messages: string[] = []; + +// Mock Redis to capture messages +jest.mock('ioredis', () => { + const redisMock = jest.fn().mockImplementation(() => ({ + rpush: jest.fn((_, message) => { + messages.push(message); + }), + quit: jest.fn().mockResolvedValue(undefined), + })); + const mock = redisMock as unknown as { default: typeof redisMock }; + mock.default = redisMock; + return mock; +}); + +import { migrate } from 
'../utils/test-helpers'; +import { PgWriteStore } from '../../src/datastore/pg-write-store'; +import { TestBlockBuilder } from '../utils/test-builders'; + +describe('redis notifier', () => { + let db: PgWriteStore; + + beforeEach(async () => { + process.env.REDIS_NOTIFIER_ENABLED = '1'; + process.env.REDIS_URL = 'localhost:6379'; + process.env.REDIS_QUEUE = 'test-queue'; + db = await PgWriteStore.connect({ + usageName: 'tests', + withNotifier: false, + withRedisNotifier: true, + skipMigrations: true, + }); + await migrate('up'); + messages.length = 0; + }); + + afterEach(async () => { + await db.close(); + await migrate('down'); + }); + + test('updates redis', async () => { + const block1 = new TestBlockBuilder({ + block_height: 1, + block_hash: '0x1234', + index_block_hash: '0x1234', + }).build(); + await db.update(block1); + + expect(messages.length).toBe(1); + expect(JSON.parse(messages[0]).payload).toEqual({ + chain: 'stacks', + network: 'mainnet', + apply_blocks: [{ hash: '0x1234', index: 1 }], + rollback_blocks: [], + }); + }); + + test('updates redis with re-orgs', async () => { + await db.update( + new TestBlockBuilder({ + block_height: 1, + block_hash: '0x1234', + index_block_hash: '0x1234', + }).build() + ); + expect(messages.length).toBe(1); + expect(JSON.parse(messages[0]).payload).toEqual({ + chain: 'stacks', + network: 'mainnet', + apply_blocks: [{ hash: '0x1234', index: 1 }], + rollback_blocks: [], + }); + + await db.update( + new TestBlockBuilder({ + block_height: 2, + block_hash: '0x1235', + index_block_hash: '0x1235', + parent_index_block_hash: '0x1234', + }).build() + ); + expect(messages.length).toBe(2); + expect(JSON.parse(messages[1]).payload).toEqual({ + chain: 'stacks', + network: 'mainnet', + apply_blocks: [{ hash: '0x1235', index: 2 }], + rollback_blocks: [], + }); + + // Re-org block 2, should not send a message because this block is not canonical + await db.update( + new TestBlockBuilder({ + block_height: 2, + block_hash: '0x1235aa', + 
index_block_hash: '0x1235aa', + parent_index_block_hash: '0x1234', + }).build() + ); + expect(messages.length).toBe(2); + + // Advance the non-canonical chain, original block 2 should be sent as a rollback block + await db.update( + new TestBlockBuilder({ + block_height: 3, + block_hash: '0x1236', + index_block_hash: '0x1236', + parent_index_block_hash: '0x1235aa', + }).build() + ); + expect(messages.length).toBe(3); + expect(JSON.parse(messages[2]).payload).toEqual({ + chain: 'stacks', + network: 'mainnet', + apply_blocks: [ + { hash: '0x1235aa', index: 2 }, + { hash: '0x1236', index: 3 }, + ], + rollback_blocks: [{ hash: '0x1235', index: 2 }], + }); + }); +});