diff --git a/hathor/indexes/deps_index.py b/hathor/indexes/deps_index.py
index ef2654fe3..fc8d56a87 100644
--- a/hathor/indexes/deps_index.py
+++ b/hathor/indexes/deps_index.py
@@ -118,10 +118,7 @@ def get_scope(self) -> Scope:
         return SCOPE
 
     def init_loop_step(self, tx: BaseTransaction) -> None:
-        tx_meta = tx.get_metadata()
-        if tx_meta.voided_by:
-            return
-        self.add_tx(tx, partial=False)
+        self.add_tx(tx)
 
     def update(self, tx: BaseTransaction) -> None:
         assert tx.hash is not None
@@ -193,6 +190,6 @@ def remove_from_needed_index(self, tx: bytes) -> None:
         raise NotImplementedError
 
     @abstractmethod
-    def get_next_needed_tx(self) -> bytes:
-        """Choose the start hash for downloading the needed txs"""
+    def iter_next_needed_txs(self) -> Iterator[bytes]:
+        """Iterate over the next needed transactions."""
        raise NotImplementedError
diff --git a/hathor/indexes/memory_deps_index.py b/hathor/indexes/memory_deps_index.py
index b596ef98a..8d9d74a9b 100644
--- a/hathor/indexes/memory_deps_index.py
+++ b/hathor/indexes/memory_deps_index.py
@@ -34,6 +34,9 @@ class MemoryDepsIndex(DepsIndex):
     _txs_with_deps_ready: set[bytes]
 
     # Next to be downloaded
+    # - Key: hash of the tx to be downloaded
+    # - Value[0]: height
+    # - Value[1]: hash of the tx waiting for the download
     _needed_txs_index: dict[bytes, tuple[int, bytes]]
 
     def __init__(self):
@@ -49,10 +52,11 @@ def force_clear(self) -> None:
         self._needed_txs_index = {}
 
     def add_tx(self, tx: BaseTransaction, partial: bool = True) -> None:
-        assert tx.hash is not None
-        assert tx.storage is not None
         validation = tx.get_metadata().validation
         if validation.is_fully_connected():
+            # discover if new txs are ready because of this tx
+            self._update_new_deps_ready(tx)
+            # finally remove from rev deps
             self._del_from_deps_index(tx)
         elif not partial:
             raise ValueError('partial=False will only accept fully connected transactions')
@@ -63,6 +67,19 @@ def add_tx(self, tx: BaseTransaction, partial: bool = True) -> None:
     def del_tx(self, tx: BaseTransaction) -> None:
         self._del_from_deps_index(tx)
 
+    def _update_new_deps_ready(self, tx: BaseTransaction) -> None:
+        """Go over the reverse dependencies of tx and check if any of them are now ready to be validated.
+
+        This is also idempotent.
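+
+        Illustrative sketch (hypothetical txs: `a` has just become fully
+        connected and `b` is a partially validated tx whose only missing
+        dependency was `a`):
+
+            index._rev_dep_index[a.hash] = {b.hash}   # b waits for a
+            index._update_new_deps_ready(a)           # called by add_tx(a)
+            assert b.hash in index._txs_with_deps_ready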
+ """ + assert tx.hash is not None + assert tx.storage is not None + for candidate_hash in self._rev_dep_index.get(tx.hash, []): + with tx.storage.allow_partially_validated_context(): + candidate_tx = tx.storage.get_transaction(candidate_hash) + if candidate_tx.is_ready_for_validation(): + self._txs_with_deps_ready.add(candidate_hash) + def _add_deps(self, tx: BaseTransaction) -> None: """This method is idempotent, because self.update needs it to be indempotent.""" assert tx.hash is not None @@ -94,7 +111,9 @@ def next_ready_for_validation(self, tx_storage: 'TransactionStorage', *, dry_run else: cur_ready, self._txs_with_deps_ready = self._txs_with_deps_ready, set() while cur_ready: - yield from sorted(cur_ready, key=lambda tx_hash: tx_storage.get_transaction(tx_hash).timestamp) + with tx_storage.allow_partially_validated_context(): + sorted_cur_ready = sorted(cur_ready, key=lambda tx_hash: tx_storage.get_transaction(tx_hash).timestamp) + yield from sorted_cur_ready if dry_run: cur_ready = self._txs_with_deps_ready - cur_ready else: @@ -113,7 +132,8 @@ def _get_rev_deps(self, tx: bytes) -> frozenset[bytes]: def known_children(self, tx: BaseTransaction) -> list[bytes]: assert tx.hash is not None assert tx.storage is not None - it_rev_deps = map(tx.storage.get_transaction, self._get_rev_deps(tx.hash)) + with tx.storage.allow_partially_validated_context(): + it_rev_deps = map(tx.storage.get_transaction, self._get_rev_deps(tx.hash)) return [not_none(rev.hash) for rev in it_rev_deps if tx.hash in rev.parents] # needed-txs-index methods: @@ -127,18 +147,13 @@ def is_tx_needed(self, tx: bytes) -> bool: def remove_from_needed_index(self, tx: bytes) -> None: self._needed_txs_index.pop(tx, None) - def get_next_needed_tx(self) -> bytes: - # This strategy maximizes the chance to download multiple txs on the same stream - # find the tx with highest "height" - # XXX: we could cache this onto `needed_txs` so we don't have to fetch txs every time - # TODO: improve this by using some sorted data structure to make this better than O(n) - height, start_hash, tx = max((h, s, t) for t, (h, s) in self._needed_txs_index.items()) - self.log.debug('next needed tx start', needed=len(self._needed_txs_index), start=start_hash.hex(), - height=height, needed_tx=tx.hex()) - return start_hash + def iter_next_needed_txs(self) -> Iterator[bytes]: + for tx_hash, _ in self._needed_txs_index.items(): + yield tx_hash def _add_needed(self, tx: BaseTransaction) -> None: """This method is idempotent, because self.update needs it to be indempotent.""" + assert tx.hash is not None assert tx.storage is not None tx_storage = tx.storage @@ -147,9 +162,14 @@ def _add_needed(self, tx: BaseTransaction) -> None: # get_all_dependencies is needed to ensure that we get the inputs that aren't reachable through parents alone, # this can happen for inputs that have not been confirmed as of the block the confirms the block or transaction # that we're adding the dependencies of - for tx_hash in tx.get_all_dependencies(): + for dep_hash in tx.get_all_dependencies(): # It may happen that we have one of the dependencies already, so just add the ones we don't # have. 
We should add at least one dependency, otherwise this tx should be fully validated
-            if not tx_storage.transaction_exists(tx_hash):
-                self.log.debug('tx parent is needed', tx=tx_hash.hex())
-                self._needed_txs_index[tx_hash] = (height, not_none(tx.hash))
+            with tx_storage.allow_partially_validated_context():
+                tx_exists = tx_storage.transaction_exists(dep_hash)
+            if not tx_exists:
+                self.log.debug('tx parent is needed', tx=dep_hash.hex())
+                self._needed_txs_index[dep_hash] = (height, not_none(tx.hash))
+
+        # also, remove the given transaction from needed, because we already have it
+        self._needed_txs_index.pop(tx.hash, None)
diff --git a/hathor/indexes/rocksdb_deps_index.py b/hathor/indexes/rocksdb_deps_index.py
index fee70fb99..780299ee8 100644
--- a/hathor/indexes/rocksdb_deps_index.py
+++ b/hathor/indexes/rocksdb_deps_index.py
@@ -351,11 +351,6 @@ def remove_from_needed_index(self, tx: bytes) -> None:
         key_needed = self._to_key_needed(tx)
         self._db.delete((self._cf, key_needed))
 
-    def get_next_needed_tx(self) -> bytes:
-        # This strategy maximizes the chance to download multiple txs on the same stream
-        # Find the tx with highest "height"
-        # XXX: we could cache this onto `needed_txs` so we don't have to fetch txs every time
-        # TODO: improve this by using some sorted data structure to make this better than O(n)
-        height, start_hash, tx = max((h, s, t) for t, h, s in self._iter_needed())
-        self.log.debug('next needed tx start', start=start_hash.hex(), height=height, needed_tx=tx.hex())
-        return start_hash
+    def iter_next_needed_txs(self) -> Iterator[bytes]:
+        for tx_hash, _, __ in self._iter_needed():
+            yield tx_hash
diff --git a/hathor/manager.py b/hathor/manager.py
index ec92144f6..7988f49f0 100644
--- a/hathor/manager.py
+++ b/hathor/manager.py
@@ -403,8 +403,6 @@ def _initialize_components_full_verification(self) -> None:
         self.log.debug('load blocks and transactions')
         for tx in self.tx_storage._topological_sort_dfs():
-            tx.update_initial_metadata()
-
             assert tx.hash is not None
 
             tx_meta = tx.get_metadata()
@@ -433,7 +431,14 @@ def _initialize_components_full_verification(self) -> None:
                 try:
                     # TODO: deal with invalid tx
+                    tx.calculate_height()
+                    tx._update_parents_children_metadata()
+
                     if tx.can_validate_full():
+                        tx.update_initial_metadata()
+                        tx.calculate_min_height()
+                        if tx.is_genesis:
+                            assert tx.validate_checkpoint(self.checkpoints)
                         assert tx.validate_full(skip_block_weight_verification=skip_block_weight_verification)
                         self.tx_storage.add_to_indexes(tx)
                         with self.tx_storage.allow_only_valid_context():
@@ -934,12 +939,11 @@ def on_new_tx(self, tx: BaseTransaction, *, conn: Optional[HathorProtocol] = Non
         """
         assert self.tx_storage.is_only_valid_allowed()
         assert tx.hash is not None
+
+        already_exists = False
         if self.tx_storage.transaction_exists(tx.hash):
             self.tx_storage.compare_bytes_with_local_tx(tx)
-            if not fails_silently:
-                raise InvalidNewTransaction('Transaction already exists {}'.format(tx.hash_hex))
-            self.log.warn('on_new_tx(): Transaction already exists', tx=tx.hash_hex)
-            return False
+            already_exists = True
 
         if tx.timestamp - self.reactor.seconds() > settings.MAX_FUTURE_TIMESTAMP_ALLOWED:
             if not fails_silently:
@@ -956,8 +960,14 @@ def on_new_tx(self, tx: BaseTransaction, *, conn: Optional[HathorProtocol] = Non
                 metadata = tx.get_metadata()
             except TransactionDoesNotExist:
                 if not fails_silently:
-                    raise InvalidNewTransaction('missing parent')
-                self.log.warn('on_new_tx(): missing parent', tx=tx.hash_hex)
+                    raise InvalidNewTransaction('cannot get metadata')
+                self.log.warn('on_new_tx(): cannot get metadata', tx=tx.hash_hex)
                 return False
 
+        if already_exists and metadata.validation.is_fully_connected():
+            if not fails_silently:
+                raise InvalidNewTransaction('Transaction already exists {}'.format(tx.hash_hex))
+            self.log.warn('on_new_tx(): Transaction already exists', tx=tx.hash_hex)
+            return False
+
         if metadata.validation.is_invalid():
@@ -1044,7 +1054,7 @@ def sync_v2_step_validations(self, txs: Iterable[BaseTransaction], *, quiet: boo
             try:
                 # XXX: `reject_locked_reward` might not apply, partial validation is only used on sync-v2
                 # TODO: deal with `reject_locked_reward` on sync-v2
-                assert tx.validate_full(reject_locked_reward=True)
+                assert tx.validate_full(reject_locked_reward=False)
             except (AssertionError, HathorError):
                 # TODO
                 raise
diff --git a/hathor/p2p/manager.py b/hathor/p2p/manager.py
index 37a912897..067fd4030 100644
--- a/hathor/p2p/manager.py
+++ b/hathor/p2p/manager.py
@@ -100,6 +100,7 @@ def __init__(self,
                  enable_sync_v1_1: bool) -> None:
         from hathor.p2p.sync_v1.factory_v1_0 import SyncV10Factory
         from hathor.p2p.sync_v1.factory_v1_1 import SyncV11Factory
+        from hathor.p2p.sync_v2.factory import SyncV2Factory
 
         if not (enable_sync_v1 or enable_sync_v1_1 or enable_sync_v2):
             raise TypeError(f'{type(self).__name__}() at least one sync version is required')
@@ -185,7 +186,7 @@ def __init__(self,
         if enable_sync_v1_1:
             self._sync_factories[SyncVersion.V1_1] = SyncV11Factory(self)
         if enable_sync_v2:
-            self._sync_factories[SyncVersion.V2] = SyncV10Factory(self)
+            self._sync_factories[SyncVersion.V2] = SyncV2Factory(self)
 
     def set_manager(self, manager: 'HathorManager') -> None:
         """Set the manager. This method must be called before start()."""
diff --git a/hathor/p2p/messages.py b/hathor/p2p/messages.py
index 507acab23..7ac3259c5 100644
--- a/hathor/p2p/messages.py
+++ b/hathor/p2p/messages.py
@@ -103,14 +103,12 @@ class ProtocolMessages(Enum):
     GET_BEST_BLOCK = 'GET-BEST-BLOCK'  # Request the best block of the peer
     BEST_BLOCK = 'BEST-BLOCK'  # Send the best block to your peer
 
-    GET_BLOCK_TXS = 'GET-BLOCK-TXS'  # TODO: rename, maybe GET-TX-RANGE or repurpose GET-TRANSACTIONS above
+    GET_TRANSACTIONS_BFS = 'GET-TRANSACTIONS-BFS'
     TRANSACTION = 'TRANSACTION'
+    TRANSACTIONS_END = 'TRANSACTIONS-END'
 
-    GET_MEMPOOL = 'GET-MEMPOOL'  # TODO: rename, maybe GET-TX-RANGE or repurpose GET-TRANSACTIONS above
-    MEMPOOL_END = 'MEMPOOL-END'  # End of mempool sync
-
-    GET_COMMON_CHAIN = 'GET-COMMON-CHAIN'
-    COMMON_CHAIN = 'COMMON-CHAIN'
+    GET_MEMPOOL = 'GET-MEMPOOL'
+    MEMPOOL_END = 'MEMPOOL-END'
 
     GET_PEER_BLOCK_HASHES = 'GET-PEER-BLOCK-HASHES'
     PEER_BLOCK_HASHES = 'PEER-BLOCK-HASHES'
diff --git a/hathor/p2p/sync_v2/__init__.py b/hathor/p2p/sync_v2/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/hathor/p2p/sync_v2/factory.py b/hathor/p2p/sync_v2/factory.py
new file mode 100644
index 000000000..1d28278e8
--- /dev/null
+++ b/hathor/p2p/sync_v2/factory.py
@@ -0,0 +1,32 @@
+# Copyright 2021 Hathor Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING, Optional
+
+from hathor.p2p.manager import ConnectionsManager
+from hathor.p2p.sync_factory import SyncManagerFactory
+from hathor.p2p.sync_manager import SyncManager
+from hathor.p2p.sync_v2.manager import NodeBlockSync
+from hathor.util import Reactor
+
+if TYPE_CHECKING:
+    from hathor.p2p.protocol import HathorProtocol
+
+
+class SyncV2Factory(SyncManagerFactory):
+    def __init__(self, connections: ConnectionsManager):
+        self.connections = connections
+
+    def create_sync_manager(self, protocol: 'HathorProtocol', reactor: Optional[Reactor] = None) -> SyncManager:
+        return NodeBlockSync(protocol, reactor=reactor)
diff --git a/hathor/p2p/sync_v2/manager.py b/hathor/p2p/sync_v2/manager.py
new file mode 100644
index 000000000..3de3e94a3
--- /dev/null
+++ b/hathor/p2p/sync_v2/manager.py
@@ -0,0 +1,1188 @@
+# Copyright 2023 Hathor Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import json
+import math
+import struct
+from collections import OrderedDict
+from enum import Enum
+from typing import TYPE_CHECKING, Any, Callable, Generator, Optional, cast
+
+from structlog import get_logger
+from twisted.internet.defer import Deferred, inlineCallbacks
+from twisted.internet.task import LoopingCall
+
+from hathor.conf import HathorSettings
+from hathor.p2p.messages import ProtocolMessages
+from hathor.p2p.sync_manager import SyncManager
+from hathor.p2p.sync_v2.mempool import SyncMempoolManager
+from hathor.p2p.sync_v2.streamers import DEFAULT_STREAMING_LIMIT, BlockchainStreaming, StreamEnd, TransactionsStreaming
+from hathor.transaction import BaseTransaction, Block, Transaction
+from hathor.transaction.base_transaction import tx_or_block_from_bytes
+from hathor.transaction.exceptions import HathorError
+from hathor.transaction.storage.exceptions import TransactionDoesNotExist
+from hathor.types import VertexId
+from hathor.util import Reactor, collect_n
+
+if TYPE_CHECKING:
+    from hathor.p2p.protocol import HathorProtocol
+
+settings = HathorSettings()
+logger = get_logger()
+
+MAX_GET_TRANSACTIONS_BFS_LEN: int = 8
+
+
+class PeerState(Enum):
+    ERROR = 'error'
+    UNKNOWN = 'unknown'
+    SYNCING_BLOCKS = 'syncing-blocks'
+    SYNCING_TRANSACTIONS = 'syncing-transactions'
+    SYNCING_MEMPOOL = 'syncing-mempool'
+
+
+class NodeBlockSync(SyncManager):
+    """ An algorithm to sync two peers based on their blockchain.
+    """
+    name: str = 'node-block-sync'
+
+    def __init__(self, protocol: 'HathorProtocol', reactor: Optional[Reactor] = None) -> None:
+        """
+        :param protocol: Protocol of the connection.
+        :type protocol: HathorProtocol
+
+        :param reactor: Reactor to schedule later calls. (default=twisted.internet.reactor)
+        :type reactor: Reactor
+        """
+        self.protocol = protocol
+        self.manager = protocol.node
+        self.tx_storage = protocol.node.tx_storage
+        self.state = PeerState.UNKNOWN
+
+        self.DEFAULT_STREAMING_LIMIT = DEFAULT_STREAMING_LIMIT
+
+        if reactor is None:
+            from hathor.util import reactor as twisted_reactor
+            reactor = twisted_reactor
+        assert reactor is not None
+        self.reactor: Reactor = reactor
+        self._is_streaming: bool = False
+
+        # Create logger with context
+        self.log = logger.new(peer=self.protocol.get_short_peer_id())
+
+        # Extra
+        self._blk_size = 0
+        self._blk_end_hash = settings.GENESIS_BLOCK_HASH
+        self._blk_max_quantity = 0
+
+        # indicates whether we're receiving a stream from the peer
+        self.receiving_stream = False
+
+        # highest block where we are synced
+        self.synced_height = 0
+
+        # highest block peer has
+        self.peer_height = 0
+
+        # Latest deferred waiting for a reply.
+        self.deferred_by_key: dict[str, Deferred] = {}
+
+        # When syncing blocks we start streaming with all peers
+        # so the moment I get some repeated blocks, I stop the download
+        # because it's probably a streaming that I've just received
+        self.max_repeated_blocks = 10
+
+        # Streaming objects
+        self.blockchain_streaming: Optional[BlockchainStreaming] = None
+        self.transactions_streaming: Optional[TransactionsStreaming] = None
+
+        # Whether the peers are synced, i.e. our best height and best block are the same
+        self._synced = False
+
+        # Indicate whether the sync manager has been started.
+        self._started: bool = False
+
+        # Saves the last received block from the block streaming; this is useful when running the sync of
+        # transactions in the case when I am downloading a side chain. Starts at the genesis, which is common to all
+        # peers on the network
+        self._last_received_block: Optional[Block] = None
+
+        # Saves if I am in the middle of a mempool sync
+        # we don't execute any sync while in the middle of it
+        self.mempool_manager = SyncMempoolManager(self)
+        self._receiving_tips: Optional[list[bytes]] = None
+
+        # Cache for get_tx calls
+        self._get_tx_cache: OrderedDict[bytes, BaseTransaction] = OrderedDict()
+        self._get_tx_cache_maxsize = 1000
+
+        # Looping call of the main method
+        self._lc_run = LoopingCall(self.run_sync)
+        self._lc_run.clock = self.reactor
+        self._is_running = False
+
+        # Whether we propagate transactions or not
+        self._is_relaying = False
+
+        # This stores the final height that we expect the last "get blocks" stream to end on
+        self._blk_end_height: Optional[int] = None
+
+        # Whether to sync with this peer
+        self._is_enabled: bool = False
+
+    def get_status(self) -> dict[str, Any]:
+        """ Return the status of the sync.
+        """
+        res = {
+            'is_enabled': self.is_sync_enabled(),
+            'peer_height': self.peer_height,
+            'synced_height': self.synced_height,
+            'synced': self._synced,
+            'state': self.state.value,
+        }
+        return res
+
+    def is_synced(self) -> bool:
+        return self._synced
+
+    def is_errored(self) -> bool:
+        return self.state is PeerState.ERROR
+
+    def is_sync_enabled(self) -> bool:
+        return self._is_enabled
+
+    def enable_sync(self) -> None:
+        self._is_enabled = True
+
+    def disable_sync(self) -> None:
+        self._is_enabled = False
+
+    def send_tx_to_peer_if_possible(self, tx: BaseTransaction) -> None:
+        if not self._is_enabled:
+            self.log.debug('sync is disabled')
+            return
+        if not self.is_synced():
+            # XXX Should we accept any tx while I am not synced?
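+            # (relaying is opt-in: it only happens after the remote peer sent
+            # a RELAY message, which handle_relay() stores in _is_relaying)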
+            return
+
+        # XXX When we start having many txs/s this becomes a performance issue
+        # Then we could change this to be a streaming of real time data with
+        # blocks as priorities to help miners get the blocks as fast as we can
+        # We decided not to implement this right now because we already have some producers
+        # being used in the sync algorithm and the code was becoming a bit too complex
+        if self._is_relaying:
+            self.send_data(tx)
+
+    def is_started(self) -> bool:
+        return self._started
+
+    def start(self) -> None:
+        if self._started:
+            raise Exception('NodeSyncBlock is already running')
+        self._started = True
+        self._lc_run.start(5)
+
+    def stop(self) -> None:
+        if not self._started:
+            raise Exception('NodeSyncBlock is already stopped')
+        self._started = False
+        self._lc_run.stop()
+
+    def get_cmd_dict(self) -> dict[ProtocolMessages, Callable[[str], None]]:
+        """ Return a dict of messages of the plugin.
+
+        For further information about each message, see the RFC.
+        Link: https://github.com/HathorNetwork/rfcs/blob/master/text/0025-p2p-sync-v2.md#p2p-sync-protocol-messages
+        """
+        return {
+            ProtocolMessages.GET_NEXT_BLOCKS: self.handle_get_next_blocks,
+            ProtocolMessages.BLOCKS: self.handle_blocks,
+            ProtocolMessages.BLOCKS_END: self.handle_blocks_end,
+            ProtocolMessages.GET_BEST_BLOCK: self.handle_get_best_block,
+            ProtocolMessages.BEST_BLOCK: self.handle_best_block,
+            ProtocolMessages.GET_TRANSACTIONS_BFS: self.handle_get_transactions_bfs,
+            ProtocolMessages.TRANSACTION: self.handle_transaction,
+            ProtocolMessages.TRANSACTIONS_END: self.handle_transactions_end,
+            ProtocolMessages.GET_PEER_BLOCK_HASHES: self.handle_get_peer_block_hashes,
+            ProtocolMessages.PEER_BLOCK_HASHES: self.handle_peer_block_hashes,
+            ProtocolMessages.STOP_BLOCK_STREAMING: self.handle_stop_block_streaming,
+            ProtocolMessages.GET_TIPS: self.handle_get_tips,
+            ProtocolMessages.TIPS: self.handle_tips,
+            ProtocolMessages.TIPS_END: self.handle_tips_end,
+            # XXX: overriding ReadyState.handle_error
+            ProtocolMessages.ERROR: self.handle_error,
+            ProtocolMessages.GET_DATA: self.handle_get_data,
+            ProtocolMessages.DATA: self.handle_data,
+            ProtocolMessages.RELAY: self.handle_relay,
+            ProtocolMessages.NOT_FOUND: self.handle_not_found,
+        }
+
+    def handle_not_found(self, payload: str) -> None:
+        """ Handle a received NOT-FOUND message.
+        """
+        # XXX: NOT_FOUND is a valid message, but we shouldn't ever receive it unless the other peer is running with a
+        # modified code or if there is a bug
+        self.log.warn('not found? close connection', payload=payload)
+        self.protocol.send_error_and_close_connection('Unexpected NOT_FOUND')
+
+    def handle_error(self, payload: str) -> None:
+        """ Override the protocol's original handle_error so we can recover a sync in progress.
+        """
+        assert self.protocol.connections is not None
+        # forward message to overloaded handle_error:
+        self.protocol.handle_error(payload)
+
+    def update_synced(self, synced: bool) -> None:
+        self._synced = synced
+
+    @inlineCallbacks
+    def run_sync(self) -> Generator[Any, Any, None]:
+        """ Async step of the sync algorithm.
+
+        This is the entrypoint for the sync. It is always safe to call this method.
+        """
+        if not self._is_enabled:
+            self.log.debug('sync is disabled')
+            return
+        if self._is_running:
+            # Already running...
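+            # (run_sync is fired by a LoopingCall every 5 seconds, so
+            # overlapping steps are simply skipped instead of queued)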
+            self.log.debug('already running')
+            return
+        self._is_running = True
+        try:
+            yield self._run_sync()
+        finally:
+            self._is_running = False
+
+    @inlineCallbacks
+    def _run_sync(self) -> Generator[Any, Any, None]:
+        """ Actual implementation of the sync step logic in run_sync.
+        """
+        if self.receiving_stream:
+            # If we're receiving a stream, wait for it to finish before running sync.
+            # If we're sending a stream, do the sync to update the peer's synced block
+            self.log.debug('receiving stream, try again later')
+            return
+
+        if self.mempool_manager.is_running():
+            # It's running a mempool sync, so we wait until it finishes
+            self.log.debug('running mempool sync, try again later')
+            return
+
+        bestblock = self.tx_storage.get_best_block()
+        meta = bestblock.get_metadata()
+
+        self.log.debug('run sync', height=meta.height)
+
+        assert self.protocol.connections is not None
+        assert self.tx_storage.indexes is not None
+        assert self.tx_storage.indexes.deps is not None
+
+        if self.tx_storage.indexes.deps.has_needed_tx():
+            self.log.debug('needed tx exist, sync transactions')
+            self.update_synced(False)
+            # TODO: find out whether we can sync transactions from this peer to speed things up
+            self.run_sync_transactions()
+        else:
+            # I am already in sync with all checkpoints, sync next blocks
+            yield self.run_sync_blocks()
+
+    def run_sync_transactions(self) -> None:
+        """ Run a step of the transaction syncing phase.
+        """
+        self.state = PeerState.SYNCING_TRANSACTIONS
+
+        assert self.protocol.connections is not None
+        assert self.tx_storage.indexes is not None
+        assert self.tx_storage.indexes.deps is not None
+
+        # start_hash = self.tx_storage.indexes.deps.get_next_needed_tx()
+        needed_txs, _ = collect_n(self.tx_storage.indexes.deps.iter_next_needed_txs(),
+                                  MAX_GET_TRANSACTIONS_BFS_LEN)
+
+        # Start with the last received block and find the best fully validated block in its chain
+        block = self._last_received_block
+        if block is None:
+            block = cast(Block, self.tx_storage.get_genesis(settings.GENESIS_BLOCK_HASH))
+        else:
+            with self.tx_storage.allow_partially_validated_context():
+                while not block.get_metadata().validation.is_valid():
+                    block = block.get_block_parent()
+        assert block is not None
+        assert block.hash is not None
+        block_height = block.get_height()
+
+        self.log.info('run sync transactions', start=[i.hex() for i in needed_txs], end_block_hash=block.hash.hex(),
+                      end_block_height=block_height)
+        self.send_get_transactions_bfs(needed_txs, block.hash)
+
+    @inlineCallbacks
+    def run_sync_blocks(self) -> Generator[Any, Any, None]:
+        """ Async step of the block syncing phase.
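+
+        Rough flow of one step (a summary of the branches below):
+
+        1. ask the peer for its best block (GET-BEST-BLOCK / BEST-BLOCK);
+        2. probe for the best common block via find_best_common_block();
+        3. if we are behind, stream blocks from the common height; if we are
+           at the same height, sync the mempool instead.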
+ """ + assert self.tx_storage.indexes is not None + self.state = PeerState.SYNCING_BLOCKS + + # Find my height + bestblock = self.tx_storage.get_best_block() + assert bestblock.hash is not None + meta = bestblock.get_metadata() + my_height = meta.height + + self.log.debug('run sync blocks', my_height=my_height) + + # Find best block + data = yield self.get_peer_best_block() + peer_best_block = data['block'] + peer_best_height = data['height'] + self.peer_height = peer_best_height + + # find best common block + yield self.find_best_common_block(peer_best_height, peer_best_block) + self.log.debug('run_sync_blocks', peer_height=self.peer_height, synced_height=self.synced_height) + + if self.synced_height < self.peer_height: + # sync from common block + peer_block_at_height = yield self.get_peer_block_hashes([self.synced_height]) + self.run_block_sync(peer_block_at_height[0][1], self.synced_height, peer_best_block, peer_best_height) + elif my_height == self.synced_height == self.peer_height: + # we're synced and on the same height, get their mempool + self.state = PeerState.SYNCING_MEMPOOL + self.mempool_manager.run() + elif self._is_relaying: + # TODO: validate if this is when we should disable relaying + self.send_relay(enable=False) + else: + # we got all the peer's blocks but aren't on the same height, nothing to do + pass + + def get_tips(self) -> Deferred[list[bytes]]: + """ Async method to request the remote peer's tips. + """ + key = 'tips' + deferred = self.deferred_by_key.get(key, None) + if deferred is None: + deferred = self.deferred_by_key[key] = Deferred() + self.send_get_tips() + else: + assert self._receiving_tips is not None + return deferred + + def send_get_tips(self) -> None: + """ Send a GET-TIPS message. + """ + self.log.debug('get tips') + self.send_message(ProtocolMessages.GET_TIPS) + self._receiving_tips = [] + + def handle_get_tips(self, payload: str) -> None: + """ Handle a GET-TIPS message. + """ + assert self.tx_storage.indexes is not None + assert self.tx_storage.indexes.mempool_tips is not None + if self._is_streaming: + self.log.warn('can\'t send while streaming') # XXX: or can we? + self.send_message(ProtocolMessages.MEMPOOL_END) + return + self.log.debug('handle_get_tips') + # TODO Use a streaming of tips + for txid in self.tx_storage.indexes.mempool_tips.get(): + self.send_tips(txid) + self.send_message(ProtocolMessages.TIPS_END) + + def send_tips(self, tx_id: bytes) -> None: + """ Send a TIPS message. + """ + self.send_message(ProtocolMessages.TIPS, json.dumps([tx_id.hex()])) + + def handle_tips(self, payload: str) -> None: + """ Handle a TIPS message. + """ + self.log.debug('tips', receiving_tips=self._receiving_tips) + if self._receiving_tips is None: + self.protocol.send_error_and_close_connection('TIPS not expected') + return + data = json.loads(payload) + data = [bytes.fromhex(x) for x in data] + # filter-out txs we already have + self._receiving_tips.extend(tx_id for tx_id in data if not self.partial_vertex_exists(tx_id)) + + def handle_tips_end(self, payload: str) -> None: + """ Handle a TIPS-END message. + """ + assert self._receiving_tips is not None + key = 'tips' + deferred = self.deferred_by_key.pop(key, None) + if deferred is None: + self.protocol.send_error_and_close_connection('TIPS-END not expected') + return + deferred.callback(self._receiving_tips) + self._receiving_tips = None + + def send_relay(self, *, enable: bool = True) -> None: + """ Send a RELAY message. 
+ """ + self.log.debug('send_relay', enable=enable) + self.send_message(ProtocolMessages.RELAY, json.dumps(enable)) + + def handle_relay(self, payload: str) -> None: + """ Handle a RELAY message. + """ + if not payload: + # XXX: "legacy" nothing means enable + self._is_relaying = True + else: + val = json.loads(payload) + if isinstance(val, bool): + self._is_relaying = val + else: + self.protocol.send_error_and_close_connection('RELAY: invalid value') + return + + def _setup_block_streaming(self, start_hash: bytes, start_height: int, end_hash: bytes, end_height: int, + reverse: bool) -> None: + """ Common setup before starting an outgoing block stream. + """ + self._blk_start_hash = start_hash + self._blk_start_height = start_height + self._blk_end_hash = end_hash + self._blk_end_height = end_height + self._blk_received = 0 + self._blk_repeated = 0 + raw_quantity = end_height - start_height + 1 + self._blk_max_quantity = -raw_quantity if reverse else raw_quantity + self._blk_prev_hash: Optional[bytes] = None + self._blk_stream_reverse = reverse + self._last_received_block = None + + def run_block_sync(self, start_hash: bytes, start_height: int, end_hash: bytes, end_height: int) -> None: + """ Called when the bestblock is after all checkpoints. + + It must syncs to the left until it reaches the remote's best block or the max stream limit. + """ + self._setup_block_streaming(start_hash, start_height, end_hash, end_height, False) + quantity = end_height - start_height + self.log.info('get next blocks', start_height=start_height, end_height=end_height, quantity=quantity, + start_hash=start_hash.hex(), end_hash=end_hash.hex()) + self.send_get_next_blocks(start_hash, end_hash) + + def send_message(self, cmd: ProtocolMessages, payload: Optional[str] = None) -> None: + """ Helper to send a message. + """ + assert self.protocol.state is not None + self.protocol.state.send_message(cmd, payload) + + def partial_vertex_exists(self, vertex_id: VertexId) -> bool: + """ Return true if the vertex exists no matter its validation state. + """ + with self.tx_storage.allow_partially_validated_context(): + return self.tx_storage.transaction_exists(vertex_id) + + @inlineCallbacks + def find_best_common_block(self, peer_best_height: int, peer_best_block: bytes) -> Generator[Any, Any, None]: + """ Search for the highest block/height where we're synced. 
+ """ + assert self.tx_storage.indexes is not None + my_best_height = self.tx_storage.get_height_best_block() + + self.log.debug('find common chain', peer_height=peer_best_height, my_height=my_best_height) + + if peer_best_height <= my_best_height: + my_block = self.tx_storage.indexes.height.get(peer_best_height) + if my_block == peer_best_block: + # we have all the peer's blocks + if peer_best_height == my_best_height: + # We are in sync, ask for relay so the remote sends transactions in real time + self.update_synced(True) + self.send_relay() + else: + self.update_synced(False) + + self.log.debug('synced to the latest peer block', height=peer_best_height) + self.synced_height = peer_best_height + return + else: + # TODO peer is on a different best chain + self.log.warn('peer on different chain', peer_height=peer_best_height, + peer_block=peer_best_block.hex(), my_block=(my_block.hex() if my_block is not None else + None)) + + self.update_synced(False) + not_synced = min(peer_best_height, my_best_height) + synced = self.synced_height + + while not_synced - synced > 1: + self.log.debug('find_best_common_block synced not_synced', synced=synced, not_synced=not_synced) + step = math.ceil((not_synced - synced)/10) + heights = [] + height = synced + while height < not_synced: + heights.append(height) + height += step + heights.append(not_synced) + block_height_list = yield self.get_peer_block_hashes(heights) + block_height_list.reverse() + for height, block_hash in block_height_list: + try: + # We must check only fully validated transactions. + blk = self.tx_storage.get_transaction(block_hash) + assert blk.get_metadata().validation.is_fully_connected() + assert isinstance(blk, Block) + if height != blk.get_height(): + # WTF?! It should never happen. + self.state = PeerState.ERROR + return + synced = height + break + except TransactionDoesNotExist: + not_synced = height + + self.log.debug('find_best_common_block finished synced not_synced', synced=synced, not_synced=not_synced) + self.synced_height = synced + + def get_peer_block_hashes(self, heights: list[int]) -> Deferred[list[tuple[int, bytes]]]: + """ Returns the peer's block hashes in the given heights. + """ + key = 'peer-block-hashes' + if self.deferred_by_key.get(key, None) is not None: + raise Exception('latest_deferred is not None') + self.send_get_peer_block_hashes(heights) + deferred: Deferred[list[tuple[int, bytes]]] = Deferred() + self.deferred_by_key[key] = deferred + return deferred + + def send_get_peer_block_hashes(self, heights: list[int]) -> None: + """ Send a GET-PEER-BLOCK-HASHES message. + """ + payload = json.dumps(heights) + self.send_message(ProtocolMessages.GET_PEER_BLOCK_HASHES, payload) + + def handle_get_peer_block_hashes(self, payload: str) -> None: + """ Handle a GET-PEER-BLOCK-HASHES message. + """ + assert self.tx_storage.indexes is not None + heights = json.loads(payload) + if len(heights) > 20: + self.protocol.send_error_and_close_connection('GET-PEER-BLOCK-HASHES: too many heights') + return + data = [] + for h in heights: + blk_hash = self.tx_storage.indexes.height.get(h) + if blk_hash is None: + break + blk = self.tx_storage.get_transaction(blk_hash) + if blk.get_metadata().voided_by: + # The height index might have voided blocks when there is a draw. + # Let's try again soon. 
+                self.reactor.callLater(3, self.handle_get_peer_block_hashes, payload)
+                return
+            data.append((h, blk_hash.hex()))
+        payload = json.dumps(data)
+        self.send_message(ProtocolMessages.PEER_BLOCK_HASHES, payload)
+
+    def handle_peer_block_hashes(self, payload: str) -> None:
+        """ Handle a PEER-BLOCK-HASHES message.
+        """
+        data = json.loads(payload)
+        data = [(h, bytes.fromhex(block_hash)) for (h, block_hash) in data]
+        key = 'peer-block-hashes'
+        deferred = self.deferred_by_key.pop(key, None)
+        if deferred:
+            deferred.callback(data)
+
+    def send_get_next_blocks(self, start_hash: bytes, end_hash: bytes) -> None:
+        """ Send a GET-NEXT-BLOCKS message.
+        """
+        payload = json.dumps(dict(
+            start_hash=start_hash.hex(),
+            end_hash=end_hash.hex(),
+        ))
+        self.send_message(ProtocolMessages.GET_NEXT_BLOCKS, payload)
+        self.receiving_stream = True
+
+    def handle_get_next_blocks(self, payload: str) -> None:
+        """ Handle a GET-NEXT-BLOCKS message.
+        """
+        self.log.debug('handle GET-NEXT-BLOCKS')
+        if self._is_streaming:
+            self.protocol.send_error_and_close_connection('GET-NEXT-BLOCKS received before previous one finished')
+            return
+        data = json.loads(payload)
+        self.send_next_blocks(
+            start_hash=bytes.fromhex(data['start_hash']),
+            end_hash=bytes.fromhex(data['end_hash']),
+        )
+
+    def send_next_blocks(self, start_hash: bytes, end_hash: bytes) -> None:
+        """ Send a NEXT-BLOCKS message.
+        """
+        self.log.debug('start NEXT-BLOCKS stream')
+        try:
+            blk = self.tx_storage.get_transaction(start_hash)
+        except TransactionDoesNotExist:
+            # In case the tx does not exist we send a NOT-FOUND message
+            self.log.debug('requested start_hash not found', start_hash=start_hash.hex())
+            self.send_message(ProtocolMessages.NOT_FOUND, start_hash.hex())
+            return
+        assert isinstance(blk, Block)
+        assert blk.hash is not None
+        # XXX: it is not an error for the other peer to request a voided block, we'll pretend it doesn't exist, but
+        blk_meta = blk.get_metadata()
+        if blk_meta.voided_by:
+            # In case the tx does not exist we send a NOT-FOUND message
+            self.log.debug('requested start_hash is voided, continue anyway', start_hash=start_hash.hex(),
+                           voided_by=[i.hex() for i in blk_meta.voided_by])
+            # XXX: we want to be able to not send this, but we do because the remote node could get stuck otherwise
+            # (tracked by issue #711)
+            # self.send_message(ProtocolMessages.NOT_FOUND, start_hash.hex())
+            # return
+        if self.blockchain_streaming is not None and self.blockchain_streaming.is_running:
+            self.blockchain_streaming.stop()
+        self.blockchain_streaming = BlockchainStreaming(self, blk, end_hash, limit=self.DEFAULT_STREAMING_LIMIT)
+        self.blockchain_streaming.start()
+
+    def send_blocks(self, blk: Block) -> None:
+        """ Send a BLOCKS message.
+
+        This method is called from a streamer for each block being sent.
+        """
+        payload = base64.b64encode(bytes(blk)).decode('ascii')
+        self.send_message(ProtocolMessages.BLOCKS, payload)
+
+    def send_blocks_end(self, response_code: StreamEnd) -> None:
+        """ Send a BLOCKS-END message.
+
+        This message marks the end of a stream of BLOCKS messages. It is not mandatory to send any BLOCKS messages
+        before, in which case it would be an "empty" stream.
+        """
+        payload = str(int(response_code))
+        self.log.debug('send BLOCKS-END', payload=payload)
+        self.send_message(ProtocolMessages.BLOCKS_END, payload)
+
+    def handle_blocks_end(self, payload: str) -> None:
+        """ Handle a BLOCKS-END message.
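+
+        The payload is the integer value of a StreamEnd code (see
+        streamers.py), e.g. '0' for END_HASH_REACHED.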
+
+        This makes it known that the other peer will not send any more BLOCKS messages as a response to a
+        previous command.
+        """
+        self.log.debug('recv BLOCKS-END', payload=payload, size=self._blk_size)
+
+        response_code = StreamEnd(int(payload))
+        self.receiving_stream = False
+        assert self.protocol.connections is not None
+
+        if self.state is not PeerState.SYNCING_BLOCKS:
+            self.log.error('unexpected BLOCKS-END', state=self.state)
+            self.protocol.send_error_and_close_connection('Not expecting to receive BLOCKS-END message')
+            return
+
+        self.log.debug('block streaming ended', reason=str(response_code))
+
+    def handle_blocks(self, payload: str) -> None:
+        """ Handle a BLOCKS message.
+        """
+        if self.state is not PeerState.SYNCING_BLOCKS:
+            self.log.error('unexpected BLOCK', state=self.state)
+            self.protocol.send_error_and_close_connection('Not expecting to receive BLOCK message')
+            return
+
+        assert self.protocol.connections is not None
+
+        blk_bytes = base64.b64decode(payload)
+        blk = tx_or_block_from_bytes(blk_bytes)
+        if not isinstance(blk, Block):
+            # Not a block. Punish peer?
+            return
+        blk.storage = self.tx_storage
+
+        assert blk.hash is not None
+
+        self._blk_received += 1
+        if self._blk_received > self._blk_max_quantity + 1:
+            self.log.warn('too many blocks received', last_block=blk.hash_hex)
+            # Too many blocks. Punish peer?
+            self.state = PeerState.ERROR
+            return
+
+        if self.partial_vertex_exists(blk.hash):
+            # We reached a block we already have. Skip it.
+            self._blk_prev_hash = blk.hash
+            self._blk_repeated += 1
+            if self.receiving_stream and self._blk_repeated > self.max_repeated_blocks:
+                self.log.debug('repeated block received', total_repeated=self._blk_repeated)
+                self.handle_many_repeated_blocks()
+
+        # basic linearity validation, crucial for correctly predicting the next block's height
+        if self._blk_stream_reverse:
+            if self._last_received_block and blk.hash != self._last_received_block.get_block_parent_hash():
+                self.handle_invalid_block('received block is not parent of previous block')
+                return
+        else:
+            if self._last_received_block and blk.get_block_parent_hash() != self._last_received_block.hash:
+                self.handle_invalid_block('received block is not child of previous block')
+                return
+
+        try:
+            # this method takes care of checking if the block already exists,
+            # it will take care of doing at least a basic validation
+            # self.log.debug('add new block', block=blk.hash_hex)
+            if self.partial_vertex_exists(blk.hash):
+                # XXX: early terminate?
+                self.log.debug('block early terminate?', blk_id=blk.hash.hex())
+            else:
+                self.log.debug('block received', blk_id=blk.hash.hex())
+            self.on_new_tx(blk, propagate_to_peers=False, quiet=True)
+        except HathorError:
+            self.handle_invalid_block(exc_info=True)
+            return
+        else:
+            self._last_received_block = blk
+            self._blk_repeated = 0
+            # XXX: debugging log, maybe add timing info
+            if self._blk_received % 500 == 0:
+                self.log.debug('block streaming in progress', blocks_received=self._blk_received)
+
+    def handle_invalid_block(self, msg: Optional[str] = None, *, exc_info: bool = False) -> None:
+        """ Call this method when receiving an invalid block.
+        """
+        kwargs: dict[str, Any] = {}
+        if msg is not None:
+            kwargs['error'] = msg
+        if exc_info:
+            kwargs['exc_info'] = True
+        self.log.warn('invalid new block', **kwargs)
+        # Invalid block?!
+        self.state = PeerState.ERROR
+
+    def handle_many_repeated_blocks(self) -> None:
+        """ Call this when a stream sends too many blocks in sequence that we already have.
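+
+        This is triggered from handle_blocks() once more than
+        max_repeated_blocks (10) blocks we already have arrive in a row.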
+ """ + self.send_stop_block_streaming() + self.receiving_stream = False + + def send_stop_block_streaming(self) -> None: + """ Send a STOP-BLOCK-STREAMING message. + + This asks the other peer to stop a running block stream. + """ + self.send_message(ProtocolMessages.STOP_BLOCK_STREAMING) + + def handle_stop_block_streaming(self, payload: str) -> None: + """ Handle a STOP-BLOCK-STREAMING message. + + This means the remote peer wants to stop the current block stream. + """ + if not self.blockchain_streaming or not self._is_streaming: + self.log.debug('got stop streaming message with no streaming running') + return + + self.log.debug('got stop streaming message') + self.blockchain_streaming.stop() + self.blockchain_streaming = None + + def get_peer_best_block(self) -> Deferred[dict[str, Any]]: + """ Async call to get the remote peer's best block. + """ + key = 'best-block' + deferred = self.deferred_by_key.pop(key, None) + if self.deferred_by_key.get(key, None) is not None: + raise Exception('latest_deferred is not None') + + self.send_get_best_block() + deferred = Deferred() + self.deferred_by_key[key] = deferred + return deferred + + def send_get_best_block(self) -> None: + """ Send a GET-BEST-BLOCK messsage. + """ + self.send_message(ProtocolMessages.GET_BEST_BLOCK) + + def handle_get_best_block(self, payload: str) -> None: + """ Handle a GET-BEST-BLOCK message. + """ + best_block = self.tx_storage.get_best_block() + meta = best_block.get_metadata() + data = {'block': best_block.hash_hex, 'height': meta.height} + self.send_message(ProtocolMessages.BEST_BLOCK, json.dumps(data)) + + def handle_best_block(self, payload: str) -> None: + """ Handle a BEST-BLOCK message. + """ + data = json.loads(payload) + assert self.protocol.connections is not None + self.log.debug('got best block', **data) + data['block'] = bytes.fromhex(data['block']) + + key = 'best-block' + deferred = self.deferred_by_key.pop(key, None) + if deferred: + deferred.callback(data) + + def _setup_tx_streaming(self): + """ Common setup before starting an outgoing transaction stream. + """ + self._tx_received = 0 + self._tx_max_quantity = DEFAULT_STREAMING_LIMIT # XXX: maybe this is redundant + # XXX: what else can we add for checking if everything is going well? + + def send_get_transactions_bfs(self, start_from: list[bytes], until_first_block: bytes) -> None: + """ Send a GET-TRANSACTIONS-BFS message. + + This will request a BFS of all transactions starting from start_from list and walking back into parents/inputs. + + The start_from list can contain blocks, but they won't be sent. For example if a block B1 has T1 and T2 as + transaction parents, start_from=[B1] and start_from=[T1, T2] will have the same result. + + The stop condition is reaching transactions/inputs that have a first_block of height less or equal than the + height of until_first_block. The other peer will return an empty response if it doesn't have any of the + transactions in start_from or if it doesn't have the until_first_block block. 
+ """ + self._setup_tx_streaming() + start_from_hexlist = [tx.hex() for tx in start_from] + until_first_block_hex = until_first_block.hex() + self.log.debug('send_get_transactions_bfs', start_from=start_from_hexlist, last_block=until_first_block_hex) + payload = json.dumps(dict( + start_from=start_from_hexlist, + until_first_block=until_first_block_hex, + )) + self.send_message(ProtocolMessages.GET_TRANSACTIONS_BFS, payload) + self.receiving_stream = True + + def handle_get_transactions_bfs(self, payload: str) -> None: + """ Handle a GET-TRANSACTIONS-BFS message. + """ + if self._is_streaming: + self.log.warn('ignore GET-TRANSACTIONS-BFS, already streaming') + return + data = json.loads(payload) + # XXX: todo verify this limit while parsing the payload. + start_from = data['start_from'] + if len(start_from) > MAX_GET_TRANSACTIONS_BFS_LEN: + self.log.error('too many transactions in GET-TRANSACTIONS-BFS', state=self.state) + self.protocol.send_error_and_close_connection('Too many transactions in GET-TRANSACTIONS-BFS') + return + self.log.debug('handle_get_transactions_bfs', **data) + start_from = [bytes.fromhex(tx_hash_hex) for tx_hash_hex in start_from] + until_first_block = bytes.fromhex(data['until_first_block']) + self.send_transactions_bfs(start_from, until_first_block) + + def send_transactions_bfs(self, start_from: list[bytes], until_first_block: bytes) -> None: + """ Start a transactions BFS stream. + """ + start_from_txs = [] + for start_from_hash in start_from: + try: + start_from_txs.append(self.tx_storage.get_transaction(start_from_hash)) + except TransactionDoesNotExist: + # In case the tx does not exist we send a NOT-FOUND message + self.log.debug('requested start_from_hash not found', start_from_hash=start_from_hash.hex()) + self.send_message(ProtocolMessages.NOT_FOUND, start_from_hash.hex()) + return + if not self.tx_storage.transaction_exists(until_first_block): + # In case the tx does not exist we send a NOT-FOUND message + self.log.debug('requested until_first_block not found', until_first_block=until_first_block.hex()) + self.send_message(ProtocolMessages.NOT_FOUND, until_first_block.hex()) + return + if self.transactions_streaming is not None and self.transactions_streaming.is_running: + self.transactions_streaming.stop() + self.transactions_streaming = TransactionsStreaming(self, start_from_txs, until_first_block, + limit=self.DEFAULT_STREAMING_LIMIT) + self.transactions_streaming.start() + + def send_transaction(self, tx: Transaction) -> None: + """ Send a TRANSACTION message. + """ + # payload = bytes(tx).hex() # fails for big transactions + payload = base64.b64encode(bytes(tx)).decode('ascii') + self.send_message(ProtocolMessages.TRANSACTION, payload) + + def send_transactions_end(self, response_code: StreamEnd) -> None: + """ Send a TRANSACTIONS-END message. + """ + payload = str(int(response_code)) + self.log.debug('send TRANSACTIONS-END', payload=payload) + self.send_message(ProtocolMessages.TRANSACTIONS_END, payload) + + def handle_transactions_end(self, payload: str) -> None: + """ Handle a TRANSACTIONS-END message. 
+ """ + self.log.debug('recv TRANSACTIONS-END', payload=payload, size=self._blk_size) + + response_code = StreamEnd(int(payload)) + self.receiving_stream = False + assert self.protocol.connections is not None + + if self.state is not PeerState.SYNCING_TRANSACTIONS: + self.log.error('unexpected TRANSACTIONS-END', state=self.state) + self.protocol.send_error_and_close_connection('Not expecting to receive TRANSACTIONS-END message') + return + + self.log.debug('transaction streaming ended', reason=str(response_code)) + + def handle_transaction(self, payload: str) -> None: + """ Handle a TRANSACTION message. + """ + assert self.protocol.connections is not None + + # tx_bytes = bytes.fromhex(payload) + tx_bytes = base64.b64decode(payload) + tx = tx_or_block_from_bytes(tx_bytes) + assert tx.hash is not None + if not isinstance(tx, Transaction): + self.log.warn('not a transaction', hash=tx.hash_hex) + # Not a transaction. Punish peer? + return + + self._tx_received += 1 + if self._tx_received > self._tx_max_quantity + 1: + self.log.warn('too many txs received') + self.state = PeerState.ERROR + return + + try: + # this methods takes care of checking if the tx already exists, it will take care of doing at least + # a basic validation + # self.log.debug('add new tx', tx=tx.hash_hex) + if self.partial_vertex_exists(tx.hash): + # XXX: early terminate? + self.log.debug('tx early terminate?', tx_id=tx.hash.hex()) + else: + self.log.debug('tx received', tx_id=tx.hash.hex()) + self.on_new_tx(tx, propagate_to_peers=False, quiet=True, reject_locked_reward=True) + except HathorError: + self.log.warn('invalid new tx', exc_info=True) + # Invalid block?! + # Invalid transaction?! + # Maybe stop syncing and punish peer. + self.state = PeerState.ERROR + return + else: + # XXX: debugging log, maybe add timing info + if self._tx_received % 100 == 0: + self.log.debug('tx streaming in progress', txs_received=self._tx_received) + + @inlineCallbacks + def get_tx(self, tx_id: bytes) -> Generator[Deferred, Any, BaseTransaction]: + """ Async method to get a transaction from the db/cache or to download it. + """ + tx = self._get_tx_cache.get(tx_id) + if tx is not None: + self.log.debug('tx in cache', tx=tx_id.hex()) + return tx + try: + tx = self.tx_storage.get_transaction(tx_id) + except TransactionDoesNotExist: + tx = yield self.get_data(tx_id, 'mempool') + if tx is None: + self.log.error('failed to get tx', tx_id=tx_id.hex()) + self.protocol.send_error_and_close_connection(f'DATA mempool {tx_id.hex()} not found') + raise + if tx.hash != tx_id: + self.protocol.send_error_and_close_connection(f'DATA mempool {tx_id.hex()} hash mismatch') + raise + return tx + + def get_data(self, tx_id: bytes, origin: str) -> Deferred: + """ Async method to request a tx by id. + """ + # TODO: deal with stale `get_data` calls + if origin != 'mempool': + raise ValueError(f'origin={origin} not supported, only origin=mempool is supported') + key = f'{origin}:{tx_id.hex()}' + deferred = self.deferred_by_key.get(key, None) + if deferred is None: + deferred = self.deferred_by_key[key] = Deferred() + self.send_get_data(tx_id, origin=origin) + self.log.debug('get_data of new tx_id', deferred=deferred, key=key) + else: + # XXX: can we re-use deferred objects like this? + self.log.debug('get_data of same tx_id, reusing deferred', deferred=deferred, key=key) + return deferred + + def _on_get_data(self, tx: BaseTransaction, origin: str) -> None: + """ Called when a requested tx is received. 
+ """ + assert tx.hash is not None + key = f'{origin}:{tx.hash_hex}' + deferred = self.deferred_by_key.pop(key, None) + if deferred is None: + # Peer sent the wrong transaction?! + # XXX: ban peer? + self.protocol.send_error_and_close_connection(f'DATA {origin}: with tx that was not requested') + return + self.log.debug('get_data fulfilled', deferred=deferred, key=key) + self._get_tx_cache[tx.hash] = tx + if len(self._get_tx_cache) > self._get_tx_cache_maxsize: + self._get_tx_cache.popitem(last=False) + deferred.callback(tx) + + def send_data(self, tx: BaseTransaction, *, origin: str = '') -> None: + """ Send a DATA message. + """ + self.log.debug('send tx', tx=tx.hash_hex) + tx_payload = base64.b64encode(tx.get_struct()).decode('ascii') + if not origin: + payload = tx_payload + else: + payload = ' '.join([origin, tx_payload]) + self.send_message(ProtocolMessages.DATA, payload) + + def send_get_data(self, txid: bytes, *, origin: Optional[str] = None) -> None: + """ Send a GET-DATA message for a given txid. + """ + data = { + 'txid': txid.hex(), + } + if origin is not None: + data['origin'] = origin + payload = json.dumps(data) + self.send_message(ProtocolMessages.GET_DATA, payload) + + def handle_get_data(self, payload: str) -> None: + """ Handle a GET-DATA message. + """ + data = json.loads(payload) + txid_hex = data['txid'] + origin = data.get('origin', '') + # self.log.debug('handle_get_data', payload=hash_hex) + try: + tx = self.protocol.node.tx_storage.get_transaction(bytes.fromhex(txid_hex)) + self.send_data(tx, origin=origin) + except TransactionDoesNotExist: + # In case the tx does not exist we send a NOT-FOUND message + self.send_message(ProtocolMessages.NOT_FOUND, txid_hex) + + def handle_data(self, payload: str) -> None: + """ Handle a DATA message. + """ + if not payload: + return + part1, _, part2 = payload.partition(' ') + if not part2: + origin = None + data = base64.b64decode(part1) + else: + origin = part1 + data = base64.b64decode(part2) + + try: + tx = tx_or_block_from_bytes(data) + except struct.error: + # Invalid data for tx decode + return + + if origin: + if origin != 'mempool': + # XXX: ban peer? + self.protocol.send_error_and_close_connection(f'DATA {origin}: unsupported origin') + return + assert tx is not None + self._on_get_data(tx, origin) + return + + assert tx is not None + assert tx.hash is not None + if self.protocol.node.tx_storage.get_genesis(tx.hash): + # We just got the data of a genesis tx/block. What should we do? + # Will it reduce peer reputation score? + return + + tx.storage = self.protocol.node.tx_storage + assert tx.hash is not None + + if self.partial_vertex_exists(tx.hash): + # transaction already added to the storage, ignore it + # XXX: maybe we could add a hash blacklist and punish peers propagating known bad txs + self.manager.tx_storage.compare_bytes_with_local_tx(tx) + return + else: + # If we have not requested the data, it is a new transaction being propagated + # in the network, thus, we propagate it as well. 
+            if tx.can_validate_full():
+                self.log.info('tx received in real time from peer', tx=tx.hash_hex, peer=self.protocol.get_peer_id())
+                self.on_new_tx(tx, propagate_to_peers=True)
+            else:
+                self.log.info('skipping tx received in real time from peer',
+                              tx=tx.hash_hex, peer=self.protocol.get_peer_id())
+
+    def on_new_tx(self, tx: BaseTransaction, *, quiet: bool = False, propagate_to_peers: bool = True,
+                  sync_checkpoints: bool = False, reject_locked_reward: bool = True) -> bool:
+        """ This method handles everything related to adding potentially partially validated transactions.
+
+        Call this instead of HathorManager.on_new_tx, unless `tx` must be fully validated (for example when receiving
+        realtime DATA pushes).
+        """
+
+        assert self.tx_storage.indexes is not None
+        assert tx.hash is not None
+
+        # XXX: "refresh" the transaction so there isn't a duplicate in memory
+        if self.partial_vertex_exists(tx.hash):
+            with self.tx_storage.allow_partially_validated_context():
+                self.tx_storage.compare_bytes_with_local_tx(tx)
+                tx = self.tx_storage.get_transaction(tx.hash)
+            assert tx.hash is not None
+
+        tx.storage = self.tx_storage
+
+        with self.tx_storage.allow_partially_validated_context():
+            metadata = tx.get_metadata()
+
+        if metadata.validation.is_fully_connected() or tx.can_validate_full():
+            if not self.manager.on_new_tx(tx):
+                return False
+        elif sync_checkpoints:
+            assert self.tx_storage.indexes.deps is not None
+            with self.tx_storage.allow_partially_validated_context():
+                metadata.children = self.tx_storage.indexes.deps.known_children(tx)
+            try:
+                tx.validate_checkpoint(self.manager.checkpoints)
+            except HathorError:
+                self.log.warn('on_new_tx(): checkpoint validation failed', tx=tx.hash_hex, exc_info=True)
+                return False
+            self.tx_storage.save_transaction(tx)
+            self.tx_storage.indexes.deps.add_tx(tx)
+            self.manager.log_new_object(tx, 'new {} partially accepted while syncing checkpoints', quiet=quiet)
+        else:
+            assert self.tx_storage.indexes.deps is not None
+            with self.tx_storage.allow_partially_validated_context():
+                if isinstance(tx, Block) and not tx.has_basic_block_parent():
+                    self.log.warn('on_new_tx(): block parent needs to be at least basic-valid', tx=tx.hash_hex)
+                    return False
+                if not tx.validate_basic():
+                    self.log.warn('on_new_tx(): basic validation failed', tx=tx.hash_hex)
+                    return False
+
+            # The method below adds the tx as a child of the parents
+            # This needs to be called right before the save because we were adding the children
+            # in the tx parents even if the tx was invalid (failing the verifications above)
+            # then I would have a children that was not in the storage
+            self.tx_storage.save_transaction(tx)
+            self.tx_storage.indexes.deps.add_tx(tx)
+            self.manager.log_new_object(tx, 'new {} partially accepted', quiet=quiet)
+
+        if self.tx_storage.indexes.deps is not None:
+            self.tx_storage.indexes.deps.remove_from_needed_index(tx.hash)
+
+        if self.tx_storage.indexes.deps is not None:
+            try:
+                self.manager.sync_v2_step_validations([tx], quiet=quiet)
+            except (AssertionError, HathorError):
+                self.log.warn('on_new_tx(): step validations failed', tx=tx.hash_hex, exc_info=True)
+                return False
+
+        return True
diff --git a/hathor/p2p/sync_v2/mempool.py b/hathor/p2p/sync_v2/mempool.py
new file mode 100644
index 000000000..7c2130251
--- /dev/null
+++ b/hathor/p2p/sync_v2/mempool.py
@@ -0,0 +1,121 @@
+# Copyright 2020 Hathor Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
diff --git a/hathor/p2p/sync_v2/mempool.py b/hathor/p2p/sync_v2/mempool.py
new file mode 100644
index 000000000..7c2130251
--- /dev/null
+++ b/hathor/p2p/sync_v2/mempool.py
@@ -0,0 +1,121 @@
+# Copyright 2020 Hathor Labs
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import deque
+from typing import TYPE_CHECKING, Any, Generator, Optional
+
+from structlog import get_logger
+from twisted.internet.defer import Deferred, inlineCallbacks
+
+from hathor.transaction import BaseTransaction
+
+if TYPE_CHECKING:
+    from hathor.p2p.sync_v2.manager import NodeBlockSync
+
+logger = get_logger()
+
+
+class SyncMempoolManager:
+    """Manage the sync-v2 mempool with one peer.
+    """
+    def __init__(self, sync_manager: 'NodeBlockSync'):
+        """Initialize the sync-v2 mempool manager."""
+        self.log = logger.new(peer=sync_manager.protocol.get_short_peer_id())
+
+        # Shortcuts.
+        self.sync_manager = sync_manager
+        self.manager = self.sync_manager.manager
+        self.tx_storage = self.manager.tx_storage
+        self.reactor = self.sync_manager.reactor
+
+        # Set of tips we know but couldn't add to the DAG yet.
+        self.missing_tips: set[bytes] = set()
+
+        # Maximum number of items in the DFS.
+        self.MAX_STACK_LENGTH: int = 1000
+
+        # Whether the mempool algorithm is running
+        self._is_running = False
+
+    def is_running(self) -> bool:
+        """Whether the sync-mempool is currently running."""
+        return self._is_running
+
+    def run(self) -> None:
+        """Start _run; it won't start again if already running."""
+        if self.is_running():
+            self.log.warn('already started')
+            return
+        self._is_running = True
+        self.reactor.callLater(0, self._run)
+
+    @inlineCallbacks
+    def _run(self) -> Generator[Deferred, Any, None]:
+        try:
+            yield self._unsafe_run()
+        finally:
+            # sync_manager.run_sync will start it again when needed
+            self._is_running = False
+
+    @inlineCallbacks
+    def _unsafe_run(self) -> Generator[Deferred, Any, None]:
+        """Run a single loop of the sync-v2 mempool."""
+        if not self.missing_tips:
+            # We don't know of any missing tips yet, so ask the peer for its mempool tips.
+            tx_hashes: list[bytes] = yield self.sync_manager.get_tips()
+            self.missing_tips.update(h for h in tx_hashes if not self.tx_storage.transaction_exists(h))
+
+        while self.missing_tips:
+            self.log.debug('We have missing tips! Let\'s start!', missing_tips=[x.hex() for x in self.missing_tips])
+            tx_id = next(iter(self.missing_tips))
+            tx: BaseTransaction = yield self.sync_manager.get_tx(tx_id)
+            # Stack used by the DFS in the dependencies.
+            # We use a deque for performance reasons.
+            self.log.debug('start mempool DFS', tx=tx.hash_hex)
+            yield self._dfs(deque([tx]))
+
+    @inlineCallbacks
+    def _dfs(self, stack: deque[BaseTransaction]) -> Generator[Deferred, Any, None]:
+        """Run an iterative DFS over the missing dependencies of the transactions in the stack."""
+        while stack:
+            tx = stack[-1]
+            self.log.debug('step mempool DFS', tx=tx.hash_hex, stack_len=len(stack))
+            missing_dep = self._next_missing_dep(tx)
+            if missing_dep is None:
+                self.log.debug(r'No dependencies missing! \o/')
+                self._add_tx(tx)
+                assert tx == stack.pop()
+            else:
+                self.log.debug('Iterate in the DFS.', missing_dep=missing_dep.hex())
+                tx_dep = yield self.sync_manager.get_tx(missing_dep)
+                stack.append(tx_dep)
+                if len(stack) > self.MAX_STACK_LENGTH:
+                    stack.popleft()
+
+    def _next_missing_dep(self, tx: BaseTransaction) -> Optional[bytes]:
+        """Return the first missing dependency of tx, if any."""
+        assert not tx.is_block
+        for txin in tx.inputs:
+            if not self.tx_storage.transaction_exists(txin.tx_id):
+                return txin.tx_id
+        for parent in tx.parents:
+            if not self.tx_storage.transaction_exists(parent):
+                return parent
+        return None
+
+    def _add_tx(self, tx: BaseTransaction) -> None:
+        """Add tx to the DAG."""
+        assert tx.hash is not None
+        self.missing_tips.discard(tx.hash)
+        self.manager.on_new_tx(tx)
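The DFS above keeps the vertex whose dependencies are being resolved on top of the stack, downloads the first missing input or parent, and only adds a vertex to the DAG once everything it references exists locally. A self-contained sketch of the same traversal over a plain dictionary graph (resolve_order and its callables are illustrative, not part of this diff):

from collections import deque
from typing import Callable, Iterable

def resolve_order(tip: str, deps: Callable[[str], Iterable[str]],
                  exists: Callable[[str], bool], max_stack: int = 1000) -> list[str]:
    # Iterative DFS mirroring SyncMempoolManager._dfs: keep the current vertex
    # on top of the stack until none of its dependencies are missing, then emit it.
    order: list[str] = []
    have: set[str] = set()
    stack = deque([tip])
    while stack:
        cur = stack[-1]
        missing = next((d for d in deps(cur) if not exists(d) and d not in have), None)
        if missing is None:
            order.append(cur)      # all deps present: safe to add to the DAG
            have.add(cur)
            stack.pop()
        else:
            stack.append(missing)  # descend into the first missing dependency
            if len(stack) > max_stack:
                stack.popleft()    # bounded stack, as with MAX_STACK_LENGTH above
    return order

# tiny usage example: c depends on b, b depends on a; nothing exists locally yet
graph = {'c': ['b'], 'b': ['a'], 'a': []}
assert resolve_order('c', graph.__getitem__, lambda v: False) == ['a', 'b', 'c']

Note that dropping the oldest stack entries bounds memory the same way MAX_STACK_LENGTH does; since missing tips stay in missing_tips, a dropped subtree would presumably be revisited on a later mempool pass.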
+ """Schedule `send_next` if needed.""" + if not self.is_running: + return + + if not self.is_producing: + return + + if self.delayed_call and self.delayed_call.active(): + return + + self.delayed_call = self.node_sync.reactor.callLater(0, self.send_next) + + def start(self) -> None: + """Start pushing.""" + self.log.debug('start streaming') + assert not self.node_sync._is_streaming + self.node_sync._is_streaming = True + self.is_running = True + self.consumer.registerProducer(self, True) + self.resumeProducing() + + def stop(self) -> None: + """Stop pushing.""" + self.log.debug('stop streaming') + assert self.node_sync._is_streaming + self.is_running = False + self.pauseProducing() + self.consumer.unregisterProducer() + self.node_sync._is_streaming = False + + def send_next(self) -> None: + """Push next block to peer.""" + raise NotImplementedError + + def resumeProducing(self) -> None: + """This method is automatically called to resume pushing data.""" + self.is_producing = True + self.schedule_if_needed() + + def pauseProducing(self) -> None: + """This method is automatically called to pause pushing data.""" + self.is_producing = False + if self.delayed_call and self.delayed_call.active(): + self.delayed_call.cancel() + + def stopProducing(self) -> None: + """This method is automatically called to stop pushing data.""" + self.pauseProducing() + + +class BlockchainStreaming(_StreamingBase): + def __init__(self, node_sync: 'NodeBlockSync', start_block: Block, end_hash: bytes, + *, limit: int = DEFAULT_STREAMING_LIMIT, reverse: bool = False): + super().__init__(node_sync, limit=limit) + + self.start_block = start_block + self.current_block: Optional[Block] = start_block + self.end_hash = end_hash + self.reverse = reverse + + def send_next(self) -> None: + """Push next block to peer.""" + assert self.is_running + assert self.is_producing + assert self.current_block is not None + + cur = self.current_block + assert cur is not None + assert cur.hash is not None + + if cur.hash == self.end_hash: + # only send the last when not reverse + if not self.reverse: + self.log.debug('send next block', blk_id=cur.hash.hex()) + self.node_sync.send_blocks(cur) + self.stop() + self.node_sync.send_blocks_end(StreamEnd.END_HASH_REACHED) + return + + if self.counter >= self.limit: + # only send the last when not reverse + if not self.reverse: + self.log.debug('send next block', blk_id=cur.hash.hex()) + self.node_sync.send_blocks(cur) + self.stop() + self.node_sync.send_blocks_end(StreamEnd.LIMIT_EXCEEDED) + return + + self.counter += 1 + + self.log.debug('send next block', blk_id=cur.hash.hex()) + self.node_sync.send_blocks(cur) + + if self.reverse: + self.current_block = cur.get_block_parent() + else: + self.current_block = cur.get_next_block_best_chain() + + # XXX: don't send the genesis or the current block + if self.current_block is None or self.current_block.is_genesis: + self.stop() + self.node_sync.send_blocks_end(StreamEnd.NO_MORE_BLOCKS) + return + + self.schedule_if_needed() + + +class TransactionsStreaming(_StreamingBase): + """Streams all transactions confirmed by the given block, from right to left (decreasing timestamp). + """ + + def __init__(self, node_sync: 'NodeBlockSync', start_from: list[BaseTransaction], last_block_hash: bytes, + *, limit: int = DEFAULT_STREAMING_LIMIT): + # XXX: is limit needed for tx streaming? Or let's always send all txs for + # a block? 
Very unlikely we'll reach this limit + super().__init__(node_sync, limit=limit) + + assert len(start_from) > 0 + assert start_from[0].storage is not None + self.storage = start_from[0].storage + self.last_block_hash = last_block_hash + self.last_block_height = 0 + + self.bfs = BFSOrderWalk(self.storage, is_dag_verifications=True, is_dag_funds=True, is_left_to_right=False) + self.iter = self.bfs.run(start_from, skip_root=False) + + def start(self) -> None: + super().start() + last_blk = self.storage.get_transaction(self.last_block_hash) + assert isinstance(last_blk, Block) + self.last_block_height = last_blk.get_height() + + # TODO: make this generic too? + def send_next(self) -> None: + """Push next transaction to peer.""" + assert self.is_running + assert self.is_producing + + try: + cur = next(self.iter) + except StopIteration: + # nothing more to send + self.stop() + self.node_sync.send_transactions_end(StreamEnd.END_HASH_REACHED) + return + + if cur.is_block: + if cur.hash == self.last_block_hash: + self.bfs.skip_neighbors(cur) + self.schedule_if_needed() + return + + assert isinstance(cur, Transaction) + assert cur.hash is not None + + cur_metadata = cur.get_metadata() + if cur_metadata.first_block is None: + self.log.debug('reached a tx that is not confirming, continuing anyway') + # XXX: related to issue #711 + # self.stop() + # self.node_sync.send_transactions_end(StreamEnd.TX_NOT_CONFIRMED) + # return + else: + assert cur_metadata.first_block is not None + first_blk_meta = self.storage.get_metadata(cur_metadata.first_block) + assert first_blk_meta is not None + confirmed_by_height = first_blk_meta.height + assert confirmed_by_height is not None + if confirmed_by_height <= self.last_block_height: + # got to a tx that is confirmed by the given last-block or an older block + self.log.debug('tx confirmed by block older than last_block', tx=cur.hash_hex, + confirmed_by_height=confirmed_by_height, last_block_height=self.last_block_height) + self.bfs.skip_neighbors(cur) + self.schedule_if_needed() + return + + self.log.debug('send next transaction', tx_id=cur.hash.hex()) + self.node_sync.send_transaction(cur) + + self.counter += 1 + if self.counter >= self.limit: + self.stop() + self.node_sync.send_transactions_end(StreamEnd.LIMIT_EXCEEDED) + return + + self.schedule_if_needed() diff --git a/hathor/p2p/sync_version.py b/hathor/p2p/sync_version.py index 8c9ab7ee6..8db49918a 100644 --- a/hathor/p2p/sync_version.py +++ b/hathor/p2p/sync_version.py @@ -24,7 +24,7 @@ class SyncVersion(Enum): # on. 
V1 = 'v1' V1_1 = 'v1.1' - V2 = 'v2-fake' # uses sync-v1 to mock sync-v2 + V2 = 'v2' def __str__(self): return f'sync-{self.value}' diff --git a/hathor/transaction/storage/transaction_storage.py b/hathor/transaction/storage/transaction_storage.py index ea8534c6f..6dd195174 100644 --- a/hathor/transaction/storage/transaction_storage.py +++ b/hathor/transaction/storage/transaction_storage.py @@ -1043,6 +1043,14 @@ def iter_mempool_from_tx_tips(self) -> Iterator[Transaction]: assert isinstance(tx, Transaction) yield tx + def iter_mempool_tips_from_best_index(self) -> Iterator[Transaction]: + """Get tx tips in the mempool, using the best available index (mempool_tips or tx_tips)""" + assert self.indexes is not None + if self.indexes.mempool_tips is not None: + yield from self.indexes.mempool_tips.iter(self) + else: + yield from self.iter_mempool_tips_from_tx_tips() + def iter_mempool_from_best_index(self) -> Iterator[Transaction]: """Get all transactions in the mempool, using the best available index (mempool_tips or tx_tips)""" assert self.indexes is not None diff --git a/tests/event/test_event_simulation_scenarios.py b/tests/event/test_event_simulation_scenarios.py index c5f8398f7..fea2548ec 100644 --- a/tests/event/test_event_simulation_scenarios.py +++ b/tests/event/test_event_simulation_scenarios.py @@ -243,30 +243,30 @@ def test_reorg(self): UnorderedList([ # One VERTEX_METADATA_CHANGED for a new block (below), and one VERTEX_METADATA_CHANGED for each genesis tx (2), adding the new block as their child # noqa E501 # Also one VERTEX_METADATA_CHANGED for the previous block, voiding it - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=9, timestamp=1578878955.75, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', '4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7'], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=12, timestamp=1578878955.75, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', '4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7'], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=11, timestamp=1578878955.75, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', nonce=2246536493, timestamp=1578878940, version=0, weight=2.0, 
inputs=[], outputs=[TxOutput(value=6400, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', token_data=0)], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', spent_outputs=[], conflict_with=[], voided_by=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=10, timestamp=1578878955.75, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', nonce=1279525218, timestamp=1578878940, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', token_data=0)], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', spent_outputs=[], conflict_with=[], voided_by=['4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=9, timestamp=1578878949.75, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', '4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7'], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=12, timestamp=1578878949.75, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', '4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7'], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=11, timestamp=1578878949.75, 
type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', nonce=2246536493, timestamp=1578878940, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', token_data=0)], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', spent_outputs=[], conflict_with=[], voided_by=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=10, timestamp=1578878949.75, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', nonce=1279525218, timestamp=1578878940, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', token_data=0)], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', spent_outputs=[], conflict_with=[], voided_by=['4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 ]), [ # One NEW_VERTEX_ACCEPTED for a new block from manager2 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=13, timestamp=1578878955.75, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', nonce=1279525218, timestamp=1578878940, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', token_data=0)], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', spent_outputs=[], conflict_with=[], voided_by=['4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=13, timestamp=1578878949.75, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', nonce=1279525218, timestamp=1578878940, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', token_data=0)], 
parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', spent_outputs=[], conflict_with=[], voided_by=['4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 # REORG_STARTED caused by a new block from manager2 (below) - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=14, timestamp=1578878956.0, type=EventType.REORG_STARTED, data=ReorgData(reorg_size=1, previous_best_block='3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', new_best_block='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', common_block='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792'), group_id=0), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=14, timestamp=1578878950.0, type=EventType.REORG_STARTED, data=ReorgData(reorg_size=1, previous_best_block='3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', new_best_block='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', common_block='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792'), group_id=0), latest_event_id=20), # noqa E501 ], UnorderedList([ # One VERTEX_METADATA_CHANGED for a new block (below), and one VERTEX_METADATA_CHANGED for each genesis tx (2), adding the new block as their child # noqa E501 # Also one VERTEX_METADATA_CHANGED for the previous block, un-voiding it as it's now part of the best blockchain # noqa E501 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=15, timestamp=1578878956.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', '4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', 'a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4'], twins=[], accumulated_weight=2.0, score=2.0, first_block='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', height=0, validation='full'), aux_pow=None), group_id=0), latest_event_id=20), # noqa E501 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=18, timestamp=1578878956.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', '4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', 
'a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4'], twins=[], accumulated_weight=2.0, score=2.0, first_block='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', height=0, validation='full'), aux_pow=None), group_id=0), latest_event_id=20), # noqa E501 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=17, timestamp=1578878956.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', nonce=1279525218, timestamp=1578878940, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', token_data=0)], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=0), latest_event_id=20), # noqa E501 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=16, timestamp=1578878956.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', nonce=4136633663, timestamp=1578878941, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUgQrqLefPfPVpkXlfvvAp943epyOIrA==', token_data=0)], parents=['4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=0), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=15, timestamp=1578878950.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', '4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', 'a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4'], twins=[], accumulated_weight=2.0, score=2.0, first_block='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', height=0, validation='full'), aux_pow=None), group_id=0), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=18, timestamp=1578878950.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, 
inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', '4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', 'a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4'], twins=[], accumulated_weight=2.0, score=2.0, first_block='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', height=0, validation='full'), aux_pow=None), group_id=0), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=17, timestamp=1578878950.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', nonce=1279525218, timestamp=1578878940, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', token_data=0)], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=0), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=16, timestamp=1578878950.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', nonce=4136633663, timestamp=1578878941, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUgQrqLefPfPVpkXlfvvAp943epyOIrA==', token_data=0)], parents=['4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=0), latest_event_id=20), # noqa E501 ]), [ # REORG_FINISHED - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=19, timestamp=1578878956.0, type=EventType.REORG_FINISHED, data=EmptyData(), group_id=0), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=19, timestamp=1578878950.0, type=EventType.REORG_FINISHED, data=EmptyData(), group_id=0), latest_event_id=20), # noqa E501 # One NEW_VERTEX_ACCEPTED for a new block from manager2 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=20, timestamp=1578878956.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', nonce=4136633663, timestamp=1578878941, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUgQrqLefPfPVpkXlfvvAp943epyOIrA==', 
token_data=0)], parents=['4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=None), latest_event_id=20) # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=20, timestamp=1578878950.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', nonce=4136633663, timestamp=1578878941, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUgQrqLefPfPVpkXlfvvAp943epyOIrA==', token_data=0)], parents=['4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=None), latest_event_id=20) # noqa E501 ] ] diff --git a/tests/p2p/test_capabilities.py b/tests/p2p/test_capabilities.py index d2e4f5737..cfc4719f0 100644 --- a/tests/p2p/test_capabilities.py +++ b/tests/p2p/test_capabilities.py @@ -1,4 +1,6 @@ from hathor.conf import HathorSettings +from hathor.p2p.sync_v1.agent import NodeSyncTimestamp +from hathor.p2p.sync_v2.manager import NodeBlockSync from hathor.simulator import FakeConnection from tests import unittest @@ -21,6 +23,8 @@ def test_capabilities(self): # Even if we don't have the capability we must connect because the whitelist url conf is None self.assertEqual(conn._proto1.state.state_name, 'READY') self.assertEqual(conn._proto2.state.state_name, 'READY') + self.assertIsInstance(conn._proto1.state.sync_manager, NodeSyncTimestamp) + self.assertIsInstance(conn._proto2.state.sync_manager, NodeSyncTimestamp) manager3 = self.create_peer(network, capabilities=[settings.CAPABILITY_WHITELIST]) manager4 = self.create_peer(network, capabilities=[settings.CAPABILITY_WHITELIST]) @@ -34,10 +38,46 @@ def test_capabilities(self): self.assertEqual(conn2._proto1.state.state_name, 'READY') self.assertEqual(conn2._proto2.state.state_name, 'READY') + self.assertIsInstance(conn2._proto1.state.sync_manager, NodeSyncTimestamp) + self.assertIsInstance(conn2._proto2.state.sync_manager, NodeSyncTimestamp) class SyncV2HathorCapabilitiesTestCase(unittest.SyncV2Params, unittest.TestCase): - __test__ = True + def test_capabilities(self): + network = 'testnet' + manager1 = self.create_peer(network, capabilities=[settings.CAPABILITY_WHITELIST, + settings.CAPABILITY_SYNC_VERSION]) + manager2 = self.create_peer(network, capabilities=[settings.CAPABILITY_SYNC_VERSION]) + + conn = FakeConnection(manager1, manager2) + + # Run the p2p protocol. 
+ for _ in range(100): + conn.run_one_step(debug=True) + self.clock.advance(0.1) + + # Even if we don't have the capability we must connect because the whitelist url conf is None + self.assertEqual(conn._proto1.state.state_name, 'READY') + self.assertEqual(conn._proto2.state.state_name, 'READY') + self.assertIsInstance(conn._proto1.state.sync_manager, NodeBlockSync) + self.assertIsInstance(conn._proto2.state.sync_manager, NodeBlockSync) + + manager3 = self.create_peer(network, capabilities=[settings.CAPABILITY_WHITELIST, + settings.CAPABILITY_SYNC_VERSION]) + manager4 = self.create_peer(network, capabilities=[settings.CAPABILITY_WHITELIST, + settings.CAPABILITY_SYNC_VERSION]) + + conn2 = FakeConnection(manager3, manager4) + + # Run the p2p protocol. + for _ in range(100): + conn2.run_one_step(debug=True) + self.clock.advance(0.1) + + self.assertEqual(conn2._proto1.state.state_name, 'READY') + self.assertEqual(conn2._proto2.state.state_name, 'READY') + self.assertIsInstance(conn2._proto1.state.sync_manager, NodeBlockSync) + self.assertIsInstance(conn2._proto2.state.sync_manager, NodeBlockSync) # sync-bridge should behave like sync-v2 diff --git a/tests/p2p/test_protocol.py b/tests/p2p/test_protocol.py index c661545b1..268005ad0 100644 --- a/tests/p2p/test_protocol.py +++ b/tests/p2p/test_protocol.py @@ -1,4 +1,5 @@ from json import JSONDecodeError +from typing import Optional from twisted.internet.defer import inlineCallbacks from twisted.python.failure import Failure @@ -25,6 +26,14 @@ def setUp(self): self.manager2 = self.create_peer(self.network, peer_id=self.peer_id2) self.conn = FakeConnection(self.manager1, self.manager2) + def assertAndStepConn(self, conn: FakeConnection, regex1: bytes, regex2: Optional[bytes] = None) -> None: + """If only one regex is given it is tested on both cons, if two are given they'll be used respectively.""" + if regex2 is None: + regex2 = regex1 + self.assertRegex(conn.peek_tr1_value(), regex1) + self.assertRegex(conn.peek_tr2_value(), regex2) + conn.run_one_step() + def assertIsConnected(self, conn=None): if conn is None: conn = self.conn @@ -157,20 +166,6 @@ def test_valid_hello(self): self.assertFalse(self.conn.tr1.disconnecting) self.assertFalse(self.conn.tr2.disconnecting) - @inlineCallbacks - def test_invalid_peer_id(self): - self.conn.run_one_step() # HELLO - self.conn.run_one_step() # PEER-ID - self.conn.run_one_step() # READY - self.conn.run_one_step() # GET-PEERS - self.conn.run_one_step() # GET-TIPS - self.conn.run_one_step() # PEERS - self.conn.run_one_step() # TIPS - invalid_payload = {'id': '123', 'entrypoints': ['tcp://localhost:1234']} - yield self._send_cmd(self.conn.proto1, 'PEER-ID', json_dumps(invalid_payload)) - self._check_result_only_cmd(self.conn.peek_tr1_value(), b'ERROR') - self.assertTrue(self.conn.tr1.disconnecting) - def test_invalid_same_peer_id(self): manager3 = self.create_peer(self.network, peer_id=self.peer_id1) conn = FakeConnection(self.manager1, manager3) @@ -218,14 +213,13 @@ def test_invalid_same_peer_id2(self): self.conn.run_until_empty() conn.run_until_empty() self.run_to_completion() - # one of the peers will close the connection. We don't know which on, as it depends + # one of the peers will close the connection. 
We don't know which one, as it depends # on the peer ids - conn1_value = self.conn.peek_tr1_value() + self.conn.peek_tr2_value() - conn2_value = conn.peek_tr1_value() + conn.peek_tr2_value() - if b'ERROR' in conn1_value: + + if self.conn.tr1.disconnecting or self.conn.tr2.disconnecting: conn_dead = self.conn conn_alive = conn - elif b'ERROR' in conn2_value: + elif conn.tr1.disconnecting or conn.tr2.disconnecting: conn_dead = conn conn_alive = self.conn else: @@ -248,51 +242,6 @@ def test_invalid_different_network(self): self.assertTrue(conn.tr1.disconnecting) conn.run_one_step() # ERROR - def test_valid_hello_and_peer_id(self): - self._check_result_only_cmd(self.conn.peek_tr1_value(), b'HELLO') - self._check_result_only_cmd(self.conn.peek_tr2_value(), b'HELLO') - self.conn.run_one_step() # HELLO - self._check_result_only_cmd(self.conn.peek_tr1_value(), b'PEER-ID') - self._check_result_only_cmd(self.conn.peek_tr2_value(), b'PEER-ID') - self.conn.run_one_step() # PEER-ID - self._check_result_only_cmd(self.conn.peek_tr1_value(), b'READY') - self._check_result_only_cmd(self.conn.peek_tr2_value(), b'READY') - self.conn.run_one_step() # READY - self._check_result_only_cmd(self.conn.peek_tr1_value(), b'GET-PEERS') - self._check_result_only_cmd(self.conn.peek_tr2_value(), b'GET-PEERS') - self.conn.run_one_step() # GET-PEERS - self._check_result_only_cmd(self.conn.peek_tr1_value(), b'GET-TIPS') - self._check_result_only_cmd(self.conn.peek_tr2_value(), b'GET-TIPS') - self.conn.run_one_step() # GET-TIPS - self.assertIsConnected() - self._check_result_only_cmd(self.conn.peek_tr1_value(), b'PEERS') - self._check_result_only_cmd(self.conn.peek_tr2_value(), b'PEERS') - self.conn.run_one_step() # PEERS - self._check_result_only_cmd(self.conn.peek_tr1_value(), b'TIPS') - self._check_result_only_cmd(self.conn.peek_tr2_value(), b'TIPS') - self.conn.run_one_step() # TIPS - self.assertIsConnected() - - def test_send_ping(self): - self.conn.run_one_step() # HELLO - self.conn.run_one_step() # PEER-ID - self.conn.run_one_step() # READY - self.conn.run_one_step() # GET-PEERS - self.conn.run_one_step() # GET-TIPS - self.conn.run_one_step() # PEERS - self.conn.run_one_step() # TIPS - self.assertIsConnected() - self.clock.advance(5) - self.assertEqual(b'PING\r\n', self.conn.peek_tr1_value()) - self.assertEqual(b'PING\r\n', self.conn.peek_tr2_value()) - self.conn.run_one_step() # PING - self.conn.run_one_step() # GET-TIPS - self.assertEqual(b'PONG\r\n', self.conn.peek_tr1_value()) - self.assertEqual(b'PONG\r\n', self.conn.peek_tr2_value()) - while b'PONG\r\n' in self.conn.peek_tr1_value(): - self.conn.run_one_step() - self.assertEqual(self.clock.seconds(), self.conn.proto1.last_message) - def test_send_invalid_unicode(self): # \xff is an invalid unicode. 
self.conn.proto1.dataReceived(b'\xff\r\n') @@ -330,6 +279,16 @@ def test_on_disconnect_after_peer_id(self): # Peer id 2 removed from peer_storage (known_peers) after disconnection and after looping call self.assertNotIn(self.peer_id2.id, self.manager1.connections.peer_storage) + def test_idle_connection(self): + self.clock.advance(settings.PEER_IDLE_TIMEOUT - 10) + self.assertIsConnected(self.conn) + self.clock.advance(15) + self.assertIsNotConnected(self.conn) + + +class SyncV1HathorProtocolTestCase(unittest.SyncV1Params, BaseHathorProtocolTestCase): + __test__ = True + def test_two_connections(self): self.conn.run_one_step() # HELLO self.conn.run_one_step() # PEER-ID @@ -347,12 +306,6 @@ def test_two_connections(self): self._check_result_only_cmd(self.conn.peek_tr1_value(), b'PEERS') self.conn.run_one_step() - def test_idle_connection(self): - self.clock.advance(settings.PEER_IDLE_TIMEOUT - 10) - self.assertIsConnected(self.conn) - self.clock.advance(15) - self.assertIsNotConnected(self.conn) - @inlineCallbacks def test_get_data(self): self.conn.run_one_step() # HELLO @@ -368,14 +321,170 @@ def test_get_data(self): self._check_result_only_cmd(self.conn.peek_tr1_value(), b'NOT-FOUND') self.conn.run_one_step() + def test_valid_hello_and_peer_id(self): + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'HELLO') + self._check_result_only_cmd(self.conn.peek_tr2_value(), b'HELLO') + self.conn.run_one_step() # HELLO + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'PEER-ID') + self._check_result_only_cmd(self.conn.peek_tr2_value(), b'PEER-ID') + self.conn.run_one_step() # PEER-ID + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'READY') + self._check_result_only_cmd(self.conn.peek_tr2_value(), b'READY') + self.conn.run_one_step() # READY + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'GET-PEERS') + self._check_result_only_cmd(self.conn.peek_tr2_value(), b'GET-PEERS') + self.conn.run_one_step() # GET-PEERS + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'GET-TIPS') + self._check_result_only_cmd(self.conn.peek_tr2_value(), b'GET-TIPS') + self.conn.run_one_step() # GET-TIPS + self.assertIsConnected() + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'PEERS') + self._check_result_only_cmd(self.conn.peek_tr2_value(), b'PEERS') + self.conn.run_one_step() # PEERS + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'TIPS') + self._check_result_only_cmd(self.conn.peek_tr2_value(), b'TIPS') + self.conn.run_one_step() # TIPS + self.assertIsConnected() -class SyncV1HathorProtocolTestCase(unittest.SyncV1Params, BaseHathorProtocolTestCase): - __test__ = True + def test_send_ping(self): + self.conn.run_one_step() # HELLO + self.conn.run_one_step() # PEER-ID + self.conn.run_one_step() # READY + self.conn.run_one_step() # GET-PEERS + self.conn.run_one_step() # GET-TIPS + self.conn.run_one_step() # PEERS + self.conn.run_one_step() # TIPS + self.assertIsConnected() + self.clock.advance(5) + self.assertEqual(b'PING\r\n', self.conn.peek_tr1_value()) + self.assertEqual(b'PING\r\n', self.conn.peek_tr2_value()) + self.conn.run_one_step() # PING + self.conn.run_one_step() # GET-TIPS + self.assertEqual(b'PONG\r\n', self.conn.peek_tr1_value()) + self.assertEqual(b'PONG\r\n', self.conn.peek_tr2_value()) + while b'PONG\r\n' in self.conn.peek_tr1_value(): + self.conn.run_one_step() + self.assertEqual(self.clock.seconds(), self.conn.proto1.last_message) + + @inlineCallbacks + def test_invalid_peer_id(self): + self.conn.run_one_step() # HELLO + 
self.conn.run_one_step() # PEER-ID + self.conn.run_one_step() # READY + self.conn.run_one_step() # GET-PEERS + self.conn.run_one_step() # GET-TIPS + self.conn.run_one_step() # PEERS + self.conn.run_one_step() # TIPS + invalid_payload = {'id': '123', 'entrypoints': ['tcp://localhost:1234']} + yield self._send_cmd(self.conn.proto1, 'PEER-ID', json_dumps(invalid_payload)) + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'ERROR') + self.assertTrue(self.conn.tr1.disconnecting) class SyncV2HathorProtocolTestCase(unittest.SyncV2Params, BaseHathorProtocolTestCase): __test__ = True + def test_two_connections(self): + self.assertAndStepConn(self.conn, b'^HELLO') + self.assertAndStepConn(self.conn, b'^PEER-ID') + self.assertAndStepConn(self.conn, b'^READY') + self.assertAndStepConn(self.conn, b'^GET-PEERS') + self.assertAndStepConn(self.conn, b'^GET-BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^PEERS') + self.assertAndStepConn(self.conn, b'^BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^RELAY') + self.assertIsConnected() + + # disable timeout because we will make several steps on a new conn and this might get left behind + self.conn.disable_idle_timeout() + + manager3 = self.create_peer(self.network, enable_sync_v2=True) + conn = FakeConnection(self.manager1, manager3) + self.assertAndStepConn(conn, b'^HELLO') + self.assertAndStepConn(conn, b'^PEER-ID') + self.assertAndStepConn(conn, b'^READY') + self.assertAndStepConn(conn, b'^GET-PEERS') + + self.clock.advance(5) + self.assertIsConnected() + self.assertAndStepConn(self.conn, b'^GET-TIPS') + self.assertAndStepConn(self.conn, b'^PING') + # peer1 should now send a PEERS with the new peer that just connected + self.assertAndStepConn(self.conn, b'^PEERS', b'^TIPS') + self.assertAndStepConn(self.conn, b'^TIPS', b'^TIPS') + self.assertAndStepConn(self.conn, b'^TIPS', b'^TIPS-END') + self.assertAndStepConn(self.conn, b'^TIPS-END', b'^PONG') + self.assertAndStepConn(self.conn, b'^PONG', b'^$') + self.assertIsConnected() + + @inlineCallbacks + def test_get_data(self): + self.assertAndStepConn(self.conn, b'^HELLO') + self.assertAndStepConn(self.conn, b'^PEER-ID') + self.assertAndStepConn(self.conn, b'^READY') + self.assertAndStepConn(self.conn, b'^GET-PEERS') + self.assertAndStepConn(self.conn, b'^GET-BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^PEERS') + self.assertAndStepConn(self.conn, b'^BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^RELAY') + self.assertIsConnected() + missing_tx = '00000000228dfcd5dec1c9c6263f6430a5b4316bb9e3decb9441a6414bfd8697' + payload = {'child': missing_tx, 'last_block': settings.GENESIS_BLOCK_HASH.hex()} + yield self._send_cmd(self.conn.proto1, 'GET-BLOCK-TXS', json_dumps(payload)) + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'NOT-FOUND') + self.conn.run_one_step() + + def test_valid_hello_and_peer_id(self): + self.assertAndStepConn(self.conn, b'^HELLO') + self.assertAndStepConn(self.conn, b'^PEER-ID') + self.assertAndStepConn(self.conn, b'^READY') + self.assertAndStepConn(self.conn, b'^GET-PEERS') + self.assertAndStepConn(self.conn, b'^GET-BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^PEERS') + self.assertAndStepConn(self.conn, b'^BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^RELAY') + + # this will tick the ping-pong mechanism and looping calls + self.clock.advance(5) + self.assertIsConnected() + self.assertAndStepConn(self.conn, b'^GET-TIPS') + self.assertAndStepConn(self.conn, b'^PING') + self.assertAndStepConn(self.conn, b'^TIPS') + self.assertAndStepConn(self.conn, 
b'^TIPS') + self.assertAndStepConn(self.conn, b'^TIPS-END') + self.assertAndStepConn(self.conn, b'^PONG') + self.assertIsConnected() + + self.clock.advance(5) + self.assertAndStepConn(self.conn, b'^PING') + self.assertAndStepConn(self.conn, b'^GET-BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^PONG') + self.assertAndStepConn(self.conn, b'^BEST-BLOCK') + self.assertIsConnected() + + def test_send_ping(self): + self.assertAndStepConn(self.conn, b'^HELLO') + self.assertAndStepConn(self.conn, b'^PEER-ID') + self.assertAndStepConn(self.conn, b'^READY') + self.assertAndStepConn(self.conn, b'^GET-PEERS') + self.assertAndStepConn(self.conn, b'^GET-BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^PEERS') + self.assertAndStepConn(self.conn, b'^BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^RELAY') + + # this will tick the ping-pong mechanism and looping calls + self.clock.advance(5) + self.assertAndStepConn(self.conn, b'^GET-TIPS') + self.assertAndStepConn(self.conn, b'^PING') + self.assertAndStepConn(self.conn, b'^TIPS') + self.assertAndStepConn(self.conn, b'^TIPS') + self.assertAndStepConn(self.conn, b'^TIPS-END') + self.assertEqual(b'PONG\r\n', self.conn.peek_tr1_value()) + self.assertEqual(b'PONG\r\n', self.conn.peek_tr2_value()) + while b'PONG\r\n' in self.conn.peek_tr1_value(): + self.conn.run_one_step() + self.assertEqual(self.clock.seconds(), self.conn.proto1.last_message) + # sync-bridge should behave like sync-v2 class SyncBridgeHathorProtocolTestCase(unittest.SyncBridgeParams, SyncV2HathorProtocolTestCase): diff --git a/tests/p2p/test_split_brain.py b/tests/p2p/test_split_brain.py index ca07ca7a1..02214a56b 100644 --- a/tests/p2p/test_split_brain.py +++ b/tests/p2p/test_split_brain.py @@ -9,7 +9,7 @@ from tests.utils import add_blocks_unlock_reward, add_new_block, add_new_double_spending, add_new_transactions -class BaseHathorSplitBrainTestCase(unittest.TestCase): +class BaseHathorSyncMethodsTestCase(unittest.TestCase): __test__ = False def setUp(self): @@ -37,7 +37,7 @@ def create_peer(self, network, unlock_wallet=True): return manager @pytest.mark.slow - def test_split_brain(self): + def test_split_brain_plain(self): debug_pdf = False manager1 = self.create_peer(self.network, unlock_wallet=True) @@ -72,16 +72,60 @@ def test_split_brain(self): conn = FakeConnection(manager1, manager2) - conn.run_one_step() # HELLO - conn.run_one_step() # PEER-ID - conn.run_one_step() # READY - conn.run_one_step() # GET-PEERS - conn.run_one_step() # GET-TIPS - conn.run_one_step() # PEERS - conn.run_one_step() # TIPS + # upper limit to how many steps it definitely should be enough + for i in range(3000): + if not conn.can_step(): + break + conn.run_one_step() + self.clock.advance(0.2) + else: + # error if we fall off the loop without breaking + self.fail('took more steps than expected') + self.log.debug('steps', count=i) + for i in range(500): + conn.run_one_step() + self.clock.advance(0.2) + + if debug_pdf: + dot1 = GraphvizVisualizer(manager1.tx_storage, include_verifications=True).dot() + dot1.render('dot1-post') + dot2 = GraphvizVisualizer(manager2.tx_storage, include_verifications=True).dot() + dot2.render('dot2-post') + + node_sync = conn.proto1.state.sync_manager + self.assertSyncedProgress(node_sync) + self.assertTipsEqual(manager1, manager2) + self.assertConsensusEqual(manager1, manager2) + self.assertConsensusValid(manager1) + self.assertConsensusValid(manager2) + + @pytest.mark.slow + def test_split_brain_only_blocks_different_height(self): + manager1 = 
self.create_peer(self.network, unlock_wallet=True) + manager1.avg_time_between_blocks = 3 + + manager2 = self.create_peer(self.network, unlock_wallet=True) + manager2.avg_time_between_blocks = 3 + + for _ in range(10): + add_new_block(manager1, advance_clock=1) + add_blocks_unlock_reward(manager1) + add_new_block(manager2, advance_clock=1) + add_blocks_unlock_reward(manager2) + self.clock.advance(10) + + # Add one more block to manager1, so it's the winner chain + add_new_block(manager1, advance_clock=1) + + block_tip1 = manager1.tx_storage.indexes.height.get_tip() + + self.assertConsensusValid(manager1) + self.assertConsensusValid(manager2) + + conn = FakeConnection(manager1, manager2) empty_counter = 0 - for i in range(2000): + for i in range(1000): if conn.is_empty(): empty_counter += 1 if empty_counter > 10: @@ -90,30 +134,273 @@ def test_split_brain(self): empty_counter = 0 conn.run_one_step() - self.clock.advance(0.2) + self.clock.advance(1) - if debug_pdf: - dot1 = GraphvizVisualizer(manager1.tx_storage, include_verifications=True).dot() - dot1.render('dot1-post') - dot2 = GraphvizVisualizer(manager2.tx_storage, include_verifications=True).dot() - dot2.render('dot2-post') + self.assertConsensusValid(manager1) + self.assertConsensusValid(manager2) + self.assertConsensusEqual(manager1, manager2) - node_sync = conn.proto1.state.sync_manager - self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp) - self.assertTipsEqual(manager1, manager2) + self.assertEqual(block_tip1, manager1.tx_storage.indexes.height.get_tip()) + self.assertEqual(block_tip1, manager2.tx_storage.indexes.height.get_tip()) + + # XXX We must decide what to do when different chains have the same score + # For now we are voiding everyone until the first common block + def test_split_brain_only_blocks_same_height(self): + manager1 = self.create_peer(self.network, unlock_wallet=True) + manager1.avg_time_between_blocks = 3 + + manager2 = self.create_peer(self.network, unlock_wallet=True) + manager2.avg_time_between_blocks = 3 + + for _ in range(10): + add_new_block(manager1, advance_clock=1) + unlock_reward_blocks1 = add_blocks_unlock_reward(manager1) + add_new_block(manager2, advance_clock=1) + unlock_reward_blocks2 = add_blocks_unlock_reward(manager2) + self.clock.advance(10) + + block_tips1 = unlock_reward_blocks1[-1].hash + block_tips2 = unlock_reward_blocks2[-1].hash + + self.assertEqual(len(manager1.tx_storage.get_best_block_tips()), 1) + self.assertCountEqual(manager1.tx_storage.get_best_block_tips(), {block_tips1}) + self.assertEqual(len(manager2.tx_storage.get_best_block_tips()), 1) + self.assertCountEqual(manager2.tx_storage.get_best_block_tips(), {block_tips2}) + + # Save winners for manager1 and manager2 + winners1 = set() + for tx1 in manager1.tx_storage.get_all_transactions(): + tx1_meta = tx1.get_metadata() + if not tx1_meta.voided_by: + winners1.add(tx1.hash) + + winners2 = set() + for tx2 in manager2.tx_storage.get_all_transactions(): + tx2_meta = tx2.get_metadata() + if not tx2_meta.voided_by: + winners2.add(tx2.hash) + + self.assertConsensusValid(manager1) + self.assertConsensusValid(manager2) + + conn = FakeConnection(manager1, manager2) + + empty_counter = 0 + for i in range(1000): + if conn.is_empty(): + empty_counter += 1 + if empty_counter > 10: + break + else: + empty_counter = 0 + + conn.run_one_step() + self.clock.advance(1) + + self.assertConsensusValid(manager1) + self.assertConsensusValid(manager2) + + # self.assertEqual(len(manager1.tx_storage.get_best_block_tips()), 2) + # 
self.assertCountEqual(manager1.tx_storage.get_best_block_tips(), {block_tips1, block_tips2}) + # self.assertEqual(len(manager2.tx_storage.get_best_block_tips()), 2) + # self.assertCountEqual(manager2.tx_storage.get_best_block_tips(), {block_tips1, block_tips2}) + + winners1_after = set() + for tx1 in manager1.tx_storage.get_all_transactions(): + tx1_meta = tx1.get_metadata() + if not tx1_meta.voided_by: + winners1_after.add(tx1.hash) + + winners2_after = set() + for tx2 in manager2.tx_storage.get_all_transactions(): + tx2_meta = tx2.get_metadata() + if not tx2_meta.voided_by: + winners2_after.add(tx2.hash) + + # Both chains have the same height and score + # so they will void all blocks and keep only the genesis (the common block and txs) + self.assertEqual(len(winners1_after), 3) + self.assertEqual(len(winners2_after), 3) + + new_block = add_new_block(manager1, advance_clock=1) + self.clock.advance(20) + + empty_counter = 0 + for i in range(500): + if conn.is_empty(): + empty_counter += 1 + if empty_counter > 10: + break + else: + empty_counter = 0 + + conn.run_one_step() + self.clock.advance(1) + + self.assertConsensusValid(manager1) + self.assertConsensusValid(manager2) + + winners1_after = set() + for tx1 in manager1.tx_storage.get_all_transactions(): + tx1_meta = tx1.get_metadata() + if not tx1_meta.voided_by: + winners1_after.add(tx1.hash) + + winners2_after = set() + for tx2 in manager2.tx_storage.get_all_transactions(): + tx2_meta = tx2.get_metadata() + if not tx2_meta.voided_by: + winners2_after.add(tx2.hash) + + winners1.add(new_block.hash) + winners2.add(new_block.hash) + + if new_block.get_block_parent().hash == block_tips1: + winners = winners1 + else: + winners = winners2 + + self.assertCountEqual(winners, winners1_after) + self.assertCountEqual(winners, winners2_after) + + self.assertEqual(len(manager1.tx_storage.get_best_block_tips()), 1) + self.assertCountEqual(manager1.tx_storage.get_best_block_tips(), {new_block.hash}) + self.assertEqual(len(manager2.tx_storage.get_best_block_tips()), 1) + self.assertCountEqual(manager2.tx_storage.get_best_block_tips(), {new_block.hash}) + + def test_split_brain_only_blocks_bigger_score(self): + manager1 = self.create_peer(self.network, unlock_wallet=True) + manager1.avg_time_between_blocks = 3 + + manager2 = self.create_peer(self.network, unlock_wallet=True) + manager2.avg_time_between_blocks = 3 + + # Start with 1 because of the genesis block + manager2_blocks = 1 + for _ in range(10): + add_new_block(manager1, advance_clock=1) + add_blocks_unlock_reward(manager1) + add_new_block(manager2, advance_clock=1) + manager2_blocks += 1 + blocks2 = add_blocks_unlock_reward(manager2) + manager2_blocks += len(blocks2) + self.clock.advance(10) + + # Add two more blocks to manager1, so it's the winner chain + add_new_block(manager1, advance_clock=1) + add_new_block(manager1, advance_clock=1) + + # Propagates a block with bigger weight, so the score of the manager2 chain + # will be bigger than the other one + b = add_new_block(manager2, advance_clock=1, propagate=False) + b.weight = 5 + b.resolve() + manager2.propagate_tx(b) + manager2_blocks += 1 + + self.assertConsensusValid(manager1) + self.assertConsensusValid(manager2) + + conn = FakeConnection(manager1, manager2) + + empty_counter = 0 + for i in range(1000): + if conn.is_empty(): + empty_counter += 1 + if empty_counter > 10: + break + else: + empty_counter = 0 + + conn.run_one_step() + self.clock.advance(1) + + self.assertConsensusValid(manager1) + self.assertConsensusValid(manager2) 
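+        # NOTE: assertConsensusEqual and assertConsensusValid dispatch to
+        # sync-v1 or sync-v2 variants depending on the test class params (see
+        # the tests/unittest.py hunk at the end of this diff)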
+        self.assertConsensusEqual(manager1, manager2)
+
+        winners2_blocks = 0
+        for tx2 in manager2.tx_storage.get_all_transactions():
+            tx2_meta = tx2.get_metadata()
+            if tx2.is_block and not tx2_meta.voided_by:
+                winners2_blocks += 1
+
+        # Assert that the consensus chose the manager2 chain
+        self.assertEqual(winners2_blocks, manager2_blocks)
+
+    def test_split_brain_no_double_spending(self):
+        manager1 = self.create_peer(self.network, unlock_wallet=True)
+        manager1.avg_time_between_blocks = 3
+        manager1.connections.disable_rate_limiter()
+
+        manager2 = self.create_peer(self.network, unlock_wallet=True)
+        manager2.avg_time_between_blocks = 3
+        manager2.connections.disable_rate_limiter()
+
+        winner_blocks = 1
+        winner_txs = 2
+
+        for _ in range(10):
+            add_new_block(manager1, advance_clock=1)
+            add_blocks_unlock_reward(manager1)
+            add_new_block(manager2, advance_clock=1)
+            winner_blocks += 1
+            blocks = add_blocks_unlock_reward(manager2)
+            winner_blocks += len(blocks)
+            self.clock.advance(10)
+            for _ in range(self.rng.randint(3, 10)):
+                add_new_transactions(manager1, self.rng.randint(2, 4), advance_clock=1)
+                txs = add_new_transactions(manager2, self.rng.randint(3, 7), advance_clock=1)
+                winner_txs += len(txs)
+            self.clock.advance(10)
+
+        self.clock.advance(20)
+
+        # Manager2 will be the winner because it has the longest chain
+        add_new_block(manager2, advance_clock=1)
+        winner_blocks += 1
+        self.clock.advance(20)
+
         self.assertConsensusValid(manager1)
         self.assertConsensusValid(manager2)
 
+        conn = FakeConnection(manager1, manager2)
+        # Disable idle timeout.
+        conn.disable_idle_timeout()
+
+        self.log.info('starting sync now...')
+
+        # upper limit on the number of steps; it should definitely be enough
+        for i in range(3000):
+            if not conn.can_step():
+                break
+            conn.run_one_step()
+            self.clock.advance(1)
+        conn.run_until_empty()
+
+        self.log.debug('steps taken', steps=i + 1)
+
+        self.assertConsensusEqual(manager1, manager2)
+        self.assertConsensusValid(manager1)
+        self.assertConsensusValid(manager2)
+
+        winners2 = set()
+        for tx in manager2.tx_storage.get_all_transactions():
+            tx_meta = tx.get_metadata()
+            if not tx_meta.voided_by:
+                winners2.add(tx.hash)
+
+        self.assertEqual(len(winners2), winner_blocks + winner_txs)
+
 
-class SyncV1HathorSplitBrainTestCase(unittest.SyncV1Params, BaseHathorSplitBrainTestCase):
+class SyncV1HathorSyncMethodsTestCase(unittest.SyncV1Params, BaseHathorSyncMethodsTestCase):
     __test__ = True
 
 
-class SyncV2HathorSplitBrainTestCase(unittest.SyncV2Params, BaseHathorSplitBrainTestCase):
+class SyncV2HathorSyncMethodsTestCase(unittest.SyncV2Params, BaseHathorSyncMethodsTestCase):
     __test__ = True
 
 
 # sync-bridge should behave like sync-v2
-class SyncBridgeHathorSplitBrainTestCase(unittest.SyncBridgeParams, SyncV2HathorSplitBrainTestCase):
+class SyncBridgeHathorSyncMethodsTestCase(unittest.SyncBridgeParams, SyncV2HathorSyncMethodsTestCase):
     pass
diff --git a/tests/p2p/test_split_brain2.py b/tests/p2p/test_split_brain2.py
index 9fad42242..59d24e6cb 100644
--- a/tests/p2p/test_split_brain2.py
+++ b/tests/p2p/test_split_brain2.py
@@ -67,10 +67,8 @@ def test_split_brain(self):
         dot2 = GraphvizVisualizer(manager2.tx_storage, include_verifications=True).dot()
         dot2.render('dot2-post')
 
-        node_sync = conn12.proto1.state.sync_manager
-        self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp)
-        node_sync = conn12.proto2.state.sync_manager
-        self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp)
+        self.assertSyncedProgress(conn12.proto1.state.sync_manager)
+        self.assertSyncedProgress(conn12.proto2.state.sync_manager)
         self.assertTipsEqual(manager1, manager2)
         self.assertConsensusEqual(manager1, manager2)
         self.assertConsensusValid(manager1)
diff --git a/tests/p2p/test_sync.py b/tests/p2p/test_sync.py
index 7d77e451f..2c4709fb2 100644
--- a/tests/p2p/test_sync.py
+++ b/tests/p2p/test_sync.py
@@ -1,5 +1,6 @@
 from twisted.python.failure import Failure
 
+from hathor.checkpoint import Checkpoint as cp
 from hathor.conf import HathorSettings
 from hathor.crypto.util import decode_address
 from hathor.p2p.protocol import PeerIdState
@@ -7,6 +8,7 @@
 from hathor.simulator import FakeConnection
 from hathor.transaction.storage.exceptions import TransactionIsNotABlock
 from tests import unittest
+from tests.utils import add_blocks_unlock_reward
 
 settings = HathorSettings()
 
@@ -74,7 +76,4 @@ def test_get_blocks_before(self):
         genesis_block = self.genesis_blocks[0]
         result = self.manager1.tx_storage.get_blocks_before(genesis_block.hash)
         self.assertEqual(0, len(result))
-        genesis_tx = [tx for tx in self.genesis if not tx.is_block][0]
-        with self.assertRaises(TransactionIsNotABlock):
-            self.manager1.tx_storage.get_blocks_before(genesis_tx.hash)
@@ -215,8 +216,9 @@ def test_tx_propagation_nat_peers(self):
 
         self._add_new_transactions(1)
 
-        for _ in range(1000):
-            if self.conn1.is_empty() and self.conn2.is_empty():
+        for i in range(1000):
+            # XXX: give it at least 100 steps before checking for emptiness
+            if i > 100 and self.conn1.is_empty() and self.conn2.is_empty():
                 break
             self.conn1.run_one_step()
             self.conn2.run_one_step()
@@ -475,6 +477,273 @@ def test_downloader_disconnect(self):
 class SyncV2HathorSyncMethodsTestCase(unittest.SyncV2Params, BaseHathorSyncMethodsTestCase):
     __test__ = True
 
+    def test_sync_metadata(self):
+        # test if the synced peer will build all tx metadata correctly
+
+        height = 0
+        # add a mix of blocks and transactions
+        height += len(self._add_new_blocks(8))
+        height += len(add_blocks_unlock_reward(self.manager1))
+        self._add_new_transactions(2)
+        height += len(self._add_new_blocks(1))
+        self._add_new_transactions(4)
+        height += len(self._add_new_blocks(2))
+        self._add_new_transactions(2)
+
+        manager2 = self.create_peer(self.network)
+        self.assertEqual(manager2.state, manager2.NodeState.READY)
+        conn = FakeConnection(self.manager1, manager2)
+
+        for _ in range(100):
+            if conn.is_empty():
+                break
+            conn.run_one_step(debug=True)
+            self.clock.advance(1)
+
+        # check they have the same consensus
+        node_sync1 = conn.proto1.state.sync_manager
+        node_sync2 = conn.proto2.state.sync_manager
+        self.assertEqual(node_sync1.peer_height, height)
+        self.assertEqual(node_sync1.synced_height, height)
+        self.assertEqual(node_sync2.peer_height, height)
+        # 3 genesis + blocks + 8 txs
+        self.assertEqual(self.manager1.tx_storage.get_vertices_count(), height + 11)
+        self.assertEqual(manager2.tx_storage.get_vertices_count(), height + 11)
+        self.assertConsensusValid(self.manager1)
+        self.assertConsensusValid(manager2)
+        self.assertConsensusEqual(self.manager1, manager2)
+
+        # Nodes are synced. Make sure manager2 has the correct metadata.
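+        # The fields below are compared with assertCountEqual because they are
+        # unordered collections in the metadata and may be None when unset,
+        # hence the `or []` fallback.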
+ for tx in self.manager1.tx_storage.topological_iterator(): + meta1 = tx.get_metadata() + meta2 = manager2.tx_storage.get_metadata(tx.hash) + self.assertCountEqual(meta1.children or [], meta2.children or []) + self.assertCountEqual(meta1.voided_by or [], meta2.voided_by or []) + self.assertCountEqual(meta1.conflict_with or [], meta2.conflict_with or []) + self.assertCountEqual(meta1.twins or [], meta2.twins or []) + + def test_tx_propagation_nat_peers(self): + super().test_tx_propagation_nat_peers() + + node_sync1 = self.conn1.proto1.state.sync_manager + self.assertEqual(self.manager1.tx_storage.latest_timestamp, self.manager2.tx_storage.latest_timestamp) + self.assertEqual(node_sync1.peer_height, node_sync1.synced_height) + self.assertEqual(node_sync1.peer_height, self.manager1.tx_storage.get_height_best_block()) + self.assertConsensusEqual(self.manager1, self.manager2) + + node_sync2 = self.conn2.proto1.state.sync_manager + self.assertEqual(self.manager2.tx_storage.latest_timestamp, self.manager3.tx_storage.latest_timestamp) + self.assertEqual(node_sync2.peer_height, node_sync2.synced_height) + self.assertEqual(node_sync2.peer_height, self.manager2.tx_storage.get_height_best_block()) + self.assertConsensusEqual(self.manager2, self.manager3) + + def test_block_sync_new_blocks_and_txs(self): + self._add_new_blocks(25) + self._add_new_transactions(3) + self._add_new_blocks(4) + self._add_new_transactions(5) + + manager2 = self.create_peer(self.network) + self.assertEqual(manager2.state, manager2.NodeState.READY) + + conn = FakeConnection(self.manager1, manager2) + + for _ in range(1000): + conn.run_one_step() + self.clock.advance(0.1) + + # dot1 = self.manager1.tx_storage.graphviz(format='pdf') + # dot1.render('dot1') + + # dot2 = manager2.tx_storage.graphviz(format='pdf') + # dot2.render('dot2') + + node_sync = conn.proto1.state.sync_manager + self.assertEqual(self.manager1.tx_storage.latest_timestamp, manager2.tx_storage.latest_timestamp) + self.assertEqual(node_sync.peer_height, node_sync.synced_height) + self.assertEqual(node_sync.peer_height, self.manager1.tx_storage.get_height_best_block()) + self.assertConsensusEqual(self.manager1, manager2) + self.assertConsensusValid(self.manager1) + self.assertConsensusValid(manager2) + + def test_block_sync_many_new_blocks(self): + self._add_new_blocks(150) + + manager2 = self.create_peer(self.network) + self.assertEqual(manager2.state, manager2.NodeState.READY) + + conn = FakeConnection(self.manager1, manager2) + + for _ in range(1000): + if conn.is_empty(): + break + conn.run_one_step(debug=True) + self.clock.advance(1) + + node_sync = conn.proto1.state.sync_manager + self.assertEqual(node_sync.peer_height, node_sync.synced_height) + self.assertEqual(node_sync.peer_height, self.manager1.tx_storage.get_height_best_block()) + self.assertConsensusEqual(self.manager1, manager2) + self.assertConsensusValid(self.manager1) + self.assertConsensusValid(manager2) + + def test_block_sync_new_blocks(self): + self._add_new_blocks(15) + + manager2 = self.create_peer(self.network) + self.assertEqual(manager2.state, manager2.NodeState.READY) + + conn = FakeConnection(self.manager1, manager2) + + for _ in range(1000): + if conn.is_empty(): + break + conn.run_one_step(debug=True) + self.clock.advance(1) + + node_sync = conn.proto1.state.sync_manager + self.assertEqual(node_sync.peer_height, node_sync.synced_height) + self.assertEqual(node_sync.peer_height, self.manager1.tx_storage.get_height_best_block()) + self.assertConsensusEqual(self.manager1, manager2) + 
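+        # consensus equality is checked first; validity is still asserted per
+        # node, since two nodes could in principle agree on an invalid state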
self.assertConsensusValid(self.manager1) + self.assertConsensusValid(manager2) + + def test_full_sync(self): + # 10 blocks + blocks = self._add_new_blocks(10) + # N blocks to unlock the reward + unlock_reward_blocks = add_blocks_unlock_reward(self.manager1) + len_reward_unlock = len(unlock_reward_blocks) + # 3 transactions still before the last checkpoint + self._add_new_transactions(3) + # 5 more blocks and the last one is the last checkpoint + new_blocks = self._add_new_blocks(5) + + LAST_CHECKPOINT = len(blocks) + len_reward_unlock + len(new_blocks) + FIRST_CHECKPOINT = LAST_CHECKPOINT // 2 + cps = [ + cp(0, self.genesis_blocks[0].hash), + cp(FIRST_CHECKPOINT, (blocks + unlock_reward_blocks + new_blocks)[FIRST_CHECKPOINT - 1].hash), + cp(LAST_CHECKPOINT, (blocks + unlock_reward_blocks + new_blocks)[LAST_CHECKPOINT - 1].hash) + ] + + # 5 blocks after the last checkpoint + self._add_new_blocks(5) + # 3 transactions + self._add_new_transactions(3) + # 5 more blocks + self._add_new_blocks(5) + + # Add transactions to the mempool + self._add_new_transactions(2) + + self.manager1.checkpoints = cps + + manager2 = self.create_peer(self.network) + manager2.checkpoints = cps + self.assertEqual(manager2.state, manager2.NodeState.READY) + + total_count = 36 + len_reward_unlock + + self.assertEqual(self.manager1.tx_storage.get_vertices_count(), total_count) + self.assertEqual(manager2.tx_storage.get_vertices_count(), 3) + + conn = FakeConnection(self.manager1, manager2) + for i in range(300): + conn.run_one_step(debug=True) + self.clock.advance(0.1) + conn.run_until_empty(1000) + + # node_sync = conn.proto1.state.sync_manager + # self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp) + # self.assertTipsEqual(self.manager1, manager2) + common_height = 25 + len_reward_unlock + + self.assertEqual(self.manager1.tx_storage.get_height_best_block(), common_height) + self.assertEqual(manager2.tx_storage.get_height_best_block(), common_height) + + node_sync1 = conn.proto1.state.sync_manager + node_sync2 = conn.proto2.state.sync_manager + self.assertEqual(node_sync1.peer_height, common_height) + self.assertEqual(node_sync1.synced_height, common_height) + self.assertEqual(node_sync2.peer_height, common_height) + self.assertConsensusValid(self.manager1) + self.assertConsensusValid(manager2) + self.assertConsensusEqual(self.manager1, manager2) + + # 3 genesis + # 25 blocks + # Unlock reward blocks + # 8 txs + self.assertEqual(self.manager1.tx_storage.get_vertices_count(), total_count) + self.assertEqual(manager2.tx_storage.get_vertices_count(), total_count) + self.assertEqual(len(manager2.tx_storage.indexes.mempool_tips.get()), 1) + self.assertEqual(len(self.manager1.tx_storage.indexes.mempool_tips.get()), 1) + + def test_block_sync_checkpoints(self): + TOTAL_BLOCKS = 30 + LAST_CHECKPOINT = 15 + FIRST_CHECKPOINT = LAST_CHECKPOINT // 2 + blocks = self._add_new_blocks(TOTAL_BLOCKS, propagate=False) + cps = [ + cp(0, self.genesis_blocks[0].hash), + cp(FIRST_CHECKPOINT, blocks[FIRST_CHECKPOINT - 1].hash), + cp(LAST_CHECKPOINT, blocks[LAST_CHECKPOINT - 1].hash) + ] + self.manager1.checkpoints = cps + + manager2 = self.create_peer(self.network) + manager2.checkpoints = cps + self.assertEqual(manager2.state, manager2.NodeState.READY) + + conn = FakeConnection(self.manager1, manager2) + + # initial connection setup + for _ in range(100): + conn.run_one_step(debug=False) + self.clock.advance(0.1) + + # find synced timestamp + self.clock.advance(5) + for _ in range(600): + conn.run_one_step(debug=False) + 
+            self.clock.advance(0.1)
+
+        self.assertEqual(self.manager1.tx_storage.get_best_block().get_metadata().height, TOTAL_BLOCKS)
+        self.assertEqual(manager2.tx_storage.get_best_block().get_metadata().height, TOTAL_BLOCKS)
+
+        node_sync1 = conn.proto1.state.sync_manager
+        node_sync2 = conn.proto2.state.sync_manager
+        self.assertEqual(node_sync1.peer_height, TOTAL_BLOCKS)
+        self.assertEqual(node_sync1.synced_height, TOTAL_BLOCKS)
+        self.assertEqual(node_sync2.peer_height, len(blocks))
+        self.assertConsensusValid(self.manager1)
+        self.assertConsensusValid(manager2)
+
+    def test_block_sync_only_genesis(self):
+        manager2 = self.create_peer(self.network)
+        self.assertEqual(manager2.state, manager2.NodeState.READY)
+
+        conn = FakeConnection(self.manager1, manager2)
+
+        genesis_tx = [tx for tx in self.genesis if not tx.is_block][0]
+        with self.assertRaises(TransactionIsNotABlock):
+            self.manager1.tx_storage.get_blocks_before(genesis_tx.hash)
+
+        for _ in range(100):
+            if conn.is_empty():
+                break
+            conn.run_one_step(debug=True)
+            self.clock.advance(1)
+
+        node_sync = conn.proto1.state.sync_manager
+        self.assertEqual(node_sync.synced_height, 0)
+        self.assertEqual(node_sync.peer_height, 0)
+
+        self.assertEqual(self.manager1.tx_storage.get_vertices_count(), 3)
+        self.assertEqual(manager2.tx_storage.get_vertices_count(), 3)
+
     # TODO: an equivalent test to test_downloader, could be something like test_checkpoint_sync
diff --git a/tests/p2p/test_sync_bridge.py b/tests/p2p/test_sync_bridge.py
new file mode 100644
index 000000000..cdf000627
--- /dev/null
+++ b/tests/p2p/test_sync_bridge.py
@@ -0,0 +1,82 @@
+from hathor.simulator import FakeConnection
+from tests.simulation.base import SimulatorTestCase
+
+
+class MixedSyncRandomSimulatorTestCase(SimulatorTestCase):
+    __test__ = True
+
+    def test_the_three_transacting_miners(self):
+        manager1 = self.create_peer(enable_sync_v1=True, enable_sync_v2=False)
+        manager2 = self.create_peer(enable_sync_v1=True, enable_sync_v2=True)
+        manager3 = self.create_peer(enable_sync_v1=False, enable_sync_v2=True)
+
+        managers = [manager1, manager2, manager3]
+        all_managers = managers
+        miners = []
+        tx_gens = []
+
+        for manager in managers:
+            miner = self.simulator.create_miner(manager, hashpower=100e6)
+            miner.start()
+            miners.append(miner)
+            tx_gen = self.simulator.create_tx_generator(manager, rate=2 / 60., hashpower=1e6, ignore_no_funds=True)
+            tx_gen.start()
+            tx_gens.append(tx_gen)
+
+        self.simulator.run(2000)
+
+        self.simulator.add_connection(FakeConnection(manager1, manager2, latency=0.300))
+        self.simulator.add_connection(FakeConnection(manager1, manager3, latency=0.300))
+        self.simulator.add_connection(FakeConnection(manager2, manager3, latency=0.300))
+
+        for tx_gen in tx_gens:
+            tx_gen.stop()
+        for miner in miners:
+            miner.stop()
+
+        self.simulator.run_until_complete(2000, 600)
+
+        for idx, node in enumerate(all_managers):
+            self.log.debug(f'checking node {idx}')
+            self.assertConsensusValid(node)
+
+        for manager_a, manager_b in zip(all_managers[:-1], all_managers[1:]):
+            # sync-v2 consensus test is more lenient (if sync-v1 assert passes sync-v2 assert will pass too)
+            self.assertConsensusEqualSyncV2(manager_a, manager_b, strict_sync_v2_indexes=False)
+
+    def test_bridge_with_late_v2(self):
+        manager1 = self.create_peer(enable_sync_v1=True, enable_sync_v2=False)
+        manager2 = self.create_peer(enable_sync_v1=True, enable_sync_v2=True)
+        manager3 = self.create_peer(enable_sync_v1=False, enable_sync_v2=True)
+
+        managers = [manager1, manager2]
+        all_managers = [manager1, manager2, manager3]
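+        # Only manager1 and manager2 mine and generate txs at first; manager3
+        # (sync-v2 only) is connected later, after the chain has grown, to
+        # exercise a late-joining peer syncing through the bridge node.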
+        miners = []
+        tx_gens = []
+
+        for manager in managers:
+            miner = self.simulator.create_miner(manager, hashpower=100e6)
+            miner.start()
+            miners.append(miner)
+            tx_gen = self.simulator.create_tx_generator(manager, rate=2 / 60., hashpower=1e6, ignore_no_funds=True)
+            tx_gen.start()
+            tx_gens.append(tx_gen)
+
+        self.simulator.add_connection(FakeConnection(manager1, manager2, latency=0.300))
+        self.simulator.run(2000)
+
+        for tx_gen in tx_gens:
+            tx_gen.stop()
+        for miner in miners:
+            miner.stop()
+
+        self.simulator.add_connection(FakeConnection(manager2, manager3, latency=0.300))
+        self.simulator.run_until_complete(2000, 600)
+
+        for idx, node in enumerate(all_managers):
+            self.log.debug(f'checking node {idx}')
+            self.assertConsensusValid(node)
+
+        for manager_a, manager_b in zip(all_managers[:-1], all_managers[1:]):
+            # sync-v2 consensus test is more lenient (if sync-v1 assert passes sync-v2 assert will pass too)
+            self.assertConsensusEqualSyncV2(manager_a, manager_b, strict_sync_v2_indexes=False)
diff --git a/tests/p2p/test_sync_v2.py b/tests/p2p/test_sync_v2.py
new file mode 100644
index 000000000..234ca2306
--- /dev/null
+++ b/tests/p2p/test_sync_v2.py
@@ -0,0 +1,243 @@
+import pytest
+from twisted.python.failure import Failure
+
+from hathor.conf import HathorSettings
+from hathor.p2p.peer_id import PeerId
+from hathor.simulator import FakeConnection
+from hathor.simulator.trigger import StopAfterNMinedBlocks, StopAfterNTransactions, StopWhenTrue, Trigger
+from hathor.transaction.storage.traversal import DFSWalk
+from tests.simulation.base import SimulatorTestCase
+from tests.utils import HAS_ROCKSDB
+
+settings = HathorSettings()
+
+
+class BaseRandomSimulatorTestCase(SimulatorTestCase):
+    __test__ = True
+
+    def _get_partial_blocks(self, tx_storage):
+        with tx_storage.allow_partially_validated_context():
+            partial_blocks = set()
+            for tx in tx_storage.get_all_transactions():
+                if not tx.is_block:
+                    continue
+                meta = tx.get_metadata()
+                if meta.validation.is_partial():
+                    partial_blocks.add(tx.hash)
+        return partial_blocks
+
+    def _run_restart_test(self, *, full_verification: bool, use_tx_storage_cache: bool) -> None:
+        manager1 = self.create_peer(enable_sync_v1=False, enable_sync_v2=True)
+        manager1.allow_mining_without_peers()
+
+        miner1 = self.simulator.create_miner(manager1, hashpower=10e6)
+        miner1.start()
+        trigger: Trigger = StopAfterNMinedBlocks(miner1, quantity=50)
+        self.assertTrue(self.simulator.run(3 * 3600, trigger=trigger))
+
+        gen_tx1 = self.simulator.create_tx_generator(manager1, rate=2., hashpower=1e6, ignore_no_funds=True)
+        gen_tx1.start()
+        trigger = StopAfterNTransactions(gen_tx1, quantity=500)
+        self.assertTrue(self.simulator.run(3600, trigger=trigger))
+
+        # Stop mining and run again to increase the mempool.
+        miner1.stop()
+        self.simulator.run(600)
+
+        # Finally, stop all generators.
+        gen_tx1.stop()
+
+        # Create a new peer and run sync for a while (but stop before getting synced).
+        path = self.mkdtemp()
+        peer_id = PeerId()
+        builder2 = self.simulator.get_default_builder() \
+            .set_peer_id(peer_id) \
+            .disable_sync_v1() \
+            .enable_sync_v2() \
+            .use_rocksdb(path)
+
+        manager2 = self.simulator.create_peer(builder2)
+        conn12 = FakeConnection(manager1, manager2, latency=0.05)
+        self.simulator.add_connection(conn12)
+
+        # Run sync for 2 minutes so we know it's not going to complete.
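+        # While sync is still incomplete, manager2 is expected to hold
+        # partially validated blocks. As a sketch (some_hash is a placeholder),
+        # a single vertex could be checked with:
+        #     with manager2.tx_storage.allow_partially_validated_context():
+        #         meta = manager2.tx_storage.get_transaction(some_hash).get_metadata()
+        #         assert meta.validation.is_partial()
+        # _get_partial_blocks above does the same check in bulk.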
+        self.simulator.run(120)
+
+        b1 = manager1.tx_storage.get_best_block()
+        b2 = manager2.tx_storage.get_best_block()
+
+        self.assertNotEqual(b1.hash, b2.hash)
+
+        partial_blocks = self._get_partial_blocks(manager2.tx_storage)
+        self.assertGreater(len(partial_blocks), 0)
+
+        for _ in range(20):
+            print()
+        print('Stopping manager2...')
+        for _ in range(20):
+            print()
+
+        # Stop the full node.
+        conn12.disconnect(Failure(Exception('testing')))
+        self.simulator.remove_connection(conn12)
+        manager2.stop()
+        manager2.tx_storage._rocksdb_storage.close()
+        del manager2
+
+        for _ in range(20):
+            print()
+        print('Restarting manager2 as manager3...')
+        for _ in range(20):
+            print()
+
+        # Restart full node using the same db.
+        builder3 = self.simulator.get_default_builder() \
+            .set_peer_id(peer_id) \
+            .disable_sync_v1() \
+            .enable_sync_v2() \
+            .use_rocksdb(path)
+
+        if full_verification:
+            builder3.enable_full_verification()
+        else:
+            builder3.disable_full_verification()
+
+        if use_tx_storage_cache:
+            builder3.use_tx_storage_cache()
+
+        manager3 = self.simulator.create_peer(builder3)
+        self.assertEqual(partial_blocks, self._get_partial_blocks(manager3.tx_storage))
+        self.assertTrue(manager3.tx_storage.indexes.deps.has_needed_tx())
+
+        conn13 = FakeConnection(manager1, manager3, latency=0.05)
+        self.simulator.add_connection(conn13)
+
+        # Let the connection start to sync.
+        self.simulator.run(60)
+
+        # Run until it's synced (timeout of 1.5h).
+        sync3 = conn13.proto2.state.sync_manager
+        self.simulator.run(600)
+        sync3._breakpoint = True
+
+        trigger = StopWhenTrue(sync3.is_synced)
+        self.assertTrue(self.simulator.run(5400, trigger=trigger))
+
+        self.assertEqual(manager1.tx_storage.get_vertices_count(), manager3.tx_storage.get_vertices_count())
+        self.assertConsensusEqualSyncV2(manager1, manager3)
+
+        # Start generators again to test real time sync.
+        miner1.start()
+        gen_tx1.start()
+        self.simulator.run(600)
+        miner1.stop()
+        gen_tx1.stop()
+
+        # Make sure we are all synced.
+        self.simulator.run(600)
+
+        self.assertEqual(manager1.tx_storage.get_vertices_count(), manager3.tx_storage.get_vertices_count())
+        self.assertConsensusEqualSyncV2(manager1, manager3)
+
+    @pytest.mark.skipif(not HAS_ROCKSDB, reason='requires python-rocksdb')
+    def test_restart_fullnode_full_verification(self):
+        self._run_restart_test(full_verification=True, use_tx_storage_cache=False)
+
+    @pytest.mark.skipif(not HAS_ROCKSDB, reason='requires python-rocksdb')
+    def test_restart_fullnode_quick(self):
+        self._run_restart_test(full_verification=False, use_tx_storage_cache=False)
+
+    @pytest.mark.skipif(not HAS_ROCKSDB, reason='requires python-rocksdb')
+    def test_restart_fullnode_quick_with_cache(self):
+        self._run_restart_test(full_verification=False, use_tx_storage_cache=True)
+
+    @pytest.mark.skipif(not HAS_ROCKSDB, reason='requires python-rocksdb')
+    def test_restart_fullnode_full_verification_with_cache(self):
+        self._run_restart_test(full_verification=True, use_tx_storage_cache=True)
+
+    def test_exceeds_streaming_and_mempool_limits(self) -> None:
+        manager1 = self.create_peer(enable_sync_v1=False, enable_sync_v2=True)
+        manager1.allow_mining_without_peers()
+
+        # Find 100 blocks.
+        miner1 = self.simulator.create_miner(manager1, hashpower=10e6)
+        miner1.start()
+        trigger: Trigger = StopAfterNMinedBlocks(miner1, quantity=100)
+        self.assertTrue(self.simulator.run(3 * 3600, trigger=trigger))
+        miner1.stop()
+
+        # Generate 500 txs.
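+        # Note: rate appears to be in txs per simulated second (other tests in
+        # this diff use rate=2 / 60., i.e. 2 txs per minute); the high
+        # hashpower keeps the generated txs resolving quickly.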
+        gen_tx1 = self.simulator.create_tx_generator(manager1, rate=3., hashpower=10e9, ignore_no_funds=True)
+        gen_tx1.start()
+        trigger = StopAfterNTransactions(gen_tx1, quantity=500)
+        self.simulator.run(3600, trigger=trigger)
+        self.assertGreater(manager1.tx_storage.get_vertices_count(), 500)
+        gen_tx1.stop()
+
+        # Find 1 block.
+        miner1.start()
+        trigger = StopAfterNMinedBlocks(miner1, quantity=1)
+        self.assertTrue(self.simulator.run(3600, trigger=trigger))
+        miner1.stop()
+
+        # Confirm block has 400+ transactions.
+        blk = manager1.tx_storage.get_best_block()
+        tx_parents = [manager1.tx_storage.get_transaction(x) for x in blk.parents[1:]]
+        self.assertEqual(len(tx_parents), 2)
+        dfs = DFSWalk(manager1.tx_storage, is_dag_verifications=True, is_left_to_right=False)
+        cnt = 0
+        for tx in dfs.run(tx_parents):
+            if tx.get_metadata().first_block == blk.hash:
+                cnt += 1
+            else:
+                dfs.skip_neighbors(tx)
+        self.assertGreater(cnt, 400)
+
+        # Generate 500 txs in mempool.
+        gen_tx1.start()
+        trigger = StopAfterNTransactions(gen_tx1, quantity=500)
+        self.simulator.run(3600, trigger=trigger)
+        self.assertGreater(manager1.tx_storage.get_vertices_count(), 1000)
+        gen_tx1.stop()
+
+        for _ in range(20):
+            print()
+        print('Part 2: Start new fullnode and sync')
+        for _ in range(20):
+            print()
+
+        # Create a new peer and run sync for a while (but stop before getting synced).
+        peer_id = PeerId()
+        builder2 = self.simulator.get_default_builder() \
+            .set_peer_id(peer_id) \
+            .disable_sync_v1() \
+            .enable_sync_v2()
+
+        manager2 = self.simulator.create_peer(builder2)
+        conn12 = FakeConnection(manager1, manager2, latency=0.05)
+        self.simulator.add_connection(conn12)
+
+        # Let the connection start to sync.
+        self.simulator.run(1)
+
+        # Change manager1 default streaming and mempool limits.
+        sync1 = conn12.proto1.state.sync_manager
+        sync1.DEFAULT_STREAMING_LIMIT = 30
+        sync1.mempool_manager.MAX_STACK_LENGTH = 30
+        self.assertIsNone(sync1.blockchain_streaming)
+        self.assertIsNone(sync1.transactions_streaming)
+
+        # Change manager2 default streaming and mempool limits.
+        sync2 = conn12.proto2.state.sync_manager
+        sync2.DEFAULT_STREAMING_LIMIT = 50
+        sync2.mempool_manager.MAX_STACK_LENGTH = 50
+        self.assertIsNone(sync2.blockchain_streaming)
+        self.assertIsNone(sync2.transactions_streaming)
+
+        # Run until fully synced.
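+        # XXX: a StopWhenTrue trigger on sync2.is_synced (commented out below)
+        # would stop as soon as the peers converge; a fixed 1-hour run is used
+        # instead, which should be plenty with these reduced limits.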
+ # trigger = StopWhenTrue(sync2.is_synced) + # self.assertTrue(self.simulator.run(5400, trigger=trigger)) + self.simulator.run(3600) + + self.assertEqual(manager1.tx_storage.get_vertices_count(), manager2.tx_storage.get_vertices_count()) + self.assertConsensusEqualSyncV2(manager1, manager2) diff --git a/tests/unittest.py b/tests/unittest.py index d5dcb056d..c6fe0b213 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -182,15 +182,8 @@ def create_peer(self, network, peer_id=None, wallet=None, tx_storage=None, unloc capabilities=None, full_verification=True, enable_sync_v1=None, enable_sync_v2=None, checkpoints=None, utxo_index=False, event_manager=None, use_memory_index=None, start_manager=True, pubsub=None, event_storage=None, enable_event_queue=None, use_memory_storage=None): - if enable_sync_v1 is None: - assert hasattr(self, '_enable_sync_v1'), ('`_enable_sync_v1` has no default by design, either set one on ' - 'the test class or pass `enable_sync_v1` by argument') - enable_sync_v1 = self._enable_sync_v1 - if enable_sync_v2 is None: - assert hasattr(self, '_enable_sync_v2'), ('`_enable_sync_v2` has no default by design, either set one on ' - 'the test class or pass `enable_sync_v2` by argument') - enable_sync_v2 = self._enable_sync_v2 - assert enable_sync_v1 or enable_sync_v2, 'enable at least one sync version' + + enable_sync_v1, enable_sync_v2 = self._syncVersionFlags(enable_sync_v1, enable_sync_v2) builder = self.get_builder(network) \ .set_full_verification(full_verification) @@ -290,7 +283,33 @@ def assertIsTopological(self, tx_sequence: Iterator[BaseTransaction], message: O self.assertIn(dep, valid_deps, message) valid_deps.add(tx.hash) + def _syncVersionFlags(self, enable_sync_v1=None, enable_sync_v2=None): + """Internal: use this to check and get the flags and optionally provide override values.""" + if enable_sync_v1 is None: + assert hasattr(self, '_enable_sync_v1'), ('`_enable_sync_v1` has no default by design, either set one on ' + 'the test class or pass `enable_sync_v1` by argument') + enable_sync_v1 = self._enable_sync_v1 + if enable_sync_v2 is None: + assert hasattr(self, '_enable_sync_v2'), ('`_enable_sync_v2` has no default by design, either set one on ' + 'the test class or pass `enable_sync_v2` by argument') + enable_sync_v2 = self._enable_sync_v2 + assert enable_sync_v1 or enable_sync_v2, 'enable at least one sync version' + return enable_sync_v1, enable_sync_v2 + def assertTipsEqual(self, manager1, manager2): + _, enable_sync_v2 = self._syncVersionFlags() + if enable_sync_v2: + self.assertTipsEqualSyncV2(manager1, manager2) + else: + self.assertTipsEqualSyncV1(manager1, manager2) + + def assertTipsNotEqual(self, manager1, manager2): + s1 = set(manager1.tx_storage.get_all_tips()) + s2 = set(manager2.tx_storage.get_all_tips()) + self.assertNotEqual(s1, s2) + + def assertTipsEqualSyncV1(self, manager1, manager2): + # XXX: this is the original implementation of assertTipsEqual s1 = set(manager1.tx_storage.get_all_tips()) s2 = set(manager2.tx_storage.get_all_tips()) self.assertEqual(s1, s2) @@ -299,12 +318,38 @@ def assertTipsEqual(self, manager1, manager2): s2 = set(manager2.tx_storage.get_tx_tips()) self.assertEqual(s1, s2) - def assertTipsNotEqual(self, manager1, manager2): - s1 = set(manager1.tx_storage.get_all_tips()) - s2 = set(manager2.tx_storage.get_all_tips()) - self.assertNotEqual(s1, s2) + def assertTipsEqualSyncV2(self, manager1, manager2, *, strict_sync_v2_indexes=True): + # tx tips + if strict_sync_v2_indexes: + tips1 = 
manager1.tx_storage.indexes.mempool_tips.get() + tips2 = manager2.tx_storage.indexes.mempool_tips.get() + else: + tips1 = {tx.hash for tx in manager1.tx_storage.iter_mempool_tips_from_best_index()} + tips2 = {tx.hash for tx in manager2.tx_storage.iter_mempool_tips_from_best_index()} + self.log.debug('tx tips1', len=len(tips1), list=shorten_hash(tips1)) + self.log.debug('tx tips2', len=len(tips2), list=shorten_hash(tips2)) + self.assertEqual(tips1, tips2) + + # best block + s1 = set(manager1.tx_storage.get_best_block_tips()) + s2 = set(manager2.tx_storage.get_best_block_tips()) + self.log.debug('block tips1', len=len(s1), list=shorten_hash(s1)) + self.log.debug('block tips2', len=len(s2), list=shorten_hash(s2)) + self.assertEqual(s1, s2) + + # best block (from height index) + b1 = manager1.tx_storage.indexes.height.get_tip() + b2 = manager2.tx_storage.indexes.height.get_tip() + self.assertEqual(b1, b2) def assertConsensusEqual(self, manager1, manager2): + _, enable_sync_v2 = self._syncVersionFlags() + if enable_sync_v2: + self.assertConsensusEqualSyncV2(manager1, manager2) + else: + self.assertConsensusEqualSyncV1(manager1, manager2) + + def assertConsensusEqualSyncV1(self, manager1, manager2): self.assertEqual(manager1.tx_storage.get_vertices_count(), manager2.tx_storage.get_vertices_count()) for tx1 in manager1.tx_storage.get_all_transactions(): tx2 = manager2.tx_storage.get_transaction(tx1.hash) @@ -323,6 +368,44 @@ def assertConsensusEqual(self, manager1, manager2): # Hard verification # self.assertEqual(tx1_meta.voided_by, tx2_meta.voided_by) + def assertConsensusEqualSyncV2(self, manager1, manager2, *, strict_sync_v2_indexes=True): + # The current sync algorithm does not propagate voided blocks/txs + # so the count might be different even though the consensus is equal + # One peer might have voided txs that the other does not have + + # to start off, both nodes must have the same tips + self.assertTipsEqualSyncV2(manager1, manager2, strict_sync_v2_indexes=strict_sync_v2_indexes) + + # the following is specific to sync-v2 + + # helper function: + def get_all_executed_or_voided(tx_storage): + """Get all txs separated into three sets: executed, voided, partial""" + tx_executed = set() + tx_voided = set() + tx_partial = set() + for tx in tx_storage.get_all_transactions(): + assert tx.hash is not None + tx_meta = tx.get_metadata() + if not tx_meta.validation.is_fully_connected(): + tx_partial.add(tx.hash) + elif not tx_meta.voided_by: + tx_executed.add(tx.hash) + else: + tx_voided.add(tx.hash) + return tx_executed, tx_voided, tx_partial + + # extract all the transactions from each node, split into three sets + tx_executed1, tx_voided1, tx_partial1 = get_all_executed_or_voided(manager1.tx_storage) + tx_executed2, tx_voided2, tx_partial2 = get_all_executed_or_voided(manager2.tx_storage) + + # both must have the exact same executed set + self.assertEqual(tx_executed1, tx_executed2) + + # XXX: the rest actually doesn't matter + self.log.debug('node1 rest', len_voided=len(tx_voided1), len_partial=len(tx_partial1)) + self.log.debug('node2 rest', len_voided=len(tx_voided2), len_partial=len(tx_partial2)) + def assertConsensusValid(self, manager): for tx in manager.tx_storage.get_all_transactions(): if tx.is_block: @@ -374,6 +457,20 @@ def assertTransactionConsensusValid(self, tx): self.assertTrue(meta.voided_by) self.assertTrue(parent_meta.voided_by.issubset(meta.voided_by)) + def assertSyncedProgress(self, node_sync): + """Check "synced" status of p2p-manager, uses self._enable_sync_vX to choose 
which check to run.""" + enable_sync_v1, enable_sync_v2 = self._syncVersionFlags() + if enable_sync_v2: + self.assertV2SyncedProgress(node_sync) + elif enable_sync_v1: + self.assertV1SyncedProgress(node_sync) + + def assertV1SyncedProgress(self, node_sync): + self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp) + + def assertV2SyncedProgress(self, node_sync): + self.assertEqual(node_sync.synced_height, node_sync.peer_height) + def clean_tmpdirs(self): for tmpdir in self.tmpdirs: shutil.rmtree(tmpdir)