diff --git a/hathor/builder/builder.py b/hathor/builder/builder.py
index e6dee4600..90e67e009 100644
--- a/hathor/builder/builder.py
+++ b/hathor/builder/builder.py
@@ -256,7 +256,6 @@ def build(self) -> BuildArtifacts:
             wallet=wallet,
             rng=self._rng,
             checkpoints=self._checkpoints,
-            capabilities=self._capabilities,
             environment_info=get_environment_info(self._cmdline, str(peer.id)),
             bit_signaling_service=bit_signaling_service,
             verification_service=verification_service,
@@ -268,7 +267,7 @@ def build(self) -> BuildArtifacts:
             **kwargs
         )
 
-        p2p_manager.set_manager(manager)
+        p2p_manager.finalize_factories()
 
         if poa_block_producer:
             poa_block_producer.manager = manager
@@ -426,6 +425,7 @@ def _get_or_create_p2p_manager(self) -> ConnectionsManager:
             ssl=enable_ssl,
             whitelist_only=False,
             rng=self._rng,
+            capabilities=self._get_or_create_capabilities(),
         )
         SyncSupportLevel.add_factories(
             self._p2p_manager,
@@ -640,6 +640,12 @@ def _get_or_create_poa_block_producer(self) -> PoaBlockProducer | None:
 
         return self._poa_block_producer
 
+    def _get_or_create_capabilities(self) -> list[str]:
+        if self._capabilities is None:
+            settings = self._get_or_create_settings()
+            self._capabilities = settings.get_default_capabilities()
+        return self._capabilities
+
     def use_memory(self) -> 'Builder':
         self.check_if_can_modify()
         self._storage_type = StorageType.MEMORY
diff --git a/hathor/builder/cli_builder.py b/hathor/builder/cli_builder.py
index 3325de8f1..64c1fa58c 100644
--- a/hathor/builder/cli_builder.py
+++ b/hathor/builder/cli_builder.py
@@ -341,12 +341,15 @@ def create_manager(self, reactor: Reactor) -> HathorManager:
             pubsub=pubsub,
         )
 
+        capabilities = settings.get_default_capabilities()
         p2p_manager = ConnectionsManager(
             dependencies=p2p_dependencies,
             my_peer=peer,
             ssl=True,
             whitelist_only=False,
             rng=Random(),
+            capabilities=capabilities,
+            hostname=hostname,
         )
 
         SyncSupportLevel.add_factories(
@@ -371,7 +374,6 @@ def create_manager(self, reactor: Reactor) -> HathorManager:
         self.manager = HathorManager(
             reactor,
             settings=settings,
-            hostname=hostname,
             pubsub=pubsub,
             consensus_algorithm=consensus_algorithm,
             daa=daa,
@@ -398,7 +400,7 @@ def create_manager(self, reactor: Reactor) -> HathorManager:
                                 '--x-ipython-kernel must be used with --x-asyncio-reactor')
             self._start_ipykernel()
 
-        p2p_manager.set_manager(self.manager)
+        p2p_manager.finalize_factories()
 
         if poa_block_producer:
             poa_block_producer.manager = self.manager
diff --git a/hathor/conf/settings.py b/hathor/conf/settings.py
index d6509b4b6..d0a347231 100644
--- a/hathor/conf/settings.py
+++ b/hathor/conf/settings.py
@@ -455,6 +455,14 @@ def from_yaml(cls, *, filepath: str) -> 'HathorSettings':
             validators=_VALIDATORS
         )
 
+    def get_default_capabilities(self) -> list[str]:
+        """Return the default capabilities."""
+        return [
+            self.CAPABILITY_WHITELIST,
+            self.CAPABILITY_SYNC_VERSION,
+            self.CAPABILITY_GET_BEST_BLOCKCHAIN
+        ]
+
 
 def _parse_checkpoints(checkpoints: Union[dict[int, str], list[Checkpoint]]) -> list[Checkpoint]:
     """Parse a dictionary of raw checkpoint data into a list of checkpoints."""
diff --git a/hathor/manager.py b/hathor/manager.py
index 534885e66..363da3407 100644
--- a/hathor/manager.py
+++ b/hathor/manager.py
@@ -47,7 +47,6 @@
 from hathor.mining.cpu_mining_service import CpuMiningService
 from hathor.p2p.manager import ConnectionsManager
 from hathor.p2p.peer import PrivatePeer
-from hathor.p2p.peer_id import PeerId
 from hathor.pubsub import HathorEvents, PubSubManager
 from hathor.reactor import ReactorProtocol as Reactor
 from hathor.reward_lock import is_spent_reward_locked
@@ -108,9 +107,7 @@ def __init__(
         execution_manager: ExecutionManager,
         vertex_handler: VertexHandler,
         vertex_parser: VertexParser,
-        hostname: Optional[str] = None,
         wallet: Optional[BaseWallet] = None,
-        capabilities: Optional[list[str]] = None,
         checkpoints: Optional[list[Checkpoint]] = None,
         rng: Optional[Random] = None,
         environment_info: Optional[EnvironmentInfo] = None,
@@ -158,9 +155,6 @@ def __init__(
         self.is_profiler_running: bool = False
         self.profiler_last_start_time: float = 0
 
-        # Hostname, used to be accessed by other peers.
-        self.hostname = hostname
-
         # Remote address, which can be different from local address.
         self.remote_address = None
 
@@ -226,15 +220,6 @@ def __init__(
         # Can be activated on the command line with --full-verification
         self._full_verification = full_verification
 
-        # List of whitelisted peers
-        self.peers_whitelist: list[PeerId] = []
-
-        # List of capabilities of the peer
-        if capabilities is not None:
-            self.capabilities = capabilities
-        else:
-            self.capabilities = self.get_default_capabilities()
-
         # This is included in some logs to provide more context
         self.environment_info = environment_info
 
@@ -245,14 +230,6 @@ def __init__(
         self.lc_check_sync_state.clock = self.reactor
         self.lc_check_sync_state_interval = self.CHECK_SYNC_STATE_INTERVAL
 
-    def get_default_capabilities(self) -> list[str]:
-        """Return the default capabilities for this manager."""
-        return [
-            self._settings.CAPABILITY_WHITELIST,
-            self._settings.CAPABILITY_SYNC_VERSION,
-            self._settings.CAPABILITY_GET_BEST_BLOCKCHAIN
-        ]
-
     def start(self) -> None:
         """ A factory must be started only once. And it is usually automatically started.
         """
@@ -984,27 +961,6 @@ def on_new_tx(
 
         return result
 
-    def has_sync_version_capability(self) -> bool:
-        return self._settings.CAPABILITY_SYNC_VERSION in self.capabilities
-
-    def add_peer_to_whitelist(self, peer_id: PeerId) -> None:
-        if not self._settings.ENABLE_PEER_WHITELIST:
-            return
-
-        if peer_id in self.peers_whitelist:
-            self.log.info('peer already in whitelist', peer_id=peer_id)
-        else:
-            self.peers_whitelist.append(peer_id)
-
-    def remove_peer_from_whitelist_and_disconnect(self, peer_id: PeerId) -> None:
-        if not self._settings.ENABLE_PEER_WHITELIST:
-            return
-
-        if peer_id in self.peers_whitelist:
-            self.peers_whitelist.remove(peer_id)
-            # disconnect from node
-            self.connections.drop_connection_by_peer_id(peer_id)
-
     def has_recent_activity(self) -> bool:
         current_timestamp = time.time()
         latest_blockchain_timestamp = self.tx_storage.latest_timestamp
@@ -1053,13 +1009,6 @@ def get_cmd_path(self) -> Optional[str]:
         """Return the cmd path. If no cmd path is set, returns None."""
         return self._cmd_path
 
-    def set_hostname_and_reset_connections(self, new_hostname: str) -> None:
-        """Set the hostname and reset all connections."""
-        old_hostname = self.hostname
-        self.hostname = new_hostname
-        self.connections.update_hostname_entrypoints(old_hostname=old_hostname, new_hostname=self.hostname)
-        self.connections.disconnect_all_peers(force=True)
-
 
 class ParentTxs(NamedTuple):
     """ Tuple where the `must_include` hash, when present (at most 1), must be included in a pair, and a list of hashes
diff --git a/hathor/p2p/dependencies/p2p_dependencies.py b/hathor/p2p/dependencies/p2p_dependencies.py
index fd6aa1453..d9e05ac37 100644
--- a/hathor/p2p/dependencies/p2p_dependencies.py
+++ b/hathor/p2p/dependencies/p2p_dependencies.py
@@ -129,3 +129,7 @@ def get_best_block_tips(self) -> list[VertexId]:
     @abstractmethod
     def partial_vertex_exists(self, vertex_id: VertexId) -> bool:
         raise NotImplementedError
+
+    @abstractmethod
+    def enable_mempool_index(self) -> None:
+        raise NotImplementedError
diff --git a/hathor/p2p/dependencies/single_process_p2p_dependencies.py b/hathor/p2p/dependencies/single_process_p2p_dependencies.py
index e04135223..01eb1281f 100644
--- a/hathor/p2p/dependencies/single_process_p2p_dependencies.py
+++ b/hathor/p2p/dependencies/single_process_p2p_dependencies.py
@@ -142,3 +142,7 @@ def get_best_block_tips(self) -> list[VertexId]:
     @override
     def partial_vertex_exists(self, vertex_id: VertexId) -> bool:
         return self._tx_storage.partial_vertex_exists(vertex_id)
+
+    @override
+    def enable_mempool_index(self) -> None:
+        self._indexes.enable_mempool_index()
diff --git a/hathor/p2p/manager.py b/hathor/p2p/manager.py
index 8b719d14b..0e7534a0f 100644
--- a/hathor/p2p/manager.py
+++ b/hathor/p2p/manager.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import TYPE_CHECKING, Any, Iterable, NamedTuple, Optional
+from typing import Any, Iterable, NamedTuple, Optional
 
 from structlog import get_logger
 from twisted.internet import endpoints
@@ -41,9 +41,6 @@
 from hathor.transaction import BaseTransaction
 from hathor.util import Random
 
-if TYPE_CHECKING:
-    from hathor.manager import HathorManager
-
 logger = get_logger()
 
 # The timeout in seconds for the whitelist GET request
@@ -77,7 +74,6 @@ class ConnectionsManager:
     class GlobalRateLimiter:
         SEND_TIPS = 'NodeSyncTimestamp.send_tips'
 
-    manager: Optional['HathorManager']
     connections: set[HathorProtocol]
     connected_peers: dict[PeerId, HathorProtocol]
     connecting_peers: dict[IStreamClientEndpoint, _ConnectingPeer]
@@ -97,12 +93,14 @@ def __init__(
         ssl: bool,
         rng: Random,
         whitelist_only: bool,
+        capabilities: list[str],
+        hostname: str | None = None,
     ) -> None:
         self.log = logger.new()
         self.dependencies = dependencies
         self._settings = dependencies.settings
         self.rng = rng
-        self.manager = None
+        self.capabilities = capabilities
 
         self.MAX_ENABLED_SYNC = self._settings.MAX_ENABLED_SYNC
         self.SYNC_UPDATE_INTERVAL = self._settings.SYNC_UPDATE_INTERVAL
@@ -110,6 +108,13 @@ def __init__(
         self.reactor = dependencies.reactor
         self.my_peer = my_peer
 
+        self._finalized_factories = False
+
+        # List of whitelisted peers
+        self.peers_whitelist: list[PeerId] = []
+
+        # Hostname, used to be accessed by other peers.
+        self.hostname = hostname
 
         # List of address descriptions to listen for new connections (eg: [tcp:8000])
         self.listen_address_descriptions: list[str] = []
@@ -203,8 +208,7 @@ def __init__(
 
     def add_sync_factory(self, sync_version: SyncVersion, sync_factory: SyncAgentFactory) -> None:
         """Add factory for the given sync version, must use a sync version that does not already exist."""
-        # XXX: to allow code in `set_manager` to safely use the the available sync versions, we add this restriction:
-        assert self.manager is None, 'Cannot modify sync factories after a manager is set'
+        assert not self._finalized_factories, 'Cannot modify sync factories after it is finalized'
         if sync_version in self._sync_factories:
             raise ValueError('sync version already exists')
         self._sync_factories[sync_version] = sync_factory
@@ -240,17 +244,15 @@ def disable_sync_version(self, sync_version: SyncVersion) -> None:
             return
         self._enabled_sync_versions.discard(sync_version)
 
-    def set_manager(self, manager: 'HathorManager') -> None:
-        """Set the manager. This method must be called before start()."""
+    def finalize_factories(self) -> None:
+        """Signal that no more sync factories will be added. This method must be called before start()."""
+        self._finalized_factories = True
         if len(self._enabled_sync_versions) == 0:
             raise TypeError('Class built incorrectly without any enabled sync version')
-        self.manager = manager
 
         if self.is_sync_version_available(SyncVersion.V2):
-            assert self.manager.tx_storage.indexes is not None
-            indexes = self.manager.tx_storage.indexes
             self.log.debug('enable sync-v2 indexes')
-            indexes.enable_mempool_index()
+            self.dependencies.enable_mempool_index()
 
     def add_listen_address_description(self, addr: str) -> None:
         """Add address to listen for incoming connections."""
@@ -283,9 +285,7 @@ def enable_rate_limiter(self, max_hits: int = 16, window_seconds: float = 1) ->
 
     def start(self) -> None:
         """Listen on the given address descriptions and start accepting and processing connections."""
-        if self.manager is None:
-            raise TypeError('Class was built incorrectly without a HathorManager.')
-
+        assert self._finalized_factories, 'sync factories must be finalized by calling `finalize_factories`'
         self.lc_reconnect.start(5, now=False)
         self.lc_sync_update.start(self.lc_sync_update_interval, now=False)
 
@@ -524,7 +524,6 @@ def reconnect_to_all(self) -> None:
         """
         self.peers_cleanup()
         # when we have no connected peers left, run the discovery process again
-        assert self.manager is not None
         now = self.reactor.seconds()
         if now - self._last_discovery >= self.PEER_DISCOVERY_INTERVAL:
             self._last_discovery = now
@@ -555,7 +554,6 @@ def _update_whitelist_err(self, *args: Any, **kwargs: Any) -> None:
         self.log.error('update whitelist failed', args=args, kwargs=kwargs)
 
     def _update_whitelist_cb(self, body: bytes) -> None:
-        assert self.manager is not None
         self.log.info('update whitelist got response')
         try:
             text = body.decode()
@@ -563,7 +561,7 @@ def _update_whitelist_cb(self, body: bytes) -> None:
         except Exception:
             self.log.exception('failed to parse whitelist')
             return
-        current_whitelist = set(self.manager.peers_whitelist)
+        current_whitelist = set(self.peers_whitelist)
         peers_to_add = new_whitelist - current_whitelist
         if peers_to_add:
             self.log.info('add new peers to whitelist', peers=peers_to_add)
@@ -571,9 +569,9 @@ def _update_whitelist_cb(self, body: bytes) -> None:
         if peers_to_remove:
             self.log.info('remove peers peers from whitelist', peers=peers_to_remove)
         for peer_id in peers_to_add:
-            self.manager.add_peer_to_whitelist(peer_id)
+            self._add_peer_to_whitelist(peer_id)
         for peer_id in peers_to_remove:
-            self.manager.remove_peer_from_whitelist_and_disconnect(peer_id)
+            self._remove_peer_from_whitelist_and_disconnect(peer_id)
 
     def connect_to_if_not_connected(self, peer: UnverifiedPeer | PublicPeer, now: int) -> None:
         """ Attempts to connect if it is not connected to the peer.
@@ -699,24 +697,21 @@ def _on_listen_success(self, listening_port: IListeningPort, description: str) -
             return
 
         self._listen_addresses.append(address)
+        self._add_hostname_entrypoint(address)
 
-        assert self.manager is not None
-        if self.manager.hostname:
-            self._add_hostname_entrypoint(self.manager.hostname, address)
-
-    def update_hostname_entrypoints(self, *, old_hostname: str | None, new_hostname: str) -> None:
+    def _update_hostname_entrypoints(self, *, old_hostname: str | None) -> None:
         """Add new hostname entrypoints according to the listen addresses, and remove any old entrypoint."""
-        assert self.manager is not None
         for address in self._listen_addresses:
             if old_hostname is not None:
                 old_entrypoint = Entrypoint.from_hostname_address(old_hostname, address)
                 if old_entrypoint in self.my_peer.info.entrypoints:
                     self.my_peer.info.entrypoints.remove(old_entrypoint)
-            self._add_hostname_entrypoint(new_hostname, address)
+            self._add_hostname_entrypoint(address)
 
-    def _add_hostname_entrypoint(self, hostname: str, address: IPv4Address | IPv6Address) -> None:
-        hostname_entrypoint = Entrypoint.from_hostname_address(hostname, address)
-        self.my_peer.info.entrypoints.append(hostname_entrypoint)
+    def _add_hostname_entrypoint(self, address: IPv4Address | IPv6Address) -> None:
+        if self.hostname:
+            hostname_entrypoint = Entrypoint.from_hostname_address(self.hostname, address)
+            self.my_peer.info.entrypoints.append(hostname_entrypoint)
 
     def get_connection_to_drop(self, protocol: HathorProtocol) -> HathorProtocol:
         """ When there are duplicate connections, determine which one should be dropped.
@@ -750,7 +745,7 @@ def drop_connection(self, protocol: HathorProtocol) -> None:
         self.log.debug('dropping connection', peer_id=protocol.peer.id, protocol=type(protocol).__name__)
         protocol.send_error_and_close_connection('Connection droped')
 
-    def drop_connection_by_peer_id(self, peer_id: PeerId) -> None:
+    def _drop_connection_by_peer_id(self, peer_id: PeerId) -> None:
         """ Drop a connection by peer id
         """
         protocol = self.connected_peers.get(peer_id)
@@ -844,3 +839,31 @@ def reload_entrypoints_and_connections(self) -> None:
         self.log.warn('Killing all connections and resetting entrypoints...')
         self.disconnect_all_peers(force=True)
         self.my_peer.reload_entrypoints_from_source_file()
+
+    def has_sync_version_capability(self) -> bool:
+        return self._settings.CAPABILITY_SYNC_VERSION in self.capabilities
+
+    def _add_peer_to_whitelist(self, peer_id: PeerId) -> None:
+        if not self._settings.ENABLE_PEER_WHITELIST:
+            return
+
+        if peer_id in self.peers_whitelist:
+            self.log.info('peer already in whitelist', peer_id=peer_id)
+        else:
+            self.peers_whitelist.append(peer_id)
+
+    def _remove_peer_from_whitelist_and_disconnect(self, peer_id: PeerId) -> None:
+        if not self._settings.ENABLE_PEER_WHITELIST:
+            return
+
+        if peer_id in self.peers_whitelist:
+            self.peers_whitelist.remove(peer_id)
+            # disconnect from node
+            self._drop_connection_by_peer_id(peer_id)
+
+    def set_hostname_and_reset_connections(self, new_hostname: str) -> None:
+        """Set the hostname and reset all connections."""
+        old_hostname = self.hostname
+        self.hostname = new_hostname
+        self._update_hostname_entrypoints(old_hostname=old_hostname)
+        self.disconnect_all_peers(force=True)
diff --git a/hathor/p2p/protocol.py b/hathor/p2p/protocol.py
index 47764621b..b45cb116b 100644
--- a/hathor/p2p/protocol.py
+++ b/hathor/p2p/protocol.py
@@ -75,7 +75,6 @@ class WarningFlags(str, Enum):
 
     my_peer: PrivatePeer
     connections: 'ConnectionsManager'
-    node: 'HathorManager'
    app_version: str
     last_message: float
     _peer: Optional[PublicPeer]
@@ -110,9 +109,6 @@ def __init__(
         self.my_peer = my_peer
         self.connections = p2p_manager
 
-        assert p2p_manager.manager is not None
-        self.node = p2p_manager.manager
-
         assert self.connections.reactor is not None
         self.reactor = self.connections.reactor
 
diff --git a/hathor/p2p/resources/status.py b/hathor/p2p/resources/status.py
index 68edb9f0e..580c72327 100644
--- a/hathor/p2p/resources/status.py
+++ b/hathor/p2p/resources/status.py
@@ -108,7 +108,7 @@ def render_GET(self, request):
                 'uptime': now - self.manager.start_time,
                 'entrypoints': self.manager.connections.my_peer.info.entrypoints_as_str(),
             },
-            'peers_whitelist': [str(peer_id) for peer_id in self.manager.peers_whitelist],
+            'peers_whitelist': [str(peer_id) for peer_id in self.manager.connections.peers_whitelist],
             'known_peers': known_peers,
             'connections': {
                 'connected_peers': connected_peers,
diff --git a/hathor/p2p/states/hello.py b/hathor/p2p/states/hello.py
index 5996da12f..e8d2b8f4c 100644
--- a/hathor/p2p/states/hello.py
+++ b/hathor/p2p/states/hello.py
@@ -17,7 +17,6 @@
 from structlog import get_logger
 
 import hathor
-from hathor.conf.get_settings import get_global_settings
 from hathor.exception import HathorError
 from hathor.p2p import P2PDependencies
 from hathor.p2p.messages import ProtocolMessages
@@ -57,9 +56,9 @@ def _get_hello_data(self) -> dict[str, Any]:
             'genesis_short_hash': get_genesis_short_hash(),
             'timestamp': self.dependencies.reactor.seconds(),
             'settings_dict': get_settings_hello_dict(self._settings),
-            'capabilities': protocol.node.capabilities,
+            'capabilities': protocol.connections.capabilities,
         }
 
-        if self.protocol.node.has_sync_version_capability():
+        if protocol.connections.has_sync_version_capability():
             data['sync_versions'] = [x.value for x in self._get_sync_versions()]
         return data
@@ -112,7 +111,7 @@ def handle_hello(self, payload: str) -> None:
 
         my_sync_versions = self._get_sync_versions()
         try:
-            remote_sync_versions = _parse_sync_versions(data)
+            remote_sync_versions = self._parse_sync_versions(data)
         except HathorError as e:
             # this will only happen if the remote implementation is wrong
             self.log.warn('invalid protocol', error=e)
@@ -172,16 +171,14 @@ def handle_hello(self, payload: str) -> None:
 
         protocol.change_state(protocol.PeerState.PEER_ID)
 
-
-def _parse_sync_versions(hello_data: dict[str, Any]) -> set[SyncVersion]:
-    """Versions that are not recognized will not be included."""
-    settings = get_global_settings()
-    if settings.CAPABILITY_SYNC_VERSION in hello_data['capabilities']:
-        if 'sync_versions' not in hello_data:
-            raise HathorError('protocol error, expected sync_versions field')
-        known_values = set(x.value for x in SyncVersion)
-        recognized_values = set(hello_data['sync_versions']) & known_values
-        return set(SyncVersion(x) for x in recognized_values)
-    else:
-        # XXX: implied value when sync-version capability isn't present
-        return {SyncVersion.V1_1}
+    def _parse_sync_versions(self, hello_data: dict[str, Any]) -> set[SyncVersion]:
+        """Versions that are not recognized will not be included."""
+        if self._settings.CAPABILITY_SYNC_VERSION in hello_data['capabilities']:
+            if 'sync_versions' not in hello_data:
+                raise HathorError('protocol error, expected sync_versions field')
+            known_values = set(x.value for x in SyncVersion)
+            recognized_values = set(hello_data['sync_versions']) & known_values
+            return set(SyncVersion(x) for x in recognized_values)
+        else:
+            # XXX: implied value when sync-version capability isn't present
+            return {SyncVersion.V1_1}
diff --git a/hathor/p2p/states/peer_id.py b/hathor/p2p/states/peer_id.py
index a9af1aeb8..3bf8172cc 100644
--- a/hathor/p2p/states/peer_id.py
+++ b/hathor/p2p/states/peer_id.py
@@ -145,7 +145,7 @@ def _should_block_peer(self, peer_id: PeerId) -> bool:
         Currently this is only because the peer is not in a whitelist and whitelist blocking is active.
         """
-        peer_is_whitelisted = peer_id in self.protocol.node.peers_whitelist
+        peer_is_whitelisted = peer_id in self.protocol.connections.peers_whitelist
         # never block whitelisted peers
         if peer_is_whitelisted:
             return False
 
diff --git a/hathor/p2p/states/ready.py b/hathor/p2p/states/ready.py
index 1bc42d7d5..3c11e7fff 100644
--- a/hathor/p2p/states/ready.py
+++ b/hathor/p2p/states/ready.py
@@ -85,7 +85,7 @@ def __init__(self, protocol: 'HathorProtocol', *, dependencies: P2PDependencies)
         self.lc_get_best_blockchain: Optional[LoopingCall] = None
 
         # if the peer has the GET-BEST-BLOCKCHAIN capability
-        common_capabilities = protocol.capabilities & set(protocol.node.capabilities)
+        common_capabilities = protocol.capabilities & set(protocol.connections.capabilities)
         if (self._settings.CAPABILITY_GET_BEST_BLOCKCHAIN in common_capabilities):
             # set the loop to get the best blockchain from the peer
             self.lc_get_best_blockchain = LoopingCall(self.send_get_best_blockchain)
diff --git a/hathor/sysctl/p2p/manager.py b/hathor/sysctl/p2p/manager.py
index 9f9856a42..7b66407e4 100644
--- a/hathor/sysctl/p2p/manager.py
+++ b/hathor/sysctl/p2p/manager.py
@@ -243,20 +243,17 @@ def set_kill_connection(self, peer_id: str, force: bool = False) -> None:
 
     def get_hostname(self) -> str | None:
         """Return the configured hostname."""
-        assert self.connections.manager is not None
-        return self.connections.manager.hostname
+        return self.connections.hostname
 
     def set_hostname(self, hostname: str) -> None:
         """Set the hostname and reset all connections."""
-        assert self.connections.manager is not None
-        self.connections.manager.set_hostname_and_reset_connections(hostname)
+        self.connections.set_hostname_and_reset_connections(hostname)
 
     def refresh_auto_hostname(self) -> None:
         """ Automatically discover the hostname and set it, if it's found. This operation blocks the event loop.
 
         Then, reset all connections.
         """
-        assert self.connections.manager is not None
         try:
             hostname = discover_hostname(timeout=AUTO_HOSTNAME_TIMEOUT_SECONDS)
         except Exception as e:
@@ -264,7 +261,7 @@ def refresh_auto_hostname(self) -> None:
             return
 
         if hostname:
-            self.connections.manager.set_hostname_and_reset_connections(hostname)
+            self.connections.set_hostname_and_reset_connections(hostname)
 
     def reload_entrypoints_and_connections(self) -> None:
         """Kill all connections and reload entrypoints from the peer config file."""
diff --git a/tests/p2p/test_bootstrap.py b/tests/p2p/test_bootstrap.py
index 13fca9941..58b27e24a 100644
--- a/tests/p2p/test_bootstrap.py
+++ b/tests/p2p/test_bootstrap.py
@@ -54,6 +54,7 @@ def test_mock_discovery(self) -> None:
             whitelist_only=True,
             rng=self.rng,
             ssl=True,
+            capabilities=self._settings.get_default_capabilities()
         )
         host_ports1 = [
             ('foobar', 1234),
@@ -83,6 +84,7 @@ def test_dns_discovery(self) -> None:
             whitelist_only=True,
             rng=self.rng,
             ssl=True,
+            capabilities=self._settings.get_default_capabilities()
         )
         bootstrap_a = [
             '127.0.0.99',
diff --git a/tests/p2p/test_get_best_blockchain.py b/tests/p2p/test_get_best_blockchain.py
index ff0d95149..d8c7aed3f 100644
--- a/tests/p2p/test_get_best_blockchain.py
+++ b/tests/p2p/test_get_best_blockchain.py
@@ -204,11 +204,11 @@ def test_node_without_get_best_blockchain_capability(self) -> None:
         manager1 = self.create_peer()
         manager2 = self.create_peer()
 
-        cababilities_without_get_best_blockchain = [
+        capabilities_without_get_best_blockchain = [
             self._settings.CAPABILITY_WHITELIST,
             self._settings.CAPABILITY_SYNC_VERSION,
         ]
-        manager2.capabilities = cababilities_without_get_best_blockchain
+        manager2.connections.capabilities = capabilities_without_get_best_blockchain
 
         conn12 = FakeConnection(manager1, manager2, latency=0.05)
         self.simulator.add_connection(conn12)
@@ -222,9 +222,9 @@ def test_node_without_get_best_blockchain_capability(self) -> None:
 
         # assert the peers have the proper capabilities
         protocol2 = connected_peers1[0]
-        self.assertTrue(protocol2.capabilities.issuperset(set(cababilities_without_get_best_blockchain)))
+        self.assertTrue(protocol2.capabilities.issuperset(set(capabilities_without_get_best_blockchain)))
         protocol1 = connected_peers2[0]
-        default_capabilities = manager2.get_default_capabilities()
+        default_capabilities = self._settings.get_default_capabilities()
         self.assertTrue(protocol1.capabilities.issuperset(set(default_capabilities)))
 
         # assert the peers don't engage in get_best_blockchain messages
diff --git a/tests/p2p/test_whitelist.py b/tests/p2p/test_whitelist.py
index db854ff63..c17690a5b 100644
--- a/tests/p2p/test_whitelist.py
+++ b/tests/p2p/test_whitelist.py
@@ -46,7 +46,7 @@ def test_sync_v11_whitelist_yes_no(self) -> None:
         manager2 = self.create_peer(network)
         self.assertEqual(manager2.connections.get_enabled_sync_versions(), {SyncVersion.V1_1})
 
-        manager1.peers_whitelist.append(manager2.my_peer.id)
+        manager1.connections.peers_whitelist.append(manager2.my_peer.id)
 
         conn = FakeConnection(manager1, manager2)
         self.assertFalse(conn.tr1.disconnecting)
@@ -70,8 +70,8 @@ def test_sync_v11_whitelist_yes_yes(self) -> None:
         manager2 = self.create_peer(network)
         self.assertEqual(manager2.connections.get_enabled_sync_versions(), {SyncVersion.V1_1})
 
-        manager1.peers_whitelist.append(manager2.my_peer.id)
-        manager2.peers_whitelist.append(manager1.my_peer.id)
+        manager1.connections.peers_whitelist.append(manager2.my_peer.id)
+        manager2.connections.peers_whitelist.append(manager1.my_peer.id)
 
         conn = FakeConnection(manager1, manager2)
         self.assertFalse(conn.tr1.disconnecting)
diff --git a/tests/resources/p2p/test_status.py b/tests/resources/p2p/test_status.py
index 68d409348..cca3ed292 100644
--- a/tests/resources/p2p/test_status.py
+++ b/tests/resources/p2p/test_status.py
@@ -18,8 +18,8 @@ def setUp(self):
         self.web = StubSite(StatusResource(self.manager))
         self.entrypoint = Entrypoint.parse('tcp://192.168.1.1:54321')
         self.manager.connections.my_peer.info.entrypoints.append(self.entrypoint)
-        self.manager.peers_whitelist.append(self.get_random_peer_from_pool().id)
-        self.manager.peers_whitelist.append(self.get_random_peer_from_pool().id)
+        self.manager.connections.peers_whitelist.append(self.get_random_peer_from_pool().id)
+        self.manager.connections.peers_whitelist.append(self.get_random_peer_from_pool().id)
 
         self.manager2 = self.create_peer('testnet')
         self.manager2.connections.my_peer.info.entrypoints.append(self.entrypoint)
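Taken together, the diff moves capabilities, hostname, and the peer whitelist from HathorManager into ConnectionsManager, replaces set_manager() with finalize_factories(), and relocates the default-capability list onto HathorSettings. The sketch below illustrates the resulting wiring order; the helper name build_p2p_manager and its parameters are illustrative only, and the P2P dependencies object and the SyncSupportLevel arguments are assumed to be built elsewhere, as in the builders above.

from hathor.p2p.manager import ConnectionsManager


def build_p2p_manager(p2p_dependencies, peer, rng, settings, hostname=None):
    # Hypothetical helper: capabilities and hostname are now constructor
    # arguments of ConnectionsManager rather than attributes of HathorManager.
    p2p_manager = ConnectionsManager(
        dependencies=p2p_dependencies,
        my_peer=peer,
        ssl=True,
        whitelist_only=False,
        rng=rng,
        capabilities=settings.get_default_capabilities(),
        hostname=hostname,
    )
    # Sync factories can only be registered before finalization;
    # add_sync_factory() asserts on _finalized_factories afterwards.
    # SyncSupportLevel.add_factories(p2p_manager, ...)  # arguments elided here
    p2p_manager.finalize_factories()  # replaces the old p2p_manager.set_manager(manager)
    return p2p_manager

After start(), hostname changes still go through set_hostname_and_reset_connections(), which now lives on ConnectionsManager and refreshes the hostname entrypoints itself before dropping all connections.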