diff --git a/tests/TestHarness/Node.py b/tests/TestHarness/Node.py
index d96d5f5e29..869dd36f33 100644
--- a/tests/TestHarness/Node.py
+++ b/tests/TestHarness/Node.py
@@ -705,15 +705,44 @@ def linesInLog(self, searchStr):
             lines.append(line)
         return lines
 
-    def countInLog(self, searchStr) -> int:
+    # Verify that the unlinkable blocks hit during syncing are expected: returns True
+    # only if each group of consecutive unlinkable block numbers is no greater than the sync fetch span
+    def verifyUnlinkableBlocksExpected(self, syncFetchSpan) -> bool:
         dataDir=Utils.getNodeDataDir(self.nodeId)
         files=Node.findStderrFiles(dataDir)
-        count = 0
+
+        # A sample of unique line of unlinkable_block in logging file looks like:
+        # info 2024-11-14T13:48:06.038 nodeos net_plugin.cpp:3870 process_signed_block ] unlinkable_block_exception connection - 1: #130 1d74c43582d10251...: Unlinkable block (3030001)
+        pattern = re.compile(r"unlinkable_block_exception connection - \d+: #(\d+)")
+        for file in files:
+            blocks = []
             with open(file, 'r') as f:
-                contents = f.read()
-                count += contents.count(searchStr)
-        return count
+                for line in f:
+                    match = pattern.search(line)
+                    if match:
+                        try:
+                            blockNum = int(match.group(1))
+                            blocks.append(blockNum)
+                        except ValueError:
+                            Utils.Print(f"unlinkable block number cannot be converted into integer: in {line.strip()} of {file}")
+                            return False
+            blocks.sort() # blocks from multiple connections might be out of order
+            numConsecutiveUnlinkableBlocks = 0 if len(blocks) == 0 else 1 # numConsecutiveUnlinkableBlocks is at least 1 if len(blocks) > 0
+            for i in range(1, len(blocks)):
+                if blocks[i] == blocks[i - 1] or blocks[i] == blocks[i - 1] + 1: # look for consecutive blocks, including duplicate
+                    if blocks[i] == blocks[i - 1] + 1: # excluding duplicate
+                        numConsecutiveUnlinkableBlocks += 1
+                else: # start a new group of consecutive blocks
+                    if numConsecutiveUnlinkableBlocks > syncFetchSpan:
+                        Utils.Print(f"the number of a group of unlinkable blocks {numConsecutiveUnlinkableBlocks} greater than syncFetchSpan {syncFetchSpan} in {file}")
+                        return False
+                    numConsecutiveUnlinkableBlocks = 1
+            if numConsecutiveUnlinkableBlocks > syncFetchSpan: # check the last group of consecutive blocks
+                Utils.Print(f"the number of a group of unlinkable blocks {numConsecutiveUnlinkableBlocks} greater than syncFetchSpan {syncFetchSpan} in {file}")
+                return False
+        else: # no break in the loop, so all files passed the check
+            return True
 
     # Verify that we have only one "Starting block" in the log for any block number unless:
     # - the block was restarted because it was exhausted,
diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py
index 669e1d7233..00774a84ad 100755
--- a/tests/nodeos_startup_catchup.py
+++ b/tests/nodeos_startup_catchup.py
@@ -248,12 +248,8 @@ def waitForNodeStarted(node):
     # See https://github.com/AntelopeIO/spring/issues/81 for fix to reduce the number of expected unlinkable blocks
     # Test verifies LIB is advancing, check to see that not too many unlinkable block exceptions are generated
     # while syncing up to head.
-    numUnlinkable = catchupNode.countInLog("unlinkable_block")
-    numUnlinkableAllowed = 500
-    Print(f"Node{catchupNodeNum} has {numUnlinkable} unlinkable_block in {catchupNode.data_dir}")
-    if numUnlinkable > numUnlinkableAllowed:
-        errorExit(f"Node{catchupNodeNum} has {numUnlinkable} which is more than the configured "
-                  f"allowed {numUnlinkableAllowed} unlinkable blocks: {catchupNode.data_dir}.")
+    if not catchupNode.verifyUnlinkableBlocksExpected(sync_fetch_span):
+        errorExit("unlinkable blocks are not expected") # details already logged in verifyUnlinkableBlocksExpected
     testSuccessful=True