Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[1.0.4] Fix nodeos_startup_catchup test #1033

Merged
merged 6 commits into from
Nov 21, 2024
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
38 changes: 33 additions & 5 deletions tests/TestHarness/Node.py
Original file line number Diff line number Diff line change
Expand Up @@ -705,15 +705,43 @@ def linesInLog(self, searchStr):
lines.append(line)
return lines

# Verify that during syncing, unlinkable blocks are expected if
# the number of each group of consecutive unlinkable blocks is less than sync fetch span
def verifyUnlinkableBlocksExpected(self, syncFetchSpan) -> bool:
    """Scan this node's stderr log files for unlinkable_block_exception entries.

    Returns True when every run of consecutive (or duplicated) unlinkable block
    numbers is no longer than syncFetchSpan; returns False (with a diagnostic
    printed) when any run exceeds the span, or when a logged block number
    cannot be parsed as an integer.
    """
    dataDir = Utils.getNodeDataDir(self.nodeId)
    files = Node.findStderrFiles(dataDir)

    # A sample of unique line of unlinkable_block in logging file looks like:
    # info  2024-11-14T13:48:06.038 nodeos    net_plugin.cpp:3870           process_signed_block ] unlinkable_block_exception connection - 1: #130 1d74c43582d10251...: Unlinkable block (3030001)
    pattern = re.compile(r"unlinkable_block_exception connection - \d+: #(\d+)")

    for file in files:
        blocks = []
        with open(file, 'r') as f:
            for line in f:
                match = pattern.search(line)
                if match:
                    try:
                        blockNum = int(match.group(1))
                        blocks.append(blockNum)
                    except ValueError:
                        # {file} (the path), not the open handle, so the message is readable
                        Utils.Print(f"unlinkable block number cannot be converted into integer: in {line.strip()} of {file}")
                        return False

        # numConsecutiveUnlinkableBlocks is at least 1 if len(blocks) > 0
        numConsecutiveUnlinkableBlocks = 0 if len(blocks) == 0 else 1
        for i in range(1, len(blocks)):
            if blocks[i] == blocks[i - 1] or blocks[i] == blocks[i - 1] + 1:  # look for consecutive blocks, including duplicate
                if blocks[i] == blocks[i - 1] + 1:  # excluding duplicate
                    # NOTE: must be "+= 1"; the original "++x" is a double unary
                    # plus in Python (a no-op), so the counter never advanced.
                    numConsecutiveUnlinkableBlocks += 1
            else:  # start a new group of consecutive blocks
                if numConsecutiveUnlinkableBlocks > syncFetchSpan:
                    Utils.Print(f"the number of a group of unlinkable blocks {numConsecutiveUnlinkableBlocks} greater than syncFetchSpan {syncFetchSpan} in {file}")
                    return False
                numConsecutiveUnlinkableBlocks = 1
        # check the final (or only) group in this file
        if numConsecutiveUnlinkableBlocks > syncFetchSpan:
            Utils.Print(f"the number of a group of unlinkable blocks {numConsecutiveUnlinkableBlocks} greater than syncFetchSpan {syncFetchSpan} in {file}")
            return False

    # only succeed after every stderr file has been checked (the original
    # returned True after the first file, skipping the rest)
    return True

# Verify that we have only one "Starting block" in the log for any block number unless:
# - the block was restarted because it was exhausted,
Expand Down
8 changes: 2 additions & 6 deletions tests/nodeos_startup_catchup.py
Original file line number Diff line number Diff line change
Expand Up @@ -248,12 +248,8 @@ def waitForNodeStarted(node):
# See https://github.com/AntelopeIO/spring/issues/81 for fix to reduce the number of expected unlinkable blocks
# Test verifies LIB is advancing, check to see that not too many unlinkable block exceptions are generated
# while syncing up to head.
numUnlinkable = catchupNode.countInLog("unlinkable_block")
numUnlinkableAllowed = 500
Print(f"Node{catchupNodeNum} has {numUnlinkable} unlinkable_block in {catchupNode.data_dir}")
if numUnlinkable > numUnlinkableAllowed:
errorExit(f"Node{catchupNodeNum} has {numUnlinkable} which is more than the configured "
f"allowed {numUnlinkableAllowed} unlinkable blocks: {catchupNode.data_dir}.")
if not catchupNode.verifyUnlinkableBlocksExpected(sync_fetch_span):
errorExit(f"unlinkable blocks are not expected") # details already logged in verifyUnlinkableBlocksExpected

testSuccessful=True

Expand Down