Skip to content

Commit

Permalink
Fixes for rotation and activation logic for global sync
Browse files Browse the repository at this point in the history
basic fix to make sure we sync globals again after we get all the account data
extract a more simple core from calculateToAccept to support better test and sim options

first pass on syncing logic changes

needs some follow up consideration and testing

update the sync floor and max add logic

some updated logs and wip debug global sync

log updates and some new clamping

Support for better activation catch-up; logging and comment improvements.

a few more updates and cleanup

ITN3 rotation safe settings
  • Loading branch information
afostr authored and mhanson-github committed Dec 10, 2024
1 parent fad8e05 commit 72076bd
Show file tree
Hide file tree
Showing 10 changed files with 368 additions and 85 deletions.
5 changes: 5 additions & 0 deletions src/config/server.ts
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,12 @@ const SERVER_CONFIG: StrictServerConfiguration = {
rotationPercentActive: 0.001, //rotate 0.1% of active nodes per cycle when in a steady processing state
rotationMaxAddPercent: 0.1,
rotationMaxRemovePercent: 0.05,
syncFloorEnabled: false, //DEBUG=true, ITN initially false for rotation safety
syncingMaxAddPercent: 0.2,
syncingDesiredMinCount: 50, //Debug=5, ITN = 50
allowActivePerCycle: 7,
allowActivePerCycleRecover: 4,
activeRecoveryEnabled: false, //Debug=true, ITN initially false for rotation safety
useProxyForDownCheck: false,
numCheckerNodes: 1,
minChecksForDown: 1,
Expand Down
36 changes: 36 additions & 0 deletions src/logger/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,8 @@ interface Logger {

_playbackLogger: any

_mainLogger: any

_seenAddresses: any
_shortStrings: any
_playbackOwner_host: any
Expand Down Expand Up @@ -114,6 +116,10 @@ export type LogFlags = {
txCancel: boolean // extra logging for TXs that get canceled

getLocalOrRemote: boolean // special logging for getLocalOrRemote

verboseNestedCounters: boolean // extra logging for nested counters

node_rotation_debug: boolean // extra logging for node rotation math
}

export let logFlags: LogFlags = {
Expand Down Expand Up @@ -153,6 +159,10 @@ export let logFlags: LogFlags = {
txCancel: false,

getLocalOrRemote: false,

verboseNestedCounters: false,

node_rotation_debug: false,
}

const filePath1 = path.join(process.cwd(), 'data-logs', 'cycleRecords1.txt')
Expand Down Expand Up @@ -217,6 +227,7 @@ class Logger {
this.getLogger('main').info('Logger initialized.')

this._playbackLogger = this.getLogger('playback')
this._mainLogger = this.getLogger('main')

this.setupLogControlValues()

Expand Down Expand Up @@ -674,6 +685,31 @@ class Logger {

console.log(`base logFlags: ` + Utils.safeStringify(logFlags))
}

/**
 * Write a key-tagged message to the main log at the given level.
 * @param level logger level method name, e.g. 'debug' — presumably a valid level method on this._mainLogger; TODO confirm
 * @param key unique tag identifying the call site (used as a message prefix for now)
 * @param message pre-formatted message text
 */
mainLog(level: string, key: string, message: string): void {
  //initially this will just go to a main log but we could put this in
  //a json blob with the key and send it to a different logging service
  this._mainLogger[level](key + ' ' + message)
}

/**
 * Convenience wrapper that logs at debug level with a DBG_-prefixed key.
 * @param key unique tag identifying the call site
 * @param message pre-formatted message text
 */
mainLog_debug(key: string, message: string): void {
  //note will change the key to be an array later and remove the DBG prefix
  this.mainLog('debug', `DBG_${key}`, message)
}

/**
 * Join an arbitrary argument list into one space-separated string.
 * Objects are serialized via Utils.safeStringify; everything else is
 * coerced with String().
 * @param args values to combine
 * @returns the combined, space-separated string
 */
combine(...args: any[]): string {
  const parts: string[] = []
  for (const value of args) {
    parts.push(typeof value === 'object' ? Utils.safeStringify(value) : String(value))
  }
  return parts.join(' ')
}


}

export default Logger
1 change: 1 addition & 0 deletions src/p2p/Active.ts
Original file line number Diff line number Diff line change
Expand Up @@ -142,6 +142,7 @@ export function updateRecord(
const activatedPublicKeys = []

if (NodeList.readyByTimeAndIdOrder.length > 0) {
// ITN3 example: if in processing mode this will pick allowActivePerCycle = 1 nodes
const selectedNodes = selectNodesFromReadyList(_prev.mode)
for (const node of selectedNodes) {
/* prettier-ignore */ nestedCountersInstance.countEvent('p2p', `active:updateRecord node added to activated`)
Expand Down
34 changes: 17 additions & 17 deletions src/p2p/CycleAutoScale.ts
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ export function init() {
}

export function reset() {
/* prettier-ignore */ if (logFlags && logFlags.verbose) console.log( 'Resetting auto-scale module', `Cycle ${CycleCreator.currentCycle}, Quarter: ${CycleCreator.currentQuarter}`)
/* prettier-ignore */ if (logFlags?.verbose) logger.mainLog_debug('RESET_1', logger.combine('Resetting auto-scale module', `Cycle ${CycleCreator.currentCycle}, Quarter: ${CycleCreator.currentQuarter}`))
scalingRequested = false
scalingRequestsCollector = new Map()
requestedScalingType = null
Expand Down Expand Up @@ -147,7 +147,7 @@ export function requestNetworkUpsize() {
return
}

console.log('DBG', 'UPSIZE!')
/* prettier-ignore */ if (logFlags?.verbose) logger.mainLog_debug('REQUESTNETWORKUPSIZE_1', 'CycleAutoScale: UPSIZE!')
_requestNetworkScaling(P2P.CycleAutoScaleTypes.ScaleType.UP)
}

Expand All @@ -161,7 +161,7 @@ export function requestNetworkDownsize() {
return
}

console.log('DBG', 'DOWNSIZE!')
/* prettier-ignore */ if (logFlags?.verbose) logger.mainLog_debug('REQUESTNETWORKDOWNSIZE_1', 'CycleAutoScale: DOWNSIZE!')
_requestNetworkScaling(P2P.CycleAutoScaleTypes.ScaleType.DOWN)
}

Expand Down Expand Up @@ -296,13 +296,7 @@ function _checkScaling() {

// If we haven't approved an scale type, check if we should scale down
if (!changed) {
// if (approvedScalingType === P2P.CycleAutoScaleTypes.ScaleType.DOWN) {
// warn(
// 'Already set to scale down for this cycle. No need to scale down anymore.'
// )
// return
// }
/* prettier-ignore */ if (logFlags && logFlags.verbose) console.log("CycleAutoScale: scale up not approved")
/* prettier-ignore */ if (logFlags?.verbose) logger.mainLog_debug('CHECKSCALING_1', 'CycleAutoScale: scale up not approved')
if (scaleDownRequests.length >= requiredVotes) {
approvedScalingType = P2P.CycleAutoScaleTypes.ScaleType.DOWN
changed = true
Expand Down Expand Up @@ -341,7 +335,7 @@ function _checkScaling() {
error(new Error(`Invalid scaling flag after changing flag. Flag: ${approvedScalingType}`))
return
}
console.log('newDesired', newDesired)
/* prettier-ignore */ if (logFlags?.verbose) logger.mainLog_debug('CHECKSCALING_2', logger.combine('newDesired', newDesired))
}

function setDesiredCount(count: number) {
Expand Down Expand Up @@ -390,18 +384,18 @@ function setAndGetTargetCount(prevRecord: P2P.CycleCreatorTypes.CycleRecord): nu
}
}
} else if (prevRecord.mode === 'processing') {
/* prettier-ignore */ if (logFlags && logFlags.verbose) console.log("CycleAutoScale: in processing")
/* prettier-ignore */ if (logFlags?.verbose) logger.mainLog_debug('SETANDGETTARGETCOUNT_PROCESSING_1', "CycleAutoScale: in processing")
if (enterSafety(active) === false && enterRecovery(active) === false) {
/* prettier-ignore */ if (logFlags && logFlags.verbose) console.log("CycleAutoScale: not in safety")
/* prettier-ignore */ if (logFlags?.verbose) logger.mainLog_debug('SETANDGETTARGETCOUNT_PROCESSING_2', "CycleAutoScale: not in safety")
let addRem = (desired - prevRecord.target) * 0.1
/* prettier-ignore */ if (logFlags && logFlags.verbose) console.log(`addRem: ${addRem}, desired: ${desired}, prevTarget: ${prevRecord.target}`)
/* prettier-ignore */ if (logFlags?.verbose) logger.mainLog_debug('SETANDGETTARGETCOUNT_PROCESSING_3', `addRem: ${addRem}, desired: ${desired}, prevTarget: ${prevRecord.target}`)
if (addRem > active * 0.01) {
addRem = active * 0.01
}
if (addRem < 0 - active * 0.005) {
addRem = 0 - active * 0.005
}
/* prettier-ignore */ if (logFlags && logFlags.verbose) console.log(`CycleAutoScale: prev target is ${prevRecord.target} and addRem is ${addRem}`)
/* prettier-ignore */ if (logFlags?.verbose) logger.mainLog_debug('SETANDGETTARGETCOUNT_PROCESSING_4', `CycleAutoScale: prev target is ${prevRecord.target} and addRem is ${addRem}`)
targetCount = prevRecord.target + addRem
// may want to swap config values to values from cycle record
if (targetCount < config.p2p.minNodes) {
Expand All @@ -422,7 +416,13 @@ function setAndGetTargetCount(prevRecord: P2P.CycleCreatorTypes.CycleRecord): nu
targetCount = config.p2p.minNodes + config.p2p.extraNodesToAddInRestart
} else if (prevRecord.mode === 'restart') {
// In restart mode, all the nodes remain in 'selected?' mode until the desired number of nodes are reached
/* prettier-ignore */ if (logFlags && logFlags.verbose) console.log("CycleAutoScale: in restart")

//Instruction: I have a bunch of logs like this but I want to upgrade them to call logger.mainLog_debug.
/* prettier-ignore */ //if (logFlags && logFlags.verbose) console.log("CycleAutoScale: in restart")
//Instruction: here is what I want the log to look like. Note the first argument is a unique key. The function name in all caps followed by some context and then an int that increments so we don't have dupes will work
/* prettier-ignore */ if (logFlags?.verbose) logger.mainLog_debug('SETANDGETTARGETCOUNT_RESTART_1',"CycleAutoScale: in restart")


if (syncing < desired + config.p2p.extraNodesToAddInRestart) {
/* prettier-ignore */ if (logFlags && logFlags.verbose) console.log("CycleAutoScale: entered syncing < desired")
let add = ~~(0.5 * syncing) // Add 50% more nodes on each cycle
Expand All @@ -439,7 +439,7 @@ function setAndGetTargetCount(prevRecord: P2P.CycleCreatorTypes.CycleRecord): nu
/* prettier-ignore */ if (logFlags && logFlags.verbose) console.log("CycleAutoScale: in Self.isFirst condition")
targetCount = config.p2p.formingNodesPerCycle
}
/* prettier-ignore */ if (logFlags && logFlags.verbose) console.log("CycleAutoScale: target count is ", targetCount)
/* prettier-ignore */ if (logFlags?.verbose) logger.mainLog_debug('SETANDGETTARGETCOUNT_1', logger.combine('CycleAutoScale: target count is', targetCount))
return targetCount
}

Expand Down
1 change: 1 addition & 0 deletions src/p2p/CycleCreator.ts
Original file line number Diff line number Diff line change
Expand Up @@ -294,6 +294,7 @@ function updateScaleFactor() {
scaleFactorSyncBoost = 1
}

//ITN3 example numbers (128 / 5) * (640 / 100) = 25.6 * 6.4 = 163.84
scaleFactor = Math.max((consensusRange / consenusParSize) * (activeNodeCount / networkParSize), 1)
}

Expand Down
24 changes: 20 additions & 4 deletions src/p2p/Join/v2/syncFinished.ts
Original file line number Diff line number Diff line change
Expand Up @@ -124,14 +124,30 @@ export function isNodeSelectedReadyList(nodeId: string): boolean {

export function selectNodesFromReadyList(mode: string): P2P.NodeListTypes.Node[] {
if (mode === 'processing') {
let nodesToAllowActive = config.p2p.allowActivePerCycle

if(config.p2p.activeRecoveryEnabled){
// check if we are below desired allow more nodes to join
if(CycleChain.newest != null){
const active = CycleChain.newest.active
const desired = CycleChain.newest.desired
const deficit = desired - active
if(deficit > 0){
// This code is rotation safe because if allowActivePerCycleRecover is set to 1
// and allowActivePerCycle is set to 1 we will have the same boost
const boost = Math.min(config.p2p.allowActivePerCycleRecover, deficit)
// apply the boost
nodesToAllowActive = Math.max(nodesToAllowActive, boost)
}
}
}

if (config.debug.readyNodeDelay > 0) {
nestedCountersInstance.countEvent('p2p', `selectNodesFromReadyList: only returning nodes from the ready list that were added at least ${config.debug.readyNodeDelay} seconds ago`)
return NodeList.readyByTimeAndIdOrder
.slice(0, config.p2p.allowActivePerCycle)
.filter((node) => CycleChain.newest.start >= node.readyTimestamp + config.debug.readyNodeDelay)
return NodeList.readyByTimeAndIdOrder.slice(0, config.p2p.allowActivePerCycle).filter((node) => CycleChain.newest.start >= node.readyTimestamp + config.debug.readyNodeDelay)
}

return NodeList.readyByTimeAndIdOrder.slice(0, config.p2p.allowActivePerCycle)
return NodeList.readyByTimeAndIdOrder.slice(0, nodesToAllowActive)
} else {
if (mode === 'forming' && isFirst && NodeList.activeByIdOrder.length === 0) return NodeList.readyByTimeAndIdOrder

Expand Down
Loading

0 comments on commit 72076bd

Please sign in to comment.