diff --git a/cmd/fuzz.go b/cmd/fuzz.go
index c9905b84..7ff660d4 100644
--- a/cmd/fuzz.go
+++ b/cmd/fuzz.go
@@ -161,7 +161,7 @@ func cmdRunFuzz(cmd *cobra.Command, args []string) error {
 	signal.Notify(c, os.Interrupt)
 	go func() {
 		<-c
-		fuzzer.Stop()
+		fuzzer.Terminate()
 	}()
 
 	// Start the fuzzing process with our cancellable context.
@@ -170,8 +170,8 @@ func cmdRunFuzz(cmd *cobra.Command, args []string) error {
 		return exitcodes.NewErrorWithExitCode(fuzzErr, exitcodes.ExitCodeHandledError)
 	}
 
-	// If we have no error and failed test cases, we'll want to return a special exit code
-	if fuzzErr == nil && len(fuzzer.TestCasesWithStatus(fuzzing.TestCaseStatusFailed)) > 0 {
+	// If we have failed test cases, we'll want to return a special exit code
+	if len(fuzzer.TestCasesWithStatus(fuzzing.TestCaseStatusFailed)) > 0 {
 		return exitcodes.NewErrorWithExitCode(fuzzErr, exitcodes.ExitCodeTestFailed)
 	}
 
diff --git a/fuzzing/fuzzer.go b/fuzzing/fuzzer.go
index 95e7877b..6f59ca05 100644
--- a/fuzzing/fuzzer.go
+++ b/fuzzing/fuzzer.go
@@ -43,11 +43,18 @@ import (
 // Fuzzer represents an Ethereum smart contract fuzzing provider.
 type Fuzzer struct {
-	// ctx describes the context for the fuzzing run, used to cancel running operations.
+	// ctx is the main context used by the fuzzer.
 	ctx context.Context
-	// ctxCancelFunc describes a function which can be used to cancel the fuzzing operations ctx tracks.
+	// ctxCancelFunc describes a function which can be used to cancel the fuzzing operations the main ctx tracks.
+	// Cancelling ctx does _not_ guarantee that all operations will terminate.
 	ctxCancelFunc context.CancelFunc
+	// emergencyCtx is the context that is used by the fuzzer to react to OS-level interrupts (e.g. SIGINT) or errors.
+	emergencyCtx context.Context
+	// emergencyCtxCancelFunc describes a function which can be used to cancel the fuzzing operations due to an OS-level
+	// interrupt or an error. Cancelling emergencyCtx will guarantee that all operations will terminate.
+	emergencyCtxCancelFunc context.CancelFunc
+
 	// config describes the project configuration which the fuzzing is targeting.
 	config config.ProjectConfig
 	// senders describes a set of account addresses used to send state changing calls in fuzzing campaigns.
@@ -652,8 +659,8 @@ func (f *Fuzzer) spawnWorkersLoop(baseTestChain *chain.TestChain) error {
 		}
 	}
 
-	// Define a flag that indicates whether we have not cancelled o
-	working := !utils.CheckContextDone(f.ctx)
+	// Define a flag that indicates whether we have cancelled fuzzing or not
+	working := !(utils.CheckContextDone(f.ctx) || utils.CheckContextDone(f.emergencyCtx))
 
 	// Create workers and start fuzzing.
 	var err error
@@ -715,9 +722,9 @@ func (f *Fuzzer) spawnWorkersLoop(baseTestChain *chain.TestChain) error {
 		}(workerSlotInfo)
 	}
 
-	// Explicitly call cancel on our context to ensure all threads exit if we encountered an error.
-	if f.ctxCancelFunc != nil {
-		f.ctxCancelFunc()
+	// Explicitly call cancel on our emergency context to ensure all threads exit if we encountered an error.
+	if err != nil {
+		f.Terminate()
 	}
 
 	// Wait for every worker to be freed, so we don't have a race condition when reporting the order
@@ -748,8 +755,9 @@ func (f *Fuzzer) Start() error {
 	// While we're fuzzing, we'll want to have an initialized random provider.
 	f.randomProvider = rand.New(rand.NewSource(time.Now().UnixNano()))
 
-	// Create our running context (allows us to cancel across threads)
+	// Create our main and emergency running context (allows us to cancel across threads)
 	f.ctx, f.ctxCancelFunc = context.WithCancel(context.Background())
+	f.emergencyCtx, f.emergencyCtxCancelFunc = context.WithCancel(context.Background())
 
 	// If we set a timeout, create the timeout context now, as we're about to begin fuzzing.
 	if f.config.Fuzzing.Timeout > 0 {
@@ -904,15 +912,26 @@ func (f *Fuzzer) Start() error {
 	return err
 }
 
-// Stop stops a running operation invoked by the Start method. This method may return before complete operation teardown
-// occurs.
+// Stop attempts to stop all running operations invoked by the Start method. Note that Stop is not guaranteed to fully
+// terminate the operations across all threads. For example, the optimization testing provider may request a thread to
+// shrink some call sequences before the thread is torn down. Stop will not prevent those shrink requests from
+// executing. An OS-level interrupt must be used to guarantee the stopping of _all_ operations (see Terminate).
 func (f *Fuzzer) Stop() {
-	// Call the cancel function on our running context to stop all working goroutines
+	// Call the cancel function on our main running context to try to stop all working goroutines
 	if f.ctxCancelFunc != nil {
 		f.ctxCancelFunc()
 	}
 }
 
+// Terminate is called to react to an OS-level interrupt (e.g. SIGINT) or an error. This will stop all operations.
+// Note that this function will return before all operations are complete.
+func (f *Fuzzer) Terminate() {
+	// Call the cancel function on our emergency context to stop all working goroutines
+	if f.emergencyCtxCancelFunc != nil {
+		f.emergencyCtxCancelFunc()
+	}
+}
+
 // printMetricsLoop prints metrics to the console in a loop until ctx signals a stopped operation.
 func (f *Fuzzer) printMetricsLoop() {
 	// Define our start time
@@ -968,6 +987,7 @@ func (f *Fuzzer) printMetricsLoop() {
 			lastWorkerStartupCount = workerStartupCount
 
 			// If we reached our transaction threshold, halt
+			// TODO: We should move this logic somewhere else because it is weird that the metrics loop halts the fuzzer
 			testLimit := f.config.Fuzzing.TestLimit
 			if testLimit > 0 && (!callsTested.IsUint64() || callsTested.Uint64() >= testLimit) {
 				f.logger.Info("Transaction test limit reached, halting now...")
diff --git a/fuzzing/fuzzer_hooks.go b/fuzzing/fuzzer_hooks.go
index 11d0e450..2ec49fdf 100644
--- a/fuzzing/fuzzer_hooks.go
+++ b/fuzzing/fuzzer_hooks.go
@@ -57,6 +57,8 @@ type CallSequenceTestFunc func(worker *FuzzerWorker, callSequence calls.CallSequ
 
 // ShrinkCallSequenceRequest is a structure signifying a request for a shrunken call sequence from the FuzzerWorker.
 type ShrinkCallSequenceRequest struct {
+	// CallSequenceToShrink represents the _original_ CallSequence that needs to be shrunk
+	CallSequenceToShrink calls.CallSequence
 	// VerifierFunction is a method is called upon by a FuzzerWorker to check if a shrunken call sequence satisfies
 	// the needs of an original method.
 	VerifierFunction func(worker *FuzzerWorker, callSequence calls.CallSequence) (bool, error)
diff --git a/fuzzing/fuzzer_worker.go b/fuzzing/fuzzer_worker.go
index 6452fece..d62adcb7 100644
--- a/fuzzing/fuzzer_worker.go
+++ b/fuzzing/fuzzer_worker.go
@@ -43,6 +43,11 @@ type FuzzerWorker struct {
 	// pureMethods is a list of contract functions which are side-effect free with respect to the EVM (view and/or pure in terms of Solidity mutability).
 	pureMethods []fuzzerTypes.DeployedContractMethod
+	// shrinkCallSequenceRequests is a list of ShrinkCallSequenceRequest that will be handled in the next iteration of
+	// the fuzzing loop. In the future we can generalize this to any type of "request" that must be handled immediately
+	// before the execution of the next call sequence.
+	shrinkCallSequenceRequests []ShrinkCallSequenceRequest
+
 	// randomProvider provides random data as inputs to decisions throughout the worker.
 	randomProvider *rand.Rand
 	// sequenceGenerator creates entirely new or mutated call sequences based on corpus call sequences, for use in
@@ -82,14 +87,15 @@ func newFuzzerWorker(fuzzer *Fuzzer, workerIndex int, randomProvider *rand.Rand)
 	// Create a new worker with the data provided.
 	worker := &FuzzerWorker{
-		workerIndex:          workerIndex,
-		fuzzer:               fuzzer,
-		deployedContracts:    make(map[common.Address]*fuzzerTypes.Contract),
-		stateChangingMethods: make([]fuzzerTypes.DeployedContractMethod, 0),
-		pureMethods:          make([]fuzzerTypes.DeployedContractMethod, 0),
-		coverageTracer:       nil,
-		randomProvider:       randomProvider,
-		valueSet:             valueSet,
+		workerIndex:                workerIndex,
+		fuzzer:                     fuzzer,
+		deployedContracts:          make(map[common.Address]*fuzzerTypes.Contract),
+		stateChangingMethods:       make([]fuzzerTypes.DeployedContractMethod, 0),
+		pureMethods:                make([]fuzzerTypes.DeployedContractMethod, 0),
+		shrinkCallSequenceRequests: make([]ShrinkCallSequenceRequest, 0),
+		coverageTracer:             nil,
+		randomProvider:             randomProvider,
+		valueSet:                   valueSet,
 	}
 	worker.sequenceGenerator = NewCallSequenceGenerator(worker, callSequenceGenConfig)
 	worker.shrinkingValueMutator = shrinkingValueMutator
@@ -255,8 +261,8 @@ func (fw *FuzzerWorker) updateMethods() {
 // CallSequenceTestFunc registered with the parent Fuzzer to update any test results. If any call message in the
 // sequence is nil, a call message will be created in its place, targeting a state changing method of a contract
 // deployed in the Chain.
-// Returns the length of the call sequence tested, any requests for call sequence shrinking, or an error if one occurs.
-func (fw *FuzzerWorker) testNextCallSequence() (calls.CallSequence, []ShrinkCallSequenceRequest, error) {
+// Returns any requests for call sequence shrinking or an error if one occurs.
+func (fw *FuzzerWorker) testNextCallSequence() ([]ShrinkCallSequenceRequest, error) {
 	// We will make a copy of the worker's base value set so that we can rollback to it at the end of the call sequence
 	originalValueSet := fw.valueSet.Clone()
@@ -274,7 +280,7 @@ func (fw *FuzzerWorker) testNextCallSequence() (calls.CallSequence, []ShrinkCall
 	var isNewSequence bool
 	isNewSequence, err = fw.sequenceGenerator.InitializeNextSequence()
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 
 	// Define our shrink requests we'll collect during execution.
@@ -321,8 +327,8 @@ func (fw *FuzzerWorker) testNextCallSequence() (calls.CallSequence, []ShrinkCall
 		lastCallSequenceElement := currentlyExecutedSequence[len(currentlyExecutedSequence)-1]
 		fw.workerMetrics().gasUsed.Add(fw.workerMetrics().gasUsed, new(big.Int).SetUint64(lastCallSequenceElement.ChainReference.Block.MessageResults[lastCallSequenceElement.ChainReference.TransactionIndex].Receipt.GasUsed))
 
-		// If our fuzzer context is done, exit out immediately without results.
-		if utils.CheckContextDone(fw.fuzzer.ctx) {
+		// If our fuzzer context or the emergency context is cancelled, exit out immediately without results.
+		if utils.CheckContextDone(fw.fuzzer.ctx) || utils.CheckContextDone(fw.fuzzer.emergencyCtx) {
 			return true, nil
 		}
 
@@ -331,27 +337,27 @@ func (fw *FuzzerWorker) testNextCallSequence() (calls.CallSequence, []ShrinkCall
 	}
 
 	// Execute our call sequence.
-	testedCallSequence, err := calls.ExecuteCallSequenceIteratively(fw.chain, fetchElementFunc, executionCheckFunc)
+	_, err = calls.ExecuteCallSequenceIteratively(fw.chain, fetchElementFunc, executionCheckFunc)
 
 	// If we encountered an error, report it.
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}
 
 	// If our fuzzer context is done, exit out immediately without results.
-	if utils.CheckContextDone(fw.fuzzer.ctx) {
-		return nil, nil, nil
+	if utils.CheckContextDone(fw.fuzzer.ctx) || utils.CheckContextDone(fw.fuzzer.emergencyCtx) {
+		return nil, nil
 	}
 
 	// If this was not a new call sequence, indicate not to save the shrunken result to the corpus again.
 	if !isNewSequence {
-		for i := 0; i < len(shrinkCallSequenceRequests); i++ {
+		for i := 0; i < len(fw.shrinkCallSequenceRequests); i++ {
 			shrinkCallSequenceRequests[i].RecordResultInCorpus = false
 		}
 	}
 
 	// Return our results accordingly.
-	return testedCallSequence, shrinkCallSequenceRequests, nil
+	return shrinkCallSequenceRequests, nil
 }
 
 // testShrunkenCallSequence tests a provided shrunken call sequence to verify it continues to satisfy the provided
@@ -388,8 +394,10 @@ func (fw *FuzzerWorker) testShrunkenCallSequence(possibleShrunkSequence calls.Ca
 		return true, seqErr
 	}
 
-	// If our fuzzer context is done, exit out immediately without results.
-	if utils.CheckContextDone(fw.fuzzer.ctx) {
+	// If the emergency context is cancelled, we exit out immediately without results.
+	// We ignore the cancellation of the main context since, in some cases, we still want to shrink after the
+	// main context is cancelled.
+	if utils.CheckContextDone(fw.fuzzer.emergencyCtx) {
 		return true, nil
 	}
 
@@ -402,8 +410,10 @@ func (fw *FuzzerWorker) testShrunkenCallSequence(possibleShrunkSequence calls.Ca
 		return false, err
 	}
 
-	// If our fuzzer context is done, exit out immediately without results.
-	if utils.CheckContextDone(fw.fuzzer.ctx) {
+	// If the emergency context is cancelled, we exit out immediately without results.
+	// We ignore the cancellation of the main context since, in some cases, we still want to shrink after the
+	// main context is cancelled.
+	if utils.CheckContextDone(fw.fuzzer.emergencyCtx) {
 		return false, nil
 	}
 
@@ -427,15 +437,15 @@ func (fw *FuzzerWorker) testShrunkenCallSequence(possibleShrunkSequence calls.Ca
 //
 // Returns a call sequence that was optimized to include as little calls as possible to trigger the
 // expected conditions, or an error if one occurred.
-func (fw *FuzzerWorker) shrinkCallSequence(callSequence calls.CallSequence, shrinkRequest ShrinkCallSequenceRequest) (calls.CallSequence, error) {
+func (fw *FuzzerWorker) shrinkCallSequence(shrinkRequest ShrinkCallSequenceRequest) (calls.CallSequence, error) {
 	// Define a variable to track our most optimized sequence across all optimization iterations.
-	optimizedSequence := callSequence
+	optimizedSequence := shrinkRequest.CallSequenceToShrink
 
 	// Obtain our shrink limits and begin shrinking.
 	shrinkIteration := uint64(0)
 	shrinkLimit := fw.fuzzer.config.Fuzzing.ShrinkLimit
 	shrinkingEnded := func() bool {
-		return shrinkIteration >= shrinkLimit || utils.CheckContextDone(fw.fuzzer.ctx)
+		return shrinkIteration >= shrinkLimit || utils.CheckContextDone(fw.fuzzer.emergencyCtx)
 	}
 	if shrinkLimit > 0 {
 		// The first pass of shrinking is greedy towards trying to remove any unnecessary calls.
@@ -444,7 +454,7 @@ func (fw *FuzzerWorker) shrinkCallSequence(callSequence calls.CallSequence, shri
 		// 2) Add block/time delay to previous call (retain original block/time, possibly exceed max delays)
 		// At worst, this costs `2 * len(callSequence)` shrink iterations.
 		fw.workerMetrics().shrinking = true
-		fw.fuzzer.logger.Info(fmt.Sprintf("[Worker %d] Shrinking call sequence with %d call(s)", fw.workerIndex, len(callSequence)))
+		fw.fuzzer.logger.Info(fmt.Sprintf("[Worker %d] Shrinking call sequence with %d call(s)", fw.workerIndex, len(shrinkRequest.CallSequenceToShrink)))
 
 		for removalStrategy := 0; removalStrategy < 2 && !shrinkingEnded(); removalStrategy++ {
 			for i := len(optimizedSequence) - 1; i >= 0 && !shrinkingEnded(); i-- {
@@ -547,8 +557,9 @@ func (fw *FuzzerWorker) shrinkCallSequence(callSequence calls.CallSequence, shri
 }
 
 // run takes a base Chain in a setup state ready for testing, clones it, and begins executing fuzzed transaction calls
-// and asserting properties are upheld. This runs until Fuzzer.ctx cancels the operation.
-// Returns a boolean indicating whether Fuzzer.ctx has indicated we cancel the operation, and an error if one occurred.
+// and asserting properties are upheld. This runs until Fuzzer.ctx or Fuzzer.emergencyCtx cancels the operation.
+// Returns a boolean indicating whether Fuzzer.ctx or Fuzzer.emergencyCtx has indicated we cancel the operation, and an
+// error if one occurred.
 func (fw *FuzzerWorker) run(baseTestChain *chain.TestChain) (bool, error) {
 	// Clone our chain, attaching our necessary components for fuzzing post-genesis, prior to all blocks being copied.
 	// This means any tracers added or events subscribed to within this inner function are done so prior to chain
@@ -584,7 +595,7 @@ func (fw *FuzzerWorker) run(baseTestChain *chain.TestChain) (bool, error) {
 	// Defer the closing of the test chain object
 	defer fw.chain.Close()
 
-	// Emit an event indicating the worker has setup its chain.
+	// Emit an event indicating the worker has set up its chain.
 	err = fw.Events.FuzzerWorkerChainSetup.Publish(FuzzerWorkerChainSetupEvent{
 		Worker: fw,
 		Chain:  fw.chain,
@@ -600,13 +611,46 @@ func (fw *FuzzerWorker) run(baseTestChain *chain.TestChain) (bool, error) {
 	// to this state between testing.
 	fw.testingBaseBlockIndex = uint64(len(fw.chain.CommittedBlocks()))
 
-	// Enter the main fuzzing loop, restricting our memory database size based on our config variable.
-	// When the limit is reached, we exit this method gracefully, which will cause the fuzzing to recreate
-	// this worker with a fresh memory database.
+	// Enter the main fuzzing loop. Shrink requests are always handled first.
+	// While there are no shrink requests, we will execute call sequences, restricting our memory database size based
+	// on our config variable. When the limit is reached, we exit this method gracefully, which will cause the fuzzer
+	// to recreate this worker with a fresh memory database. Note that if fuzzing is cancelled/complete, we will
+	// execute any outstanding shrink requests and then exit.
 	sequencesTested := 0
+	fuzzingComplete := false
 	for sequencesTested <= fw.fuzzer.config.Fuzzing.WorkerResetLimit {
-		// If our context signalled to close the operation, exit our testing loop accordingly, otherwise continue.
+		// Immediately exit if the emergency context is triggered
+		if utils.CheckContextDone(fw.fuzzer.emergencyCtx) {
+			return true, nil
+		}
+
+		// If our main context signaled to close the operation, we will emit an event notifying any subscribers that
+		// this fuzzer worker is going to be shut down. This allows any subscriber (e.g. the optimization provider)
+		// one last opportunity to shrink a call sequence if necessary. This is why we do not return here if the
+		// main context says fuzzing is complete.
 		if utils.CheckContextDone(fw.fuzzer.ctx) {
+			fuzzingComplete = true
+			err = fw.Events.TestingComplete.Publish(FuzzerWorkerTestingCompleteEvent{
+				Worker: fw,
+			})
+			if err != nil {
+				return true, fmt.Errorf("error returned by an event handler when a worker emitted an event indicating testing is complete: %v", err)
+			}
+		}
+
+		// Run all shrink requests
+		for _, shrinkCallSequenceRequest := range fw.shrinkCallSequenceRequests {
+			_, err = fw.shrinkCallSequence(shrinkCallSequenceRequest)
+			if err != nil {
+				return false, err
+			}
+		}
+
+		// Clean up the shrink requests
+		fw.shrinkCallSequenceRequests = nil
+
+		// If we have cancelled fuzzing, return now
+		if fuzzingComplete {
 			return true, nil
 		}
@@ -619,20 +663,15 @@
 		}
 
 		// Test a new sequence
-		callSequence, shrinkVerifiers, err := fw.testNextCallSequence()
+		shrinkRequests, err := fw.testNextCallSequence()
 		if err != nil {
 			return false, err
 		}
 
-		// If we have any requests to shrink call sequences, do so now.
-		for _, shrinkVerifier := range shrinkVerifiers {
-			_, err = fw.shrinkCallSequence(callSequence, shrinkVerifier)
-			if err != nil {
-				return false, err
-			}
-		}
+		// Add any new shrink requests to our list
+		fw.shrinkCallSequenceRequests = append(fw.shrinkCallSequenceRequests, shrinkRequests...)
 
-		// Emit an event indicating the worker is about to test a new call sequence.
+		// Emit an event indicating the worker finished testing a new call sequence.
 		err = fw.Events.CallSequenceTested.Publish(FuzzerWorkerCallSequenceTestedEvent{
 			Worker: fw,
 		})
diff --git a/fuzzing/fuzzer_worker_events.go b/fuzzing/fuzzer_worker_events.go
index 2b903fc4..8fb779ff 100644
--- a/fuzzing/fuzzer_worker_events.go
+++ b/fuzzing/fuzzer_worker_events.go
@@ -31,6 +31,10 @@ type FuzzerWorkerEvents struct {
 	// CallSequenceTested emits events when the FuzzerWorker has finished generating and testing a
 	// new call sequence.
 	CallSequenceTested events.EventEmitter[FuzzerWorkerCallSequenceTestedEvent]
+
+	// TestingComplete emits events when the FuzzerWorker has completed testing of call sequences and is about to exit
+	// the fuzzing loop.
+	TestingComplete events.EventEmitter[FuzzerWorkerTestingCompleteEvent]
 }
 
 // FuzzerWorkerContractAddedEvent describes an event where a fuzzing.FuzzerWorker detects a newly deployed contract in
@@ -94,3 +98,10 @@ type FuzzerWorkerCallSequenceTestedEvent struct {
 	// Worker represents the instance of the fuzzing.FuzzerWorker for which the event occurred.
 	Worker *FuzzerWorker
 }
+
+// FuzzerWorkerTestingCompleteEvent describes an event where a fuzzing.FuzzerWorker has completed testing of call sequences
+// and is about to exit the fuzzing loop.
+type FuzzerWorkerTestingCompleteEvent struct {
+	// Worker represents the instance of the fuzzing.FuzzerWorker for which the event occurred.
+	Worker *FuzzerWorker
+}
diff --git a/fuzzing/test_case_assertion_provider.go b/fuzzing/test_case_assertion_provider.go
index f9b9978a..90405f7e 100644
--- a/fuzzing/test_case_assertion_provider.go
+++ b/fuzzing/test_case_assertion_provider.go
@@ -191,6 +191,7 @@ func (t *AssertionTestCaseProvider) callSequencePostCallTest(worker *FuzzerWorke
 	if testFailed {
 		// Create a request to shrink this call sequence.
 		shrinkRequest := ShrinkCallSequenceRequest{
+			CallSequenceToShrink: callSequence,
 			VerifierFunction: func(worker *FuzzerWorker, shrunkenCallSequence calls.CallSequence) (bool, error) {
 				// Obtain the method ID for the last call and check if it encountered assertion failures.
 				shrunkSeqMethodId, shrunkSeqTestFailed, err := t.checkAssertionFailures(shrunkenCallSequence)
diff --git a/fuzzing/test_case_optimization.go b/fuzzing/test_case_optimization.go
index 3c785047..61ec7b38 100644
--- a/fuzzing/test_case_optimization.go
+++ b/fuzzing/test_case_optimization.go
@@ -2,16 +2,14 @@ package fuzzing
 
 import (
 	"fmt"
-	"math/big"
-	"strings"
-	"sync"
-
 	"github.com/crytic/medusa/fuzzing/calls"
 	"github.com/crytic/medusa/fuzzing/contracts"
 	"github.com/crytic/medusa/fuzzing/executiontracer"
 	"github.com/crytic/medusa/logging"
 	"github.com/crytic/medusa/logging/colors"
 	"github.com/ethereum/go-ethereum/accounts/abi"
+	"math/big"
+	"strings"
 )
 
 // OptimizationTestCase describes a test being run by a OptimizationTestCaseProvider.
@@ -24,10 +22,11 @@ type OptimizationTestCase struct {
 	targetMethod abi.Method
 	// callSequence describes the call sequence that maximized the value
 	callSequence *calls.CallSequence
+	// shrinkCallSequenceRequest is the shrink request that will be executed to identify the optimal call sequence
+	// that maximizes the value
+	shrinkCallSequenceRequest *ShrinkCallSequenceRequest
 	// value is used to store the maximum value returned by the test method
 	value *big.Int
-	// valueLock is used for thread-synchronization when updating the value
-	valueLock sync.Mutex
 	// optimizationTestTrace describes the execution trace when running the callSequence
 	optimizationTestTrace *executiontracer.ExecutionTrace
 }
diff --git a/fuzzing/test_case_optimization_provider.go b/fuzzing/test_case_optimization_provider.go
index e782c646..38b8b772 100644
--- a/fuzzing/test_case_optimization_provider.go
+++ b/fuzzing/test_case_optimization_provider.go
@@ -21,6 +21,14 @@ type OptimizationTestCaseProvider struct {
 	// fuzzer describes the Fuzzer which this provider is attached to.
 	fuzzer *Fuzzer
 
+	// shrinkingRequested describes whether the optimization provider has already requested a worker to complete the
+	// provider's outstanding shrink requests. If the requests have already gone through, other workers can continue
+	// their operations.
+	shrinkingRequested bool
+
+	// shrinkingRequestedLock is used for thread synchronization when reading and updating shrinkingRequested
+	shrinkingRequestedLock sync.Mutex
+
 	// testCases is a map of contract-method IDs to optimization test cases.GetContractMethodID
 	testCases map[contracts.ContractMethodID]*OptimizationTestCase
 
@@ -186,6 +194,34 @@ func (t *OptimizationTestCaseProvider) onWorkerCreated(event FuzzerWorkerCreated
 	// Subscribe to relevant worker events.
 	event.Worker.Events.ContractAdded.Subscribe(t.onWorkerDeployedContractAdded)
 	event.Worker.Events.ContractDeleted.Subscribe(t.onWorkerDeployedContractDeleted)
+	event.Worker.Events.TestingComplete.Subscribe(t.onWorkerTestingComplete)
+	return nil
+}
+
+// onWorkerTestingComplete is the event handler triggered when a FuzzerWorker has completed testing of call sequences
+// and is about to exit the fuzzing loop. We use this event to attach shrink requests to the worker.
+// This way we are only shrinking once throughout the entire fuzzing campaign in optimization mode.
+func (t *OptimizationTestCaseProvider) onWorkerTestingComplete(event FuzzerWorkerTestingCompleteEvent) error {
+	// Acquire lock to see if this worker should handle the shrink requests or not
+	t.shrinkingRequestedLock.Lock()
+	if t.shrinkingRequested {
+		// If another thread has already been requested to shrink, exit early
+		t.shrinkingRequestedLock.Unlock()
+		return nil
+	} else {
+		// This is the first thread to reach this function, so set the boolean to true and handle shrink requests
+		t.shrinkingRequested = true
+	}
+	t.shrinkingRequestedLock.Unlock()
+
+	// Iterate across each test case to see if there is a shrink request for it
+	for _, testCase := range t.testCases {
+		// We have a shrink request, let's send it to the fuzzer worker
+		if testCase.shrinkCallSequenceRequest != nil {
+			event.Worker.shrinkCallSequenceRequests = append(event.Worker.shrinkCallSequenceRequests, *testCase.shrinkCallSequenceRequest)
+			testCase.shrinkCallSequenceRequest = nil
+		}
+	}
 	return nil
 }
 
@@ -265,10 +301,6 @@ func (t *OptimizationTestCaseProvider) onWorkerDeployedContractDeleted(event Fuz
 // and any underlying FuzzerWorker. It is called after every call made in a call sequence. It checks whether any
 // optimization test's value has increased.
 func (t *OptimizationTestCaseProvider) callSequencePostCallTest(worker *FuzzerWorker, callSequence calls.CallSequence) ([]ShrinkCallSequenceRequest, error) {
-	// Create a list of shrink call sequence verifiers, which we populate for each maximized optimization test we want a call
-	// sequence shrunk for.
-	shrinkRequests := make([]ShrinkCallSequenceRequest, 0)
-
 	// Obtain the test provider state for this worker
 	workerState := &t.workerStates[worker.WorkerIndex()]
 
@@ -286,14 +318,17 @@ func (t *OptimizationTestCaseProvider) callSequencePostCallTest(worker *FuzzerWo
 			return nil, err
 		}
 
-		// If we updated the test case's maximum value, we update our state immediately. We provide a shrink verifier which will update
-		// the call sequence for each shrunken sequence provided that still it maintains the maximum value.
-		// TODO: This is very inefficient since this runs every time a new max value is found. It would be ideal if we
-		// could perform a one-time shrink request. This code should be refactored when we introduce the high-level
-		// testing API.
+		// If we updated the test case's maximum value, we update our state immediately. Note that we are allowing
+		// for races here. We also update the test case's cached shrink request.
+		// TODO: Should we allow for races here?
 		if newValue.Cmp(testCase.value) == 1 {
+			// Update the test case's value and call sequence
+			testCase.value = newValue
+			testCase.callSequence = &callSequence
+
 			// Create a request to shrink this call sequence.
 			shrinkRequest := ShrinkCallSequenceRequest{
+				CallSequenceToShrink: callSequence,
 				VerifierFunction: func(worker *FuzzerWorker, shrunkenCallSequence calls.CallSequence) (bool, error) {
 					// First verify the contract to the optimization test is still deployed to call upon.
 					_, optimizationTestContractDeployed := worker.deployedContracts[workerOptimizationTestMethod.Address]
@@ -330,16 +365,11 @@
 						return err
 					}
 
-					// If, for some reason, the shrunken sequence lowers the new max value, do not save anything and exit
+					// If, for some reason, the shrunken sequence lowers the new max value, return an error
 					if shrunkenSequenceNewValue.Cmp(newValue) < 0 {
 						return fmt.Errorf("optimized call sequence failed to maximize value")
 					}
 
-					// Update our value with lock
-					testCase.valueLock.Lock()
-					testCase.value = new(big.Int).Set(shrunkenSequenceNewValue)
-					testCase.valueLock.Unlock()
-
 					// Update call sequence and trace
 					testCase.callSequence = &shrunkenCallSequence
 					testCase.optimizationTestTrace = executionTrace
@@ -348,10 +378,10 @@
 				RecordResultInCorpus: true,
 			}
 
-			// Add our shrink request to our list.
-			shrinkRequests = append(shrinkRequests, shrinkRequest)
+			// Update the shrink request attached to this test case
+			testCase.shrinkCallSequenceRequest = &shrinkRequest
 		}
 	}
 
-	return shrinkRequests, nil
+	return nil, nil
 }
diff --git a/fuzzing/test_case_property_provider.go b/fuzzing/test_case_property_provider.go
index 6bb6d419..9c265031 100644
--- a/fuzzing/test_case_property_provider.go
+++ b/fuzzing/test_case_property_provider.go
@@ -296,6 +296,7 @@ func (t *PropertyTestCaseProvider) callSequencePostCallTest(worker *FuzzerWorker
 	if failedPropertyTest {
 		// Create a request to shrink this call sequence.
 		shrinkRequest := ShrinkCallSequenceRequest{
+			CallSequenceToShrink: callSequence,
 			VerifierFunction: func(worker *FuzzerWorker, shrunkenCallSequence calls.CallSequence) (bool, error) {
 				// First verify the contract to property test is still deployed to call upon.
 				_, propertyTestContractDeployed := worker.deployedContracts[workerPropertyTestMethod.Address]
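The diff replaces medusa's single fuzzing context with a two-context model: cancelling the main context (Stop) lets a worker finish any queued shrink requests before exiting, while cancelling the emergency context (Terminate, wired to SIGINT in cmd/fuzz.go) aborts everything immediately. The snippet below is a minimal, self-contained Go sketch of that pattern only; the worker, shrinkRequest, stop, and terminate names are illustrative stand-ins and are not part of medusa's API.

package main

import (
	"context"
	"fmt"
	"time"
)

// shrinkRequest stands in for a queued unit of cleanup work (hypothetical type).
type shrinkRequest struct{ id int }

// worker mirrors the two-context pattern: ctx is the "soft" stop signal,
// emergencyCtx is the "hard" stop signal.
type worker struct {
	ctx          context.Context
	emergencyCtx context.Context
	pending      []shrinkRequest
}

// run loops until the main context is cancelled, then drains pending requests,
// unless the emergency context fires, in which case it returns immediately.
func (w *worker) run() {
	for i := 0; ; i++ {
		// A hard stop aborts everything, including pending shrink work.
		if w.emergencyCtx.Err() != nil {
			fmt.Println("worker: emergency stop, dropping", len(w.pending), "pending request(s)")
			return
		}
		// A soft stop still lets queued requests execute before exiting.
		if w.ctx.Err() != nil {
			for _, r := range w.pending {
				fmt.Println("worker: handling pending request", r.id)
			}
			fmt.Println("worker: graceful stop")
			return
		}
		// Simulate fuzzing one sequence and occasionally queueing a shrink request.
		if i%3 == 0 {
			w.pending = append(w.pending, shrinkRequest{id: i})
		}
		time.Sleep(10 * time.Millisecond)
	}
}

func main() {
	mainCtx, stop := context.WithCancel(context.Background())
	emergencyCtx, terminate := context.WithCancel(context.Background())
	defer terminate()

	w := &worker{ctx: mainCtx, emergencyCtx: emergencyCtx}
	done := make(chan struct{})
	go func() { w.run(); close(done) }()

	// Stop() analogue: cancel the main context and let the worker drain its queue.
	time.Sleep(50 * time.Millisecond)
	stop()
	<-done
}

Calling terminate() instead of stop() in the sketch models the interrupt path: the pending queue is dropped rather than drained.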
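OptimizationTestCaseProvider.onWorkerTestingComplete hands the cached shrink requests to exactly one worker by checking a mutex-guarded boolean. The following is a reduced sketch of that one-shot handoff under assumed names (provider, collect), not the provider's actual code; sync.Once is the standard-library alternative when no early-exit branching is needed.

package main

import (
	"fmt"
	"sync"
)

// provider is a hypothetical stand-in for the optimization test case provider:
// it holds queued requests and a flag recording whether a worker already took them.
type provider struct {
	mu        sync.Mutex
	requested bool
	queued    []string
}

// collect hands the queued requests to exactly one caller; every other caller
// receives nil. This mirrors the mutex-guarded shrinkingRequested check.
func (p *provider) collect() []string {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.requested {
		return nil // another worker already took the work
	}
	p.requested = true
	out := p.queued
	p.queued = nil
	return out
}

func main() {
	p := &provider{queued: []string{"shrink testA", "shrink testB"}}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			if work := p.collect(); work != nil {
				fmt.Printf("worker %d took %d request(s)\n", id, len(work))
			}
		}(i)
	}
	wg.Wait()
}

Only one of the four goroutines receives the queued work; the rest observe the flag and return, which is the behaviour the provider relies on to shrink only once per fuzzing campaign.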