diff --git a/app/Main.hs b/app/Main.hs
index 6e137163..61c4ead4 100644
--- a/app/Main.hs
+++ b/app/Main.hs
@@ -38,7 +38,8 @@ import qualified Network.Wai.Handler.Warp as Warp
 import System.Directory
 import System.Environment
 import System.FilePath ((</>))
-import System.IO (hPutStrLn, stderr)
+import System.IO
+import System.Exit
 
 import Options.Generic
 
@@ -76,6 +77,7 @@ import Network.HTTP.Client.TLS
 -- import Network.HTTP.Simple
 import Network.Connection
 
+
 main :: IO ()
 main = do
     cliOptions@CLIOptions{..} <- unwrapRecord $
@@ -98,7 +100,7 @@ main = do
 
 executeMainProcess :: CLIOptions Unwrapped -> IO ()
-executeMainProcess cliOptions = do
+executeMainProcess cliOptions@CLIOptions{..} = do
     -- TODO This doesn't look pretty, come up with something better.
     appStateHolder <- newTVarIO Nothing
 
@@ -110,7 +112,10 @@ executeMainProcess cliOptions = do
         for_ z $ mergeSystemMetrics sm
 
     withLogger logConfig bumpSysMetric $ \logger -> do
-        logDebug logger [i|Starting #{rpkiProverVersion}.|]
+        logDebug logger $ if once
+            then [i|Starting #{rpkiProverVersion} in one-off mode.|]
+            else [i|Starting #{rpkiProverVersion} as a server.|]
+
         if cliOptions ^. #initialise
             then
                 -- init the FS layout and download TALs
@@ -118,18 +123,25 @@ executeMainProcess cliOptions = do
             else do
                 -- run the validator
                 (appContext, validations) <- do
-                    runValidatorT (newScopes "initialise") $ do
+                    runValidatorT (newScopes "Initialise") $ do
                         checkPreconditions cliOptions
                         createAppContext cliOptions logger (logConfig ^. #logLevel)
                 case appContext of
-                    Left _ ->
-                        logError logger [i|Couldn't initialise, problems: #{validations}.|]
+                    Left _ -> do
+                        logError logger [i|Failure:
+#{formatValidations (validations ^. typed)}|]
+                        drainLog logger
+                        hFlush stdout
+                        hFlush stderr
+                        exitFailure
                     Right appContext' -> do
                         -- now we have the appState, set appStateHolder
                         atomically $ writeTVar appStateHolder $ Just $ appContext' ^. #appState
-                        void $ race
-                            (runHttpApi appContext')
-                            (runValidatorServer appContext')
+                        if once
+                            then runValidatorServer appContext'
+                            else void $ race
+                                (runHttpApi appContext')
+                                (runValidatorServer appContext')
 
 executeWorkerProcess :: IO ()
 executeWorkerProcess = do
@@ -176,6 +188,7 @@ turnOffTlsValidation = do
     manager <- newManager $ mkManagerSettings (TLSSettingsSimple True True True) Nothing
     setGlobalManager manager
 
+
 runValidatorServer :: (Storage s, MaintainableStorage s) => AppContext s -> IO ()
 runValidatorServer appContext@AppContext {..} = do
@@ -220,18 +233,21 @@ createAppContext cliOptions@CLIOptions{..} logger derivedLogLevel = do
 
     programPath <- liftIO getExecutablePath
 
-    (root, tald, rsyncd, tmpd, cached) <- fsLayout cliOptions logger CheckTALsExists
-
     let defaults = defaultConfig
-    let lmdbRealSize = (Size <$> lmdbSize) `orDefault` (defaults ^. #lmdbSizeMb)
-
-    -- clean up tmp directory if it's not empty
-    cleanDir tmpd
+    let lmdbRealSize = (Size <$> lmdbSize) `orDefault` (defaults ^. #lmdbSizeMb)
 
-    let cpuCount' = fromMaybe getRtsCpuCount cpuCount
+    (root, tald, rsyncd, tmpd, cached) <-
+        fromTryM
+            (\e -> UnspecifiedE "Error verifying/creating FS layout: " (fmtEx e))
+            $ do
+                z@(_, _, _, tmpd, _) <- fsLayout cliOptions logger CheckTALsExists
+                -- clean up tmp directory if it's not empty
+                cleanDir tmpd
+                pure z
 
     -- Set capabilities to the values from the CLI or to all available CPUs,
     -- (disregard the HT issue for now, it needs more testing).
+    let cpuCount' = fromMaybe getRtsCpuCount cpuCount
     liftIO $ setCpuCount cpuCount'
 
     let parallelism = case fetcherCount of
@@ -243,9 +259,10 @@
                     & maybeSet #rtrPort rtrPort
                     & maybeSet #rtrAddress rtrAddress
                     & #rtrLogFile .~ rtrLogFile
-            else Nothing
+            else Nothing
 
-    rsyncPrefetchUrls <- rsyncPrefetches cliOptions
+    proverRunMode <- deriveProverRunMode cliOptions
+    rsyncPrefetchUrls <- rsyncPrefetches cliOptions
 
     let config = defaults
             & #programBinaryPath .~ programPath
@@ -254,6 +271,7 @@
             & #tmpDirectory .~ tmpd
             & #cacheDirectory .~ cached
             & #extraTalsDirectories .~ extraTalsDirectory
+            & #proverRunMode .~ proverRunMode
             & #parallelism .~ parallelism
             & #rsyncConf . #rsyncRoot .~ rsyncd
             & #rsyncConf . #rsyncClientPath .~ rsyncClientPath
@@ -296,6 +314,7 @@
             & maybeSet (#systemConfig . #rrdpWorkerMemoryMb) maxRrdpFetchMemory
             & maybeSet (#systemConfig . #validationWorkerMemoryMb) maxValidationMemory
 
+    -- Do some "common sense" adjustments to the config for correctness
     let adjustedConfig = config
             -- Cache must be cleaned up at least as often as the
             -- lifetime of the objects in it
@@ -306,7 +325,7 @@
                 readSlurmFiles files
 
     -- Read the files first to fail fast
-    unless (null localExceptions) $ do
+    unless (null localExceptions) $
         void $ readSlurms localExceptions
 
     appState <- createAppState logger localExceptions
@@ -447,7 +466,7 @@ checkSubDirectory :: FilePath -> FilePath -> IO (Either Text FilePath)
 checkSubDirectory root sub = do
     let subDirectory = root </> sub
     doesDirectoryExist subDirectory >>= \case
-        False -> pure $ Left [i|Directory #{subDirectory} doesn't exist.|]
+        False -> pure $ Left [i|Directory #{subDirectory} doesn't exist|]
         True -> pure $ Right subDirectory
 
 createSubDirectoryIfNeeded :: FilePath -> FilePath -> IO (Either Text FilePath)
@@ -509,6 +528,14 @@ createAppState logger localExceptions = do
 
 checkPreconditions :: CLIOptions Unwrapped -> ValidatorT IO ()
 checkPreconditions CLIOptions {..} = checkRsyncInPath rsyncClientPath
 
+deriveProverRunMode :: CLIOptions Unwrapped -> ValidatorT IO ProverRunMode
+deriveProverRunMode CLIOptions {..} =
+    case (once, vrpOutput) of
+        (False, Nothing) -> pure ServerMode
+        (True, Just vo)  -> pure $ OneOffMode vo
+        _ -> appError $ UnspecifiedE "options"
+                [i|Options `--once` and `--vrp-output` must be either both set or both not set|]
+
 
 -- | Run rpki-prover in a CLI mode for verifying RSC signature (*.sig file).
 executeVerifier :: CLIOptions Unwrapped -> IO ()
@@ -565,6 +592,13 @@ data CLIOptions wrapped = CLIOptions {
     initialise :: wrapped ::: Bool
         "If set, the FS layout will be created and TAL files will be downloaded.",
 
+    once :: wrapped ::: Bool
+        ("If set, will run one validation cycle and exit. The HTTP API will not start; " +++
+         "the result will be written to the file set by the --vrp-output option (which must also be set)."),
+
+    vrpOutput :: wrapped ::: Maybe FilePath
+        "Path of the file to write VRPs to. Only effective when the --once option is set.",
+
     noRirTals :: wrapped ::: Bool
         "If set, RIR TAL files will not be downloaded.",
 
@@ -738,7 +772,6 @@ deriving instance Show (CLIOptions Unwrapped)
 
 type (+++) (a :: Symbol) (b :: Symbol) = AppendSymbol a b
 
-
 withLogConfig :: CLIOptions Unwrapped -> (LogConfig -> IO ()) -> IO ()
 withLogConfig CLIOptions{..} f =
     case logLevel of
diff --git a/src/RPKI/AppMonad.hs b/src/RPKI/AppMonad.hs
index ddbc3724..ade7da4f 100644
--- a/src/RPKI/AppMonad.hs
+++ b/src/RPKI/AppMonad.hs
@@ -54,7 +54,10 @@ fromEither z = fromValue z
 
 fromEitherM :: Monad m => m (Either AppError r) -> ValidatorT m r
-fromEitherM s = embedValidatorT $ (, mempty) <$> s
+fromEitherM s =
+    appLift s >>= \case
+        Left e  -> appError e
+        Right r -> pure r
 
 vFromEither :: Either ValidationError r -> PureValidatorT r
 vFromEither = fromEither . first ValidationE
diff --git a/src/RPKI/Config.hs b/src/RPKI/Config.hs
index ad9837b3..7ba65c86 100644
--- a/src/RPKI/Config.hs
+++ b/src/RPKI/Config.hs
@@ -54,6 +54,7 @@ data Config = Config {
     extraTalsDirectories :: [FilePath],
     tmpDirectory :: FilePath,
     cacheDirectory :: FilePath,
+    proverRunMode :: ProverRunMode,
     parallelism :: Parallelism,
     rsyncConf :: RsyncConf,
     rrdpConf :: RrdpConf,
@@ -101,7 +102,6 @@ data ManifestProcessing = RFC6486_Strict | RFC9286
     deriving stock (Eq, Ord, Show, Generic)
     deriving anyclass (TheBinary)
 
-
 data ValidationAlgorithm = FullEveryIteration | Incremental
     deriving stock (Eq, Ord, Show, Generic)
     deriving anyclass (TheBinary)
@@ -110,11 +110,14 @@ data FetchTimingCalculation = Constant | Adaptive
     deriving stock (Eq, Ord, Show, Generic)
     deriving anyclass (TheBinary)
 
-
 data FetchMethod = SyncOnly | SyncAndAsync
     deriving stock (Eq, Ord, Show, Generic)
     deriving anyclass (TheBinary)
 
+data ProverRunMode = OneOffMode FilePath | ServerMode
+    deriving stock (Show, Eq, Ord, Generic)
+    deriving anyclass (TheBinary)
+
 data ValidationConfig = ValidationConfig {
     revalidationInterval :: Seconds,
     rrdpRepositoryRefreshInterval :: Seconds,
@@ -213,6 +216,7 @@ defaultConfig = Config {
     extraTalsDirectories = [],
     tmpDirectory = "",
     cacheDirectory = "",
+    proverRunMode = ServerMode,
     parallelism = makeParallelism 2,
     rsyncConf = RsyncConf {
         rsyncClientPath = Nothing,
diff --git a/src/RPKI/Fetch.hs b/src/RPKI/Fetch.hs
index c266c9ef..2b5037ad 100644
--- a/src/RPKI/Fetch.hs
+++ b/src/RPKI/Fetch.hs
@@ -64,13 +64,13 @@ import RPKI.Parallel
     Fall-back is disabled in the sync mode, so if the primary repository doesn't
     respond, we skip all the fall-backs and mark them all as ForAsyncFetch.
 -}
-fetchSync :: (MonadIO m, Storage s) =>
+fetchQuickly :: (MonadIO m, Storage s) =>
             AppContext s
         -> RepositoryProcessing
         -> WorldVersion
         -> PublicationPointAccess
-        -> ValidatorT m (Maybe FetchResult)
-fetchSync appContext@AppContext {..}
+        -> ValidatorT m [FetchResult]
+fetchQuickly appContext@AppContext {..}
     repositoryProcessing@RepositoryProcessing {..}
     worldVersion
     ppa = do
@@ -83,7 +83,7 @@ fetchSync appContext@AppContext {..}
             -- It exists mainly for comparisons and measurements and
             -- not very useful in practice.
             let primaryRepo = getPrimaryRepository pps ppa
-            Just <$>
+            (:[]) <$>
                 fetchOnePp appContext (syncFetchConfig config)
                     repositoryProcessing worldVersion primaryRepo
                     (\meta _ -> pure meta)
@@ -95,7 +95,7 @@ fetchSync appContext@AppContext {..}
                 -- There's nothing to be fetched in the sync mode,
                 -- so just mark all of them for async fetching.
                 markForAsyncFetch repositoryProcessing asyncRepos
-                pure Nothing
+                pure []
 
             Just syncPp_ -> do
                 -- In sync mode fetch only the first PP
                 fetchResult <-
@@ -112,7 +112,7 @@ fetchSync appContext@AppContext {..}
                         $ NonEmpty.toList $ unPublicationPointAccess ppa
                 markForAsyncFetch repositoryProcessing toMarkAsync
-                pure $ Just fetchResult
+                pure $! [fetchResult]
   where
     newMetaCallback syncPp_ pps newMeta fetchMoment = do
         -- Set fetchType to ForAsyncFetch to all fall-back URLs,
@@ -140,26 +140,26 @@
 {-
 - Fetch repositories in the async mode (i.e. concurrently to top-down validation).
 - Fetch with fallback going through all (both RRDP and rsync) options.
-}
-fetchAsync :: (MonadIO m, Storage s) =>
+fetchWithFallback :: (MonadIO m, Storage s) =>
             AppContext s
         -> RepositoryProcessing
         -> WorldVersion
+        -> FetchConfig
        -> PublicationPointAccess
        -> ValidatorT m [FetchResult]
-fetchAsync
+fetchWithFallback
     appContext@AppContext {..}
     repositoryProcessing
     worldVersion
+    fetchConfig
     ppa = go True $ NonEmpty.toList $ unPublicationPointAccess ppa
   where
     go _ [] = pure []
 
     go isPrimary (pp : rest) = do
-        fetchResult <- fetchOnePp appContext (asyncFetchConfig config)
+        fetchResult <- fetchOnePp appContext fetchConfig
                 repositoryProcessing worldVersion pp (newMetaCallback isPrimary)
         case fetchResult of
             FetchFailure _ _ -> do
@@ -183,19 +183,23 @@ fetchAsync
                     logWarn logger $ if nextOneNeedAFetch
                         then [i|Failed to fetch #{getURL pp}, will fall-back to the next one: #{getURL $ getRpkiURL ppNext}.|]
                         else [i|Failed to fetch #{getURL pp}, next one (#{getURL $ getRpkiURL ppNext}) is up-to-date.|]
-                    go False rest
+
+                    (fetchResult :) <$> go False rest
 
             _ -> pure [fetchResult]
 
-    -- We are doing async fetch here, so we are not going to promote fall-back
-    -- repositories back to ForSyncFetch type. I.e. if a CA has publication
-    -- points as "repo_a - fall-back-to -> repo_b", repo_b is never going to
-    -- become ForSyncFetch, only repo_a can become sync and only after it is
-    -- back to normal state.
     newMetaCallback isPrimary newMeta fetchMoment =
-        pure $ if isPrimary
-            then newMeta
-            else newMeta & #fetchType .~ ForAsyncFetch fetchMoment
+        case config ^. #proverRunMode of
+            OneOffMode {} -> pure newMeta
+            ServerMode ->
+                -- We are doing async fetch here, so we are not going to promote fall-back
+                -- repositories back to ForSyncFetch type. I.e. if a CA has publication
+                -- points as "repo_a - fall-back-to -> repo_b", repo_b is never going to
+                -- become ForSyncFetch, only repo_a can become sync and only after it is
+                -- back to normal state.
+                pure $ if isPrimary
+                    then newMeta
+                    else newMeta & #fetchType .~ ForAsyncFetch fetchMoment
 
 
 fetchOnePp :: (MonadIO m, Storage s) =>
@@ -260,7 +264,7 @@ fetchOnePp
             pure (rpkiUrl, fetchFreshness, f)
 
     -- This is hacky but basically setting the "fetched/up-to-date" metric
-    -- without ValidatorT/PureValidatorT.
+    -- without ValidatorT/PureValidatorT (we can only run it in IO).
     updateFetchMetric repoUrl fetchFreshness validations r elapsed =
         let realFreshness = either (const FailedToFetch) (const fetchFreshness) r
             repoScope = validatorSubScope' RepositoryFocus repoUrl parentScope
diff --git a/src/RPKI/Logging.hs b/src/RPKI/Logging.hs
index 8885268f..0e0b10bc 100644
--- a/src/RPKI/Logging.hs
+++ b/src/RPKI/Logging.hs
@@ -19,7 +19,6 @@ import qualified Data.ByteString.Char8 as C8
 
 import Data.Bifunctor
 import Data.Foldable
-import Data.Maybe
 import Data.Text (Text, justifyLeft)
 
 import Data.String.Interpolate.IsString
@@ -246,6 +245,12 @@ withLogger LogConfig {..} sysMetricCallback f = do
             in [i|#{level} #{pid} #{timestamp} #{message}|]
 
+drainLog :: MonadIO m => AppLogger -> m ()
+drainLog (getQueue -> queue) =
+    liftIO $ atomically $ do
+        empty <- isEmptyCQueue queue
+        unless empty retry
+
 eol :: Char
 eol = '\n'
diff --git a/src/RPKI/Messages.hs b/src/RPKI/Messages.hs
index b049d8f9..0002cf36 100644
--- a/src/RPKI/Messages.hs
+++ b/src/RPKI/Messages.hs
@@ -44,7 +44,7 @@ toMessage = \case
     InternalE t -> toInternalErrorMessage t
 
     UnspecifiedE context e ->
-        [i|Unspecified error #{context}, details: #{e}.|]
+        [i|Unspecified error in #{context}, details: #{e}.|]
 
 
 toRsyncMessage :: RsyncError -> Text
diff --git a/src/RPKI/Orphans/Json.hs b/src/RPKI/Orphans/Json.hs
index 6df542c9..245d420d 100644
--- a/src/RPKI/Orphans/Json.hs
+++ b/src/RPKI/Orphans/Json.hs
@@ -405,6 +405,7 @@ instance ToJSON ManifestProcessing
 instance ToJSON ValidationAlgorithm
 instance ToJSON FetchTimingCalculation
 instance ToJSON FetchMethod
+instance ToJSON ProverRunMode
 instance ToJSON TAL
 instance ToJSON HttpApiConfig
 instance ToJSON ValidationConfig
diff --git a/src/RPKI/Orphans/Swagger.hs b/src/RPKI/Orphans/Swagger.hs
index 9491bf8a..289ca380 100644
--- a/src/RPKI/Orphans/Swagger.hs
+++ b/src/RPKI/Orphans/Swagger.hs
@@ -133,6 +133,8 @@ instance ToSchema ManifestProcessing
 instance ToSchema ValidationAlgorithm
 instance ToSchema FetchTimingCalculation
 instance ToSchema FetchMethod
+instance ToSchema ProverRunMode where
+    declareNamedSchema _ = declareNamedSchema (Proxy :: Proxy Text)
 
 instance ToSchema a => ToSchema (Located a)
diff --git a/src/RPKI/Parallel.hs b/src/RPKI/Parallel.hs
index 8861fa7d..2f65b81f 100644
--- a/src/RPKI/Parallel.hs
+++ b/src/RPKI/Parallel.hs
@@ -179,6 +179,9 @@ closeCQueue (ClosableQueue _ s) = writeTVar s QClosed
 isClosedCQueue :: ClosableQueue a -> STM Bool
 isClosedCQueue (ClosableQueue _ s) = (QClosed ==) <$> readTVar s
 
+isEmptyCQueue :: ClosableQueue a -> STM Bool
+isEmptyCQueue (ClosableQueue q _) = isEmptyTBQueue q
+
 readCQueue :: ClosableQueue a -> STM (Maybe a)
 readCQueue (ClosableQueue q queueState) =
     Q.tryReadTBQueue q >>= \case
diff --git a/src/RPKI/Store/Database.hs b/src/RPKI/Store/Database.hs
index 505fdea9..4e2fa034 100644
--- a/src/RPKI/Store/Database.hs
+++ b/src/RPKI/Store/Database.hs
@@ -68,7 +68,7 @@ import RPKI.Time
 -- It is brittle and inconvenient, but so far seems to be
 -- the only realistic option.
 currentDatabaseVersion :: Integer
-currentDatabaseVersion = 31
+currentDatabaseVersion = 32
 
 -- Some constant keys
 databaseVersionKey, lastValidMftKey, forAsyncFetchKey :: Text
diff --git a/src/RPKI/Validation/TopDown.hs b/src/RPKI/Validation/TopDown.hs
index 14903a65..d1c8b135 100644
--- a/src/RPKI/Validation/TopDown.hs
+++ b/src/RPKI/Validation/TopDown.hs
@@ -498,15 +498,21 @@ validateCaNoLimitChecks
                 validateThisCertAndGoDown
 
             Just filteredPpa -> do
-                r <- fetchSync appContext repositoryProcessing worldVersion filteredPpa
-                case r of
-                    Nothing ->
+                let fetch = case config ^. #proverRunMode of
+                        ServerMode -> do
+                            fetchQuickly appContext repositoryProcessing worldVersion filteredPpa
+                        OneOffMode {} ->
+                            fetchWithFallback appContext repositoryProcessing worldVersion
+                                (syncFetchConfig config) filteredPpa
+                fetch >>= \case
+                    [] ->
                         -- Nothing has been fetched
                         validateThisCertAndGoDown
-                    Just _ -> do
+                    _ -> do
                         pps <- readPublicationPoints repositoryProcessing
                         let primaryUrl = getPrimaryRepositoryUrl pps filteredPpa
                         metricFocusOn PPFocus primaryUrl
                         validateThisCertAndGoDown
+
   where
     validateThisCertAndGoDown =
         validateCaNoFetch appContext topDownContext ca
@@ -745,7 +751,7 @@ validateCaNoFetch
         -- if CA prescribes to use standard validation instead of reconsidered.
         --
         -- NOTE: That means that in case of full validation falling back to
-        -- the prevous valid manifest will not work, since there are no
+        -- the previous valid manifest will not work, since there are no
         -- shortcuts of previous manifests to fall back to.
         (Incremental, StrictRFC) -> do
             issues <- vHoist thisScopeIssues
diff --git a/src/RPKI/Workflow.hs b/src/RPKI/Workflow.hs
index e10ff957..0500554d 100644
--- a/src/RPKI/Workflow.hs
+++ b/src/RPKI/Workflow.hs
@@ -18,6 +18,8 @@ import Control.Lens
 import Data.Generics.Product.Typed
 import GHC.Generics (Generic)
 
+import qualified Data.ByteString.Lazy as LBS
+
 import qualified Data.List.NonEmpty as NE
 import Data.Foldable (for_, toList)
 import qualified Data.Text as Text
@@ -57,12 +59,14 @@ import RPKI.Util
 import RPKI.Time
 import RPKI.Worker
 import RPKI.SLURM.Types
+import RPKI.Http.Dto
+import RPKI.Http.Types
 
 
 -- A job run can be the first one or not and
 -- sometimes we need this information.
 data JobRun = FirstRun | RanBefore
-    deriving (Show, Eq, Ord, Generic)
+    deriving stock (Show, Eq, Ord, Generic)
 
 data WorkflowShared = WorkflowShared {
     -- Indicates if anything was ever deleted from the DB,
@@ -79,7 +83,7 @@ data WorkflowShared = WorkflowShared {
     -- running to avoid launching it until the previous run is done.
     asyncFetchIsRunning :: TVar Bool
     }
-    deriving (Generic)
+    deriving stock (Generic)
 
 newWorkflowShared :: PrometheusMetrics -> STM WorkflowShared
 newWorkflowShared prometheusMetrics = WorkflowShared
@@ -106,7 +110,7 @@ data TaskType =
     -- Delete local rsync mirror once in a long while
     | RsyncCleanupTask
-    deriving (Show, Eq, Ord, Bounded, Enum, Generic)
+    deriving stock (Show, Eq, Ord, Bounded, Enum, Generic)
 
 data Task = Task TaskType (IO ())
 
@@ -118,8 +122,7 @@ data Scheduling = Scheduling {
         taskDef :: (TaskType, WorldVersion -> JobRun -> IO ()),
         persistent :: Bool
     }
-    deriving (Generic)
-
+    deriving stock (Generic)
 
 -- The main entry point for the whole validator workflow. Runs multiple threads,
 -- running validation, RTR server, cleanups, cache maintenance and async fetches.
@@ -136,13 +139,27 @@ runWorkflow appContext@AppContext {..} tals = do
     -- Fill in the current appState if it's not too old.
     -- It is useful in case of restarts.
     void $ loadStoredAppState appContext
-
-    -- Run the main scheduler and RTR server if configured
-    void $ concurrently
-        (runScheduledTasks workflowShared)
-        runRtrIfConfigured
+
+    case config ^. #proverRunMode of
+        OneOffMode vrpOutputFile -> oneOffRun workflowShared vrpOutputFile
+        ServerMode ->
+            void $ concurrently
+                -- Run the main scheduler and RTR server if RTR is configured
+                (runScheduledTasks workflowShared)
+                runRtrIfConfigured
   where
+    oneOffRun workflowShared vrpOutputFile = do
+        worldVersion <- createWorldVersion
+        void $ validateTAs workflowShared worldVersion FirstRun
+        vrps <- roTxT database $ \tx db ->
+            getLastValidationVersion db tx >>= \case
+                Nothing            -> pure Nothing
+                Just latestVersion -> getVrps tx db latestVersion
+        case vrps of
+            Nothing -> logWarn logger [i|Don't have any VRPs.|]
+            _       -> LBS.writeFile vrpOutputFile $ unRawCSV $ vrpDtosToCSV $ toVrpDtos vrps
+
     schedules workflowShared = [
             Scheduling {
                 initialDelay = 0,
@@ -228,7 +245,9 @@ runWorkflow appContext@AppContext {..} tals = do
     validateTAs workflowShared@WorkflowShared {..} worldVersion _ = do
         doValidateTAs workflowShared worldVersion
             `finally`
-                runAsyncFetcherIfNeeded
+                (case config ^. #proverRunMode of
+                    ServerMode    -> runAsyncFetcherIfNeeded
+                    OneOffMode {} -> pure ())
       where
         runAsyncFetcherIfNeeded =
             case config ^. #validationConfig . #fetchMethod of
@@ -258,7 +277,7 @@ runWorkflow appContext@AppContext {..} tals = do
                 logInfo logger [i|Finished asynchronous fetch #{fetchVersion} in #{elapsed `div` 1000}s.|]
 
-    doValidateTAs WorkflowShared {..} worldVersion= do
+    doValidateTAs WorkflowShared {..} worldVersion = do
         logInfo logger [i|Validating all TAs, world version #{worldVersion} |]
         executeOrDie
             processTALs
@@ -351,7 +370,7 @@ runWorkflow appContext@AppContext {..} tals = do
             -- possible leakages (even if it were possible).
            cleaned <- cleanUpStaleTx appContext
             when (cleaned > 0) $
-                 logDebug logger [i|Cleaned #{cleaned} stale readers from LMDB cache.|]
+                logDebug logger [i|Cleaned #{cleaned} stale readers from LMDB cache.|]
 
     -- Delete local rsync mirror. The assumption here is that over time there
     -- will be a lot of local copies of rsync repositories that are so old that
@@ -598,7 +617,8 @@ runAsyncFetches appContext@AppContext {..} worldVersion = do
     void $ forConcurrently (sortPpas asyncRepos problematicPpas) $ \ppAccess -> do
         let url = getRpkiURL $ NE.head $ unPublicationPointAccess ppAccess
         void $ runValidatorT (newScopes' RepositoryFocus url) $
-            fetchAsync appContext repositoryProcessing worldVersion ppAccess
+            fetchWithFallback appContext repositoryProcessing
+                worldVersion (asyncFetchConfig config) ppAccess
         validationStateOfFetches repositoryProcessing
   where
diff --git a/stack.yaml b/stack.yaml
index ef2cdd25..c0506b9e 100644
--- a/stack.yaml
+++ b/stack.yaml
@@ -1,4 +1,4 @@
-resolver: lts-22.26
+resolver: lts-22.27
 
 packages:
 - .
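
Note on the logging changes above: drainLog blocks until the in-memory log queue is empty, using STM's retry rather than polling, and that is what lets the new failure path in app/Main.hs (drainLog, then hFlush, then exitFailure) and the one-off mode get all pending log lines out before the process exits. A minimal standalone sketch of the same idiom, with a plain TBQueue standing in for the project's ClosableQueue (waitUntilEmpty is an illustrative name, not project code):

    import Control.Concurrent.STM
    import Control.Monad (unless)

    -- Block until the consumer thread has drained the queue. 'retry'
    -- suspends the transaction and re-runs it whenever the queue is
    -- modified, so there is no busy-waiting.
    waitUntilEmpty :: TBQueue a -> IO ()
    waitUntilEmpty q = atomically $ do
        empty <- isEmptyTBQueue q
        unless empty retry

isEmptyCQueue added in src/RPKI/Parallel.hs exposes exactly this isEmptyTBQueue check for ClosableQueue, and drainLog supplies the retry loop on top of it.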