diff --git a/toolkit/docs/formats/imageconfig.md b/toolkit/docs/formats/imageconfig.md index 08175d0d859..c71456eb372 100644 --- a/toolkit/docs/formats/imageconfig.md +++ b/toolkit/docs/formats/imageconfig.md @@ -1,6 +1,6 @@ # Image configuration -Image configuration consists of two sections - Disks and SystemConfigs - that describe the produced artifact(image). Image configuration code can be found in (configuration.go)[../../tools/imagegen/configuration/configuration.go] and validity of the configuration file can be verified by the [imageconfigvalidator](../../tools/imageconfigvalidator/imageconfigvalidator.go) +Image configuration consists of two sections - Disks and SystemConfigs - that describe the produced artifact (image). Image configuration code can be found in [configuration.go](../../tools/pkg/imagegen/configuration/configuration.go) and validity of the configuration file can be verified by the [imageconfigvalidator](../../tools/imageconfigvalidator/imageconfigvalidator.go) ## Disks diff --git a/toolkit/docs/security/read-only-roots.md b/toolkit/docs/security/read-only-roots.md index c553da18bff..f1866eca304 100644 --- a/toolkit/docs/security/read-only-roots.md +++ b/toolkit/docs/security/read-only-roots.md @@ -49,7 +49,7 @@ Files in `/etc` such as the `passwd` and `machine-id` files are also part of the Ideally as much of `/etc` as possible should be left read-only to avoid miss-configuration. ### Forward Error Correction (FEC) -Verity supports error correction which will return the original data even if the underlying blocks have been modified (it does not restore the underlying data stored on disk however). FEC incurs some overhead but the hash tree, which is a sunk cost, makes it much more effective than normal Reed-Solomon codes. For a 2GiB disk, FEC with 2 roots (i.e. 2 bytes of error correction codes per 255 bytes of real data) can correct ~16MiB of errors with ~16MiB of overhead. 
See [veritydisk.go](../../tools/imagegen/configuration/veritydisk.go) for calculation details. +Verity supports error correction which will return the original data even if the underlying blocks have been modified (it does not restore the underlying data stored on disk however). FEC incurs some overhead but the hash tree, which is a sunk cost, makes it much more effective than normal Reed-Solomon codes. For a 2GiB disk, FEC with 2 roots (i.e. 2 bytes of error correction codes per 255 bytes of real data) can correct ~16MiB of errors with ~16MiB of overhead. See [veritydisk.go](../../tools/pkg/imagegen/configuration/veritydisk.go) for calculation details. ### Hash Tree and FEC Overhead The extra data required for verity needs to be stored outside the measured partition. In the case of Mariner it is stored in the initramfs. Assuming the Merkle tree is a full m-ary tree with m=128 (128 branches per node, from `4k/sizeof(sha256)`), the size of the Merkle tree is: diff --git a/toolkit/scripts/tools.mk b/toolkit/scripts/tools.mk index 5c5edf98072..54870ffd932 100644 --- a/toolkit/scripts/tools.mk +++ b/toolkit/scripts/tools.mk @@ -14,7 +14,6 @@ $(call create_folder,$(BUILD_DIR)/tools) # List of go utilities in tools/ directory go_tool_list = \ - boilerplate \ depsearch \ grapher \ graphpkgfetcher \ @@ -37,7 +36,7 @@ go_tool_targets = $(foreach target,$(go_tool_list),$(TOOL_BINS_DIR)/$(target)) # Common files to monitor for all go targets go_module_files = $(TOOLS_DIR)/go.mod $(TOOLS_DIR)/go.sum go_internal_files = $(shell find $(TOOLS_DIR)/internal/ -type f -name '*.go') -go_imagegen_files = $(shell find $(TOOLS_DIR)/imagegen/ -type f -name '*.go') +go_imagegen_files = $(shell find $(TOOLS_DIR)/pkg/imagegen/ -type f -name '*.go') go_common_files = $(go_module_files) $(go_internal_files) $(go_imagegen_files) $(BUILD_DIR)/tools/internal.test_coverage # A report on test coverage for all the go tools test_coverage_report=$(TOOL_BINS_DIR)/test_coverage_report.html diff --git 
a/toolkit/tools/boilerplate/boilerplate.go b/toolkit/tools/boilerplate/boilerplate.go deleted file mode 100644 index 234441e7e2b..00000000000 --- a/toolkit/tools/boilerplate/boilerplate.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. - -// A boilerplate for Mariner go tools - -package main - -import ( - "os" - - "github.com/microsoft/CBL-Mariner/toolkit/tools/boilerplate/hello" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - - "gopkg.in/alecthomas/kingpin.v2" -) - -var ( - app = kingpin.New("boilerplate", "A sample golang tool for Mariner.") - - logFile = exe.LogFileFlag(app) - logLevel = exe.LogLevelFlag(app) -) - -func main() { - app.Version(exe.ToolkitVersion) - kingpin.MustParse(app.Parse(os.Args[1:])) - - logger.InitBestEffort(*logFile, *logLevel) - - logger.Log.Info(hello.World()) -} diff --git a/toolkit/tools/boilerplate/hello/hello.go b/toolkit/tools/boilerplate/hello/hello.go deleted file mode 100644 index aa226334b18..00000000000 --- a/toolkit/tools/boilerplate/hello/hello.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. - -package hello - -// World is a sample public (starts with a capital letter, must be commented) function. -func World() string { - return "Hello, world!" -} diff --git a/toolkit/tools/boilerplate/hello/hello_test.go b/toolkit/tools/boilerplate/hello/hello_test.go deleted file mode 100644 index b5b3a8dd52f..00000000000 --- a/toolkit/tools/boilerplate/hello/hello_test.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. - -package hello - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestHelloWorld(t *testing.T) { - want := "Hello, world!" 
- assert.Equal(t, want, World()) -} diff --git a/toolkit/tools/depsearch/depsearch.go b/toolkit/tools/depsearch/depsearch.go index 088b951035d..65aee0b64b7 100644 --- a/toolkit/tools/depsearch/depsearch.go +++ b/toolkit/tools/depsearch/depsearch.go @@ -9,14 +9,14 @@ import ( "path/filepath" "sort" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph" - "github.com/microsoft/CBL-Mariner/toolkit/tools/scheduler/schedulerutils" - "gonum.org/v1/gonum/graph" "gopkg.in/alecthomas/kingpin.v2" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/scheduler/schedulerutils" ) const ( diff --git a/toolkit/tools/graphPreprocessor/graphPreprocessor.go b/toolkit/tools/graphPreprocessor/graphPreprocessor.go index 8d7c16f7605..a4b00825d90 100644 --- a/toolkit/tools/graphPreprocessor/graphPreprocessor.go +++ b/toolkit/tools/graphPreprocessor/graphPreprocessor.go @@ -7,8 +7,9 @@ import ( "os" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/preprocessor" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" "gopkg.in/alecthomas/kingpin.v2" ) @@ -23,43 +24,13 @@ var ( logLevel = exe.LogLevelFlag(app) ) -func replaceRunNodesWithPrebuiltNodes(pkgGraph *pkggraph.PkgGraph) (err error) { - for _, node := range pkgGraph.AllNodes() { - - if 
node.Type != pkggraph.TypeRun { - continue - } - - isPrebuilt, _, missing := pkggraph.IsSRPMPrebuilt(node.SrpmPath, pkgGraph, nil) - - if isPrebuilt == false { - logger.Log.Tracef("Can't mark %s as prebuilt, missing: %v", node.SrpmPath, missing) - continue - } - - preBuiltNode := pkgGraph.CloneNode(node) - preBuiltNode.State = pkggraph.StateUpToDate - preBuiltNode.Type = pkggraph.TypePreBuilt - - parentNodes := pkgGraph.To(node.ID()) - for parentNodes.Next() { - parentNode := parentNodes.Node().(*pkggraph.PkgNode) - - if parentNode.Type != pkggraph.TypeGoal { - pkgGraph.RemoveEdge(parentNode.ID(), node.ID()) - - logger.Log.Debugf("Adding a 'PreBuilt' node '%s' with id %d. For '%s'", preBuiltNode.FriendlyName(), preBuiltNode.ID(), parentNode.FriendlyName()) - err = pkgGraph.AddEdge(parentNode, preBuiltNode) - - if err != nil { - logger.Log.Errorf("Adding edge failed for %v -> %v", parentNode, preBuiltNode) - return - } - } - } +func populatePreprocessorConfig() *preprocessor.Config { + return &preprocessor.Config{ + InputGraphFile: *inputGraphFile, + OutputGraphFile: *outputGraphFile, + HydratedBuild: *hydratedBuild, } - return } func main() { @@ -67,25 +38,14 @@ func main() { kingpin.MustParse(app.Parse(os.Args[1:])) logger.InitBestEffort(*logFile, *logLevel) - scrubbedGraph := pkggraph.NewPkgGraph() - - err := pkggraph.ReadDOTGraphFile(scrubbedGraph, *inputGraphFile) + cfg := populatePreprocessorConfig() + scrubbedGraph, err := cfg.ReadAndPreprocessGraph() if err != nil { - logger.Log.Panicf("Failed to read graph to file, %s. Error: %s", *inputGraphFile, err) + logger.Log.Panic(err) } - - if *hydratedBuild { - logger.Log.Debugf("Nodes before replacing prebuilt nodes: %d", len(scrubbedGraph.AllNodes())) - err = replaceRunNodesWithPrebuiltNodes(scrubbedGraph) - logger.Log.Debugf("Nodes after replacing prebuilt nodes: %d", len(scrubbedGraph.AllNodes())) - if err != nil { - logger.Log.Panicf("Failed to replace run nodes with preBuilt nodes. 
Error: %s", err) - } - } - - err = pkggraph.WriteDOTGraphFile(scrubbedGraph, *outputGraphFile) + err = pkggraph.WriteDOTGraphFile(scrubbedGraph, cfg.OutputGraphFile) if err != nil { - logger.Log.Panicf("Failed to write cache graph to file, %s. Error: %s", *outputGraphFile, err) + logger.Log.Panicf("Failed to write cache graph to file, %s. Error: %s", cfg.OutputGraphFile, err) } return } diff --git a/toolkit/tools/graphanalytics/graphanalytics.go b/toolkit/tools/graphanalytics/graphanalytics.go index 3a0b032cf86..019d17234cd 100644 --- a/toolkit/tools/graphanalytics/graphanalytics.go +++ b/toolkit/tools/graphanalytics/graphanalytics.go @@ -11,9 +11,9 @@ import ( "strings" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/sliceutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" "gonum.org/v1/gonum/graph" graphpath "gonum.org/v1/gonum/graph/path" diff --git a/toolkit/tools/grapher/grapher.go b/toolkit/tools/grapher/grapher.go index 6f801ad79b2..c8a3f155f18 100644 --- a/toolkit/tools/grapher/grapher.go +++ b/toolkit/tools/grapher/grapher.go @@ -4,13 +4,12 @@ package main import ( - "fmt" "os" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/grapher" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" "gopkg.in/alecthomas/kingpin.v2" ) @@ -24,38 +23,24 @@ var ( logLevel = exe.LogLevelFlag(app) strictGoals = app.Flag("strict-goals", 
"Don't allow missing goal packages").Bool() strictUnresolved = app.Flag("strict-unresolved", "Don't allow missing unresolved packages").Bool() - - depGraph = pkggraph.NewPkgGraph() ) -func main() { - const goalNodeName = "ALL" +func populateGrapherConfig() *grapher.Config { + return &grapher.Config{ + Input: *input, + Output: *output, + StrictGoals: *strictGoals, + StrictUnresolved: *strictUnresolved, + } +} +func main() { app.Version(exe.ToolkitVersion) kingpin.MustParse(app.Parse(os.Args[1:])) - - var err error logger.InitBestEffort(*logFile, *logLevel) - localPackages := pkgjson.PackageRepo{} - err = localPackages.ParsePackageJSON(*input) - if err != nil { - logger.Log.Panic(err) - } - - err = populateGraph(depGraph, &localPackages) - if err != nil { - logger.Log.Panic(err) - } - - // Add a default "ALL" goal to build everything local - _, err = depGraph.AddGoalNode(goalNodeName, nil, *strictGoals) - if err != nil { - logger.Log.Panic(err) - } - - logger.Log.Info("Running cycle resolution to fix any cycles in the dependency graph") - err = depGraph.MakeDAG() + cfg := populateGrapherConfig() + depGraph, err := cfg.GenerateDependencyGraph() if err != nil { logger.Log.Panic(err) } @@ -67,213 +52,3 @@ func main() { logger.Log.Info("Finished generating graph.") } - -// addUnresolvedPackage adds an unresolved node to the graph representing the -// packged described in the PackgetVer structure. Returns an error if the node -// could not be created. 
-func addUnresolvedPackage(g *pkggraph.PkgGraph, pkgVer *pkgjson.PackageVer) (newRunNode *pkggraph.PkgNode, err error) { - logger.Log.Debugf("Adding unresolved %s", pkgVer) - if *strictUnresolved { - err = fmt.Errorf("strict-unresolved does not allow unresolved packages, attempting to add %s", pkgVer) - return - } - - nodes, err := g.FindBestPkgNode(pkgVer) - if err != nil { - return - } - if nodes != nil { - err = fmt.Errorf(`attempted to mark a local package "%+v" as unresolved`, pkgVer) - return - } - - // Create a new node - newRunNode, err = g.AddPkgNode(pkgVer, pkggraph.StateUnresolved, pkggraph.TypeRemote, "", "", "", "", "", "") - if err != nil { - return - } - - logger.Log.Infof("Adding unresolved node %s\n", newRunNode.FriendlyName()) - - return -} - -// addNodesForPackage creates a "Run" and "Build" node for the package described -// in the PackageVer structure. Returns pointers to the build and run Nodes -// created, or an error if one of the nodes could not be created. -func addNodesForPackage(g *pkggraph.PkgGraph, pkgVer *pkgjson.PackageVer, pkg *pkgjson.Package) (newRunNode *pkggraph.PkgNode, newBuildNode *pkggraph.PkgNode, err error) { - nodes, err := g.FindExactPkgNodeFromPkg(pkgVer) - if err != nil { - return - } - if nodes != nil { - logger.Log.Warnf(`Duplicate package name for package %+v read from SRPM "%s" (Previous: %+v)`, pkgVer, pkg.SrpmPath, nodes.RunNode) - err = nil - if nodes.RunNode != nil { - newRunNode = nodes.RunNode - } - if nodes.BuildNode != nil { - newBuildNode = nodes.BuildNode - } - } - - if newRunNode == nil { - // Add "Run" node - newRunNode, err = g.AddPkgNode(pkgVer, pkggraph.StateMeta, pkggraph.TypeRun, pkg.SrpmPath, pkg.RpmPath, pkg.SpecPath, pkg.SourceDir, pkg.Architecture, "") - logger.Log.Debugf("Adding run node %s with id %d\n", newRunNode.FriendlyName(), newRunNode.ID()) - if err != nil { - return - } - } - - if newBuildNode == nil { - // Add "Build" node - newBuildNode, err = g.AddPkgNode(pkgVer, 
pkggraph.StateBuild, pkggraph.TypeBuild, pkg.SrpmPath, pkg.RpmPath, pkg.SpecPath, pkg.SourceDir, pkg.Architecture, "") - logger.Log.Debugf("Adding build node %s with id %d\n", newBuildNode.FriendlyName(), newBuildNode.ID()) - if err != nil { - return - } - } - - // A "run" node has an implicit dependency on its coresponding "build" node, encode that here. - err = g.AddEdge(newRunNode, newBuildNode) - if err != nil { - logger.Log.Errorf("Adding edge failed for %+v", pkgVer) - } - - return -} - -// addSingleDependency will add an edge between packageNode and the "Run" node for the -// dependency described in the PackageVer structure. Returns an error if the -// addition failed. -func addSingleDependency(g *pkggraph.PkgGraph, packageNode *pkggraph.PkgNode, dependency *pkgjson.PackageVer) (err error) { - var dependentNode *pkggraph.PkgNode - logger.Log.Tracef("Adding a dependency from %+v to %+v", packageNode.VersionedPkg, dependency) - nodes, err := g.FindBestPkgNode(dependency) - if err != nil { - logger.Log.Errorf("Unable to check lookup list for %+v (%s)", dependency, err) - return err - } - - if nodes == nil { - dependentNode, err = addUnresolvedPackage(g, dependency) - if err != nil { - logger.Log.Errorf(`Could not add a package "%s"`, dependency.Name) - return err - } - } else { - // All dependencies are assumed to be "Run" dependencies - dependentNode = nodes.RunNode - } - - if packageNode == dependentNode { - logger.Log.Debugf("Package %+v requires itself!", packageNode) - return nil - } - - // Avoid creating runtime dependencies from an RPM to a different provide from the same RPM as the dependency will always be met on RPM installation. - // Creating these edges may cause non-problematic cycles that can significantly increase memory usage and runtime during cycle resolution. - // If there are enough of these cycles it can exhaust the system's memory when resolving them. - // - Only check run nodes. 
If a build node has a reflexive cycle then it cannot be built without a bootstrap version. - if packageNode.Type == pkggraph.TypeRun && - dependentNode.Type == pkggraph.TypeRun && - packageNode.RpmPath == dependentNode.RpmPath { - - logger.Log.Debugf("%+v requires %+v which is provided by the same RPM.", packageNode, dependentNode) - return nil - } - - err = g.AddEdge(packageNode, dependentNode) - if err != nil { - logger.Log.Errorf("Failed to add edge failed between %+v and %+v.", packageNode, dependency) - } - - return err -} - -// addLocalPackage adds the package provided by the Package structure, and -// updates the SRPM path name -func addLocalPackage(g *pkggraph.PkgGraph, pkg *pkgjson.Package) error { - _, _, err := addNodesForPackage(g, pkg.Provides, pkg) - return err -} - -// addDependencies adds edges for both build and runtime requirements for the -// package described in the Package structure. Returns an error if the edges -// could not be created. -func addPkgDependencies(g *pkggraph.PkgGraph, pkg *pkgjson.Package) (dependenciesAdded int, err error) { - provide := pkg.Provides - runDependencies := pkg.Requires - buildDependencies := pkg.BuildRequires - - // Find the current node in the lookup list. 
- logger.Log.Debugf("Adding dependencies for package %s", pkg.SrpmPath) - nodes, err := g.FindExactPkgNodeFromPkg(provide) - if err != nil { - return - } - if nodes == nil { - return dependenciesAdded, fmt.Errorf("can't add dependencies to a missing package %+v", pkg) - } - runNode := nodes.RunNode - buildNode := nodes.BuildNode - - // For each run time and build time dependency, add the edges - logger.Log.Tracef("Adding run dependencies") - for _, dependency := range runDependencies { - err = addSingleDependency(g, runNode, dependency) - if err != nil { - logger.Log.Errorf("Unable to add run-time dependencies for %+v", pkg) - return - } - dependenciesAdded++ - } - - logger.Log.Tracef("Adding build dependencies") - for _, dependency := range buildDependencies { - err = addSingleDependency(g, buildNode, dependency) - if err != nil { - logger.Log.Errorf("Unable to add build-time dependencies for %+v", pkg) - return - } - dependenciesAdded++ - } - - return -} - -// populateGraph adds all the data contained in the PackageRepo structure into -// the graph. -func populateGraph(graph *pkggraph.PkgGraph, repo *pkgjson.PackageRepo) (err error) { - packages := repo.Repo - - // Scan and add each package we know about - logger.Log.Infof("Adding all packages from %s", *input) - // NOTE: range iterates by value, not reference. 
Manually access slice - for idx := range packages { - pkg := packages[idx] - err = addLocalPackage(graph, pkg) - if err != nil { - logger.Log.Errorf("Failed to add local package %+v", pkg) - return err - } - } - logger.Log.Infof("\tAdded %d packages", len(packages)) - - // Rescan and add all the dependencies - logger.Log.Infof("Adding all dependencies from %s", *input) - dependenciesAdded := 0 - for idx := range packages { - pkg := packages[idx] - num, err := addPkgDependencies(graph, pkg) - if err != nil { - logger.Log.Errorf("Failed to add dependency %+v", pkg) - return err - } - dependenciesAdded += num - } - logger.Log.Infof("\tAdded %d dependencies", dependenciesAdded) - - return err -} diff --git a/toolkit/tools/graphpkgfetcher/graphpkgfetcher.go b/toolkit/tools/graphpkgfetcher/graphpkgfetcher.go index 90cf41e95bb..b8769f52eba 100644 --- a/toolkit/tools/graphpkgfetcher/graphpkgfetcher.go +++ b/toolkit/tools/graphpkgfetcher/graphpkgfetcher.go @@ -4,21 +4,12 @@ package main import ( - "fmt" "os" - "path/filepath" - "strings" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/packagerepo/repocloner/rpmrepocloner" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/packagerepo/repoutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/rpm" - "github.com/microsoft/CBL-Mariner/toolkit/tools/scheduler/schedulerutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkgfetcher" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" - "gonum.org/v1/gonum/graph" "gopkg.in/alecthomas/kingpin.v2" ) @@ -50,232 +41,33 @@ var ( logLevel = exe.LogLevelFlag(app) ) +func populateGraphpkgfetcherConfig() *pkgfetcher.Config { + return &pkgfetcher.Config{ + InputGraph: 
*inputGraph, + OutputGraph: *outputGraph, + OutDir: *outDir, + ExistingRpmDir: *existingRpmDir, + TmpDir: *tmpDir, + WorkerTar: *workertar, + RepoFiles: *repoFiles, + UsePreviewRepo: *usePreviewRepo, + ToolchainManifest: *toolchainManifest, + TlsClientCert: *tlsClientCert, + TlsClientKey: *tlsClientKey, + StopOnFailure: *stopOnFailure, + InputSummaryFile: *inputSummaryFile, + OutputSummaryFile: *outputSummaryFile, + } +} + func main() { app.Version(exe.ToolkitVersion) kingpin.MustParse(app.Parse(os.Args[1:])) logger.InitBestEffort(*logFile, *logLevel) - dependencyGraph := pkggraph.NewPkgGraph() - - err := pkggraph.ReadDOTGraphFile(dependencyGraph, *inputGraph) - if err != nil { - logger.Log.Panicf("Failed to read graph to file. Error: %s", err) - } - - var toolchainPackages []string - toolchainManifest := *toolchainManifest - if len(toolchainManifest) > 0 { - toolchainPackages, err = schedulerutils.ReadReservedFilesList(toolchainManifest) - if err != nil { - logger.Log.Fatalf("unable to read toolchain manifest file '%s': %s", toolchainManifest, err) - } - } - - if hasUnresolvedNodes(dependencyGraph) { - err = resolveGraphNodes(dependencyGraph, *inputSummaryFile, *outputSummaryFile, toolchainPackages, *disableUpstreamRepos, *stopOnFailure) - if err != nil { - logger.Log.Panicf("Failed to resolve graph. Error: %s", err) - } - } else { - logger.Log.Info("No unresolved packages to cache") - } - - err = pkggraph.WriteDOTGraphFile(dependencyGraph, *outputGraph) + cfg := populateGraphpkgfetcherConfig() + err := cfg.ResolvePackages() if err != nil { logger.Log.Panicf("Failed to write cache graph to file. 
Error: %s", err) } } - -// hasUnresolvedNodes scans through the graph to see if there is anything to do -func hasUnresolvedNodes(graph *pkggraph.PkgGraph) bool { - for _, n := range graph.AllRunNodes() { - if n.State == pkggraph.StateUnresolved { - return true - } - } - return false -} - -// resolveGraphNodes scans a graph and for each unresolved node in the graph clones the RPMs needed -// to satisfy it. -func resolveGraphNodes(dependencyGraph *pkggraph.PkgGraph, inputSummaryFile, outputSummaryFile string, toolchainPackages []string, disableUpstreamRepos, stopOnFailure bool) (err error) { - // Create the worker environment - cloner := rpmrepocloner.New() - err = cloner.Initialize(*outDir, *tmpDir, *workertar, *existingRpmDir, *usePreviewRepo, *repoFiles) - if err != nil { - logger.Log.Errorf("Failed to initialize RPM repo cloner. Error: %s", err) - return - } - defer cloner.Close() - - if !disableUpstreamRepos { - tlsKey, tlsCert := strings.TrimSpace(*tlsClientKey), strings.TrimSpace(*tlsClientCert) - err = cloner.AddNetworkFiles(tlsCert, tlsKey) - if err != nil { - logger.Log.Panicf("Failed to customize RPM repo cloner. Error: %s", err) - } - } - - cachingSucceeded := true - if strings.TrimSpace(inputSummaryFile) == "" { - // Cache an RPM for each unresolved node in the graph. - fetchedPackages := make(map[string]bool) - prebuiltPackages := make(map[string]bool) - for _, n := range dependencyGraph.AllRunNodes() { - if n.State == pkggraph.StateUnresolved { - resolveErr := resolveSingleNode(cloner, n, toolchainPackages, fetchedPackages, prebuiltPackages, *outDir) - // Failing to clone a dependency should not halt a build. - // The build should continue and attempt best effort to build as many packages as possible. 
- if resolveErr != nil { - cachingSucceeded = false - errorMessage := strings.Builder{} - errorMessage.WriteString(fmt.Sprintf("Failed to resolve all nodes in the graph while resolving '%s'\n", n)) - errorMessage.WriteString("Nodes which have this as a dependency:\n") - for _, dependant := range graph.NodesOf(dependencyGraph.To(n.ID())) { - errorMessage.WriteString(fmt.Sprintf("\t'%s' depends on '%s'\n", dependant.(*pkggraph.PkgNode), n)) - } - logger.Log.Debugf(errorMessage.String()) - } - } - } - } else { - // If an input summary file was provided, simply restore the cache using the file. - err = repoutils.RestoreClonedRepoContents(cloner, inputSummaryFile) - cachingSucceeded = err == nil - } - if stopOnFailure && !cachingSucceeded { - return fmt.Errorf("failed to cache unresolved nodes") - } - - logger.Log.Info("Configuring downloaded RPMs as a local repository") - err = cloner.ConvertDownloadedPackagesIntoRepo() - if err != nil { - logger.Log.Errorf("Failed to convert downloaded RPMs into a repo. Error: %s", err) - return - } - - if strings.TrimSpace(outputSummaryFile) != "" { - err = repoutils.SaveClonedRepoContents(cloner, outputSummaryFile) - if err != nil { - logger.Log.Errorf("Failed to save cloned repo contents.") - return - } - } - - return -} - -// resolveSingleNode caches the RPM for a single node. -// It will modify fetchedPackages on a successful package clone. -func resolveSingleNode(cloner *rpmrepocloner.RpmRepoCloner, node *pkggraph.PkgNode, toolchainPackages []string, fetchedPackages, prebuiltPackages map[string]bool, outDir string) (err error) { - const cloneDeps = true - logger.Log.Debugf("Adding node %s to the cache", node.FriendlyName()) - - logger.Log.Debugf("Searching for a package which supplies: %s", node.VersionedPkg.Name) - // Resolve nodes to exact package names so they can be referenced in the graph. 
- resolvedPackages, err := cloner.WhatProvides(node.VersionedPkg) - if err != nil { - msg := fmt.Sprintf("Failed to resolve (%s) to a package. Error: %s", node.VersionedPkg, err) - // It is not an error if an implicit node could not be resolved as it may become available later in the build. - // If it does not become available scheduler will print an error at the end of the build. - if node.Implicit { - logger.Log.Debug(msg) - } else { - logger.Log.Error(msg) - } - return - } - - if len(resolvedPackages) == 0 { - return fmt.Errorf("failed to find any packages providing '%v'", node.VersionedPkg) - } - - preBuilt := false - for _, resolvedPackage := range resolvedPackages { - if !fetchedPackages[resolvedPackage] { - desiredPackage := &pkgjson.PackageVer{ - Name: resolvedPackage, - } - - preBuilt, err = cloner.Clone(cloneDeps, desiredPackage) - if err != nil { - logger.Log.Errorf("Failed to clone '%s' from RPM repo. Error: %s", resolvedPackage, err) - return - } - fetchedPackages[resolvedPackage] = true - prebuiltPackages[resolvedPackage] = preBuilt - - logger.Log.Debugf("Fetched '%s' as potential candidate (is pre-built: %v).", resolvedPackage, prebuiltPackages[resolvedPackage]) - } - } - - err = assignRPMPath(node, outDir, resolvedPackages) - if err != nil { - logger.Log.Errorf("Failed to find an RPM to provide '%s'. 
Error: %s", node.VersionedPkg.Name, err) - return - } - - // If a package is available locally, and it is part of the toolchain, mark it as a prebuilt so the scheduler knows it can use it - // immediately (especially for dynamic generator created capabilities) - if (preBuilt || prebuiltPackages[node.RpmPath]) && isToolchainPackage(node.RpmPath, toolchainPackages) { - logger.Log.Debugf("Using a prebuilt toolchain package to resolve this dependency") - prebuiltPackages[node.RpmPath] = true - node.State = pkggraph.StateUpToDate - node.Type = pkggraph.TypePreBuilt - } else { - node.State = pkggraph.StateCached - } - - logger.Log.Infof("Choosing '%s' to provide '%s'.", filepath.Base(node.RpmPath), node.VersionedPkg.Name) - - return -} - -func assignRPMPath(node *pkggraph.PkgNode, outDir string, resolvedPackages []string) (err error) { - rpmPaths := []string{} - for _, resolvedPackage := range resolvedPackages { - rpmPaths = append(rpmPaths, rpmPackageToRPMPath(resolvedPackage, outDir)) - } - - node.RpmPath = rpmPaths[0] - if len(rpmPaths) > 1 { - var resolvedRPMs []string - logger.Log.Debugf("Found %d candidates. Resolving.", len(rpmPaths)) - - resolvedRPMs, err = rpm.ResolveCompetingPackages(*tmpDir, rpmPaths...) - if err != nil { - logger.Log.Errorf("Failed while trying to pick an RPM providing '%s' from the following RPMs: %v", node.VersionedPkg.Name, rpmPaths) - return - } - - resolvedRPMsCount := len(resolvedRPMs) - if resolvedRPMsCount == 0 { - logger.Log.Errorf("Failed while trying to pick an RPM providing '%s'. No RPM can be installed from the following: %v", node.VersionedPkg.Name, rpmPaths) - return - } - - if resolvedRPMsCount > 1 { - logger.Log.Warnf("Found %d candidates to provide '%s'. Picking the first one.", resolvedRPMsCount, node.VersionedPkg.Name) - } - - node.RpmPath = rpmPackageToRPMPath(resolvedRPMs[0], outDir) - } - - return -} - -func rpmPackageToRPMPath(rpmPackage, outDir string) string { - // Construct the rpm path of the cloned package. 
- rpmName := fmt.Sprintf("%s.rpm", rpmPackage) - return filepath.Join(outDir, rpmName) -} - -func isToolchainPackage(rpmPath string, toolchainRPMs []string) bool { - base := filepath.Base(rpmPath) - for _, t := range toolchainRPMs { - if t == base { - return true - } - } - return false -} diff --git a/toolkit/tools/imageconfigvalidator/imageconfigvalidator.go b/toolkit/tools/imageconfigvalidator/imageconfigvalidator.go index 18854cfbcd6..493cc87dbde 100644 --- a/toolkit/tools/imageconfigvalidator/imageconfigvalidator.go +++ b/toolkit/tools/imageconfigvalidator/imageconfigvalidator.go @@ -6,16 +6,13 @@ package main import ( - "fmt" "os" "path/filepath" - "strings" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/installutils" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/image/configvalidator" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" "gopkg.in/alecthomas/kingpin.v2" ) @@ -48,7 +45,7 @@ func main() { } // Basic validation will occur during load, but we can add additional checking here. - err = ValidateConfiguration(config) + err = configvalidator.ValidateConfiguration(config) if err != nil { // Log an error here as opposed to panicing to keep the output simple // and only contain the error with the config file. 
@@ -57,92 +54,3 @@ func main() { return } - -// ValidateConfiguration will run sanity checks on a configuration structure -func ValidateConfiguration(config configuration.Config) (err error) { - err = config.IsValid() - if err != nil { - return - } - - err = validatePackages(config) - if err != nil { - return - } - - err = validateKickStartInstall(config) - return -} - -func validateKickStartInstall(config configuration.Config) (err error) { - // If doing a kickstart-style installation, then the image config file - // must not have any partitioning info because that will be provided - // by the preinstall script - for _, systemConfig := range config.SystemConfigs { - if systemConfig.IsKickStartBoot { - if len(config.Disks) > 0 || len(systemConfig.PartitionSettings) > 0 { - return fmt.Errorf("Partition should not be specified in image config file when performing kickstart installation") - } - } - } - - return -} - -func validatePackages(config configuration.Config) (err error) { - const ( - selinuxPkgName = "selinux-policy" - validateError = "failed to validate package lists in config" - verityPkgName = "verity-read-only-root" - verityDebugPkgName = "verity-read-only-root-debug-tools" - dracutFipsPkgName = "dracut-fips" - fipsKernelCmdLine = "fips=1" - ) - for _, systemConfig := range config.SystemConfigs { - packageList, err := installutils.PackageNamesFromSingleSystemConfig(systemConfig) - if err != nil { - return fmt.Errorf("%s: %w", validateError, err) - } - foundSELinuxPackage := false - foundVerityInitramfsPackage := false - foundVerityInitramfsDebugPackage := false - foundDracutFipsPackage := false - kernelCmdLineString := systemConfig.KernelCommandLine.ExtraCommandLine - for _, pkg := range packageList { - if pkg == "kernel" { - return fmt.Errorf("%s: kernel should not be included in a package list, add via config file's [KernelOptions] entry", validateError) - } - if pkg == verityPkgName { - foundVerityInitramfsPackage = true - } - if pkg == 
verityDebugPkgName { - foundVerityInitramfsDebugPackage = true - } - if pkg == dracutFipsPkgName { - foundDracutFipsPackage = true - } - if pkg == selinuxPkgName { - foundSELinuxPackage = true - } - } - if systemConfig.ReadOnlyVerityRoot.Enable { - if !foundVerityInitramfsPackage { - return fmt.Errorf("%s: [ReadOnlyVerityRoot] selected, but '%s' package is not included in the package lists", validateError, verityPkgName) - } - if systemConfig.ReadOnlyVerityRoot.TmpfsOverlayDebugEnabled && !foundVerityInitramfsDebugPackage { - return fmt.Errorf("%s: [ReadOnlyVerityRoot] and [TmpfsOverlayDebugEnabled] selected, but '%s' package is not included in the package lists", validateError, verityDebugPkgName) - } - } - if strings.Contains(kernelCmdLineString, fipsKernelCmdLine) { - if !foundDracutFipsPackage { - return fmt.Errorf("%s: 'fips=1' provided on kernel cmdline, but '%s' package is not included in the package lists", validateError, dracutFipsPkgName) - } - } - if systemConfig.KernelCommandLine.SELinux != configuration.SELinuxOff { - if !foundSELinuxPackage { - return fmt.Errorf("%s: [SELinux] selected, but '%s' package is not included in the package lists", validateError, selinuxPkgName) - } - } - } - return -} diff --git a/toolkit/tools/imageconfigvalidator/imageconfigvalidator_test.go b/toolkit/tools/imageconfigvalidator/imageconfigvalidator_test.go index 72b62d79856..ca734ac6455 100644 --- a/toolkit/tools/imageconfigvalidator/imageconfigvalidator_test.go +++ b/toolkit/tools/imageconfigvalidator/imageconfigvalidator_test.go @@ -11,11 +11,11 @@ import ( "strings" "testing" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/installutils" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/jsonutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/image/configvalidator" + 
"github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/installutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" "github.com/stretchr/testify/assert" ) @@ -47,7 +47,7 @@ func TestShouldSucceedValidatingDefaultConfigs(t *testing.T) { fmt.Printf("Failed to validate %s\n", configPath) } - err = ValidateConfiguration(config) + err = configvalidator.ValidateConfiguration(config) assert.NoError(t, err) if err != nil { fmt.Printf("Failed to validate %s\n", configPath) @@ -62,7 +62,7 @@ func TestShouldSucceedValidatingDefaultConfigs(t *testing.T) { func TestShouldFailEmptyConfig(t *testing.T) { config := configuration.Config{} - err := ValidateConfiguration(config) + err := configvalidator.ValidateConfiguration(config) assert.Error(t, err) assert.Equal(t, "config file must provide at least one system configuration inside the [SystemConfigs] field", err.Error()) } @@ -71,7 +71,7 @@ func TestShouldFailEmptySystemConfig(t *testing.T) { config := configuration.Config{} config.SystemConfigs = []configuration.SystemConfig{{}} - err := ValidateConfiguration(config) + err := configvalidator.ValidateConfiguration(config) assert.Error(t, err) assert.Equal(t, "invalid [SystemConfigs]: missing [Name] field", err.Error()) } @@ -96,7 +96,7 @@ func TestShouldFailDeeplyNestedParsingError(t *testing.T) { assert.NoError(t, err) config.Disks[0].PartitionTableType = configuration.PartitionTableType("not_a_real_partition_type") - err = ValidateConfiguration(config) + err = configvalidator.ValidateConfiguration(config) assert.Error(t, err) assert.Equal(t, "invalid [Disks]: invalid [PartitionTableType]: invalid value for PartitionTableType (not_a_real_partition_type)", err.Error()) @@ -134,7 +134,7 @@ func TestShouldFailMissingVerityPackageWithVerityRoot(t *testing.T) { config.SystemConfigs[0].PackageLists = newPackageList - err = ValidateConfiguration(config) + err = 
configvalidator.ValidateConfiguration(config) assert.Error(t, err) assert.Equal(t, "failed to validate package lists in config: [ReadOnlyVerityRoot] selected, but 'verity-read-only-root' package is not included in the package lists", err.Error()) @@ -178,7 +178,7 @@ func TestShouldFailMissingVerityDebugPackageWithVerityDebug(t *testing.T) { // Turn on the debug flag config.SystemConfigs[0].ReadOnlyVerityRoot.TmpfsOverlayDebugEnabled = true - err = ValidateConfiguration(config) + err = configvalidator.ValidateConfiguration(config) assert.Error(t, err) assert.Equal(t, "failed to validate package lists in config: [ReadOnlyVerityRoot] and [TmpfsOverlayDebugEnabled] selected, but 'verity-read-only-root-debug-tools' package is not included in the package lists", err.Error()) @@ -216,7 +216,7 @@ func TestShouldFailMissingFipsPackageWithFipsCmdLine(t *testing.T) { config.SystemConfigs[0].PackageLists = newPackageList - err = ValidateConfiguration(config) + err = configvalidator.ValidateConfiguration(config) assert.Error(t, err) assert.Equal(t, "failed to validate package lists in config: 'fips=1' provided on kernel cmdline, but 'dracut-fips' package is not included in the package lists", err.Error()) @@ -253,7 +253,7 @@ func TestShouldFailMissingSELinuxPackageWithSELinux(t *testing.T) { config.SystemConfigs[0].KernelCommandLine.SELinux = "enforcing" - err = ValidateConfiguration(config) + err = configvalidator.ValidateConfiguration(config) assert.Error(t, err) assert.Equal(t, "failed to validate package lists in config: [SELinux] selected, but 'selinux-policy' package is not included in the package lists", err.Error()) @@ -295,7 +295,7 @@ func TestShouldSucceedSELinuxPackageDefinedInline(t *testing.T) { config.SystemConfigs[0].KernelCommandLine.SELinux = "enforcing" - err = ValidateConfiguration(config) + err = configvalidator.ValidateConfiguration(config) assert.NoError(t, err) return } diff --git a/toolkit/tools/imagepkgfetcher/imagepkgfetcher.go 
b/toolkit/tools/imagepkgfetcher/imagepkgfetcher.go index e9c5bf4a2c6..d4822348d1b 100644 --- a/toolkit/tools/imagepkgfetcher/imagepkgfetcher.go +++ b/toolkit/tools/imagepkgfetcher/imagepkgfetcher.go @@ -5,18 +5,10 @@ package main import ( "os" - "strings" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/installutils" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/packagerepo/repocloner" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/packagerepo/repocloner/rpmrepocloner" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/packagerepo/repoutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" - + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/image/pkgfetcher" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" "gopkg.in/alecthomas/kingpin.v2" ) @@ -48,105 +40,32 @@ var ( logLevel = exe.LogLevelFlag(app) ) +func populateImagePkgFetcherConfig() *pkgfetcher.Config { + return &pkgfetcher.Config{ + ConfigFile: *configFile, + OutDir: *outDir, + BaseDirPath: *baseDirPath, + ExistingRpmDir: *existingRpmDir, + TmpDir: *tmpDir, + WorkerTar: *workertar, + RepoFiles: *repoFiles, + UsePreviewRepo: *usePreviewRepo, + DisableUpstreamRepos: *disableUpstreamRepos, + TlsClientCert: *tlsClientCert, + TlsClientKey: *tlsClientKey, + ExternalOnly: *externalOnly, + InputGraph: *inputGraph, + InputSummaryFile: *inputSummaryFile, + OutputSummaryFile: *outputSummaryFile, + } +} + func main() { app.Version(exe.ToolkitVersion) kingpin.MustParse(app.Parse(os.Args[1:])) logger.InitBestEffort(*logFile, *logLevel) - if *externalOnly && strings.TrimSpace(*inputGraph) == "" { - logger.Log.Fatal("input-graph must be provided if external-only is set.") - } - - 
cloner := rpmrepocloner.New() - err := cloner.Initialize(*outDir, *tmpDir, *workertar, *existingRpmDir, *usePreviewRepo, *repoFiles) - if err != nil { - logger.Log.Panicf("Failed to initialize RPM repo cloner. Error: %s", err) - } - defer cloner.Close() - - if !*disableUpstreamRepos { - tlsKey, tlsCert := strings.TrimSpace(*tlsClientKey), strings.TrimSpace(*tlsClientCert) - err = cloner.AddNetworkFiles(tlsCert, tlsKey) - if err != nil { - logger.Log.Panicf("Failed to customize RPM repo cloner. Error: %s", err) - } - } - - if strings.TrimSpace(*inputSummaryFile) != "" { - // If an input summary file was provided, simply restore the cache using the file. - err = repoutils.RestoreClonedRepoContents(cloner, *inputSummaryFile) - } else { - err = cloneSystemConfigs(cloner, *configFile, *baseDirPath, *externalOnly, *inputGraph) - } - - if err != nil { - logger.Log.Panicf("Failed to clone RPM repo. Error: %s", err) - } - - logger.Log.Info("Configuring downloaded RPMs as a local repository") - err = cloner.ConvertDownloadedPackagesIntoRepo() - if err != nil { - logger.Log.Panicf("Failed to convert downloaded RPMs into a repo. Error: %s", err) - } - - if strings.TrimSpace(*outputSummaryFile) != "" { - err = repoutils.SaveClonedRepoContents(cloner, *outputSummaryFile) - logger.PanicOnError(err, "Failed to save cloned repo contents") - } -} - -func cloneSystemConfigs(cloner repocloner.RepoCloner, configFile, baseDirPath string, externalOnly bool, inputGraph string) (err error) { - const cloneDeps = true - - cfg, err := configuration.LoadWithAbsolutePaths(configFile, baseDirPath) - if err != nil { - return - } - - packageVersionsInConfig, err := installutils.PackageNamesFromConfig(cfg) - if err != nil { - return - } - - // Add kernel packages from KernelOptions - packageVersionsInConfig = append(packageVersionsInConfig, installutils.KernelPackages(cfg)...) 
- - if externalOnly { - packageVersionsInConfig, err = filterExternalPackagesOnly(packageVersionsInConfig, inputGraph) - if err != nil { - return - } - } - - // Add any packages required by the install tools - packageVersionsInConfig = append(packageVersionsInConfig, installutils.GetRequiredPackagesForInstall()...) - - logger.Log.Infof("Cloning: %v", packageVersionsInConfig) - // The image tools don't care if a package was created locally or not, just that it exists. Disregard if it is prebuilt or not. - _, err = cloner.Clone(cloneDeps, packageVersionsInConfig...) - return -} - -// filterExternalPackagesOnly returns the subset of packageVersionsInConfig that only contains external packages. -func filterExternalPackagesOnly(packageVersionsInConfig []*pkgjson.PackageVer, inputGraph string) (filteredPackages []*pkgjson.PackageVer, err error) { - dependencyGraph := pkggraph.NewPkgGraph() - err = pkggraph.ReadDOTGraphFile(dependencyGraph, inputGraph) - if err != nil { - return - } - - for _, pkgVer := range packageVersionsInConfig { - pkgNode, _ := dependencyGraph.FindBestPkgNode(pkgVer) - - // There are two ways an external package will be represented by pkgNode. - // 1) pkgNode may be nil. This is possible if the package is never consumed during the build phase, - // which means it will not be in the graph. - // 2) pkgNode will be of 'StateUnresolved'. This will be the case if a local package has it listed as - // a Requires or BuildRequires. 
- if pkgNode == nil || pkgNode.RunNode.State == pkggraph.StateUnresolved { - filteredPackages = append(filteredPackages, pkgVer) - } - } - - return + cfg := populateImagePkgFetcherConfig() + err := pkgfetcher.FetchPkgsAndCreateRepo(cfg) + logger.PanicOnError(err, "Failed to save cloned repo contents") } diff --git a/toolkit/tools/imager/imager.go b/toolkit/tools/imager/imager.go index 67d6384defb..a6de8411a12 100644 --- a/toolkit/tools/imager/imager.go +++ b/toolkit/tools/imager/imager.go @@ -6,18 +6,12 @@ package main import ( - "fmt" "os" - "path/filepath" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/diskutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/installutils" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/safechroot" - + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/installutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imager" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" "gopkg.in/alecthomas/kingpin.v2" ) @@ -37,23 +31,23 @@ var ( logLevel = exe.LogLevelFlag(app) ) -const ( - // additionalFilesTempDirectory is the location where installutils expects to pick up any additional files - // to add to the install directory - additionalFilesTempDirectory = "/tmp/additionalfiles" - - // postInstallScriptTempDirectory is the directory where installutils expects to pick up any post install scripts - // to run inside the install directory environment - postInstallScriptTempDirectory = "/tmp/postinstall" - - // sshPubKeysTempDirectory is the directory where installutils expects to pick up ssh public key files to add into - // the install directory - sshPubKeysTempDirectory = "/tmp/sshpubkeys" +func 
populateImagerConfig() *imager.Config { + const defaultSystemConfig = 0 - // kickstartPartitionFile is the file that includes the partitioning schema used by - // kickstart installation - kickstartPartitionFile = "/tmp/part-include" -) + return &imager.Config{ + BuildDir: *buildDir, + ConfigFile: *configFile, + LocalRepo: *localRepo, + TdnfTar: *tdnfTar, + RepoFile: *repoFile, + Assets: *assets, + BaseDirPath: *baseDirPath, + OutputDir: *outputDir, + LiveInstallFlag: *liveInstallFlag, + EmitProgress: *emitProgress, + SystemConfig: defaultSystemConfig, + } +} func main() { const defaultSystemConfig = 0 @@ -67,588 +61,7 @@ func main() { installutils.EnableEmittingProgress() } - // Parse Config - config, err := configuration.LoadWithAbsolutePaths(*configFile, *baseDirPath) - logger.PanicOnError(err, "Failed to load configuration file (%s) with base directory (%s)", *configFile, *baseDirPath) - - // Currently only process 1 system config - systemConfig := config.SystemConfigs[defaultSystemConfig] - - // Execute preinstall scripts and parse partitioning when performing kickstart installation - if systemConfig.IsKickStartBoot { - err = installutils.RunPreInstallScripts(systemConfig) - logger.PanicOnError(err, "Failed to preinstall scripts") - - disks, partitionSettings, err := configuration.ParseKickStartPartitionScheme(kickstartPartitionFile) - logger.PanicOnError(err, "Failed to parse partition schema") - - config.Disks = disks - systemConfig.PartitionSettings = partitionSettings - - err = config.IsValid() - if err != nil { - logger.PanicOnError(err, "Invalid image configuration: %s", err) - } - } - - err = buildSystemConfig(systemConfig, config.Disks, *outputDir, *buildDir) + cfg := populateImagerConfig() + err := cfg.BuildSysConfig(defaultSystemConfig) logger.PanicOnError(err, "Failed to build system configuration") - -} - -func buildSystemConfig(systemConfig configuration.SystemConfig, disks []configuration.Disk, outputDir, buildDir string) (err error) { - 
logger.Log.Infof("Building system configuration (%s)", systemConfig.Name) - - const ( - assetsMountPoint = "/installer" - localRepoMountPoint = "/mnt/cdrom/RPMS" - repoFileMountPoint = "/etc/yum.repos.d" - setupRoot = "/setuproot" - installRoot = "/installroot" - rootID = "rootfs" - defaultDiskIndex = 0 - defaultTempDiskName = "disk.raw" - existingChrootDir = false - leaveChrootOnDisk = false - ) - - var ( - isRootFS bool - isLoopDevice bool - isOfflineInstall bool - diskDevPath string - kernelPkg string - encryptedRoot diskutils.EncryptedRootDevice - readOnlyRoot diskutils.VerityDevice - partIDToDevPathMap map[string]string - partIDToFsTypeMap map[string]string - mountPointToOverlayMap map[string]*installutils.Overlay - extraMountPoints []*safechroot.MountPoint - extraDirectories []string - ) - - // Get list of packages to install into image - packagesToInstall, err := installutils.PackageNamesFromSingleSystemConfig(systemConfig) - if err != nil { - logger.Log.Error("Failed to import packages from package lists in config file") - return - } - - isRootFS = len(systemConfig.PartitionSettings) == 0 - if isRootFS { - logger.Log.Infof("Creating rootfs") - additionalExtraMountPoints, additionalExtraDirectories, err := setupRootFS(outputDir, installRoot) - if err != nil { - return err - } - - extraDirectories = append(extraDirectories, additionalExtraDirectories...) - extraMountPoints = append(extraMountPoints, additionalExtraMountPoints...) - isOfflineInstall = true - - // Select the best kernel package for this environment. - kernelPkg, err = installutils.SelectKernelPackage(systemConfig, *liveInstallFlag) - // Rootfs images will usually not set a kernel, ignore errors - if err != nil { - logger.Log.Debugf("Rootfs did not find a kernel, this is normal: '%s'", err.Error()) - } else { - logger.Log.Infof("Rootfs is including a kernel (%s)", kernelPkg) - packagesToInstall = append([]string{kernelPkg}, packagesToInstall...) 
- } - } else { - logger.Log.Info("Creating raw disk in build directory") - diskConfig := disks[defaultDiskIndex] - diskDevPath, partIDToDevPathMap, partIDToFsTypeMap, isLoopDevice, encryptedRoot, readOnlyRoot, err = setupDisk(buildDir, defaultTempDiskName, *liveInstallFlag, diskConfig, systemConfig.Encryption, systemConfig.ReadOnlyVerityRoot) - if err != nil { - return - } - - if isLoopDevice { - isOfflineInstall = true - defer diskutils.DetachLoopbackDevice(diskDevPath) - defer diskutils.BlockOnDiskIO(diskDevPath) - } - - if systemConfig.ReadOnlyVerityRoot.Enable { - defer readOnlyRoot.CleanupVerityDevice() - } - - // Add additional system settings for root encryption - err = setupDiskEncryption(&systemConfig, &encryptedRoot, buildDir) - if err != nil { - return - } - - // Select the best kernel package for this environment - kernelPkg, err = installutils.SelectKernelPackage(systemConfig, *liveInstallFlag) - if err != nil { - logger.Log.Errorf("Failed to select a suitable kernel to install in config (%s)", systemConfig.Name) - return - } - - logger.Log.Infof("Selected (%s) for the kernel", kernelPkg) - packagesToInstall = append([]string{kernelPkg}, packagesToInstall...) - } - - setupChrootDir := filepath.Join(buildDir, setupRoot) - - // Create Parition to Mountpoint map - mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap, diffDiskBuild := installutils.CreateMountPointPartitionMap(partIDToDevPathMap, partIDToFsTypeMap, systemConfig) - if diffDiskBuild { - mountPointToOverlayMap, err = installutils.UpdatePartitionMapWithOverlays(partIDToDevPathMap, partIDToFsTypeMap, mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap, systemConfig) - // Schedule unmount of overlays after the upper layers are unmounted. 
- defer installutils.OverlayUnmount(mountPointToOverlayMap) - if err != nil { - logger.Log.Error("Failed to create the partition map") - return - } - } - - if isOfflineInstall { - // Create setup chroot - additionalExtraMountPoints := []*safechroot.MountPoint{ - safechroot.NewMountPoint(*assets, assetsMountPoint, "", safechroot.BindMountPointFlags, ""), - safechroot.NewMountPoint(*localRepo, localRepoMountPoint, "", safechroot.BindMountPointFlags, ""), - safechroot.NewMountPoint(filepath.Dir(*repoFile), repoFileMountPoint, "", safechroot.BindMountPointFlags, ""), - } - extraMountPoints = append(extraMountPoints, additionalExtraMountPoints...) - - setupChroot := safechroot.NewChroot(setupChrootDir, existingChrootDir) - err = setupChroot.Initialize(*tdnfTar, extraDirectories, extraMountPoints) - if err != nil { - logger.Log.Error("Failed to create setup chroot") - return - } - defer setupChroot.Close(leaveChrootOnDisk) - - // Before entering the chroot, copy in any and all host files needed and - // fix up their paths to be in the tmp directory. 
- err = fixupExtraFilesIntoChroot(setupChroot, &systemConfig) - if err != nil { - logger.Log.Error("Failed to copy extra files into setup chroot") - return - } - - err = setupChroot.Run(func() error { - return buildImage(mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap, partIDToDevPathMap, partIDToFsTypeMap, mountPointToOverlayMap, packagesToInstall, systemConfig, diskDevPath, isRootFS, encryptedRoot, readOnlyRoot, diffDiskBuild) - }) - if err != nil { - logger.Log.Error("Failed to build image") - return - } - - err = cleanupExtraFilesInChroot(setupChroot) - if err != nil { - logger.Log.Error("Failed to cleanup extra files in setup chroot") - return - } - - // Create any partition-based artifacts - err = installutils.ExtractPartitionArtifacts(setupChrootDir, outputDir, defaultDiskIndex, disks[defaultDiskIndex], systemConfig, partIDToDevPathMap, mountPointToOverlayMap) - if err != nil { - return - } - - // Copy disk artifact if necessary. - // Currently only supports one disk config - if !isRootFS { - if disks[defaultDiskIndex].Artifacts != nil { - input := filepath.Join(buildDir, defaultTempDiskName) - output := filepath.Join(outputDir, fmt.Sprintf("disk%d.raw", defaultDiskIndex)) - err = file.Copy(input, output) - if err != nil { - return - } - } - } - } else { - err = buildImage(mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap, partIDToDevPathMap, partIDToFsTypeMap, mountPointToOverlayMap, packagesToInstall, systemConfig, diskDevPath, isRootFS, encryptedRoot, readOnlyRoot, diffDiskBuild) - if err != nil { - logger.Log.Error("Failed to build image") - return - } - } - - // Cleanup encrypted disks - if systemConfig.Encryption.Enable { - err = diskutils.CleanupEncryptedDisks(encryptedRoot, isOfflineInstall) - if err != nil { - logger.Log.Warn("Failed to cleanup encrypted disks") - return - } - } - - return -} - -func setupDiskEncryption(systemConfig *configuration.SystemConfig, encryptedRoot *diskutils.EncryptedRootDevice, keyFileDir 
string) (err error) { - if systemConfig.Encryption.Enable { - // Add a default keyfile for initramfs unlock - encryptedRoot.HostKeyFile, err = diskutils.AddDefaultKeyfile(keyFileDir, encryptedRoot.Device, systemConfig.Encryption) - if err != nil { - logger.Log.Warnf("Failed to add default keyfile: %v", err) - return - } - - // Copy the default keyfile into the image - if len(systemConfig.AdditionalFiles) == 0 { - systemConfig.AdditionalFiles = make(map[string]string) - } - - systemConfig.AdditionalFiles[encryptedRoot.HostKeyFile] = diskutils.DefaultKeyFilePath - logger.Log.Infof("Adding default key file to systemConfig additional files") - } - - return -} - -func setupRootFS(outputDir, installRoot string) (extraMountPoints []*safechroot.MountPoint, extraDirectories []string, err error) { - const rootFSDirName = "rootfs" - - rootFSOutDir := filepath.Join(outputDir, rootFSDirName) - - // Ensure there is not already a directory at rootFSOutDir - exists, err := file.DirExists(rootFSOutDir) - logger.PanicOnError(err, "Failed while checking if directory (%s) exists.", rootFSOutDir) - if exists { - err = fmt.Errorf("output rootfs directory (%s) already exists", rootFSOutDir) - return - } - - err = os.MkdirAll(rootFSOutDir, os.ModePerm) - if err != nil { - return - } - - // For a rootfs, bind-mount the output directory to the chroot directory being installed to - rootFSMountPoint := safechroot.NewMountPoint(rootFSOutDir, installRoot, "", safechroot.BindMountPointFlags, "") - extraMountPoints = []*safechroot.MountPoint{rootFSMountPoint} - extraDirectories = []string{installRoot} - - return -} - -func setupDisk(outputDir, diskName string, liveInstallFlag bool, diskConfig configuration.Disk, rootEncryption configuration.RootEncryption, readOnlyRootConfig configuration.ReadOnlyVerityRoot) (diskDevPath string, partIDToDevPathMap, partIDToFsTypeMap map[string]string, isLoopDevice bool, encryptedRoot diskutils.EncryptedRootDevice, readOnlyRoot diskutils.VerityDevice, err error) { 
- const ( - realDiskType = "path" - ) - if diskConfig.TargetDisk.Type == realDiskType { - if liveInstallFlag { - diskDevPath = diskConfig.TargetDisk.Value - partIDToDevPathMap, partIDToFsTypeMap, encryptedRoot, readOnlyRoot, err = setupRealDisk(diskDevPath, diskConfig, rootEncryption, readOnlyRootConfig) - } else { - err = fmt.Errorf("target Disk Type is set but --live-install option is not set. Please check your config or enable the --live-install option") - return - } - } else { - diskDevPath, partIDToDevPathMap, partIDToFsTypeMap, encryptedRoot, readOnlyRoot, err = setupLoopDeviceDisk(outputDir, diskName, diskConfig, rootEncryption, readOnlyRootConfig) - isLoopDevice = true - } - return -} - -func setupLoopDeviceDisk(outputDir, diskName string, diskConfig configuration.Disk, rootEncryption configuration.RootEncryption, readOnlyRootConfig configuration.ReadOnlyVerityRoot) (diskDevPath string, partIDToDevPathMap, partIDToFsTypeMap map[string]string, encryptedRoot diskutils.EncryptedRootDevice, readOnlyRoot diskutils.VerityDevice, err error) { - defer func() { - // Detach the loopback device on failure - if err != nil && diskDevPath != "" { - detachErr := diskutils.DetachLoopbackDevice(diskDevPath) - if detachErr != nil { - logger.Log.Errorf("Failed to detach loopback device on failed initialization. 
Error: %s", detachErr) - } - } - }() - - // Create Raw Disk File - rawDisk, err := diskutils.CreateEmptyDisk(outputDir, diskName, diskConfig) - if err != nil { - logger.Log.Errorf("Failed to create empty disk file in (%s)", outputDir) - return - } - - diskDevPath, err = diskutils.SetupLoopbackDevice(rawDisk) - if err != nil { - logger.Log.Errorf("Failed to mount raw disk (%s) as a loopback device", rawDisk) - return - } - - partIDToDevPathMap, partIDToFsTypeMap, encryptedRoot, readOnlyRoot, err = setupRealDisk(diskDevPath, diskConfig, rootEncryption, readOnlyRootConfig) - if err != nil { - logger.Log.Errorf("Failed to setup loopback disk partitions (%s)", rawDisk) - return - } - - return -} - -func setupRealDisk(diskDevPath string, diskConfig configuration.Disk, rootEncryption configuration.RootEncryption, readOnlyRootConfig configuration.ReadOnlyVerityRoot) (partIDToDevPathMap, partIDToFsTypeMap map[string]string, encryptedRoot diskutils.EncryptedRootDevice, readOnlyRoot diskutils.VerityDevice, err error) { - const ( - defaultBlockSize = diskutils.MiB - noMaxSize = 0 - ) - - // Set up partitions - partIDToDevPathMap, partIDToFsTypeMap, encryptedRoot, readOnlyRoot, err = diskutils.CreatePartitions(diskDevPath, diskConfig, rootEncryption, readOnlyRootConfig) - if err != nil { - logger.Log.Errorf("Failed to create partitions on disk (%s)", diskDevPath) - return - } - - // Apply firmware - err = diskutils.ApplyRawBinaries(diskDevPath, diskConfig) - if err != nil { - logger.Log.Errorf("Failed to add add raw binaries to disk (%s)", diskDevPath) - return - } - - return -} - -// fixupExtraFilesIntoChroot will copy extra files needed for the build -// into the chroot and alter the extra files in the config to point at their new paths. 
-func fixupExtraFilesIntoChroot(installChroot *safechroot.Chroot, config *configuration.SystemConfig) (err error) { - var filesToCopy []safechroot.FileToCopy - - for i, user := range config.Users { - for j, pubKey := range user.SSHPubKeyPaths { - newFilePath := filepath.Join(sshPubKeysTempDirectory, pubKey) - - fileToCopy := safechroot.FileToCopy{ - Src: pubKey, - Dest: newFilePath, - } - - config.Users[i].SSHPubKeyPaths[j] = newFilePath - filesToCopy = append(filesToCopy, fileToCopy) - } - } - - fixedUpAdditionalFiles := make(map[string]string) - for srcFile, dstFile := range config.AdditionalFiles { - newFilePath := filepath.Join(additionalFilesTempDirectory, srcFile) - - fileToCopy := safechroot.FileToCopy{ - Src: srcFile, - Dest: newFilePath, - } - - fixedUpAdditionalFiles[newFilePath] = dstFile - filesToCopy = append(filesToCopy, fileToCopy) - } - config.AdditionalFiles = fixedUpAdditionalFiles - - for i, script := range config.PostInstallScripts { - newFilePath := filepath.Join(postInstallScriptTempDirectory, script.Path) - - fileToCopy := safechroot.FileToCopy{ - Src: script.Path, - Dest: newFilePath, - } - - config.PostInstallScripts[i].Path = newFilePath - filesToCopy = append(filesToCopy, fileToCopy) - } - - err = installChroot.AddFiles(filesToCopy...) - return -} - -func cleanupExtraFiles() (err error) { - dirsToRemove := []string{additionalFilesTempDirectory, postInstallScriptTempDirectory, sshPubKeysTempDirectory} - - for _, dir := range dirsToRemove { - logger.Log.Infof("Cleaning up directory %s", dir) - err = os.RemoveAll(dir) - if err != nil { - logger.Log.Warnf("Failed to cleanup directory (%s). 
Error: %s", dir, err) - return - } - } - return -} - -func cleanupExtraFilesInChroot(chroot *safechroot.Chroot) (err error) { - logger.Log.Infof("Proceeding to cleanup extra files in chroot %s.", chroot.RootDir()) - err = chroot.Run(func() error { - return cleanupExtraFiles() - }) - return -} -func buildImage(mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap, partIDToDevPathMap, partIDToFsTypeMap map[string]string, mountPointToOverlayMap map[string]*installutils.Overlay, packagesToInstall []string, systemConfig configuration.SystemConfig, diskDevPath string, isRootFS bool, encryptedRoot diskutils.EncryptedRootDevice, readOnlyRoot diskutils.VerityDevice, diffDiskBuild bool) (err error) { - const ( - installRoot = "/installroot" - verityWorkingDir = "verityworkingdir" - emptyWorkerTar = "" - rootDir = "/" - existingChrootDir = true - leaveChrootOnDisk = true - ) - - var installMap map[string]string - - // Only invoke CreateInstallRoot for a raw disk. This call will result in mount points being created from a raw disk - // into the install root. A rootfs will not have these. 
- if !isRootFS { - installMap, err = installutils.CreateInstallRoot(installRoot, mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap, mountPointToOverlayMap) - if err != nil { - err = fmt.Errorf("failed to create install root: %s", err) - return - } - defer installutils.DestroyInstallRoot(installRoot, installMap, mountPointToOverlayMap) - } - - // Install any tools required for the setup root to function - setupChrootPackages := []string{} - toolingPackages := installutils.GetRequiredPackagesForInstall() - for _, toolingPackage := range toolingPackages { - setupChrootPackages = append(setupChrootPackages, toolingPackage.Name) - } - - logger.Log.Infof("HidepidDisabled is %v.", systemConfig.HidepidDisabled) - hidepidEnabled := !systemConfig.HidepidDisabled - - if systemConfig.ReadOnlyVerityRoot.Enable { - // We will need the veritysetup package (and its dependencies) to manage the verity disk, add them to our - // image setup environment (setuproot chroot or live installer). - verityPackages := []string{"device-mapper", "veritysetup"} - setupChrootPackages = append(setupChrootPackages, verityPackages...) 
- } - - for _, setupChrootPackage := range setupChrootPackages { - _, err = installutils.TdnfInstall(setupChrootPackage, rootDir) - if err != nil { - err = fmt.Errorf("failed to install required setup chroot package '%s': %w", setupChrootPackage, err) - return - } - } - - // Create new chroot for the new image - installChroot := safechroot.NewChroot(installRoot, existingChrootDir) - extraInstallMountPoints := []*safechroot.MountPoint{} - extraDirectories := []string{} - err = installChroot.Initialize(emptyWorkerTar, extraDirectories, extraInstallMountPoints) - if err != nil { - err = fmt.Errorf("failed to create install chroot: %s", err) - return - } - defer installChroot.Close(leaveChrootOnDisk) - - // Populate image contents - err = installutils.PopulateInstallRoot(installChroot, packagesToInstall, systemConfig, installMap, mountPointToFsTypeMap, mountPointToMountArgsMap, partIDToDevPathMap, partIDToFsTypeMap, isRootFS, encryptedRoot, diffDiskBuild, hidepidEnabled) - if err != nil { - err = fmt.Errorf("failed to populate image contents: %s", err) - return - } - - // Only configure the bootloader or read only partitions for actual disks, a rootfs does not need these - if !isRootFS { - err = configureDiskBootloader(systemConfig, installChroot, diskDevPath, installMap, encryptedRoot, readOnlyRoot) - if err != nil { - err = fmt.Errorf("failed to configure boot loader: %w", err) - return - } - - // Preconfigure SELinux labels now since all the changes to the filesystem should be done - if systemConfig.KernelCommandLine.SELinux != configuration.SELinuxOff { - err = installutils.SELinuxConfigure(systemConfig, installChroot, mountPointToFsTypeMap) - if err != nil { - err = fmt.Errorf("failed to configure selinux: %w", err) - return - } - } - - // Snapshot the root filesystem as a read-only verity disk and update the initramfs. 
- if systemConfig.ReadOnlyVerityRoot.Enable { - var initramfsPathList []string - err = readOnlyRoot.SwitchDeviceToReadOnly(mountPointMap["/"], mountPointToMountArgsMap["/"]) - if err != nil { - err = fmt.Errorf("failed to switch root to read-only: %w", err) - return - } - installutils.ReportAction("Hashing root for read-only with dm-verity, this may take a long time if error correction is enabled") - initramfsPathList, err = filepath.Glob(filepath.Join(installRoot, "/boot/initrd.img*")) - if err != nil || len(initramfsPathList) != 1 { - return fmt.Errorf("could not find single initramfs (%v): %w", initramfsPathList, err) - } - err = readOnlyRoot.AddRootVerityFilesToInitramfs(verityWorkingDir, initramfsPathList[0]) - if err != nil { - err = fmt.Errorf("failed to include read-only root files in initramfs: %w", err) - return - } - } - } - - return -} - -func configureDiskBootloader(systemConfig configuration.SystemConfig, installChroot *safechroot.Chroot, diskDevPath string, installMap map[string]string, encryptedRoot diskutils.EncryptedRootDevice, readOnlyRoot diskutils.VerityDevice) (err error) { - const rootMountPoint = "/" - const bootMountPoint = "/boot" - - var rootDevice string - - // Add bootloader. Prefer a separate boot partition if one exists. - bootDevice, ok := installMap[bootMountPoint] - bootPrefix := "" - if !ok { - bootDevice = installMap[rootMountPoint] - // If we do not have a separate boot partition we will need to add a prefix to all paths used in the configs. - bootPrefix = "/boot" - } - - if installMap[rootMountPoint] == installutils.NullDevice { - // In case of overlay device being mounted at root, no need to change the bootloader. 
- return - } - - // Grub only accepts UUID, not PARTUUID or PARTLABEL - bootUUID, err := installutils.GetUUID(bootDevice) - if err != nil { - err = fmt.Errorf("failed to get UUID: %s", err) - return - } - - bootType := systemConfig.BootType - err = installutils.InstallBootloader(installChroot, systemConfig.Encryption.Enable, bootType, bootUUID, bootPrefix, diskDevPath) - if err != nil { - err = fmt.Errorf("failed to install bootloader: %s", err) - return - } - - // Add grub config to image - rootPartitionSetting := systemConfig.GetRootPartitionSetting() - if rootPartitionSetting == nil { - err = fmt.Errorf("failed to find partition setting for root mountpoint") - return - } - rootMountIdentifier := rootPartitionSetting.MountIdentifier - if systemConfig.Encryption.Enable { - // Encrypted devices don't currently support identifiers - rootDevice = installMap[rootMountPoint] - } else if systemConfig.ReadOnlyVerityRoot.Enable { - var partIdentifier string - partIdentifier, err = installutils.FormatMountIdentifier(rootMountIdentifier, readOnlyRoot.BackingDevice) - if err != nil { - err = fmt.Errorf("failed to get partIdentifier: %s", err) - return - } - rootDevice = fmt.Sprintf("verityroot:%v", partIdentifier) - } else { - var partIdentifier string - partIdentifier, err = installutils.FormatMountIdentifier(rootMountIdentifier, installMap[rootMountPoint]) - if err != nil { - err = fmt.Errorf("failed to get partIdentifier: %s", err) - return - } - - rootDevice = partIdentifier - } - - // Grub will always use filesystem UUID, never PARTUUID or PARTLABEL - err = installutils.InstallGrubCfg(installChroot.RootDir(), rootDevice, bootUUID, bootPrefix, encryptedRoot, systemConfig.KernelCommandLine, readOnlyRoot) - if err != nil { - err = fmt.Errorf("failed to install main grub config file: %s", err) - return - } - - return } diff --git a/toolkit/tools/internal/buildpipeline/buildpipeline.go b/toolkit/tools/internal/buildpipeline/buildpipeline.go index d070c96eff7..44d4e405877 
100644 --- a/toolkit/tools/internal/buildpipeline/buildpipeline.go +++ b/toolkit/tools/internal/buildpipeline/buildpipeline.go @@ -11,7 +11,7 @@ import ( "path/filepath" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" "golang.org/x/sys/unix" ) diff --git a/toolkit/tools/internal/debugutils/debugutils.go b/toolkit/tools/internal/debugutils/debugutils.go index 87e4d5bf117..1b16dbdf584 100644 --- a/toolkit/tools/internal/debugutils/debugutils.go +++ b/toolkit/tools/internal/debugutils/debugutils.go @@ -8,7 +8,7 @@ import ( "os" "time" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // These are functions which are useful for adding breakpoints into code running in a chroot, while running diff --git a/toolkit/tools/internal/exe/exe.go b/toolkit/tools/internal/exe/exe.go index cd9d0983670..a30d7647d8b 100644 --- a/toolkit/tools/internal/exe/exe.go +++ b/toolkit/tools/internal/exe/exe.go @@ -8,8 +8,9 @@ import ( "fmt" "strings" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "gopkg.in/alecthomas/kingpin.v2" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // ToolkitVersion specifies the version of the toolkit and the reported version of all tools in it. diff --git a/toolkit/tools/internal/file/file.go b/toolkit/tools/internal/file/file.go index a793ce45baf..73e87646f52 100644 --- a/toolkit/tools/internal/file/file.go +++ b/toolkit/tools/internal/file/file.go @@ -13,8 +13,8 @@ import ( "os" "path/filepath" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // IsDir check if a given file path is a directory. 
diff --git a/toolkit/tools/internal/jsonutils/jsonutils.go b/toolkit/tools/internal/jsonutils/jsonutils.go index 3ad7c650223..36e18a88408 100644 --- a/toolkit/tools/internal/jsonutils/jsonutils.go +++ b/toolkit/tools/internal/jsonutils/jsonutils.go @@ -10,7 +10,7 @@ import ( "io/ioutil" "os" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) const ( diff --git a/toolkit/tools/internal/network/network.go b/toolkit/tools/internal/network/network.go index d74aa04d855..caef54251a4 100644 --- a/toolkit/tools/internal/network/network.go +++ b/toolkit/tools/internal/network/network.go @@ -13,9 +13,9 @@ import ( "strings" "time" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/retry" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // JoinURL concatenates baseURL with extraPaths diff --git a/toolkit/tools/internal/packagerepo/repocloner/rpmrepocloner/rpmrepocloner.go b/toolkit/tools/internal/packagerepo/repocloner/rpmrepocloner/rpmrepocloner.go index ea338c4551d..4579fa357fe 100644 --- a/toolkit/tools/internal/packagerepo/repocloner/rpmrepocloner/rpmrepocloner.go +++ b/toolkit/tools/internal/packagerepo/repocloner/rpmrepocloner/rpmrepocloner.go @@ -12,13 +12,13 @@ import ( "strings" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/buildpipeline" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/packagerepo/repocloner" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/packagerepo/repomanager/rpmrepomanager" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/safechroot" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" 
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/tdnf" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/safechroot" ) const ( @@ -78,12 +78,12 @@ func New() *RpmRepoCloner { } // Initialize initializes rpmrepocloner, enabling Clone() to be called. -// - destinationDir is the directory to save RPMs -// - tmpDir is the directory to create a chroot -// - workerTar is the path to the worker tar used to seed the chroot -// - existingRpmsDir is the directory with prebuilt RPMs -// - usePreviewRepo if set, the upstream preview repository will be used. -// - repoDefinitions is a list of repo files to use when cloning RPMs +// - destinationDir is the directory to save RPMs +// - tmpDir is the directory to create a chroot +// - workerTar is the path to the worker tar used to seed the chroot +// - existingRpmsDir is the directory with prebuilt RPMs +// - usePreviewRepo if set, the upstream preview repository will be used. +// - repoDefinitions is a list of repo files to use when cloning RPMs func (r *RpmRepoCloner) Initialize(destinationDir, tmpDir, workerTar, existingRpmsDir string, usePreviewRepo bool, repoDefinitions []string) (err error) { const ( isExistingDir = false diff --git a/toolkit/tools/internal/packagerepo/repomanager/rpmrepomanager/rpmrepomanager.go b/toolkit/tools/internal/packagerepo/repomanager/rpmrepomanager/rpmrepomanager.go index f71edfb7a63..714c9f6d36d 100644 --- a/toolkit/tools/internal/packagerepo/repomanager/rpmrepomanager/rpmrepomanager.go +++ b/toolkit/tools/internal/packagerepo/repomanager/rpmrepomanager/rpmrepomanager.go @@ -10,8 +10,8 @@ import ( "strings" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // CreateRepo will create an RPM repository at repoDir diff 
--git a/toolkit/tools/internal/packagerepo/repoutils/repoutils.go b/toolkit/tools/internal/packagerepo/repoutils/repoutils.go index cf2b08a522b..c5bb6627668 100644 --- a/toolkit/tools/internal/packagerepo/repoutils/repoutils.go +++ b/toolkit/tools/internal/packagerepo/repoutils/repoutils.go @@ -9,9 +9,9 @@ import ( "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/jsonutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/packagerepo/repocloner" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // RestoreClonedRepoContents restores a cloner's repo contents using a JSON file at `srcFile`. diff --git a/toolkit/tools/internal/pkgjson/pkgjson.go b/toolkit/tools/internal/pkgjson/pkgjson.go index fd8f65af12a..4ab41ef1af0 100644 --- a/toolkit/tools/internal/pkgjson/pkgjson.go +++ b/toolkit/tools/internal/pkgjson/pkgjson.go @@ -9,8 +9,8 @@ import ( "strings" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/jsonutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/versioncompare" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) const ( diff --git a/toolkit/tools/internal/rpm/rpm.go b/toolkit/tools/internal/rpm/rpm.go index 161a1bb3438..c84c616d676 100644 --- a/toolkit/tools/internal/rpm/rpm.go +++ b/toolkit/tools/internal/rpm/rpm.go @@ -9,8 +9,8 @@ import ( "strings" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) const ( diff --git a/toolkit/tools/internal/rpm/rpm_test.go b/toolkit/tools/internal/rpm/rpm_test.go index beacad895cb..fbefb618b6e 
100644 --- a/toolkit/tools/internal/rpm/rpm_test.go +++ b/toolkit/tools/internal/rpm/rpm_test.go @@ -8,7 +8,7 @@ import ( "path/filepath" "testing" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" "github.com/stretchr/testify/assert" ) diff --git a/toolkit/tools/internal/shell/shell.go b/toolkit/tools/internal/shell/shell.go index 4a4bdfc89e3..e3344f22f00 100644 --- a/toolkit/tools/internal/shell/shell.go +++ b/toolkit/tools/internal/shell/shell.go @@ -11,9 +11,9 @@ import ( "strings" "sync" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "golang.org/x/sys/unix" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // ShellProgram is the default shell program used by the tooling. diff --git a/toolkit/tools/internal/tdnf/tdnf_test.go b/toolkit/tools/internal/tdnf/tdnf_test.go index b5fa9bbdd38..d79bd67fbaf 100644 --- a/toolkit/tools/internal/tdnf/tdnf_test.go +++ b/toolkit/tools/internal/tdnf/tdnf_test.go @@ -7,7 +7,7 @@ import ( "os" "testing" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" "github.com/stretchr/testify/assert" ) diff --git a/toolkit/tools/isomaker/isomaker.go b/toolkit/tools/isomaker/isomaker.go index 56d420adee7..84517317f57 100644 --- a/toolkit/tools/isomaker/isomaker.go +++ b/toolkit/tools/isomaker/isomaker.go @@ -6,10 +6,11 @@ package main import ( "os" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "gopkg.in/alecthomas/kingpin.v2" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/image/isomaker" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) var ( @@ -36,7 +37,7 @@ func main() { logger.InitBestEffort(*logFilePath, *logLevel) - isoMaker := NewIsoMaker( + isoMaker := 
isomaker.NewIsoMaker( *unattendedInstall, *baseDirPath, *buildDirPath, diff --git a/toolkit/tools/liveinstaller/liveinstaller.go b/toolkit/tools/liveinstaller/liveinstaller.go index d8a6fb56748..d3e00d7c93d 100644 --- a/toolkit/tools/liveinstaller/liveinstaller.go +++ b/toolkit/tools/liveinstaller/liveinstaller.go @@ -4,26 +4,13 @@ package main import ( - "bufio" - "fmt" "os" - "os/signal" - "path/filepath" - "regexp" - "strconv" - "strings" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/diskutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/jsonutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" - - "golang.org/x/sys/unix" "gopkg.in/alecthomas/kingpin.v2" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/image/liveinstaller" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) var ( @@ -41,355 +28,25 @@ var ( logLevel = exe.LogLevelFlag(app) ) -// Every valid mouse event handler will follow the format: -// H: Handlers=eventX mouseX -var mouseEventHandlerRegex = regexp.MustCompile(`^H:\s+Handlers=(\w+)\s+mouse\d+`) - -type imagerArguments struct { - imagerTool string - configFile string - buildDir string - baseDirPath string - emitProgress bool - logFile string - logLevel string -} - -func handleCtrlC(signals chan os.Signal) { - <-signals - logger.Log.Error("Installation in progress, please wait until finished.") +func populateLiveinstallerConfig() *liveinstaller.Config { + const imagerLogFile = "/var/log/imager.log" + return &liveinstaller.Config{ + ConfigFile: *configFile, + TemplateConfigFile: 
*templateConfigFile, + ForceAttended: *forceAttended, + ImagerTool: *imagerTool, + BuildDir: *buildDir, + BaseDirPath: *baseDirPath, + ImagerLogFile: imagerLogFile, + } } func main() { - const imagerLogFile = "/var/log/imager.log" - app.Version(exe.ToolkitVersion) kingpin.MustParse(app.Parse(os.Args[1:])) logger.InitBestEffort(*logFile, *logLevel) - // Prevent a SIGINT (Ctr-C) from stopping liveinstaller while an installation is in progress. - // It is the responsibility of the installer's user interface (terminal installer or Calamares) to handle quit requests from the user. - signals := make(chan os.Signal, 1) - signal.Notify(signals, unix.SIGINT) - go handleCtrlC(signals) - - // Imager's stdout/stderr will be combined with this tool's, so it will automatically be logged to the current log file - args := imagerArguments{ - imagerTool: *imagerTool, - buildDir: *buildDir, - baseDirPath: *baseDirPath, - logLevel: logger.Log.GetLevel().String(), - logFile: imagerLogFile, - } - - installFunc := installerFactory(*forceAttended, *configFile, *templateConfigFile) - installationQuit, err := installFunc(args) - if installationQuit { - logger.Log.Error("User quit installation") - // Return a non-zero exit code to drop the user to shell - os.Exit(1) - } - + cfg := populateLiveinstallerConfig() + err := cfg.Install() logger.PanicOnError(err) - - // Change the boot order by either changing the EFI boot order or ejecting CDROM. 
- updateBootOrder() -} - -func installerFactory(forceAttended bool, configFile, templateConfigFile string) (installFunc func(imagerArguments) (bool, error)) { - isAttended := false - - // Determine if the attended installer should be shown - if forceAttended { - logger.Log.Info("`attended` flag set, using attended installation") - isAttended = true - } else { - unattendedExists, _ := file.PathExists(configFile) - - if !unattendedExists { - logger.Log.Infof("Config file (%s) does not exist, using attended installation", configFile) - isAttended = true - } - } - - if isAttended { - templateExists, _ := file.PathExists(templateConfigFile) - if !templateExists { - logger.Log.Panicf("Attended installation requires a template config file. Specified template (%s) does not exist.", templateConfigFile) - } - } - - if isAttended { - installFunc = func(args imagerArguments) (bool, error) { - return terminalUIAttendedInstall(templateConfigFile, args) - } - } else { - installFunc = func(args imagerArguments) (bool, error) { - return unattendedInstall(configFile, args) - } - } - - return -} - -func updateBootOrder() (err error) { - logger.Log.Info("Ejecting CD-ROM.") - _, _, err = shell.Execute("eject", "--cdrom") - - if err != nil { - // If there was an error ejecting the CD-ROM, assume this is a USB installation and prompt the user - // to remove the USB device before rebooting. - logger.Log.Info("==================================================================================") - logger.Log.Info("Installation Complete. 
Please Remove USB installation media and reboot if present.") - logger.Log.Info("==================================================================================") - } - - return -} - -func findMouseHandlers() (handlers string, err error) { - const ( - deviceHandlerFile = "/proc/bus/input/devices" - eventPrefix = "/dev/input" - handlerDelimiter = ":" - absoluteInputEvents = "abs" - eventMatchGroup = 1 - ) - - devicesFile, err := os.Open(deviceHandlerFile) - if err != nil { - return - } - defer devicesFile.Close() - - // Gather a list of all mouse event handlers from the devices file - eventHandlers := []string{} - scanner := bufio.NewScanner(devicesFile) - for scanner.Scan() { - matches := mouseEventHandlerRegex.FindStringSubmatch(scanner.Text()) - if len(matches) == 0 { - continue - } - - eventPath := filepath.Join(eventPrefix, matches[eventMatchGroup]) - eventHandlers = append(eventHandlers, eventPath) - } - - err = scanner.Err() - if err != nil { - return - } - - if len(eventHandlers) == 0 { - err = fmt.Errorf("no mouse handler detected") - return - } - - // Add the the absolute input modifier to the handler list as mouse events are absolute. - // QT's default behavior is to take in relative events. 
- eventHandlers = append(eventHandlers, absoluteInputEvents) - - // Join all mouse event handlers together so they all function inside QT - handlers = strings.Join(eventHandlers, handlerDelimiter) - - return -} - -func calamaresInstall(templateConfigFile string, args imagerArguments) (err error) { - const ( - squashErrors = false - calamaresDir = "/etc/calamares" - ) - - args.emitProgress = true - args.configFile = filepath.Join(calamaresDir, "unattended_config.json") - - launchScript := filepath.Join(calamaresDir, "mariner-install.sh") - skuDir := filepath.Join(calamaresDir, "mariner-skus") - - bootType := diskutils.SystemBootType() - logger.Log.Infof("Boot type detected: %s", bootType) - - mouseHandlers, err := findMouseHandlers() - if err != nil { - // Not finding a mouse isn't fatal as the installer can instead be driven with - // a keyboard only. - logger.Log.Warnf("No mouse detected: %v", err) - } - - logger.Log.Infof("Using (%s) for mouse input", mouseHandlers) - newEnv := append(shell.CurrentEnvironment(), fmt.Sprintf("QT_QPA_EVDEV_MOUSE_PARAMETERS=%s", mouseHandlers)) - shell.SetEnvironment(newEnv) - - // Generate the files needed for calamares - err = os.MkdirAll(skuDir, os.ModePerm) - if err != nil { - return - } - - err = generateCalamaresLaunchScript(launchScript, args) - if err != nil { - return - } - - // Generate the partial JSONs for SKUs - err = generateCalamaresSKUs(templateConfigFile, skuDir, bootType) - if err != nil { - return - } - - return shell.ExecuteLive(squashErrors, "calamares", "-platform", "linuxfb") -} - -func generateCalamaresLaunchScript(launchScriptPath string, args imagerArguments) (err error) { - const executionPerm = 0755 - - // Generate the script calamares will invoke to install - scriptFile, err := os.OpenFile(launchScriptPath, os.O_CREATE|os.O_RDWR, executionPerm) - if err != nil { - return - } - defer scriptFile.Close() - - logger.Log.Infof("Generating install script (%s)", launchScriptPath) - program, commandArgs := 
formatImagerCommand(args) - - scriptFile.WriteString("#!/bin/bash\n") - scriptFile.WriteString(fmt.Sprintf("%s %s", program, strings.Join(commandArgs, " "))) - scriptFile.WriteString("\n") - - return -} - -func generateCalamaresSKUs(templateConfigFile, skuDir, bootType string) (err error) { - // Parse template config - templateConfig, err := configuration.Load(templateConfigFile) - if err != nil { - return - } - - // Generate JSON snippets for each SKU - for _, sysConfig := range templateConfig.SystemConfigs { - sysConfig.BootType = bootType - err = generateSingleCalamaresSKU(sysConfig, skuDir) - if err != nil { - return - } - } - - return -} - -func generateSingleCalamaresSKU(sysConfig configuration.SystemConfig, skuDir string) (err error) { - skuFilePath := filepath.Join(skuDir, sysConfig.Name+".json") - logger.Log.Infof("Generating SKU option (%s)", skuFilePath) - - // Write the individual system config to a file. - return jsonutils.WriteJSONFile(skuFilePath, sysConfig) -} - -func terminalUIAttendedInstall(templateConfigFile string, args imagerArguments) (installationQuit bool, err error) { - const configFileName = "attendedconfig.json" - - // Parse template config - templateConfig, err := configuration.Load(templateConfigFile) - if err != nil { - return - } - - // Store the config file generated by the attended installer under the build dir - err = os.MkdirAll(args.buildDir, os.ModePerm) - if err != nil { - return - } - - args.configFile = filepath.Join(args.buildDir, configFileName) - attendedInstaller, err := attendedinstaller.New(templateConfig, - // Terminal-UI based installation - func(cfg configuration.Config, progress chan int, status chan string) (err error) { - return terminalAttendedInstall(cfg, progress, status, args) - }, - - // Calamares based installation - func() (err error) { - return calamaresInstall(templateConfigFile, args) - }) - - if err != nil { - return - } - - _, installationQuit, err = attendedInstaller.Run() - return -} - -func 
terminalAttendedInstall(cfg configuration.Config, progress chan int, status chan string, args imagerArguments) (err error) { - defer close(progress) - defer close(status) - - logger.Log.Infof("Writing temporary config file to (%s)", args.configFile) - err = jsonutils.WriteJSONFile(args.configFile, cfg) - if err != nil { - return - } - - onStdout := func(args ...interface{}) { - const ( - progressPrefix = "progress:" - actionPrefix = "action:" - ) - - if len(args) == 0 { - return - } - - line := args[0].(string) - - if strings.HasPrefix(line, progressPrefix) { - reportedProgress, err := strconv.Atoi(strings.TrimPrefix(line, progressPrefix)) - if err != nil { - logger.Log.Warnf("Failed to convert progress to an integer (%s). Error: %v", line, err) - return - } - - progress <- reportedProgress - } else if strings.HasPrefix(line, actionPrefix) { - status <- strings.TrimPrefix(line, actionPrefix) - } - } - - args.emitProgress = true - program, commandArgs := formatImagerCommand(args) - err = shell.ExecuteLiveWithCallback(onStdout, logger.Log.Warn, false, program, commandArgs...) - - return -} - -func unattendedInstall(configFile string, args imagerArguments) (installationQuit bool, err error) { - const squashErrors = false - - args.configFile = configFile - - program, commandArgs := formatImagerCommand(args) - err = shell.ExecuteLive(squashErrors, program, commandArgs...) 
- return -} - -func formatImagerCommand(args imagerArguments) (program string, commandArgs []string) { - program = args.imagerTool - - commandArgs = []string{ - "--live-install", - fmt.Sprintf("--input=%s", args.configFile), - fmt.Sprintf("--build-dir=%s", args.buildDir), - fmt.Sprintf("--base-dir=%s", args.baseDirPath), - fmt.Sprintf("--log-file=%s", args.logFile), - fmt.Sprintf("--log-level=%s", args.logLevel), - } - - if args.emitProgress { - commandArgs = append(commandArgs, "--emit-progress") - } - - return } diff --git a/toolkit/tools/pkg/graph/grapher/config.go b/toolkit/tools/pkg/graph/grapher/config.go new file mode 100644 index 00000000000..930b043c3f6 --- /dev/null +++ b/toolkit/tools/pkg/graph/grapher/config.go @@ -0,0 +1,8 @@ +package grapher + +type Config struct { + Input string + Output string + StrictGoals bool + StrictUnresolved bool +} diff --git a/toolkit/tools/pkg/graph/grapher/grahper.go b/toolkit/tools/pkg/graph/grapher/grahper.go new file mode 100644 index 00000000000..2b8b2b59274 --- /dev/null +++ b/toolkit/tools/pkg/graph/grapher/grahper.go @@ -0,0 +1,249 @@ +package grapher + +import ( + "fmt" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" +) + +func (cfg *Config) GenerateDependencyGraph() (*pkggraph.PkgGraph, error) { + const goalNodeName = "ALL" + depGraph := pkggraph.NewPkgGraph() + localPackages := pkgjson.PackageRepo{} + + err := localPackages.ParsePackageJSON(cfg.Input) + if err != nil { + return nil, err + } + + err = populateGraph(depGraph, &localPackages, cfg.Input, cfg.StrictUnresolved) + if err != nil { + return nil, err + } + + // Add a default "ALL" goal to build everything local + _, err = depGraph.AddGoalNode(goalNodeName, nil, cfg.StrictGoals) + if err != nil { + return nil, err + } + + logger.Log.Info("Running cycle resolution to fix any cycles in the dependency graph") 
+	err = depGraph.MakeDAG() + if err != nil { + return nil, err + } + + return depGraph, nil +} + +// addUnresolvedPackage adds an unresolved node to the graph representing the +// package described in the PackageVer structure. Returns an error if the node +// could not be created. +func addUnresolvedPackage(g *pkggraph.PkgGraph, pkgVer *pkgjson.PackageVer, strictUnresolved bool) (newRunNode *pkggraph.PkgNode, err error) { + logger.Log.Debugf("Adding unresolved %s", pkgVer) + if strictUnresolved { + err = fmt.Errorf("strict-unresolved does not allow unresolved packages, attempting to add %s", pkgVer) + return + } + + nodes, err := g.FindBestPkgNode(pkgVer) + if err != nil { + return + } + if nodes != nil { + err = fmt.Errorf(`attempted to mark a local package "%+v" as unresolved`, pkgVer) + return + } + + // Create a new node + newRunNode, err = g.AddPkgNode(pkgVer, pkggraph.StateUnresolved, pkggraph.TypeRemote, "", "", "", "", "", "") + if err != nil { + return + } + + logger.Log.Infof("Adding unresolved node %s\n", newRunNode.FriendlyName()) + + return +} + +// addNodesForPackage creates a "Run" and "Build" node for the package described +// in the PackageVer structure. Returns pointers to the build and run Nodes +// created, or an error if one of the nodes could not be created.
+func addNodesForPackage(g *pkggraph.PkgGraph, pkgVer *pkgjson.PackageVer, pkg *pkgjson.Package) (newRunNode *pkggraph.PkgNode, newBuildNode *pkggraph.PkgNode, err error) { + nodes, err := g.FindExactPkgNodeFromPkg(pkgVer) + if err != nil { + return + } + if nodes != nil { + logger.Log.Warnf(`Duplicate package name for package %+v read from SRPM "%s" (Previous: %+v)`, pkgVer, pkg.SrpmPath, nodes.RunNode) + err = nil + if nodes.RunNode != nil { + newRunNode = nodes.RunNode + } + if nodes.BuildNode != nil { + newBuildNode = nodes.BuildNode + } + } + + if newRunNode == nil { + // Add "Run" node + newRunNode, err = g.AddPkgNode(pkgVer, pkggraph.StateMeta, pkggraph.TypeRun, pkg.SrpmPath, pkg.RpmPath, pkg.SpecPath, pkg.SourceDir, pkg.Architecture, "") + logger.Log.Debugf("Adding run node %s with id %d\n", newRunNode.FriendlyName(), newRunNode.ID()) + if err != nil { + return + } + } + + if newBuildNode == nil { + // Add "Build" node + newBuildNode, err = g.AddPkgNode(pkgVer, pkggraph.StateBuild, pkggraph.TypeBuild, pkg.SrpmPath, pkg.RpmPath, pkg.SpecPath, pkg.SourceDir, pkg.Architecture, "") + logger.Log.Debugf("Adding build node %s with id %d\n", newBuildNode.FriendlyName(), newBuildNode.ID()) + if err != nil { + return + } + } + + // A "run" node has an implicit dependency on its corresponding "build" node, encode that here. + err = g.AddEdge(newRunNode, newBuildNode) + if err != nil { + logger.Log.Errorf("Adding edge failed for %+v", pkgVer) + } + + return +} + +// addSingleDependency will add an edge between packageNode and the "Run" node for the +// dependency described in the PackageVer structure. Returns an error if the +// addition failed.
+func addSingleDependency(g *pkggraph.PkgGraph, packageNode *pkggraph.PkgNode, dependency *pkgjson.PackageVer, strictUnresolved bool) (err error) { + var dependentNode *pkggraph.PkgNode + logger.Log.Tracef("Adding a dependency from %+v to %+v", packageNode.VersionedPkg, dependency) + nodes, err := g.FindBestPkgNode(dependency) + if err != nil { + logger.Log.Errorf("Unable to check lookup list for %+v (%s)", dependency, err) + return err + } + + if nodes == nil { + dependentNode, err = addUnresolvedPackage(g, dependency, strictUnresolved) + if err != nil { + logger.Log.Errorf(`Could not add a package "%s"`, dependency.Name) + return err + } + } else { + // All dependencies are assumed to be "Run" dependencies + dependentNode = nodes.RunNode + } + + if packageNode == dependentNode { + logger.Log.Debugf("Package %+v requires itself!", packageNode) + return nil + } + + // Avoid creating runtime dependencies from an RPM to a different provide from the same RPM as the dependency will always be met on RPM installation. + // Creating these edges may cause non-problematic cycles that can significantly increase memory usage and runtime during cycle resolution. + // If there are enough of these cycles it can exhaust the system's memory when resolving them. + // - Only check run nodes. If a build node has a reflexive cycle then it cannot be built without a bootstrap version. 
+	if packageNode.Type == pkggraph.TypeRun &&
+		dependentNode.Type == pkggraph.TypeRun &&
+		packageNode.RpmPath == dependentNode.RpmPath {
+
+		logger.Log.Debugf("%+v requires %+v which is provided by the same RPM.", packageNode, dependentNode)
+		return nil
+	}
+
+	err = g.AddEdge(packageNode, dependentNode)
+	if err != nil {
+		logger.Log.Errorf("Failed to add edge between %+v and %+v.", packageNode, dependency)
+	}
+
+	return err
+}
+
+// addLocalPackage adds the package provided by the Package structure, and
+// updates the SRPM path name
+func addLocalPackage(g *pkggraph.PkgGraph, pkg *pkgjson.Package) error {
+	_, _, err := addNodesForPackage(g, pkg.Provides, pkg)
+	return err
+}
+
+// addPkgDependencies adds edges for both build and runtime requirements for the
+// package described in the Package structure. Returns an error if the edges
+// could not be created.
+func addPkgDependencies(g *pkggraph.PkgGraph, pkg *pkgjson.Package, strictUnresolved bool) (dependenciesAdded int, err error) {
+	provide := pkg.Provides
+	runDependencies := pkg.Requires
+	buildDependencies := pkg.BuildRequires
+
+	// Find the current node in the lookup list.
+ logger.Log.Debugf("Adding dependencies for package %s", pkg.SrpmPath) + nodes, err := g.FindExactPkgNodeFromPkg(provide) + if err != nil { + return + } + if nodes == nil { + return dependenciesAdded, fmt.Errorf("can't add dependencies to a missing package %+v", pkg) + } + runNode := nodes.RunNode + buildNode := nodes.BuildNode + + // For each run time and build time dependency, add the edges + logger.Log.Tracef("Adding run dependencies") + for _, dependency := range runDependencies { + err = addSingleDependency(g, runNode, dependency, strictUnresolved) + if err != nil { + logger.Log.Errorf("Unable to add run-time dependencies for %+v", pkg) + return + } + dependenciesAdded++ + } + + logger.Log.Tracef("Adding build dependencies") + for _, dependency := range buildDependencies { + err = addSingleDependency(g, buildNode, dependency, strictUnresolved) + if err != nil { + logger.Log.Errorf("Unable to add build-time dependencies for %+v", pkg) + return + } + dependenciesAdded++ + } + + return +} + +// populateGraph adds all the data contained in the PackageRepo structure into +// the graph. +func populateGraph(graph *pkggraph.PkgGraph, repo *pkgjson.PackageRepo, input string, strictUnresolved bool) (err error) { + packages := repo.Repo + + // Scan and add each package we know about + logger.Log.Infof("Adding all packages from %s", input) + // NOTE: range iterates by value, not reference. 
Manually access slice + for idx := range packages { + pkg := packages[idx] + err = addLocalPackage(graph, pkg) + if err != nil { + logger.Log.Errorf("Failed to add local package %+v", pkg) + return err + } + } + logger.Log.Infof("\tAdded %d packages", len(packages)) + + // Rescan and add all the dependencies + logger.Log.Infof("Adding all dependencies from %s", input) + dependenciesAdded := 0 + for idx := range packages { + pkg := packages[idx] + num, err := addPkgDependencies(graph, pkg, strictUnresolved) + if err != nil { + logger.Log.Errorf("Failed to add dependency %+v", pkg) + return err + } + dependenciesAdded += num + } + logger.Log.Infof("\tAdded %d dependencies", dependenciesAdded) + + return err +} diff --git a/toolkit/tools/pkg/graph/pkgfetcher/config.go b/toolkit/tools/pkg/graph/pkgfetcher/config.go new file mode 100644 index 00000000000..9456bb1cf49 --- /dev/null +++ b/toolkit/tools/pkg/graph/pkgfetcher/config.go @@ -0,0 +1,19 @@ +package pkgfetcher + +type Config struct { + InputGraph string + OutputGraph string + OutDir string + ExistingRpmDir string + TmpDir string + WorkerTar string + RepoFiles []string + UsePreviewRepo bool + DisableUpstreamRepos bool + ToolchainManifest string + TlsClientCert string + TlsClientKey string + StopOnFailure bool + InputSummaryFile string + OutputSummaryFile string +} diff --git a/toolkit/tools/pkg/graph/pkgfetcher/pkgfetcher.go b/toolkit/tools/pkg/graph/pkgfetcher/pkgfetcher.go new file mode 100644 index 00000000000..a298a68b88a --- /dev/null +++ b/toolkit/tools/pkg/graph/pkgfetcher/pkgfetcher.go @@ -0,0 +1,251 @@ +package pkgfetcher + +import ( + "fmt" + "path/filepath" + "strings" + + "gonum.org/v1/gonum/graph" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/packagerepo/repocloner/rpmrepocloner" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/packagerepo/repoutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" + 
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/rpm" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/scheduler/schedulerutils" +) + +func (cfg *Config) ResolvePackages() error { + dependencyGraph := pkggraph.NewPkgGraph() + + err := pkggraph.ReadDOTGraphFile(dependencyGraph, cfg.InputGraph) + if err != nil { + logger.Log.Panicf("Failed to read graph to file. Error: %s", err) + } + + var toolchainPackages []string + toolchainManifest := cfg.ToolchainManifest + if len(toolchainManifest) > 0 { + toolchainPackages, err = schedulerutils.ReadReservedFilesList(toolchainManifest) + if err != nil { + logger.Log.Fatalf("unable to read toolchain manifest file '%s': %s", toolchainManifest, err) + } + } + cloner, err := initializeCloner(cfg) + if err != nil { + logger.Log.Errorf("Failed to initialize RPM repo cloner. Error: %s", err) + return err + } + defer cloner.Close() + + if hasUnresolvedNodes(dependencyGraph) { + err = cfg.resolveGraphNodes(dependencyGraph, cfg.InputSummaryFile, cfg.OutputSummaryFile, toolchainPackages, cloner, cfg.OutDir, cfg.DisableUpstreamRepos, cfg.StopOnFailure) + if err != nil { + logger.Log.Panicf("Failed to resolve graph. Error: %s", err) + } + } else { + logger.Log.Info("No unresolved packages to cache") + } + + err = pkggraph.WriteDOTGraphFile(dependencyGraph, cfg.OutputGraph) + return err +} + +func initializeCloner(cfg *Config) (*rpmrepocloner.RpmRepoCloner, error) { + // Create the worker environment + cloner := rpmrepocloner.New() + err := cloner.Initialize(cfg.OutDir, cfg.TmpDir, cfg.WorkerTar, cfg.ExistingRpmDir, cfg.UsePreviewRepo, cfg.RepoFiles) + //if err != nil { + // logger.Log.Errorf("Failed to initialize RPM repo cloner. 
Error: %s", err) + // return + //} + // defer cloner.Close() + + if !cfg.DisableUpstreamRepos { + tlsKey, tlsCert := strings.TrimSpace(cfg.TlsClientKey), strings.TrimSpace(cfg.TlsClientCert) + err = cloner.AddNetworkFiles(tlsCert, tlsKey) + if err != nil { + logger.Log.Panicf("Failed to customize RPM repo cloner. Error: %s", err) + } + } + return cloner, err +} + +// hasUnresolvedNodes scans through the graph to see if there is anything to do +func hasUnresolvedNodes(graph *pkggraph.PkgGraph) bool { + for _, n := range graph.AllRunNodes() { + if n.State == pkggraph.StateUnresolved { + return true + } + } + return false +} + +// resolveGraphNodes scans a graph and for each unresolved node in the graph clones the RPMs needed +// to satisfy it. +func (cfg *Config) resolveGraphNodes(dependencyGraph *pkggraph.PkgGraph, inputSummaryFile, outputSummaryFile string, toolchainPackages []string, cloner *rpmrepocloner.RpmRepoCloner, outDir string, disableUpstreamRepos, stopOnFailure bool) (err error) { + + cachingSucceeded := true + if strings.TrimSpace(inputSummaryFile) == "" { + // Cache an RPM for each unresolved node in the graph. + fetchedPackages := make(map[string]bool) + prebuiltPackages := make(map[string]bool) + for _, n := range dependencyGraph.AllRunNodes() { + if n.State == pkggraph.StateUnresolved { + resolveErr := cfg.resolveSingleNode(cloner, n, toolchainPackages, fetchedPackages, prebuiltPackages, outDir) + // Failing to clone a dependency should not halt a build. + // The build should continue and attempt best effort to build as many packages as possible. 
+ if resolveErr != nil { + cachingSucceeded = false + errorMessage := strings.Builder{} + errorMessage.WriteString(fmt.Sprintf("Failed to resolve all nodes in the graph while resolving '%s'\n", n)) + errorMessage.WriteString("Nodes which have this as a dependency:\n") + for _, dependant := range graph.NodesOf(dependencyGraph.To(n.ID())) { + errorMessage.WriteString(fmt.Sprintf("\t'%s' depends on '%s'\n", dependant.(*pkggraph.PkgNode), n)) + } + logger.Log.Debugf(errorMessage.String()) + } + } + } + } else { + // If an input summary file was provided, simply restore the cache using the file. + err = repoutils.RestoreClonedRepoContents(cloner, inputSummaryFile) + cachingSucceeded = err == nil + } + if stopOnFailure && !cachingSucceeded { + return fmt.Errorf("failed to cache unresolved nodes") + } + + logger.Log.Info("Configuring downloaded RPMs as a local repository") + err = cloner.ConvertDownloadedPackagesIntoRepo() + if err != nil { + logger.Log.Errorf("Failed to convert downloaded RPMs into a repo. Error: %s", err) + return + } + + if strings.TrimSpace(outputSummaryFile) != "" { + err = repoutils.SaveClonedRepoContents(cloner, outputSummaryFile) + if err != nil { + logger.Log.Errorf("Failed to save cloned repo contents.") + return + } + } + + return +} + +// resolveSingleNode caches the RPM for a single node. +// It will modify fetchedPackages on a successful package clone. +func (cfg *Config) resolveSingleNode(cloner *rpmrepocloner.RpmRepoCloner, node *pkggraph.PkgNode, toolchainPackages []string, fetchedPackages, prebuiltPackages map[string]bool, outDir string) (err error) { + const cloneDeps = true + logger.Log.Debugf("Adding node %s to the cache", node.FriendlyName()) + + logger.Log.Debugf("Searching for a package which supplies: %s", node.VersionedPkg.Name) + // Resolve nodes to exact package names so they can be referenced in the graph. 
+ resolvedPackages, err := cloner.WhatProvides(node.VersionedPkg) + if err != nil { + msg := fmt.Sprintf("Failed to resolve (%s) to a package. Error: %s", node.VersionedPkg, err) + // It is not an error if an implicit node could not be resolved as it may become available later in the build. + // If it does not become available scheduler will print an error at the end of the build. + if node.Implicit { + logger.Log.Debug(msg) + } else { + logger.Log.Error(msg) + } + return + } + + if len(resolvedPackages) == 0 { + return fmt.Errorf("failed to find any packages providing '%v'", node.VersionedPkg) + } + + preBuilt := false + for _, resolvedPackage := range resolvedPackages { + if !fetchedPackages[resolvedPackage] { + desiredPackage := &pkgjson.PackageVer{ + Name: resolvedPackage, + } + + preBuilt, err = cloner.Clone(cloneDeps, desiredPackage) + if err != nil { + logger.Log.Errorf("Failed to clone '%s' from RPM repo. Error: %s", resolvedPackage, err) + return + } + fetchedPackages[resolvedPackage] = true + prebuiltPackages[resolvedPackage] = preBuilt + + logger.Log.Debugf("Fetched '%s' as potential candidate (is pre-built: %v).", resolvedPackage, prebuiltPackages[resolvedPackage]) + } + } + + err = cfg.assignRPMPath(node, outDir, resolvedPackages) + if err != nil { + logger.Log.Errorf("Failed to find an RPM to provide '%s'. 
Error: %s", node.VersionedPkg.Name, err) + return + } + + // If a package is available locally, and it is part of the toolchain, mark it as a prebuilt so the scheduler knows it can use it + // immediately (especially for dynamic generator created capabilities) + if (preBuilt || prebuiltPackages[node.RpmPath]) && isToolchainPackage(node.RpmPath, toolchainPackages) { + logger.Log.Debugf("Using a prebuilt toolchain package to resolve this dependency") + prebuiltPackages[node.RpmPath] = true + node.State = pkggraph.StateUpToDate + node.Type = pkggraph.TypePreBuilt + } else { + node.State = pkggraph.StateCached + } + + logger.Log.Infof("Choosing '%s' to provide '%s'.", filepath.Base(node.RpmPath), node.VersionedPkg.Name) + + return +} + +func (cfg *Config) assignRPMPath(node *pkggraph.PkgNode, outDir string, resolvedPackages []string) (err error) { + rpmPaths := []string{} + for _, resolvedPackage := range resolvedPackages { + rpmPaths = append(rpmPaths, rpmPackageToRPMPath(resolvedPackage, outDir)) + } + + node.RpmPath = rpmPaths[0] + if len(rpmPaths) > 1 { + var resolvedRPMs []string + logger.Log.Debugf("Found %d candidates. Resolving.", len(rpmPaths)) + + resolvedRPMs, err = rpm.ResolveCompetingPackages(cfg.TmpDir, rpmPaths...) + if err != nil { + logger.Log.Errorf("Failed while trying to pick an RPM providing '%s' from the following RPMs: %v", node.VersionedPkg.Name, rpmPaths) + return + } + + resolvedRPMsCount := len(resolvedRPMs) + if resolvedRPMsCount == 0 { + logger.Log.Errorf("Failed while trying to pick an RPM providing '%s'. No RPM can be installed from the following: %v", node.VersionedPkg.Name, rpmPaths) + return + } + + if resolvedRPMsCount > 1 { + logger.Log.Warnf("Found %d candidates to provide '%s'. 
Picking the first one.", resolvedRPMsCount, node.VersionedPkg.Name) + } + + node.RpmPath = rpmPackageToRPMPath(resolvedRPMs[0], outDir) + } + + return +} + +func rpmPackageToRPMPath(rpmPackage, outDir string) string { + // Construct the rpm path of the cloned package. + rpmName := fmt.Sprintf("%s.rpm", rpmPackage) + return filepath.Join(outDir, rpmName) +} + +func isToolchainPackage(rpmPath string, toolchainRPMs []string) bool { + base := filepath.Base(rpmPath) + for _, t := range toolchainRPMs { + if t == base { + return true + } + } + return false +} diff --git a/toolkit/tools/internal/pkggraph/cyclefind.go b/toolkit/tools/pkg/graph/pkggraph/cyclefind.go similarity index 98% rename from toolkit/tools/internal/pkggraph/cyclefind.go rename to toolkit/tools/pkg/graph/pkggraph/cyclefind.go index 19e9b8267d4..30b87a6b0de 100644 --- a/toolkit/tools/internal/pkggraph/cyclefind.go +++ b/toolkit/tools/pkg/graph/pkggraph/cyclefind.go @@ -6,9 +6,9 @@ package pkggraph import ( "fmt" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "gonum.org/v1/gonum/graph" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) const ( diff --git a/toolkit/tools/internal/pkggraph/cyclefind_test.go b/toolkit/tools/pkg/graph/pkggraph/cyclefind_test.go similarity index 100% rename from toolkit/tools/internal/pkggraph/cyclefind_test.go rename to toolkit/tools/pkg/graph/pkggraph/cyclefind_test.go diff --git a/toolkit/tools/internal/pkggraph/pkggraph.go b/toolkit/tools/pkg/graph/pkggraph/pkggraph.go similarity index 99% rename from toolkit/tools/internal/pkggraph/pkggraph.go rename to toolkit/tools/pkg/graph/pkggraph/pkggraph.go index e2b70329ba3..90b04a30e49 100644 --- a/toolkit/tools/internal/pkggraph/pkggraph.go +++ b/toolkit/tools/pkg/graph/pkggraph/pkggraph.go @@ -17,9 +17,9 @@ import ( "sync" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" 
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/versioncompare" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" "gonum.org/v1/gonum/graph" "gonum.org/v1/gonum/graph/encoding" diff --git a/toolkit/tools/internal/pkggraph/pkggraph_test.go b/toolkit/tools/pkg/graph/pkggraph/pkggraph_test.go similarity index 99% rename from toolkit/tools/internal/pkggraph/pkggraph_test.go rename to toolkit/tools/pkg/graph/pkggraph/pkggraph_test.go index bcc3e9d5b5f..254ef59987c 100644 --- a/toolkit/tools/internal/pkggraph/pkggraph_test.go +++ b/toolkit/tools/pkg/graph/pkggraph/pkggraph_test.go @@ -10,8 +10,8 @@ import ( "os" "testing" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" "github.com/stretchr/testify/assert" "gonum.org/v1/gonum/graph" diff --git a/toolkit/tools/internal/pkggraph/test_graph_reference.dot b/toolkit/tools/pkg/graph/pkggraph/test_graph_reference.dot similarity index 100% rename from toolkit/tools/internal/pkggraph/test_graph_reference.dot rename to toolkit/tools/pkg/graph/pkggraph/test_graph_reference.dot diff --git a/toolkit/tools/pkg/graph/preprocessor/config.go b/toolkit/tools/pkg/graph/preprocessor/config.go new file mode 100644 index 00000000000..2412bda2e9b --- /dev/null +++ b/toolkit/tools/pkg/graph/preprocessor/config.go @@ -0,0 +1,7 @@ +package preprocessor + +type Config struct { + InputGraphFile string + OutputGraphFile string + HydratedBuild bool +} diff --git a/toolkit/tools/pkg/graph/preprocessor/preprocessor.go b/toolkit/tools/pkg/graph/preprocessor/preprocessor.go new file mode 100644 index 00000000000..5e3f3bddef7 --- /dev/null +++ b/toolkit/tools/pkg/graph/preprocessor/preprocessor.go @@ -0,0 +1,70 @@ +package preprocessor + +import ( + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkggraph" + 
"github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" +) + +func (cfg *Config) ReadAndPreprocessGraph() (*pkggraph.PkgGraph, error) { + scrubbedGraph := pkggraph.NewPkgGraph() + + err := pkggraph.ReadDOTGraphFile(scrubbedGraph, cfg.InputGraphFile) + if err != nil { + logger.Log.Infof("Failed to read graph to file, %s. Error: %s", cfg.InputGraphFile, err) + return nil, err + } + if cfg.HydratedBuild { + logger.Log.Debugf("Nodes before replacing prebuilt nodes: %d", len(scrubbedGraph.AllNodes())) + err = replaceRunNodesWithPrebuiltNodes(scrubbedGraph) + logger.Log.Debugf("Nodes after replacing prebuilt nodes: %d", len(scrubbedGraph.AllNodes())) + if err != nil { + logger.Log.Infof("Failed to replace run nodes with preBuilt nodes. Error: %s", err) + return nil, err + } + } + return scrubbedGraph, nil +} + +func PreprocessGraph(g *pkggraph.PkgGraph) (*pkggraph.PkgGraph, error) { + err := replaceRunNodesWithPrebuiltNodes(g) + return g, err +} + +func replaceRunNodesWithPrebuiltNodes(pkgGraph *pkggraph.PkgGraph) (err error) { + for _, node := range pkgGraph.AllNodes() { + + if node.Type != pkggraph.TypeRun { + continue + } + + isPrebuilt, _, missing := pkggraph.IsSRPMPrebuilt(node.SrpmPath, pkgGraph, nil) + + if isPrebuilt == false { + logger.Log.Tracef("Can't mark %s as prebuilt, missing: %v", node.SrpmPath, missing) + continue + } + + preBuiltNode := pkgGraph.CloneNode(node) + preBuiltNode.State = pkggraph.StateUpToDate + preBuiltNode.Type = pkggraph.TypePreBuilt + + parentNodes := pkgGraph.To(node.ID()) + for parentNodes.Next() { + parentNode := parentNodes.Node().(*pkggraph.PkgNode) + + if parentNode.Type != pkggraph.TypeGoal { + pkgGraph.RemoveEdge(parentNode.ID(), node.ID()) + + logger.Log.Debugf("Adding a 'PreBuilt' node '%s' with id %d. 
For '%s'", preBuiltNode.FriendlyName(), preBuiltNode.ID(), parentNode.FriendlyName()) + err = pkgGraph.AddEdge(parentNode, preBuiltNode) + + if err != nil { + logger.Log.Errorf("Adding edge failed for %v -> %v", parentNode, preBuiltNode) + return err + } + } + } + } + + return nil +} diff --git a/toolkit/tools/pkg/image/configvalidator/configvalidator.go b/toolkit/tools/pkg/image/configvalidator/configvalidator.go new file mode 100644 index 00000000000..1f953a8697f --- /dev/null +++ b/toolkit/tools/pkg/image/configvalidator/configvalidator.go @@ -0,0 +1,77 @@ +package configvalidator + +import ( + "fmt" + "strings" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/installutils" +) + +// ValidateConfiguration will run sanity checks on a configuration structure +func ValidateConfiguration(config configuration.Config) (err error) { + err = config.IsValid() + if err != nil { + return + } + err = validatePackages(config) + return +} + +func validatePackages(config configuration.Config) (err error) { + const ( + selinuxPkgName = "selinux-policy" + validateError = "failed to validate package lists in config" + verityPkgName = "verity-read-only-root" + verityDebugPkgName = "verity-read-only-root-debug-tools" + dracutFipsPkgName = "dracut-fips" + fipsKernelCmdLine = "fips=1" + ) + for _, systemConfig := range config.SystemConfigs { + packageList, err := installutils.PackageNamesFromSingleSystemConfig(systemConfig) + if err != nil { + return fmt.Errorf("%s: %w", validateError, err) + } + foundSELinuxPackage := false + foundVerityInitramfsPackage := false + foundVerityInitramfsDebugPackage := false + foundDracutFipsPackage := false + kernelCmdLineString := systemConfig.KernelCommandLine.ExtraCommandLine + for _, pkg := range packageList { + if pkg == "kernel" { + return fmt.Errorf("%s: kernel should not be included in a package list, add via config file's [KernelOptions] entry", 
validateError) + } + if pkg == verityPkgName { + foundVerityInitramfsPackage = true + } + if pkg == verityDebugPkgName { + foundVerityInitramfsDebugPackage = true + } + if pkg == dracutFipsPkgName { + foundDracutFipsPackage = true + } + if pkg == selinuxPkgName { + foundSELinuxPackage = true + } + } + if systemConfig.ReadOnlyVerityRoot.Enable { + if !foundVerityInitramfsPackage { + return fmt.Errorf("%s: [ReadOnlyVerityRoot] selected, but '%s' package is not included in the package lists", validateError, verityPkgName) + } + if systemConfig.ReadOnlyVerityRoot.TmpfsOverlayDebugEnabled && !foundVerityInitramfsDebugPackage { + return fmt.Errorf("%s: [ReadOnlyVerityRoot] and [TmpfsOverlayDebugEnabled] selected, but '%s' package is not included in the package lists", validateError, verityDebugPkgName) + } + } + if strings.Contains(kernelCmdLineString, fipsKernelCmdLine) { + if !foundDracutFipsPackage { + return fmt.Errorf("%s: 'fips=1' provided on kernel cmdline, but '%s' package is not included in the package lists", validateError, dracutFipsPkgName) + } + } + if systemConfig.KernelCommandLine.SELinux != configuration.SELinuxOff { + if !foundSELinuxPackage { + return fmt.Errorf("%s: [SELinux] selected, but '%s' package is not included in the package lists", validateError, selinuxPkgName) + } + } + } + return +} diff --git a/toolkit/tools/isomaker/maker.go b/toolkit/tools/pkg/image/isomaker/isomaker.go similarity index 99% rename from toolkit/tools/isomaker/maker.go rename to toolkit/tools/pkg/image/isomaker/isomaker.go index c8f037c460e..7410a04124b 100644 --- a/toolkit/tools/isomaker/maker.go +++ b/toolkit/tools/pkg/image/isomaker/isomaker.go @@ -1,7 +1,7 @@ // Copyright Microsoft Corporation. // Licensed under the MIT License. 
-package main +package isomaker import ( "fmt" @@ -15,11 +15,11 @@ import ( "github.com/cavaliercoder/go-cpio" "github.com/klauspost/pgzip" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/jsonutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) const ( diff --git a/toolkit/tools/pkg/image/liveinstaller/config.go b/toolkit/tools/pkg/image/liveinstaller/config.go new file mode 100644 index 00000000000..68abf42500e --- /dev/null +++ b/toolkit/tools/pkg/image/liveinstaller/config.go @@ -0,0 +1,11 @@ +package liveinstaller + +type Config struct { + ConfigFile string + TemplateConfigFile string + ForceAttended bool + ImagerTool string + BuildDir string + BaseDirPath string + ImagerLogFile string +} diff --git a/toolkit/tools/pkg/image/liveinstaller/liveinstaller.go b/toolkit/tools/pkg/image/liveinstaller/liveinstaller.go new file mode 100644 index 00000000000..dc58f26928d --- /dev/null +++ b/toolkit/tools/pkg/image/liveinstaller/liveinstaller.go @@ -0,0 +1,367 @@ +package liveinstaller + +import ( + "bufio" + "fmt" + "os" + "os/signal" + "path/filepath" + "regexp" + "strconv" + "strings" + + "golang.org/x/sys/unix" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/jsonutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/diskutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" 
+) + +// Every valid mouse event handler will follow the format: +// H: Handlers=eventX mouseX +var mouseEventHandlerRegex = regexp.MustCompile(`^H:\s+Handlers=(\w+)\s+mouse\d+`) + +func handleCtrlC(signals chan os.Signal) { + <-signals + logger.Log.Error("Installation in progress, please wait until finished.") +} + +func (cfg *Config) Install() error { + // Prevent a SIGINT (Ctr-C) from stopping liveinstaller while an installation is in progress. + // It is the responsibility of the installer's user interface (terminal installer or Calamares) to handle quit requests from the user. + signals := make(chan os.Signal, 1) + signal.Notify(signals, unix.SIGINT) + go handleCtrlC(signals) + + // Imager's stdout/stderr will be combined with this tool's, so it will automatically be logged to the current log file + args := imagerArguments{ + imagerTool: cfg.ImagerTool, + buildDir: cfg.BuildDir, + baseDirPath: cfg.BaseDirPath, + logLevel: logger.Log.GetLevel().String(), + logFile: cfg.ImagerLogFile, + } + + installFunc := installerFactory(cfg.ForceAttended, cfg.ConfigFile, cfg.TemplateConfigFile) + installationQuit, err := installFunc(args) + logger.PanicOnError(err) + if installationQuit { + logger.Log.Error("User quit installation") + // Return a non-zero exit code to drop the user to shell + os.Exit(1) + } + // Change the boot order by either changing the EFI boot order or ejecting CDROM. 
+ return updateBootOrder() +} + +type imagerArguments struct { + imagerTool string + configFile string + buildDir string + baseDirPath string + emitProgress bool + logFile string + logLevel string +} + +func installerFactory(forceAttended bool, configFile, templateConfigFile string) (installFunc func(imagerArguments) (bool, error)) { + isAttended := false + + // Determine if the attended installer should be shown + if forceAttended { + logger.Log.Info("`attended` flag set, using attended installation") + isAttended = true + } else { + unattendedExists, _ := file.PathExists(configFile) + + if !unattendedExists { + logger.Log.Infof("Config file (%s) does not exist, using attended installation", configFile) + isAttended = true + } + } + + if isAttended { + templateExists, _ := file.PathExists(templateConfigFile) + if !templateExists { + logger.Log.Panicf("Attended installation requires a template config file. Specified template (%s) does not exist.", templateConfigFile) + } + } + + if isAttended { + installFunc = func(args imagerArguments) (bool, error) { + return terminalUIAttendedInstall(templateConfigFile, args) + } + } else { + installFunc = func(args imagerArguments) (bool, error) { + return unattendedInstall(configFile, args) + } + } + + return +} + +func updateBootOrder() (err error) { + logger.Log.Info("Ejecting CD-ROM.") + _, _, err = shell.Execute("eject", "--cdrom") + + if err != nil { + // If there was an error ejecting the CD-ROM, assume this is a USB installation and prompt the user + // to remove the USB device before rebooting. + logger.Log.Info("==================================================================================") + logger.Log.Info("Installation Complete. 
Please Remove USB installation media and reboot if present.") + logger.Log.Info("==================================================================================") + } + + return +} + +func findMouseHandlers() (handlers string, err error) { + const ( + deviceHandlerFile = "/proc/bus/input/devices" + eventPrefix = "/dev/input" + handlerDelimiter = ":" + absoluteInputEvents = "abs" + eventMatchGroup = 1 + ) + + devicesFile, err := os.Open(deviceHandlerFile) + if err != nil { + return + } + defer devicesFile.Close() + + // Gather a list of all mouse event handlers from the devices file + eventHandlers := []string{} + scanner := bufio.NewScanner(devicesFile) + for scanner.Scan() { + matches := mouseEventHandlerRegex.FindStringSubmatch(scanner.Text()) + if len(matches) == 0 { + continue + } + + eventPath := filepath.Join(eventPrefix, matches[eventMatchGroup]) + eventHandlers = append(eventHandlers, eventPath) + } + + err = scanner.Err() + if err != nil { + return + } + + if len(eventHandlers) == 0 { + err = fmt.Errorf("no mouse handler detected") + return + } + + // Add the the absolute input modifier to the handler list as mouse events are absolute. + // QT's default behavior is to take in relative events. 
+ eventHandlers = append(eventHandlers, absoluteInputEvents) + + // Join all mouse event handlers together so they all function inside QT + handlers = strings.Join(eventHandlers, handlerDelimiter) + + return +} + +func calamaresInstall(templateConfigFile string, args imagerArguments) (err error) { + const ( + squashErrors = false + calamaresDir = "/etc/calamares" + ) + + args.emitProgress = true + args.configFile = filepath.Join(calamaresDir, "unattended_config.json") + + launchScript := filepath.Join(calamaresDir, "mariner-install.sh") + skuDir := filepath.Join(calamaresDir, "mariner-skus") + + bootType := diskutils.SystemBootType() + logger.Log.Infof("Boot type detected: %s", bootType) + + mouseHandlers, err := findMouseHandlers() + if err != nil { + // Not finding a mouse isn't fatal as the installer can instead be driven with + // a keyboard only. + logger.Log.Warnf("No mouse detected: %v", err) + } + + logger.Log.Infof("Using (%s) for mouse input", mouseHandlers) + newEnv := append(shell.CurrentEnvironment(), fmt.Sprintf("QT_QPA_EVDEV_MOUSE_PARAMETERS=%s", mouseHandlers)) + shell.SetEnvironment(newEnv) + + // Generate the files needed for calamares + err = os.MkdirAll(skuDir, os.ModePerm) + if err != nil { + return + } + + err = generateCalamaresLaunchScript(launchScript, args) + if err != nil { + return + } + + // Generate the partial JSONs for SKUs + err = generateCalamaresSKUs(templateConfigFile, skuDir, bootType) + if err != nil { + return + } + + return shell.ExecuteLive(squashErrors, "calamares", "-platform", "linuxfb") +} + +func generateCalamaresLaunchScript(launchScriptPath string, args imagerArguments) (err error) { + const executionPerm = 0755 + + // Generate the script calamares will invoke to install + scriptFile, err := os.OpenFile(launchScriptPath, os.O_CREATE|os.O_RDWR, executionPerm) + if err != nil { + return + } + defer scriptFile.Close() + + logger.Log.Infof("Generating install script (%s)", launchScriptPath) + program, commandArgs := 
formatImagerCommand(args) + + scriptFile.WriteString("#!/bin/bash\n") + scriptFile.WriteString(fmt.Sprintf("%s %s", program, strings.Join(commandArgs, " "))) + scriptFile.WriteString("\n") + + return +} + +func generateCalamaresSKUs(templateConfigFile, skuDir, bootType string) (err error) { + // Parse template config + templateConfig, err := configuration.Load(templateConfigFile) + if err != nil { + return + } + + // Generate JSON snippets for each SKU + for _, sysConfig := range templateConfig.SystemConfigs { + sysConfig.BootType = bootType + err = generateSingleCalamaresSKU(sysConfig, skuDir) + if err != nil { + return + } + } + + return +} + +func generateSingleCalamaresSKU(sysConfig configuration.SystemConfig, skuDir string) (err error) { + skuFilePath := filepath.Join(skuDir, sysConfig.Name+".json") + logger.Log.Infof("Generating SKU option (%s)", skuFilePath) + + // Write the individual system config to a file. + return jsonutils.WriteJSONFile(skuFilePath, sysConfig) +} + +func terminalUIAttendedInstall(templateConfigFile string, args imagerArguments) (installationQuit bool, err error) { + const configFileName = "attendedconfig.json" + + // Parse template config + templateConfig, err := configuration.Load(templateConfigFile) + if err != nil { + return + } + + // Store the config file generated by the attended installer under the build dir + err = os.MkdirAll(args.buildDir, os.ModePerm) + if err != nil { + return + } + + args.configFile = filepath.Join(args.buildDir, configFileName) + attendedInstaller, err := attendedinstaller.New(templateConfig, + // Terminal-UI based installation + func(cfg configuration.Config, progress chan int, status chan string) (err error) { + return terminalAttendedInstall(cfg, progress, status, args) + }, + + // Calamares based installation + func() (err error) { + return calamaresInstall(templateConfigFile, args) + }) + + if err != nil { + return + } + + _, installationQuit, err = attendedInstaller.Run() + return +} + +func 
terminalAttendedInstall(cfg configuration.Config, progress chan int, status chan string, args imagerArguments) (err error) { + defer close(progress) + defer close(status) + + logger.Log.Infof("Writing temporary config file to (%s)", args.configFile) + err = jsonutils.WriteJSONFile(args.configFile, cfg) + if err != nil { + return + } + + onStdout := func(args ...interface{}) { + const ( + progressPrefix = "progress:" + actionPrefix = "action:" + ) + + if len(args) == 0 { + return + } + + line := args[0].(string) + + if strings.HasPrefix(line, progressPrefix) { + reportedProgress, err := strconv.Atoi(strings.TrimPrefix(line, progressPrefix)) + if err != nil { + logger.Log.Warnf("Failed to convert progress to an integer (%s). Error: %v", line, err) + return + } + + progress <- reportedProgress + } else if strings.HasPrefix(line, actionPrefix) { + status <- strings.TrimPrefix(line, actionPrefix) + } + } + + args.emitProgress = true + program, commandArgs := formatImagerCommand(args) + err = shell.ExecuteLiveWithCallback(onStdout, logger.Log.Warn, false, program, commandArgs...) + + return +} + +func unattendedInstall(configFile string, args imagerArguments) (installationQuit bool, err error) { + const squashErrors = false + + args.configFile = configFile + + program, commandArgs := formatImagerCommand(args) + err = shell.ExecuteLive(squashErrors, program, commandArgs...) 
+ return +} + +func formatImagerCommand(args imagerArguments) (program string, commandArgs []string) { + program = args.imagerTool + + commandArgs = []string{ + "--live-install", + fmt.Sprintf("--input=%s", args.configFile), + fmt.Sprintf("--build-dir=%s", args.buildDir), + fmt.Sprintf("--base-dir=%s", args.baseDirPath), + fmt.Sprintf("--log-file=%s", args.logFile), + fmt.Sprintf("--log-level=%s", args.logLevel), + } + + if args.emitProgress { + commandArgs = append(commandArgs, "--emit-progress") + } + + return +} diff --git a/toolkit/tools/pkg/image/pkgfetcher/config.go b/toolkit/tools/pkg/image/pkgfetcher/config.go new file mode 100644 index 00000000000..7c9e1969401 --- /dev/null +++ b/toolkit/tools/pkg/image/pkgfetcher/config.go @@ -0,0 +1,19 @@ +package pkgfetcher + +type Config struct { + ConfigFile string + OutDir string + BaseDirPath string + ExistingRpmDir string + TmpDir string + WorkerTar string + RepoFiles []string + UsePreviewRepo bool + DisableUpstreamRepos bool + TlsClientCert string + TlsClientKey string + ExternalOnly bool + InputGraph string + InputSummaryFile string + OutputSummaryFile string +} diff --git a/toolkit/tools/pkg/image/pkgfetcher/pkgfetcher.go b/toolkit/tools/pkg/image/pkgfetcher/pkgfetcher.go new file mode 100644 index 00000000000..74c73905879 --- /dev/null +++ b/toolkit/tools/pkg/image/pkgfetcher/pkgfetcher.go @@ -0,0 +1,123 @@ +package pkgfetcher + +import ( + "errors" + "strings" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/packagerepo/repocloner" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/packagerepo/repocloner/rpmrepocloner" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/packagerepo/repoutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/installutils" + 
"github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" +) + +func FetchPkgsAndCreateRepo(cfg *Config) error { + if cfg.ExternalOnly && strings.TrimSpace(cfg.InputGraph) == "" { + logger.Log.Info("input-graph must be provided if external-only is set.") + return errors.New("input-graph must be provided") + } + + cloner := rpmrepocloner.New() + err := cloner.Initialize(cfg.OutDir, cfg.TmpDir, cfg.WorkerTar, cfg.ExistingRpmDir, cfg.UsePreviewRepo, cfg.RepoFiles) + if err != nil { + logger.Log.Infof("Failed to initialize RPM repo cloner. Error: %s", err) + return err + } + defer cloner.Close() + + if !cfg.DisableUpstreamRepos { + tlsKey, tlsCert := strings.TrimSpace(cfg.TlsClientKey), strings.TrimSpace(cfg.TlsClientCert) + err = cloner.AddNetworkFiles(tlsCert, tlsKey) + if err != nil { + logger.Log.Infof("Failed to customize RPM repo cloner. Error: %s", err) + return err + } + } + + if strings.TrimSpace(cfg.InputSummaryFile) != "" { + // If an input summary file was provided, simply restore the cache using the file. + err = repoutils.RestoreClonedRepoContents(cloner, cfg.InputSummaryFile) + } else { + err = cloneSystemConfigs(cloner, cfg.ConfigFile, cfg.BaseDirPath, cfg.ExternalOnly, cfg.InputGraph) + } + + if err != nil { + logger.Log.Infof("Failed to clone RPM repo. Error: %s", err) + return err + } + + logger.Log.Info("Configuring downloaded RPMs as a local repository") + err = cloner.ConvertDownloadedPackagesIntoRepo() + if err != nil { + logger.Log.Infof("Failed to convert downloaded RPMs into a repo. 
Error: %s", err) + return err + } + + if strings.TrimSpace(cfg.OutputSummaryFile) != "" { + err = repoutils.SaveClonedRepoContents(cloner, cfg.OutputSummaryFile) + if err != nil { + return err + } + } + + return nil +} + +func cloneSystemConfigs(cloner repocloner.RepoCloner, configFile, baseDirPath string, externalOnly bool, inputGraph string) (err error) { + const cloneDeps = true + + cfg, err := configuration.LoadWithAbsolutePaths(configFile, baseDirPath) + if err != nil { + return + } + + packageVersionsInConfig, err := installutils.PackageNamesFromConfig(cfg) + if err != nil { + return + } + + // Add kernel packages from KernelOptions + packageVersionsInConfig = append(packageVersionsInConfig, installutils.KernelPackages(cfg)...) + + if externalOnly { + packageVersionsInConfig, err = filterExternalPackagesOnly(packageVersionsInConfig, inputGraph) + if err != nil { + return + } + } + + // Add any packages required by the install tools + packageVersionsInConfig = append(packageVersionsInConfig, installutils.GetRequiredPackagesForInstall()...) + + logger.Log.Infof("Cloning: %v", packageVersionsInConfig) + // The image tools don't care if a package was created locally or not, just that it exists. Disregard if it is prebuilt or not. + _, err = cloner.Clone(cloneDeps, packageVersionsInConfig...) + return +} + +// filterExternalPackagesOnly returns the subset of packageVersionsInConfig that only contains external packages. +func filterExternalPackagesOnly(packageVersionsInConfig []*pkgjson.PackageVer, inputGraph string) (filteredPackages []*pkgjson.PackageVer, err error) { + dependencyGraph := pkggraph.NewPkgGraph() + err = pkggraph.ReadDOTGraphFile(dependencyGraph, inputGraph) + if err != nil { + return + } + + for _, pkgVer := range packageVersionsInConfig { + pkgNode, _ := dependencyGraph.FindBestPkgNode(pkgVer) + + // There are two ways an external package will be represented by pkgNode. + // 1) pkgNode may be nil. 
This is possible if the package is never consumed during the build phase, + // which means it will not be in the graph. + // 2) pkgNode will be of 'StateUnresolved'. This will be the case if a local package has it listed as + // a Requires or BuildRequires. + if pkgNode == nil || pkgNode.RunNode.State == pkggraph.StateUnresolved { + filteredPackages = append(filteredPackages, pkgVer) + } + } + + return +} diff --git a/toolkit/tools/pkg/image/roast/config.go b/toolkit/tools/pkg/image/roast/config.go new file mode 100644 index 00000000000..a183027f4f5 --- /dev/null +++ b/toolkit/tools/pkg/image/roast/config.go @@ -0,0 +1,11 @@ +package roast + +type Config struct { + InputDir string + OutputDir string + ConfigFile string + TmpDir string + ReleaseVersion string + Workers int + ImageTag string +} diff --git a/toolkit/tools/roast/formats/definition.go b/toolkit/tools/pkg/image/roast/formats/definition.go similarity index 100% rename from toolkit/tools/roast/formats/definition.go rename to toolkit/tools/pkg/image/roast/formats/definition.go diff --git a/toolkit/tools/roast/formats/diff.go b/toolkit/tools/pkg/image/roast/formats/diff.go similarity index 100% rename from toolkit/tools/roast/formats/diff.go rename to toolkit/tools/pkg/image/roast/formats/diff.go diff --git a/toolkit/tools/roast/formats/ext4.go b/toolkit/tools/pkg/image/roast/formats/ext4.go similarity index 100% rename from toolkit/tools/roast/formats/ext4.go rename to toolkit/tools/pkg/image/roast/formats/ext4.go diff --git a/toolkit/tools/roast/formats/gzip.go b/toolkit/tools/pkg/image/roast/formats/gzip.go similarity index 100% rename from toolkit/tools/roast/formats/gzip.go rename to toolkit/tools/pkg/image/roast/formats/gzip.go diff --git a/toolkit/tools/roast/formats/initrd.go b/toolkit/tools/pkg/image/roast/formats/initrd.go similarity index 98% rename from toolkit/tools/roast/formats/initrd.go rename to toolkit/tools/pkg/image/roast/formats/initrd.go index ebba5558e5e..9990e45d3cf 100644 --- 
a/toolkit/tools/roast/formats/initrd.go +++ b/toolkit/tools/pkg/image/roast/formats/initrd.go @@ -12,7 +12,7 @@ import ( "github.com/cavaliercoder/go-cpio" "github.com/klauspost/pgzip" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // InitrdType represents the format for a compressed initrd file loaded by the Linux kernel at boot diff --git a/toolkit/tools/roast/formats/ova.go b/toolkit/tools/pkg/image/roast/formats/ova.go similarity index 98% rename from toolkit/tools/roast/formats/ova.go rename to toolkit/tools/pkg/image/roast/formats/ova.go index 5ede47bb246..50610810507 100644 --- a/toolkit/tools/roast/formats/ova.go +++ b/toolkit/tools/pkg/image/roast/formats/ova.go @@ -16,11 +16,11 @@ import ( "path/filepath" "strings" + "golang.org/x/sys/unix" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" - - "golang.org/x/sys/unix" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // OvaType represents the ova format diff --git a/toolkit/tools/roast/formats/raw.go b/toolkit/tools/pkg/image/roast/formats/raw.go similarity index 100% rename from toolkit/tools/roast/formats/raw.go rename to toolkit/tools/pkg/image/roast/formats/raw.go diff --git a/toolkit/tools/roast/formats/rdiff.go b/toolkit/tools/pkg/image/roast/formats/rdiff.go similarity index 100% rename from toolkit/tools/roast/formats/rdiff.go rename to toolkit/tools/pkg/image/roast/formats/rdiff.go diff --git a/toolkit/tools/roast/formats/targzip.go b/toolkit/tools/pkg/image/roast/formats/targzip.go similarity index 100% rename from toolkit/tools/roast/formats/targzip.go rename to toolkit/tools/pkg/image/roast/formats/targzip.go diff --git a/toolkit/tools/roast/formats/tarxz.go b/toolkit/tools/pkg/image/roast/formats/tarxz.go similarity index 100% rename from 
toolkit/tools/roast/formats/tarxz.go rename to toolkit/tools/pkg/image/roast/formats/tarxz.go diff --git a/toolkit/tools/roast/formats/vhd.go b/toolkit/tools/pkg/image/roast/formats/vhd.go similarity index 100% rename from toolkit/tools/roast/formats/vhd.go rename to toolkit/tools/pkg/image/roast/formats/vhd.go diff --git a/toolkit/tools/roast/formats/xz.go b/toolkit/tools/pkg/image/roast/formats/xz.go similarity index 100% rename from toolkit/tools/roast/formats/xz.go rename to toolkit/tools/pkg/image/roast/formats/xz.go diff --git a/toolkit/tools/pkg/image/roast/roast.go b/toolkit/tools/pkg/image/roast/roast.go new file mode 100644 index 00000000000..042b45d47c3 --- /dev/null +++ b/toolkit/tools/pkg/image/roast/roast.go @@ -0,0 +1,295 @@ +package roast + +import ( + "fmt" + "os" + "path" + "path/filepath" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/image/roast/formats" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" +) + +type convertRequest struct { + inputPath string + isInputFile bool + artifact configuration.Artifact +} + +type convertResult struct { + artifactName string + originalPath string + convertedFile string +} + +func (cfg *Config) GenerateImageArtifacts() error { + if cfg.Workers <= 0 { + logger.Log.Panicf("Value in --workers must be greater than zero. 
Found %d", cfg.Workers) + } + + inDirPath, err := filepath.Abs(cfg.InputDir) + if err != nil { + logger.Log.Panicf("Error when calculating input directory path: %s", err) + } + + outDirPath, err := filepath.Abs(cfg.OutputDir) + if err != nil { + logger.Log.Panicf("Error when calculating absolute output path: %s", err) + } + + tmpDirPath, err := filepath.Abs(cfg.TmpDir) + if err != nil { + logger.Log.Panicf("Error when calculating absolute temporary path: %s", err) + } + + err = os.MkdirAll(outDirPath, os.ModePerm) + if err != nil { + logger.Log.Panicf("Error when creating output directory. Error: %s", err) + } + + config, err := configuration.Load(cfg.ConfigFile) + if err != nil { + logger.Log.Panicf("Failed loading image configuration. Error: %s", err) + } + + return generateImageArtifacts(cfg.Workers, inDirPath, outDirPath, cfg.ReleaseVersion, cfg.ImageTag, tmpDirPath, config) +} + +func generateImageArtifacts(workers int, inDir, outDir, releaseVersion, imageTag, tmpDir string, config configuration.Config) (err error) { + const defaultSystemConfig = 0 + + err = os.MkdirAll(tmpDir, os.ModePerm) + if err != nil { + return + } + + if len(config.Disks) > 1 { + err = fmt.Errorf("this program currently only supports one disk") + return + } + + numberOfArtifacts := 0 + for _, disk := range config.Disks { + numberOfArtifacts += len(disk.Artifacts) + for _, partition := range disk.Partitions { + numberOfArtifacts += len(partition.Artifacts) + } + } + + logger.Log.Infof("Converting (%d) artifacts", numberOfArtifacts) + + convertRequests := make(chan *convertRequest, numberOfArtifacts) + convertedResults := make(chan *convertResult, numberOfArtifacts) + + // Start the workers now so they begin working as soon as a new job is buffered. 
+ for i := 0; i < workers; i++ { + go artifactConverterWorker(convertRequests, convertedResults, releaseVersion, tmpDir, imageTag, outDir) + } + + for i, disk := range config.Disks { + for _, artifact := range disk.Artifacts { + inputName, isFile := diskArtifactInput(i, disk) + convertRequests <- &convertRequest{ + inputPath: filepath.Join(inDir, inputName), + isInputFile: isFile, + artifact: artifact, + } + } + + for j, partition := range disk.Partitions { + for _, artifact := range partition.Artifacts { + // Currently only process 1 system config + inputName, isFile := partitionArtifactInput(i, j, &artifact, retrievePartitionSettings(&config.SystemConfigs[defaultSystemConfig], partition.ID)) + convertRequests <- &convertRequest{ + inputPath: filepath.Join(inDir, inputName), + isInputFile: isFile, + artifact: artifact, + } + } + } + } + + close(convertRequests) + + failedArtifacts := []string{} + for i := 0; i < numberOfArtifacts; i++ { + result := <-convertedResults + if result.convertedFile == "" { + failedArtifacts = append(failedArtifacts, result.artifactName) + } else { + logger.Log.Infof("[%d/%d] Converted (%s) -> (%s)", (i + 1), numberOfArtifacts, result.originalPath, result.convertedFile) + } + } + + if len(failedArtifacts) != 0 { + err = fmt.Errorf("failed to generate the following artifacts: %v", failedArtifacts) + } + + return +} + +func retrievePartitionSettings(systemConfig *configuration.SystemConfig, searchedID string) (foundSetting *configuration.PartitionSetting) { + for i := range systemConfig.PartitionSettings { + if systemConfig.PartitionSettings[i].ID == searchedID { + foundSetting = &systemConfig.PartitionSettings[i] + return + } + } + logger.Log.Warningf("Couldn't find partition setting '%s' under system config '%s'", searchedID, systemConfig.Name) + return +} + +func artifactConverterWorker(convertRequests chan *convertRequest, convertedResults chan *convertResult, releaseVersion, tmpDir, imageTag, outDir string) { + const ( + 
initrdArtifactType = "initrd" + ) + + for req := range convertRequests { + fullArtifactName := req.artifact.Name + + // Append release version if necessary + // Note: ISOs creation is a two step process. The first step's initrd artifact type should not append a release version + // since the release version value could change between the end of the first step and the start of the second step. + if req.artifact.Type != initrdArtifactType { + if releaseVersion != "" { + fullArtifactName = fullArtifactName + "-" + releaseVersion + } + } + result := &convertResult{ + artifactName: fullArtifactName, + originalPath: req.inputPath, + } + + workingArtifactPath := req.inputPath + isInputFile := req.isInputFile + + if req.artifact.Type != "" { + const appendExtension = false + outputFile, err := convertArtifact(fullArtifactName, tmpDir, req.artifact.Type, imageTag, workingArtifactPath, isInputFile, appendExtension) + if err != nil { + logger.Log.Errorf("Failed to convert artifact (%s) to type (%s). Error: %s", req.artifact.Name, req.artifact.Type, err) + convertedResults <- result + continue + } + isInputFile = true + workingArtifactPath = outputFile + } + + if req.artifact.Compression != "" { + const appendExtension = true + outputFile, err := convertArtifact(fullArtifactName, tmpDir, req.artifact.Compression, imageTag, workingArtifactPath, isInputFile, appendExtension) + if err != nil { + logger.Log.Errorf("Failed to compress (%s) using (%s). Error: %s", workingArtifactPath, req.artifact.Compression, err) + convertedResults <- result + continue + } + workingArtifactPath = outputFile + } + + if workingArtifactPath == req.inputPath { + logger.Log.Errorf("Artifact (%s) has no type or compression", req.artifact.Name) + } else { + finalFile := filepath.Join(outDir, filepath.Base(workingArtifactPath)) + err := file.Move(workingArtifactPath, finalFile) + if err != nil { + logger.Log.Errorf("Failed to move (%s) to (%s). 
Error: %s", workingArtifactPath, finalFile, err) + } else { + result.convertedFile = finalFile + } + } + + convertedResults <- result + } +} + +func convertArtifact(artifactName, outDir, format, imageTag, input string, isInputFile, appendExtension bool) (outputFile string, err error) { + typeConverter, err := converterFactory(format) + if err != nil { + return + } + + var originalExt string + + if appendExtension { + originalExt = path.Ext(input) + } + + newExt := fmt.Sprintf(".%s", typeConverter.Extension()) + if originalExt != "" { + newExt = fmt.Sprintf("%s%s", originalExt, newExt) + } + + if imageTag != "" { + imageTag = "-" + imageTag + } + + outputPath := filepath.Join(outDir, artifactName) + outputFile = fmt.Sprintf("%s%s%s", outputPath, imageTag, newExt) + + err = typeConverter.Convert(input, outputFile, isInputFile) + return +} + +func converterFactory(formatType string) (converter formats.Converter, err error) { + switch formatType { + case formats.RawType: + converter = formats.NewRaw() + case formats.Ext4Type: + converter = formats.NewExt4() + case formats.DiffType: + converter = formats.NewDiff() + case formats.RdiffType: + converter = formats.NewRdiff() + case formats.GzipType: + converter = formats.NewGzip() + case formats.TarGzipType: + converter = formats.NewTarGzip() + case formats.XzType: + converter = formats.NewXz() + case formats.TarXzType: + converter = formats.NewTarXz() + case formats.VhdType: + const gen2 = false + converter = formats.NewVhd(gen2) + case formats.VhdxType: + const gen2 = true + converter = formats.NewVhd(gen2) + case formats.InitrdType: + converter = formats.NewInitrd() + case formats.OvaType: + converter = formats.NewOva() + default: + err = fmt.Errorf("unsupported output format: %s", formatType) + } + + return +} + +func diskArtifactInput(diskIndex int, disk configuration.Disk) (input string, isFile bool) { + const rootfsPrefix = "rootfs" + + // If there are no partitions, this is a rootfs + if len(disk.Partitions) == 0 { 
+ input = rootfsPrefix + } else { + input = fmt.Sprintf("disk%d.raw", diskIndex) + isFile = true + } + + return +} + +func partitionArtifactInput(diskIndex, partitionIndex int, diskPartArtifact *configuration.Artifact, partitionSetting *configuration.PartitionSetting) (input string, isFile bool) { + // Currently all file artifacts have a raw file for input + if diskPartArtifact.Type == "diff" && partitionSetting.OverlayBaseImage != "" { + input = fmt.Sprintf("disk%d.partition%d.diff", diskIndex, partitionIndex) + } else if diskPartArtifact.Type == "rdiff" && partitionSetting.RdiffBaseImage != "" { + input = fmt.Sprintf("disk%d.partition%d.rdiff", diskIndex, partitionIndex) + } else { + input = fmt.Sprintf("disk%d.partition%d.raw", diskIndex, partitionIndex) + } + isFile = true + return +} diff --git a/toolkit/tools/imagegen/attendedinstaller/_manualrun/EULA.txt b/toolkit/tools/pkg/imagegen/attendedinstaller/_manualrun/EULA.txt similarity index 100% rename from toolkit/tools/imagegen/attendedinstaller/_manualrun/EULA.txt rename to toolkit/tools/pkg/imagegen/attendedinstaller/_manualrun/EULA.txt diff --git a/toolkit/tools/imagegen/attendedinstaller/_manualrun/manualrun.go b/toolkit/tools/pkg/imagegen/attendedinstaller/_manualrun/manualrun.go similarity index 95% rename from toolkit/tools/imagegen/attendedinstaller/_manualrun/manualrun.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/_manualrun/manualrun.go index be43e773705..6d9d5be7faf 100644 --- a/toolkit/tools/imagegen/attendedinstaller/_manualrun/manualrun.go +++ b/toolkit/tools/pkg/imagegen/attendedinstaller/_manualrun/manualrun.go @@ -8,9 +8,9 @@ import ( "fmt" "time" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller" + 
"github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // manualrun is a tool to test the attendedinstaller in the current terminal window. diff --git a/toolkit/tools/imagegen/attendedinstaller/attendedinstaller.go b/toolkit/tools/pkg/imagegen/attendedinstaller/attendedinstaller.go similarity index 90% rename from toolkit/tools/imagegen/attendedinstaller/attendedinstaller.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/attendedinstaller.go index 6cf2b33727d..42f1ad73462 100644 --- a/toolkit/tools/imagegen/attendedinstaller/attendedinstaller.go +++ b/toolkit/tools/pkg/imagegen/attendedinstaller/attendedinstaller.go @@ -11,25 +11,25 @@ import ( "strings" "time" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/speakuputils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uitext" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/views" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/views/confirmview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/views/diskview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/views/encryptview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/views/eulaview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/views/finishview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/views/hostnameview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/views/installationview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/views/installerview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/views/progressview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/views/userview" - 
"github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/bendahl/uinput" "github.com/gdamore/tcell" "github.com/rivo/tview" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/speakuputils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uitext" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/views" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/views/confirmview" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/views/diskview" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/views/encryptview" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/views/eulaview" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/views/finishview" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/views/hostnameview" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/views/installationview" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/views/installerview" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/views/progressview" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/views/userview" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // UI constants. 
diff --git a/toolkit/tools/imagegen/attendedinstaller/primitives/customshortcutlist/customshortcutlist.go b/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/customshortcutlist/customshortcutlist.go similarity index 100% rename from toolkit/tools/imagegen/attendedinstaller/primitives/customshortcutlist/customshortcutlist.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/primitives/customshortcutlist/customshortcutlist.go diff --git a/toolkit/tools/imagegen/attendedinstaller/primitives/enumfield/enumfield.go b/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/enumfield/enumfield.go similarity index 100% rename from toolkit/tools/imagegen/attendedinstaller/primitives/enumfield/enumfield.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/primitives/enumfield/enumfield.go diff --git a/toolkit/tools/imagegen/attendedinstaller/primitives/navigationbar/navigationbar.go b/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/navigationbar/navigationbar.go similarity index 98% rename from toolkit/tools/imagegen/attendedinstaller/primitives/navigationbar/navigationbar.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/primitives/navigationbar/navigationbar.go index f7e1b55ce2f..74c3f7d662d 100644 --- a/toolkit/tools/imagegen/attendedinstaller/primitives/navigationbar/navigationbar.go +++ b/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/navigationbar/navigationbar.go @@ -7,7 +7,7 @@ import ( "github.com/gdamore/tcell" "github.com/rivo/tview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uitext" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uitext" ) const ( diff --git a/toolkit/tools/imagegen/attendedinstaller/primitives/progressbar/progressbar.go b/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/progressbar/progressbar.go similarity index 100% rename from toolkit/tools/imagegen/attendedinstaller/primitives/progressbar/progressbar.go rename to 
toolkit/tools/pkg/imagegen/attendedinstaller/primitives/progressbar/progressbar.go diff --git a/toolkit/tools/imagegen/attendedinstaller/speakuputils/speakuputils.go b/toolkit/tools/pkg/imagegen/attendedinstaller/speakuputils/speakuputils.go similarity index 100% rename from toolkit/tools/imagegen/attendedinstaller/speakuputils/speakuputils.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/speakuputils/speakuputils.go diff --git a/toolkit/tools/imagegen/attendedinstaller/uitext/uitext.go b/toolkit/tools/pkg/imagegen/attendedinstaller/uitext/uitext.go similarity index 100% rename from toolkit/tools/imagegen/attendedinstaller/uitext/uitext.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/uitext/uitext.go diff --git a/toolkit/tools/imagegen/attendedinstaller/uiutils/uiutils.go b/toolkit/tools/pkg/imagegen/attendedinstaller/uiutils/uiutils.go similarity index 97% rename from toolkit/tools/imagegen/attendedinstaller/uiutils/uiutils.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/uiutils/uiutils.go index 1de186515dc..baa49675420 100644 --- a/toolkit/tools/imagegen/attendedinstaller/uiutils/uiutils.go +++ b/toolkit/tools/pkg/imagegen/attendedinstaller/uiutils/uiutils.go @@ -7,8 +7,7 @@ import ( "strings" "unicode" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/primitives/customshortcutlist" - + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/customshortcutlist" "github.com/rivo/tview" ) diff --git a/toolkit/tools/imagegen/attendedinstaller/views/confirmview/confirmview.go b/toolkit/tools/pkg/imagegen/attendedinstaller/views/confirmview/confirmview.go similarity index 87% rename from toolkit/tools/imagegen/attendedinstaller/views/confirmview/confirmview.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/views/confirmview/confirmview.go index 3e3ce5b9338..d5b06ccafae 100644 --- a/toolkit/tools/imagegen/attendedinstaller/views/confirmview/confirmview.go +++ 
b/toolkit/tools/pkg/imagegen/attendedinstaller/views/confirmview/confirmview.go @@ -7,10 +7,10 @@ import ( "github.com/gdamore/tcell" "github.com/rivo/tview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/primitives/navigationbar" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uitext" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uiutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/navigationbar" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uitext" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uiutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" ) // UI constants. diff --git a/toolkit/tools/imagegen/attendedinstaller/views/diskview/autopartitionwidget/autopartitionwidget.go b/toolkit/tools/pkg/imagegen/attendedinstaller/views/diskview/autopartitionwidget/autopartitionwidget.go similarity index 93% rename from toolkit/tools/imagegen/attendedinstaller/views/diskview/autopartitionwidget/autopartitionwidget.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/views/diskview/autopartitionwidget/autopartitionwidget.go index e8ec1f2b70a..9d2d9caab78 100644 --- a/toolkit/tools/imagegen/attendedinstaller/views/diskview/autopartitionwidget/autopartitionwidget.go +++ b/toolkit/tools/pkg/imagegen/attendedinstaller/views/diskview/autopartitionwidget/autopartitionwidget.go @@ -10,13 +10,13 @@ import ( "github.com/gdamore/tcell" "github.com/rivo/tview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/primitives/customshortcutlist" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/primitives/navigationbar" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uitext" - 
"github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uiutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/diskutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/customshortcutlist" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/navigationbar" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uitext" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uiutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/diskutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // UI constants. diff --git a/toolkit/tools/imagegen/attendedinstaller/views/diskview/diskview.go b/toolkit/tools/pkg/imagegen/attendedinstaller/views/diskview/diskview.go similarity index 89% rename from toolkit/tools/imagegen/attendedinstaller/views/diskview/diskview.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/views/diskview/diskview.go index e9d484be1d0..4171ca52d02 100644 --- a/toolkit/tools/imagegen/attendedinstaller/views/diskview/diskview.go +++ b/toolkit/tools/pkg/imagegen/attendedinstaller/views/diskview/diskview.go @@ -7,11 +7,11 @@ import ( "github.com/gdamore/tcell" "github.com/rivo/tview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/views/diskview/autopartitionwidget" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/views/diskview/manualpartitionwidget" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/diskutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + 
"github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/views/diskview/autopartitionwidget" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/views/diskview/manualpartitionwidget" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/diskutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // UI constants. diff --git a/toolkit/tools/imagegen/attendedinstaller/views/diskview/manualpartitionwidget/manualpartitionwidget.go b/toolkit/tools/pkg/imagegen/attendedinstaller/views/diskview/manualpartitionwidget/manualpartitionwidget.go similarity index 97% rename from toolkit/tools/imagegen/attendedinstaller/views/diskview/manualpartitionwidget/manualpartitionwidget.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/views/diskview/manualpartitionwidget/manualpartitionwidget.go index 853da051bf2..187df11d216 100644 --- a/toolkit/tools/imagegen/attendedinstaller/views/diskview/manualpartitionwidget/manualpartitionwidget.go +++ b/toolkit/tools/pkg/imagegen/attendedinstaller/views/diskview/manualpartitionwidget/manualpartitionwidget.go @@ -10,13 +10,13 @@ import ( "github.com/gdamore/tcell" "github.com/rivo/tview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/primitives/enumfield" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/primitives/navigationbar" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uitext" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uiutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/diskutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/enumfield" + 
"github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/navigationbar" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uitext" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uiutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/diskutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) const ( diff --git a/toolkit/tools/imagegen/attendedinstaller/views/encryptview/encryptview.go b/toolkit/tools/pkg/imagegen/attendedinstaller/views/encryptview/encryptview.go similarity index 92% rename from toolkit/tools/imagegen/attendedinstaller/views/encryptview/encryptview.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/views/encryptview/encryptview.go index a0666542c37..8b3e4f6b552 100644 --- a/toolkit/tools/imagegen/attendedinstaller/views/encryptview/encryptview.go +++ b/toolkit/tools/pkg/imagegen/attendedinstaller/views/encryptview/encryptview.go @@ -8,10 +8,10 @@ import ( "github.com/muesli/crunchy" "github.com/rivo/tview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/primitives/navigationbar" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uitext" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uiutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/navigationbar" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uitext" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uiutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" ) // UI constants. 
diff --git a/toolkit/tools/imagegen/attendedinstaller/views/eulaview/eulaview.go b/toolkit/tools/pkg/imagegen/attendedinstaller/views/eulaview/eulaview.go similarity index 90% rename from toolkit/tools/imagegen/attendedinstaller/views/eulaview/eulaview.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/views/eulaview/eulaview.go index e1ecee74d4c..9ad7336d31a 100644 --- a/toolkit/tools/imagegen/attendedinstaller/views/eulaview/eulaview.go +++ b/toolkit/tools/pkg/imagegen/attendedinstaller/views/eulaview/eulaview.go @@ -11,9 +11,9 @@ import ( "github.com/gdamore/tcell" "github.com/rivo/tview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/primitives/navigationbar" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uitext" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/navigationbar" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uitext" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" ) // Resource constants. 
diff --git a/toolkit/tools/imagegen/attendedinstaller/views/finishview/finishview.go b/toolkit/tools/pkg/imagegen/attendedinstaller/views/finishview/finishview.go similarity index 89% rename from toolkit/tools/imagegen/attendedinstaller/views/finishview/finishview.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/views/finishview/finishview.go index 5a0d2aba97e..7952db743c8 100644 --- a/toolkit/tools/imagegen/attendedinstaller/views/finishview/finishview.go +++ b/toolkit/tools/pkg/imagegen/attendedinstaller/views/finishview/finishview.go @@ -10,11 +10,11 @@ import ( "github.com/gdamore/tcell" "github.com/rivo/tview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/primitives/navigationbar" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uitext" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uiutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/navigationbar" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uitext" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uiutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // UI constants. 
diff --git a/toolkit/tools/imagegen/attendedinstaller/views/hostnameview/hostnameview.go b/toolkit/tools/pkg/imagegen/attendedinstaller/views/hostnameview/hostnameview.go similarity index 93% rename from toolkit/tools/imagegen/attendedinstaller/views/hostnameview/hostnameview.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/views/hostnameview/hostnameview.go index ac97349977a..1a022628e42 100644 --- a/toolkit/tools/imagegen/attendedinstaller/views/hostnameview/hostnameview.go +++ b/toolkit/tools/pkg/imagegen/attendedinstaller/views/hostnameview/hostnameview.go @@ -10,11 +10,11 @@ import ( "github.com/gdamore/tcell" "github.com/rivo/tview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/primitives/navigationbar" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uitext" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uiutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/randomization" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/navigationbar" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uitext" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uiutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" ) // Input validation constants. 
diff --git a/toolkit/tools/imagegen/attendedinstaller/views/hostnameview/hostnameview_test.go b/toolkit/tools/pkg/imagegen/attendedinstaller/views/hostnameview/hostnameview_test.go similarity index 96% rename from toolkit/tools/imagegen/attendedinstaller/views/hostnameview/hostnameview_test.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/views/hostnameview/hostnameview_test.go index 15c36d70811..aa4f2ef2f70 100644 --- a/toolkit/tools/imagegen/attendedinstaller/views/hostnameview/hostnameview_test.go +++ b/toolkit/tools/pkg/imagegen/attendedinstaller/views/hostnameview/hostnameview_test.go @@ -7,9 +7,9 @@ import ( "os" "testing" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/stretchr/testify/assert" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) func TestMain(m *testing.M) { diff --git a/toolkit/tools/imagegen/attendedinstaller/views/installationview/installationview.go b/toolkit/tools/pkg/imagegen/attendedinstaller/views/installationview/installationview.go similarity index 90% rename from toolkit/tools/imagegen/attendedinstaller/views/installationview/installationview.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/views/installationview/installationview.go index d1c82c6c62a..a2b057a1dd6 100644 --- a/toolkit/tools/imagegen/attendedinstaller/views/installationview/installationview.go +++ b/toolkit/tools/pkg/imagegen/attendedinstaller/views/installationview/installationview.go @@ -6,13 +6,12 @@ package installationview import ( "fmt" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/primitives/customshortcutlist" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/primitives/navigationbar" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uitext" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uiutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - 
"github.com/gdamore/tcell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/customshortcutlist" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/navigationbar" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uitext" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uiutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" "github.com/rivo/tview" ) diff --git a/toolkit/tools/imagegen/attendedinstaller/views/installerview/installerview.go b/toolkit/tools/pkg/imagegen/attendedinstaller/views/installerview/installerview.go similarity index 87% rename from toolkit/tools/imagegen/attendedinstaller/views/installerview/installerview.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/views/installerview/installerview.go index 2a7522e8bc5..7ab1f1ccac0 100644 --- a/toolkit/tools/imagegen/attendedinstaller/views/installerview/installerview.go +++ b/toolkit/tools/pkg/imagegen/attendedinstaller/views/installerview/installerview.go @@ -10,13 +10,13 @@ import ( "github.com/gdamore/tcell" "github.com/rivo/tview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/primitives/customshortcutlist" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/primitives/navigationbar" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/speakuputils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uitext" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uiutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/customshortcutlist" + 
"github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/navigationbar" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/speakuputils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uitext" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uiutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // UI constants. diff --git a/toolkit/tools/imagegen/attendedinstaller/views/progressview/progressview.go b/toolkit/tools/pkg/imagegen/attendedinstaller/views/progressview/progressview.go similarity index 91% rename from toolkit/tools/imagegen/attendedinstaller/views/progressview/progressview.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/views/progressview/progressview.go index 4b1b01770be..ad928d3d6ee 100644 --- a/toolkit/tools/imagegen/attendedinstaller/views/progressview/progressview.go +++ b/toolkit/tools/pkg/imagegen/attendedinstaller/views/progressview/progressview.go @@ -10,11 +10,11 @@ import ( "github.com/rivo/tview" "github.com/sirupsen/logrus" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/primitives/progressbar" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uitext" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uiutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/progressbar" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uitext" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uiutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + 
"github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // UI constants. diff --git a/toolkit/tools/imagegen/attendedinstaller/views/userview/userview.go b/toolkit/tools/pkg/imagegen/attendedinstaller/views/userview/userview.go similarity index 95% rename from toolkit/tools/imagegen/attendedinstaller/views/userview/userview.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/views/userview/userview.go index eaf5c6b9f9d..f22cb96eb20 100644 --- a/toolkit/tools/imagegen/attendedinstaller/views/userview/userview.go +++ b/toolkit/tools/pkg/imagegen/attendedinstaller/views/userview/userview.go @@ -10,10 +10,10 @@ import ( "github.com/muesli/crunchy" "github.com/rivo/tview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/primitives/navigationbar" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uitext" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/attendedinstaller/uiutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/primitives/navigationbar" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uitext" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/attendedinstaller/uiutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" ) // UI constants. 
diff --git a/toolkit/tools/imagegen/attendedinstaller/views/view.go b/toolkit/tools/pkg/imagegen/attendedinstaller/views/view.go similarity index 92% rename from toolkit/tools/imagegen/attendedinstaller/views/view.go rename to toolkit/tools/pkg/imagegen/attendedinstaller/views/view.go index 11c6526ed3d..fa00b8657b9 100644 --- a/toolkit/tools/imagegen/attendedinstaller/views/view.go +++ b/toolkit/tools/pkg/imagegen/attendedinstaller/views/view.go @@ -7,7 +7,7 @@ import ( "github.com/gdamore/tcell" "github.com/rivo/tview" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" ) // View is the interface for different "pages" in the attended installer. diff --git a/toolkit/tools/imagegen/configuration/configuration.go b/toolkit/tools/pkg/imagegen/configuration/configuration.go similarity index 99% rename from toolkit/tools/imagegen/configuration/configuration.go rename to toolkit/tools/pkg/imagegen/configuration/configuration.go index 0cd61fae797..45ab27e119a 100644 --- a/toolkit/tools/imagegen/configuration/configuration.go +++ b/toolkit/tools/pkg/imagegen/configuration/configuration.go @@ -14,7 +14,7 @@ import ( "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/jsonutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // Artifact [non-ISO image building only] defines the name, type diff --git a/toolkit/tools/imagegen/configuration/configuration_test.go b/toolkit/tools/pkg/imagegen/configuration/configuration_test.go similarity index 99% rename from toolkit/tools/imagegen/configuration/configuration_test.go rename to toolkit/tools/pkg/imagegen/configuration/configuration_test.go index 114f0d09101..e0ab20058a1 100644 --- a/toolkit/tools/imagegen/configuration/configuration_test.go +++ 
b/toolkit/tools/pkg/imagegen/configuration/configuration_test.go @@ -10,9 +10,9 @@ import ( "reflect" "testing" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/stretchr/testify/assert" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) func TestMain(m *testing.M) { diff --git a/toolkit/tools/imagegen/configuration/disk.go b/toolkit/tools/pkg/imagegen/configuration/disk.go similarity index 98% rename from toolkit/tools/imagegen/configuration/disk.go rename to toolkit/tools/pkg/imagegen/configuration/disk.go index 1083ef3763a..16e12d1d313 100644 --- a/toolkit/tools/imagegen/configuration/disk.go +++ b/toolkit/tools/pkg/imagegen/configuration/disk.go @@ -11,7 +11,7 @@ import ( "sort" "strconv" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // Disk holds the disk partitioning, formatting and size information. diff --git a/toolkit/tools/imagegen/configuration/disk_test.go b/toolkit/tools/pkg/imagegen/configuration/disk_test.go similarity index 100% rename from toolkit/tools/imagegen/configuration/disk_test.go rename to toolkit/tools/pkg/imagegen/configuration/disk_test.go diff --git a/toolkit/tools/imagegen/configuration/imapolicy.go b/toolkit/tools/pkg/imagegen/configuration/imapolicy.go similarity index 100% rename from toolkit/tools/imagegen/configuration/imapolicy.go rename to toolkit/tools/pkg/imagegen/configuration/imapolicy.go diff --git a/toolkit/tools/imagegen/configuration/imapolicy_test.go b/toolkit/tools/pkg/imagegen/configuration/imapolicy_test.go similarity index 100% rename from toolkit/tools/imagegen/configuration/imapolicy_test.go rename to toolkit/tools/pkg/imagegen/configuration/imapolicy_test.go diff --git a/toolkit/tools/imagegen/configuration/kernelcommandline.go b/toolkit/tools/pkg/imagegen/configuration/kernelcommandline.go similarity index 100% rename from toolkit/tools/imagegen/configuration/kernelcommandline.go 
rename to toolkit/tools/pkg/imagegen/configuration/kernelcommandline.go diff --git a/toolkit/tools/imagegen/configuration/kernelcommandline_test.go b/toolkit/tools/pkg/imagegen/configuration/kernelcommandline_test.go similarity index 100% rename from toolkit/tools/imagegen/configuration/kernelcommandline_test.go rename to toolkit/tools/pkg/imagegen/configuration/kernelcommandline_test.go diff --git a/toolkit/tools/imagegen/configuration/mountidentifier.go b/toolkit/tools/pkg/imagegen/configuration/mountidentifier.go similarity index 100% rename from toolkit/tools/imagegen/configuration/mountidentifier.go rename to toolkit/tools/pkg/imagegen/configuration/mountidentifier.go diff --git a/toolkit/tools/imagegen/configuration/mountidentifier_test.go b/toolkit/tools/pkg/imagegen/configuration/mountidentifier_test.go similarity index 100% rename from toolkit/tools/imagegen/configuration/mountidentifier_test.go rename to toolkit/tools/pkg/imagegen/configuration/mountidentifier_test.go diff --git a/toolkit/tools/imagegen/configuration/networkconfig.go b/toolkit/tools/pkg/imagegen/configuration/networkconfig.go similarity index 98% rename from toolkit/tools/imagegen/configuration/networkconfig.go rename to toolkit/tools/pkg/imagegen/configuration/networkconfig.go index e327cfb7f29..41c6ce8489a 100644 --- a/toolkit/tools/imagegen/configuration/networkconfig.go +++ b/toolkit/tools/pkg/imagegen/configuration/networkconfig.go @@ -14,9 +14,9 @@ import ( "strings" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/safechroot" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/safechroot" ) type Network struct { diff --git a/toolkit/tools/imagegen/configuration/networkconfig_test.go 
b/toolkit/tools/pkg/imagegen/configuration/networkconfig_test.go similarity index 100% rename from toolkit/tools/imagegen/configuration/networkconfig_test.go rename to toolkit/tools/pkg/imagegen/configuration/networkconfig_test.go diff --git a/toolkit/tools/imagegen/configuration/packagerepo.go b/toolkit/tools/pkg/imagegen/configuration/packagerepo.go similarity index 97% rename from toolkit/tools/imagegen/configuration/packagerepo.go rename to toolkit/tools/pkg/imagegen/configuration/packagerepo.go index 05554ba8881..a1982615ed9 100644 --- a/toolkit/tools/imagegen/configuration/packagerepo.go +++ b/toolkit/tools/pkg/imagegen/configuration/packagerepo.go @@ -14,10 +14,10 @@ import ( "strings" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/network" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/safechroot" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/safechroot" ) // PackageRepo defines the RPM repo to pull packages from during the installation diff --git a/toolkit/tools/imagegen/configuration/packagerepo_test.go b/toolkit/tools/pkg/imagegen/configuration/packagerepo_test.go similarity index 100% rename from toolkit/tools/imagegen/configuration/packagerepo_test.go rename to toolkit/tools/pkg/imagegen/configuration/packagerepo_test.go diff --git a/toolkit/tools/imagegen/configuration/parse_partition.go b/toolkit/tools/pkg/imagegen/configuration/parse_partition.go similarity index 99% rename from toolkit/tools/imagegen/configuration/parse_partition.go rename to toolkit/tools/pkg/imagegen/configuration/parse_partition.go index 21290718e91..7ebf5f3d426 100644 --- a/toolkit/tools/imagegen/configuration/parse_partition.go +++ b/toolkit/tools/pkg/imagegen/configuration/parse_partition.go @@ 
-12,7 +12,7 @@ import ( "strconv" "strings" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) const ( diff --git a/toolkit/tools/imagegen/configuration/parse_partition_test.go b/toolkit/tools/pkg/imagegen/configuration/parse_partition_test.go similarity index 100% rename from toolkit/tools/imagegen/configuration/parse_partition_test.go rename to toolkit/tools/pkg/imagegen/configuration/parse_partition_test.go diff --git a/toolkit/tools/imagegen/configuration/partition.go b/toolkit/tools/pkg/imagegen/configuration/partition.go similarity index 100% rename from toolkit/tools/imagegen/configuration/partition.go rename to toolkit/tools/pkg/imagegen/configuration/partition.go diff --git a/toolkit/tools/imagegen/configuration/partition_test.go b/toolkit/tools/pkg/imagegen/configuration/partition_test.go similarity index 100% rename from toolkit/tools/imagegen/configuration/partition_test.go rename to toolkit/tools/pkg/imagegen/configuration/partition_test.go diff --git a/toolkit/tools/imagegen/configuration/partitionflag.go b/toolkit/tools/pkg/imagegen/configuration/partitionflag.go similarity index 100% rename from toolkit/tools/imagegen/configuration/partitionflag.go rename to toolkit/tools/pkg/imagegen/configuration/partitionflag.go diff --git a/toolkit/tools/imagegen/configuration/partitionflag_test.go b/toolkit/tools/pkg/imagegen/configuration/partitionflag_test.go similarity index 100% rename from toolkit/tools/imagegen/configuration/partitionflag_test.go rename to toolkit/tools/pkg/imagegen/configuration/partitionflag_test.go diff --git a/toolkit/tools/imagegen/configuration/partitionsetting.go b/toolkit/tools/pkg/imagegen/configuration/partitionsetting.go similarity index 100% rename from toolkit/tools/imagegen/configuration/partitionsetting.go rename to toolkit/tools/pkg/imagegen/configuration/partitionsetting.go diff --git 
a/toolkit/tools/imagegen/configuration/partitionsetting_test.go b/toolkit/tools/pkg/imagegen/configuration/partitionsetting_test.go similarity index 100% rename from toolkit/tools/imagegen/configuration/partitionsetting_test.go rename to toolkit/tools/pkg/imagegen/configuration/partitionsetting_test.go diff --git a/toolkit/tools/imagegen/configuration/partitiontabletype.go b/toolkit/tools/pkg/imagegen/configuration/partitiontabletype.go similarity index 100% rename from toolkit/tools/imagegen/configuration/partitiontabletype.go rename to toolkit/tools/pkg/imagegen/configuration/partitiontabletype.go diff --git a/toolkit/tools/imagegen/configuration/partitiontabletype_test.go b/toolkit/tools/pkg/imagegen/configuration/partitiontabletype_test.go similarity index 100% rename from toolkit/tools/imagegen/configuration/partitiontabletype_test.go rename to toolkit/tools/pkg/imagegen/configuration/partitiontabletype_test.go diff --git a/toolkit/tools/imagegen/configuration/selinux.go b/toolkit/tools/pkg/imagegen/configuration/selinux.go similarity index 100% rename from toolkit/tools/imagegen/configuration/selinux.go rename to toolkit/tools/pkg/imagegen/configuration/selinux.go diff --git a/toolkit/tools/imagegen/configuration/selinux_test.go b/toolkit/tools/pkg/imagegen/configuration/selinux_test.go similarity index 100% rename from toolkit/tools/imagegen/configuration/selinux_test.go rename to toolkit/tools/pkg/imagegen/configuration/selinux_test.go diff --git a/toolkit/tools/imagegen/configuration/systemconfig.go b/toolkit/tools/pkg/imagegen/configuration/systemconfig.go similarity index 99% rename from toolkit/tools/imagegen/configuration/systemconfig.go rename to toolkit/tools/pkg/imagegen/configuration/systemconfig.go index fc91b1d6eb0..e96f50c75f1 100644 --- a/toolkit/tools/imagegen/configuration/systemconfig.go +++ b/toolkit/tools/pkg/imagegen/configuration/systemconfig.go @@ -11,7 +11,7 @@ import ( "strings" "github.com/asaskevich/govalidator" - 
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // SystemConfig defines how each system present on the image is supposed to be configured. diff --git a/toolkit/tools/imagegen/configuration/systemconfig_test.go b/toolkit/tools/pkg/imagegen/configuration/systemconfig_test.go similarity index 100% rename from toolkit/tools/imagegen/configuration/systemconfig_test.go rename to toolkit/tools/pkg/imagegen/configuration/systemconfig_test.go diff --git a/toolkit/tools/imagegen/configuration/testdata/test_configuration.json b/toolkit/tools/pkg/imagegen/configuration/testdata/test_configuration.json similarity index 100% rename from toolkit/tools/imagegen/configuration/testdata/test_configuration.json rename to toolkit/tools/pkg/imagegen/configuration/testdata/test_configuration.json diff --git a/toolkit/tools/imagegen/configuration/user.go b/toolkit/tools/pkg/imagegen/configuration/user.go similarity index 100% rename from toolkit/tools/imagegen/configuration/user.go rename to toolkit/tools/pkg/imagegen/configuration/user.go diff --git a/toolkit/tools/imagegen/configuration/user_test.go b/toolkit/tools/pkg/imagegen/configuration/user_test.go similarity index 100% rename from toolkit/tools/imagegen/configuration/user_test.go rename to toolkit/tools/pkg/imagegen/configuration/user_test.go diff --git a/toolkit/tools/imagegen/configuration/veritydisk.go b/toolkit/tools/pkg/imagegen/configuration/veritydisk.go similarity index 100% rename from toolkit/tools/imagegen/configuration/veritydisk.go rename to toolkit/tools/pkg/imagegen/configuration/veritydisk.go diff --git a/toolkit/tools/imagegen/configuration/veritydisk_test.go b/toolkit/tools/pkg/imagegen/configuration/veritydisk_test.go similarity index 100% rename from toolkit/tools/imagegen/configuration/veritydisk_test.go rename to toolkit/tools/pkg/imagegen/configuration/veritydisk_test.go diff --git 
a/toolkit/tools/imagegen/configuration/verityerrorbehavior.go b/toolkit/tools/pkg/imagegen/configuration/verityerrorbehavior.go similarity index 100% rename from toolkit/tools/imagegen/configuration/verityerrorbehavior.go rename to toolkit/tools/pkg/imagegen/configuration/verityerrorbehavior.go diff --git a/toolkit/tools/imagegen/configuration/verityerrorbehavior_test.go b/toolkit/tools/pkg/imagegen/configuration/verityerrorbehavior_test.go similarity index 100% rename from toolkit/tools/imagegen/configuration/verityerrorbehavior_test.go rename to toolkit/tools/pkg/imagegen/configuration/verityerrorbehavior_test.go diff --git a/toolkit/tools/imagegen/diskutils/diskutils.go b/toolkit/tools/pkg/imagegen/diskutils/diskutils.go similarity index 99% rename from toolkit/tools/imagegen/diskutils/diskutils.go rename to toolkit/tools/pkg/imagegen/diskutils/diskutils.go index bab86941344..e4ebc5fdf7f 100644 --- a/toolkit/tools/imagegen/diskutils/diskutils.go +++ b/toolkit/tools/pkg/imagegen/diskutils/diskutils.go @@ -14,11 +14,11 @@ import ( "strings" "time" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/retry" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) type blockDevicesOutput struct { diff --git a/toolkit/tools/imagegen/diskutils/diskutils_test.go b/toolkit/tools/pkg/imagegen/diskutils/diskutils_test.go similarity index 100% rename from toolkit/tools/imagegen/diskutils/diskutils_test.go rename to toolkit/tools/pkg/imagegen/diskutils/diskutils_test.go diff --git a/toolkit/tools/imagegen/diskutils/encryption.go b/toolkit/tools/pkg/imagegen/diskutils/encryption.go similarity index 97% rename from 
toolkit/tools/imagegen/diskutils/encryption.go rename to toolkit/tools/pkg/imagegen/diskutils/encryption.go index ac72bb88012..506326ee560 100644 --- a/toolkit/tools/imagegen/diskutils/encryption.go +++ b/toolkit/tools/pkg/imagegen/diskutils/encryption.go @@ -11,9 +11,9 @@ import ( "path/filepath" "strings" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) const ( diff --git a/toolkit/tools/imagegen/diskutils/initramfs.go b/toolkit/tools/pkg/imagegen/diskutils/initramfs.go similarity index 99% rename from toolkit/tools/imagegen/diskutils/initramfs.go rename to toolkit/tools/pkg/imagegen/diskutils/initramfs.go index ee2cda0d353..7ea890b7839 100644 --- a/toolkit/tools/imagegen/diskutils/initramfs.go +++ b/toolkit/tools/pkg/imagegen/diskutils/initramfs.go @@ -10,10 +10,10 @@ import ( "io" "os" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/cavaliercoder/go-cpio" "github.com/klauspost/pgzip" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // InitramfsMount represented an editable initramfs diff --git a/toolkit/tools/imagegen/diskutils/lvm.go b/toolkit/tools/pkg/imagegen/diskutils/lvm.go similarity index 97% rename from toolkit/tools/imagegen/diskutils/lvm.go rename to toolkit/tools/pkg/imagegen/diskutils/lvm.go index 142c8258530..24c4b17d31d 100644 --- a/toolkit/tools/imagegen/diskutils/lvm.go +++ b/toolkit/tools/pkg/imagegen/diskutils/lvm.go @@ -9,8 +9,8 @@ import ( "fmt" "path/filepath" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) const ( diff --git 
a/toolkit/tools/imagegen/diskutils/verity.go b/toolkit/tools/pkg/imagegen/diskutils/verity.go similarity index 98% rename from toolkit/tools/imagegen/diskutils/verity.go rename to toolkit/tools/pkg/imagegen/diskutils/verity.go index a2f2a6bf0e0..5164f26460c 100644 --- a/toolkit/tools/imagegen/diskutils/verity.go +++ b/toolkit/tools/pkg/imagegen/diskutils/verity.go @@ -14,10 +14,10 @@ import ( "strconv" "strings" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/randomization" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) const ( diff --git a/toolkit/tools/imagegen/installutils/installutils.go b/toolkit/tools/pkg/imagegen/installutils/installutils.go similarity index 99% rename from toolkit/tools/imagegen/installutils/installutils.go rename to toolkit/tools/pkg/imagegen/installutils/installutils.go index da67568ceca..2e0d27bb3cd 100644 --- a/toolkit/tools/imagegen/installutils/installutils.go +++ b/toolkit/tools/pkg/imagegen/installutils/installutils.go @@ -15,17 +15,17 @@ import ( "syscall" "time" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/diskutils" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/jsonutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/randomization" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/retry" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/safechroot" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" 
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/tdnf" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/diskutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/safechroot" ) const ( @@ -292,7 +292,9 @@ func umount(path string) (err error) { // PackageNamesFromSingleSystemConfig goes through the "PackageLists" and "Packages" fields in the "SystemConfig" object, extracting // from packageList JSONs and packages listed in config itself to create one comprehensive package list. // NOTE: the package list contains the versions restrictions for the packages, if present, in the form "[package][condition][version]". -// Example: gcc=9.1.0 +// +// Example: gcc=9.1.0 +// // - systemConfig is the systemconfig field from the config file // Since kernel is not part of the packagelist, it is added separately from KernelOptions. func PackageNamesFromSingleSystemConfig(systemConfig configuration.SystemConfig) (finalPkgList []string, err error) { @@ -1762,7 +1764,8 @@ func GetPartLabel(device string) (stdout string, err error) { } // FormatMountIdentifier finds the requested identifier type for the given device, and formats it for use -// ie "UUID=12345678-abcd..." +// +// ie "UUID=12345678-abcd..." func FormatMountIdentifier(identifier configuration.MountIdentifier, device string) (identifierString string, err error) { var id string switch identifier { @@ -2298,7 +2301,7 @@ func createRDiffArtifact(workDirPath, devPath, rDiffBaseImage, name string) (err return shell.ExecuteLive(squashErrors, "rdiff", rdiffArgs...) 
} -//KernelPackages returns a list of kernel packages obtained from KernelOptions in the config's SystemConfigs +// KernelPackages returns a list of kernel packages obtained from KernelOptions in the config's SystemConfigs func KernelPackages(config configuration.Config) []*pkgjson.PackageVer { var packageList []*pkgjson.PackageVer // Add all the provided kernels to the package list diff --git a/toolkit/tools/imagegen/installutils/installutils_test.go b/toolkit/tools/pkg/imagegen/installutils/installutils_test.go similarity index 100% rename from toolkit/tools/imagegen/installutils/installutils_test.go rename to toolkit/tools/pkg/imagegen/installutils/installutils_test.go diff --git a/toolkit/tools/imagegen/installutils/overlay.go b/toolkit/tools/pkg/imagegen/installutils/overlay.go similarity index 93% rename from toolkit/tools/imagegen/installutils/overlay.go rename to toolkit/tools/pkg/imagegen/installutils/overlay.go index 66b5db3aead..d25eeb5e42e 100644 --- a/toolkit/tools/imagegen/installutils/overlay.go +++ b/toolkit/tools/pkg/imagegen/installutils/overlay.go @@ -8,8 +8,8 @@ import ( "os" "path/filepath" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/diskutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/diskutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // Overlay Struct representing an overlay mount diff --git a/toolkit/tools/imagegen/installutils/progressreporter.go b/toolkit/tools/pkg/imagegen/installutils/progressreporter.go similarity index 95% rename from toolkit/tools/imagegen/installutils/progressreporter.go rename to toolkit/tools/pkg/imagegen/installutils/progressreporter.go index 1a918f1a7f8..f2572fa7134 100644 --- a/toolkit/tools/imagegen/installutils/progressreporter.go +++ b/toolkit/tools/pkg/imagegen/installutils/progressreporter.go @@ -6,7 +6,7 @@ package installutils import ( "fmt" - 
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) var doEmitProgress bool diff --git a/toolkit/tools/pkg/imager/config.go b/toolkit/tools/pkg/imager/config.go new file mode 100644 index 00000000000..213dc916087 --- /dev/null +++ b/toolkit/tools/pkg/imager/config.go @@ -0,0 +1,15 @@ +package imager + +type Config struct { + BuildDir string + ConfigFile string + LocalRepo string + TdnfTar string + RepoFile string + Assets string + BaseDirPath string + OutputDir string + LiveInstallFlag bool + EmitProgress bool + SystemConfig int +} diff --git a/toolkit/tools/pkg/imager/imager.go b/toolkit/tools/pkg/imager/imager.go new file mode 100644 index 00000000000..ca17bde74c0 --- /dev/null +++ b/toolkit/tools/pkg/imager/imager.go @@ -0,0 +1,618 @@ +package imager + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/diskutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/installutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/safechroot" +) + +const ( + // additionalFilesTempDirectory is the location where installutils expects to pick up any additional files + // to add to the install directory + additionalFilesTempDirectory = "/tmp/additionalfiles" + + // postInstallScriptTempDirectory is the directory where installutils expects to pick up any post install scripts + // to run inside the install directory environment + postInstallScriptTempDirectory = "/tmp/postinstall" + + // sshPubKeysTempDirectory is the directory where installutils expects to pick up ssh public key files to add into + // the install directory + sshPubKeysTempDirectory = "/tmp/sshpubkeys" + + // kickstartPartitionFile is the file that includes 
the partitioning schema used by + // kickstart installation + kickstartPartitionFile = "/tmp/part-include" +) + +func (cfg *Config) BuildSysConfig(defaultSystemConfig int) error { + // Parse Config + config, err := configuration.LoadWithAbsolutePaths(cfg.ConfigFile, cfg.BaseDirPath) + logger.PanicOnError(err, "Failed to load configuration file (%s) with base directory (%s)", cfg.ConfigFile, cfg.BaseDirPath) + + // Currently only process 1 system config + systemConfig := config.SystemConfigs[defaultSystemConfig] + + // Execute preinstall scripts and parse partitioning when performing kickstart installation + if systemConfig.IsKickStartBoot { + err = installutils.RunPreInstallScripts(systemConfig) + logger.PanicOnError(err, "Failed to preinstall scripts") + + disks, partitionSettings, err := configuration.ParseKickStartPartitionScheme(kickstartPartitionFile) + logger.PanicOnError(err, "Failed to parse partition schema") + + config.Disks = disks + systemConfig.PartitionSettings = partitionSettings + + err = config.IsValid() + if err != nil { + logger.PanicOnError(err, "Invalid image configuration: %s", err) + } + } + + return cfg.buildSystemConfig(systemConfig, config.Disks, cfg.OutputDir, cfg.BuildDir) + +} + +func (cfg *Config) buildSystemConfig(systemConfig configuration.SystemConfig, disks []configuration.Disk, outputDir, buildDir string) (err error) { + logger.Log.Infof("Building system configuration (%s)", systemConfig.Name) + + const ( + assetsMountPoint = "/installer" + localRepoMountPoint = "/mnt/cdrom/RPMS" + repoFileMountPoint = "/etc/yum.repos.d" + setupRoot = "/setuproot" + installRoot = "/installroot" + rootID = "rootfs" + defaultDiskIndex = 0 + defaultTempDiskName = "disk.raw" + existingChrootDir = false + leaveChrootOnDisk = false + ) + + var ( + isRootFS bool + isLoopDevice bool + isOfflineInstall bool + diskDevPath string + kernelPkg string + encryptedRoot diskutils.EncryptedRootDevice + readOnlyRoot diskutils.VerityDevice + partIDToDevPathMap 
map[string]string + partIDToFsTypeMap map[string]string + mountPointToOverlayMap map[string]*installutils.Overlay + extraMountPoints []*safechroot.MountPoint + extraDirectories []string + ) + + // Get list of packages to install into image + packagesToInstall, err := installutils.PackageNamesFromSingleSystemConfig(systemConfig) + if err != nil { + logger.Log.Error("Failed to import packages from package lists in config file") + return + } + + isRootFS = len(systemConfig.PartitionSettings) == 0 + if isRootFS { + logger.Log.Infof("Creating rootfs") + additionalExtraMountPoints, additionalExtraDirectories, err := setupRootFS(outputDir, installRoot) + if err != nil { + return err + } + + extraDirectories = append(extraDirectories, additionalExtraDirectories...) + extraMountPoints = append(extraMountPoints, additionalExtraMountPoints...) + isOfflineInstall = true + + // Select the best kernel package for this environment. + kernelPkg, err = installutils.SelectKernelPackage(systemConfig, cfg.LiveInstallFlag) + // Rootfs images will usually not set a kernel, ignore errors + if err != nil { + logger.Log.Debugf("Rootfs did not find a kernel, this is normal: '%s'", err.Error()) + } else { + logger.Log.Infof("Rootfs is including a kernel (%s)", kernelPkg) + packagesToInstall = append([]string{kernelPkg}, packagesToInstall...) 
+ } + } else { + logger.Log.Info("Creating raw disk in build directory") + diskConfig := disks[defaultDiskIndex] + diskDevPath, partIDToDevPathMap, partIDToFsTypeMap, isLoopDevice, encryptedRoot, readOnlyRoot, err = setupDisk(buildDir, defaultTempDiskName, cfg.LiveInstallFlag, diskConfig, systemConfig.Encryption, systemConfig.ReadOnlyVerityRoot) + if err != nil { + return + } + + if isLoopDevice { + isOfflineInstall = true + defer diskutils.DetachLoopbackDevice(diskDevPath) + defer diskutils.BlockOnDiskIO(diskDevPath) + } + + if systemConfig.ReadOnlyVerityRoot.Enable { + defer readOnlyRoot.CleanupVerityDevice() + } + + // Add additional system settings for root encryption + err = setupDiskEncryption(&systemConfig, &encryptedRoot, buildDir) + if err != nil { + return + } + + // Select the best kernel package for this environment + kernelPkg, err = installutils.SelectKernelPackage(systemConfig, cfg.LiveInstallFlag) + if err != nil { + logger.Log.Errorf("Failed to select a suitable kernel to install in config (%s)", systemConfig.Name) + return + } + + logger.Log.Infof("Selected (%s) for the kernel", kernelPkg) + packagesToInstall = append([]string{kernelPkg}, packagesToInstall...) + } + + setupChrootDir := filepath.Join(buildDir, setupRoot) + + // Create Partition to Mountpoint map + mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap, diffDiskBuild := installutils.CreateMountPointPartitionMap(partIDToDevPathMap, partIDToFsTypeMap, systemConfig) + if diffDiskBuild { + mountPointToOverlayMap, err = installutils.UpdatePartitionMapWithOverlays(partIDToDevPathMap, partIDToFsTypeMap, mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap, systemConfig) + // Schedule unmount of overlays after the upper layers are unmounted.
+ defer installutils.OverlayUnmount(mountPointToOverlayMap) + if err != nil { + logger.Log.Error("Failed to create the partition map") + return + } + } + + if isOfflineInstall { + // Create setup chroot + additionalExtraMountPoints := []*safechroot.MountPoint{ + safechroot.NewMountPoint(cfg.Assets, assetsMountPoint, "", safechroot.BindMountPointFlags, ""), + safechroot.NewMountPoint(cfg.LocalRepo, localRepoMountPoint, "", safechroot.BindMountPointFlags, ""), + safechroot.NewMountPoint(filepath.Dir(cfg.RepoFile), repoFileMountPoint, "", safechroot.BindMountPointFlags, ""), + } + extraMountPoints = append(extraMountPoints, additionalExtraMountPoints...) + + setupChroot := safechroot.NewChroot(setupChrootDir, existingChrootDir) + err = setupChroot.Initialize(cfg.TdnfTar, extraDirectories, extraMountPoints) + if err != nil { + logger.Log.Error("Failed to create setup chroot") + return + } + defer setupChroot.Close(leaveChrootOnDisk) + + // Before entering the chroot, copy in any and all host files needed and + // fix up their paths to be in the tmp directory. 
+ err = fixupExtraFilesIntoChroot(setupChroot, &systemConfig) + if err != nil { + logger.Log.Error("Failed to copy extra files into setup chroot") + return + } + + err = setupChroot.Run(func() error { + return buildImage(mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap, partIDToDevPathMap, partIDToFsTypeMap, mountPointToOverlayMap, packagesToInstall, systemConfig, diskDevPath, isRootFS, encryptedRoot, readOnlyRoot, diffDiskBuild) + }) + if err != nil { + logger.Log.Error("Failed to build image") + return + } + + err = cleanupExtraFilesInChroot(setupChroot) + if err != nil { + logger.Log.Error("Failed to cleanup extra files in setup chroot") + return + } + + // Create any partition-based artifacts + err = installutils.ExtractPartitionArtifacts(setupChrootDir, outputDir, defaultDiskIndex, disks[defaultDiskIndex], systemConfig, partIDToDevPathMap, mountPointToOverlayMap) + if err != nil { + return + } + + // Copy disk artifact if necessary. + // Currently only supports one disk config + if !isRootFS { + if disks[defaultDiskIndex].Artifacts != nil { + input := filepath.Join(buildDir, defaultTempDiskName) + output := filepath.Join(outputDir, fmt.Sprintf("disk%d.raw", defaultDiskIndex)) + err = file.Copy(input, output) + if err != nil { + return + } + } + } + } else { + err = buildImage(mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap, partIDToDevPathMap, partIDToFsTypeMap, mountPointToOverlayMap, packagesToInstall, systemConfig, diskDevPath, isRootFS, encryptedRoot, readOnlyRoot, diffDiskBuild) + if err != nil { + logger.Log.Error("Failed to build image") + return + } + } + + // Cleanup encrypted disks + if systemConfig.Encryption.Enable { + err = diskutils.CleanupEncryptedDisks(encryptedRoot, isOfflineInstall) + if err != nil { + logger.Log.Warn("Failed to cleanup encrypted disks") + return + } + } + + return +} + +func setupDiskEncryption(systemConfig *configuration.SystemConfig, encryptedRoot *diskutils.EncryptedRootDevice, keyFileDir 
string) (err error) { + if systemConfig.Encryption.Enable { + // Add a default keyfile for initramfs unlock + encryptedRoot.HostKeyFile, err = diskutils.AddDefaultKeyfile(keyFileDir, encryptedRoot.Device, systemConfig.Encryption) + if err != nil { + logger.Log.Warnf("Failed to add default keyfile: %v", err) + return + } + + // Copy the default keyfile into the image + if len(systemConfig.AdditionalFiles) == 0 { + systemConfig.AdditionalFiles = make(map[string]string) + } + + systemConfig.AdditionalFiles[encryptedRoot.HostKeyFile] = diskutils.DefaultKeyFilePath + logger.Log.Infof("Adding default key file to systemConfig additional files") + } + + return +} + +func setupRootFS(outputDir, installRoot string) (extraMountPoints []*safechroot.MountPoint, extraDirectories []string, err error) { + const rootFSDirName = "rootfs" + + rootFSOutDir := filepath.Join(outputDir, rootFSDirName) + + // Ensure there is not already a directory at rootFSOutDir + exists, err := file.DirExists(rootFSOutDir) + logger.PanicOnError(err, "Failed while checking if directory (%s) exists.", rootFSOutDir) + if exists { + err = fmt.Errorf("output rootfs directory (%s) already exists", rootFSOutDir) + return + } + + err = os.MkdirAll(rootFSOutDir, os.ModePerm) + if err != nil { + return + } + + // For a rootfs, bind-mount the output directory to the chroot directory being installed to + rootFSMountPoint := safechroot.NewMountPoint(rootFSOutDir, installRoot, "", safechroot.BindMountPointFlags, "") + extraMountPoints = []*safechroot.MountPoint{rootFSMountPoint} + extraDirectories = []string{installRoot} + + return +} + +func setupDisk(outputDir, diskName string, liveInstallFlag bool, diskConfig configuration.Disk, rootEncryption configuration.RootEncryption, readOnlyRootConfig configuration.ReadOnlyVerityRoot) (diskDevPath string, partIDToDevPathMap, partIDToFsTypeMap map[string]string, isLoopDevice bool, encryptedRoot diskutils.EncryptedRootDevice, readOnlyRoot diskutils.VerityDevice, err error) { 
+ const ( + realDiskType = "path" + ) + if diskConfig.TargetDisk.Type == realDiskType { + if liveInstallFlag { + diskDevPath = diskConfig.TargetDisk.Value + partIDToDevPathMap, partIDToFsTypeMap, encryptedRoot, readOnlyRoot, err = setupRealDisk(diskDevPath, diskConfig, rootEncryption, readOnlyRootConfig) + } else { + err = fmt.Errorf("target Disk Type is set but --live-install option is not set. Please check your config or enable the --live-install option") + return + } + } else { + diskDevPath, partIDToDevPathMap, partIDToFsTypeMap, encryptedRoot, readOnlyRoot, err = setupLoopDeviceDisk(outputDir, diskName, diskConfig, rootEncryption, readOnlyRootConfig) + isLoopDevice = true + } + return +} + +func setupLoopDeviceDisk(outputDir, diskName string, diskConfig configuration.Disk, rootEncryption configuration.RootEncryption, readOnlyRootConfig configuration.ReadOnlyVerityRoot) (diskDevPath string, partIDToDevPathMap, partIDToFsTypeMap map[string]string, encryptedRoot diskutils.EncryptedRootDevice, readOnlyRoot diskutils.VerityDevice, err error) { + defer func() { + // Detach the loopback device on failure + if err != nil && diskDevPath != "" { + detachErr := diskutils.DetachLoopbackDevice(diskDevPath) + if detachErr != nil { + logger.Log.Errorf("Failed to detach loopback device on failed initialization. 
+ Error: %s", detachErr) + } + } + }() + + // Create Raw Disk File + rawDisk, err := diskutils.CreateEmptyDisk(outputDir, diskName, diskConfig) + if err != nil { + logger.Log.Errorf("Failed to create empty disk file in (%s)", outputDir) + return + } + + diskDevPath, err = diskutils.SetupLoopbackDevice(rawDisk) + if err != nil { + logger.Log.Errorf("Failed to mount raw disk (%s) as a loopback device", rawDisk) + return + } + + partIDToDevPathMap, partIDToFsTypeMap, encryptedRoot, readOnlyRoot, err = setupRealDisk(diskDevPath, diskConfig, rootEncryption, readOnlyRootConfig) + if err != nil { + logger.Log.Errorf("Failed to setup loopback disk partitions (%s)", rawDisk) + return + } + + return +} + +func setupRealDisk(diskDevPath string, diskConfig configuration.Disk, rootEncryption configuration.RootEncryption, readOnlyRootConfig configuration.ReadOnlyVerityRoot) (partIDToDevPathMap, partIDToFsTypeMap map[string]string, encryptedRoot diskutils.EncryptedRootDevice, readOnlyRoot diskutils.VerityDevice, err error) { + const ( + defaultBlockSize = diskutils.MiB + noMaxSize = 0 + ) + + // Set up partitions + partIDToDevPathMap, partIDToFsTypeMap, encryptedRoot, readOnlyRoot, err = diskutils.CreatePartitions(diskDevPath, diskConfig, rootEncryption, readOnlyRootConfig) + if err != nil { + logger.Log.Errorf("Failed to create partitions on disk (%s)", diskDevPath) + return + } + + // Apply firmware + err = diskutils.ApplyRawBinaries(diskDevPath, diskConfig) + if err != nil { + logger.Log.Errorf("Failed to add raw binaries to disk (%s)", diskDevPath) + return + } + + return +} + +// fixupExtraFilesIntoChroot will copy extra files needed for the build +// into the chroot and alter the extra files in the config to point at their new paths.
+func fixupExtraFilesIntoChroot(installChroot *safechroot.Chroot, config *configuration.SystemConfig) (err error) { + var filesToCopy []safechroot.FileToCopy + + for i, user := range config.Users { + for j, pubKey := range user.SSHPubKeyPaths { + newFilePath := filepath.Join(sshPubKeysTempDirectory, pubKey) + + fileToCopy := safechroot.FileToCopy{ + Src: pubKey, + Dest: newFilePath, + } + + config.Users[i].SSHPubKeyPaths[j] = newFilePath + filesToCopy = append(filesToCopy, fileToCopy) + } + } + + fixedUpAdditionalFiles := make(map[string]string) + for srcFile, dstFile := range config.AdditionalFiles { + newFilePath := filepath.Join(additionalFilesTempDirectory, srcFile) + + fileToCopy := safechroot.FileToCopy{ + Src: srcFile, + Dest: newFilePath, + } + + fixedUpAdditionalFiles[newFilePath] = dstFile + filesToCopy = append(filesToCopy, fileToCopy) + } + config.AdditionalFiles = fixedUpAdditionalFiles + + for i, script := range config.PostInstallScripts { + newFilePath := filepath.Join(postInstallScriptTempDirectory, script.Path) + + fileToCopy := safechroot.FileToCopy{ + Src: script.Path, + Dest: newFilePath, + } + + config.PostInstallScripts[i].Path = newFilePath + filesToCopy = append(filesToCopy, fileToCopy) + } + + err = installChroot.AddFiles(filesToCopy...) + return +} + +func cleanupExtraFiles() (err error) { + dirsToRemove := []string{additionalFilesTempDirectory, postInstallScriptTempDirectory, sshPubKeysTempDirectory} + + for _, dir := range dirsToRemove { + logger.Log.Infof("Cleaning up directory %s", dir) + err = os.RemoveAll(dir) + if err != nil { + logger.Log.Warnf("Failed to cleanup directory (%s). 
Error: %s", dir, err) + return + } + } + return +} + +func cleanupExtraFilesInChroot(chroot *safechroot.Chroot) (err error) { + logger.Log.Infof("Proceeding to cleanup extra files in chroot %s.", chroot.RootDir()) + err = chroot.Run(func() error { + return cleanupExtraFiles() + }) + return +} +func buildImage(mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap, partIDToDevPathMap, partIDToFsTypeMap map[string]string, mountPointToOverlayMap map[string]*installutils.Overlay, packagesToInstall []string, systemConfig configuration.SystemConfig, diskDevPath string, isRootFS bool, encryptedRoot diskutils.EncryptedRootDevice, readOnlyRoot diskutils.VerityDevice, diffDiskBuild bool) (err error) { + const ( + installRoot = "/installroot" + verityWorkingDir = "verityworkingdir" + emptyWorkerTar = "" + rootDir = "/" + existingChrootDir = true + leaveChrootOnDisk = true + ) + + var installMap map[string]string + + // Only invoke CreateInstallRoot for a raw disk. This call will result in mount points being created from a raw disk + // into the install root. A rootfs will not have these. 
+ if !isRootFS { + installMap, err = installutils.CreateInstallRoot(installRoot, mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap, mountPointToOverlayMap) + if err != nil { + err = fmt.Errorf("failed to create install root: %s", err) + return + } + defer installutils.DestroyInstallRoot(installRoot, installMap, mountPointToOverlayMap) + } + + // Install any tools required for the setup root to function + setupChrootPackages := []string{} + toolingPackages := installutils.GetRequiredPackagesForInstall() + for _, toolingPackage := range toolingPackages { + setupChrootPackages = append(setupChrootPackages, toolingPackage.Name) + } + + logger.Log.Infof("HidepidDisabled is %v.", systemConfig.HidepidDisabled) + hidepidEnabled := !systemConfig.HidepidDisabled + + if systemConfig.ReadOnlyVerityRoot.Enable { + // We will need the veritysetup package (and its dependencies) to manage the verity disk, add them to our + // image setup environment (setuproot chroot or live installer). + verityPackages := []string{"device-mapper", "veritysetup"} + setupChrootPackages = append(setupChrootPackages, verityPackages...) 
+ } + + for _, setupChrootPackage := range setupChrootPackages { + _, err = installutils.TdnfInstall(setupChrootPackage, rootDir) + if err != nil { + err = fmt.Errorf("failed to install required setup chroot package '%s': %w", setupChrootPackage, err) + return + } + } + + // Create new chroot for the new image + installChroot := safechroot.NewChroot(installRoot, existingChrootDir) + extraInstallMountPoints := []*safechroot.MountPoint{} + extraDirectories := []string{} + err = installChroot.Initialize(emptyWorkerTar, extraDirectories, extraInstallMountPoints) + if err != nil { + err = fmt.Errorf("failed to create install chroot: %s", err) + return + } + defer installChroot.Close(leaveChrootOnDisk) + + // Populate image contents + err = installutils.PopulateInstallRoot(installChroot, packagesToInstall, systemConfig, installMap, mountPointToFsTypeMap, mountPointToMountArgsMap, partIDToDevPathMap, partIDToFsTypeMap, isRootFS, encryptedRoot, diffDiskBuild, hidepidEnabled) + if err != nil { + err = fmt.Errorf("failed to populate image contents: %s", err) + return + } + + // Only configure the bootloader or read only partitions for actual disks, a rootfs does not need these + if !isRootFS { + err = configureDiskBootloader(systemConfig, installChroot, diskDevPath, installMap, encryptedRoot, readOnlyRoot) + if err != nil { + err = fmt.Errorf("failed to configure boot loader: %w", err) + return + } + + // Preconfigure SELinux labels now since all the changes to the filesystem should be done + if systemConfig.KernelCommandLine.SELinux != configuration.SELinuxOff { + err = installutils.SELinuxConfigure(systemConfig, installChroot, mountPointToFsTypeMap) + if err != nil { + err = fmt.Errorf("failed to configure selinux: %w", err) + return + } + } + + // Snapshot the root filesystem as a read-only verity disk and update the initramfs. 
+ if systemConfig.ReadOnlyVerityRoot.Enable { + var initramfsPathList []string + err = readOnlyRoot.SwitchDeviceToReadOnly(mountPointMap["/"], mountPointToMountArgsMap["/"]) + if err != nil { + err = fmt.Errorf("failed to switch root to read-only: %w", err) + return + } + installutils.ReportAction("Hashing root for read-only with dm-verity, this may take a long time if error correction is enabled") + initramfsPathList, err = filepath.Glob(filepath.Join(installRoot, "/boot/initrd.img*")) + if err != nil || len(initramfsPathList) != 1 { + return fmt.Errorf("could not find single initramfs (%v): %w", initramfsPathList, err) + } + err = readOnlyRoot.AddRootVerityFilesToInitramfs(verityWorkingDir, initramfsPathList[0]) + if err != nil { + err = fmt.Errorf("failed to include read-only root files in initramfs: %w", err) + return + } + } + } + + return +} + +func configureDiskBootloader(systemConfig configuration.SystemConfig, installChroot *safechroot.Chroot, diskDevPath string, installMap map[string]string, encryptedRoot diskutils.EncryptedRootDevice, readOnlyRoot diskutils.VerityDevice) (err error) { + const rootMountPoint = "/" + const bootMountPoint = "/boot" + + var rootDevice string + + // Add bootloader. Prefer a separate boot partition if one exists. + bootDevice, ok := installMap[bootMountPoint] + bootPrefix := "" + if !ok { + bootDevice = installMap[rootMountPoint] + // If we do not have a separate boot partition we will need to add a prefix to all paths used in the configs. + bootPrefix = "/boot" + } + + if installMap[rootMountPoint] == installutils.NullDevice { + // In case of overlay device being mounted at root, no need to change the bootloader. 
+ return + } + + // Grub only accepts UUID, not PARTUUID or PARTLABEL + bootUUID, err := installutils.GetUUID(bootDevice) + if err != nil { + err = fmt.Errorf("failed to get UUID: %s", err) + return + } + + bootType := systemConfig.BootType + err = installutils.InstallBootloader(installChroot, systemConfig.Encryption.Enable, bootType, bootUUID, bootPrefix, diskDevPath) + if err != nil { + err = fmt.Errorf("failed to install bootloader: %s", err) + return + } + + // Add grub config to image + rootPartitionSetting := systemConfig.GetRootPartitionSetting() + if rootPartitionSetting == nil { + err = fmt.Errorf("failed to find partition setting for root mountpoint") + return + } + rootMountIdentifier := rootPartitionSetting.MountIdentifier + if systemConfig.Encryption.Enable { + // Encrypted devices don't currently support identifiers + rootDevice = installMap[rootMountPoint] + } else if systemConfig.ReadOnlyVerityRoot.Enable { + var partIdentifier string + partIdentifier, err = installutils.FormatMountIdentifier(rootMountIdentifier, readOnlyRoot.BackingDevice) + if err != nil { + err = fmt.Errorf("failed to get partIdentifier: %s", err) + return + } + rootDevice = fmt.Sprintf("verityroot:%v", partIdentifier) + } else { + var partIdentifier string + partIdentifier, err = installutils.FormatMountIdentifier(rootMountIdentifier, installMap[rootMountPoint]) + if err != nil { + err = fmt.Errorf("failed to get partIdentifier: %s", err) + return + } + + rootDevice = partIdentifier + } + + // Grub will always use filesystem UUID, never PARTUUID or PARTLABEL + err = installutils.InstallGrubCfg(installChroot.RootDir(), rootDevice, bootUUID, bootPrefix, encryptedRoot, systemConfig.KernelCommandLine, readOnlyRoot) + if err != nil { + err = fmt.Errorf("failed to install main grub config file: %s", err) + return + } + + return +} diff --git a/toolkit/tools/internal/logger/hooks/writerhook/writerhook.go b/toolkit/tools/pkg/logger/hooks/writerhook/writerhook.go similarity index 100% 
rename from toolkit/tools/internal/logger/hooks/writerhook/writerhook.go rename to toolkit/tools/pkg/logger/hooks/writerhook/writerhook.go diff --git a/toolkit/tools/internal/logger/log.go b/toolkit/tools/pkg/logger/log.go similarity index 98% rename from toolkit/tools/internal/logger/log.go rename to toolkit/tools/pkg/logger/log.go index 6e8fc49239e..b433bcf3ca6 100644 --- a/toolkit/tools/internal/logger/log.go +++ b/toolkit/tools/pkg/logger/log.go @@ -15,7 +15,7 @@ import ( log "github.com/sirupsen/logrus" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger/hooks/writerhook" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger/hooks/writerhook" ) var ( diff --git a/toolkit/tools/pkg/pkgworker/config.go b/toolkit/tools/pkg/pkgworker/config.go new file mode 100644 index 00000000000..0b0369b9f88 --- /dev/null +++ b/toolkit/tools/pkg/pkgworker/config.go @@ -0,0 +1,18 @@ +package pkgworker + +type Config struct { + SrpmFile string + WorkDir string + WorkerTar string + RepoFile string + RpmsDirPath string + SrpmsDirPath string + CacheDir string + NoCleanup bool + DistTag string + DistroReleaseVersion string + DistroBuildNumber string + RpmmacrosFile string + RunCheck bool + PackagesToInstall []string +} diff --git a/toolkit/tools/pkg/pkgworker/pkgworker.go b/toolkit/tools/pkg/pkgworker/pkgworker.go new file mode 100644 index 00000000000..dda1754d7b7 --- /dev/null +++ b/toolkit/tools/pkg/pkgworker/pkgworker.go @@ -0,0 +1,354 @@ +package pkgworker + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/packagerepo/repomanager/rpmrepomanager" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/rpm" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/sliceutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + 
"github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/safechroot" +) + +const ( + chrootRpmBuildRoot = "/usr/src/mariner" + chrootLocalRpmsDir = "/localrpms" + chrootLocalRpmsCacheDir = "/upstream-cached-rpms" +) + +var ( + brPackageNameRegex = regexp.MustCompile(`^[^\s]+`) + equalToRegex = regexp.MustCompile(` '?='? `) + greaterThanOrEqualRegex = regexp.MustCompile(` '?>='? [^ ]*`) + installedPackageNameRegex = regexp.MustCompile(`^(.+)(-[^-]+-[^-]+)`) + lessThanOrEqualToRegex = regexp.MustCompile(` '?<='? `) + packageUnavailableRegex = regexp.MustCompile(`^No package \\x1b\[1m\\x1b\[30m(.+) \\x1b\[0mavailable`) +) + +func (cfg *Config) BuildSRPMInChrootAndCopyToOut() ([]string, error) { + rpmsDirAbsPath, err := filepath.Abs(cfg.RpmsDirPath) + // Todo .. do not panic here .. but bubble it up accordingly + logger.PanicOnError(err, "Unable to find absolute path for RPMs directory '%s'", cfg.RpmsDirPath) + + srpmsDirAbsPath, err := filepath.Abs(cfg.SrpmsDirPath) + logger.PanicOnError(err, "Unable to find absolute path for SRPMs directory '%s'", cfg.SrpmsDirPath) + + srpmName := strings.TrimSuffix(filepath.Base(cfg.SrpmFile), ".src.rpm") + chrootDir := filepath.Join(cfg.WorkDir, srpmName) + + defines := rpm.DefaultDefines(cfg.RunCheck) + defines[rpm.DistTagDefine] = cfg.DistTag + defines[rpm.DistroReleaseVersionDefine] = cfg.DistroReleaseVersion + defines[rpm.DistroBuildNumberDefine] = cfg.DistroBuildNumber + + builtRPMs, err := buildSRPMInChroot(chrootDir, rpmsDirAbsPath, cfg.WorkerTar, cfg.SrpmFile, cfg.RepoFile, cfg.RpmmacrosFile, cfg.CacheDir, defines, cfg.NoCleanup, cfg.RunCheck, cfg.PackagesToInstall) + err = copySRPMToOutput(cfg.SrpmFile, srpmsDirAbsPath) + logger.PanicOnError(err, "Failed to copy SRPM '%s' to output directory '%s'.", cfg.SrpmFile, rpmsDirAbsPath) + + return builtRPMs, err +} + +func copySRPMToOutput(srpmFilePath, srpmOutputDirPath string) (err error) { + const srpmsDirName = "SRPMS" + + srpmFileName := filepath.Base(srpmFilePath) + 
srpmOutputFilePath := filepath.Join(srpmOutputDirPath, srpmFileName) + + err = file.Copy(srpmFilePath, srpmOutputFilePath) + + return +} + +func buildSRPMInChroot(chrootDir, rpmDirPath, workerTar, srpmFile, repoFile, rpmmacrosFile, cacheDir string, defines map[string]string, noCleanup, runCheck bool, packagesToInstall []string) (builtRPMs []string, err error) { + const ( + buildHeartbeatTimeout = 30 * time.Minute + + existingChrootDir = false + squashErrors = false + + overlaySource = "" + overlayWorkDir = "/overlaywork" + rpmDirName = "RPMS" + ) + + srpmBaseName := filepath.Base(srpmFile) + + quit := make(chan bool) + go func() { + logger.Log.Infof("Building (%s).", srpmBaseName) + + for { + select { + case <-quit: + if err == nil { + logger.Log.Infof("Built (%s) -> %v.", srpmBaseName, builtRPMs) + } + return + case <-time.After(buildHeartbeatTimeout): + logger.Log.Infof("Heartbeat: still building (%s).", srpmBaseName) + } + } + }() + defer func() { + quit <- true + }() + + // Create the chroot used to build the SRPM + chroot := safechroot.NewChroot(chrootDir, existingChrootDir) + + overlayMount, overlayExtraDirs := safechroot.NewOverlayMountPoint(chroot.RootDir(), overlaySource, chrootLocalRpmsDir, rpmDirPath, chrootLocalRpmsDir, overlayWorkDir) + rpmCacheMount := safechroot.NewMountPoint(cacheDir, chrootLocalRpmsCacheDir, "", safechroot.BindMountPointFlags, "") + mountPoints := []*safechroot.MountPoint{overlayMount, rpmCacheMount} + extraDirs := append(overlayExtraDirs, chrootLocalRpmsCacheDir) + + err = chroot.Initialize(workerTar, extraDirs, mountPoints) + if err != nil { + return + } + defer chroot.Close(noCleanup) + + // Place extra files that will be needed to build into the chroot + srpmFileInChroot, err := copyFilesIntoChroot(chroot, srpmFile, repoFile, rpmmacrosFile, runCheck) + if err != nil { + return + } + + err = chroot.Run(func() (err error) { + return buildRPMFromSRPMInChroot(srpmFileInChroot, runCheck, defines, packagesToInstall) + }) + if err != 
nil { + return + } + + rpmBuildOutputDir := filepath.Join(chroot.RootDir(), chrootRpmBuildRoot, rpmDirName) + builtRPMs, err = moveBuiltRPMs(rpmBuildOutputDir, rpmDirPath) + + return +} + +func buildRPMFromSRPMInChroot(srpmFile string, runCheck bool, defines map[string]string, packagesToInstall []string) (err error) { + // Convert /localrpms into a repository that a package manager can use. + err = rpmrepomanager.CreateRepo(chrootLocalRpmsDir) + if err != nil { + return + } + + // install any additional packages, such as build dependencies. + err = tdnfInstall(packagesToInstall) + if err != nil { + return + } + + // Remove all libarchive files on the system before issuing a build. + // If the build environment has libtool archive files present, gnu configure + // could detect it and create more libtool archive files which can cause + // build failures. + err = removeLibArchivesFromSystem() + if err != nil { + return + } + + // Build the SRPM + if runCheck { + err = rpm.BuildRPMFromSRPM(srpmFile, defines) + } else { + err = rpm.BuildRPMFromSRPM(srpmFile, defines, "--nocheck") + } + + return +} + +func moveBuiltRPMs(rpmOutDir, dstDir string) (builtRPMs []string, err error) { + const rpmExtension = ".rpm" + err = filepath.Walk(rpmOutDir, func(path string, info os.FileInfo, fileErr error) (err error) { + if fileErr != nil { + return fileErr + } + + // Only copy regular files (not unix sockets, directories, links, ...) + if !info.Mode().IsRegular() { + return + } + + if !strings.HasSuffix(path, rpmExtension) { + return + } + + // Get the relative path of the RPM, this will include the architecture directory it lives in. + // Then join the relative path to the destination directory, this will ensure the RPM gets placed + // in its correct architecture directory. 
+ relPath, err := filepath.Rel(rpmOutDir, path) + if err != nil { + return + } + + dstFile := filepath.Join(dstDir, relPath) + err = file.Move(path, dstFile) + if err != nil { + return + } + + builtRPMs = append(builtRPMs, dstFile) + return + }) + + return +} + +func tdnfInstall(packages []string) (err error) { + const ( + alreadyInstalledPostfix = "is already installed" + noMatchingPackagesErr = "Error(1011) : No matching packages" + packageMatchGroup = 1 + ) + + if len(packages) == 0 { + return + } + + // TDNF supports requesting versioned packages in the form of {name}-{version}.{dist}.{arch}. + // The packages to install list may contain file paths to rpm files so those will need to be filtered: + // - Strip any .rpm from packages as TDNF does not support requesting a package with the extension. + // - Strip any filepath from packages. + for i := range packages { + packages[i] = filepath.Base(strings.TrimSuffix(packages[i], ".rpm")) + } + + installArgs := []string{"install", "-y"} + installArgs = append(installArgs, packages...) + stdout, stderr, err := shell.Execute("tdnf", installArgs...) + foundNoMatchingPackages := false + + if err != nil { + logger.Log.Warnf("Failed to install build requirements. stderr: %s\nstdout: %s", stderr, stdout) + // TDNF will output an error if all packages are already installed. + // Ignore it iff there is no other error present in stderr. + splitStderr := strings.Split(stderr, "\n") + for _, line := range splitStderr { + trimmedLine := strings.TrimSpace(line) + if trimmedLine == "" { + continue + } + + if strings.Contains(trimmedLine, noMatchingPackagesErr) { + foundNoMatchingPackages = true + } + + if !strings.HasSuffix(trimmedLine, alreadyInstalledPostfix) && trimmedLine != noMatchingPackagesErr { + err = fmt.Errorf(trimmedLine) + return + } + } + err = nil + } + + // TDNF will ignore unavailable packages that have been requested to be installed without reporting an error code. 
+ // Search the stdout of TDNF for such a failure and warn the user. + // This may happen if a SPEC requires the path to a tool (e.g. /bin/cp), so mark it as a warning for now. + var failedToInstall []string + splitStdout := strings.Split(stdout, "\n") + for _, line := range splitStdout { + trimmedLine := strings.TrimSpace(line) + matches := packageUnavailableRegex.FindStringSubmatch(trimmedLine) + if len(matches) == 0 { + continue + } + + failedToInstall = append(failedToInstall, matches[packageMatchGroup]) + } + + // TDNF will output the error "Error(1011) : No matching packages" if all packages could not be found. + // In this case it will not print any of the individual packages that failed. + if foundNoMatchingPackages && len(failedToInstall) == 0 { + failedToInstall = packages + } + + if len(failedToInstall) != 0 { + err = fmt.Errorf("unable to install the following packages: %v", failedToInstall) + } + + return +} + +// removeLibArchivesFromSystem removes all libarchive files on the system. If +// the build environment has libtool archive files present, gnu configure could +// detect it and create more libtool archive files which can cause build failures. +func removeLibArchivesFromSystem() (err error) { + dirsToExclude := []string{"/proc", "/dev", "/sys", "/run"} + + err = filepath.Walk("/", func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip directories that are meant for device files and kernel virtual filesystems. + // These will not contain .la files and are mounted into the safechroot from the host. + if info.IsDir() && sliceutils.Contains(dirsToExclude, path, sliceutils.StringMatch) { + return filepath.SkipDir + } + + if strings.HasSuffix(info.Name(), ".la") { + return os.Remove(path) + } + + return nil + }) + + if err != nil { + logger.Log.Warnf("Unable to remove lib archive file: %s", err) + } + + return +} + +// copyFilesIntoChroot copies several required build specific files into the chroot. 
+func copyFilesIntoChroot(chroot *safechroot.Chroot, srpmFile, repoFile, rpmmacrosFile string, runCheck bool) (srpmFileInChroot string, err error) { + const ( + chrootRepoDestDir = "/etc/yum.repos.d" + chrootSrpmDestDir = "/root/SRPMS" + resolvFilePath = "/etc/resolv.conf" + rpmmacrosDest = "/usr/lib/rpm/macros.d/macros.override" + ) + + repoFileInChroot := filepath.Join(chrootRepoDestDir, filepath.Base(repoFile)) + srpmFileInChroot = filepath.Join(chrootSrpmDestDir, filepath.Base(srpmFile)) + + filesToCopy := []safechroot.FileToCopy{ + safechroot.FileToCopy{ + Src: repoFile, + Dest: repoFileInChroot, + }, + safechroot.FileToCopy{ + Src: srpmFile, + Dest: srpmFileInChroot, + }, + } + + if rpmmacrosFile != "" { + rpmmacrosCopy := safechroot.FileToCopy{ + Src: rpmmacrosFile, + Dest: rpmmacrosDest, + } + filesToCopy = append(filesToCopy, rpmmacrosCopy) + } + + if runCheck { + logger.Log.Debug("Enabling network access because we're running package tests.") + + resolvFileCopy := safechroot.FileToCopy{ + Src: resolvFilePath, + Dest: resolvFilePath, + } + filesToCopy = append(filesToCopy, resolvFileCopy) + } + + err = chroot.AddFiles(filesToCopy...) 
+ return +} diff --git a/toolkit/tools/internal/safechroot/safechroot.go b/toolkit/tools/pkg/safechroot/safechroot.go similarity index 99% rename from toolkit/tools/internal/safechroot/safechroot.go rename to toolkit/tools/pkg/safechroot/safechroot.go index c1bbb523711..55f7bf34fad 100644 --- a/toolkit/tools/internal/safechroot/safechroot.go +++ b/toolkit/tools/pkg/safechroot/safechroot.go @@ -14,10 +14,10 @@ import ( "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/buildpipeline" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/retry" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/systemdependency" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" diff --git a/toolkit/tools/internal/safechroot/safechroot_test.go b/toolkit/tools/pkg/safechroot/safechroot_test.go similarity index 99% rename from toolkit/tools/internal/safechroot/safechroot_test.go rename to toolkit/tools/pkg/safechroot/safechroot_test.go index b6c36c544c3..ec26de7c535 100644 --- a/toolkit/tools/internal/safechroot/safechroot_test.go +++ b/toolkit/tools/pkg/safechroot/safechroot_test.go @@ -10,7 +10,7 @@ import ( "testing" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/buildpipeline" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" "github.com/stretchr/testify/assert" ) diff --git a/toolkit/tools/internal/safechroot/testdata/testchroot.tar.gz b/toolkit/tools/pkg/safechroot/testdata/testchroot.tar.gz similarity index 100% rename from toolkit/tools/internal/safechroot/testdata/testchroot.tar.gz rename to toolkit/tools/pkg/safechroot/testdata/testchroot.tar.gz diff --git 
a/toolkit/tools/internal/safechroot/testdata/testmount/testfile.txt b/toolkit/tools/pkg/safechroot/testdata/testmount/testfile.txt similarity index 100% rename from toolkit/tools/internal/safechroot/testdata/testmount/testfile.txt rename to toolkit/tools/pkg/safechroot/testdata/testmount/testfile.txt diff --git a/toolkit/tools/scheduler/buildagents/chrootagent.go b/toolkit/tools/pkg/scheduler/buildagents/chrootagent.go similarity index 98% rename from toolkit/tools/scheduler/buildagents/chrootagent.go rename to toolkit/tools/pkg/scheduler/buildagents/chrootagent.go index 79f96806b37..d4aae4b7e96 100644 --- a/toolkit/tools/scheduler/buildagents/chrootagent.go +++ b/toolkit/tools/pkg/scheduler/buildagents/chrootagent.go @@ -8,8 +8,8 @@ import ( "path/filepath" "strings" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // ChrootAgentFlag is the build-agent option for ChrootAgent. 
diff --git a/toolkit/tools/scheduler/buildagents/definition.go b/toolkit/tools/pkg/scheduler/buildagents/definition.go similarity index 100% rename from toolkit/tools/scheduler/buildagents/definition.go rename to toolkit/tools/pkg/scheduler/buildagents/definition.go diff --git a/toolkit/tools/scheduler/buildagents/testagent.go b/toolkit/tools/pkg/scheduler/buildagents/testagent.go similarity index 100% rename from toolkit/tools/scheduler/buildagents/testagent.go rename to toolkit/tools/pkg/scheduler/buildagents/testagent.go diff --git a/toolkit/tools/pkg/scheduler/config.go b/toolkit/tools/pkg/scheduler/config.go new file mode 100644 index 00000000000..e4783274f6f --- /dev/null +++ b/toolkit/tools/pkg/scheduler/config.go @@ -0,0 +1,36 @@ +package scheduler + +type Config struct { + InputGraphFile string + OutputGraphFile string + OutputCSVFile string + WorkDir string + WorkerTar string + RepoFile string + RpmDir string + SrpmDir string + CacheDir string + BuildLogsDir string + ImageConfig string + BaseDirPath string + DistTag string + DistroReleaseVersion string + DistroBuildNumber string + RpmmacrosFile string + BuildAttempts int + RunCheck bool + NoCleanup bool + NoCache bool + StopOnFailure bool + ReservedFileListFile string + DeltaBuild bool + ValidBuildAgentFlags []string + BuildAgent string + BuildAgentProgram string + Workers int + IgnoredPackages string + PkgsToBuild string + PkgsToRebuild string + LogFile string + LogLevel string +} diff --git a/toolkit/tools/pkg/scheduler/scheduler.go b/toolkit/tools/pkg/scheduler/scheduler.go new file mode 100644 index 00000000000..62e4d49227c --- /dev/null +++ b/toolkit/tools/pkg/scheduler/scheduler.go @@ -0,0 +1,397 @@ +package scheduler + +import ( + "fmt" + "os" + "os/signal" + "runtime" + "sync" + "time" + + "github.com/juliangruber/go-intersect" + "golang.org/x/sys/unix" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" + 
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/scheduler/buildagents" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/scheduler/schedulerutils" +) + +// schedulerChannels represents the communication channels used by a build agent. +// Unlike BuildChannels, schedulerChannels holds bidirectional channels that +// only the top-level scheduler should have. BuildChannels contains directional channels. +type schedulerChannels struct { + Requests chan *schedulerutils.BuildRequest + PriorityRequests chan *schedulerutils.BuildRequest + Results chan *schedulerutils.BuildResult + Cancel chan struct{} + Done chan struct{} +} + +func (cfg *Config) ScheduleBuild() error { + if cfg.Workers <= 0 { + cfg.Workers = runtime.NumCPU() + logger.Log.Debugf("No worker count supplied, discovered %d logical CPUs.", cfg.Workers) + } + + if cfg.BuildAttempts <= 0 { + logger.Log.Fatalf("Value in --build-attempts must be greater than zero. Found %d", cfg.BuildAttempts) + } + + ignoredPackages := exe.ParseListArgument(cfg.IgnoredPackages) + reservedFileListFile := cfg.ReservedFileListFile + + // Generate the list of packages that need to be built. + // If none are requested then all packages will be built. + packagesNamesToBuild := exe.ParseListArgument(cfg.PkgsToBuild) + packagesNamesToRebuild := exe.ParseListArgument(cfg.PkgsToRebuild) + + ignoredAndRebuiltPackages := intersect.Hash(ignoredPackages, packagesNamesToRebuild) + if len(ignoredAndRebuiltPackages) != 0 { + logger.Log.Fatalf("Can't ignore and force a rebuild of a package at the same time. 
Abusing packages: %v", ignoredAndRebuiltPackages) + } + + packageVersToBuild, err := schedulerutils.CalculatePackagesToBuild(packagesNamesToBuild, packagesNamesToRebuild, cfg.InputGraphFile, cfg.ImageConfig, cfg.BaseDirPath) + if err != nil { + logger.Log.Fatalf("Unable to generate package build list, error: %s", err) + } + + var reservedFiles []string + if len(reservedFileListFile) > 0 { + reservedFiles, err = schedulerutils.ReadReservedFilesList(reservedFileListFile) + if err != nil { + logger.Log.Fatalf("unable to read reserved file list %s: %s", reservedFileListFile, err) + } + } + + // Setup a build agent to handle build requests from the scheduler. + buildAgentConfig := &buildagents.BuildAgentConfig{ + Program: cfg.BuildAgentProgram, + CacheDir: cfg.CacheDir, + RepoFile: cfg.RepoFile, + RpmDir: cfg.RpmDir, + SrpmDir: cfg.SrpmDir, + WorkDir: cfg.WorkDir, + WorkerTar: cfg.WorkerTar, + DistTag: cfg.DistTag, + DistroReleaseVersion: cfg.DistroReleaseVersion, + DistroBuildNumber: cfg.DistroBuildNumber, + RpmmacrosFile: cfg.RpmmacrosFile, + NoCleanup: cfg.NoCleanup, + RunCheck: cfg.RunCheck, + LogDir: cfg.BuildLogsDir, + LogLevel: cfg.LogLevel, + } + + agent, err := buildagents.BuildAgentFactory(cfg.BuildAgent) + if err != nil { + logger.Log.Fatalf("Unable to select build agent, error: %s", err) + } + + err = agent.Initialize(buildAgentConfig) + if err != nil { + logger.Log.Fatalf("Unable to initialize build agent, error: %s", err) + } + + // Setup cleanup routines to ensure no builds are left running when scheduler is exiting. + // Ensure no outstanding agents are running on graceful exit + defer cancelOutstandingBuilds(agent) + // On a SIGINT or SIGTERM stop all agents. 
+ signals := make(chan os.Signal, 1) + signal.Notify(signals, unix.SIGINT, unix.SIGTERM) + go cancelBuildsOnSignal(signals, agent) + + err = cfg.buildGraph(cfg.InputGraphFile, cfg.OutputGraphFile, agent, cfg.Workers, cfg.BuildAttempts, cfg.StopOnFailure, !cfg.NoCache, packageVersToBuild, packagesNamesToRebuild, ignoredPackages, reservedFiles, cfg.DeltaBuild) + return err +} + +// cancelOutstandingBuilds stops any builds that are currently running. +func cancelOutstandingBuilds(agent buildagents.BuildAgent) { + err := agent.Close() + if err != nil { + logger.Log.Errorf("Unable to close build agent, error: %s", err) + } + + // Issue a SIGINT to all children processes to allow them to gracefully exit. + shell.PermanentlyStopAllProcesses(unix.SIGINT) +} + +// cancelBuildsOnSignal will stop any builds running on SIGINT/SIGTERM. +func cancelBuildsOnSignal(signals chan os.Signal, agent buildagents.BuildAgent) { + sig := <-signals + logger.Log.Error(sig) + + cancelOutstandingBuilds(agent) + os.Exit(1) +} + +// buildGraph builds all packages in the dependency graph requested. +// It will save the resulting graph to outputFile. +func (cfg *Config) buildGraph(inputFile, outputFile string, agent buildagents.BuildAgent, workers, buildAttempts int, stopOnFailure, canUseCache bool, packagesToBuild []*pkgjson.PackageVer, packagesNamesToRebuild, ignoredPackages, reservedFiles []string, deltaBuild bool) (err error) { + // graphMutex guards pkgGraph from concurrent reads and writes during build. + var graphMutex sync.RWMutex + + isGraphOptimized, pkgGraph, goalNode, err := schedulerutils.InitializeGraph(inputFile, packagesToBuild, deltaBuild) + if err != nil { + return + } + + // Setup and start the worker pool and scheduler routine. 
+ numberOfNodes := pkgGraph.Nodes().Len() + + channels := startWorkerPool(agent, workers, buildAttempts, numberOfNodes, &graphMutex, ignoredPackages) + logger.Log.Infof("Building %d nodes with %d workers", numberOfNodes, workers) + + // After this call pkgGraph will be given to multiple routines and accessing it requires acquiring the mutex. + builtGraph, err := cfg.buildAllNodes(stopOnFailure, isGraphOptimized, canUseCache, packagesNamesToRebuild, pkgGraph, &graphMutex, goalNode, channels, reservedFiles, deltaBuild) + + if builtGraph != nil { + graphMutex.RLock() + defer graphMutex.RUnlock() + + saveErr := pkggraph.WriteDOTGraphFile(builtGraph, outputFile) + if saveErr != nil { + logger.Log.Errorf("Failed to save built graph, error: %s", saveErr) + } + } + + return +} + +// startWorkerPool starts the worker pool and returns the communication channels between the workers and the scheduler. +// channelBufferSize controls how many entries in the channels can be buffered before blocking writes to them. +func startWorkerPool(agent buildagents.BuildAgent, workers, buildAttempts, channelBufferSize int, graphMutex *sync.RWMutex, ignoredPackages []string) (channels *schedulerChannels) { + channels = &schedulerChannels{ + Requests: make(chan *schedulerutils.BuildRequest, channelBufferSize), + PriorityRequests: make(chan *schedulerutils.BuildRequest, channelBufferSize), + Results: make(chan *schedulerutils.BuildResult, channelBufferSize), + Cancel: make(chan struct{}), + Done: make(chan struct{}), + } + + // Downcast the bidirectional scheduler channels into directional channels for the build workers. + directionalChannels := &schedulerutils.BuildChannels{ + Requests: channels.Requests, + PriorityRequests: channels.PriorityRequests, + Results: channels.Results, + Cancel: channels.Cancel, + Done: channels.Done, + } + + // Start the workers now so they begin working as soon as a new job is queued. 
+ for i := 0; i < workers; i++ { + logger.Log.Debugf("Starting worker #%d", i) + go schedulerutils.BuildNodeWorker(directionalChannels, agent, graphMutex, buildAttempts, ignoredPackages) + } + + return +} + +// buildAllNodes will build all nodes in a given dependency graph. +// This routine only contains control flow logic for build scheduling. +// It iteratively: +// - Calculates any unblocked nodes. +// - Submits these nodes to the worker pool to be processed. +// - Grabs a single build result from the worker pool. +// - Attempts to satisfy any unresolved dynamic dependencies with new implicit provides from the build result. +// - Attempts to subgraph the graph to only contain the requested packages if possible. +// - Repeat. +func (cfg *Config) buildAllNodes(stopOnFailure, isGraphOptimized, canUseCache bool, packagesNamesToRebuild []string, pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, goalNode *pkggraph.PkgNode, channels *schedulerChannels, reservedFiles []string, deltaBuild bool) (builtGraph *pkggraph.PkgGraph, err error) { + var ( + // stopBuilding tracks if the build has entered a failed state and this routine should stop as soon as possible. + stopBuilding bool + // useCachedImplicit tracks if cached implicit provides can be used to satisfy unresolved dynamic dependencies. + // Local packages are preferred over cached remotes ones to satisfy these unresolved dependencies, however + // the scheduler does not know what packages provide which implicit provides until the packages have been built. + // Therefore the scheduler will attempt to build all possible packages without consuming any cached dynamic dependencies first. + useCachedImplicit bool + ) + + // Start the build at the leaf nodes. + // The build will bubble up through the graph as it processes nodes. 
+ buildState := schedulerutils.NewGraphBuildState(reservedFiles) + nodesToBuild := schedulerutils.LeafNodes(pkgGraph, graphMutex, goalNode, buildState, useCachedImplicit) + + for { + logger.Log.Debugf("Found %d unblocked nodes", len(nodesToBuild)) + + // Each node that is ready to build must be converted into a build request and submitted to the worker pool. + newRequests := schedulerutils.ConvertNodesToRequests(pkgGraph, graphMutex, nodesToBuild, packagesNamesToRebuild, buildState, canUseCache, deltaBuild) + for _, req := range newRequests { + buildState.RecordBuildRequest(req) + // Decide which priority the build should be. Generally we want to get any remote or prebuilt nodes out of the + // way as quickly as possible since they may help us optimize the graph early. + // Meta nodes may also be blocking something we want to examine and give higher priority (priority inheritance from + // the hypothetical high priority node hidden further into the tree) + switch req.Node.Type { + case pkggraph.TypePreBuilt: + channels.PriorityRequests <- req + + // For now all build nodes are of equal priority + case pkggraph.TypeGoal: + fallthrough + case pkggraph.TypePureMeta: + fallthrough + case pkggraph.TypeRun: + fallthrough + case pkggraph.TypeRemote: + fallthrough + case pkggraph.TypeBuild: + fallthrough + default: + channels.Requests <- req + } + } + nodesToBuild = nil + + // If there are no active builds running try enabling cached packages for unresolved dynamic dependencies to unblock more nodes. + // Otherwise there is nothing left that can be built. 
+ if len(buildState.ActiveBuilds()) == 0 { + if useCachedImplicit { + err = fmt.Errorf("could not build all packages") + break + } else { + logger.Log.Warn("Enabling cached packages to satisfy unresolved dynamic dependencies.") + useCachedImplicit = true + nodesToBuild = schedulerutils.LeafNodes(pkgGraph, graphMutex, goalNode, buildState, useCachedImplicit) + continue + } + } + + // Process the next build result + res := <-channels.Results + schedulerutils.PrintBuildResult(res) + buildState.RecordBuildResult(res) + + if !stopBuilding { + if res.Err == nil { + // If the graph has already been optimized and is now solvable without any additional information + // then skip processing any new implicit provides. + if !isGraphOptimized { + var ( + didOptimize bool + newGraph *pkggraph.PkgGraph + newGoalNode *pkggraph.PkgNode + ) + didOptimize, newGraph, newGoalNode, err = updateGraphWithImplicitProvides(res, pkgGraph, graphMutex, useCachedImplicit) + if err != nil { + // Failures to manipulate the graph are fatal. + // There is no guarantee the graph is still a directed acyclic graph and is solvable. + stopBuilding = true + stopBuild(channels, buildState) + } else if didOptimize { + isGraphOptimized = true + // Replace the graph and goal node pointers. + // Any outstanding builds of nodes that are no longer in the graph will gracefully handle this. + // When querying their edges, the graph library will return an empty iterator (graph.Empty). + pkgGraph = newGraph + goalNode = newGoalNode + } + } + + nodesToBuild = schedulerutils.FindUnblockedNodesFromResult(res, pkgGraph, graphMutex, buildState) + } else if stopOnFailure { + stopBuilding = true + err = res.Err + stopBuild(channels, buildState) + } + } + + // If the goal node is available, mark the build as stopping. + // There may still be outstanding builds if the graph was recently subgraphed + // due to an unresolved implicit provide being satisfied and nodes that are no + // longer in the graph are building. 
+ if buildState.IsNodeAvailable(goalNode) { + logger.Log.Infof("All packages built") + stopBuilding = true + } + + activeSRPMs := buildState.ActiveSRPMs() + activeSRPMsCount := len(activeSRPMs) + if stopBuilding { + if activeSRPMsCount == 0 { + break + } + } + + if res.Node.Type == pkggraph.TypeBuild { + logger.Log.Infof("%d currently active build(s): %v.", activeSRPMsCount, activeSRPMs) + } + } + + // Let the workers know they are done + doneBuild(channels, buildState) + // Give the workers time to finish so they don't mess up the summary we want to print. + // Some nodes may still be busy with long running builds we don't care about anymore, so we don't + // want to actually block here. + time.Sleep(time.Second) + + builtGraph = pkgGraph + schedulerutils.PrintBuildSummary(builtGraph, graphMutex, buildState) + schedulerutils.RecordBuildSummary(builtGraph, graphMutex, buildState, cfg.OutputCSVFile) + + return +} + +// updateGraphWithImplicitProvides will update the graph with new implicit provides if available. +// It will also attempt to subgraph the graph if it becomes solvable with the new implicit provides. +func updateGraphWithImplicitProvides(res *schedulerutils.BuildResult, pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, useCachedImplicit bool) (didOptimize bool, newGraph *pkggraph.PkgGraph, newGoalNode *pkggraph.PkgNode, err error) { + // acquire a writer lock since this routine will collapse nodes + graphMutex.Lock() + defer graphMutex.Unlock() + + didInjectAny, err := schedulerutils.InjectMissingImplicitProvides(res, pkgGraph, useCachedImplicit) + if err != nil { + logger.Log.Errorf("Failed to add implicit provides for (%s). 
Error: %s", res.Node.FriendlyName(), err) + } else if didInjectAny { + // Failure to optimize the graph is non fatal as there may simply be unresolved dynamic dependencies + var subgraphErr error + newGraph, newGoalNode, subgraphErr = schedulerutils.OptimizeGraph(pkgGraph, useCachedImplicit) + if subgraphErr == nil { + logger.Log.Infof("Created solvable subgraph with new implicit provide information") + didOptimize = true + } + } + + return +} + +func drainChannels(channels *schedulerChannels, buildState *schedulerutils.GraphBuildState) { + // For any workers that are currently parked with no buffered requests, close the + // requests channel to wake up any build workers waiting on a request to be buffered. + // Upon being woken up by a closed requests channel, the build worker will stop. + close(channels.Requests) + close(channels.PriorityRequests) + + // Drain the request buffers to sync the build state with the new number of outstanding builds. + for req := range channels.PriorityRequests { + buildState.RemoveBuildRequest(req) + } + for req := range channels.Requests { + buildState.RemoveBuildRequest(req) + } +} + +func doneBuild(channels *schedulerChannels, buildState *schedulerutils.GraphBuildState) { + // Close the done channel. The build workers will finish processing any work, then return + // upon seeing this channel is closed. + close(channels.Done) + + drainChannels(channels, buildState) +} + +// stopBuild will stop all future builds from being scheduled by sending a cancellation signal +// to the worker pool and draining any outstanding build requests. +func stopBuild(channels *schedulerChannels, buildState *schedulerutils.GraphBuildState) { + logger.Log.Error("Stopping build") + + // Close the cancel channel to prevent any buffered requests from being built. + // Upon seeing the cancel channel is closed, the build worker will stop instead + // of processing a new request. 
+ close(channels.Cancel) + + drainChannels(channels, buildState) +} diff --git a/toolkit/tools/scheduler/schedulerutils/buildlist.go b/toolkit/tools/pkg/scheduler/schedulerutils/buildlist.go similarity index 89% rename from toolkit/tools/scheduler/schedulerutils/buildlist.go rename to toolkit/tools/pkg/scheduler/schedulerutils/buildlist.go index e5fcc2293e7..b54bfb1d5ac 100644 --- a/toolkit/tools/scheduler/schedulerutils/buildlist.go +++ b/toolkit/tools/pkg/scheduler/schedulerutils/buildlist.go @@ -7,19 +7,19 @@ import ( "bufio" "os" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/installutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/configuration" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/imagegen/installutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // CalculatePackagesToBuild generates a comprehensive list of all PackageVers that the scheduler should attempt to build. // The build list is a superset of: -// - packagesNamesToBuild, -// - packagesNamesToRebuild, -// - local packages listed in the image config, and -// - kernels in the image config (if built locally). +// - packagesNamesToBuild, +// - packagesNamesToRebuild, +// - local packages listed in the image config, and +// - kernels in the image config (if built locally). 
func CalculatePackagesToBuild(packagesNamesToBuild, packagesNamesToRebuild []string, inputGraphFile, imageConfig, baseDirPath string) (packageVersToBuild []*pkgjson.PackageVer, err error) { packageVersToBuild = convertPackageNamesIntoPackageVers(packagesNamesToBuild) packageVersToBuild = append(packageVersToBuild, convertPackageNamesIntoPackageVers(packagesNamesToRebuild)...) diff --git a/toolkit/tools/scheduler/schedulerutils/buildworker.go b/toolkit/tools/pkg/scheduler/schedulerutils/buildworker.go similarity index 97% rename from toolkit/tools/scheduler/schedulerutils/buildworker.go rename to toolkit/tools/pkg/scheduler/schedulerutils/buildworker.go index 36b555849a1..03844752b4e 100644 --- a/toolkit/tools/scheduler/schedulerutils/buildworker.go +++ b/toolkit/tools/pkg/scheduler/schedulerutils/buildworker.go @@ -9,13 +9,14 @@ import ( "sync" "time" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/retry" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/sliceutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/scheduler/buildagents" "gonum.org/v1/gonum/graph" "gonum.org/v1/gonum/graph/traverse" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/retry" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/sliceutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/scheduler/buildagents" ) // BuildChannels represents the communicate channels used by a build agent. 
diff --git a/toolkit/tools/scheduler/schedulerutils/depsolver.go b/toolkit/tools/pkg/scheduler/schedulerutils/depsolver.go similarity index 97% rename from toolkit/tools/scheduler/schedulerutils/depsolver.go rename to toolkit/tools/pkg/scheduler/schedulerutils/depsolver.go index 69cb489073c..a0a3693cc8b 100644 --- a/toolkit/tools/scheduler/schedulerutils/depsolver.go +++ b/toolkit/tools/pkg/scheduler/schedulerutils/depsolver.go @@ -6,10 +6,11 @@ package schedulerutils import ( "sync" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph" "gonum.org/v1/gonum/graph" "gonum.org/v1/gonum/graph/traverse" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // CanSubGraph returns true if a node can be subgraphed without any unresolved dynamic dependencies. diff --git a/toolkit/tools/scheduler/schedulerutils/graphbuildstate.go b/toolkit/tools/pkg/scheduler/schedulerutils/graphbuildstate.go similarity index 97% rename from toolkit/tools/scheduler/schedulerutils/graphbuildstate.go rename to toolkit/tools/pkg/scheduler/schedulerutils/graphbuildstate.go index 98425976685..b03d3463e3b 100644 --- a/toolkit/tools/scheduler/schedulerutils/graphbuildstate.go +++ b/toolkit/tools/pkg/scheduler/schedulerutils/graphbuildstate.go @@ -7,8 +7,8 @@ import ( "path/filepath" "sort" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // nodeState represents the build state of a single node diff --git a/toolkit/tools/scheduler/schedulerutils/implicitprovides.go b/toolkit/tools/pkg/scheduler/schedulerutils/implicitprovides.go similarity index 97% rename from toolkit/tools/scheduler/schedulerutils/implicitprovides.go 
rename to toolkit/tools/pkg/scheduler/schedulerutils/implicitprovides.go index 8f227d3431b..99a515b6b95 100644 --- a/toolkit/tools/scheduler/schedulerutils/implicitprovides.go +++ b/toolkit/tools/pkg/scheduler/schedulerutils/implicitprovides.go @@ -7,10 +7,10 @@ import ( "fmt" "strings" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/rpm" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // InjectMissingImplicitProvides will inject implicit provide nodes into the graph from a build result if they satisfy any unresolved nodes. diff --git a/toolkit/tools/scheduler/schedulerutils/initializegraph.go b/toolkit/tools/pkg/scheduler/schedulerutils/initializegraph.go similarity index 94% rename from toolkit/tools/scheduler/schedulerutils/initializegraph.go rename to toolkit/tools/pkg/scheduler/schedulerutils/initializegraph.go index 99900ba020a..1e233fdfa15 100644 --- a/toolkit/tools/scheduler/schedulerutils/initializegraph.go +++ b/toolkit/tools/pkg/scheduler/schedulerutils/initializegraph.go @@ -6,9 +6,9 @@ package schedulerutils import ( "fmt" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) const ( diff --git a/toolkit/tools/scheduler/schedulerutils/preparerequest.go b/toolkit/tools/pkg/scheduler/schedulerutils/preparerequest.go similarity index 96% rename from toolkit/tools/scheduler/schedulerutils/preparerequest.go rename to toolkit/tools/pkg/scheduler/schedulerutils/preparerequest.go index 
f7f2b36bed6..b7d8741a17f 100644 --- a/toolkit/tools/scheduler/schedulerutils/preparerequest.go +++ b/toolkit/tools/pkg/scheduler/schedulerutils/preparerequest.go @@ -6,9 +6,9 @@ package schedulerutils import ( "sync" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/sliceutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // ConvertNodesToRequests converts a slice of nodes into a slice of build requests. diff --git a/toolkit/tools/scheduler/schedulerutils/printresults.go b/toolkit/tools/pkg/scheduler/schedulerutils/printresults.go similarity index 98% rename from toolkit/tools/scheduler/schedulerutils/printresults.go rename to toolkit/tools/pkg/scheduler/schedulerutils/printresults.go index 3e7de861d35..7871ad2864c 100644 --- a/toolkit/tools/scheduler/schedulerutils/printresults.go +++ b/toolkit/tools/pkg/scheduler/schedulerutils/printresults.go @@ -9,8 +9,8 @@ import ( "path/filepath" "sync" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/graph/pkggraph" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" ) // PrintBuildResult prints a build result to the logger. 
diff --git a/toolkit/tools/pkg/specreader/config.go b/toolkit/tools/pkg/specreader/config.go new file mode 100644 index 00000000000..b0909ad8f04 --- /dev/null +++ b/toolkit/tools/pkg/specreader/config.go @@ -0,0 +1,13 @@ +package specreader + +type Config struct { + SpecsDir string + Output string + Workers int + BuildDir string + SrpmsDir string + RpmsDir string + DistTag string + WorkerTar string + RunCheck bool +} diff --git a/toolkit/tools/pkg/specreader/specreader.go b/toolkit/tools/pkg/specreader/specreader.go new file mode 100644 index 00000000000..56dc3bb26a5 --- /dev/null +++ b/toolkit/tools/pkg/specreader/specreader.go @@ -0,0 +1,552 @@ +package specreader + +import ( + "encoding/json" + "fmt" + "path/filepath" + "sort" + "strings" + "sync" + + "github.com/jinzhu/copier" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/buildpipeline" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/directory" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/rpm" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/safechroot" +) + +// parseResult holds the worker results from parsing a SPEC file. +type parseResult struct { + packages []*pkgjson.Package + err error +} + +func (cfg *Config) ParseSPECsWrapper() error { + return parseSPECsWrapper(cfg.BuildDir, cfg.SpecsDir, cfg.RpmsDir, cfg.SrpmsDir, cfg.DistTag, cfg.Output, cfg.WorkerTar, cfg.Workers, cfg.RunCheck) + +} + +// parseSPECsWrapper wraps parseSPECs to conditionally run it inside a chroot. +// If workerTar is non-empty, parsing will occur inside a chroot, otherwise it will run on the host system. 
+func parseSPECsWrapper(buildDir, specsDir, rpmsDir, srpmsDir, distTag, outputFile, workerTar string, workers int, runCheck bool) (err error) { + var ( + chroot *safechroot.Chroot + packageRepo *pkgjson.PackageRepo + ) + + if workerTar != "" { + const leaveFilesOnDisk = false + chroot, err = createChroot(workerTar, buildDir, specsDir, srpmsDir) + if err != nil { + return + } + defer chroot.Close(leaveFilesOnDisk) + } + + doParse := func() error { + var parseError error + packageRepo, parseError = parseSPECs(specsDir, rpmsDir, srpmsDir, distTag, workers, runCheck) + return parseError + } + + if chroot != nil { + logger.Log.Info("Parsing SPECs inside a chroot environment") + err = chroot.Run(doParse) + } else { + logger.Log.Info("Parsing SPECs in the host environment") + err = doParse() + } + + if err != nil { + return + } + + b, err := json.MarshalIndent(packageRepo, "", " ") + if err != nil { + logger.Log.Error("Unable to marshal package info JSON") + return + } + + err = file.Write(string(b), outputFile) + if err != nil { + logger.Log.Errorf("Failed to write file (%s)", outputFile) + return + } + + return +} + +// createChroot creates a chroot to parse SPECs inside of. +func createChroot(workerTar, buildDir, specsDir, srpmsDir string) (chroot *safechroot.Chroot, err error) { + const ( + chrootName = "specparser_chroot" + existingDir = false + leaveFilesOnDisk = false + ) + + // Mount the specs and srpms directories to an identical path inside the chroot. + // Since specreader saves the full paths to specs in its output that grapher will then consume, + // the pathing needs to be preserved from the host system. 
+ var extraDirectories []string + + extraMountPoints := []*safechroot.MountPoint{ + safechroot.NewMountPoint(specsDir, specsDir, "", safechroot.BindMountPointFlags, ""), + safechroot.NewMountPoint(srpmsDir, srpmsDir, "", safechroot.BindMountPointFlags, ""), + } + + chrootDir := filepath.Join(buildDir, chrootName) + chroot = safechroot.NewChroot(chrootDir, existingDir) + + err = chroot.Initialize(workerTar, extraDirectories, extraMountPoints) + if err != nil { + return + } + + // If this is not a regular build then copy in all of the SPECs since there are no bind mounts. + if !buildpipeline.IsRegularBuild() { + dirsToCopy := []string{specsDir, srpmsDir} + for _, dir := range dirsToCopy { + dirInChroot := filepath.Join(chroot.RootDir(), dir) + err = directory.CopyContents(dir, dirInChroot) + if err != nil { + closeErr := chroot.Close(leaveFilesOnDisk) + if closeErr != nil { + logger.Log.Errorf("Failed to close chroot, err: %s", err) + } + return + } + } + } + + return +} + +// parseSPECs will parse all specs in specsDir and return a summary of the SPECs. +func parseSPECs(specsDir, rpmsDir, srpmsDir, distTag string, workers int, runCheck bool) (packageRepo *pkgjson.PackageRepo, err error) { + var ( + packageList []*pkgjson.Package + wg sync.WaitGroup + specFiles []string + ) + + packageRepo = &pkgjson.PackageRepo{} + + // Find the filepath for each spec in the SPECS directory. + specSearch, err := filepath.Abs(filepath.Join(specsDir, "**/*.spec")) + if err == nil { + specFiles, err = filepath.Glob(specSearch) + } + if err != nil { + logger.Log.Errorf("Failed to find *.spec files. Check that %s is the correct directory. Error: %v", specsDir, err) + return + } + + results := make(chan *parseResult, len(specFiles)) + requests := make(chan string, len(specFiles)) + cancel := make(chan struct{}) + + // Start the workers now so they begin working as soon as a new job is buffered. 
+ for i := 0; i < workers; i++ { + wg.Add(1) + go readSpecWorker(requests, results, cancel, &wg, distTag, rpmsDir, srpmsDir, runCheck) + } + + for _, specFile := range specFiles { + requests <- specFile + } + + close(requests) + + // Receive the parsed spec structures from the workers and place them into a list. + for i := 0; i < len(specFiles); i++ { + parseResult := <-results + if parseResult.err != nil { + err = parseResult.err + close(cancel) + break + } + packageList = append(packageList, parseResult.packages...) + } + + logger.Log.Debug("Waiting for outstanding workers to finish") + wg.Wait() + + if err != nil { + return + } + + packageRepo.Repo = packageList + sortPackages(packageRepo) + + return +} + +// sortPackages orders the package lists into reasonable and deterministic orders. +// Sort the main package list by "Name", "Version", "SRPM" +// Sort each nested Requires/BuildRequires by "Name", "Version" +func sortPackages(packageRepo *pkgjson.PackageRepo) { + sort.Slice(packageRepo.Repo, func(i, j int) bool { + iName := packageRepo.Repo[i].Provides.Name + packageRepo.Repo[i].Provides.Version + packageRepo.Repo[i].SrpmPath + jName := packageRepo.Repo[j].Provides.Name + packageRepo.Repo[j].Provides.Version + packageRepo.Repo[j].SrpmPath + return strings.Compare(iName, jName) < 0 + }) + + for _, pkg := range packageRepo.Repo { + sort.Slice(pkg.Requires, func(i, j int) bool { + iName := pkg.Requires[i].Name + pkg.Requires[i].Version + jName := pkg.Requires[j].Name + pkg.Requires[j].Version + return strings.Compare(iName, jName) < 0 + }) + sort.Slice(pkg.BuildRequires, func(i, j int) bool { + iName := pkg.BuildRequires[i].Name + pkg.BuildRequires[i].Version + jName := pkg.BuildRequires[j].Name + pkg.BuildRequires[j].Version + return strings.Compare(iName, jName) < 0 + }) + } +} + +// readspec is a goroutine that takes a full filepath to a spec file and scrapes it into the Specdef structure +// Concurrency is limited by the size of the semaphore channel passed 
in. Too many goroutines at once can deplete +// available filehandles. +func readSpecWorker(requests <-chan string, results chan<- *parseResult, cancel <-chan struct{}, wg *sync.WaitGroup, distTag, rpmsDir, srpmsDir string, runCheck bool) { + const ( + emptyQueryFormat = `` + querySrpm = `%{NAME}-%{VERSION}-%{RELEASE}.src.rpm` + queryProvidedPackages = `rpm %{ARCH}/%{nvra}.rpm\n[provides %{PROVIDENEVRS}\n][requires %{REQUIRENEVRS}\n][arch %{ARCH}\n]` + ) + + defer wg.Done() + + defines := rpm.DefaultDefines(runCheck) + defines[rpm.DistTagDefine] = distTag + + for specfile := range requests { + select { + case <-cancel: + logger.Log.Debug("Cancellation signal received") + return + default: + } + + result := &parseResult{} + + providerList := []*pkgjson.Package{} + buildRequiresList := []*pkgjson.PackageVer{} + sourcedir := filepath.Dir(specfile) + + // Find the SRPM associated with the SPEC. + srpmResults, err := rpm.QuerySPEC(specfile, sourcedir, querySrpm, defines, rpm.QueryHeaderArgument) + if err != nil { + result.err = err + results <- result + continue + } + + srpmPath := filepath.Join(srpmsDir, srpmResults[0]) + + isCompatible, err := rpm.SpecArchIsCompatible(specfile, sourcedir, defines) + if err != nil { + result.err = err + results <- result + continue + } + + if !isCompatible { + logger.Log.Debugf(`Skipping (%s) since it cannot be built on current architecture.`, specfile) + results <- result + continue + } + + // Find every package that the spec provides + queryResults, err := rpm.QuerySPEC(specfile, sourcedir, queryProvidedPackages, defines, rpm.QueryBuiltRPMHeadersArgument) + if err == nil && len(queryResults) != 0 { + providerList, err = parseProvides(rpmsDir, srpmPath, queryResults) + if err != nil { + result.err = err + results <- result + continue + } + } + + // Query the BuildRequires fields from this spec and turn them into an array of PackageVersions + queryResults, err = rpm.QuerySPEC(specfile, sourcedir, emptyQueryFormat, defines, 
rpm.BuildRequiresArgument) + if err == nil && len(queryResults) != 0 { + buildRequiresList, err = parsePackageVersionList(queryResults) + if err != nil { + result.err = err + results <- result + continue + } + } + + // Every package provided by a spec will have the same BuildRequires and SrpmPath + for i := range providerList { + providerList[i].SpecPath = specfile + providerList[i].SourceDir = sourcedir + providerList[i].Requires, err = condensePackageVersionArray(providerList[i].Requires, specfile) + if err != nil { + break + } + + providerList[i].BuildRequires, err = condensePackageVersionArray(buildRequiresList, specfile) + if err != nil { + break + } + } + + if err != nil { + result.err = err + } else { + result.packages = providerList + } + + // Submit the result to the main thread, the deferred function will clear the semaphore. + results <- result + } +} + +// parseProvides parses a newline separated list of Provides, Requires, and Arch from a single spec file. +// Several Provides may be in a row, so for each Provide the parser needs to look ahead for the first line that starts +// with a Require then ingest that line and every subsequent as a Requires until it sees a line that begins with Arch. +// Provide: package +// Require: requiresa = 1.0 +// Require: requiresb +// Arch: noarch +// The return is an array of Package structures, one for each Provides in the spec (implicit and explicit). 
+func parseProvides(rpmsDir, srpmPath string, list []string) (providerlist []*pkgjson.Package, err error) { + var ( + reqlist []*pkgjson.PackageVer + packagearch string + rpmPath string + listEntry []string + sublistEntry []string + ) + + const ( + tag = iota + value = iota + ) + + listEntry = strings.SplitN(list[0], " ", 2) + err = minSliceLength(listEntry, 2) + if err != nil { + return + } + + if listEntry[tag] != "rpm" { + err = fmt.Errorf("first element returned by rpmspec was not an rpm tag: %v", list) + return + } + + rpmPath = filepath.Join(rpmsDir, listEntry[value]) + + logger.Log.Trace(list) + for i := range list { + listEntry = strings.SplitN(list[i], " ", 2) + err = minSliceLength(listEntry, 1) + if err != nil { + return + } + + if listEntry[tag] == "rpm" { + logger.Log.Trace("rpm ", listEntry[value]) + rpmPath = filepath.Join(rpmsDir, listEntry[value]) + } else if listEntry[tag] == "provides" { + logger.Log.Trace("provides ", listEntry[value]) + for _, v := range list[i:] { + sublistEntry = strings.SplitN(v, " ", 2) + err = minSliceLength(sublistEntry, 2) + if err != nil { + return + } + + if sublistEntry[tag] == "requires" { + logger.Log.Trace(" requires ", sublistEntry[value]) + var requirePkgVers []*pkgjson.PackageVer + requirePkgVers, err = parsePackageVersions(sublistEntry[value]) + if err != nil { + return + } + filteredRequirePkgVers := filterOutDynamicDependencies(requirePkgVers) + reqlist = append(reqlist, filteredRequirePkgVers...) 
+ } else if sublistEntry[tag] == "arch" { + logger.Log.Trace(" arch ", sublistEntry[value]) + packagearch = sublistEntry[value] + break + } + } + + var newProviderVer []*pkgjson.PackageVer + newProviderVer, err = parsePackageVersions(listEntry[value]) + if err != nil { + return + } + + providerPkgVer := &pkgjson.Package{ + Provides: newProviderVer[0], + SrpmPath: srpmPath, + RpmPath: rpmPath, + Architecture: packagearch, + Requires: reqlist, + } + + providerlist = append(providerlist, providerPkgVer) + reqlist = nil + } + } + + logger.Log.Tracef("Provider: %+v", providerlist) + + return +} + +// parsePackageVersions takes a package name and splits it into a set of PackageVer structures. +// Normally a list of length 1 is returned, however parsePackageVersions is also responsible for +// identifying if the package name is an "or" condition and returning all options. +func parsePackageVersions(packagename string) (newpkgs []*pkgjson.PackageVer, err error) { + const ( + NameField = iota + ConditionField = iota + VersionField = iota + ) + + packageSplit := strings.Split(packagename, " ") + err = minSliceLength(packageSplit, 1) + if err != nil { + return + } + + // If first character of the packagename is a "(" then its an "or" condition + if packagename[0] == '(' { + return parseOrCondition(packagename) + } + + newpkg := &pkgjson.PackageVer{Name: packageSplit[NameField]} + if len(packageSplit) == 1 { + // Nothing to do, no condition or version was found. 
+ } else if packageSplit[ConditionField] != "or" { + newpkg.Condition = packageSplit[ConditionField] + newpkg.Version = packageSplit[VersionField] + } else { + // Replace the name with the first name that appears in (foo or bar) + substr := packageSplit[NameField][1:] + newpkg.Name = substr + } + + newpkgs = append(newpkgs, newpkg) + return +} + +// parsePackageVersionList takes the output from rpmspec --buildrequires +// and parses it into an array of PackageVersion structures +func parsePackageVersionList(pkgList []string) (pkgVerList []*pkgjson.PackageVer, err error) { + for _, pkgListEntry := range pkgList { + var parsedPkgVers []*pkgjson.PackageVer + parsedPkgVers, err = parsePackageVersions(pkgListEntry) + if err != nil { + return + } + pkgVerList = append(pkgVerList, parsedPkgVers...) + } + return +} + +// condensePackageVersionArray deduplicates entries in an array of Package Versions +// and represents double conditionals in a single PackageVersion structure. +// If a non-blank package version is specified more than twice in a SPEC then return an error. 
+func condensePackageVersionArray(packagelist []*pkgjson.PackageVer, specfile string) (processedPkgList []*pkgjson.PackageVer, err error) { + for _, pkg := range packagelist { + nameMatch := false + for i, processedPkg := range processedPkgList { + if pkg.Name == processedPkg.Name { + nameMatch = true + if processedPkg.Version == "" { + processedPkgList[i].Version = pkg.Version + processedPkgList[i].Condition = pkg.Condition + break + } else if processedPkg.SVersion == "" { + processedPkgList[i].SVersion = pkg.Version + processedPkgList[i].SCondition = pkg.Condition + break + } else if processedPkg.Version == processedPkg.SVersion { + processedPkgList[i].Version = pkg.Version + processedPkgList[i].SVersion = pkg.Version + processedPkgList[i].Condition = pkg.Condition + processedPkgList[i].SCondition = pkg.Condition + break + } else { + err = fmt.Errorf("spec (%s) attempted to set more than two conditions for package (%s)", specfile, processedPkg.Name) + return + } + } + } + if !nameMatch { + var processPkg pkgjson.PackageVer + copier.Copy(&processPkg, pkg) + processedPkgList = append(processedPkgList, &processPkg) + } + } + return +} + +// parseOrCondition splits a package name like (foo or bar) and returns both foo and bar as separate requirements. +func parseOrCondition(packagename string) (versions []*pkgjson.PackageVer, err error) { + logger.Log.Warnf("'OR' clause found (%s), make sure both packages are available. 
Please refer to 'docs/how_it_works/3_package_building.md#or-clauses' for explanation of limitations.", packagename) + packagename = strings.ReplaceAll(packagename, "(", "") + packagename = strings.ReplaceAll(packagename, ")", "") + + packageSplit := strings.Split(packagename, " or ") + err = minSliceLength(packageSplit, 1) + if err != nil { + return + } + + versions = make([]*pkgjson.PackageVer, 0, len(packageSplit)) + for _, condition := range packageSplit { + var parsedPkgVers []*pkgjson.PackageVer + parsedPkgVers, err = parsePackageVersions(condition) + if err != nil { + return + } + versions = append(versions, parsedPkgVers...) + } + + return +} + +// minSliceLength checks that a string slice is >= a minimum length and returns an error +// if the condition is not met. +func minSliceLength(slice []string, minLength int) (err error) { + if len(slice) < minLength { + return fmt.Errorf("slice is not required length (minLength = %d) %+v", minLength, slice) + } + return +} + +// filterOutDynamicDependencies removes dynamic RPM dependencies from pkgVers. +// These entries are automatically injected by RPM when processing an SRPM +// and represent an internal RPM feature requirement. +// +// For example if a SPEC uses a Lua scriplet, RPM will inject a requirement for +// `rpmlib(BuiltinLuaScripts)` so that future RPM invocations on the SRPM know +// what features it needs to properly handle the package. +// +// These dynamic dependencies are not backed by a real package or a provides, but +// are instead an internal notation of RPM itself. Filter these out from the list of +// requirements of actual packages. 
+func filterOutDynamicDependencies(pkgVers []*pkgjson.PackageVer) (filteredPkgVers []*pkgjson.PackageVer) { + const dynamicDependencyPrefix = "rpmlib(" + for _, req := range pkgVers { + if strings.HasPrefix(req.Name, dynamicDependencyPrefix) { + logger.Log.Debugf("Ignoring dynamic dependency: %s", req.Name) + continue + } + filteredPkgVers = append(filteredPkgVers, req) + } + + return +} diff --git a/toolkit/tools/pkg/srpmpacker/config.go b/toolkit/tools/pkg/srpmpacker/config.go new file mode 100644 index 00000000000..cec2ad3dd03 --- /dev/null +++ b/toolkit/tools/pkg/srpmpacker/config.go @@ -0,0 +1,20 @@ +package srpmpacker + +type Config struct { + SpecsDir string + OutDir string + BuildDir string + DistTag string + PackListFile string + RunCheck bool + Workers int + RepackAll bool + NestedSourcesDir bool + SourceURL string + CaCertFile string + TlsClientCert string + TlsClientKey string + WorkerTar string + ValidSignatureLevels []string + SignatureHandling string +} diff --git a/toolkit/tools/pkg/srpmpacker/srpmpacker.go b/toolkit/tools/pkg/srpmpacker/srpmpacker.go new file mode 100644 index 00000000000..b49d81582b1 --- /dev/null +++ b/toolkit/tools/pkg/srpmpacker/srpmpacker.go @@ -0,0 +1,979 @@ +package srpmpacker + +import ( + "bufio" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "strings" + "sync" + "time" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/buildpipeline" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/directory" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/jsonutils" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/network" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/retry" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/rpm" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/safechroot" +) + 
+type fileSignaturesWrapper struct { + FileSignatures map[string]string `json:"Signatures"` +} + +const ( + signatureEnforceString = "enforce" + signatureSkipCheckString = "skip" + signatureUpdateString = "update" +) + +const ( + srpmOutDir = "SRPMS" + srpmSPECDir = "SPECS" + srpmSOURCESDir = "SOURCES" +) + +type fileType int + +const ( + fileTypePatch fileType = iota + fileTypeSource fileType = iota +) + +type signatureHandlingType int + +const ( + signatureEnforce signatureHandlingType = iota + signatureSkipCheck signatureHandlingType = iota + signatureUpdate signatureHandlingType = iota +) + +// sourceRetrievalConfiguration holds information on where to hydrate files from. +type sourceRetrievalConfiguration struct { + localSourceDir string + sourceURL string + caCerts *x509.CertPool + tlsCerts []tls.Certificate + + signatureHandling signatureHandlingType + signatureLookup map[string]string +} + +// packResult holds the worker results from packing a SPEC file into an SRPM. +type packResult struct { + specFile string + srpmFile string + err error +} + +// specState holds the state of a SPEC file: if it should be packed and the resulting SRPM if it is. +type specState struct { + specFile string + srpmFile string + toPack bool + err error +} + +// Create a template configuration that all packed SRPM will be based on. +func newTemplateSrcConfig(cfg *Config) sourceRetrievalConfiguration { + var templateSrcConfig sourceRetrievalConfiguration + + switch cfg.SignatureHandling { + case signatureEnforceString: + templateSrcConfig.signatureHandling = signatureEnforce + case signatureSkipCheckString: + logger.Log.Warn("Skipping signature enforcement") + templateSrcConfig.signatureHandling = signatureSkipCheck + case signatureUpdateString: + logger.Log.Warn("Will update signature files as needed") + templateSrcConfig.signatureHandling = signatureUpdate + default: + logger.Log.Fatalf("Invalid signature handling encountered: %s. 
Allowed: %s", cfg.SignatureHandling, cfg.ValidSignatureLevels) + } + + // Setup remote source configuration + var err error + templateSrcConfig.sourceURL = cfg.SourceURL + templateSrcConfig.caCerts, err = x509.SystemCertPool() + logger.PanicOnError(err, "Received error calling x509.SystemCertPool(). Error: %v", err) + if cfg.CaCertFile != "" { + newCACert, err := ioutil.ReadFile(cfg.CaCertFile) + if err != nil { + logger.Log.Panicf("Invalid CA certificate (%s), error: %s", cfg.CaCertFile, err) + } + + templateSrcConfig.caCerts.AppendCertsFromPEM(newCACert) + } + + if cfg.TlsClientCert != "" && cfg.TlsClientKey != "" { + cert, err := tls.LoadX509KeyPair(cfg.TlsClientCert, cfg.TlsClientKey) + if err != nil { + logger.Log.Panicf("Invalid TLS client key pair (%s) (%s), error: %s", cfg.TlsClientCert, cfg.TlsClientKey, err) + } + + templateSrcConfig.tlsCerts = append(templateSrcConfig.tlsCerts, cert) + } + return templateSrcConfig +} + +func (cfg *Config) CreateAllSRPMsWrapper() error { + templateSrcConfig := newTemplateSrcConfig(cfg) + + // A pack list may be provided, if so only pack this subset. + // If non is provided, pack all srpms. + packList, err := parsePackListFile(cfg.PackListFile) + logger.PanicOnError(err) + return cfg.createAllSRPMsWrapper(cfg.SpecsDir, cfg.DistTag, cfg.BuildDir, cfg.OutDir, cfg.WorkerTar, cfg.Workers, cfg.NestedSourcesDir, cfg.RepackAll, cfg.RunCheck, packList, templateSrcConfig) + +} + +// removeDuplicateStrings will remove duplicate entries from a string slice +func removeDuplicateStrings(packList []string) (deduplicatedPackList []string) { + var ( + packListSet = make(map[string]struct{}) + exists = struct{}{} + ) + + for _, entry := range packList { + packListSet[entry] = exists + } + + for entry := range packListSet { + deduplicatedPackList = append(deduplicatedPackList, entry) + } + + return +} + +// parsePackListFile will parse a list of packages to pack if one is specified. +// Duplicate list entries in the file will be removed. 
+func parsePackListFile(packListFile string) (packList []string, err error) { + if packListFile == "" { + return + } + + file, err := os.Open(packListFile) + if err != nil { + return + } + defer file.Close() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if line != "" { + packList = append(packList, line) + } + } + + if len(packList) == 0 { + err = fmt.Errorf("cannot have empty pack list (%s)", packListFile) + } + + packList = removeDuplicateStrings(packList) + + return +} + +// createAllSRPMsWrapper wraps createAllSRPMs to conditionally run it inside a chroot. +// If workerTar is non-empty, packing will occur inside a chroot, otherwise it will run on the host system. +func (cfg *Config) createAllSRPMsWrapper(specsDir, distTag, buildDir, outDir, workerTar string, workers int, nestedSourcesDir, repackAll, runCheck bool, packList []string, templateSrcConfig sourceRetrievalConfiguration) (err error) { + var chroot *safechroot.Chroot + originalOutDir := outDir + if workerTar != "" { + const leaveFilesOnDisk = false + chroot, buildDir, outDir, specsDir, err = createChroot(workerTar, buildDir, outDir, specsDir) + if err != nil { + return + } + defer chroot.Close(leaveFilesOnDisk) + } + + doCreateAll := func() error { + return createAllSRPMs(specsDir, distTag, buildDir, outDir, workers, nestedSourcesDir, repackAll, runCheck, packList, templateSrcConfig) + } + + if chroot != nil { + logger.Log.Info("Packing SRPMs inside a chroot environment") + err = chroot.Run(doCreateAll) + } else { + logger.Log.Info("Packing SRPMs in the host environment") + err = doCreateAll() + } + + if err != nil { + return + } + + // If this is container build then the bind mounts will not have been created. + // Copy the chroot output to host output folder. 
+ if !buildpipeline.IsRegularBuild() { + srpmsInChroot := filepath.Join(chroot.RootDir(), outDir) + err = directory.CopyContents(srpmsInChroot, originalOutDir) + } + + return +} + +// createAllSRPMs will find all SPEC files in specsDir and pack SRPMs for them if needed. +func createAllSRPMs(specsDir, distTag, buildDir, outDir string, workers int, nestedSourcesDir, repackAll, runCheck bool, packList []string, templateSrcConfig sourceRetrievalConfiguration) (err error) { + logger.Log.Infof("Finding all SPEC files") + + specFiles, err := findSPECFiles(specsDir, packList) + if err != nil { + return + } + + specStates, err := calculateSPECsToRepack(specFiles, distTag, outDir, nestedSourcesDir, repackAll, runCheck, workers) + if err != nil { + return + } + + err = packSRPMs(specStates, distTag, buildDir, runCheck, templateSrcConfig, workers) + return +} + +// findSPECFiles finds all SPEC files that should be considered for packing. +// Takes into consideration a packList if provided. +func findSPECFiles(specsDir string, packList []string) (specFiles []string, err error) { + if len(packList) == 0 { + specSearch := filepath.Join(specsDir, "**/*.spec") + specFiles, err = filepath.Glob(specSearch) + } else { + for _, specName := range packList { + var specFile []string + + specSearch := filepath.Join(specsDir, fmt.Sprintf("**/%s.spec", specName)) + specFile, err = filepath.Glob(specSearch) + + // If a SPEC is in the pack list, it must be packed. + if err != nil { + return + } + if len(specFile) != 1 { + if strings.HasPrefix(specName, "msopenjdk-11") { + logger.Log.Debugf("Ignoring missing match for '%s', which is externally-provided and thus doesn't have a local spec.", specName) + continue + } else { + err = fmt.Errorf("unexpected number of matches (%d) for spec file (%s)", len(specFile), specName) + return + } + } + + specFiles = append(specFiles, specFile[0]) + } + } + + return +} + +// createChroot creates a chroot to pack SRPMs inside of. 
+func createChroot(workerTar, buildDir, outDir, specsDir string) (chroot *safechroot.Chroot, newBuildDir, newOutDir, newSpecsDir string, err error) { + const ( + chrootName = "srpmpacker_chroot" + existingDir = false + leaveFilesOnDisk = false + + outMountPoint = "/output" + specsMountPoint = "/specs" + buildDirInChroot = "/build" + ) + + extraMountPoints := []*safechroot.MountPoint{ + safechroot.NewMountPoint(outDir, outMountPoint, "", safechroot.BindMountPointFlags, ""), + safechroot.NewMountPoint(specsDir, specsMountPoint, "", safechroot.BindMountPointFlags, ""), + } + + extraDirectories := []string{ + buildDirInChroot, + } + + newBuildDir = buildDirInChroot + newOutDir = outMountPoint + newSpecsDir = specsMountPoint + + chrootDir := filepath.Join(buildDir, chrootName) + chroot = safechroot.NewChroot(chrootDir, existingDir) + + err = chroot.Initialize(workerTar, extraDirectories, extraMountPoints) + if err != nil { + return + } + + defer func() { + if err != nil { + closeErr := chroot.Close(leaveFilesOnDisk) + if closeErr != nil { + logger.Log.Errorf("Failed to close chroot, err: %s", closeErr) + } + } + }() + + // If this is container build then the bind mounts will not have been created. + if !buildpipeline.IsRegularBuild() { + // Copy in all of the SPECs so they can be packed. + specsInChroot := filepath.Join(chroot.RootDir(), newSpecsDir) + err = directory.CopyContents(specsDir, specsInChroot) + if err != nil { + return + } + + // Copy any prepacked srpms so they will not be repacked. + srpmsInChroot := filepath.Join(chroot.RootDir(), newOutDir) + err = directory.CopyContents(outDir, srpmsInChroot) + if err != nil { + return + } + } + + // Networking support is needed to download sources. + files := []safechroot.FileToCopy{ + {Src: "/etc/resolv.conf", Dest: "/etc/resolv.conf"}, + } + + err = chroot.AddFiles(files...) + return +} + +// calculateSPECsToRepack will check which SPECs should be packed. 
+// If the resulting SRPM does not exist, or is older than a modification to +// one of the files used by the SPEC then it is repacked. +func calculateSPECsToRepack(specFiles []string, distTag, outDir string, nestedSourcesDir, repackAll, runCheck bool, workers int) (states []*specState, err error) { + var wg sync.WaitGroup + + requests := make(chan string, len(specFiles)) + results := make(chan *specState, len(specFiles)) + cancel := make(chan struct{}) + + logger.Log.Infof("Calculating SPECs to repack") + + // Start the workers now so they begin working as soon as a new job is buffered. + for i := 0; i < workers; i++ { + wg.Add(1) + go specsToPackWorker(requests, results, cancel, &wg, distTag, outDir, nestedSourcesDir, repackAll, runCheck) + } + + for _, specFile := range specFiles { + requests <- specFile + } + + // Signal to the workers that there are no more new spec files + close(requests) + + // Transfer the results from the channel into states. + // + // While the channel itself could be returned and passed to the consumer of + // the results, additional functionality would have to be added to limit the total workers + // in use at any given time. + // + // Since this worker pool and future worker pools in the application are opening file descriptors + // if too many are active at once it can exhaust the file descriptor limit. + // Currently all functions that employ workers pool of size `workers` are serialized, + // resulting in `workers` being the upper capacity at any given time. + totalToRepack := 0 + states = make([]*specState, len(specFiles)) + for i := 0; i < len(specFiles); i++ { + result := <-results + states[i] = result + + if result.err != nil { + logger.Log.Errorf("Failed to check (%s). 
Error: %s", result.specFile, result.err) + err = result.err + close(cancel) + break + } + + if result.toPack { + totalToRepack++ + } + } + + logger.Log.Debug("Waiting for outstanding workers to finish") + wg.Wait() + + if err != nil { + return + } + + logger.Log.Infof("Packing %d/%d SPECs", totalToRepack, len(specFiles)) + return +} + +// specsToPackWorker will process a channel of spec files that should be checked if packing is needed. +func specsToPackWorker(requests <-chan string, results chan<- *specState, cancel <-chan struct{}, wg *sync.WaitGroup, distTag, outDir string, nestedSourcesDir, repackAll, runCheck bool) { + const ( + queryFormat = `%{NAME}-%{VERSION}-%{RELEASE}.src.rpm` + nestedSourceDirName = "SOURCES" + ) + + const ( + srpmQueryResultsIndex = iota + expectedQueryResultsLen = iota + ) + + defer wg.Done() + + for specFile := range requests { + select { + case <-cancel: + logger.Log.Debug("Cancellation signal received") + return + default: + } + + result := &specState{ + specFile: specFile, + } + + containingDir := filepath.Dir(specFile) + + // Find the SRPM that this SPEC will produce. + defines := rpm.DefaultDefines(runCheck) + defines[rpm.DistTagDefine] = distTag + + // Allow the user to configure if the SPEC sources are in a nested 'SOURCES' directory. + // Otherwise assume source files are next to the SPEC file. 
+ sourceDir := containingDir + if nestedSourcesDir { + sourceDir = filepath.Join(sourceDir, nestedSourceDirName) + } + specQueryResults, err := rpm.QuerySPEC(specFile, sourceDir, queryFormat, defines, rpm.QueryHeaderArgument) + + if err != nil { + if err.Error() == rpm.NoCompatibleArchError { + logger.Log.Infof("Skipping SPEC (%s) due to incompatible build architecture", specFile) + } else { + result.err = err + } + + results <- result + continue + } + + if len(specQueryResults) != expectedQueryResultsLen { + result.err = fmt.Errorf("unexpected query results, wanted (%d) results but got (%d), results: %v", expectedQueryResultsLen, len(specQueryResults), specQueryResults) + results <- result + continue + } + + // Resolve the full path of the SRPM that would be packed from this SPEC file. + producedSRPM := specQueryResults[srpmQueryResultsIndex] + fullSRPMPath := filepath.Join(outDir, producedSRPM) + result.srpmFile = fullSRPMPath + + if repackAll { + result.toPack = true + results <- result + continue + } + + // Sanity check that SRPMS is meant to be built for the machine architecture + isCompatible, err := rpm.SpecArchIsCompatible(specFile, sourceDir, defines) + if err != nil { + result.err = err + results <- result + continue + } + + if !isCompatible { + logger.Log.Infof(`Skipping (%s) since it cannot be built on current architecture.`, specFile) + results <- result + continue + } + + // Check if the SRPM is already on disk and if so its modification time. + srpmInfo, err := os.Stat(fullSRPMPath) + if err != nil { + logger.Log.Debugf("Updating (%s) since (%s) is not yet built", specFile, fullSRPMPath) + result.toPack = true + results <- result + continue + } + + // Check if a file used by the SPEC has been modified since the resulting SRPM was previously packed. + specModTime, latestFile, err := directory.LastModifiedFile(containingDir) + if err != nil { + result.err = fmt.Errorf("failed to query modification time for SPEC (%s). 
Error: %s", specFile, err) + results <- result + continue + } + + if specModTime.After(srpmInfo.ModTime()) { + logger.Log.Debugf("Updating (%s) since (%s) has changed", specFile, latestFile) + result.toPack = true + } + + results <- result + } +} + +// packSRPMs will pack any SPEC files that have been marked as `toPack`. +func packSRPMs(specStates []*specState, distTag, buildDir string, runCheck bool, templateSrcConfig sourceRetrievalConfiguration, workers int) (err error) { + var wg sync.WaitGroup + + allSpecStates := make(chan *specState, len(specStates)) + results := make(chan *packResult, len(specStates)) + cancel := make(chan struct{}) + + // Start the workers now so they begin working as soon as a new job is buffered. + for i := 0; i < workers; i++ { + wg.Add(1) + go packSRPMWorker(allSpecStates, results, cancel, &wg, distTag, buildDir, runCheck, templateSrcConfig) + } + + for _, state := range specStates { + allSpecStates <- state + } + + // Signal to the workers that there are no more new spec files + close(allSpecStates) + + for i := 0; i < len(specStates); i++ { + result := <-results + + if result.err != nil { + logger.Log.Errorf("Failed to pack (%s). Error: %s", result.specFile, result.err) + err = result.err + close(cancel) + break + } + + // Skip results for states that were not packed by request + if result.srpmFile == "" { + continue + } + + logger.Log.Infof("Packed (%s) -> (%s)", filepath.Base(result.specFile), filepath.Base(result.srpmFile)) + } + + logger.Log.Debug("Waiting for outstanding workers to finish") + wg.Wait() + + return +} + +// packSRPMWorker will process a channel of SPECs and pack any that are marked as toPack. 
+func packSRPMWorker(allSpecStates <-chan *specState, results chan<- *packResult, cancel <-chan struct{}, wg *sync.WaitGroup, distTag, buildDir string, runCheck bool, templateSrcConfig sourceRetrievalConfiguration) { + defer wg.Done() + + for specState := range allSpecStates { + select { + case <-cancel: + logger.Log.Debug("Cancellation signal received") + return + default: + } + + result := &packResult{ + specFile: specState.specFile, + } + + // Its a no-op if the SPEC does not need to be packed + if !specState.toPack { + results <- result + continue + } + + // Setup a source retrieval configuration based on the provided template + signaturesFilePath := specPathToSignaturesPath(specState.specFile) + srcConfig, err := initializeSourceConfig(templateSrcConfig, signaturesFilePath) + if err != nil { + result.err = err + results <- result + continue + } + + fullOutDirPath := filepath.Dir(specState.srpmFile) + err = os.MkdirAll(fullOutDirPath, os.ModePerm) + if err != nil { + result.err = err + results <- result + continue + } + + outputPath, err := packSingleSPEC(specState.specFile, specState.srpmFile, signaturesFilePath, buildDir, fullOutDirPath, distTag, runCheck, srcConfig) + if err != nil { + result.err = err + results <- result + continue + } + + result.srpmFile = outputPath + + results <- result + } +} + +func specPathToSignaturesPath(specFilePath string) string { + const ( + specSuffix = ".spec" + signatureFileSuffix = "signatures.json" + ) + + specName := strings.TrimSuffix(filepath.Base(specFilePath), specSuffix) + signatureFileName := fmt.Sprintf("%s.%s", specName, signatureFileSuffix) + signatureFileDirPath := filepath.Dir(specFilePath) + + return filepath.Join(signatureFileDirPath, signatureFileName) +} + +func initializeSourceConfig(templateSrcConfig sourceRetrievalConfiguration, signaturesFilePath string) (srcConfig sourceRetrievalConfiguration, err error) { + srcConfig = templateSrcConfig + srcConfig.localSourceDir = filepath.Dir(signaturesFilePath) + + 
// Read the signatures file for the SPEC sources if applicable + if srcConfig.signatureHandling != signatureSkipCheck { + srcConfig.signatureLookup, err = readSignatures(signaturesFilePath) + } + + return srcConfig, err +} + +func readSignatures(signaturesFilePath string) (readSignatures map[string]string, err error) { + var signaturesWrapper fileSignaturesWrapper + signaturesWrapper.FileSignatures = make(map[string]string) + + err = jsonutils.ReadJSONFile(signaturesFilePath, &signaturesWrapper) + if err != nil { + if os.IsNotExist(err) { + // Non-fatal as some SPECs may not have sources + logger.Log.Debugf("The signatures file (%s) doesn't exist, will not pre-populate signatures.", signaturesFilePath) + err = nil + } else { + logger.Log.Errorf("Failed to read the signatures file (%s): %v.", signaturesFilePath, err) + } + } + + return signaturesWrapper.FileSignatures, err +} + +// packSingleSPEC will pack a given SPEC file into an SRPM. +func packSingleSPEC(specFile, srpmFile, signaturesFile, buildDir, outDir, distTag string, runCheck bool, srcConfig sourceRetrievalConfiguration) (outputPath string, err error) { + srpmName := filepath.Base(srpmFile) + workingDir := filepath.Join(buildDir, srpmName) + + logger.Log.Debugf("Working directory: %s", workingDir) + + err = os.MkdirAll(workingDir, os.ModePerm) + if err != nil { + return + } + defer cleanupSRPMWorkingDir(workingDir) + + // Make the folder structure needed for rpmbuild + err = createRPMBuildFolderStructure(workingDir) + if err != nil { + return + } + + // Copy the SPEC file in + srpmSpecFile := filepath.Join(workingDir, srpmSPECDir, filepath.Base(specFile)) + err = file.Copy(specFile, srpmSpecFile) + if err != nil { + return + } + + // Track the current signatures of source files used by the SPEC. + // This will only contain signatures that have either been validated or updated by this tool. 
+ currentSignatures := make(map[string]string) + + defines := rpm.DefaultDefines(runCheck) + if distTag != "" { + defines[rpm.DistTagDefine] = distTag + } + + // Hydrate all patches. Exclusively using `sourceDir` + err = hydrateFiles(fileTypePatch, specFile, workingDir, srcConfig, currentSignatures, defines) + if err != nil { + return + } + + // Hydrate all sources. Download any missing ones not in `sourceDir` + err = hydrateFiles(fileTypeSource, specFile, workingDir, srcConfig, currentSignatures, defines) + if err != nil { + return + } + + err = updateSignaturesIfApplicable(signaturesFile, srcConfig, currentSignatures) + + // Build the SRPM itself, using `workingDir` as the topdir + err = rpm.GenerateSRPMFromSPEC(specFile, workingDir, defines) + if err != nil { + return + } + + // Save the output of the build to `outDir` + outputPath, err = copyOutput(workingDir, outDir) + return +} + +func updateSignaturesIfApplicable(signaturesFile string, srcConfig sourceRetrievalConfiguration, currentSignatures map[string]string) (err error) { + if srcConfig.signatureHandling == signatureUpdate && !reflect.DeepEqual(srcConfig.signatureLookup, currentSignatures) { + logger.Log.Infof("Updating (%s)", signaturesFile) + + outputSignatures := fileSignaturesWrapper{ + FileSignatures: currentSignatures, + } + + err = jsonutils.WriteJSONFile(signaturesFile, outputSignatures) + if err != nil { + logger.Log.Warnf("Unable to update signatures file (%s)", signaturesFile) + return + } + } + + return +} + +func createRPMBuildFolderStructure(workingDir string) (err error) { + dirsToCreate := []string{ + srpmSOURCESDir, + srpmSPECDir, + srpmOutDir, + } + + for _, dir := range dirsToCreate { + err = os.MkdirAll(path.Join(workingDir, dir), os.ModePerm) + if err != nil { + return + } + } + + return +} + +// readSPECTagArray will return an array of tag values from the given specfile. +// (e.g. 
all SOURCE entries) +func readSPECTagArray(specFile, sourceDir, tag string, defines map[string]string) (tagValues []string, err error) { + queryFormat := fmt.Sprintf(`[%%{%s}\n]`, tag) + return rpm.QuerySPEC(specFile, sourceDir, queryFormat, defines, rpm.QueryHeaderArgument) +} + +// hydrateFiles will attempt to retrieve all sources needed to build an SRPM from a SPEC. +// Will alter `currentSignatures`, +func hydrateFiles(fileTypeToHydrate fileType, specFile, workingDir string, srcConfig sourceRetrievalConfiguration, currentSignatures, defines map[string]string) (err error) { + const ( + downloadMissingPatchFiles = false + skipPatchSignatures = true + + downloadMissingSourceFiles = true + skipSourceSignatures = false + + patchTag = "PATCH" + sourceTag = "SOURCE" + ) + + var ( + specTag string + hydrateRemotely bool + skipSignatureHandling bool + ) + + switch fileTypeToHydrate { + case fileTypePatch: + specTag = patchTag + hydrateRemotely = downloadMissingPatchFiles + skipSignatureHandling = skipPatchSignatures + case fileTypeSource: + specTag = sourceTag + hydrateRemotely = downloadMissingSourceFiles + skipSignatureHandling = skipSourceSignatures + default: + return fmt.Errorf("invalid filetype (%d)", fileTypeToHydrate) + } + + newSourceDir := filepath.Join(workingDir, srpmSOURCESDir) + fileHydrationState := make(map[string]bool) + + // Collect a list of files of type `specTag` needed for this SRPM + filesNeeded, err := readSPECTagArray(specFile, srcConfig.localSourceDir, specTag, defines) + if err != nil { + return + } + + for _, fileNeeded := range filesNeeded { + fileHydrationState[fileNeeded] = false + } + + // If the user provided an existing source dir, prefer it over remote sources. + if srcConfig.localSourceDir != "" { + err = hydrateFromLocalSource(fileHydrationState, newSourceDir, srcConfig, skipSignatureHandling, currentSignatures) + // On error warn and default to hydrating from an external server. 
+ if err != nil { + logger.Log.Warnf("Error hydrating from local source directory (%s): %v", srcConfig.localSourceDir, err) + } + } + + if hydrateRemotely && srcConfig.sourceURL != "" { + hydrateFromRemoteSource(fileHydrationState, newSourceDir, srcConfig, skipSignatureHandling, currentSignatures) + } + + for fileNeeded, alreadyHydrated := range fileHydrationState { + if !alreadyHydrated { + err = fmt.Errorf("unable to hydrate file: %s", fileNeeded) + logger.Log.Error(err) + } + } + + return +} + +// hydrateFromLocalSource will update fileHydrationState. +// Will alter currentSignatures. +func hydrateFromLocalSource(fileHydrationState map[string]bool, newSourceDir string, srcConfig sourceRetrievalConfiguration, skipSignatureHandling bool, currentSignatures map[string]string) (err error) { + err = filepath.Walk(srcConfig.localSourceDir, func(path string, info os.FileInfo, err error) error { + isFile, _ := file.IsFile(path) + if !isFile { + return nil + } + + fileName := filepath.Base(path) + + isHydrated, found := fileHydrationState[fileName] + if !found { + return nil + } + + if isHydrated { + logger.Log.Warnf("Duplicate matching file found at (%s), skipping", path) + return nil + } + + if !skipSignatureHandling { + err = validateSignature(path, srcConfig, currentSignatures) + if err != nil { + logger.Log.Warn(err.Error()) + return nil + } + } + + err = file.Copy(path, filepath.Join(newSourceDir, fileName)) + if err != nil { + logger.Log.Warnf("Failed to copy file (%s), skipping. Error: %s", path, err) + return nil + } + + logger.Log.Debugf("Hydrated (%s) from (%s)", fileName, path) + + fileHydrationState[fileName] = true + return nil + }) + + return +} + +// hydrateFromRemoteSource will update fileHydrationState. +// Will alter `currentSignatures`. 
+func hydrateFromRemoteSource(fileHydrationState map[string]bool, newSourceDir string, srcConfig sourceRetrievalConfiguration, skipSignatureHandling bool, currentSignatures map[string]string) { + const ( + downloadRetryAttempts = 3 + downloadRetryDuration = time.Second + ) + + for fileName, alreadyHydrated := range fileHydrationState { + if alreadyHydrated { + continue + } + + destinationFile := filepath.Join(newSourceDir, fileName) + + url := network.JoinURL(srcConfig.sourceURL, fileName) + + err := retry.Run(func() error { + err := network.DownloadFile(url, destinationFile, srcConfig.caCerts, srcConfig.tlsCerts) + if err != nil { + logger.Log.Warnf("Failed to download (%s). Error: %s", url, err) + } + + return err + }, downloadRetryAttempts, downloadRetryDuration) + + if err != nil { + continue + } + + if !skipSignatureHandling { + err = validateSignature(destinationFile, srcConfig, currentSignatures) + if err != nil { + logger.Log.Warn(err.Error()) + + // If the delete fails, just warn as there will be another cleanup + // attempt when exiting the program. + err = os.Remove(destinationFile) + if err != nil { + logger.Log.Warnf("Failed to delete file (%s). Error: %s", destinationFile, err) + } + + continue + } + } + + fileHydrationState[fileName] = true + logger.Log.Debugf("Hydrated (%s) from (%s)", fileName, url) + } +} + +// validateSignature will compare the SHA256 of the file at path against the signature for it in srcConfig.signatureLookup +// Will skip if signature handling is set to skip. +// Will alter `currentSignatures`. +func validateSignature(path string, srcConfig sourceRetrievalConfiguration, currentSignatures map[string]string) (err error) { + if srcConfig.signatureHandling == signatureSkipCheck { + return + } + + fileName := filepath.Base(path) + expectedSignature, found := srcConfig.signatureLookup[fileName] + if !found && srcConfig.signatureHandling != signatureUpdate { + err = fmt.Errorf("no signature for file (%s) found. 
full path is (%s)", fileName, path) + return + } + + newSignature, err := file.GenerateSHA256(path) + if err != nil { + return + } + + if strings.EqualFold(expectedSignature, newSignature) { + currentSignatures[fileName] = newSignature + } else { + if srcConfig.signatureHandling == signatureUpdate { + logger.Log.Warnf("Updating signature for (%s) from (%s) to (%s)", fileName, expectedSignature, newSignature) + currentSignatures[fileName] = newSignature + } else { + return fmt.Errorf("file (%s) has mismatching signature: expected (%s) - actual (%s)", path, expectedSignature, newSignature) + } + } + + return +} + +// copyOutput will copy the built SRPMs from workingDir to the specified output directory. +func copyOutput(workingDir, outDir string) (outputPath string, err error) { + rpmbuildOutDir := filepath.Join(workingDir, srpmOutDir) + err = filepath.Walk(rpmbuildOutDir, func(path string, info os.FileInfo, err error) error { + isFile, _ := file.IsFile(path) + if !isFile { + return nil + } + outputPath = filepath.Join(outDir, filepath.Base(path)) + return file.Copy(path, outputPath) + }) + + return +} + +// cleanupSRPMWorkingDir will delete the working directory for the SRPM build. 
+func cleanupSRPMWorkingDir(workingDir string) { + err := os.RemoveAll(workingDir) + if err != nil { + logger.Log.Warnf("Unable to cleanup working directory: %s", workingDir) + } +} diff --git a/toolkit/tools/pkg/validatechroot/config.go b/toolkit/tools/pkg/validatechroot/config.go new file mode 100644 index 00000000000..ada3240a065 --- /dev/null +++ b/toolkit/tools/pkg/validatechroot/config.go @@ -0,0 +1,9 @@ +package validatechroot + +type Config struct { + ToolchainRpmsDir string + TmpDir string + WorkerTar string + WorkerManifest string + LeaveChrootFilesOnDisk bool +} diff --git a/toolkit/tools/pkg/validatechroot/validatechroot.go b/toolkit/tools/pkg/validatechroot/validatechroot.go new file mode 100644 index 00000000000..560681d5088 --- /dev/null +++ b/toolkit/tools/pkg/validatechroot/validatechroot.go @@ -0,0 +1,100 @@ +package validatechroot + +import ( + "fmt" + "path" + "path/filepath" + "regexp" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/safechroot" +) + +func (cfg *Config) Validate() error { + return cfg.validateWorker(cfg.ToolchainRpmsDir, cfg.TmpDir, cfg.WorkerTar, cfg.WorkerManifest) +} + +func (cfg *Config) validateWorker(rpmsDir, chrootDir, workerTarPath, manifestPath string) (err error) { + const ( + chrootToolchainRpmsDir = "/toolchainrpms" + isExistingDir = false + ) + + var ( + chroot *safechroot.Chroot + // Every valid line will be of the form: -..rpm + packageArchLookupRegex = regexp.MustCompile(`^.+(?Px86_64|aarch64|noarch)\.rpm$`) + ) + + // Ensure that if initialization fails, the chroot is closed + defer func() { + if chroot != nil { + closeErr := chroot.Close(cfg.LeaveChrootFilesOnDisk) + if closeErr != nil { + logger.Log.Panicf("Unable to close chroot on failed initialization. 
Error: %s", closeErr) + } + } + }() + + logger.Log.Infof("Creating chroot environment to validate '%s' against '%s'", workerTarPath, manifestPath) + + chroot = safechroot.NewChroot(chrootDir, isExistingDir) + rpmMount := safechroot.NewMountPoint(rpmsDir, chrootToolchainRpmsDir, "", safechroot.BindMountPointFlags, "") + extraDirectories := []string{chrootToolchainRpmsDir} + rpmMounts := []*safechroot.MountPoint{rpmMount} + err = chroot.Initialize(workerTarPath, extraDirectories, rpmMounts) + if err != nil { + chroot = nil + return + } + + manifestEntries, err := file.ReadLines(manifestPath) + if err != nil { + return + } + badEntries := make(map[string]string) + + err = chroot.Run(func() (err error) { + for _, rpm := range manifestEntries { + archMatches := packageArchLookupRegex.FindStringSubmatch(rpm) + if len(archMatches) != 2 { + logger.Log.Errorf("%v", archMatches) + return fmt.Errorf("'%s' is an invalid rpm file path", rpm) + } + arch := archMatches[1] + rpmPath := path.Join(chrootToolchainRpmsDir, arch, rpm) + + // --replacepkgs instructs RPM to gracefully re-install a package, including checking dependencies + args := []string{ + "-ihv", + "--replacepkgs", + "--nosignature", + rpmPath, + } + logger.Log.Infof("Validating %s", filepath.Base(rpmPath)) + stdout, stderr, err := shell.Execute("rpm", args...) 
+ + logger.Log.Debug(stdout) + + if err != nil || len(stderr) > 0 { + logger.Log.Warn(stderr) + if len(stderr) > 0 { + badEntries[rpm] = stderr + } else { + badEntries[rpm] = err.Error() + } + } + } + return + }) + + if len(badEntries) > 0 { + for rpm, errMsg := range badEntries { + logger.Log.Errorf("%s:\n %s", rpm, errMsg) + } + err = fmt.Errorf("found invalid packages in the worker chroot") + } + return +} diff --git a/toolkit/tools/pkgworker/pkgworker.go b/toolkit/tools/pkgworker/pkgworker.go index 2ff21c5f224..446ea9f379a 100644 --- a/toolkit/tools/pkgworker/pkgworker.go +++ b/toolkit/tools/pkgworker/pkgworker.go @@ -8,30 +8,15 @@ package main import ( "fmt" "os" - "path/filepath" - "regexp" "strings" - "time" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/packagerepo/repomanager/rpmrepomanager" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/rpm" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/safechroot" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/sliceutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/tdnf" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/pkgworker" "gopkg.in/alecthomas/kingpin.v2" ) -const ( - chrootRpmBuildRoot = "/usr/src/mariner" - chrootLocalRpmsDir = "/localrpms" - chrootLocalRpmsCacheDir = "/upstream-cached-rpms" -) - var ( app = kingpin.New("pkgworker", "A worker for building packages locally") srpmFile = exe.InputFlag(app, "Full path to the SRPM to build") @@ -53,349 +38,36 @@ var ( logLevel = exe.LogLevelFlag(app) ) -var ( - brPackageNameRegex = regexp.MustCompile(`^[^\s]+`) - equalToRegex = regexp.MustCompile(` '?='? 
`) - greaterThanOrEqualRegex = regexp.MustCompile(` '?>='? [^ ]*`) - installedPackageNameRegex = regexp.MustCompile(`^(.+)(-[^-]+-[^-]+)`) - lessThanOrEqualToRegex = regexp.MustCompile(` '?<='? `) - packageUnavailableRegex = regexp.MustCompile(`^No package \\x1b\[1m\\x1b\[30m(.+) \\x1b\[0mavailable`) -) +func populatePkgworkerConfig() *pkgworker.Config { + return &pkgworker.Config{ + SrpmFile: *srpmFile, + WorkDir: *workDir, + WorkerTar: *workerTar, + RepoFile: *repoFile, + RpmsDirPath: *rpmsDirPath, + SrpmsDirPath: *srpmsDirPath, + CacheDir: *cacheDir, + NoCleanup: *noCleanup, + DistTag: *distTag, + DistroReleaseVersion: *distroReleaseVersion, + DistroBuildNumber: *distroBuildNumber, + RpmmacrosFile: *rpmmacrosFile, + RunCheck: *runCheck, + PackagesToInstall: *packagesToInstall, + } + +} func main() { app.Version(exe.ToolkitVersion) kingpin.MustParse(app.Parse(os.Args[1:])) logger.InitBestEffort(*logFile, *logLevel) - rpmsDirAbsPath, err := filepath.Abs(*rpmsDirPath) - logger.PanicOnError(err, "Unable to find absolute path for RPMs directory '%s'", *rpmsDirPath) - - srpmsDirAbsPath, err := filepath.Abs(*srpmsDirPath) - logger.PanicOnError(err, "Unable to find absolute path for SRPMs directory '%s'", *srpmsDirPath) - - srpmName := strings.TrimSuffix(filepath.Base(*srpmFile), ".src.rpm") - chrootDir := filepath.Join(*workDir, srpmName) - - defines := rpm.DefaultDefines(*runCheck) - defines[rpm.DistTagDefine] = *distTag - defines[rpm.DistroReleaseVersionDefine] = *distroReleaseVersion - defines[rpm.DistroBuildNumberDefine] = *distroBuildNumber - defines[rpm.MarinerModuleLdflagsDefine] = "-Wl,-dT,%{_topdir}/BUILD/module_info.ld" - - builtRPMs, err := buildSRPMInChroot(chrootDir, rpmsDirAbsPath, *workerTar, *srpmFile, *repoFile, *rpmmacrosFile, defines, *noCleanup, *runCheck, *packagesToInstall) + cfg := populatePkgworkerConfig() + builtRPMs, err := cfg.BuildSRPMInChrootAndCopyToOut() logger.PanicOnError(err, "Failed to build SRPM '%s'. 
For details see log file: %s .", *srpmFile, *logFile) - err = copySRPMToOutput(*srpmFile, srpmsDirAbsPath) - logger.PanicOnError(err, "Failed to copy SRPM '%s' to output directory '%s'.", *srpmFile, rpmsDirAbsPath) - // On success write a comma-seperated list of RPMs built to stdout that can be parsed by the invoker. // Any output from logger will be on stderr so stdout will only contain this output. fmt.Printf(strings.Join(builtRPMs, ",")) } - -func copySRPMToOutput(srpmFilePath, srpmOutputDirPath string) (err error) { - const srpmsDirName = "SRPMS" - - srpmFileName := filepath.Base(srpmFilePath) - srpmOutputFilePath := filepath.Join(srpmOutputDirPath, srpmFileName) - - err = file.Copy(srpmFilePath, srpmOutputFilePath) - - return -} - -func buildSRPMInChroot(chrootDir, rpmDirPath, workerTar, srpmFile, repoFile, rpmmacrosFile string, defines map[string]string, noCleanup, runCheck bool, packagesToInstall []string) (builtRPMs []string, err error) { - const ( - buildHeartbeatTimeout = 30 * time.Minute - - existingChrootDir = false - squashErrors = false - - overlaySource = "" - overlayWorkDir = "/overlaywork" - rpmDirName = "RPMS" - ) - - srpmBaseName := filepath.Base(srpmFile) - - quit := make(chan bool) - go func() { - logger.Log.Infof("Building (%s).", srpmBaseName) - - for { - select { - case <-quit: - if err == nil { - logger.Log.Infof("Built (%s) -> %v.", srpmBaseName, builtRPMs) - } - return - case <-time.After(buildHeartbeatTimeout): - logger.Log.Infof("Heartbeat: still building (%s).", srpmBaseName) - } - } - }() - defer func() { - quit <- true - }() - - // Create the chroot used to build the SRPM - chroot := safechroot.NewChroot(chrootDir, existingChrootDir) - - overlayMount, overlayExtraDirs := safechroot.NewOverlayMountPoint(chroot.RootDir(), overlaySource, chrootLocalRpmsDir, rpmDirPath, chrootLocalRpmsDir, overlayWorkDir) - rpmCacheMount := safechroot.NewMountPoint(*cacheDir, chrootLocalRpmsCacheDir, "", safechroot.BindMountPointFlags, "") - mountPoints 
:= []*safechroot.MountPoint{overlayMount, rpmCacheMount} - extraDirs := append(overlayExtraDirs, chrootLocalRpmsCacheDir) - - err = chroot.Initialize(workerTar, extraDirs, mountPoints) - if err != nil { - return - } - defer chroot.Close(noCleanup) - - // Place extra files that will be needed to build into the chroot - srpmFileInChroot, err := copyFilesIntoChroot(chroot, srpmFile, repoFile, rpmmacrosFile, runCheck) - if err != nil { - return - } - - err = chroot.Run(func() (err error) { - return buildRPMFromSRPMInChroot(srpmFileInChroot, runCheck, defines, packagesToInstall) - }) - if err != nil { - return - } - - rpmBuildOutputDir := filepath.Join(chroot.RootDir(), chrootRpmBuildRoot, rpmDirName) - builtRPMs, err = moveBuiltRPMs(rpmBuildOutputDir, rpmDirPath) - - return -} - -func buildRPMFromSRPMInChroot(srpmFile string, runCheck bool, defines map[string]string, packagesToInstall []string) (err error) { - // Convert /localrpms into a repository that a package manager can use. - err = rpmrepomanager.CreateRepo(chrootLocalRpmsDir) - if err != nil { - return - } - - // install any additional packages, such as build dependencies. - err = tdnfInstall(packagesToInstall) - if err != nil { - return - } - - // Remove all libarchive files on the system before issuing a build. - // If the build environment has libtool archive files present, gnu configure - // could detect it and create more libtool archive files which can cause - // build failures. 
- err = removeLibArchivesFromSystem() - if err != nil { - return - } - - // Build the SRPM - if runCheck { - err = rpm.BuildRPMFromSRPM(srpmFile, defines) - } else { - err = rpm.BuildRPMFromSRPM(srpmFile, defines, "--nocheck") - } - - return -} - -func moveBuiltRPMs(rpmOutDir, dstDir string) (builtRPMs []string, err error) { - const rpmExtension = ".rpm" - err = filepath.Walk(rpmOutDir, func(path string, info os.FileInfo, fileErr error) (err error) { - if fileErr != nil { - return fileErr - } - - // Only copy regular files (not unix sockets, directories, links, ...) - if !info.Mode().IsRegular() { - return - } - - if !strings.HasSuffix(path, rpmExtension) { - return - } - - // Get the relative path of the RPM, this will include the architecture directory it lives in. - // Then join the relative path to the destination directory, this will ensure the RPM gets placed - // in its correct architecture directory. - relPath, err := filepath.Rel(rpmOutDir, path) - if err != nil { - return - } - - dstFile := filepath.Join(dstDir, relPath) - err = file.Move(path, dstFile) - if err != nil { - return - } - - builtRPMs = append(builtRPMs, dstFile) - return - }) - - return -} - -func tdnfInstall(packages []string) (err error) { - const ( - alreadyInstalledPostfix = "is already installed" - noMatchingPackagesErr = "Error(1011) : No matching packages" - packageMatchGroup = 1 - ) - - var ( - releaseverCliArg string - ) - - if len(packages) == 0 { - return - } - - // TDNF supports requesting versioned packages in the form of {name}-{version}.{dist}.{arch}. - // The packages to install list may contain file paths to rpm files so those will need to be filtered: - // - Strip any .rpm from packages as TDNF does not support requesting a package with the extension. - // - Strip any filepath from packages. 
- for i := range packages { - packages[i] = filepath.Base(strings.TrimSuffix(packages[i], ".rpm")) - } - - releaseverCliArg, err = tdnf.GetReleaseverCliArg() - if err != nil { - return - } - - installArgs := []string{"install", "-y", releaseverCliArg} - installArgs = append(installArgs, packages...) - stdout, stderr, err := shell.Execute("tdnf", installArgs...) - foundNoMatchingPackages := false - - if err != nil { - logger.Log.Warnf("Failed to install build requirements. stderr: %s\nstdout: %s", stderr, stdout) - // TDNF will output an error if all packages are already installed. - // Ignore it iff there is no other error present in stderr. - splitStderr := strings.Split(stderr, "\n") - for _, line := range splitStderr { - trimmedLine := strings.TrimSpace(line) - if trimmedLine == "" { - continue - } - - if strings.Contains(trimmedLine, noMatchingPackagesErr) { - foundNoMatchingPackages = true - } - - if !strings.HasSuffix(trimmedLine, alreadyInstalledPostfix) && trimmedLine != noMatchingPackagesErr { - err = fmt.Errorf(trimmedLine) - return - } - } - err = nil - } - - // TDNF will ignore unavailable packages that have been requested to be installed without reporting an error code. - // Search the stdout of TDNF for such a failure and warn the user. - // This may happen if a SPEC requires the the path to a tool (e.g. /bin/cp), so mark it as a warning for now. - var failedToInstall []string - splitStdout := strings.Split(stdout, "\n") - for _, line := range splitStdout { - trimmedLine := strings.TrimSpace(line) - matches := packageUnavailableRegex.FindStringSubmatch(trimmedLine) - if len(matches) == 0 { - continue - } - - failedToInstall = append(failedToInstall, matches[packageMatchGroup]) - } - - // TDNF will output the error "Error(1011) : No matching packages" if all packages could not be found. - // In this case it will not print any of the individual packages that failed. 
- if foundNoMatchingPackages && len(failedToInstall) == 0 { - failedToInstall = packages - } - - if len(failedToInstall) != 0 { - err = fmt.Errorf("unable to install the following packages: %v", failedToInstall) - } - - return -} - -// removeLibArchivesFromSystem removes all libarchive files on the system. If -// the build environment has libtool archive files present, gnu configure could -// detect it and create more libtool archive files which can cause build failures. -func removeLibArchivesFromSystem() (err error) { - dirsToExclude := []string{"/proc", "/dev", "/sys", "/run"} - - err = filepath.Walk("/", func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - // Skip directories that are meant for device files and kernel virtual filesystems. - // These will not contain .la files and are mounted into the safechroot from the host. - if info.IsDir() && sliceutils.Contains(dirsToExclude, path, sliceutils.StringMatch) { - return filepath.SkipDir - } - - if strings.HasSuffix(info.Name(), ".la") { - return os.Remove(path) - } - - return nil - }) - - if err != nil { - logger.Log.Warnf("Unable to remove lib archive file: %s", err) - } - - return -} - -// copyFilesIntoChroot copies several required build specific files into the chroot. 
-func copyFilesIntoChroot(chroot *safechroot.Chroot, srpmFile, repoFile, rpmmacrosFile string, runCheck bool) (srpmFileInChroot string, err error) { - const ( - chrootRepoDestDir = "/etc/yum.repos.d" - chrootSrpmDestDir = "/root/SRPMS" - resolvFilePath = "/etc/resolv.conf" - rpmmacrosDest = "/usr/lib/rpm/macros.d/macros.override" - ) - - repoFileInChroot := filepath.Join(chrootRepoDestDir, filepath.Base(repoFile)) - srpmFileInChroot = filepath.Join(chrootSrpmDestDir, filepath.Base(srpmFile)) - - filesToCopy := []safechroot.FileToCopy{ - safechroot.FileToCopy{ - Src: repoFile, - Dest: repoFileInChroot, - }, - safechroot.FileToCopy{ - Src: srpmFile, - Dest: srpmFileInChroot, - }, - } - - if rpmmacrosFile != "" { - rpmmacrosCopy := safechroot.FileToCopy{ - Src: rpmmacrosFile, - Dest: rpmmacrosDest, - } - filesToCopy = append(filesToCopy, rpmmacrosCopy) - } - - if runCheck { - logger.Log.Debug("Enabling network access because we're running package tests.") - - resolvFileCopy := safechroot.FileToCopy{ - Src: resolvFilePath, - Dest: resolvFilePath, - } - filesToCopy = append(filesToCopy, resolvFileCopy) - } - - err = chroot.AddFiles(filesToCopy...) 
- return -} diff --git a/toolkit/tools/roast/roast.go b/toolkit/tools/roast/roast.go index 7c1e133d2c2..7dfda82e4e8 100644 --- a/toolkit/tools/roast/roast.go +++ b/toolkit/tools/roast/roast.go @@ -6,34 +6,16 @@ package main import ( - "fmt" "os" - "path" - "path/filepath" - "github.com/microsoft/CBL-Mariner/toolkit/tools/imagegen/configuration" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/roast/formats" - + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/image/roast" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" "gopkg.in/alecthomas/kingpin.v2" ) const defaultWorkerCount = "10" -type convertRequest struct { - inputPath string - isInputFile bool - artifact configuration.Artifact -} - -type convertResult struct { - artifactName string - originalPath string - convertedFile string -} - var ( app = kingpin.New("roast", "A tool to convert raw disk file into another image type") @@ -53,279 +35,26 @@ var ( imageTag = app.Flag("image-tag", "Tag (text) appended to the image name. Empty by default.").String() ) +func populateRoastConfig() *roast.Config { + return &roast.Config{ + InputDir: *inputDir, + OutputDir: *outputDir, + ConfigFile: *configFile, + TmpDir: *tmpDir, + ReleaseVersion: *releaseVersion, + Workers: *workers, + ImageTag: *imageTag, + } +} + func main() { app.Version(exe.ToolkitVersion) kingpin.MustParse(app.Parse(os.Args[1:])) logger.InitBestEffort(*logFile, *logLevel) - if *workers <= 0 { - logger.Log.Panicf("Value in --workers must be greater than zero. 
Found %d", *workers) - } - - inDirPath, err := filepath.Abs(*inputDir) - if err != nil { - logger.Log.Panicf("Error when calculating input directory path: %s", err) - } - - outDirPath, err := filepath.Abs(*outputDir) - if err != nil { - logger.Log.Panicf("Error when calculating absolute output path: %s", err) - } - - tmpDirPath, err := filepath.Abs(*outputDir) - if err != nil { - logger.Log.Panicf("Error when calculating absolute temporary path: %s", err) - } - - err = os.MkdirAll(outDirPath, os.ModePerm) - if err != nil { - logger.Log.Panicf("Error when creating output directory. Error: %s", err) - } - - config, err := configuration.Load(*configFile) - if err != nil { - logger.Log.Panicf("Failed loading image configuration. Error: %s", err) - } - - err = generateImageArtifacts(*workers, inDirPath, outDirPath, *releaseVersion, *imageTag, tmpDirPath, config) + cfg := populateRoastConfig() + err := cfg.GenerateImageArtifacts() if err != nil { logger.Log.Panic(err) } } - -func generateImageArtifacts(workers int, inDir, outDir, releaseVersion, imageTag, tmpDir string, config configuration.Config) (err error) { - const defaultSystemConfig = 0 - - err = os.MkdirAll(tmpDir, os.ModePerm) - if err != nil { - return - } - - if len(config.Disks) > 1 { - err = fmt.Errorf("this program currently only supports one disk") - return - } - - numberOfArtifacts := 0 - for _, disk := range config.Disks { - numberOfArtifacts += len(disk.Artifacts) - for _, partition := range disk.Partitions { - numberOfArtifacts += len(partition.Artifacts) - } - } - - logger.Log.Infof("Converting (%d) artifacts", numberOfArtifacts) - - convertRequests := make(chan *convertRequest, numberOfArtifacts) - convertedResults := make(chan *convertResult, numberOfArtifacts) - - // Start the workers now so they begin working as soon as a new job is buffered. 
- for i := 0; i < workers; i++ { - go artifactConverterWorker(convertRequests, convertedResults, releaseVersion, tmpDir, imageTag, outDir) - } - - for i, disk := range config.Disks { - for _, artifact := range disk.Artifacts { - inputName, isFile := diskArtifactInput(i, disk) - convertRequests <- &convertRequest{ - inputPath: filepath.Join(inDir, inputName), - isInputFile: isFile, - artifact: artifact, - } - } - - for j, partition := range disk.Partitions { - for _, artifact := range partition.Artifacts { - // Currently only process 1 system config - inputName, isFile := partitionArtifactInput(i, j, &artifact, retrievePartitionSettings(&config.SystemConfigs[defaultSystemConfig], partition.ID)) - convertRequests <- &convertRequest{ - inputPath: filepath.Join(inDir, inputName), - isInputFile: isFile, - artifact: artifact, - } - } - } - } - - close(convertRequests) - - failedArtifacts := []string{} - for i := 0; i < numberOfArtifacts; i++ { - result := <-convertedResults - if result.convertedFile == "" { - failedArtifacts = append(failedArtifacts, result.artifactName) - } else { - logger.Log.Infof("[%d/%d] Converted (%s) -> (%s)", (i + 1), numberOfArtifacts, result.originalPath, result.convertedFile) - } - } - - if len(failedArtifacts) != 0 { - err = fmt.Errorf("failed to generate the following artifacts: %v", failedArtifacts) - } - - return -} - -func retrievePartitionSettings(systemConfig *configuration.SystemConfig, searchedID string) (foundSetting *configuration.PartitionSetting) { - for i := range systemConfig.PartitionSettings { - if systemConfig.PartitionSettings[i].ID == searchedID { - foundSetting = &systemConfig.PartitionSettings[i] - return - } - } - logger.Log.Warningf("Couldn't find partition setting '%s' under system config '%s'", searchedID, systemConfig.Name) - return -} - -func artifactConverterWorker(convertRequests chan *convertRequest, convertedResults chan *convertResult, releaseVersion, tmpDir, imageTag, outDir string) { - const ( - 
initrdArtifactType = "initrd" - ) - - for req := range convertRequests { - fullArtifactName := req.artifact.Name - - // Append release version if necessary - // Note: ISOs creation is a two step process. The first step's initrd artifact type should not append a release version - // since the release version value could change between the end of the first step and the start of the second step. - if req.artifact.Type != initrdArtifactType { - if releaseVersion != "" { - fullArtifactName = fullArtifactName + "-" + releaseVersion - } - } - result := &convertResult{ - artifactName: fullArtifactName, - originalPath: req.inputPath, - } - - workingArtifactPath := req.inputPath - isInputFile := req.isInputFile - - if req.artifact.Type != "" { - const appendExtension = false - outputFile, err := convertArtifact(fullArtifactName, tmpDir, req.artifact.Type, imageTag, workingArtifactPath, isInputFile, appendExtension) - if err != nil { - logger.Log.Errorf("Failed to convert artifact (%s) to type (%s). Error: %s", req.artifact.Name, req.artifact.Type, err) - convertedResults <- result - continue - } - isInputFile = true - workingArtifactPath = outputFile - } - - if req.artifact.Compression != "" { - const appendExtension = true - outputFile, err := convertArtifact(fullArtifactName, tmpDir, req.artifact.Compression, imageTag, workingArtifactPath, isInputFile, appendExtension) - if err != nil { - logger.Log.Errorf("Failed to compress (%s) using (%s). Error: %s", workingArtifactPath, req.artifact.Compression, err) - convertedResults <- result - continue - } - workingArtifactPath = outputFile - } - - if workingArtifactPath == req.inputPath { - logger.Log.Errorf("Artifact (%s) has no type or compression", req.artifact.Name) - } else { - finalFile := filepath.Join(outDir, filepath.Base(workingArtifactPath)) - err := file.Move(workingArtifactPath, finalFile) - if err != nil { - logger.Log.Errorf("Failed to move (%s) to (%s). 
Error: %s", workingArtifactPath, finalFile, err) - } else { - result.convertedFile = finalFile - } - } - - convertedResults <- result - } -} - -func convertArtifact(artifactName, outDir, format, imageTag, input string, isInputFile, appendExtension bool) (outputFile string, err error) { - typeConverter, err := converterFactory(format) - if err != nil { - return - } - - var originalExt string - - if appendExtension { - originalExt = path.Ext(input) - } - - newExt := fmt.Sprintf(".%s", typeConverter.Extension()) - if originalExt != "" { - newExt = fmt.Sprintf("%s%s", originalExt, newExt) - } - - if imageTag != "" { - imageTag = "-" + imageTag - } - - outputPath := filepath.Join(outDir, artifactName) - outputFile = fmt.Sprintf("%s%s%s", outputPath, imageTag, newExt) - - err = typeConverter.Convert(input, outputFile, isInputFile) - return -} - -func converterFactory(formatType string) (converter formats.Converter, err error) { - switch formatType { - case formats.RawType: - converter = formats.NewRaw() - case formats.Ext4Type: - converter = formats.NewExt4() - case formats.DiffType: - converter = formats.NewDiff() - case formats.RdiffType: - converter = formats.NewRdiff() - case formats.GzipType: - converter = formats.NewGzip() - case formats.TarGzipType: - converter = formats.NewTarGzip() - case formats.XzType: - converter = formats.NewXz() - case formats.TarXzType: - converter = formats.NewTarXz() - case formats.VhdType: - const gen2 = false - converter = formats.NewVhd(gen2) - case formats.VhdxType: - const gen2 = true - converter = formats.NewVhd(gen2) - case formats.InitrdType: - converter = formats.NewInitrd() - case formats.OvaType: - converter = formats.NewOva() - default: - err = fmt.Errorf("unsupported output format: %s", formatType) - } - - return -} - -func diskArtifactInput(diskIndex int, disk configuration.Disk) (input string, isFile bool) { - const rootfsPrefix = "rootfs" - - // If there are no paritions, this is a rootfs - if len(disk.Partitions) == 0 { 
- input = rootfsPrefix - } else { - input = fmt.Sprintf("disk%d.raw", diskIndex) - isFile = true - } - - return -} - -func partitionArtifactInput(diskIndex, partitionIndex int, diskPartArtifact *configuration.Artifact, partitionSetting *configuration.PartitionSetting) (input string, isFile bool) { - // Currently all file artifacts have a raw file for input - if diskPartArtifact.Type == "diff" && partitionSetting.OverlayBaseImage != "" { - input = fmt.Sprintf("disk%d.partition%d.diff", diskIndex, partitionIndex) - } else if diskPartArtifact.Type == "rdiff" && partitionSetting.RdiffBaseImage != "" { - input = fmt.Sprintf("disk%d.partition%d.rdiff", diskIndex, partitionIndex) - } else { - input = fmt.Sprintf("disk%d.partition%d.raw", diskIndex, partitionIndex) - } - isFile = true - return -} diff --git a/toolkit/tools/scheduler/scheduler.go b/toolkit/tools/scheduler/scheduler.go index 502c00a08e3..ef0a4443555 100644 --- a/toolkit/tools/scheduler/scheduler.go +++ b/toolkit/tools/scheduler/scheduler.go @@ -4,24 +4,14 @@ package main import ( - "fmt" "os" - "os/signal" - "runtime" - "sync" - "time" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" - "github.com/microsoft/CBL-Mariner/toolkit/tools/scheduler/buildagents" - "github.com/microsoft/CBL-Mariner/toolkit/tools/scheduler/schedulerutils" - - "github.com/juliangruber/go-intersect" - "golang.org/x/sys/unix" "gopkg.in/alecthomas/kingpin.v2" + + "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/scheduler" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/scheduler/buildagents" ) const ( @@ -30,17 +20,6 @@ const 
( defaultBuildAttempts = "1" ) -// schedulerChannels represents the communication channels used by a build agent. -// Unlike BuildChannels, schedulerChannels holds bidirectional channels that -// only the top-level scheduler should have. BuildChannels contains directional channels. -type schedulerChannels struct { - Requests chan *schedulerutils.BuildRequest - PriorityRequests chan *schedulerutils.BuildRequest - Results chan *schedulerutils.BuildResult - Cancel chan struct{} - Done chan struct{} -} - var ( app = kingpin.New("scheduler", "A tool to schedule package builds from a dependency graph.") @@ -85,377 +64,53 @@ var ( logLevel = exe.LogLevelFlag(app) ) -func main() { - app.Version(exe.ToolkitVersion) - - kingpin.MustParse(app.Parse(os.Args[1:])) - logger.InitBestEffort(*logFile, *logLevel) - - if *workers <= 0 { - *workers = runtime.NumCPU() - logger.Log.Debugf("No worker count supplied, discovered %d logical CPUs.", *workers) - } - - if *buildAttempts <= 0 { - logger.Log.Fatalf("Value in --build-attempts must be greater than zero. Found %d", *buildAttempts) - } - - ignoredPackages := exe.ParseListArgument(*ignoredPackages) - reservedFileListFile := *reservedFileListFile - - // Generate the list of packages that need to be built. - // If none are requested then all packages will be built. - packagesNamesToBuild := exe.ParseListArgument(*pkgsToBuild) - packagesNamesToRebuild := exe.ParseListArgument(*pkgsToRebuild) - - ignoredAndRebuiltPackages := intersect.Hash(ignoredPackages, packagesNamesToRebuild) - if len(ignoredAndRebuiltPackages) != 0 { - logger.Log.Fatalf("Can't ignore and force a rebuild of a package at the same time. 
Abusing packages: %v", ignoredAndRebuiltPackages) - } - - packageVersToBuild, err := schedulerutils.CalculatePackagesToBuild(packagesNamesToBuild, packagesNamesToRebuild, *inputGraphFile, *imageConfig, *baseDirPath) - if err != nil { - logger.Log.Fatalf("Unable to generate package build list, error: %s", err) - } - - var reservedFiles []string - if len(reservedFileListFile) > 0 { - reservedFiles, err = schedulerutils.ReadReservedFilesList(reservedFileListFile) - if err != nil { - logger.Log.Fatalf("unable to read reserved file list %s: %s", reservedFileListFile, err) - } - } - - // Setup a build agent to handle build requests from the scheduler. - buildAgentConfig := &buildagents.BuildAgentConfig{ - Program: *buildAgentProgram, - CacheDir: *cacheDir, - RepoFile: *repoFile, - RpmDir: *rpmDir, - SrpmDir: *srpmDir, - WorkDir: *workDir, - WorkerTar: *workerTar, - +func populateSchedulerConfig() *scheduler.Config { + return &scheduler.Config{ + InputGraphFile: *inputGraphFile, + OutputGraphFile: *outputGraphFile, + OutputCSVFile: *outputCSVFile, + WorkDir: *workDir, + WorkerTar: *workerTar, + RepoFile: *repoFile, + RpmDir: *rpmDir, + SrpmDir: *srpmDir, + CacheDir: *cacheDir, + BuildLogsDir: *buildLogsDir, + ImageConfig: *imageConfig, + BaseDirPath: *baseDirPath, DistTag: *distTag, DistroReleaseVersion: *distroReleaseVersion, DistroBuildNumber: *distroBuildNumber, RpmmacrosFile: *rpmmacrosFile, - - NoCleanup: *noCleanup, - RunCheck: *runCheck, - - LogDir: *buildLogsDir, - LogLevel: *logLevel, - } - - agent, err := buildagents.BuildAgentFactory(*buildAgent) - if err != nil { - logger.Log.Fatalf("Unable to select build agent, error: %s", err) - } - - err = agent.Initialize(buildAgentConfig) - if err != nil { - logger.Log.Fatalf("Unable to initialize build agent, error: %s", err) - } - - // Setup cleanup routines to ensure no builds are left running when scheduler is exiting. 
- // Ensure no outstanding agents are running on graceful exit - defer cancelOutstandingBuilds(agent) - // On a SIGINT or SIGTERM stop all agents. - signals := make(chan os.Signal, 1) - signal.Notify(signals, unix.SIGINT, unix.SIGTERM) - go cancelBuildsOnSignal(signals, agent) - - err = buildGraph(*inputGraphFile, *outputGraphFile, agent, *workers, *buildAttempts, *stopOnFailure, !*noCache, packageVersToBuild, packagesNamesToRebuild, ignoredPackages, reservedFiles, *deltaBuild) - if err != nil { - logger.Log.Fatalf("Unable to build package graph.\nFor details see the build summary section above.\nError: %s", err) - } -} - -// cancelOutstandingBuilds stops any builds that are currently running. -func cancelOutstandingBuilds(agent buildagents.BuildAgent) { - err := agent.Close() - if err != nil { - logger.Log.Errorf("Unable to close build agent, error: %s", err) - } - - // Issue a SIGINT to all children processes to allow them to gracefully exit. - shell.PermanentlyStopAllProcesses(unix.SIGINT) -} - -// cancelBuildsOnSignal will stop any builds running on SIGINT/SIGTERM. -func cancelBuildsOnSignal(signals chan os.Signal, agent buildagents.BuildAgent) { - sig := <-signals - logger.Log.Error(sig) - - cancelOutstandingBuilds(agent) - os.Exit(1) -} - -// buildGraph builds all packages in the dependency graph requested. -// It will save the resulting graph to outputFile. -func buildGraph(inputFile, outputFile string, agent buildagents.BuildAgent, workers, buildAttempts int, stopOnFailure, canUseCache bool, packagesToBuild []*pkgjson.PackageVer, packagesNamesToRebuild, ignoredPackages, reservedFiles []string, deltaBuild bool) (err error) { - // graphMutex guards pkgGraph from concurrent reads and writes during build. 
- var graphMutex sync.RWMutex - - isGraphOptimized, pkgGraph, goalNode, err := schedulerutils.InitializeGraph(inputFile, packagesToBuild, deltaBuild) - if err != nil { - return + BuildAttempts: *buildAttempts, + RunCheck: *runCheck, + NoCleanup: *noCleanup, + NoCache: *noCache, + StopOnFailure: *stopOnFailure, + ReservedFileListFile: *reservedFileListFile, + DeltaBuild: *deltaBuild, + ValidBuildAgentFlags: validBuildAgentFlags, + BuildAgent: *buildAgent, + BuildAgentProgram: *buildAgentProgram, + Workers: *workers, + IgnoredPackages: *ignoredPackages, + PkgsToBuild: *pkgsToBuild, + PkgsToRebuild: *pkgsToRebuild, + LogFile: *logFile, + LogLevel: *logLevel, } - // Setup and start the worker pool and scheduler routine. - numberOfNodes := pkgGraph.Nodes().Len() - - channels := startWorkerPool(agent, workers, buildAttempts, numberOfNodes, &graphMutex, ignoredPackages) - logger.Log.Infof("Building %d nodes with %d workers", numberOfNodes, workers) - - // After this call pkgGraph will be given to multiple routines and accessing it requires acquiring the mutex. - builtGraph, err := buildAllNodes(stopOnFailure, isGraphOptimized, canUseCache, packagesNamesToRebuild, pkgGraph, &graphMutex, goalNode, channels, reservedFiles, deltaBuild) - - if builtGraph != nil { - graphMutex.RLock() - defer graphMutex.RUnlock() - - saveErr := pkggraph.WriteDOTGraphFile(builtGraph, outputFile) - if saveErr != nil { - logger.Log.Errorf("Failed to save built graph, error: %s", saveErr) - } - } - - return } -// startWorkerPool starts the worker pool and returns the communication channels between the workers and the scheduler. -// channelBufferSize controls how many entries in the channels can be buffered before blocking writes to them. 
-func startWorkerPool(agent buildagents.BuildAgent, workers, buildAttempts, channelBufferSize int, graphMutex *sync.RWMutex, ignoredPackages []string) (channels *schedulerChannels) { - channels = &schedulerChannels{ - Requests: make(chan *schedulerutils.BuildRequest, channelBufferSize), - PriorityRequests: make(chan *schedulerutils.BuildRequest, channelBufferSize), - Results: make(chan *schedulerutils.BuildResult, channelBufferSize), - Cancel: make(chan struct{}), - Done: make(chan struct{}), - } - - // Downcast the bidirectional scheduler channels into directional channels for the build workers. - directionalChannels := &schedulerutils.BuildChannels{ - Requests: channels.Requests, - PriorityRequests: channels.PriorityRequests, - Results: channels.Results, - Cancel: channels.Cancel, - Done: channels.Done, - } - - // Start the workers now so they begin working as soon as a new job is queued. - for i := 0; i < workers; i++ { - logger.Log.Debugf("Starting worker #%d", i) - go schedulerutils.BuildNodeWorker(directionalChannels, agent, graphMutex, buildAttempts, ignoredPackages) - } - - return -} - -// buildAllNodes will build all nodes in a given dependency graph. -// This routine only contains control flow logic for build scheduling. -// It iteratively: -// - Calculates any unblocked nodes. -// - Submits these nodes to the worker pool to be processed. -// - Grabs a single build result from the worker pool. -// - Attempts to satisfy any unresolved dynamic dependencies with new implicit provides from the build result. -// - Attempts to subgraph the graph to only contain the requested packages if possible. -// - Repeat. 
-func buildAllNodes(stopOnFailure, isGraphOptimized, canUseCache bool, packagesNamesToRebuild []string, pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, goalNode *pkggraph.PkgNode, channels *schedulerChannels, reservedFiles []string, deltaBuild bool) (builtGraph *pkggraph.PkgGraph, err error) { - var ( - // stopBuilding tracks if the build has entered a failed state and this routine should stop as soon as possible. - stopBuilding bool - // useCachedImplicit tracks if cached implicit provides can be used to satisfy unresolved dynamic dependencies. - // Local packages are preferred over cached remotes ones to satisfy these unresolved dependencies, however - // the scheduler does not know what packages provide which implicit provides until the packages have been built. - // Therefore the scheduler will attempt to build all possible packages without consuming any cached dynamic dependencies first. - useCachedImplicit bool - ) - - // Start the build at the leaf nodes. - // The build will bubble up through the graph as it processes nodes. - buildState := schedulerutils.NewGraphBuildState(reservedFiles) - nodesToBuild := schedulerutils.LeafNodes(pkgGraph, graphMutex, goalNode, buildState, useCachedImplicit) - - for { - logger.Log.Debugf("Found %d unblocked nodes", len(nodesToBuild)) - - // Each node that is ready to build must be converted into a build request and submitted to the worker pool. - newRequests := schedulerutils.ConvertNodesToRequests(pkgGraph, graphMutex, nodesToBuild, packagesNamesToRebuild, buildState, canUseCache, deltaBuild) - for _, req := range newRequests { - buildState.RecordBuildRequest(req) - // Decide which priority the build should be. Generally we want to get any remote or prebuilt nodes out of the - // way as quickly as possible since they may help us optimize the graph early. 
- // Meta nodes may also be blocking something we want to examine and give higher priority (priority inheritance from - // the hypothetical high priority node hidden further into the tree) - switch req.Node.Type { - case pkggraph.TypePreBuilt: - channels.PriorityRequests <- req - - // For now all build nodes are of equal priority - case pkggraph.TypeGoal: - fallthrough - case pkggraph.TypePureMeta: - fallthrough - case pkggraph.TypeRun: - fallthrough - case pkggraph.TypeRemote: - fallthrough - case pkggraph.TypeBuild: - fallthrough - default: - channels.Requests <- req - } - } - nodesToBuild = nil - - // If there are no active builds running try enabling cached packages for unresolved dynamic dependencies to unblocked more nodes. - // Otherwise there is nothing left that can be built. - if len(buildState.ActiveBuilds()) == 0 { - if useCachedImplicit { - err = fmt.Errorf("could not build all packages") - break - } else { - logger.Log.Warn("Enabling cached packages to satisfy unresolved dynamic dependencies.") - useCachedImplicit = true - nodesToBuild = schedulerutils.LeafNodes(pkgGraph, graphMutex, goalNode, buildState, useCachedImplicit) - continue - } - } - - // Process the the next build result - res := <-channels.Results - schedulerutils.PrintBuildResult(res) - buildState.RecordBuildResult(res) - - if !stopBuilding { - if res.Err == nil { - // If the graph has already been optimized and is now solvable without any additional information - // then skip processing any new implicit provides. - if !isGraphOptimized { - var ( - didOptimize bool - newGraph *pkggraph.PkgGraph - newGoalNode *pkggraph.PkgNode - ) - didOptimize, newGraph, newGoalNode, err = updateGraphWithImplicitProvides(res, pkgGraph, graphMutex, useCachedImplicit) - if err != nil { - // Failures to manipulate the graph are fatal. - // There is no guarantee the graph is still a directed acyclic graph and is solvable. 
- stopBuilding = true - stopBuild(channels, buildState) - } else if didOptimize { - isGraphOptimized = true - // Replace the graph and goal node pointers. - // Any outstanding builds of nodes that are no longer in the graph will gracefully handle this. - // When querying their edges, the graph library will return an empty iterator (graph.Empty). - pkgGraph = newGraph - goalNode = newGoalNode - } - } - - nodesToBuild = schedulerutils.FindUnblockedNodesFromResult(res, pkgGraph, graphMutex, buildState) - } else if stopOnFailure { - stopBuilding = true - err = res.Err - stopBuild(channels, buildState) - } - } - - // If the goal node is available, mark the build as stopping. - // There may still be outstanding builds if the graph was recently subgraphed - // due to an unresolved implicit provide being satisfied and nodes that are no - // longer in the graph are building. - if buildState.IsNodeAvailable(goalNode) { - logger.Log.Infof("All packages built") - stopBuilding = true - } - - activeSRPMs := buildState.ActiveSRPMs() - activeSRPMsCount := len(activeSRPMs) - if stopBuilding { - if activeSRPMsCount == 0 { - break - } - } - - if res.Node.Type == pkggraph.TypeBuild { - logger.Log.Infof("%d currently active build(s): %v.", activeSRPMsCount, activeSRPMs) - } - } - - // Let the workers know they are done - doneBuild(channels, buildState) - // Give the workers time to finish so they don't mess up the summary we want to print. - // Some nodes may still be busy with long running builds we don't care about anymore, so we don't - // want to actually block here. - time.Sleep(time.Second) - - builtGraph = pkgGraph - schedulerutils.PrintBuildSummary(builtGraph, graphMutex, buildState) - schedulerutils.RecordBuildSummary(builtGraph, graphMutex, buildState, *outputCSVFile) - - return -} +func main() { + app.Version(exe.ToolkitVersion) -// updateGraphWithImplicitProvides will update the graph with new implicit provides if available. 
-// It will also attempt to subgraph the graph if it becomes solvable with the new implicit provides. -func updateGraphWithImplicitProvides(res *schedulerutils.BuildResult, pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, useCachedImplicit bool) (didOptimize bool, newGraph *pkggraph.PkgGraph, newGoalNode *pkggraph.PkgNode, err error) { - // acquire a writer lock since this routine will collapse nodes - graphMutex.Lock() - defer graphMutex.Unlock() + kingpin.MustParse(app.Parse(os.Args[1:])) + logger.InitBestEffort(*logFile, *logLevel) - didInjectAny, err := schedulerutils.InjectMissingImplicitProvides(res, pkgGraph, useCachedImplicit) + cfg := populateSchedulerConfig() + err := cfg.ScheduleBuild() if err != nil { - logger.Log.Errorf("Failed to add implicit provides for (%s). Error: %s", res.Node.FriendlyName(), err) - } else if didInjectAny { - // Failure to optimize the graph is non fatal as there may simply be unresolved dynamic dependencies - var subgraphErr error - newGraph, newGoalNode, subgraphErr = schedulerutils.OptimizeGraph(pkgGraph, useCachedImplicit) - if subgraphErr == nil { - logger.Log.Infof("Created solvable subgraph with new implicit provide information") - didOptimize = true - } - } - - return -} - -func drainChannels(channels *schedulerChannels, buildState *schedulerutils.GraphBuildState) { - // For any workers that are current parked with no buffered requests, close the - // requests channel to wake up any build workers waiting on a request to be buffered. - // Upon being woken up by a closed requests channel, the build worker will stop. - close(channels.Requests) - close(channels.PriorityRequests) - - // Drain the request buffers to sync the build state with the new number of outstanding builds. 
- for req := range channels.PriorityRequests { - buildState.RemoveBuildRequest(req) - } - for req := range channels.Requests { - buildState.RemoveBuildRequest(req) + logger.Log.Fatalf("Unable to build package graph.\nFor details see the build summary section above.\nError: %s", err) } } - -func doneBuild(channels *schedulerChannels, buildState *schedulerutils.GraphBuildState) { - // Close the done channel. The build workers will finish processing any work, then return - // upon seeing this channel is closed. - close(channels.Done) - - drainChannels(channels, buildState) -} - -// stopBuild will stop all future builds from being scheduled by sending a cancellation signal -// to the worker pool and draining any outstanding build requests. -func stopBuild(channels *schedulerChannels, buildState *schedulerutils.GraphBuildState) { - logger.Log.Error("Stopping build") - - // Close the cancel channel to prevent and buffered requests from being built. - // Upon seeing the cancel channel is closed, the build worker will stop instead - // of processing a new request. 
- close(channels.Cancel) - - drainChannels(channels, buildState) -} diff --git a/toolkit/tools/specreader/specreader.go b/toolkit/tools/specreader/specreader.go index e016f7ce0d5..a3f11a58b5b 100644 --- a/toolkit/tools/specreader/specreader.go +++ b/toolkit/tools/specreader/specreader.go @@ -6,24 +6,12 @@ package main import ( - "encoding/json" - "fmt" "os" - "path/filepath" - "sort" - "strings" - "sync" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/buildpipeline" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/directory" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/rpm" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/safechroot" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/specreader" - "github.com/jinzhu/copier" "gopkg.in/alecthomas/kingpin.v2" ) @@ -31,12 +19,6 @@ const ( defaultWorkerCount = "10" ) -// parseResult holds the worker results from parsing a SPEC file. 
-type parseResult struct { - packages []*pkgjson.Package - err error -} - var ( app = kingpin.New("specreader", "A tool to parse spec dependencies into JSON") specsDir = exe.InputDirFlag(app, "Directory to scan for SPECS") @@ -52,6 +34,20 @@ var ( logLevel = exe.LogLevelFlag(app) ) +func populateSpecReaderConfig() *specreader.Config { + return &specreader.Config{ + SpecsDir: *specsDir, + Output: *output, + Workers: *workers, + BuildDir: *buildDir, + SrpmsDir: *srpmsDir, + RpmsDir: *rpmsDir, + DistTag: *distTag, + WorkerTar: *workerTar, + RunCheck: *runCheck, + } +} + func main() { app.Version(exe.ToolkitVersion) kingpin.MustParse(app.Parse(os.Args[1:])) @@ -60,528 +56,8 @@ func main() { if *workers <= 0 { logger.Log.Panicf("Value in --workers must be greater than zero. Found %d", *workers) } + cfg := populateSpecReaderConfig() - err := parseSPECsWrapper(*buildDir, *specsDir, *rpmsDir, *srpmsDir, *distTag, *output, *workerTar, *workers, *runCheck) + err := cfg.ParseSPECsWrapper() logger.PanicOnError(err) } - -// parseSPECsWrapper wraps parseSPECs to conditionally run it inside a chroot. -// If workerTar is non-empty, parsing will occur inside a chroot, otherwise it will run on the host system. 
-func parseSPECsWrapper(buildDir, specsDir, rpmsDir, srpmsDir, distTag, outputFile, workerTar string, workers int, runCheck bool) (err error) { - var ( - chroot *safechroot.Chroot - packageRepo *pkgjson.PackageRepo - ) - - if workerTar != "" { - const leaveFilesOnDisk = false - chroot, err = createChroot(workerTar, buildDir, specsDir, srpmsDir) - if err != nil { - return - } - defer chroot.Close(leaveFilesOnDisk) - } - - doParse := func() error { - var parseError error - packageRepo, parseError = parseSPECs(specsDir, rpmsDir, srpmsDir, distTag, workers, runCheck) - return parseError - } - - if chroot != nil { - logger.Log.Info("Parsing SPECs inside a chroot environment") - err = chroot.Run(doParse) - } else { - logger.Log.Info("Parsing SPECs in the host environment") - err = doParse() - } - - if err != nil { - return - } - - b, err := json.MarshalIndent(packageRepo, "", " ") - if err != nil { - logger.Log.Error("Unable to marshal package info JSON") - return - } - - err = file.Write(string(b), outputFile) - if err != nil { - logger.Log.Errorf("Failed to write file (%s)", outputFile) - return - } - - return -} - -// createChroot creates a chroot to parse SPECs inside of. -func createChroot(workerTar, buildDir, specsDir, srpmsDir string) (chroot *safechroot.Chroot, err error) { - const ( - chrootName = "specparser_chroot" - existingDir = false - leaveFilesOnDisk = false - ) - - // Mount the specs and srpms directories to an identical path inside the chroot. - // Since specreader saves the full paths to specs in its output that grapher will then consume, - // the pathing needs to be preserved from the host system. 
- var extraDirectories []string - - extraMountPoints := []*safechroot.MountPoint{ - safechroot.NewMountPoint(specsDir, specsDir, "", safechroot.BindMountPointFlags, ""), - safechroot.NewMountPoint(srpmsDir, srpmsDir, "", safechroot.BindMountPointFlags, ""), - } - - chrootDir := filepath.Join(buildDir, chrootName) - chroot = safechroot.NewChroot(chrootDir, existingDir) - - err = chroot.Initialize(workerTar, extraDirectories, extraMountPoints) - if err != nil { - return - } - - // If this is not a regular build then copy in all of the SPECs since there are no bind mounts. - if !buildpipeline.IsRegularBuild() { - dirsToCopy := []string{specsDir, srpmsDir} - for _, dir := range dirsToCopy { - dirInChroot := filepath.Join(chroot.RootDir(), dir) - err = directory.CopyContents(dir, dirInChroot) - if err != nil { - closeErr := chroot.Close(leaveFilesOnDisk) - if closeErr != nil { - logger.Log.Errorf("Failed to close chroot, err: %s", err) - } - return - } - } - } - - return -} - -// parseSPECs will parse all specs in specsDir and return a summary of the SPECs. -func parseSPECs(specsDir, rpmsDir, srpmsDir, distTag string, workers int, runCheck bool) (packageRepo *pkgjson.PackageRepo, err error) { - var ( - packageList []*pkgjson.Package - wg sync.WaitGroup - specFiles []string - ) - - packageRepo = &pkgjson.PackageRepo{} - - // Find the filepath for each spec in the SPECS directory. - specSearch, err := filepath.Abs(filepath.Join(specsDir, "**/*.spec")) - if err == nil { - specFiles, err = filepath.Glob(specSearch) - } - if err != nil { - logger.Log.Errorf("Failed to find *.spec files. Check that %s is the correct directory. Error: %v", specsDir, err) - return - } - - results := make(chan *parseResult, len(specFiles)) - requests := make(chan string, len(specFiles)) - cancel := make(chan struct{}) - - // Start the workers now so they begin working as soon as a new job is buffered. 
- for i := 0; i < workers; i++ { - wg.Add(1) - go readSpecWorker(requests, results, cancel, &wg, distTag, rpmsDir, srpmsDir, runCheck) - } - - for _, specFile := range specFiles { - requests <- specFile - } - - close(requests) - - // Receive the parsed spec structures from the workers and place them into a list. - for i := 0; i < len(specFiles); i++ { - parseResult := <-results - if parseResult.err != nil { - err = parseResult.err - close(cancel) - break - } - packageList = append(packageList, parseResult.packages...) - } - - logger.Log.Debug("Waiting for outstanding workers to finish") - wg.Wait() - - if err != nil { - return - } - - packageRepo.Repo = packageList - sortPackages(packageRepo) - - return -} - -// sortPackages orders the package lists into reasonable and deterministic orders. -// Sort the main package list by "Name", "Version", "SRPM" -// Sort each nested Requires/BuildRequires by "Name", "Version" -func sortPackages(packageRepo *pkgjson.PackageRepo) { - sort.Slice(packageRepo.Repo, func(i, j int) bool { - iName := packageRepo.Repo[i].Provides.Name + packageRepo.Repo[i].Provides.Version + packageRepo.Repo[i].SrpmPath - jName := packageRepo.Repo[j].Provides.Name + packageRepo.Repo[j].Provides.Version + packageRepo.Repo[j].SrpmPath - return strings.Compare(iName, jName) < 0 - }) - - for _, pkg := range packageRepo.Repo { - sort.Slice(pkg.Requires, func(i, j int) bool { - iName := pkg.Requires[i].Name + pkg.Requires[i].Version - jName := pkg.Requires[j].Name + pkg.Requires[j].Version - return strings.Compare(iName, jName) < 0 - }) - sort.Slice(pkg.BuildRequires, func(i, j int) bool { - iName := pkg.BuildRequires[i].Name + pkg.BuildRequires[i].Version - jName := pkg.BuildRequires[j].Name + pkg.BuildRequires[j].Version - return strings.Compare(iName, jName) < 0 - }) - } -} - -// readspec is a goroutine that takes a full filepath to a spec file and scrapes it into the Specdef structure -// Concurrency is limited by the size of the semaphore channel passed 
in. Too many goroutines at once can deplete -// available filehandles. -func readSpecWorker(requests <-chan string, results chan<- *parseResult, cancel <-chan struct{}, wg *sync.WaitGroup, distTag, rpmsDir, srpmsDir string, runCheck bool) { - const ( - emptyQueryFormat = `` - querySrpm = `%{NAME}-%{VERSION}-%{RELEASE}.src.rpm` - queryProvidedPackages = `rpm %{ARCH}/%{nvra}.rpm\n[provides %{PROVIDENEVRS}\n][requires %{REQUIRENEVRS}\n][arch %{ARCH}\n]` - ) - - defer wg.Done() - - defines := rpm.DefaultDefines(runCheck) - defines[rpm.DistTagDefine] = distTag - - for specfile := range requests { - select { - case <-cancel: - logger.Log.Debug("Cancellation signal received") - return - default: - } - - result := &parseResult{} - - providerList := []*pkgjson.Package{} - buildRequiresList := []*pkgjson.PackageVer{} - sourcedir := filepath.Dir(specfile) - - // Find the SRPM associated with the SPEC. - srpmResults, err := rpm.QuerySPEC(specfile, sourcedir, querySrpm, defines, rpm.QueryHeaderArgument) - if err != nil { - result.err = err - results <- result - continue - } - - srpmPath := filepath.Join(srpmsDir, srpmResults[0]) - - isCompatible, err := rpm.SpecArchIsCompatible(specfile, sourcedir, defines) - if err != nil { - result.err = err - results <- result - continue - } - - if !isCompatible { - logger.Log.Debugf(`Skipping (%s) since it cannot be built on current architecture.`, specfile) - results <- result - continue - } - - // Find every package that the spec provides - queryResults, err := rpm.QuerySPEC(specfile, sourcedir, queryProvidedPackages, defines, rpm.QueryBuiltRPMHeadersArgument) - if err == nil && len(queryResults) != 0 { - providerList, err = parseProvides(rpmsDir, srpmPath, queryResults) - if err != nil { - result.err = err - results <- result - continue - } - } - - // Query the BuildRequires fields from this spec and turn them into an array of PackageVersions - queryResults, err = rpm.QuerySPEC(specfile, sourcedir, emptyQueryFormat, defines, 
rpm.BuildRequiresArgument) - if err == nil && len(queryResults) != 0 { - buildRequiresList, err = parsePackageVersionList(queryResults) - if err != nil { - result.err = err - results <- result - continue - } - } - - // Every package provided by a spec will have the same BuildRequires and SrpmPath - for i := range providerList { - providerList[i].SpecPath = specfile - providerList[i].SourceDir = sourcedir - providerList[i].Requires, err = condensePackageVersionArray(providerList[i].Requires, specfile) - if err != nil { - break - } - - providerList[i].BuildRequires, err = condensePackageVersionArray(buildRequiresList, specfile) - if err != nil { - break - } - } - - if err != nil { - result.err = err - } else { - result.packages = providerList - } - - // Submit the result to the main thread, the deferred function will clear the semaphore. - results <- result - } -} - -// parseProvides parses a newline separated list of Provides, Requires, and Arch from a single spec file. -// Several Provides may be in a row, so for each Provide the parser needs to look ahead for the first line that starts -// with a Require then ingest that line and every subsequent as a Requires until it sees a line that begins with Arch. -// Provide: package -// Require: requiresa = 1.0 -// Require: requiresb -// Arch: noarch -// The return is an array of Package structures, one for each Provides in the spec (implicit and explicit). 
-func parseProvides(rpmsDir, srpmPath string, list []string) (providerlist []*pkgjson.Package, err error) { - var ( - reqlist []*pkgjson.PackageVer - packagearch string - rpmPath string - listEntry []string - sublistEntry []string - ) - - const ( - tag = iota - value = iota - ) - - listEntry = strings.SplitN(list[0], " ", 2) - err = minSliceLength(listEntry, 2) - if err != nil { - return - } - - if listEntry[tag] != "rpm" { - err = fmt.Errorf("first element returned by rpmspec was not an rpm tag: %v", list) - return - } - - rpmPath = filepath.Join(rpmsDir, listEntry[value]) - - logger.Log.Trace(list) - for i := range list { - listEntry = strings.SplitN(list[i], " ", 2) - err = minSliceLength(listEntry, 1) - if err != nil { - return - } - - if listEntry[tag] == "rpm" { - logger.Log.Trace("rpm ", listEntry[value]) - rpmPath = filepath.Join(rpmsDir, listEntry[value]) - } else if listEntry[tag] == "provides" { - logger.Log.Trace("provides ", listEntry[value]) - for _, v := range list[i:] { - sublistEntry = strings.SplitN(v, " ", 2) - err = minSliceLength(sublistEntry, 2) - if err != nil { - return - } - - if sublistEntry[tag] == "requires" { - logger.Log.Trace(" requires ", sublistEntry[value]) - var requirePkgVers []*pkgjson.PackageVer - requirePkgVers, err = parsePackageVersions(sublistEntry[value]) - if err != nil { - return - } - filteredRequirePkgVers := filterOutDynamicDependencies(requirePkgVers) - reqlist = append(reqlist, filteredRequirePkgVers...) 
- } else if sublistEntry[tag] == "arch" { - logger.Log.Trace(" arch ", sublistEntry[value]) - packagearch = sublistEntry[value] - break - } - } - - var newProviderVer []*pkgjson.PackageVer - newProviderVer, err = parsePackageVersions(listEntry[value]) - if err != nil { - return - } - - providerPkgVer := &pkgjson.Package{ - Provides: newProviderVer[0], - SrpmPath: srpmPath, - RpmPath: rpmPath, - Architecture: packagearch, - Requires: reqlist, - } - - providerlist = append(providerlist, providerPkgVer) - reqlist = nil - } - } - - logger.Log.Tracef("Provider: %+v", providerlist) - - return -} - -// parsePackageVersions takes a package name and splits it into a set of PackageVer structures. -// Normally a list of length 1 is returned, however parsePackageVersions is also responsible for -// identifying if the package name is an "or" condition and returning all options. -func parsePackageVersions(packagename string) (newpkgs []*pkgjson.PackageVer, err error) { - const ( - NameField = iota - ConditionField = iota - VersionField = iota - ) - - packageSplit := strings.Split(packagename, " ") - err = minSliceLength(packageSplit, 1) - if err != nil { - return - } - - // If first character of the packagename is a "(" then its an "or" condition - if packagename[0] == '(' { - return parseOrCondition(packagename) - } - - newpkg := &pkgjson.PackageVer{Name: packageSplit[NameField]} - if len(packageSplit) == 1 { - // Nothing to do, no condition or version was found. 
- } else if packageSplit[ConditionField] != "or" { - newpkg.Condition = packageSplit[ConditionField] - newpkg.Version = packageSplit[VersionField] - } else { - // Replace the name with the first name that appears in (foo or bar) - substr := packageSplit[NameField][1:] - newpkg.Name = substr - } - - newpkgs = append(newpkgs, newpkg) - return -} - -// parsePackageVersionList takes the output from rpmspec --buildrequires -// and parses it into an array of PackageVersion structures -func parsePackageVersionList(pkgList []string) (pkgVerList []*pkgjson.PackageVer, err error) { - for _, pkgListEntry := range pkgList { - var parsedPkgVers []*pkgjson.PackageVer - parsedPkgVers, err = parsePackageVersions(pkgListEntry) - if err != nil { - return - } - pkgVerList = append(pkgVerList, parsedPkgVers...) - } - return -} - -// condensePackageVersionArray deduplicates entries in an array of Package Versions -// and represents double conditionals in a single PackageVersion structure. -// If a non-blank package version is specified more than twice in a SPEC then return an error. 
-func condensePackageVersionArray(packagelist []*pkgjson.PackageVer, specfile string) (processedPkgList []*pkgjson.PackageVer, err error) { - for _, pkg := range packagelist { - nameMatch := false - for i, processedPkg := range processedPkgList { - if pkg.Name == processedPkg.Name { - nameMatch = true - if processedPkg.Version == "" { - processedPkgList[i].Version = pkg.Version - processedPkgList[i].Condition = pkg.Condition - break - } else if processedPkg.SVersion == "" { - processedPkgList[i].SVersion = pkg.Version - processedPkgList[i].SCondition = pkg.Condition - break - } else if processedPkg.Version == processedPkg.SVersion { - processedPkgList[i].Version = pkg.Version - processedPkgList[i].SVersion = pkg.Version - processedPkgList[i].Condition = pkg.Condition - processedPkgList[i].SCondition = pkg.Condition - break - } else { - err = fmt.Errorf("spec (%s) attempted to set more than two conditions for package (%s)", specfile, processedPkg.Name) - return - } - } - } - if !nameMatch { - var processPkg pkgjson.PackageVer - copier.Copy(&processPkg, pkg) - processedPkgList = append(processedPkgList, &processPkg) - } - } - return -} - -// parseOrCondition splits a package name like (foo or bar) and returns both foo and bar as separate requirements. -func parseOrCondition(packagename string) (versions []*pkgjson.PackageVer, err error) { - logger.Log.Warnf("'OR' clause found (%s), make sure both packages are available. 
Please refer to 'docs/how_it_works/3_package_building.md#or-clauses' for explanation of limitations.", packagename) - packagename = strings.ReplaceAll(packagename, "(", "") - packagename = strings.ReplaceAll(packagename, ")", "") - - packageSplit := strings.Split(packagename, " or ") - err = minSliceLength(packageSplit, 1) - if err != nil { - return - } - - versions = make([]*pkgjson.PackageVer, 0, len(packageSplit)) - for _, condition := range packageSplit { - var parsedPkgVers []*pkgjson.PackageVer - parsedPkgVers, err = parsePackageVersions(condition) - if err != nil { - return - } - versions = append(versions, parsedPkgVers...) - } - - return -} - -// minSliceLength checks that a string slice is >= a minimum length and returns an error -// if the condition is not met. -func minSliceLength(slice []string, minLength int) (err error) { - if len(slice) < minLength { - return fmt.Errorf("slice is not required length (minLength = %d) %+v", minLength, slice) - } - return -} - -// filterOutDynamicDependencies removes dynamic RPM dependencies from pkgVers. -// These entries are automatically injected by RPM when processing an SRPM -// and represent an internal RPM feature requirement. -// -// For example if a SPEC uses a Lua scriplet, RPM will inject a requirement for -// `rpmlib(BuiltinLuaScripts)` so that future RPM invocations on the SRPM know -// what features it needs to properly handle the package. -// -// These dynamic dependencies are not backed by a real package or a provides, but -// are instead an internal notation of RPM itself. Filter these out from the list of -// requirements of actual packages. 
-func filterOutDynamicDependencies(pkgVers []*pkgjson.PackageVer) (filteredPkgVers []*pkgjson.PackageVer) { - const dynamicDependencyPrefix = "rpmlib(" - for _, req := range pkgVers { - if strings.HasPrefix(req.Name, dynamicDependencyPrefix) { - logger.Log.Debugf("Ignoring dynamic dependency: %s", req.Name) - continue - } - filteredPkgVers = append(filteredPkgVers, req) - } - - return -} diff --git a/toolkit/tools/srpmpacker/srpmpacker.go b/toolkit/tools/srpmpacker/srpmpacker.go index bd7513ce880..70ca663cd2d 100644 --- a/toolkit/tools/srpmpacker/srpmpacker.go +++ b/toolkit/tools/srpmpacker/srpmpacker.go @@ -4,95 +4,25 @@ package main import ( - "bufio" - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" "os" - "path" - "path/filepath" - "reflect" - "strings" - "sync" - "time" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/buildpipeline" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/directory" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/jsonutils" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/network" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/retry" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/rpm" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/safechroot" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/srpmpacker" "gopkg.in/alecthomas/kingpin.v2" ) -type fileSignaturesWrapper struct { - FileSignatures map[string]string `json:"Signatures"` -} - -const ( - srpmOutDir = "SRPMS" - srpmSPECDir = "SPECS" - srpmSOURCESDir = "SOURCES" -) - -type fileType int - -const ( - fileTypePatch fileType = iota - fileTypeSource fileType = iota -) - -type signatureHandlingType int - -const ( - signatureEnforce signatureHandlingType 
= iota - signatureSkipCheck signatureHandlingType = iota - signatureUpdate signatureHandlingType = iota -) - const ( signatureEnforceString = "enforce" signatureSkipCheckString = "skip" signatureUpdateString = "update" ) - const ( defaultBuildDir = "./build/SRPMS" defaultWorkerCount = "10" ) -// sourceRetrievalConfiguration holds information on where to hydrate files from. -type sourceRetrievalConfiguration struct { - localSourceDir string - sourceURL string - caCerts *x509.CertPool - tlsCerts []tls.Certificate - - signatureHandling signatureHandlingType - signatureLookup map[string]string -} - -// packResult holds the worker results from packing a SPEC file into an SRPM. -type packResult struct { - specFile string - srpmFile string - err error -} - -// specState holds the state of a SPEC file: if it should be packed and the resulting SRPM if it is. -type specState struct { - specFile string - srpmFile string - toPack bool - err error -} - var ( app = kingpin.New("srpmpacker", "A tool to package a SRPM.") @@ -122,6 +52,27 @@ var ( signatureHandling = app.Flag("signature-handling", "Specifies how to handle signature mismatches for source files.").Default(signatureEnforceString).PlaceHolder(exe.PlaceHolderize(validSignatureLevels)).Enum(validSignatureLevels...) 
) +func populateSrpmPackerConfig() *srpmpacker.Config { + return &srpmpacker.Config{ + SpecsDir: *specsDir, + OutDir: *outDir, + BuildDir: *buildDir, + DistTag: *distTag, + PackListFile: *packListFile, + RunCheck: *runCheck, + Workers: *workers, + RepackAll: *repackAll, + NestedSourcesDir: *nestedSourcesDir, + SourceURL: *sourceURL, + CaCertFile: *caCertFile, + TlsClientCert: *tlsClientCert, + TlsClientKey: *tlsClientKey, + WorkerTar: *workerTar, + ValidSignatureLevels: validSignatureLevels, + SignatureHandling: *signatureHandling, + } +} + func main() { app.Version(exe.ToolkitVersion) kingpin.MustParse(app.Parse(os.Args[1:])) @@ -131,893 +82,9 @@ func main() { logger.Log.Fatalf("Value in --workers must be greater than zero. Found %d", *workers) } - // Create a template configuration that all packed SRPM will be based on. - var templateSrcConfig sourceRetrievalConfiguration - - switch *signatureHandling { - case signatureEnforceString: - templateSrcConfig.signatureHandling = signatureEnforce - case signatureSkipCheckString: - logger.Log.Warn("Skipping signature enforcement") - templateSrcConfig.signatureHandling = signatureSkipCheck - case signatureUpdateString: - logger.Log.Warn("Will update signature files as needed") - templateSrcConfig.signatureHandling = signatureUpdate - default: - logger.Log.Fatalf("Invalid signature handling encountered: %s. Allowed: %s", *signatureHandling, validSignatureLevels) - } + cfg := populateSrpmPackerConfig() - // Setup remote source configuration - var err error - templateSrcConfig.sourceURL = *sourceURL - templateSrcConfig.caCerts, err = x509.SystemCertPool() - logger.PanicOnError(err, "Received error calling x509.SystemCertPool(). 
Error: %v", err) - if *caCertFile != "" { - newCACert, err := ioutil.ReadFile(*caCertFile) - if err != nil { - logger.Log.Panicf("Invalid CA certificate (%s), error: %s", *caCertFile, err) - } - - templateSrcConfig.caCerts.AppendCertsFromPEM(newCACert) - } - - if *tlsClientCert != "" && *tlsClientKey != "" { - cert, err := tls.LoadX509KeyPair(*tlsClientCert, *tlsClientKey) - if err != nil { - logger.Log.Panicf("Invalid TLS client key pair (%s) (%s), error: %s", *tlsClientCert, *tlsClientKey, err) - } - - templateSrcConfig.tlsCerts = append(templateSrcConfig.tlsCerts, cert) - } - - // A pack list may be provided, if so only pack this subset. - // If non is provided, pack all srpms. - packList, err := parsePackListFile(*packListFile) + err := cfg.CreateAllSRPMsWrapper() logger.PanicOnError(err) - err = createAllSRPMsWrapper(*specsDir, *distTag, *buildDir, *outDir, *workerTar, *workers, *nestedSourcesDir, *repackAll, *runCheck, packList, templateSrcConfig) - logger.PanicOnError(err) -} - -// removeDuplicateStrings will remove duplicate entries from a string slice -func removeDuplicateStrings(packList []string) (deduplicatedPackList []string) { - var ( - packListSet = make(map[string]struct{}) - exists = struct{}{} - ) - - for _, entry := range packList { - packListSet[entry] = exists - } - - for entry := range packListSet { - deduplicatedPackList = append(deduplicatedPackList, entry) - } - - return -} - -// parsePackListFile will parse a list of packages to pack if one is specified. -// Duplicate list entries in the file will be removed. 
-func parsePackListFile(packListFile string) (packList []string, err error) { - if packListFile == "" { - return - } - - file, err := os.Open(packListFile) - if err != nil { - return - } - defer file.Close() - - scanner := bufio.NewScanner(file) - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if line != "" { - packList = append(packList, line) - } - } - - if len(packList) == 0 { - err = fmt.Errorf("cannot have empty pack list (%s)", packListFile) - } - - packList = removeDuplicateStrings(packList) - - return -} - -// createAllSRPMsWrapper wraps createAllSRPMs to conditionally run it inside a chroot. -// If workerTar is non-empty, packing will occur inside a chroot, otherwise it will run on the host system. -func createAllSRPMsWrapper(specsDir, distTag, buildDir, outDir, workerTar string, workers int, nestedSourcesDir, repackAll, runCheck bool, packList []string, templateSrcConfig sourceRetrievalConfiguration) (err error) { - var chroot *safechroot.Chroot - originalOutDir := outDir - if workerTar != "" { - const leaveFilesOnDisk = false - chroot, buildDir, outDir, specsDir, err = createChroot(workerTar, buildDir, outDir, specsDir) - if err != nil { - return - } - defer chroot.Close(leaveFilesOnDisk) - } - - doCreateAll := func() error { - return createAllSRPMs(specsDir, distTag, buildDir, outDir, workers, nestedSourcesDir, repackAll, runCheck, packList, templateSrcConfig) - } - - if chroot != nil { - logger.Log.Info("Packing SRPMs inside a chroot environment") - err = chroot.Run(doCreateAll) - } else { - logger.Log.Info("Packing SRPMs in the host environment") - err = doCreateAll() - } - - if err != nil { - return - } - - // If this is container build then the bind mounts will not have been created. - // Copy the chroot output to host output folder. 
- if !buildpipeline.IsRegularBuild() { - srpmsInChroot := filepath.Join(chroot.RootDir(), outDir) - err = directory.CopyContents(srpmsInChroot, originalOutDir) - } - - return -} - -// createAllSRPMs will find all SPEC files in specsDir and pack SRPMs for them if needed. -func createAllSRPMs(specsDir, distTag, buildDir, outDir string, workers int, nestedSourcesDir, repackAll, runCheck bool, packList []string, templateSrcConfig sourceRetrievalConfiguration) (err error) { - logger.Log.Infof("Finding all SPEC files") - - specFiles, err := findSPECFiles(specsDir, packList) - if err != nil { - return - } - - specStates, err := calculateSPECsToRepack(specFiles, distTag, outDir, nestedSourcesDir, repackAll, runCheck, workers) - if err != nil { - return - } - - err = packSRPMs(specStates, distTag, buildDir, templateSrcConfig, workers) - return -} - -// findSPECFiles finds all SPEC files that should be considered for packing. -// Takes into consideration a packList if provided. -func findSPECFiles(specsDir string, packList []string) (specFiles []string, err error) { - if len(packList) == 0 { - specSearch := filepath.Join(specsDir, "**/*.spec") - specFiles, err = filepath.Glob(specSearch) - } else { - for _, specName := range packList { - var specFile []string - - specSearch := filepath.Join(specsDir, fmt.Sprintf("**/%s.spec", specName)) - specFile, err = filepath.Glob(specSearch) - - // If a SPEC is in the pack list, it must be packed. - if err != nil { - return - } - if len(specFile) != 1 { - if strings.HasPrefix(specName, "msopenjdk-11") { - logger.Log.Debugf("Ignoring missing match for '%s', which is externally-provided and thus doesn't have a local spec.", specName) - continue - } else { - err = fmt.Errorf("unexpected number of matches (%d) for spec file (%s)", len(specFile), specName) - return - } - } - - specFiles = append(specFiles, specFile[0]) - } - } - - return -} - -// createChroot creates a chroot to pack SRPMs inside of. 
-func createChroot(workerTar, buildDir, outDir, specsDir string) (chroot *safechroot.Chroot, newBuildDir, newOutDir, newSpecsDir string, err error) { - const ( - chrootName = "srpmpacker_chroot" - existingDir = false - leaveFilesOnDisk = false - - outMountPoint = "/output" - specsMountPoint = "/specs" - buildDirInChroot = "/build" - ) - - extraMountPoints := []*safechroot.MountPoint{ - safechroot.NewMountPoint(outDir, outMountPoint, "", safechroot.BindMountPointFlags, ""), - safechroot.NewMountPoint(specsDir, specsMountPoint, "", safechroot.BindMountPointFlags, ""), - } - - extraDirectories := []string{ - buildDirInChroot, - } - - newBuildDir = buildDirInChroot - newOutDir = outMountPoint - newSpecsDir = specsMountPoint - - chrootDir := filepath.Join(buildDir, chrootName) - chroot = safechroot.NewChroot(chrootDir, existingDir) - - err = chroot.Initialize(workerTar, extraDirectories, extraMountPoints) - if err != nil { - return - } - - defer func() { - if err != nil { - closeErr := chroot.Close(leaveFilesOnDisk) - if closeErr != nil { - logger.Log.Errorf("Failed to close chroot, err: %s", closeErr) - } - } - }() - - // If this is container build then the bind mounts will not have been created. - if !buildpipeline.IsRegularBuild() { - // Copy in all of the SPECs so they can be packed. - specsInChroot := filepath.Join(chroot.RootDir(), newSpecsDir) - err = directory.CopyContents(specsDir, specsInChroot) - if err != nil { - return - } - - // Copy any prepacked srpms so they will not be repacked. - srpmsInChroot := filepath.Join(chroot.RootDir(), newOutDir) - err = directory.CopyContents(outDir, srpmsInChroot) - if err != nil { - return - } - } - - // Networking support is needed to download sources. - files := []safechroot.FileToCopy{ - {Src: "/etc/resolv.conf", Dest: "/etc/resolv.conf"}, - } - - err = chroot.AddFiles(files...) - return -} - -// calculateSPECsToRepack will check which SPECs should be packed. 
-// If the resulting SRPM does not exist, or is older than a modification to -// one of the files used by the SPEC then it is repacked. -func calculateSPECsToRepack(specFiles []string, distTag, outDir string, nestedSourcesDir, repackAll, runCheck bool, workers int) (states []*specState, err error) { - var wg sync.WaitGroup - - requests := make(chan string, len(specFiles)) - results := make(chan *specState, len(specFiles)) - cancel := make(chan struct{}) - - logger.Log.Infof("Calculating SPECs to repack") - - // Start the workers now so they begin working as soon as a new job is buffered. - for i := 0; i < workers; i++ { - wg.Add(1) - go specsToPackWorker(requests, results, cancel, &wg, distTag, outDir, nestedSourcesDir, repackAll, runCheck) - } - - for _, specFile := range specFiles { - requests <- specFile - } - - // Signal to the workers that there are no more new spec files - close(requests) - - // Transfer the results from the channel into states. - // - // While the channel itself could be returned and passed to the consumer of - // the results, additional functionality would have to be added to limit the total workers - // in use at any given time. - // - // Since this worker pool and future worker pools in the application are opening file descriptors - // if too many are active at once it can exhaust the file descriptor limit. - // Currently all functions that employ workers pool of size `workers` are serialized, - // resulting in `workers` being the upper capacity at any given time. - totalToRepack := 0 - states = make([]*specState, len(specFiles)) - for i := 0; i < len(specFiles); i++ { - result := <-results - states[i] = result - - if result.err != nil { - logger.Log.Errorf("Failed to check (%s). 
Error: %s", result.specFile, result.err) - err = result.err - close(cancel) - break - } - - if result.toPack { - totalToRepack++ - } - } - - logger.Log.Debug("Waiting for outstanding workers to finish") - wg.Wait() - - if err != nil { - return - } - - logger.Log.Infof("Packing %d/%d SPECs", totalToRepack, len(specFiles)) - return -} - -// specsToPackWorker will process a channel of spec files that should be checked if packing is needed. -func specsToPackWorker(requests <-chan string, results chan<- *specState, cancel <-chan struct{}, wg *sync.WaitGroup, distTag, outDir string, nestedSourcesDir, repackAll, runCheck bool) { - const ( - queryFormat = `%{NAME}-%{VERSION}-%{RELEASE}.src.rpm` - nestedSourceDirName = "SOURCES" - ) - - const ( - srpmQueryResultsIndex = iota - expectedQueryResultsLen = iota - ) - - defer wg.Done() - - for specFile := range requests { - select { - case <-cancel: - logger.Log.Debug("Cancellation signal received") - return - default: - } - - result := &specState{ - specFile: specFile, - } - - containingDir := filepath.Dir(specFile) - - // Find the SRPM that this SPEC will produce. - defines := rpm.DefaultDefines(runCheck) - defines[rpm.DistTagDefine] = distTag - - // Allow the user to configure if the SPEC sources are in a nested 'SOURCES' directory. - // Otherwise assume source files are next to the SPEC file. 
- sourceDir := containingDir - if nestedSourcesDir { - sourceDir = filepath.Join(sourceDir, nestedSourceDirName) - } - specQueryResults, err := rpm.QuerySPEC(specFile, sourceDir, queryFormat, defines, rpm.QueryHeaderArgument) - - if err != nil { - if err.Error() == rpm.NoCompatibleArchError { - logger.Log.Infof("Skipping SPEC (%s) due to incompatible build architecture", specFile) - } else { - result.err = err - } - - results <- result - continue - } - - if len(specQueryResults) != expectedQueryResultsLen { - result.err = fmt.Errorf("unexpected query results, wanted (%d) results but got (%d), results: %v", expectedQueryResultsLen, len(specQueryResults), specQueryResults) - results <- result - continue - } - - // Resolve the full path of the SRPM that would be packed from this SPEC file. - producedSRPM := specQueryResults[srpmQueryResultsIndex] - fullSRPMPath := filepath.Join(outDir, producedSRPM) - result.srpmFile = fullSRPMPath - - if repackAll { - result.toPack = true - results <- result - continue - } - - // Sanity check that SRPMS is meant to be built for the machine architecture - isCompatible, err := rpm.SpecArchIsCompatible(specFile, sourceDir, defines) - if err != nil { - result.err = err - results <- result - continue - } - - if !isCompatible { - logger.Log.Infof(`Skipping (%s) since it cannot be built on current architecture.`, specFile) - results <- result - continue - } - - // Check if the SRPM is already on disk and if so its modification time. - srpmInfo, err := os.Stat(fullSRPMPath) - if err != nil { - logger.Log.Debugf("Updating (%s) since (%s) is not yet built", specFile, fullSRPMPath) - result.toPack = true - results <- result - continue - } - - // Check if a file used by the SPEC has been modified since the resulting SRPM was previously packed. - specModTime, latestFile, err := directory.LastModifiedFile(containingDir) - if err != nil { - result.err = fmt.Errorf("failed to query modification time for SPEC (%s). 
Error: %s", specFile, err) - results <- result - continue - } - - if specModTime.After(srpmInfo.ModTime()) { - logger.Log.Debugf("Updating (%s) since (%s) has changed", specFile, latestFile) - result.toPack = true - } - - results <- result - } -} - -// packSRPMs will pack any SPEC files that have been marked as `toPack`. -func packSRPMs(specStates []*specState, distTag, buildDir string, templateSrcConfig sourceRetrievalConfiguration, workers int) (err error) { - var wg sync.WaitGroup - - allSpecStates := make(chan *specState, len(specStates)) - results := make(chan *packResult, len(specStates)) - cancel := make(chan struct{}) - - // Start the workers now so they begin working as soon as a new job is buffered. - for i := 0; i < workers; i++ { - wg.Add(1) - go packSRPMWorker(allSpecStates, results, cancel, &wg, distTag, buildDir, templateSrcConfig) - } - - for _, state := range specStates { - allSpecStates <- state - } - - // Signal to the workers that there are no more new spec files - close(allSpecStates) - - for i := 0; i < len(specStates); i++ { - result := <-results - - if result.err != nil { - logger.Log.Errorf("Failed to pack (%s). Error: %s", result.specFile, result.err) - err = result.err - close(cancel) - break - } - - // Skip results for states that were not packed by request - if result.srpmFile == "" { - continue - } - - logger.Log.Infof("Packed (%s) -> (%s)", filepath.Base(result.specFile), filepath.Base(result.srpmFile)) - } - - logger.Log.Debug("Waiting for outstanding workers to finish") - wg.Wait() - - return -} - -// packSRPMWorker will process a channel of SPECs and pack any that are marked as toPack. 
-func packSRPMWorker(allSpecStates <-chan *specState, results chan<- *packResult, cancel <-chan struct{}, wg *sync.WaitGroup, distTag, buildDir string, templateSrcConfig sourceRetrievalConfiguration) { - defer wg.Done() - - for specState := range allSpecStates { - select { - case <-cancel: - logger.Log.Debug("Cancellation signal received") - return - default: - } - - result := &packResult{ - specFile: specState.specFile, - } - - // Its a no-op if the SPEC does not need to be packed - if !specState.toPack { - results <- result - continue - } - - // Setup a source retrieval configuration based on the provided template - signaturesFilePath := specPathToSignaturesPath(specState.specFile) - srcConfig, err := initializeSourceConfig(templateSrcConfig, signaturesFilePath) - if err != nil { - result.err = err - results <- result - continue - } - - fullOutDirPath := filepath.Dir(specState.srpmFile) - err = os.MkdirAll(fullOutDirPath, os.ModePerm) - if err != nil { - result.err = err - results <- result - continue - } - - outputPath, err := packSingleSPEC(specState.specFile, specState.srpmFile, signaturesFilePath, buildDir, fullOutDirPath, distTag, srcConfig) - if err != nil { - result.err = err - results <- result - continue - } - - result.srpmFile = outputPath - - results <- result - } -} - -func specPathToSignaturesPath(specFilePath string) string { - const ( - specSuffix = ".spec" - signatureFileSuffix = "signatures.json" - ) - - specName := strings.TrimSuffix(filepath.Base(specFilePath), specSuffix) - signatureFileName := fmt.Sprintf("%s.%s", specName, signatureFileSuffix) - signatureFileDirPath := filepath.Dir(specFilePath) - - return filepath.Join(signatureFileDirPath, signatureFileName) -} - -func initializeSourceConfig(templateSrcConfig sourceRetrievalConfiguration, signaturesFilePath string) (srcConfig sourceRetrievalConfiguration, err error) { - srcConfig = templateSrcConfig - srcConfig.localSourceDir = filepath.Dir(signaturesFilePath) - - // Read the signatures 
file for the SPEC sources if applicable - if srcConfig.signatureHandling != signatureSkipCheck { - srcConfig.signatureLookup, err = readSignatures(signaturesFilePath) - } - - return srcConfig, err -} - -func readSignatures(signaturesFilePath string) (readSignatures map[string]string, err error) { - var signaturesWrapper fileSignaturesWrapper - signaturesWrapper.FileSignatures = make(map[string]string) - - err = jsonutils.ReadJSONFile(signaturesFilePath, &signaturesWrapper) - if err != nil { - if os.IsNotExist(err) { - // Non-fatal as some SPECs may not have sources - logger.Log.Debugf("The signatures file (%s) doesn't exist, will not pre-populate signatures.", signaturesFilePath) - err = nil - } else { - logger.Log.Errorf("Failed to read the signatures file (%s): %v.", signaturesFilePath, err) - } - } - - return signaturesWrapper.FileSignatures, err -} - -// packSingleSPEC will pack a given SPEC file into an SRPM. -func packSingleSPEC(specFile, srpmFile, signaturesFile, buildDir, outDir, distTag string, srcConfig sourceRetrievalConfiguration) (outputPath string, err error) { - srpmName := filepath.Base(srpmFile) - workingDir := filepath.Join(buildDir, srpmName) - - logger.Log.Debugf("Working directory: %s", workingDir) - - err = os.MkdirAll(workingDir, os.ModePerm) - if err != nil { - return - } - defer cleanupSRPMWorkingDir(workingDir) - - // Make the folder structure needed for rpmbuild - err = createRPMBuildFolderStructure(workingDir) - if err != nil { - return - } - - // Copy the SPEC file in - srpmSpecFile := filepath.Join(workingDir, srpmSPECDir, filepath.Base(specFile)) - err = file.Copy(specFile, srpmSpecFile) - if err != nil { - return - } - - // Track the current signatures of source files used by the SPEC. - // This will only contain signatures that have either been validated or updated by this tool. 
- currentSignatures := make(map[string]string) - - defines := rpm.DefaultDefines(*runCheck) - if distTag != "" { - defines[rpm.DistTagDefine] = distTag - } - - // Hydrate all patches. Exclusively using `sourceDir` - err = hydrateFiles(fileTypePatch, specFile, workingDir, srcConfig, currentSignatures, defines) - if err != nil { - return - } - - // Hydrate all sources. Download any missing ones not in `sourceDir` - err = hydrateFiles(fileTypeSource, specFile, workingDir, srcConfig, currentSignatures, defines) - if err != nil { - return - } - - err = updateSignaturesIfApplicable(signaturesFile, srcConfig, currentSignatures) - - // Build the SRPM itself, using `workingDir` as the topdir - err = rpm.GenerateSRPMFromSPEC(specFile, workingDir, defines) - if err != nil { - return - } - - // Save the output of the build to `outDir` - outputPath, err = copyOutput(workingDir, outDir) - return -} - -func updateSignaturesIfApplicable(signaturesFile string, srcConfig sourceRetrievalConfiguration, currentSignatures map[string]string) (err error) { - if srcConfig.signatureHandling == signatureUpdate && !reflect.DeepEqual(srcConfig.signatureLookup, currentSignatures) { - logger.Log.Infof("Updating (%s)", signaturesFile) - - outputSignatures := fileSignaturesWrapper{ - FileSignatures: currentSignatures, - } - - err = jsonutils.WriteJSONFile(signaturesFile, outputSignatures) - if err != nil { - logger.Log.Warnf("Unable to update signatures file (%s)", signaturesFile) - return - } - } - - return -} - -func createRPMBuildFolderStructure(workingDir string) (err error) { - dirsToCreate := []string{ - srpmSOURCESDir, - srpmSPECDir, - srpmOutDir, - } - - for _, dir := range dirsToCreate { - err = os.MkdirAll(path.Join(workingDir, dir), os.ModePerm) - if err != nil { - return - } - } - - return -} - -// readSPECTagArray will return an array of tag values from the given specfile. -// (e.g. 
all SOURCE entries) -func readSPECTagArray(specFile, sourceDir, tag string, defines map[string]string) (tagValues []string, err error) { - queryFormat := fmt.Sprintf(`[%%{%s}\n]`, tag) - return rpm.QuerySPEC(specFile, sourceDir, queryFormat, defines, rpm.QueryHeaderArgument) -} - -// hydrateFiles will attempt to retrieve all sources needed to build an SRPM from a SPEC. -// Will alter `currentSignatures`, -func hydrateFiles(fileTypeToHydrate fileType, specFile, workingDir string, srcConfig sourceRetrievalConfiguration, currentSignatures, defines map[string]string) (err error) { - const ( - downloadMissingPatchFiles = false - skipPatchSignatures = true - - downloadMissingSourceFiles = true - skipSourceSignatures = false - - patchTag = "PATCH" - sourceTag = "SOURCE" - ) - - var ( - specTag string - hydrateRemotely bool - skipSignatureHandling bool - ) - - switch fileTypeToHydrate { - case fileTypePatch: - specTag = patchTag - hydrateRemotely = downloadMissingPatchFiles - skipSignatureHandling = skipPatchSignatures - case fileTypeSource: - specTag = sourceTag - hydrateRemotely = downloadMissingSourceFiles - skipSignatureHandling = skipSourceSignatures - default: - return fmt.Errorf("invalid filetype (%d)", fileTypeToHydrate) - } - - newSourceDir := filepath.Join(workingDir, srpmSOURCESDir) - fileHydrationState := make(map[string]bool) - - // Collect a list of files of type `specTag` needed for this SRPM - filesNeeded, err := readSPECTagArray(specFile, srcConfig.localSourceDir, specTag, defines) - if err != nil { - return - } - - for _, fileNeeded := range filesNeeded { - fileHydrationState[fileNeeded] = false - } - - // If the user provided an existing source dir, prefer it over remote sources. - if srcConfig.localSourceDir != "" { - err = hydrateFromLocalSource(fileHydrationState, newSourceDir, srcConfig, skipSignatureHandling, currentSignatures) - // On error warn and default to hydrating from an external server. 
- if err != nil { - logger.Log.Warnf("Error hydrating from local source directory (%s): %v", srcConfig.localSourceDir, err) - } - } - - if hydrateRemotely && srcConfig.sourceURL != "" { - hydrateFromRemoteSource(fileHydrationState, newSourceDir, srcConfig, skipSignatureHandling, currentSignatures) - } - - for fileNeeded, alreadyHydrated := range fileHydrationState { - if !alreadyHydrated { - err = fmt.Errorf("unable to hydrate file: %s", fileNeeded) - logger.Log.Error(err) - } - } - - return -} - -// hydrateFromLocalSource will update fileHydrationState. -// Will alter currentSignatures. -func hydrateFromLocalSource(fileHydrationState map[string]bool, newSourceDir string, srcConfig sourceRetrievalConfiguration, skipSignatureHandling bool, currentSignatures map[string]string) (err error) { - err = filepath.Walk(srcConfig.localSourceDir, func(path string, info os.FileInfo, err error) error { - isFile, _ := file.IsFile(path) - if !isFile { - return nil - } - - fileName := filepath.Base(path) - - isHydrated, found := fileHydrationState[fileName] - if !found { - return nil - } - - if isHydrated { - logger.Log.Warnf("Duplicate matching file found at (%s), skipping", path) - return nil - } - - if !skipSignatureHandling { - err = validateSignature(path, srcConfig, currentSignatures) - if err != nil { - logger.Log.Warn(err.Error()) - return nil - } - } - - err = file.Copy(path, filepath.Join(newSourceDir, fileName)) - if err != nil { - logger.Log.Warnf("Failed to copy file (%s), skipping. Error: %s", path, err) - return nil - } - - logger.Log.Debugf("Hydrated (%s) from (%s)", fileName, path) - - fileHydrationState[fileName] = true - return nil - }) - - return -} - -// hydrateFromRemoteSource will update fileHydrationState. -// Will alter `currentSignatures`. 
-func hydrateFromRemoteSource(fileHydrationState map[string]bool, newSourceDir string, srcConfig sourceRetrievalConfiguration, skipSignatureHandling bool, currentSignatures map[string]string) { - const ( - downloadRetryAttempts = 3 - downloadRetryDuration = time.Second - ) - - for fileName, alreadyHydrated := range fileHydrationState { - if alreadyHydrated { - continue - } - - destinationFile := filepath.Join(newSourceDir, fileName) - - url := network.JoinURL(srcConfig.sourceURL, fileName) - - err := retry.Run(func() error { - err := network.DownloadFile(url, destinationFile, srcConfig.caCerts, srcConfig.tlsCerts) - if err != nil { - logger.Log.Warnf("Failed to download (%s). Error: %s", url, err) - } - - return err - }, downloadRetryAttempts, downloadRetryDuration) - - if err != nil { - continue - } - - if !skipSignatureHandling { - err = validateSignature(destinationFile, srcConfig, currentSignatures) - if err != nil { - logger.Log.Warn(err.Error()) - - // If the delete fails, just warn as there will be another cleanup - // attempt when exiting the program. - err = os.Remove(destinationFile) - if err != nil { - logger.Log.Warnf("Failed to delete file (%s). Error: %s", destinationFile, err) - } - - continue - } - } - - fileHydrationState[fileName] = true - logger.Log.Debugf("Hydrated (%s) from (%s)", fileName, url) - } -} - -// validateSignature will compare the SHA256 of the file at path against the signature for it in srcConfig.signatureLookup -// Will skip if signature handling is set to skip. -// Will alter `currentSignatures`. -func validateSignature(path string, srcConfig sourceRetrievalConfiguration, currentSignatures map[string]string) (err error) { - if srcConfig.signatureHandling == signatureSkipCheck { - return - } - - fileName := filepath.Base(path) - expectedSignature, found := srcConfig.signatureLookup[fileName] - if !found && srcConfig.signatureHandling != signatureUpdate { - err = fmt.Errorf("no signature for file (%s) found. 
full path is (%s)", fileName, path) - return - } - - newSignature, err := file.GenerateSHA256(path) - if err != nil { - return - } - - if strings.EqualFold(expectedSignature, newSignature) { - currentSignatures[fileName] = newSignature - } else { - if srcConfig.signatureHandling == signatureUpdate { - logger.Log.Warnf("Updating signature for (%s) from (%s) to (%s)", fileName, expectedSignature, newSignature) - currentSignatures[fileName] = newSignature - } else { - return fmt.Errorf("file (%s) has mismatching signature: expected (%s) - actual (%s)", path, expectedSignature, newSignature) - } - } - - return -} - -// copyOutput will copy the built SRPMs from workingDir to the specified output directory. -func copyOutput(workingDir, outDir string) (outputPath string, err error) { - rpmbuildOutDir := filepath.Join(workingDir, srpmOutDir) - err = filepath.Walk(rpmbuildOutDir, func(path string, info os.FileInfo, err error) error { - isFile, _ := file.IsFile(path) - if !isFile { - return nil - } - outputPath = filepath.Join(outDir, filepath.Base(path)) - return file.Copy(path, outputPath) - }) - - return -} - -// cleanupSRPMWorkingDir will delete the working directory for the SRPM build. 
-func cleanupSRPMWorkingDir(workingDir string) { - err := os.RemoveAll(workingDir) - if err != nil { - logger.Log.Warnf("Unable to cleanup working directory: %s", workingDir) - } } diff --git a/toolkit/tools/validatechroot/validatechroot.go b/toolkit/tools/validatechroot/validatechroot.go index 2a254178e11..4ec5b70c4ee 100644 --- a/toolkit/tools/validatechroot/validatechroot.go +++ b/toolkit/tools/validatechroot/validatechroot.go @@ -4,129 +4,47 @@ package main import ( - "fmt" "os" - "path" - "path/filepath" - "regexp" "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/exe" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/file" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/safechroot" - "github.com/microsoft/CBL-Mariner/toolkit/tools/internal/shell" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/logger" + "github.com/microsoft/CBL-Mariner/toolkit/tools/pkg/validatechroot" "gopkg.in/alecthomas/kingpin.v2" ) -const ( - leaveChrootFilesOnDisk = false -) - var ( app = kingpin.New("validatechroot", "A tool to validate that the worker chroot is well configured and all dependencies are satisfied.") toolchainRpmsDir = app.Flag("rpm-dir", "Directory that contains already built toolchain RPMs. 
Should contain top level directories for architecture.").Required().ExistingDir() tmpDir = app.Flag("tmp-dir", "Temporary chroot directory.").String() - workerTar = app.Flag("worker-chroot", "Full path to worker_chroot.tar.gz").Required().ExistingFile() - workerManifest = app.Flag("worker-manifest", "Full path to the worker manifest file").Required().ExistingFile() + workerTar = app.Flag("worker-chroot", "Full path to worker_chroot.tar.gz").Required().ExistingFile() + workerManifest = app.Flag("worker-manifest", "Full path to the worker manifest file").Required().ExistingFile() + leaveChrootFilesOnDisk = app.Flag("leave-chroot-files-on-disk", "Cleanup Chroot Files on Disk").Bool() logFile = exe.LogFileFlag(app) logLevel = exe.LogLevelFlag(app) ) +func populateValidateChrootCfg() *validatechroot.Config { + return &validatechroot.Config{ + ToolchainRpmsDir: *toolchainRpmsDir, + TmpDir: *tmpDir, + WorkerTar: *workerTar, + WorkerManifest: *workerManifest, + LeaveChrootFilesOnDisk: *leaveChrootFilesOnDisk, + } +} + func main() { app.Version(exe.ToolkitVersion) kingpin.MustParse(app.Parse(os.Args[1:])) logger.InitBestEffort(*logFile, *logLevel) - err := validateWorker(*toolchainRpmsDir, *tmpDir, *workerTar, *workerManifest) - + cfg := populateValidateChrootCfg() + err := cfg.Validate() if err != nil { logger.Log.Fatalf("Failed to validate worker. Error: %s", err) } } - -func validateWorker(rpmsDir, chrootDir, workerTarPath, manifestPath string) (err error) { - const ( - chrootToolchainRpmsDir = "/toolchainrpms" - isExistingDir = false - ) - - var ( - chroot *safechroot.Chroot - // Every valid line will be of the form: -..rpm - packageArchLookupRegex = regexp.MustCompile(`^.+(?Px86_64|aarch64|noarch)\.rpm$`) - ) - - // Ensure that if initialization fails, the chroot is closed - defer func() { - if chroot != nil { - closeErr := chroot.Close(leaveChrootFilesOnDisk) - if closeErr != nil { - logger.Log.Panicf("Unable to close chroot on failed initialization. 
Error: %s", closeErr) - } - } - }() - - logger.Log.Infof("Creating chroot environment to validate '%s' against '%s'", workerTarPath, manifestPath) - - chroot = safechroot.NewChroot(chrootDir, isExistingDir) - rpmMount := safechroot.NewMountPoint(rpmsDir, chrootToolchainRpmsDir, "", safechroot.BindMountPointFlags, "") - extraDirectories := []string{chrootToolchainRpmsDir} - rpmMounts := []*safechroot.MountPoint{rpmMount} - err = chroot.Initialize(workerTarPath, extraDirectories, rpmMounts) - if err != nil { - chroot = nil - return - } - - manifestEntries, err := file.ReadLines(manifestPath) - if err != nil { - return - } - badEntries := make(map[string]string) - - err = chroot.Run(func() (err error) { - for _, rpm := range manifestEntries { - archMatches := packageArchLookupRegex.FindStringSubmatch(rpm) - if len(archMatches) != 2 { - logger.Log.Errorf("%v", archMatches) - return fmt.Errorf("'%s' is an invalid rpm file path", rpm) - } - arch := archMatches[1] - rpmPath := path.Join(chrootToolchainRpmsDir, arch, rpm) - - // --replacepkgs instructs RPM to gracefully re-install a package, including checking dependencies - args := []string{ - "-ihv", - "--replacepkgs", - "--nosignature", - rpmPath, - } - logger.Log.Infof("Validating %s", filepath.Base(rpmPath)) - stdout, stderr, err := shell.Execute("rpm", args...) - - logger.Log.Debug(stdout) - - if err != nil || len(stderr) > 0 { - logger.Log.Warn(stderr) - if len(stderr) > 0 { - badEntries[rpm] = stderr - } else { - badEntries[rpm] = err.Error() - } - } - } - return - }) - - if len(badEntries) > 0 { - for rpm, errMsg := range badEntries { - logger.Log.Errorf("%s:\n %s", rpm, errMsg) - } - err = fmt.Errorf("found invalid packages in the worker chroot") - } - return -}