Skip to content

Commit

Permalink
Merge pull request #95 from radiofrance/feat-dev-mode
Browse files Browse the repository at this point in the history
feat(build): improve UX when testing builds locally
  • Loading branch information
graillus authored Apr 29, 2022
2 parents fc34875 + 3f28f52 commit c1f0874
Show file tree
Hide file tree
Showing 10 changed files with 221 additions and 169 deletions.
20 changes: 9 additions & 11 deletions cmd/build.go
Original file line number Diff line number Diff line change
Expand Up @@ -211,30 +211,28 @@ func doBuild(opts buildOpts) error {
DAG := dib.GenerateDAG(path.Join(workingDir, opts.BuildPath), opts.RegistryURL)
logrus.Debug("Generate DAG -- Done")

err = dib.Plan(DAG, gcrRegistry, diffs, previousVersion, currentVersion, opts.ForceRebuild, !opts.DisableRunTests)
if err != nil {
return err
}

err = dib.Retag(DAG, tagger, previousVersion, currentVersion)
err = dib.Plan(DAG, gcrRegistry, diffs, previousVersion, currentVersion,
opts.Release, opts.ForceRebuild, !opts.DisableRunTests)
if err != nil {
return err
}

rateLimiter := ratelimit.NewChannelRateLimiter(opts.RateLimit)
if err := dib.Rebuild(DAG, builder, testRunners, rateLimiter, currentVersion, opts.LocalOnly); err != nil {
if err := dib.Rebuild(DAG, builder, testRunners, rateLimiter, opts.LocalOnly); err != nil {
return err
}

if opts.Release {
logrus.Info("--release is set to true, tags defined by dib.extra-tags will now use current image versions")
if err := dib.TagWithExtraTags(DAG, tagger, currentVersion); err != nil {
err = dib.Retag(DAG, tagger)
if err != nil {
return err
}

// We retag the referential image to make explicit that this commit was built using dib
if err := tagger.Tag(fmt.Sprintf("%s:%s", path.Join(opts.RegistryURL, opts.ReferentialImage), "latest"),
fmt.Sprintf("%s:%s", path.Join(opts.RegistryURL, opts.ReferentialImage), currentVersion)); err != nil {
if err := tagger.Tag(
fmt.Sprintf("%s:%s", path.Join(opts.RegistryURL, opts.ReferentialImage), "latest"),
fmt.Sprintf("%s:%s", path.Join(opts.RegistryURL, opts.ReferentialImage), currentVersion),
); err != nil {
return err
}
}
Expand Down
2 changes: 1 addition & 1 deletion cmd/graph.go
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ func doGraph(opts rootOpts) error {
DAG := dib.GenerateDAG(path.Join(workingDir, opts.BuildPath), opts.RegistryURL)
logrus.Debug("Generate DAG -- Done")

err = dib.Plan(DAG, gcrRegistry, diffs, previousVersion, currentVersion, false, false)
err = dib.Plan(DAG, gcrRegistry, diffs, previousVersion, currentVersion, true, false, false)
if err != nil {
return err
}
Expand Down
24 changes: 13 additions & 11 deletions dag/image.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,17 +8,19 @@ import (

// Image holds the docker image information.
type Image struct {
Name string
ShortName string
Dockerfile *dockerfile.Dockerfile
IgnorePatterns []string
NeedsRebuild bool
NeedsTests bool
NeedsRetag bool
RetagDone bool
TagWithExtraTagsDone bool
RebuildDone bool
RebuildFailed bool
Name string
ShortName string
CurrentTag string // Current tag expected to be present on the registry before the build.
TargetTag string // New tag, not present in registry until the image is built and pushed.
ExtraTags []string // A list of tags to make in addition to TargetTag.
Dockerfile *dockerfile.Dockerfile
IgnorePatterns []string
NeedsRebuild bool
NeedsTests bool
NeedsRetag bool
RetagDone bool
RebuildDone bool
RebuildFailed bool
}

// DockerRef returns the fully-qualified docker ref for a given version.
Expand Down
39 changes: 26 additions & 13 deletions dib/build.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ import (
// Rebuild iterates over the graph to rebuild all images that are marked to be rebuilt.
// It also collects the reports and prints them to the user.
func Rebuild(graph *dag.DAG, builder types.ImageBuilder, testRunners []types.TestRunner,
rateLimiter ratelimit.RateLimiter, newTag string, localOnly bool,
rateLimiter ratelimit.RateLimiter, localOnly bool,
) error {
reportChan := make(chan BuildReport)
wgBuild := sync.WaitGroup{}
Expand All @@ -29,7 +29,7 @@ func Rebuild(graph *dag.DAG, builder types.ImageBuilder, testRunners []types.Tes
}

wgBuild.Add(1)
go RebuildNode(node, builder, testRunners, rateLimiter, newTag, localOnly, &wgBuild, reportChan)
go RebuildNode(node, builder, testRunners, rateLimiter, localOnly, &wgBuild, reportChan)
})

go func() {
Expand All @@ -48,7 +48,7 @@ func Rebuild(graph *dag.DAG, builder types.ImageBuilder, testRunners []types.Tes

// RebuildNode build the image on the given node, and run tests if necessary.
func RebuildNode(node *dag.Node, builder types.ImageBuilder, testRunners []types.TestRunner,
rateLimiter ratelimit.RateLimiter, newTag string, localOnly bool, wg *sync.WaitGroup, reportChan chan BuildReport,
rateLimiter ratelimit.RateLimiter, localOnly bool, wg *sync.WaitGroup, reportChan chan BuildReport,
) {
defer wg.Done()

Expand Down Expand Up @@ -85,7 +85,7 @@ func RebuildNode(node *dag.Node, builder types.ImageBuilder, testRunners []types
}

if img.NeedsRebuild && !img.RebuildDone {
err := doRebuild(img, builder, rateLimiter, newTag, localOnly)
err := doRebuild(node, builder, rateLimiter, localOnly)
if err != nil {
img.RebuildFailed = true
reportChan <- report.withError(err)
Expand All @@ -96,7 +96,7 @@ func RebuildNode(node *dag.Node, builder types.ImageBuilder, testRunners []types

if img.NeedsTests {
report.TestsStatus = TestsStatusPassed
if err := testImage(img, testRunners, newTag); err != nil {
if err := testImage(img, testRunners, img.TargetTag); err != nil {
report.TestsStatus = TestsStatusFailed
report.FailureMessage = err.Error()
}
Expand All @@ -107,20 +107,31 @@ func RebuildNode(node *dag.Node, builder types.ImageBuilder, testRunners []types
}

// doRebuild do the effective build action.
func doRebuild(img *dag.Image, builder types.ImageBuilder, rateLimiter ratelimit.RateLimiter,
newTag string, localOnly bool,
) error {
func doRebuild(node *dag.Node, builder types.ImageBuilder, rateLimiter ratelimit.RateLimiter, localOnly bool) error {
rateLimiter.Acquire()
defer rateLimiter.Release()

logrus.Infof("Building \"%s:%s\" in context \"%s\"", img.Name, newTag, img.Dockerfile.ContextPath)
img := node.Image

if err := dockerfile.ReplaceFromTag(*img.Dockerfile, newTag); err != nil {
// Before building the image, we need to replace all references to tags
// of any dib-managed images used as dependencies in the Dockerfile.
tagsToReplace := make(map[string]string)
for _, parent := range node.Parents() {
if parent.Image.NeedsRebuild {
// The parent image was rebuilt, we have to use its new tag.
tagsToReplace[parent.Image.Name] = parent.Image.DockerRef(parent.Image.TargetTag)
continue
}

// The parent image has not changed, we can use the existing tag.
tagsToReplace[parent.Image.Name] = parent.Image.DockerRef(parent.Image.CurrentTag)
}
if err := dockerfile.ReplaceTags(*img.Dockerfile, tagsToReplace); err != nil {
return fmt.Errorf("failed to replace tag in dockerfile %s: %w", img.Dockerfile.ContextPath, err)
}
defer func() {
if err := dockerfile.ResetFromTag(*img.Dockerfile, newTag); err != nil {
logrus.Errorf("failed to reset tag in dockerfile %s: %v", img.Dockerfile.ContextPath, err)
if err := dockerfile.ResetTags(*img.Dockerfile, tagsToReplace); err != nil {
logrus.Warnf("failed to reset tag in dockerfile %s: %v", img.Dockerfile.ContextPath, err)
}
}()

Expand Down Expand Up @@ -149,12 +160,14 @@ func doRebuild(img *dag.Image, builder types.ImageBuilder, rateLimiter ratelimit

opts := types.ImageBuilderOpts{
Context: img.Dockerfile.ContextPath,
Tag: fmt.Sprintf("%s:%s", img.Name, newTag),
Tag: fmt.Sprintf("%s:%s", img.Name, img.TargetTag),
Labels: labels,
Push: !localOnly,
LogOutput: fileOutput,
}

logrus.Infof("Building \"%s:%s\" in context \"%s\"", img.Name, img.TargetTag, img.Dockerfile.ContextPath)

if err := builder.Build(opts); err != nil {
return fmt.Errorf("building image %s failed: %w", img.ShortName, err)
}
Expand Down
10 changes: 5 additions & 5 deletions dib/build_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ func Test_Rebuild_NothingToDo(t *testing.T) {
reportChan := make(chan dib.BuildReport, 1)
wg := sync.WaitGroup{}
wg.Add(1)
dib.RebuildNode(node, builder, testRunners, mock.RateLimiter{}, "new-123", false, &wg, reportChan)
dib.RebuildNode(node, builder, testRunners, mock.RateLimiter{}, false, &wg, reportChan)
wg.Wait()
close(reportChan)

Expand All @@ -50,7 +50,7 @@ func Test_Rebuild_BuildAndTest(t *testing.T) {
reportChan := make(chan dib.BuildReport, 1)
wg := sync.WaitGroup{}
wg.Add(1)
dib.RebuildNode(node, builder, testRunners, mock.RateLimiter{}, "new-123", false, &wg, reportChan)
dib.RebuildNode(node, builder, testRunners, mock.RateLimiter{}, false, &wg, reportChan)
wg.Wait()
close(reportChan)

Expand All @@ -75,7 +75,7 @@ func Test_Rebuild_TestOnly(t *testing.T) {
reportChan := make(chan dib.BuildReport, 1)
wg := sync.WaitGroup{}
wg.Add(1)
dib.RebuildNode(node, builder, testRunners, mock.RateLimiter{}, "new-123", false, &wg, reportChan)
dib.RebuildNode(node, builder, testRunners, mock.RateLimiter{}, false, &wg, reportChan)
wg.Wait()
close(reportChan)

Expand Down Expand Up @@ -103,7 +103,7 @@ func Test_Rebuild_TestNotSupported(t *testing.T) {
reportChan := make(chan dib.BuildReport, 1)
wg := sync.WaitGroup{}
wg.Add(1)
dib.RebuildNode(node, builder, testRunners, mock.RateLimiter{}, "new-123", false, &wg, reportChan)
dib.RebuildNode(node, builder, testRunners, mock.RateLimiter{}, false, &wg, reportChan)
wg.Wait()
close(reportChan)

Expand Down Expand Up @@ -132,7 +132,7 @@ func Test_Rebuild_TestError(t *testing.T) {
reportChan := make(chan dib.BuildReport, 1)
wg := sync.WaitGroup{}
wg.Add(1)
dib.RebuildNode(node, builder, testRunners, mock.RateLimiter{}, "new-123", false, &wg, reportChan)
dib.RebuildNode(node, builder, testRunners, mock.RateLimiter{}, false, &wg, reportChan)
wg.Wait()
close(reportChan)

Expand Down
73 changes: 54 additions & 19 deletions dib/plan.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,27 @@ import (
const dockerignore = ".dockerignore"

// Plan decides which actions need to be performed on each image.
func Plan(graph *dag.DAG, registry types.DockerRegistry, diffs []string,
oldTag, newTag string, forceRebuild, testsEnabled bool,
func Plan(graph *dag.DAG, registry types.DockerRegistry, diffs []string, oldTag, newTag string,
releaseMode, forceRebuild, testsEnabled bool,
) error {
// Populate CurrentTag, TargetTag and ExtraTags for all images in the graph.
graph.Walk(func(node *dag.Node) {
img := node.Image
img.CurrentTag = oldTag
img.TargetTag = newTag

if img.Dockerfile == nil || img.Dockerfile.Labels == nil {
return
}

extraTagsLabel, hasLabel := img.Dockerfile.Labels["dib.extra-tags"]
if !hasLabel {
return
}

node.Image.ExtraTags = append(node.Image.ExtraTags, strings.Split(extraTagsLabel, ",")...)
})

if forceRebuild {
logrus.Info("force rebuild mode enabled, all images will be rebuild regardless of their changes")
graph.Walk(func(node *dag.Node) {
Expand All @@ -38,14 +56,23 @@ func Plan(graph *dag.DAG, registry types.DockerRegistry, diffs []string,
return err
}

if err = checkAlreadyBuilt(graph, currentTagExistsMap, newTag); err != nil {
err = checkNeedsRebuild(graph, previousTagExistsMap, oldTag)
if err != nil {
return err
}

if err = checkNeedsRetag(graph, currentTagExistsMap, previousTagExistsMap, oldTag, newTag); err != nil {
err = checkAlreadyBuilt(graph, currentTagExistsMap, newTag)
if err != nil {
return err
}

if releaseMode { // In release mode, we retag all images.
err = checkNeedsRetag(graph, currentTagExistsMap, newTag)
if err != nil {
return err
}
}

if !testsEnabled {
return nil
}
Expand Down Expand Up @@ -130,6 +157,26 @@ func tagForRebuildFunc(node *dag.Node) {
node.Image.NeedsRebuild = true
}

// checkNeedsRebuild iterates over the graph to find out which images
// can't be found with the former tag, and must be rebuilt.
func checkNeedsRebuild(graph *dag.DAG, previousTagExistsMap *sync.Map, oldTag string) error {
	return graph.WalkErr(func(node *dag.Node) error {
		img := node.Image
		ref := img.DockerRef(oldTag)

		value, found := previousTagExistsMap.Load(ref)
		if !found {
			return fmt.Errorf("could not find ref %s in previousTagExists map", ref)
		}

		if !value.(bool) { //nolint:forcetypeassert
			// The previous tag is gone from the registry: this image and every
			// image depending on it must be rebuilt from scratch.
			logrus.Warnf("Previous tag \"%s:%s\" missing, image must be rebuilt", img.Name, oldTag)
			node.Walk(tagForRebuildFunc)
			return nil
		}

		logrus.Debugf("Previous tag \"%s:%s\" exists, no rebuild required", img.Name, oldTag)
		return nil
	})
}

// checkAlreadyBuilt iterates over the graph to find out which images
// already exist in the new version, so they don't need to be built again.
func checkAlreadyBuilt(graph *dag.DAG, currentTagExistsMap *sync.Map, newTag string) error {
Expand Down Expand Up @@ -160,8 +207,7 @@ func checkAlreadyBuilt(graph *dag.DAG, currentTagExistsMap *sync.Map, newTag str

// checkNeedsRetag iterates over the graph to find out which images need
// to be tagged with the new tag from the latest version.
func checkNeedsRetag(graph *dag.DAG, currentTagExistsMap, previousTagExistsMap *sync.Map, oldTag string, newTag string,
) error {
func checkNeedsRetag(graph *dag.DAG, currentTagExistsMap *sync.Map, newTag string) error {
return graph.WalkErr(func(node *dag.Node) error {
img := node.Image
if img.NeedsRebuild {
Expand All @@ -178,19 +224,8 @@ func checkNeedsRetag(graph *dag.DAG, currentTagExistsMap, previousTagExistsMap *
return nil
}

previousTagExists, present := previousTagExistsMap.Load(img.DockerRef(oldTag))
if !present {
return fmt.Errorf("could not find ref %s in previousTagExists map", img.DockerRef(oldTag))
}
if previousTagExists.(bool) { //nolint:forcetypeassert
logrus.Debugf("Previous tag \"%s:%s\" exists, image will be retagged", img.Name, oldTag)
img.NeedsRetag = true
return nil
}

logrus.Warnf("Previous tag \"%s:%s\" missing, image will be rebuilt", img.Name, oldTag)
node.Walk(tagForRebuildFunc)

logrus.Debugf("Current tag \"%s:%s\" does not exist, image will be tagged", img.Name, newTag)
img.NeedsRetag = true
return nil
})
}
Expand Down
Loading

0 comments on commit c1f0874

Please sign in to comment.