From b17c9189c3c40b00c36715b55e6f2e4f44663b22 Mon Sep 17 00:00:00 2001
From: istae <14264581+istae@users.noreply.github.com>
Date: Sun, 20 Oct 2024 00:53:53 +0300
Subject: [PATCH 1/4] fix: upload requests should check for non-200 responses

---
 pkg/bee/api/api.go | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/pkg/bee/api/api.go b/pkg/bee/api/api.go
index 4a86d5e5e..bf33c9c12 100644
--- a/pkg/bee/api/api.go
+++ b/pkg/bee/api/api.go
@@ -236,15 +236,20 @@ func (c *Client) requestWithHeader(ctx context.Context, method, path string, hea
 		return err
 	}
 
+	if err = responseErrorHandler(r); err != nil {
+		return err
+	}
+
 	if v != nil && strings.Contains(r.Header.Get("Content-Type"), "application/json") {
-		_ = json.NewDecoder(r.Body).Decode(&v)
+		if err := json.NewDecoder(r.Body).Decode(&v); err != nil {
+			return err
+		}
 		for _, parser := range headerParser {
 			parser(r.Header)
 		}
-		return err
 	}
 
-	return err
+	return nil
 }
 
 // drain discards all of the remaining data from the reader and closes it,
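
The fix calls `responseErrorHandler`, which already exists in `pkg/bee/api` but is not shown in this diff, so its exact behaviour is an assumption here. A minimal sketch of the pattern the patch wires in, with illustrative names and limits rather than the repo's real implementation:

```go
package api

import (
	"fmt"
	"io"
	"net/http"
)

// errorFromResponse is a stand-in for responseErrorHandler: accept any 2xx
// status, otherwise fold a bounded slice of the body into the error.
func errorFromResponse(r *http.Response) error {
	if r.StatusCode >= 200 && r.StatusCode < 300 {
		return nil
	}
	body, _ := io.ReadAll(io.LimitReader(r.Body, 4096)) // keep errors readable
	return fmt.Errorf("unexpected status %s: %s", r.Status, string(body))
}
```

Note the second effect of this hunk: JSON decode failures used to be discarded with `_ =` and now propagate to the caller as well.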
From 7a46c3853175293c24e0ec99c6bcbef38d9bcef4 Mon Sep 17 00:00:00 2001
From: istae <14264581+istae@users.noreply.github.com>
Date: Mon, 21 Oct 2024 15:49:59 +0300
Subject: [PATCH 2/4] fix: get or create batches based on label and
 utilization

---
 pkg/bee/client.go                      | 7 +++++--
 pkg/check/act/act.go                   | 2 +-
 pkg/check/manifest/manifest.go         | 2 +-
 pkg/check/networkavailability/check.go | 2 +-
 pkg/check/pss/pss.go                   | 2 +-
 pkg/check/pushsync/check_chunks.go     | 2 +-
 pkg/check/pushsync/check_lightnode.go  | 4 ++--
 pkg/check/pushsync/pushsync.go         | 2 +-
 pkg/check/redundancy/redundancy.go     | 2 +-
 pkg/check/settlements/settlements.go   | 2 +-
 pkg/check/smoke/load.go                | 2 +-
 pkg/check/smoke/smoke.go               | 2 +-
 pkg/check/soc/soc.go                   | 2 +-
 pkg/simulate/retrieval/retrieval.go    | 2 +-
 pkg/simulate/upload/upload.go          | 2 +-
 pkg/test/node.go                       | 2 +-
 16 files changed, 21 insertions(+), 18 deletions(-)

diff --git a/pkg/bee/client.go b/pkg/bee/client.go
index e362c11e4..ea97add60 100644
--- a/pkg/bee/client.go
+++ b/pkg/bee/client.go
@@ -432,7 +432,7 @@ func (c *Client) CreatePostageBatch(ctx context.Context, amount int64, depth uin
 	return id, nil
 }
-func (c *Client) GetOrCreateBatch(ctx context.Context, amount int64, depth uint64, label string) (string, error) {
+func (c *Client) GetOrCreateMutableBatch(ctx context.Context, amount int64, depth uint64, label string) (string, error) {
 	batches, err := c.PostageBatches(ctx)
 	if err != nil {
 		return "", err
 	}
@@ -445,8 +445,11 @@ func (c *Client) GetOrCreateMutableBatch(ctx context.Context, amount int64, depth uint6
 		if b.ImmutableFlag { // skip immutable batches
 			continue
 		}
+		if b.Label != label {
+			continue
+		}
 
-		if b.Usable && (b.BatchTTL == -1 || b.BatchTTL > 0) {
+		if b.Usable && (b.BatchTTL == -1 || b.BatchTTL > 0) && b.Utilization < (1<<(b.Depth-b.BucketDepth)) {
 			return b.BatchID, nil
 		}
 	}
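
The new reuse condition in `GetOrCreateMutableBatch` is the heart of this series. In Swarm postage accounting, a batch of depth `d` carries `2^d` stamp slots split across `2^bucketDepth` collision buckets, so each bucket holds `2^(d-bucketDepth)` slots, and the utilization figure the node reports tracks the fullest bucket. The guard `b.Utilization < (1<<(b.Depth-b.BucketDepth))` therefore rejects batches with no free slot left in their tightest bucket. A worked example with typical values (illustrative, not taken from the repo):

```go
package main

import "fmt"

func main() {
	const (
		depth       = 22 // batch carries 1<<22 = 4,194,304 stamp slots in total
		bucketDepth = 16 // bee uses 16, giving 1<<16 = 65,536 collision buckets
	)
	perBucket := 1 << (depth - bucketDepth) // 64 slots per bucket
	fmt.Println("per-bucket capacity:", perBucket)

	utilization := 64 // fullest bucket is at capacity
	fmt.Println("reusable:", utilization < perBucket) // false: create a new batch
}
```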
diff --git a/pkg/check/act/act.go b/pkg/check/act/act.go
index ac782cb82..f92430644 100644
--- a/pkg/check/act/act.go
+++ b/pkg/check/act/act.go
@@ -95,7 +95,7 @@ func (c *Check) Run(ctx context.Context, cluster orchestration.Cluster, opts int
 	file := bee.NewRandomFile(rnds[0], fileName, o.FileSize)
 
-	batchID, err := upClient.GetOrCreateBatch(ctx, o.PostageAmount, o.PostageDepth, postagelabel)
+	batchID, err := upClient.GetOrCreateMutableBatch(ctx, o.PostageAmount, o.PostageDepth, postagelabel)
 	if err != nil {
 		return fmt.Errorf("created batched id %w", err)
 	}
 
diff --git a/pkg/check/manifest/manifest.go b/pkg/check/manifest/manifest.go
index cad39a6da..9c06c2a48 100644
--- a/pkg/check/manifest/manifest.go
+++ b/pkg/check/manifest/manifest.go
@@ -96,7 +96,7 @@ func (c *Check) Run(ctx context.Context, cluster orchestration.Cluster, opts int
 	client := clients[node]
 
-	batchID, err := client.GetOrCreateBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
+	batchID, err := client.GetOrCreateMutableBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
 	if err != nil {
 		return fmt.Errorf("node %s: batch id %w", node, err)
 	}
 
diff --git a/pkg/check/networkavailability/check.go b/pkg/check/networkavailability/check.go
index 6f964823f..7908e9d92 100644
--- a/pkg/check/networkavailability/check.go
+++ b/pkg/check/networkavailability/check.go
@@ -101,7 +101,7 @@ iteration:
 		var chunks []swarm.Chunk
 
 		for _, n := range neighborhoods(int(storageRadius)) {
-			batch, err := uploadClient.GetOrCreateBatch(ctx, o.PostageAmount, o.PostageDepth, "net-avail-check")
+			batch, err := uploadClient.GetOrCreateMutableBatch(ctx, o.PostageAmount, o.PostageDepth, "net-avail-check")
 			if err != nil {
 				c.logger.Errorf("create batch failed failed")
 				continue iteration
diff --git a/pkg/check/pss/pss.go b/pkg/check/pss/pss.go
index b6bce4626..2a727e5f4 100644
--- a/pkg/check/pss/pss.go
+++ b/pkg/check/pss/pss.go
@@ -122,7 +122,7 @@ func (c *Check) testPss(nodeAName, nodeBName string, clients map[string]*bee.Cli
 		return err
 	}
 
-	batchID, err := nodeA.GetOrCreateBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
+	batchID, err := nodeA.GetOrCreateMutableBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
 	if err != nil {
 		cancel()
 		return fmt.Errorf("node %s: batched id %w", nodeAName, err)
diff --git a/pkg/check/pushsync/check_chunks.go b/pkg/check/pushsync/check_chunks.go
index eafc397a2..72508b838 100644
--- a/pkg/check/pushsync/check_chunks.go
+++ b/pkg/check/pushsync/check_chunks.go
@@ -38,7 +38,7 @@ func checkChunks(ctx context.Context, c orchestration.Cluster, o Options, l logg
 	uploader := clients[nodeName]
 
-	batchID, err := uploader.GetOrCreateBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
+	batchID, err := uploader.GetOrCreateMutableBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
 	if err != nil {
 		return fmt.Errorf("node %s: batch id %w", nodeName, err)
 	}
 
diff --git a/pkg/check/pushsync/check_lightnode.go b/pkg/check/pushsync/check_lightnode.go
index f1dd73d8e..47246e4ff 100644
--- a/pkg/check/pushsync/check_lightnode.go
+++ b/pkg/check/pushsync/check_lightnode.go
@@ -34,7 +34,7 @@ func checkLightChunks(ctx context.Context, cluster orchestration.Cluster, o Opti
 	// prepare postage batches
 	for i := 0; i < len(lightNodes); i++ {
 		nodeName := lightNodes[i]
-		batchID, err := clients[nodeName].GetOrCreateBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
+		batchID, err := clients[nodeName].GetOrCreateMutableBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
 		if err != nil {
 			return fmt.Errorf("node %s: batch id %w", nodeName, err)
 		}
@@ -46,7 +46,7 @@ func checkLightChunks(ctx context.Context, cluster orchestration.Cluster, o Opti
 		nodeName := lightNodes[i]
 		uploader := clients[nodeName]
 
-		batchID, err := uploader.GetOrCreateBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
+		batchID, err := uploader.GetOrCreateMutableBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
 		if err != nil {
 			return fmt.Errorf("node %s: batch id %w", nodeName, err)
 		}
diff --git a/pkg/check/pushsync/pushsync.go b/pkg/check/pushsync/pushsync.go
index e2c63753b..5045b8821 100644
--- a/pkg/check/pushsync/pushsync.go
+++ b/pkg/check/pushsync/pushsync.go
@@ -99,7 +99,7 @@ func (c *Check) defaultCheck(ctx context.Context, cluster orchestration.Cluster,
 		nodeName := sortedNodes[i]
 		client := clients[nodeName]
 
-		batchID, err := client.GetOrCreateBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
+		batchID, err := client.GetOrCreateMutableBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
 		if err != nil {
 			return fmt.Errorf("node %s: batch id %w", nodeName, err)
 		}
diff --git a/pkg/check/redundancy/redundancy.go b/pkg/check/redundancy/redundancy.go
index 847c82e94..1981346c0 100644
--- a/pkg/check/redundancy/redundancy.go
+++ b/pkg/check/redundancy/redundancy.go
@@ -77,7 +77,7 @@ func (c *Check) Run(ctx context.Context, cluster orchestration.Cluster, o interf
 	}
 	c.logger.Infof("root hash: %s, chunks: %d", root.String(), len(chunks))
 
-	batchID, err := uploadClient.GetOrCreateBatch(ctx, opts.PostageAmount, opts.PostageDepth, "ci-redundancy")
+	batchID, err := uploadClient.GetOrCreateMutableBatch(ctx, opts.PostageAmount, opts.PostageDepth, "ci-redundancy")
 	if err != nil {
 		return fmt.Errorf("get or create batch: %w", err)
 	}
diff --git a/pkg/check/settlements/settlements.go b/pkg/check/settlements/settlements.go
index 03cb1ba41..613d086d7 100644
--- a/pkg/check/settlements/settlements.go
+++ b/pkg/check/settlements/settlements.go
@@ -120,7 +120,7 @@ func (c *Check) Run(ctx context.Context, cluster orchestration.Cluster, opts int
 		client := clients[uNode]
 		c.logger.Info("node", uNode)
 
-		batchID, err := client.GetOrCreateBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
+		batchID, err := client.GetOrCreateMutableBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
 		if err != nil {
 			return fmt.Errorf("node %s: batch id %w", uNode, err)
 		}
diff --git a/pkg/check/smoke/load.go b/pkg/check/smoke/load.go
index 43ee0ef5e..bda5718d7 100644
--- a/pkg/check/smoke/load.go
+++ b/pkg/check/smoke/load.go
@@ -130,7 +130,7 @@ func (c *LoadCheck) Run(ctx context.Context, cluster orchestration.Cluster, opts
 					batchID := batches.Get(txName)
 					if batchID == "" {
-						batchID, err = clients[txName].CreatePostageBatch(ctx, o.PostageAmount, o.PostageDepth, "load-test", true)
+						batchID, err = clients[txName].GetOrCreateMutableBatch(ctx, o.PostageAmount, o.PostageDepth, "load-test")
 						if err != nil {
 							c.log.Errorf("create new batch: %v", err)
 							return
diff --git a/pkg/check/smoke/smoke.go b/pkg/check/smoke/smoke.go
index 195dc1596..34cd1751c 100644
--- a/pkg/check/smoke/smoke.go
+++ b/pkg/check/smoke/smoke.go
@@ -156,7 +156,7 @@ func (c *Check) Run(ctx context.Context, cluster orchestration.Cluster, opts int
 
 				c.metrics.BatchCreateAttempts.Inc()
 
-				batchID, err = clients[txName].GetOrCreateBatch(txCtx, o.PostageAmount, o.PostageDepth, "smoke-test")
+				batchID, err = clients[txName].GetOrCreateMutableBatch(txCtx, o.PostageAmount, o.PostageDepth, "smoke-test")
 				if err != nil {
 					c.logger.Errorf("create new batch: %v", err)
 					c.metrics.BatchCreateErrors.Inc()
diff --git a/pkg/check/soc/soc.go b/pkg/check/soc/soc.go
index 38090b171..30be29ab7 100644
--- a/pkg/check/soc/soc.go
+++ b/pkg/check/soc/soc.go
@@ -110,7 +110,7 @@ func (c *Check) Run(ctx context.Context, cluster orchestration.Cluster, opts int
 	id := hex.EncodeToString(idBytes)
 	sig := hex.EncodeToString(signatureBytes)
 
-	batchID, err := node.GetOrCreateBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
+	batchID, err := node.GetOrCreateMutableBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
 	if err != nil {
 		return fmt.Errorf("node %s: batch id %w", nodeName, err)
 	}
diff --git a/pkg/simulate/retrieval/retrieval.go b/pkg/simulate/retrieval/retrieval.go
index 3b64c4ffb..bb51686ae 100644
--- a/pkg/simulate/retrieval/retrieval.go
+++ b/pkg/simulate/retrieval/retrieval.go
@@ -84,7 +84,7 @@ func (s *Simulation) Run(ctx context.Context, cluster orchestration.Cluster, opt
 			nodeName := sortedNodes[i]
 			client := clients[nodeName]
 
-			batchID, err := client.GetOrCreateBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
+			batchID, err := client.GetOrCreateMutableBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
 			if err != nil {
 				s.logger.Infof("error: node %s: batch id %v", nodeName, err)
 				continue
diff --git a/pkg/simulate/upload/upload.go b/pkg/simulate/upload/upload.go
index 095924127..c8f2974e7 100644
--- a/pkg/simulate/upload/upload.go
+++ b/pkg/simulate/upload/upload.go
@@ -162,7 +162,7 @@ func (s *Simulation) Run(ctx context.Context, cluster orchestration.Cluster, opt
 				return ctx.Err()
 			}
 
-			batchID, err = c.GetOrCreateBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
+			batchID, err = c.GetOrCreateMutableBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
 			if err != nil {
 				if errors.Is(ctx.Err(), context.DeadlineExceeded) {
 					return nil
diff --git a/pkg/test/node.go b/pkg/test/node.go
index 1503753d3..db395269a 100644
--- a/pkg/test/node.go
+++ b/pkg/test/node.go
@@ -79,7 +79,7 @@ func (b *BeeV2) ExpectToHaveFile(ctx context.Context, file File) error {
 func (b *BeeV2) NewChunkUploader(ctx context.Context) (*ChunkUploader, error) {
 	o := b.opts
 
-	batchID, err := b.client.GetOrCreateBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
+	batchID, err := b.client.GetOrCreateMutableBatch(ctx, o.PostageAmount, o.PostageDepth, o.PostageLabel)
 	if err != nil {
 		return nil, fmt.Errorf("node %s: batch id %w", b.name, err)
 	}
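
Every call site above is a mechanical rename, but the behavioural change deserves a sentence: because the lookup now also matches on label, two checks that pass the same label share one mutable batch until it expires or fills up, while different labels never collide. A hedged sketch of that contract (illustrative amount and depth; `bee.Client` is this repo's node client):

```go
package example

import (
	"context"

	"github.com/ethersphere/beekeeper/pkg/bee"
)

// sameBatch returns true while the "smoke-test" batch is still usable,
// unexpired, and under capacity: both calls then resolve to one batch ID.
func sameBatch(ctx context.Context, client *bee.Client) (bool, error) {
	a, err := client.GetOrCreateMutableBatch(ctx, 100_000_000, 22, "smoke-test")
	if err != nil {
		return false, err
	}
	b, err := client.GetOrCreateMutableBatch(ctx, 100_000_000, 22, "smoke-test")
	if err != nil {
		return false, err
	}
	return a == b, nil
}
```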
From f4fbfcab130d9246ef83b8673ecfaee12b075ef9 Mon Sep 17 00:00:00 2001
From: istae <14264581+istae@users.noreply.github.com>
Date: Mon, 21 Oct 2024 15:58:27 +0300
Subject: [PATCH 3/4] fix: load check batch reuse and logging

---
 pkg/check/smoke/batchstore.go | 46 ----------------------
 pkg/check/smoke/load.go       | 74 +++++++++++++++++------------------
 2 files changed, 35 insertions(+), 85 deletions(-)
 delete mode 100644 pkg/check/smoke/batchstore.go

diff --git a/pkg/check/smoke/batchstore.go b/pkg/check/smoke/batchstore.go
deleted file mode 100644
index e25d25e80..000000000
--- a/pkg/check/smoke/batchstore.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package smoke
-
-import (
-	"sync"
-	"time"
-)
-
-type batch struct {
-	batchID string
-	expires time.Time
-}
-
-type store struct {
-	mtx     sync.Mutex
-	batches map[string]batch
-	maxDur  time.Duration
-}
-
-func NewStore(dur time.Duration) *store {
-
-	return &store{
-		batches: map[string]batch{},
-		maxDur:  dur,
-	}
-}
-
-func (s *store) Get(key string) string {
-	s.mtx.Lock()
-	defer s.mtx.Unlock()
-
-	if b, ok := s.batches[key]; ok {
-		if time.Now().After(b.expires) {
-			delete(s.batches, key)
-			return ""
-		}
-		return b.batchID
-	}
-
-	return ""
-}
-
-func (s *store) Store(key, batchID string) {
-	s.mtx.Lock()
-	defer s.mtx.Unlock()
-	s.batches[key] = batch{batchID: batchID, expires: time.Now().Add(s.maxDur)}
-}
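
The deleted store evicted entries on wall-clock time alone, which is the failure mode this series removes: a batch that filled up long before `maxDur` elapsed kept being served from the cache, and every upload stamped against it failed. A stripped-down illustration of that flaw (simplified types, not repo code):

```go
package example

import "time"

type cachedBatch struct {
	batchID     string
	expires     time.Time
	utilization int // the old cache never consulted this
}

// get mirrors the removed store's logic: only the clock evicts an entry,
// so a batch whose buckets are already full is still handed out.
func get(c cachedBatch, now time.Time) string {
	if now.After(c.expires) {
		return ""
	}
	return c.batchID // may point at a batch with zero free slots
}
```

The load.go changes that follow drop this cache and ask the node for a suitable batch on every iteration instead.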
diff --git a/pkg/check/smoke/load.go b/pkg/check/smoke/load.go
index bda5718d7..d31775810 100644
--- a/pkg/check/smoke/load.go
+++ b/pkg/check/smoke/load.go
@@ -26,14 +26,14 @@ var _ beekeeper.Action = (*LoadCheck)(nil)
 
 // Check instance
 type LoadCheck struct {
 	metrics metrics
-	log     logging.Logger
+	logger  logging.Logger
 }
 
 // NewCheck returns new check
 func NewLoadCheck(log logging.Logger) beekeeper.Action {
 	return &LoadCheck{
 		metrics: newMetrics("check_load"),
-		log:     log,
+		logger:  log,
 	}
 }
@@ -52,11 +52,11 @@ func (c *LoadCheck) Run(ctx context.Context, cluster orchestration.Cluster, opts
 		return errors.New("max storage radius is not set")
 	}
 
-	c.log.Infof("random seed: %v", o.RndSeed)
-	c.log.Infof("content size: %v", o.ContentSize)
-	c.log.Infof("max batch lifespan: %v", o.MaxUseBatch)
-	c.log.Infof("max storage radius: %v", o.MaxStorageRadius)
-	c.log.Infof("storage radius check wait time: %v", o.StorageRadiusCheckWait)
+	c.logger.Infof("random seed: %v", o.RndSeed)
+	c.logger.Infof("content size: %v", o.ContentSize)
+	c.logger.Infof("max batch lifespan: %v", o.MaxUseBatch)
+	c.logger.Infof("max storage radius: %v", o.MaxStorageRadius)
+	c.logger.Infof("storage radius check wait time: %v", o.StorageRadiusCheckWait)
 
 	clients, err := cluster.NodesClients(ctx)
 	if err != nil {
@@ -66,20 +66,18 @@ func (c *LoadCheck) Run(ctx context.Context, cluster orchestration.Cluster, opts
 	ctx, cancel := context.WithTimeout(ctx, o.Duration)
 	defer cancel()
 
-	test := &test{clients: clients, logger: c.log}
+	test := &test{clients: clients, logger: c.logger}
 
 	uploaders := selectNames(cluster, o.UploadGroups...)
 	downloaders := selectNames(cluster, o.DownloadGroups...)
 
-	batches := NewStore(o.MaxUseBatch)
-
 	for i := 0; true; i++ {
 		select {
 		case <-ctx.Done():
-			c.log.Info("we are done")
+			c.logger.Info("we are done")
 			return nil
 		default:
-			c.log.Infof("starting iteration: #%d", i)
+			c.logger.Infof("starting iteration: #%d", i)
 		}
 
 		var (
@@ -90,13 +88,13 @@ func (c *LoadCheck) Run(ctx context.Context, cluster orchestration.Cluster, opts
 
 		txData = make([]byte, o.ContentSize)
 
 		if _, err := crand.Read(txData); err != nil {
-			c.log.Infof("unable to create random content: %v", err)
+			c.logger.Infof("unable to create random content: %v", err)
 			continue
 		}
 
 		txNames := pickRandom(o.UploaderCount, uploaders)
-		c.log.Infof("uploader: %s", txNames)
+		c.logger.Infof("uploader: %s", txNames)
 
 		var (
 			upload sync.WaitGroup
@@ -115,7 +113,7 @@ func (c *LoadCheck) Run(ctx context.Context, cluster orchestration.Cluster, opts
 				for retries := 10; txDuration == 0 && retries > 0; retries-- {
 					select {
 					case <-ctx.Done():
-						c.log.Info("we are done")
+						c.logger.Info("we are done")
 						return
 					default:
 					}
@@ -126,23 +124,21 @@ func (c *LoadCheck) Run(ctx context.Context, cluster orchestration.Cluster, opts
 					c.metrics.UploadAttempts.Inc()
 
 					var duration time.Duration
-					c.log.Infof("uploading to: %s", txName)
-
-					batchID := batches.Get(txName)
-					if batchID == "" {
-						batchID, err = clients[txName].GetOrCreateMutableBatch(ctx, o.PostageAmount, o.PostageDepth, "load-test")
-						if err != nil {
-							c.log.Errorf("create new batch: %v", err)
-							return
-						}
-						batches.Store(txName, batchID)
+					c.logger.Infof("uploading to: %s", txName)
+
+					batchID, err := clients[txName].GetOrCreateMutableBatch(ctx, o.PostageAmount, o.PostageDepth, "load-test")
+					if err != nil {
+						c.logger.Errorf("create new batch: %v", err)
+						continue
 					}
 
+					c.logger.Info("using batch", "batch_id", batchID)
+
 					address, duration, err = test.upload(ctx, txName, txData, batchID)
 					if err != nil {
 						c.metrics.UploadErrors.Inc()
-						c.log.Infof("upload failed: %v", err)
-						c.log.Infof("retrying in: %v", o.TxOnErrWait)
+						c.logger.Infof("upload failed: %v", err)
+						c.logger.Infof("retrying in: %v", o.TxOnErrWait)
 						time.Sleep(o.TxOnErrWait)
 						return
 					}
@@ -157,12 +153,12 @@ func (c *LoadCheck) Run(ctx context.Context, cluster orchestration.Cluster, opts
 			continue
 		}
 
-		c.log.Infof("sleeping for: %v seconds", o.NodesSyncWait.Seconds())
+		c.logger.Infof("sleeping for: %v seconds", o.NodesSyncWait.Seconds())
 		time.Sleep(o.NodesSyncWait) // Wait for nodes to sync.
 
 		// pick a batch of downloaders
 		rxNames := pickRandom(o.DownloaderCount, downloaders)
-		c.log.Infof("downloaders: %s", rxNames)
+		c.logger.Infof("downloaders: %s", rxNames)
 
 		var wg sync.WaitGroup
 
@@ -180,7 +176,7 @@ func (c *LoadCheck) Run(ctx context.Context, cluster orchestration.Cluster, opts
 				for retries := 10; rxDuration == 0 && retries > 0; retries-- {
 					select {
 					case <-ctx.Done():
-						c.log.Infof("context done in retry: %v", retries)
+						c.logger.Infof("context done in retry: %v", retries)
 						return
 					default:
 					}
@@ -190,8 +186,8 @@ func (c *LoadCheck) Run(ctx context.Context, cluster orchestration.Cluster, opts
 					rxData, rxDuration, err = test.download(ctx, rxName, address)
 					if err != nil {
 						c.metrics.DownloadErrors.Inc()
-						c.log.Infof("download failed: %v", err)
-						c.log.Infof("retrying in: %v", o.RxOnErrWait)
+						c.logger.Infof("download failed: %v", err)
+						c.logger.Infof("retrying in: %v", o.RxOnErrWait)
 						time.Sleep(o.RxOnErrWait)
 					}
 				}
@@ -202,15 +198,15 @@ func (c *LoadCheck) Run(ctx context.Context, cluster orchestration.Cluster, opts
 			}
 
 			if !bytes.Equal(rxData, txData) {
-				c.log.Info("uploaded data does not match downloaded data")
+				c.logger.Info("uploaded data does not match downloaded data")
 				c.metrics.DownloadMismatch.Inc()
 
 				rxLen, txLen := len(rxData), len(txData)
 				if rxLen != txLen {
-					c.log.Infof("length mismatch: download length %d; upload length %d", rxLen, txLen)
+					c.logger.Infof("length mismatch: download length %d; upload length %d", rxLen, txLen)
 					if txLen < rxLen {
-						c.log.Info("length mismatch: rx length is bigger then tx length")
+						c.logger.Info("length mismatch: rx length is bigger then tx length")
 					}
 					return
 				}
 
@@ -221,7 +217,7 @@ func (c *LoadCheck) Run(ctx context.Context, cluster orchestration.Cluster, opts
 						diff++
 					}
 				}
-				c.log.Infof("data mismatch: found %d different bytes, ~%.2f%%", diff, float64(diff)/float64(txLen)*100)
+				c.logger.Infof("data mismatch: found %d different bytes, ~%.2f%%", diff, float64(diff)/float64(txLen)*100)
 				return
 			}
 
@@ -242,17 +238,17 @@ func (c *LoadCheck) checkStorageRadius(ctx context.Context, client *bee.Client,
 	for {
 		rs, err := client.ReserveState(ctx)
 		if err != nil {
-			c.log.Infof("error getting state: %v", err)
+			c.logger.Infof("error getting state: %v", err)
 			return false
 		}
 		if rs.StorageRadius < maxRadius {
 			return true
 		}
-		c.log.Infof("waiting %v for StorageRadius to decrease. Current: %d, Max: %d", wait, rs.StorageRadius, maxRadius)
+		c.logger.Infof("waiting %v for StorageRadius to decrease. Current: %d, Max: %d", wait, rs.StorageRadius, maxRadius)
 		select {
 		case <-ctx.Done():
-			c.log.Infof("context done in StorageRadius check: %v", ctx.Err())
+			c.logger.Infof("context done in StorageRadius check: %v", ctx.Err())
 			return false
 		case <-time.After(wait):
 		}
 	}
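
One line added by this patch deserves a second look: `c.logger.Info("using batch", "batch_id", batchID)` is written in structured key/value style, but if beekeeper's `logging.Logger` is print-style (an assumption, since its definition is outside this diff), the arguments are simply concatenated rather than rendered as fields. The standard library shows the same behaviour:

```go
package main

import "log"

func main() {
	batchID := "4a86d5e5"
	log.Print("using batch", "batch_id", batchID) // "using batchbatch_id4a86d5e5"
	log.Printf("using batch: %s", batchID)        // "using batch: 4a86d5e5"
}
```

If the logger is indeed print-style, an `Infof` call with a format string may be what was intended here.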
c.logger.Infof("sleeping for: %v seconds", o.NodesSyncWait.Seconds()) time.Sleep(o.NodesSyncWait) // Wait for nodes to sync. // pick a batch of downloaders rxNames := pickRandom(o.DownloaderCount, downloaders) - c.log.Infof("downloaders: %s", rxNames) + c.logger.Infof("downloaders: %s", rxNames) var wg sync.WaitGroup @@ -180,7 +176,7 @@ func (c *LoadCheck) Run(ctx context.Context, cluster orchestration.Cluster, opts for retries := 10; rxDuration == 0 && retries > 0; retries-- { select { case <-ctx.Done(): - c.log.Infof("context done in retry: %v", retries) + c.logger.Infof("context done in retry: %v", retries) return default: } @@ -190,8 +186,8 @@ func (c *LoadCheck) Run(ctx context.Context, cluster orchestration.Cluster, opts rxData, rxDuration, err = test.download(ctx, rxName, address) if err != nil { c.metrics.DownloadErrors.Inc() - c.log.Infof("download failed: %v", err) - c.log.Infof("retrying in: %v", o.RxOnErrWait) + c.logger.Infof("download failed: %v", err) + c.logger.Infof("retrying in: %v", o.RxOnErrWait) time.Sleep(o.RxOnErrWait) } } @@ -202,15 +198,15 @@ func (c *LoadCheck) Run(ctx context.Context, cluster orchestration.Cluster, opts } if !bytes.Equal(rxData, txData) { - c.log.Info("uploaded data does not match downloaded data") + c.logger.Info("uploaded data does not match downloaded data") c.metrics.DownloadMismatch.Inc() rxLen, txLen := len(rxData), len(txData) if rxLen != txLen { - c.log.Infof("length mismatch: download length %d; upload length %d", rxLen, txLen) + c.logger.Infof("length mismatch: download length %d; upload length %d", rxLen, txLen) if txLen < rxLen { - c.log.Info("length mismatch: rx length is bigger then tx length") + c.logger.Info("length mismatch: rx length is bigger then tx length") } return } @@ -221,7 +217,7 @@ func (c *LoadCheck) Run(ctx context.Context, cluster orchestration.Cluster, opts diff++ } } - c.log.Infof("data mismatch: found %d different bytes, ~%.2f%%", diff, float64(diff)/float64(txLen)*100) + c.logger.Infof("data mismatch: found %d different bytes, ~%.2f%%", diff, float64(diff)/float64(txLen)*100) return } @@ -242,17 +238,17 @@ func (c *LoadCheck) checkStorageRadius(ctx context.Context, client *bee.Client, for { rs, err := client.ReserveState(ctx) if err != nil { - c.log.Infof("error getting state: %v", err) + c.logger.Infof("error getting state: %v", err) return false } if rs.StorageRadius < maxRadius { return true } - c.log.Infof("waiting %v for StorageRadius to decrease. Current: %d, Max: %d", wait, rs.StorageRadius, maxRadius) + c.logger.Infof("waiting %v for StorageRadius to decrease. 

diff --git a/pkg/check/smoke/load.go b/pkg/check/smoke/load.go
index d31775810..ec1e0b2d4 100644
--- a/pkg/check/smoke/load.go
+++ b/pkg/check/smoke/load.go
@@ -129,7 +129,7 @@ func (c *LoadCheck) Run(ctx context.Context, cluster orchestration.Cluster, opts
 					batchID, err := clients[txName].GetOrCreateMutableBatch(ctx, o.PostageAmount, o.PostageDepth, "load-test")
 					if err != nil {
 						c.logger.Errorf("create new batch: %v", err)
-						continue
+						return
 					}
 
 					c.logger.Info("using batch", "batch_id", batchID)
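
This last patch flips the `continue` introduced in PATCH 3 back to `return`. The distinction matters inside the bounded retry loop: `continue` re-enters the loop immediately, and since the batch-failure path has no `TxOnErrWait` backoff, an unhealthy node would be polled up to ten times in rapid succession, whereas `return` abandons this uploader goroutine and lets the outer iteration move on. A condensed sketch of the resulting control flow (stand-in helper, not the check's real signature):

```go
package example

import "fmt"

// retryLoop shows the PATCH 4 behaviour: fail fast on a batch error.
// getBatch stands in for GetOrCreateMutableBatch on the uploader's client.
func retryLoop(getBatch func() (string, error)) error {
	for retries := 10; retries > 0; retries-- {
		batchID, err := getBatch()
		if err != nil {
			// PATCH 3 used `continue` here, re-polling with no backoff;
			// failing fast matches the pre-series behaviour.
			return fmt.Errorf("create new batch: %w", err)
		}
		fmt.Println("using batch:", batchID)
		return nil
	}
	return nil
}
```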