diff --git a/.github/actions/setup-go/action.yml b/.github/actions/setup-go/action.yml index d36e379a9..9cc2fdd9e 100644 --- a/.github/actions/setup-go/action.yml +++ b/.github/actions/setup-go/action.yml @@ -6,4 +6,4 @@ runs: - name: Setup go uses: actions/setup-go@v4 with: - go-version: '1.22.5' + go-version: '1.23' diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 8206c430c..5dbb112d6 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,6 +4,9 @@ updates: directory: "/" schedule: interval: "daily" + ignore: + - dependency-name: "github.com/herumi/bls-eth-go-binary" + update-types: ["version-update:semver-major","version-update:semver-minor"] - package-ecosystem: "docker" directories: - "/" diff --git a/.github/workflows/build-push-deploy.yml b/.github/workflows/build-push-deploy.yml index e4207501d..7e779a349 100644 --- a/.github/workflows/build-push-deploy.yml +++ b/.github/workflows/build-push-deploy.yml @@ -92,6 +92,6 @@ jobs: uses: peter-evans/repository-dispatch@v2 with: token: ${{ secrets.CHARON_K8S_REPO_ACCESS_TOKEN }} - repository: ObolNetwork/charon-k8s + repository: ObolNetwork/obol-infrastructure event-type: charon-package-published client-payload: '{"sha": "${{ github.sha }}"}' diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 9c6ba92dc..a05d7464c 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -21,7 +21,7 @@ on: - cron: '18 19 * * 6' env: - GOLANG_VERSION: '1.22' + GOLANG_VERSION: '1.23' jobs: analyze: diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 735861149..2552b6411 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -19,7 +19,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.59.1 + version: v1.61.0 - name: notify failure if: failure() && github.ref == 'refs/heads/main' env: diff --git 
a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index d7e2cbcb1..bd2beb84b 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -13,5 +13,5 @@ jobs: steps: - uses: actions/checkout@v3 - uses: ./.github/actions/setup-go - - run: go install golang.org/x/vuln/cmd/govulncheck@v1.1.0 + - run: go install golang.org/x/vuln/cmd/govulncheck@v1.1.3 - run: govulncheck -show=traces -test ./... diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8c4a36c4f..712856d65 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,15 +10,55 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 # Disable shallow checkout - - uses: ./.github/actions/setup-go - - run: go run . --help > cli-reference.txt - - run: go run testutil/genchangelog/main.go - - uses: softprops/action-gh-release@v1 - with: - draft: true - files: cli-reference.txt - body_path: changelog.md - token: ${{ secrets.RELEASE_SECRET }} + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 # Disable shallow checkout + + - name: Setup Go environment + uses: ./.github/actions/setup-go + + - name: Generate CLI reference + run: go run . 
--help > cli-reference.txt + + - name: Generate changelog + run: go run testutil/genchangelog/main.go + + - name: Create GitHub release draft + uses: softprops/action-gh-release@v1 + with: + draft: true + files: cli-reference.txt + body_path: changelog.md + token: ${{ secrets.RELEASE_SECRET }} + + trigger-dispatch: + needs: release + runs-on: ubuntu-latest + steps: + - name: Extract tag name + run: echo "TAG_NAME=${GITHUB_REF##*/}" >> $GITHUB_ENV + + - name: Trigger dispatch for obol-docs + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.OBOL_PLATFORM_PAT }} + repository: ObolNetwork/obol-docs + event-type: update-version + client-payload: '{"tag": "${{ env.TAG_NAME }}"}' + + - name: Trigger dispatch for helm-charts + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.OBOL_PLATFORM_PAT }} + repository: ObolNetwork/helm-charts + event-type: update-version + client-payload: '{"tag": "${{ env.TAG_NAME }}"}' + + - name: Trigger dispatch for obol-ansible + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.OBOL_PLATFORM_PAT }} + repository: ObolNetwork/obol-ansible + event-type: update-version + client-payload: '{"tag": "${{ env.TAG_NAME }}"}' diff --git a/.golangci.yml b/.golangci.yml index 41a679662..20a6b434a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,6 +1,6 @@ run: timeout: 5m - go: "1.22.5" + go: "1.23" linters-settings: cyclop: max-complexity: 15 @@ -116,6 +116,12 @@ linters-settings: - expected-actual go-require: ignore-http-handlers: true + gosec: + excludes: + # Flags for potentially-unsafe casting of ints, seems good, + # but currently is really unstable with no clear way to make the linter pass. 
+ # https://github.com/securego/gosec/issues/1187 + - G115 issues: fix: true @@ -145,6 +151,7 @@ linters: enable-all: true disable: # Keep disabled + - intrange - containedctx - contextcheck - cyclop @@ -158,7 +165,6 @@ linters: - gocyclo - godot - godox - - goerr113 - gomnd - gomoddirectives - inamedparam @@ -176,13 +182,5 @@ linters: - varnamelen - wsl # Deprecated - - deadcode - - exhaustivestruct - - golint - - ifshort - - interfacer - - maligned - - nosnakecase - - structcheck - - scopelint - - varcheck + - goerr113 + - execinquery diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 82f26597f..beb5019b9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,7 +15,7 @@ repos: rev: v0.0.3 hooks: - id: check-go-version - args: [ -v=go1.22 ] # Only check minor version locally + args: [ -v=go1.23 ] # Only check minor version locally pass_filenames: false additional_dependencies: [ packaging ] - id: check-licence-header diff --git a/.pre-commit/run_linter.sh b/.pre-commit/run_linter.sh index 46a738ec7..b9996f10f 100755 --- a/.pre-commit/run_linter.sh +++ b/.pre-commit/run_linter.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -VERSION="1.59.1" +VERSION="1.61.0" if ! command -v golangci-lint &> /dev/null then diff --git a/Dockerfile b/Dockerfile index f9ab8a8be..2fd16ed50 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,14 +1,18 @@ # Container for building Go binary. -FROM golang:1.22.5-bookworm AS builder +FROM golang:1.23.3-bookworm AS builder # Install dependencies -RUN apt-get update && apt-get install -y build-essential git +RUN apt-get update && apt-get install -y --no-install-recommends build-essential git + # Prep and copy source WORKDIR /app/charon + COPY . . + # Populate GO_BUILD_FLAG with a build arg to provide an optional go build flag. ARG GO_BUILD_FLAG ENV GO_BUILD_FLAG=${GO_BUILD_FLAG} RUN echo "Building with GO_BUILD_FLAG='${GO_BUILD_FLAG}'" + # Build with Go module and Go build caches. 
RUN \ --mount=type=cache,target=/go/pkg \ @@ -18,30 +22,35 @@ RUN echo "Built charon version=$(./charon version)" # Copy final binary into light stage. FROM debian:bookworm-slim -RUN apt-get update && apt-get install -y ca-certificates wget fio +RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates fio wget \ + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* ARG GITHUB_SHA=local ENV GITHUB_SHA=${GITHUB_SHA} + COPY --from=builder /app/charon/charon /usr/local/bin/ + # Don't run container as root ENV USER=charon ENV UID=1000 ENV GID=1000 -RUN addgroup --gid "$GID" "$USER" -RUN adduser \ +RUN addgroup --gid "$GID" "$USER" \ + && adduser \ --disabled-password \ --gecos "charon" \ --home "/opt/$USER" \ --ingroup "$USER" \ --no-create-home \ --uid "$UID" \ - "$USER" -RUN chown charon /usr/local/bin/charon -RUN chmod u+x /usr/local/bin/charon + "$USER" \ + && chown "$USER" /usr/local/bin/charon \ + && chmod u+x /usr/local/bin/charon + WORKDIR "/opt/$USER" -RUN chown charon "/opt/$USER" USER charon + ENTRYPOINT ["/usr/local/bin/charon"] CMD ["run"] + # Used by GitHub to associate container with repo. LABEL org.opencontainers.image.source="https://github.com/obolnetwork/charon" LABEL org.opencontainers.image.title="charon" diff --git a/README.md b/README.md index b868d6ad6..bb96bfd39 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ This repo contains the source code for the distributed validator client _Charon_ (pronounced 'kharon'); a HTTP middleware client for Ethereum Staking that enables you to safely run a single validator across a group of independent nodes. -Charon is accompanied by a webapp called the [Distributed Validator Launchpad](https://goerli.launchpad.obol.tech/) for distributed validator key creation. +Charon is accompanied by a webapp called the [Distributed Validator Launchpad](https://holesky.launchpad.obol.tech/) for distributed validator key creation. 
Charon is used by stakers to distribute the responsibility of running Ethereum Validators across a number of different instances and client implementations. diff --git a/app/eth2wrap/httpwrap.go b/app/eth2wrap/httpwrap.go index d257fa5d1..747ab546d 100644 --- a/app/eth2wrap/httpwrap.go +++ b/app/eth2wrap/httpwrap.go @@ -296,7 +296,6 @@ func httpPost(ctx context.Context, base string, endpoint string, body io.Reader, return nil, errors.Wrap(err, "failed to read POST response") } - //nolint:usestdlibvars // we should not replace 100 with http.StatusContinue, it makes it less readable if res.StatusCode/100 != 2 { return nil, errors.New("post failed", z.Int("status", res.StatusCode), z.Str("body", string(data))) } diff --git a/app/expbackoff/expbackoff.go b/app/expbackoff/expbackoff.go index f76c647a8..863c37816 100644 --- a/app/expbackoff/expbackoff.go +++ b/app/expbackoff/expbackoff.go @@ -148,14 +148,14 @@ func Backoff(config Config, retries int) time.Duration { } backoff := float64(config.BaseDelay) - max := float64(config.MaxDelay) + maxVal := float64(config.MaxDelay) - for backoff < max && retries > 0 { + for backoff < maxVal && retries > 0 { backoff *= config.Multiplier retries-- } - if backoff > max { - backoff = max + if backoff > maxVal { + backoff = maxVal } // Randomize backoff delays so that if a cluster of requests start at // the same time, they won't operate in lockstep. 
diff --git a/app/health/checks.go b/app/health/checks.go index 0af5b4e61..ef1603829 100644 --- a/app/health/checks.go +++ b/app/health/checks.go @@ -70,12 +70,12 @@ var checks = []check{ Description: "Beacon Node in syncing state.", Severity: severityCritical, Func: func(q query, _ Metadata) (bool, error) { - max, err := q("app_monitoring_beacon_node_syncing", noLabels, gaugeMax) + maxVal, err := q("app_monitoring_beacon_node_syncing", noLabels, gaugeMax) if err != nil { return false, err } - return max == 1, nil + return maxVal == 1, nil }, }, { @@ -83,14 +83,14 @@ var checks = []check{ Description: "Not connected to at least quorum peers. Check logs for networking issue or coordinate with peers.", Severity: severityCritical, Func: func(q query, m Metadata) (bool, error) { - max, err := q("p2p_ping_success", countNonZeroLabels, gaugeMax) + maxVal, err := q("p2p_ping_success", countNonZeroLabels, gaugeMax) if err != nil { return false, err } required := float64(m.QuorumPeers) - 1 // Exclude self - return max < required, nil + return maxVal < required, nil }, }, { @@ -98,14 +98,14 @@ var checks = []check{ Description: "Pending validators detected. Activate them to start validating.", Severity: severityInfo, Func: func(q query, _ Metadata) (bool, error) { - max, err := q("core_scheduler_validator_status", + maxVal, err := q("core_scheduler_validator_status", countLabels(l("status", "pending")), gaugeMax) if err != nil { return false, err } - return max > 0, nil + return maxVal > 0, nil }, }, { @@ -140,12 +140,12 @@ var checks = []check{ Description: "Metrics reached high cardinality threshold. 
Please check metrics reported by app_health_metrics_high_cardinality.", Severity: severityWarning, Func: func(q query, _ Metadata) (bool, error) { - max, err := q("app_health_metrics_high_cardinality", sumLabels(), gaugeMax) + maxVal, err := q("app_health_metrics_high_cardinality", sumLabels(), gaugeMax) if err != nil { return false, err } - return max > 0, nil + return maxVal > 0, nil }, }, } diff --git a/app/health/checks_internal_test.go b/app/health/checks_internal_test.go index 2e3b17aa0..332bc6bba 100644 --- a/app/health/checks_internal_test.go +++ b/app/health/checks_internal_test.go @@ -407,19 +407,19 @@ func testCheck(t *testing.T, m Metadata, checkName string, expect bool, metrics genGauge(genLabels("bar", "bar2"), 1, 1, 1), ) - var max int - if len(metrics) > max { - max = len(metrics) + var maxVal int + if len(metrics) > maxVal { + maxVal = len(metrics) } - if len(randomFamFoo) > max { - max = len(randomFamFoo) + if len(randomFamFoo) > maxVal { + maxVal = len(randomFamFoo) } - if len(randomFamBar) > max { - max = len(randomFamBar) + if len(randomFamBar) > maxVal { + maxVal = len(randomFamBar) } - multiFams := make([][]*pb.MetricFamily, max) - for i := range max { + multiFams := make([][]*pb.MetricFamily, maxVal) + for i := range maxVal { var fam []*pb.MetricFamily if i < len(metrics) { fam = append(fam, metrics[i]) @@ -455,14 +455,14 @@ func genFam(name string, metrics ...[]*pb.Metric) []*pb.MetricFamily { typ = pb.MetricType_GAUGE } - var max int + var maxVal int for _, series := range metrics { - if len(series) > max { - max = len(series) + if len(series) > maxVal { + maxVal = len(series) } } - resp := make([]*pb.MetricFamily, max) + resp := make([]*pb.MetricFamily, maxVal) for _, series := range metrics { for i, metric := range series { if resp[i] == nil { diff --git a/app/health/reducers.go b/app/health/reducers.go index 52507cb7f..a81ab5428 100644 --- a/app/health/reducers.go +++ b/app/health/reducers.go @@ -29,16 +29,16 @@ func increase(samples 
[]*pb.Metric) (float64, error) { // gaugeMax returns the maximum value in a time series of gauge metrics. func gaugeMax(samples []*pb.Metric) (float64, error) { - var max float64 + var maxVal float64 for _, sample := range samples { if sample.GetGauge() == nil { return 0, errors.New("bug: non-gauge metric passed") } - if sample.GetGauge().GetValue() > max { - max = sample.GetGauge().GetValue() + if sample.GetGauge().GetValue() > maxVal { + maxVal = sample.GetGauge().GetValue() } } - return max, nil + return maxVal, nil } diff --git a/app/health/select.go b/app/health/select.go index 3d42f5b68..a1c17d62e 100644 --- a/app/health/select.go +++ b/app/health/select.go @@ -15,8 +15,8 @@ type labelSelector func(*pb.MetricFamily) (*pb.Metric, error) // maxLabel returns the metric with the highest value. func maxLabel(metricsFam *pb.MetricFamily) *pb.Metric { //nolint: unused // This is used in the future. var ( - max float64 - resp *pb.Metric + maxVal float64 + resp *pb.Metric ) for _, metric := range metricsFam.GetMetric() { var val float64 @@ -29,8 +29,8 @@ func maxLabel(metricsFam *pb.MetricFamily) *pb.Metric { //nolint: unused // This panic("invalid metric type for simple value labelSelector") } - if max == 0 || val > max { - max = val + if maxVal == 0 || val > maxVal { + maxVal = val resp = metric } } diff --git a/app/log/loki/client.go b/app/log/loki/client.go index e1b3c18c9..327fd72be 100644 --- a/app/log/loki/client.go +++ b/app/log/loki/client.go @@ -210,7 +210,6 @@ func send(ctx context.Context, client *http.Client, endpoint string, batch *batc } defer resp.Body.Close() - //nolint:usestdlibvars // we should not replace 100 with http.StatusContinue, it makes it less readable if resp.StatusCode/100 != 2 { scanner := bufio.NewScanner(io.LimitReader(resp.Body, maxErrMsgLen)) line := "" diff --git a/app/log/loki/lokipb/v1/loki.pb.go b/app/log/loki/lokipb/v1/loki.pb.go index 4b6d54ab0..e1f71c092 100644 --- a/app/log/loki/lokipb/v1/loki.pb.go +++ 
b/app/log/loki/lokipb/v1/loki.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: app/log/loki/lokipb/v1/loki.proto @@ -31,11 +31,9 @@ type PushRequest struct { func (x *PushRequest) Reset() { *x = PushRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PushRequest) String() string { @@ -46,7 +44,7 @@ func (*PushRequest) ProtoMessage() {} func (x *PushRequest) ProtoReflect() protoreflect.Message { mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -80,11 +78,9 @@ type Stream struct { func (x *Stream) Reset() { *x = Stream{} - if protoimpl.UnsafeEnabled { - mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Stream) String() string { @@ -95,7 +91,7 @@ func (*Stream) ProtoMessage() {} func (x *Stream) ProtoReflect() protoreflect.Message { mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -142,11 +138,9 @@ type Entry struct { func (x *Entry) Reset() { *x = Entry{} - if protoimpl.UnsafeEnabled { - mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[2] - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Entry) String() string { @@ -157,7 +151,7 @@ func (*Entry) ProtoMessage() {} func (x *Entry) ProtoReflect() protoreflect.Message { mi := &file_app_log_loki_lokipb_v1_loki_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -253,44 +247,6 @@ func file_app_log_loki_lokipb_v1_loki_proto_init() { if File_app_log_loki_lokipb_v1_loki_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_app_log_loki_lokipb_v1_loki_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*PushRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_app_log_loki_lokipb_v1_loki_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Stream); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_app_log_loki_lokipb_v1_loki_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*Entry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/app/obolapi/api.go b/app/obolapi/api.go index 0c58a67b4..3224a3e9a 100644 --- a/app/obolapi/api.go +++ b/app/obolapi/api.go @@ -28,7 +28,7 @@ const ( func New(urlStr string, options ...func(*Client)) (Client, error) { _, err := url.ParseRequestURI(urlStr) // check that urlStr is valid if err != nil { - return Client{}, errors.Wrap(err, "could not parse Obol API URL") + return Client{}, 
errors.Wrap(err, "parse Obol API URL") } // always set a default timeout, even if no options are provided @@ -63,7 +63,7 @@ func WithTimeout(timeout time.Duration) func(*Client) { func (c Client) url() *url.URL { baseURL, err := url.ParseRequestURI(c.baseURL) if err != nil { - panic(errors.Wrap(err, "could not parse Obol API URL, this should never happen")) + panic(errors.Wrap(err, "parse Obol API URL, this should never happen")) } return baseURL @@ -83,7 +83,7 @@ func (c Client) PublishLock(ctx context.Context, lock cluster.Lock) error { ctx, cancel := context.WithTimeout(ctx, c.reqTimeout) defer cancel() - err = httpPost(ctx, addr, b) + err = httpPost(ctx, addr, b, nil) if err != nil { return err } @@ -105,28 +105,58 @@ func launchpadURLPath(lock cluster.Lock) string { return fmt.Sprintf(launchpadReturnPathFmt, lock.LockHash) } -func httpPost(ctx context.Context, url *url.URL, b []byte) error { - req, err := http.NewRequestWithContext(ctx, http.MethodPost, url.String(), bytes.NewReader(b)) +func httpPost(ctx context.Context, url *url.URL, body []byte, headers map[string]string) error { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url.String(), bytes.NewReader(body)) if err != nil { return errors.Wrap(err, "new POST request with ctx") } req.Header.Add("Content-Type", "application/json") + for key, val := range headers { + req.Header.Set(key, val) + } res, err := new(http.Client).Do(req) if err != nil { - return errors.Wrap(err, "failed to call POST endpoint") + return errors.Wrap(err, "call POST endpoint") } defer res.Body.Close() - data, err := io.ReadAll(res.Body) + if res.StatusCode/100 != 2 { + data, err := io.ReadAll(res.Body) + if err != nil { + return errors.Wrap(err, "read POST response", z.Int("status", res.StatusCode)) + } + + return errors.New("http POST failed", z.Int("status", res.StatusCode), z.Str("body", string(data))) + } + + return nil +} + +func httpGet(ctx context.Context, url *url.URL, headers map[string]string) 
(io.ReadCloser, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil) if err != nil { - return errors.Wrap(err, "failed to read POST response") + return nil, errors.Wrap(err, "new GET request with ctx") + } + req.Header.Add("Content-Type", "application/json") + + for key, val := range headers { + req.Header.Set(key, val) + } + + res, err := new(http.Client).Do(req) + if err != nil { + return nil, errors.Wrap(err, "call GET endpoint") } - //nolint:usestdlibvars // we should not replace 100 with http.StatusContinue, it makes it less readable if res.StatusCode/100 != 2 { - return errors.New("post failed", z.Int("status", res.StatusCode), z.Str("body", string(data))) + data, err := io.ReadAll(res.Body) + if err != nil { + return nil, errors.Wrap(err, "read GET response", z.Int("status", res.StatusCode)) + } + + return nil, errors.New("http GET failed", z.Int("status", res.StatusCode), z.Str("body", string(data))) } - return nil + return res.Body, nil } diff --git a/app/obolapi/api_internal_test.go b/app/obolapi/api_internal_test.go index 32bf7800a..babcc9a84 100644 --- a/app/obolapi/api_internal_test.go +++ b/app/obolapi/api_internal_test.go @@ -3,6 +3,11 @@ package obolapi import ( + "context" + "io" + "net/http" + "net/http/httptest" + "net/url" "testing" "time" @@ -21,3 +26,153 @@ func TestWithTimeout(t *testing.T) { require.NoError(t, err) require.Equal(t, timeout, oapi.reqTimeout) } + +func TestHttpPost(t *testing.T) { + tests := []struct { + name string + body []byte + headers map[string]string + server *httptest.Server + endpoint string + expectedError string + }{ + { + name: "default scenario", + body: nil, + headers: nil, + endpoint: "/post-request", + server: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.URL.Path, "/post-request") + require.Equal(t, r.Method, http.MethodPost) + require.Equal(t, r.Header.Get("Content-Type"), "application/json") + 
w.WriteHeader(http.StatusOK) + })), + expectedError: "", + }, + { + name: "default scenario with body and headers", + body: []byte(`{"test_body_key": "test_body_value"}`), + headers: map[string]string{"test_header_key": "test_header_value"}, + endpoint: "/post-request", + server: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.URL.Path, "/post-request") + require.Equal(t, r.Method, http.MethodPost) + require.Equal(t, r.Header.Get("Content-Type"), "application/json") + require.Equal(t, r.Header.Get("test_header_key"), "test_header_value") //nolint:canonicalheader + + data, err := io.ReadAll(r.Body) + require.NoError(t, err) + defer r.Body.Close() + require.Equal(t, string(data), `{"test_body_key": "test_body_value"}`) + + w.WriteHeader(http.StatusOK) + _, err = w.Write([]byte(`"OK"`)) + require.NoError(t, err) + })), + expectedError: "", + }, + { + name: "status code not 2XX", + body: nil, + headers: nil, + endpoint: "/post-request", + server: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.URL.Path, "/post-request") + require.Equal(t, r.Method, http.MethodPost) + require.Equal(t, r.Header.Get("Content-Type"), "application/json") + + w.WriteHeader(http.StatusBadRequest) + _, err := w.Write([]byte(`"Bad Request response"`)) + require.NoError(t, err) + })), + expectedError: "POST failed", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testServerURL, err := url.ParseRequestURI(test.server.URL) + require.NoError(t, err) + err = httpPost(context.Background(), testServerURL.JoinPath(test.endpoint), test.body, test.headers) + if test.expectedError != "" { + require.Error(t, err) + require.ErrorContains(t, err, test.expectedError) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestHttpGet(t *testing.T) { + tests := []struct { + name string + headers map[string]string + server *httptest.Server + endpoint string + 
expectedResp []byte + expectedError string + }{ + { + name: "default scenario", + headers: nil, + endpoint: "/get-request", + server: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.URL.Path, "/get-request") + require.Equal(t, r.Method, http.MethodGet) + require.Equal(t, r.Header.Get("Content-Type"), "application/json") + w.WriteHeader(http.StatusOK) + })), + expectedError: "", + }, + { + name: "default scenario with headers", + headers: map[string]string{"test_header_key": "test_header_value"}, + endpoint: "/get-request", + server: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.URL.Path, "/get-request") + require.Equal(t, r.Method, http.MethodGet) + require.Equal(t, r.Header.Get("Content-Type"), "application/json") + require.Equal(t, r.Header.Get("test_header_key"), "test_header_value") //nolint:canonicalheader + + w.WriteHeader(http.StatusOK) + _, err := w.Write([]byte(`"OK"`)) + require.NoError(t, err) + })), + expectedResp: []byte(`"OK"`), + expectedError: "", + }, + { + name: "status code not 2XX", + headers: nil, + endpoint: "/get-request", + server: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.URL.Path, "/get-request") + require.Equal(t, r.Method, http.MethodGet) + require.Equal(t, r.Header.Get("Content-Type"), "application/json") + + w.WriteHeader(http.StatusBadRequest) + _, err := w.Write([]byte(`"Bad Request response"`)) + require.NoError(t, err) + })), + expectedResp: []byte(`"Bad Request response"`), + expectedError: "GET failed", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testServerURL, err := url.ParseRequestURI(test.server.URL) + require.NoError(t, err) + respBody, err := httpGet(context.Background(), testServerURL.JoinPath(test.endpoint), test.headers) + if test.expectedError != "" { + require.Error(t, err) + require.ErrorContains(t, err, 
test.expectedError) + } else { + require.NoError(t, err) + defer respBody.Close() + resp, err := io.ReadAll(respBody) + require.NoError(t, err) + require.Equal(t, string(resp), string(test.expectedResp)) + } + }) + } +} diff --git a/app/obolapi/exit.go b/app/obolapi/exit.go index 2a142b11b..dae3653d9 100644 --- a/app/obolapi/exit.go +++ b/app/obolapi/exit.go @@ -3,12 +3,10 @@ package obolapi import ( - "bytes" "context" "encoding/hex" "encoding/json" "fmt" - "net/http" "net/url" "sort" "strconv" @@ -62,16 +60,16 @@ func fullExitURL(valPubkey, lockHash string, shareIndex uint64) string { ).Replace(fullExitTmpl) } -// PostPartialExit POSTs the set of msg's to the Obol API, for a given lock hash. +// PostPartialExits POSTs the set of msg's to the Obol API, for a given lock hash. // It respects the timeout specified in the Client instance. -func (c Client) PostPartialExit(ctx context.Context, lockHash []byte, shareIndex uint64, identityKey *k1.PrivateKey, exitBlobs ...ExitBlob) error { +func (c Client) PostPartialExits(ctx context.Context, lockHash []byte, shareIndex uint64, identityKey *k1.PrivateKey, exitBlobs ...ExitBlob) error { lockHashStr := "0x" + hex.EncodeToString(lockHash) path := partialExitURL(lockHashStr) u, err := url.ParseRequestURI(c.baseURL) if err != nil { - return errors.Wrap(err, "bad obol api url") + return errors.Wrap(err, "bad Obol API url") } u.Path = path @@ -107,24 +105,9 @@ func (c Client) PostPartialExit(ctx context.Context, lockHash []byte, shareIndex ctx, cancel := context.WithTimeout(ctx, c.reqTimeout) defer cancel() - req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), bytes.NewReader(data)) + err = httpPost(ctx, u, data, nil) if err != nil { - return errors.Wrap(err, "http new post request") - } - - req.Header.Set("Content-Type", "application/json") - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return errors.Wrap(err, "http post error") - } - - defer func() { - _ = resp.Body.Close() - }() - - if 
resp.StatusCode != http.StatusCreated { - return errors.New("http error", z.Int("status_code", resp.StatusCode)) + return errors.Wrap(err, "http Obol API POST request") } return nil @@ -142,7 +125,7 @@ func (c Client) GetFullExit(ctx context.Context, valPubkey string, lockHash []by u, err := url.ParseRequestURI(c.baseURL) if err != nil { - return ExitBlob{}, errors.Wrap(err, "bad obol api url") + return ExitBlob{}, errors.Wrap(err, "bad Obol API url") } u.Path = path @@ -150,11 +133,6 @@ func (c Client) GetFullExit(ctx context.Context, valPubkey string, lockHash []by ctx, cancel := context.WithTimeout(ctx, c.reqTimeout) defer cancel() - req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) - if err != nil { - return ExitBlob{}, errors.Wrap(err, "http new get request") - } - exitAuthData := FullExitAuthBlob{ LockHash: lockHash, ValidatorPubkey: valPubkeyBytes, @@ -172,28 +150,15 @@ func (c Client) GetFullExit(ctx context.Context, valPubkey string, lockHash []by return ExitBlob{}, errors.Wrap(err, "k1 sign") } - req.Header.Set("Authorization", bearerString(lockHashSignature)) - req.Header.Set("Content-Type", "application/json") - - resp, err := http.DefaultClient.Do(req) + respBody, err := httpGet(ctx, u, map[string]string{"Authorization": bearerString(lockHashSignature)}) if err != nil { - return ExitBlob{}, errors.Wrap(err, "http get error") - } - - if resp.StatusCode != http.StatusOK { - if resp.StatusCode == http.StatusNotFound { - return ExitBlob{}, ErrNoExit - } - - return ExitBlob{}, errors.New("http error", z.Int("status_code", resp.StatusCode)) + return ExitBlob{}, errors.Wrap(err, "http Obol API GET request") } - defer func() { - _ = resp.Body.Close() - }() + defer respBody.Close() var er FullExitResponse - if err := json.NewDecoder(resp.Body).Decode(&er); err != nil { + if err := json.NewDecoder(respBody).Decode(&er); err != nil { return ExitBlob{}, errors.Wrap(err, "json unmarshal error") } diff --git a/app/obolapi/exit_test.go 
b/app/obolapi/exit_test.go index 329b8312f..22b3ae82c 100644 --- a/app/obolapi/exit_test.go +++ b/app/obolapi/exit_test.go @@ -97,7 +97,7 @@ func TestAPIFlow(t *testing.T) { // send all the partial exits for idx, exit := range exits { - require.NoError(t, cl.PostPartialExit(ctx, lock.LockHash, uint64(idx+1), identityKeys[idx], exit), "share index: %d", idx+1) + require.NoError(t, cl.PostPartialExits(ctx, lock.LockHash, uint64(idx+1), identityKeys[idx], exit), "share index: %d", idx+1) } for idx := range exits { @@ -188,7 +188,7 @@ func TestAPIFlowMissingSig(t *testing.T) { // send all the partial exits for idx, exit := range exits { - require.NoError(t, cl.PostPartialExit(ctx, lock.LockHash, uint64(idx+1), identityKeys[idx], exit), "share index: %d", idx+1) + require.NoError(t, cl.PostPartialExits(ctx, lock.LockHash, uint64(idx+1), identityKeys[idx], exit), "share index: %d", idx+1) } for idx := range exits { diff --git a/app/peerinfo/peerinfopb/v1/peerinfo.pb.go b/app/peerinfo/peerinfopb/v1/peerinfo.pb.go index daaa60454..8663f4408 100644 --- a/app/peerinfo/peerinfopb/v1/peerinfo.pb.go +++ b/app/peerinfo/peerinfopb/v1/peerinfo.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: app/peerinfo/peerinfopb/v1/peerinfo.proto @@ -36,11 +36,9 @@ type PeerInfo struct { func (x *PeerInfo) Reset() { *x = PeerInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_app_peerinfo_peerinfopb_v1_peerinfo_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_peerinfo_peerinfopb_v1_peerinfo_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PeerInfo) String() string { @@ -51,7 +49,7 @@ func (*PeerInfo) ProtoMessage() {} func (x *PeerInfo) ProtoReflect() protoreflect.Message { mi := &file_app_peerinfo_peerinfopb_v1_peerinfo_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -175,20 +173,6 @@ func file_app_peerinfo_peerinfopb_v1_peerinfo_proto_init() { if File_app_peerinfo_peerinfopb_v1_peerinfo_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_app_peerinfo_peerinfopb_v1_peerinfo_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*PeerInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_app_peerinfo_peerinfopb_v1_peerinfo_proto_msgTypes[0].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ diff --git a/app/protonil/testdata/v1/test.pb.go b/app/protonil/testdata/v1/test.pb.go index 8b50c2d1b..19a41ae25 100644 --- a/app/protonil/testdata/v1/test.pb.go +++ b/app/protonil/testdata/v1/test.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: app/protonil/testdata/v1/test.proto @@ -32,11 +32,9 @@ type M1 struct { func (x *M1) Reset() { *x = M1{} - if protoimpl.UnsafeEnabled { - mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *M1) String() string { @@ -47,7 +45,7 @@ func (*M1) ProtoMessage() {} func (x *M1) ProtoReflect() protoreflect.Message { mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -95,11 +93,9 @@ type M2 struct { func (x *M2) Reset() { *x = M2{} - if protoimpl.UnsafeEnabled { - mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *M2) String() string { @@ -110,7 +106,7 @@ func (*M2) ProtoMessage() {} func (x *M2) ProtoReflect() protoreflect.Message { mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -156,11 +152,9 @@ type M3 struct { func (x *M3) Reset() { *x = M3{} - if protoimpl.UnsafeEnabled { - mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[2] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *M3) String() string { @@ -171,7 +165,7 @@ func (*M3) ProtoMessage() {} func (x *M3) ProtoReflect() protoreflect.Message { mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -205,11 +199,9 @@ type M4 struct { func (x *M4) Reset() { *x = M4{} - if protoimpl.UnsafeEnabled { - mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *M4) String() string { @@ -220,7 +212,7 @@ func (*M4) ProtoMessage() {} func (x *M4) ProtoReflect() protoreflect.Message { mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -267,11 +259,9 @@ type MaxIndex struct { func (x *MaxIndex) Reset() { *x = MaxIndex{} - if protoimpl.UnsafeEnabled { - mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MaxIndex) String() string { @@ -282,7 +272,7 @@ func (*MaxIndex) ProtoMessage() {} func (x *MaxIndex) ProtoReflect() protoreflect.Message { mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ 
-321,11 +311,9 @@ type Attack struct { func (x *Attack) Reset() { *x = Attack{} - if protoimpl.UnsafeEnabled { - mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Attack) String() string { @@ -336,7 +324,7 @@ func (*Attack) ProtoMessage() {} func (x *Attack) ProtoReflect() protoreflect.Message { mi := &file_app_protonil_testdata_v1_test_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -502,80 +490,6 @@ func file_app_protonil_testdata_v1_test_proto_init() { if File_app_protonil_testdata_v1_test_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_app_protonil_testdata_v1_test_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*M1); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_app_protonil_testdata_v1_test_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*M2); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_app_protonil_testdata_v1_test_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*M3); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_app_protonil_testdata_v1_test_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*M4); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_app_protonil_testdata_v1_test_proto_msgTypes[4].Exporter 
= func(v any, i int) any { - switch v := v.(*MaxIndex); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_app_protonil_testdata_v1_test_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*Attack); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_app_protonil_testdata_v1_test_proto_msgTypes[0].OneofWrappers = []any{} file_app_protonil_testdata_v1_test_proto_msgTypes[1].OneofWrappers = []any{} file_app_protonil_testdata_v1_test_proto_msgTypes[5].OneofWrappers = []any{} diff --git a/app/version/version.go b/app/version/version.go index 8fdc5c540..a2cc30cee 100644 --- a/app/version/version.go +++ b/app/version/version.go @@ -15,7 +15,7 @@ import ( ) // version a string since it is overwritten at build-time with the git tag for official releases. -var version = "v1.1-rc" +var version = "v1.2-rc" // Version is the branch version of the codebase. // - Main branch: v0.X-dev @@ -25,6 +25,7 @@ var Version, _ = Parse(version) // Error is caught in tests. // Supported returns the supported minor versions in order of precedence. 
func Supported() []SemVer { return []SemVer{ + {major: 1, minor: 2}, {major: 1, minor: 1}, {major: 1, minor: 0}, } diff --git a/cluster/helpers.go b/cluster/helpers.go index 788bdc696..8ff3ee1ef 100644 --- a/cluster/helpers.go +++ b/cluster/helpers.go @@ -40,7 +40,6 @@ func FetchDefinition(ctx context.Context, url string) (Definition, error) { return Definition{}, errors.Wrap(err, "fetch file") } - //nolint:usestdlibvars // we should not replace 100 with http.StatusContinue, it makes it less readable if resp.StatusCode/100 != 2 { return Definition{}, errors.New("http error", z.Int("status_code", resp.StatusCode)) } diff --git a/cluster/manifestpb/v1/manifest.pb.go b/cluster/manifestpb/v1/manifest.pb.go index aa6b000c1..6b7a68bb4 100644 --- a/cluster/manifestpb/v1/manifest.pb.go +++ b/cluster/manifestpb/v1/manifest.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: cluster/manifestpb/v1/manifest.proto @@ -39,11 +39,9 @@ type Cluster struct { func (x *Cluster) Reset() { *x = Cluster{} - if protoimpl.UnsafeEnabled { - mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Cluster) String() string { @@ -54,7 +52,7 @@ func (*Cluster) ProtoMessage() {} func (x *Cluster) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -138,11 +136,9 @@ type Mutation struct { func (x *Mutation) Reset() { *x = Mutation{} - if protoimpl.UnsafeEnabled { - mi := 
&file_cluster_manifestpb_v1_manifest_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Mutation) String() string { @@ -153,7 +149,7 @@ func (*Mutation) ProtoMessage() {} func (x *Mutation) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -202,11 +198,9 @@ type SignedMutation struct { func (x *SignedMutation) Reset() { *x = SignedMutation{} - if protoimpl.UnsafeEnabled { - mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SignedMutation) String() string { @@ -217,7 +211,7 @@ func (*SignedMutation) ProtoMessage() {} func (x *SignedMutation) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -264,11 +258,9 @@ type SignedMutationList struct { func (x *SignedMutationList) Reset() { *x = SignedMutationList{} - if protoimpl.UnsafeEnabled { - mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SignedMutationList) String() string 
{ @@ -279,7 +271,7 @@ func (*SignedMutationList) ProtoMessage() {} func (x *SignedMutationList) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -313,11 +305,9 @@ type Operator struct { func (x *Operator) Reset() { *x = Operator{} - if protoimpl.UnsafeEnabled { - mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Operator) String() string { @@ -328,7 +318,7 @@ func (*Operator) ProtoMessage() {} func (x *Operator) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -372,11 +362,9 @@ type Validator struct { func (x *Validator) Reset() { *x = Validator{} - if protoimpl.UnsafeEnabled { - mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Validator) String() string { @@ -387,7 +375,7 @@ func (*Validator) ProtoMessage() {} func (x *Validator) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -448,11 +436,9 @@ type 
ValidatorList struct { func (x *ValidatorList) Reset() { *x = ValidatorList{} - if protoimpl.UnsafeEnabled { - mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ValidatorList) String() string { @@ -463,7 +449,7 @@ func (*ValidatorList) ProtoMessage() {} func (x *ValidatorList) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -496,11 +482,9 @@ type LegacyLock struct { func (x *LegacyLock) Reset() { *x = LegacyLock{} - if protoimpl.UnsafeEnabled { - mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LegacyLock) String() string { @@ -511,7 +495,7 @@ func (*LegacyLock) ProtoMessage() {} func (x *LegacyLock) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -542,11 +526,9 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *Empty) String() string { @@ -557,7 +539,7 @@ func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { mi := &file_cluster_manifestpb_v1_manifest_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -700,116 +682,6 @@ func file_cluster_manifestpb_v1_manifest_proto_init() { if File_cluster_manifestpb_v1_manifest_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_cluster_manifestpb_v1_manifest_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Cluster); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cluster_manifestpb_v1_manifest_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Mutation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cluster_manifestpb_v1_manifest_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*SignedMutation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cluster_manifestpb_v1_manifest_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*SignedMutationList); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cluster_manifestpb_v1_manifest_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Operator); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cluster_manifestpb_v1_manifest_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*Validator); i { - case 0: - return 
&v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cluster_manifestpb_v1_manifest_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*ValidatorList); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cluster_manifestpb_v1_manifest_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*LegacyLock); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cluster_manifestpb_v1_manifest_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/cmd/ascii.go b/cmd/ascii.go index ca81f1fcb..6b4d6de64 100644 --- a/cmd/ascii.go +++ b/cmd/ascii.go @@ -39,7 +39,7 @@ func validatorASCII() []string { func mevASCII() []string { return []string{ - "__ __ ________ __ ", + " __ __ ________ __ ", "| \\/ | ____\\ \\ / / ", "| \\ / | |__ \\ \\ / / ", "| |\\/| | __| \\ \\/ / ", @@ -48,14 +48,14 @@ func mevASCII() []string { } } -func performanceASCII() []string { +func infraASCII() []string { return []string{ - " _____ __ ", - "| __ \\ / _| ", - "| |__) |__ _ __| |_ ___ _ __ _ __ ___ __ _ _ __ ___ ___ ", - "| ___/ _ \\ '__| _/ _ \\| '__| '_ ` _ \\ / _` | '_ \\ / __/ _ \\ ", - "| | | __/ | | || (_) | | | | | | | | (_| | | | | (_| __/ ", - "|_| \\___|_| |_| \\___/|_| |_| |_| |_|\\__,_|_| |_|\\___\\___| ", + " _____ __ ", + "|_ _| / _| ", + " | | _ __ | |_ _ __ __ _ ", + " | | | '_ \\| _| '__/ _` | ", + " _| |_| | | | | | | | (_| | ", + "|_____|_| |_|_| |_| \\__,_| ", } } diff --git a/cmd/cmd.go b/cmd/cmd.go index 2b2d15ab5..0b152c01d 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ 
-47,23 +47,24 @@ func New() *cobra.Command { ), newCombineCmd(newCombineFunc), newAlphaCmd( - newTestCmd( - newTestPeersCmd(runTestPeers), - newTestBeaconCmd(runTestBeacon), - newTestValidatorCmd(runTestValidator), - newTestMEVCmd(runTestMEV), - newTestPerformanceCmd(runTestPerformance), - ), newAddValidatorsCmd(runAddValidatorsSolo), newViewClusterManifestCmd(runViewClusterManifest), ), newExitCmd( newListActiveValidatorsCmd(runListActiveValidatorsCmd), - newSubmitPartialExitCmd(runSignPartialExit), + newSignPartialExitCmd(runSignPartialExit), newBcastFullExitCmd(runBcastFullExit), newFetchExitCmd(runFetchExit), ), newUnsafeCmd(newRunCmd(app.Run, true)), + newTestCmd( + newTestAllCmd(runTestAll), + newTestPeersCmd(runTestPeers), + newTestBeaconCmd(runTestBeacon), + newTestValidatorCmd(runTestValidator), + newTestMEVCmd(runTestMEV), + newTestInfraCmd(runTestInfra), + ), ) } diff --git a/cmd/cmd_internal_test.go b/cmd/cmd_internal_test.go index af7452edc..2a985838b 100644 --- a/cmd/cmd_internal_test.go +++ b/cmd/cmd_internal_test.go @@ -62,7 +62,7 @@ func TestCmdFlags(t *testing.T) { LokiService: "charon", }, P2P: p2p.Config{ - Relays: []string{"https://0.relay.obol.tech", "https://1.relay.obol.tech"}, + Relays: []string{"https://0.relay.obol.tech", "https://2.relay.obol.dev", "https://1.relay.obol.tech"}, TCPAddrs: nil, }, Feature: featureset.Config{ @@ -113,7 +113,7 @@ func TestCmdFlags(t *testing.T) { LokiService: "charon", }, P2P: p2p.Config{ - Relays: []string{"https://0.relay.obol.tech", "https://1.relay.obol.tech"}, + Relays: []string{"https://0.relay.obol.tech", "https://2.relay.obol.dev", "https://1.relay.obol.tech"}, TCPAddrs: nil, }, Feature: featureset.Config{ diff --git a/cmd/createcluster.go b/cmd/createcluster.go index 505018217..065c713af 100644 --- a/cmd/createcluster.go +++ b/cmd/createcluster.go @@ -51,6 +51,7 @@ const ( zeroAddress = "0x0000000000000000000000000000000000000000" defaultNetwork = "mainnet" minNodes = 3 + minThreshold = 2 ) type 
clusterConfig struct { @@ -98,6 +99,22 @@ func newCreateClusterCmd(runFunc func(context.Context, io.Writer, clusterConfig) bindClusterFlags(cmd.Flags(), &conf) bindInsecureFlags(cmd.Flags(), &conf.InsecureKeys) + wrapPreRunE(cmd, func(cmd *cobra.Command, _ []string) error { + thresholdPresent := cmd.Flags().Lookup("threshold").Changed + + if thresholdPresent { + if conf.Threshold < minThreshold { + return errors.New("threshold must be greater than 1", z.Int("threshold", conf.Threshold), z.Int("min", minThreshold)) + } + if conf.Threshold > conf.NumNodes { + return errors.New("threshold cannot be greater than number of operators", + z.Int("threshold", conf.Threshold), z.Int("operators", conf.NumNodes)) + } + } + + return nil + }) + return cmd } @@ -144,6 +161,7 @@ func runCreateCluster(ctx context.Context, w io.Writer, conf clusterConfig) erro } conf.NumNodes = len(def.Operators) + conf.Threshold = def.Threshold } if err = validateCreateConfig(ctx, conf); err != nil { @@ -366,6 +384,11 @@ func validateCreateConfig(ctx context.Context, conf clusterConfig) error { } } + // Don't allow cluster size to be less than 3. 
+ if conf.NumNodes < minNodes { + return errors.New("number of operators is below minimum", z.Int("operators", conf.NumNodes), z.Int("min", minNodes)) + } + return nil } diff --git a/cmd/createcluster_internal_test.go b/cmd/createcluster_internal_test.go index 0d066474f..a611206c1 100644 --- a/cmd/createcluster_internal_test.go +++ b/cmd/createcluster_internal_test.go @@ -39,6 +39,10 @@ func TestCreateCluster(t *testing.T) { def, err := loadDefinition(context.Background(), defPath) require.NoError(t, err) + defPathTwoNodes := "../cluster/examples/cluster-definition-001.json" + defTwoNodes, err := loadDefinition(context.Background(), defPathTwoNodes) + require.NoError(t, err) + tests := []struct { Name string Config clusterConfig @@ -218,7 +222,7 @@ func TestCreateCluster(t *testing.T) { Config: clusterConfig{ Name: "test_cluster", NumNodes: 3, - Threshold: 4, + Threshold: 3, NumDVs: 5, Network: "goerli", }, @@ -246,6 +250,23 @@ func TestCreateCluster(t *testing.T) { }, }, }, + { + Name: "test with number of nodes below minimum", + Config: clusterConfig{ + Name: "test_cluster", + NumNodes: 2, + Threshold: 2, + NumDVs: 1, + Network: "goerli", + }, + defFileProvider: func() []byte { + data, err := json.Marshal(defTwoNodes) + require.NoError(t, err) + + return data + }, + expectedErr: "number of operators is below minimum", + }, } for _, test := range tests { t.Run(test.Name, func(t *testing.T) { @@ -555,6 +576,7 @@ func TestMultipleAddresses(t *testing.T) { err := runCreateCluster(context.Background(), io.Discard, clusterConfig{ NumDVs: 4, NumNodes: 4, + Threshold: 3, Network: defaultNetwork, FeeRecipientAddrs: []string{}, WithdrawalAddrs: []string{}, @@ -566,6 +588,7 @@ func TestMultipleAddresses(t *testing.T) { err := runCreateCluster(context.Background(), io.Discard, clusterConfig{ NumDVs: 1, NumNodes: 4, + Threshold: 3, Network: defaultNetwork, FeeRecipientAddrs: []string{feeRecipientAddr}, WithdrawalAddrs: []string{}, @@ -639,6 +662,7 @@ func TestKeymanager(t 
*testing.T) { SplitKeysDir: keyDir, SplitKeys: true, NumNodes: minNodes, + Threshold: minThreshold, KeymanagerAddrs: addrs, KeymanagerAuthTokens: authTokens, Network: eth2util.Goerli.Name, @@ -720,6 +744,7 @@ func TestPublish(t *testing.T) { conf := clusterConfig{ Name: t.Name(), NumNodes: minNodes, + Threshold: minThreshold, NumDVs: 1, Network: eth2util.Goerli.Name, WithdrawalAddrs: []string{zeroAddress}, @@ -743,6 +768,82 @@ func TestPublish(t *testing.T) { }) } +func TestClusterCLI(t *testing.T) { + feeRecipientArg := "--fee-recipient-addresses=" + validEthAddr + withdrawalArg := "--withdrawal-addresses=" + validEthAddr + + tests := []struct { + name string + network string + nodes string + numValidators string + feeRecipient string + withdrawal string + threshold string + expectedErr string + cleanup func(*testing.T) + }{ + { + name: "threshold below minimum", + nodes: "--nodes=3", + network: "--network=holesky", + numValidators: "--num-validators=1", + feeRecipient: feeRecipientArg, + withdrawal: withdrawalArg, + threshold: "--threshold=1", + expectedErr: "threshold must be greater than 1", + }, + { + name: "threshold above maximum", + nodes: "--nodes=4", + network: "--network=holesky", + numValidators: "--num-validators=1", + feeRecipient: feeRecipientArg, + withdrawal: withdrawalArg, + threshold: "--threshold=5", + expectedErr: "threshold cannot be greater than number of operators", + }, + { + name: "no threshold provided", + nodes: "--nodes=3", + network: "--network=holesky", + numValidators: "--num-validators=1", + feeRecipient: feeRecipientArg, + withdrawal: withdrawalArg, + threshold: "", + expectedErr: "", + cleanup: func(t *testing.T) { + t.Helper() + require.NoError(t, os.RemoveAll("node0")) + require.NoError(t, os.RemoveAll("node1")) + require.NoError(t, os.RemoveAll("node2")) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cmd := newCreateCmd(newCreateClusterCmd(runCreateCluster)) + if test.threshold != "" { 
+ cmd.SetArgs([]string{"cluster", test.nodes, test.feeRecipient, test.withdrawal, test.network, test.numValidators, test.threshold}) + } else { + cmd.SetArgs([]string{"cluster", test.nodes, test.feeRecipient, test.withdrawal, test.network, test.numValidators}) + } + + err := cmd.Execute() + if test.expectedErr != "" { + require.ErrorContains(t, err, test.expectedErr) + } else { + require.NoError(t, err) + } + + if test.cleanup != nil { + test.cleanup(t) + } + }) + } +} + // mockKeymanagerReq is a mock keymanager request for use in tests. type mockKeymanagerReq struct { Keystores []string `json:"keystores"` diff --git a/cmd/createdkg.go b/cmd/createdkg.go index 8deb0e9f0..4bc9e28ab 100644 --- a/cmd/createdkg.go +++ b/cmd/createdkg.go @@ -49,6 +49,22 @@ func newCreateDKGCmd(runFunc func(context.Context, createDKGConfig) error) *cobr bindCreateDKGFlags(cmd, &config) + wrapPreRunE(cmd, func(cmd *cobra.Command, _ []string) error { + thresholdPresent := cmd.Flags().Lookup("threshold").Changed + + if thresholdPresent { + if config.Threshold < minThreshold { + return errors.New("threshold must be greater than 1", z.Int("threshold", config.Threshold), z.Int("min", minThreshold)) + } + if config.Threshold > len(config.OperatorENRs) { + return errors.New("threshold cannot be greater than number of operators", + z.Int("threshold", config.Threshold), z.Int("operators", len(config.OperatorENRs))) + } + } + + return nil + }) + return cmd } @@ -81,7 +97,7 @@ func runCreateDKG(ctx context.Context, conf createDKGConfig) (err error) { conf.Network = eth2util.Goerli.Name } - if err = validateDKGConfig(conf.Threshold, len(conf.OperatorENRs), conf.Network, conf.DepositAmounts); err != nil { + if err = validateDKGConfig(len(conf.OperatorENRs), conf.Network, conf.DepositAmounts); err != nil { return err } @@ -114,7 +130,7 @@ func runCreateDKG(ctx context.Context, conf createDKGConfig) (err error) { safeThreshold := cluster.Threshold(len(conf.OperatorENRs)) if conf.Threshold == 0 { 
conf.Threshold = safeThreshold - } else if conf.Threshold != safeThreshold { + } else { log.Warn(ctx, "Non standard `--threshold` flag provided, this will affect cluster safety", nil, z.Int("threshold", conf.Threshold), z.Int("safe_threshold", safeThreshold)) } @@ -180,15 +196,10 @@ func validateWithdrawalAddrs(addrs []string, network string) error { } // validateDKGConfig returns an error if any of the provided config parameter is invalid. -func validateDKGConfig(threshold, numOperators int, network string, depositAmounts []int) error { - if threshold > numOperators { - return errors.New("threshold cannot be greater than length of operators", - z.Int("threshold", threshold), z.Int("operators", numOperators)) - } - - // Don't allow cluster size to be less than 4. +func validateDKGConfig(numOperators int, network string, depositAmounts []int) error { + // Don't allow cluster size to be less than 3. if numOperators < minNodes { - return errors.New("insufficient operator ENRs", z.Int("count", numOperators), z.Int("min", minNodes)) + return errors.New("number of operators is below minimum", z.Int("operators", numOperators), z.Int("min", minNodes)) } if !eth2util.ValidNetwork(network) { diff --git a/cmd/createdkg_internal_test.go b/cmd/createdkg_internal_test.go index fbf53da4e..c6f9371e0 100644 --- a/cmd/createdkg_internal_test.go +++ b/cmd/createdkg_internal_test.go @@ -57,7 +57,8 @@ func TestCreateDkgInvalid(t *testing.T) { OperatorENRs: append([]string{ "-JG4QDKNYm_JK-w6NuRcUFKvJAlq2L4CwkECelzyCVrMWji4YnVRn8AqQEL5fTQotPL2MKxiKNmn2k6XEINtq-6O3Z2GAYGvzr_LgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQKlO7fSaBa3h48CdM-qb_Xb2_hSrJOy6nNjR0mapAqMboN0Y3CCDhqDdWRwgg4u", }, validENRs...), - Network: defaultNetwork, + Threshold: 3, + Network: defaultNetwork, }, errMsg: "invalid ENR: missing 'enr:' prefix", }, @@ -66,7 +67,8 @@ func TestCreateDkgInvalid(t *testing.T) { OperatorENRs: append([]string{ 
"enr:JG4QDKNYm_JK-w6NuRcUFKvJAlq2L4CwkECelzyCVrMWji4YnVRn8AqQEL5fTQotPL2MKxiKNmn2k6XEINtq-6O3Z2GAYGvzr_LgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQKlO7fSaBa3h48CdM-qb_Xb2_hSrJOy6nNjR0mapAqMboN0Y3CCDhqDdWRwgg4u", }, validENRs...), - Network: defaultNetwork, + Threshold: 3, + Network: defaultNetwork, }, errMsg: "invalid ENR: invalid enr record, too few elements", }, @@ -75,7 +77,8 @@ func TestCreateDkgInvalid(t *testing.T) { OperatorENRs: append([]string{ "enrJG4QDKNYm_JK-w6NuRcUFKvJAlq2L4CwkECelzyCVrMWji4YnVRn8AqQEL5fTQotPL2MKxiKNmn2k6XEINtq-6O3Z2GAYGvzr_LgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQKlO7fSaBa3h48CdM-qb_Xb2_hSrJOy6nNjR0mapAqMboN0Y3CCDhqDdWRwgg4u", }, validENRs...), - Network: defaultNetwork, + Threshold: 3, + Network: defaultNetwork, }, errMsg: "invalid ENR: missing 'enr:' prefix", }, @@ -84,17 +87,18 @@ func TestCreateDkgInvalid(t *testing.T) { OperatorENRs: append([]string{ "JG4QDKNYm_JK-w6NuRcUFKvJAlq2L4CwkECelzyCVrMWji4YnVRn8AqQEL5fTQotPL2MKxiKNmn2k6XEINtq-6O3Z2GAYGvzr_LgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQKlO7fSaBa3h48CdM-qb_Xb2_hSrJOy6nNjR0mapAqMboN0Y3CCDhqDdWRwgg4u", }, validENRs...), - Network: defaultNetwork, + Threshold: 3, + Network: defaultNetwork, }, errMsg: "invalid ENR: missing 'enr:' prefix", }, { conf: createDKGConfig{OperatorENRs: []string{""}}, - errMsg: "insufficient operator ENRs", + errMsg: "number of operators is below minimum", }, { conf: createDKGConfig{}, - errMsg: "insufficient operator ENRs", + errMsg: "number of operators is below minimum", }, } @@ -120,7 +124,7 @@ func TestRequireOperatorENRFlag(t *testing.T) { { name: "operator ENRs less than threshold", args: []string{"dkg", "--operator-enrs=enr:-JG4QG472ZVvl8ySSnUK9uNVDrP_hjkUrUqIxUC75aayzmDVQedXkjbqc7QKyOOS71VmlqnYzri_taV8ZesFYaoQSIOGAYHtv1WsgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQKwwq_CAld6oVKOrixE-JzMtvvNgb9yyI-_rwq4NFtajIN0Y3CCDhqDdWRwgg4u", "--fee-recipient-addresses=0xa6430105220d0b29688b734b8ea0f3ca9936e846", "--withdrawal-addresses=0xa6430105220d0b29688b734b8ea0f3ca9936e846"}, - err: 
"insufficient operator ENRs", + err: "number of operators is below minimum", }, } @@ -146,9 +150,10 @@ func TestExistingClusterDefinition(t *testing.T) { feeRecipientArg := "--fee-recipient-addresses=" + validEthAddr withdrawalArg := "--withdrawal-addresses=" + validEthAddr outputDirArg := "--output-dir=" + charonDir + thresholdArg := "--threshold=2" cmd := newCreateCmd(newCreateDKGCmd(runCreateDKG)) - cmd.SetArgs([]string{"dkg", enrArg, feeRecipientArg, withdrawalArg, outputDirArg}) + cmd.SetArgs([]string{"dkg", enrArg, feeRecipientArg, withdrawalArg, outputDirArg, thresholdArg}) require.EqualError(t, cmd.Execute(), "existing cluster-definition.json found. Try again after deleting it") } @@ -179,29 +184,108 @@ func TestValidateWithdrawalAddr(t *testing.T) { } func TestValidateDKGConfig(t *testing.T) { - t.Run("invalid threshold", func(t *testing.T) { - threshold := 5 - numOperators := 4 - err := validateDKGConfig(threshold, numOperators, "", nil) - require.ErrorContains(t, err, "threshold cannot be greater than length of operators") - }) - t.Run("insufficient ENRs", func(t *testing.T) { - threshold := 1 numOperators := 2 - err := validateDKGConfig(threshold, numOperators, "", nil) - require.ErrorContains(t, err, "insufficient operator ENRs") + err := validateDKGConfig(numOperators, "", nil) + require.ErrorContains(t, err, "number of operators is below minimum") }) t.Run("invalid network", func(t *testing.T) { - threshold := 3 numOperators := 4 - err := validateDKGConfig(threshold, numOperators, "cosmos", nil) + err := validateDKGConfig(numOperators, "cosmos", nil) require.ErrorContains(t, err, "unsupported network") }) t.Run("wrong deposit amounts sum", func(t *testing.T) { - err := validateDKGConfig(3, 4, "goerli", []int{8, 16}) + err := validateDKGConfig(4, "goerli", []int{8, 16}) require.ErrorContains(t, err, "sum of partial deposit amounts must sum up to 32ETH") }) } + +func TestDKGCLI(t *testing.T) { + var enrs []string + for range minNodes { + enrs = 
append(enrs, "enr:-JG4QG472ZVvl8ySSnUK9uNVDrP_hjkUrUqIxUC75aayzmDVQedXkjbqc7QKyOOS71VmlqnYzri_taV8ZesFYaoQSIOGAYHtv1WsgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQKwwq_CAld6oVKOrixE-JzMtvvNgb9yyI-_rwq4NFtajIN0Y3CCDhqDdWRwgg4u") + } + enrArg := "--operator-enrs=" + strings.Join(enrs, ",") + feeRecipientArg := "--fee-recipient-addresses=" + validEthAddr + withdrawalArg := "--withdrawal-addresses=" + validEthAddr + outputDirArg := "--output-dir=.charon" + + tests := []struct { + name string + enr string + feeRecipient string + withdrawal string + outputDir string + threshold string + expectedErr string + prepare func(*testing.T) + cleanup func(*testing.T) + }{ + { + name: "threshold below minimum", + enr: enrArg, + feeRecipient: feeRecipientArg, + withdrawal: withdrawalArg, + outputDir: outputDirArg, + threshold: "--threshold=1", + expectedErr: "threshold must be greater than 1", + }, + { + name: "threshold above maximum", + enr: enrArg, + feeRecipient: feeRecipientArg, + withdrawal: withdrawalArg, + outputDir: outputDirArg, + threshold: "--threshold=4", + expectedErr: "threshold cannot be greater than number of operators", + }, + { + name: "no threshold provided", + enr: enrArg, + feeRecipient: feeRecipientArg, + withdrawal: withdrawalArg, + outputDir: outputDirArg, + threshold: "", + expectedErr: "", + prepare: func(t *testing.T) { + t.Helper() + charonDir := testutil.CreateTempCharonDir(t) + b := []byte("sample definition") + require.NoError(t, os.WriteFile(path.Join(charonDir, "cluster-definition.json"), b, 0o600)) + }, + cleanup: func(t *testing.T) { + t.Helper() + err := os.RemoveAll(".charon") + require.NoError(t, err) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.prepare != nil { + test.prepare(t) + } + + cmd := newCreateCmd(newCreateDKGCmd(runCreateDKG)) + if test.threshold != "" { + cmd.SetArgs([]string{"dkg", test.enr, test.feeRecipient, test.withdrawal, test.outputDir, test.threshold}) + } else { + 
cmd.SetArgs([]string{"dkg", test.enr, test.feeRecipient, test.withdrawal, test.outputDir}) + } + + err := cmd.Execute() + if test.expectedErr != "" { + require.ErrorContains(t, err, test.expectedErr) + } else { + require.NoError(t, err) + } + + if test.cleanup != nil { + test.cleanup(t) + } + }) + } +} diff --git a/cmd/exit.go b/cmd/exit.go index 97e8350d4..702bb0037 100644 --- a/cmd/exit.go +++ b/cmd/exit.go @@ -13,6 +13,7 @@ import ( "github.com/obolnetwork/charon/app/errors" "github.com/obolnetwork/charon/app/eth2wrap" "github.com/obolnetwork/charon/app/log" + "github.com/obolnetwork/charon/eth2util" "github.com/obolnetwork/charon/eth2util/signing" "github.com/obolnetwork/charon/tbls" ) @@ -22,7 +23,7 @@ type exitConfig struct { ValidatorPubkey string ValidatorIndex uint64 ValidatorIndexPresent bool - ExpertMode bool + SkipBeaconNodeCheck bool PrivateKeyPath string ValidatorKeysDir string LockFilePath string @@ -33,7 +34,10 @@ type exitConfig struct { PlaintextOutput bool BeaconNodeTimeout time.Duration ExitFromFilePath string + ExitFromFileDir string Log log.Config + All bool + testnetConfig eth2util.Network } func newExitCmd(cmds ...*cobra.Command) *cobra.Command { @@ -59,10 +63,17 @@ const ( validatorPubkey exitEpoch exitFromFile + exitFromDir beaconNodeTimeout fetchedExitPath publishTimeout validatorIndex + all + testnetName + testnetForkVersion + testnetChainID + testnetGenesisTimestamp + testnetCapellaHardFork ) func (ef exitFlag) String() string { @@ -83,6 +94,8 @@ func (ef exitFlag) String() string { return "exit-epoch" case exitFromFile: return "exit-from-file" + case exitFromDir: + return "exit-from-dir" case beaconNodeTimeout: return "beacon-node-timeout" case fetchedExitPath: @@ -91,6 +104,18 @@ func (ef exitFlag) String() string { return "publish-timeout" case validatorIndex: return "validator-index" + case all: + return "all" + case testnetName: + return "testnet-name" + case testnetForkVersion: + return "testnet-fork-version" + case 
testnetChainID: + return "testnet-chain-id" + case testnetGenesisTimestamp: + return "testnet-genesis-timestamp" + case testnetCapellaHardFork: + return "testnet-capella-hard-fork" default: return "unknown" } @@ -130,14 +155,28 @@ func bindExitFlags(cmd *cobra.Command, config *exitConfig, flags []exitCLIFlag) cmd.Flags().Uint64Var(&config.ExitEpoch, exitEpoch.String(), 162304, maybeRequired("Exit epoch at which the validator will exit, must be the same across all the partial exits.")) case exitFromFile: cmd.Flags().StringVar(&config.ExitFromFilePath, exitFromFile.String(), "", maybeRequired("Retrieves a signed exit message from a pre-prepared file instead of --publish-address.")) + case exitFromDir: + cmd.Flags().StringVar(&config.ExitFromFileDir, exitFromDir.String(), "", maybeRequired("Retrieves a signed exit messages from a pre-prepared files in a directory instead of --publish-address.")) case beaconNodeTimeout: cmd.Flags().DurationVar(&config.BeaconNodeTimeout, beaconNodeTimeout.String(), 30*time.Second, maybeRequired("Timeout for beacon node HTTP calls.")) case fetchedExitPath: cmd.Flags().StringVar(&config.FetchedExitPath, fetchedExitPath.String(), "./", maybeRequired("Path to store fetched signed exit messages.")) case publishTimeout: - cmd.Flags().DurationVar(&config.PublishTimeout, publishTimeout.String(), 30*time.Second, "Timeout for publishing a signed exit to the publish-address API.") + cmd.Flags().DurationVar(&config.PublishTimeout, publishTimeout.String(), 5*time.Minute, "Timeout for publishing a signed exit to the publish-address API.") case validatorIndex: cmd.Flags().Uint64Var(&config.ValidatorIndex, validatorIndex.String(), 0, "Validator index of the validator to exit, the associated public key must be present in the cluster lock manifest. 
If --validator-public-key is also provided, validator existence won't be checked on the beacon chain.") + case all: + cmd.Flags().BoolVar(&config.All, all.String(), false, "Exit all currently active validators in the cluster.") + case testnetName: + cmd.Flags().StringVar(&config.testnetConfig.Name, testnetName.String(), "", "Name of the custom test network.") + case testnetForkVersion: + cmd.Flags().StringVar(&config.testnetConfig.GenesisForkVersionHex, testnetForkVersion.String(), "", "Genesis fork version of the custom test network (in hex).") + case testnetChainID: + cmd.Flags().Uint64Var(&config.testnetConfig.ChainID, "testnet-chain-id", 0, "Chain ID of the custom test network.") + case testnetGenesisTimestamp: + cmd.Flags().Int64Var(&config.testnetConfig.GenesisTimestamp, "testnet-genesis-timestamp", 0, "Genesis timestamp of the custom test network.") + case testnetCapellaHardFork: + cmd.Flags().StringVar(&config.testnetConfig.CapellaHardFork, "testnet-capella-hard-fork", "", "Capella hard fork version of the custom test network.") } if f.required { @@ -153,7 +192,7 @@ func eth2Client(ctx context.Context, u []string, timeout time.Duration, forkVers } if _, err = cl.NodeVersion(ctx, ð2api.NodeVersionOpts{}); err != nil { - return nil, errors.Wrap(err, "can't connect to beacon node") + return nil, errors.Wrap(err, "connect to beacon node") } return cl, nil diff --git a/cmd/exit_broadcast.go b/cmd/exit_broadcast.go index a93ae492c..101a81a5b 100644 --- a/cmd/exit_broadcast.go +++ b/cmd/exit_broadcast.go @@ -4,8 +4,11 @@ package cmd import ( "context" + "encoding/hex" "encoding/json" + "fmt" "os" + "path/filepath" "strings" "time" @@ -15,12 +18,14 @@ import ( "github.com/spf13/cobra" "github.com/obolnetwork/charon/app/errors" + "github.com/obolnetwork/charon/app/eth2wrap" "github.com/obolnetwork/charon/app/k1util" "github.com/obolnetwork/charon/app/log" "github.com/obolnetwork/charon/app/obolapi" "github.com/obolnetwork/charon/app/z" manifestpb 
"github.com/obolnetwork/charon/cluster/manifestpb/v1" "github.com/obolnetwork/charon/core" + "github.com/obolnetwork/charon/eth2util" "github.com/obolnetwork/charon/eth2util/keystore" "github.com/obolnetwork/charon/tbls" "github.com/obolnetwork/charon/tbls/tblsconv" @@ -52,89 +57,208 @@ func newBcastFullExitCmd(runFunc func(context.Context, exitConfig) error) *cobra {lockFilePath, false}, {validatorKeysDir, false}, {exitEpoch, false}, - {validatorPubkey, true}, + {validatorPubkey, false}, {beaconNodeEndpoints, true}, {exitFromFile, false}, + {exitFromDir, false}, {beaconNodeTimeout, false}, {publishTimeout, false}, + {all, false}, + {testnetName, false}, + {testnetForkVersion, false}, + {testnetChainID, false}, + {testnetGenesisTimestamp, false}, + {testnetCapellaHardFork, false}, }) bindLogFlags(cmd.Flags(), &config.Log) + wrapPreRunE(cmd, func(cmd *cobra.Command, _ []string) error { + valPubkPresent := cmd.Flags().Lookup(validatorPubkey.String()).Changed + exitFilePresent := cmd.Flags().Lookup(exitFromFile.String()).Changed + exitDirPresent := cmd.Flags().Lookup(exitFromDir.String()).Changed + + if !valPubkPresent && !config.All { + //nolint:revive,perfsprint // we use our own version of the errors package; keep consistency with other checks. + return errors.New(fmt.Sprintf("%s must be specified when exiting single validator.", validatorPubkey.String())) + } + + if config.All && valPubkPresent { + //nolint:revive // we use our own version of the errors package. + return errors.New(fmt.Sprintf("%s should not be specified when %s is, as it is obsolete and misleading.", validatorPubkey.String(), all.String())) + } + + if valPubkPresent && exitDirPresent { + //nolint:revive // we use our own version of the errors package. 
+ return errors.New(fmt.Sprintf("if you want to specify exit file for single validator, you must provide %s and not %s.", exitFromFile.String(), exitFromDir.String())) + } + + if config.All && exitFilePresent { + //nolint:revive // we use our own version of the errors package. + return errors.New(fmt.Sprintf("if you want to specify exit file directory for all validators, you must provide %s and not %s.", exitFromDir.String(), exitFromFile.String())) + } + + return nil + }) + return cmd } func runBcastFullExit(ctx context.Context, config exitConfig) error { + // Check if custom testnet configuration is provided. + if config.testnetConfig.IsNonZero() { + // Add testnet config to supported networks. + eth2util.AddTestNetwork(config.testnetConfig) + } + identityKey, err := k1util.Load(config.PrivateKeyPath) if err != nil { - return errors.Wrap(err, "could not load identity key") + return errors.Wrap(err, "load identity key") } cl, err := loadClusterManifest("", config.LockFilePath) if err != nil { - return errors.Wrap(err, "could not load cluster-lock.json") + return errors.Wrap(err, "load cluster lock", z.Str("lock_file_path", config.LockFilePath)) } - validator := core.PubKey(config.ValidatorPubkey) - if _, err := validator.Bytes(); err != nil { - return errors.Wrap(err, "cannot convert validator pubkey to bytes") - } - - ctx = log.WithCtx(ctx, z.Str("validator", validator.String())) - eth2Cl, err := eth2Client(ctx, config.BeaconNodeEndpoints, config.BeaconNodeTimeout, [4]byte(cl.GetForkVersion())) if err != nil { - return errors.Wrap(err, "cannot create eth2 client for specified beacon node") + return errors.Wrap(err, "create eth2 client for specified beacon node(s)", z.Any("beacon_nodes_endpoints", config.BeaconNodeEndpoints)) } - var fullExit eth2p0.SignedVoluntaryExit - maybeExitFilePath := strings.TrimSpace(config.ExitFromFilePath) - - if len(maybeExitFilePath) != 0 { - log.Info(ctx, "Retrieving full exit message from path", z.Str("path", maybeExitFilePath)) - 
fullExit, err = exitFromPath(maybeExitFilePath) + fullExits := make(map[core.PubKey]eth2p0.SignedVoluntaryExit) + if config.All { + if config.ExitFromFileDir != "" { + entries, err := os.ReadDir(config.ExitFromFileDir) + if err != nil { + return errors.Wrap(err, "read exits directory", z.Str("exit_file_dir", config.ExitFromFileDir)) + } + for _, entry := range entries { + if !strings.HasPrefix(entry.Name(), "exit-") { + continue + } + valCtx := log.WithCtx(ctx, z.Str("validator_exit_file", entry.Name())) + exit, err := fetchFullExit(valCtx, filepath.Join(config.ExitFromFileDir, entry.Name()), config, cl, identityKey, "") + if err != nil { + return err + } + + validatorPubKey, err := validatorPubKeyFromFileName(entry.Name()) + if err != nil { + return err + } + + fullExits[validatorPubKey] = exit + } + } else { + for _, validator := range cl.GetValidators() { + validatorPubKeyHex := fmt.Sprintf("0x%x", validator.GetPublicKey()) + + valCtx := log.WithCtx(ctx, z.Str("validator_public_key", validatorPubKeyHex)) + exit, err := fetchFullExit(valCtx, "", config, cl, identityKey, validatorPubKeyHex) + if err != nil { + return errors.Wrap(err, "fetch full exit for all validators from public key") + } + validatorPubKey, err := core.PubKeyFromBytes(validator.GetPublicKey()) + if err != nil { + return errors.Wrap(err, "convert public key for validator") + } + fullExits[validatorPubKey] = exit + } + } } else { - log.Info(ctx, "Retrieving full exit message from publish address") - fullExit, err = exitFromObolAPI(ctx, config.ValidatorPubkey, config.PublishAddress, config.PublishTimeout, cl, identityKey) + valCtx := log.WithCtx(ctx, z.Str("validator_public_key", config.ValidatorPubkey), z.Str("validator_exit_file", config.ExitFromFilePath)) + exit, err := fetchFullExit(valCtx, strings.TrimSpace(config.ExitFromFilePath), config, cl, identityKey, config.ValidatorPubkey) + if err != nil { + return errors.Wrap(err, "fetch full exit for validator", z.Str("validator_public_key", 
config.ValidatorPubkey), z.Str("validator_exit_file", config.ExitFromFilePath)) + } + var validatorPubKey core.PubKey + if len(strings.TrimSpace(config.ExitFromFilePath)) != 0 { + validatorPubKey, err = validatorPubKeyFromFileName(config.ExitFromFilePath) + if err != nil { + return err + } + } else { + validatorPubKey = core.PubKey(config.ValidatorPubkey) + } + fullExits[validatorPubKey] = exit } - if err != nil { - return err - } + return broadcastExitsToBeacon(ctx, eth2Cl, fullExits) +} - // parse validator public key - rawPkBytes, err := validator.Bytes() +func validatorPubKeyFromFileName(fileName string) (core.PubKey, error) { + fileNameChecked := filepath.Base(fileName) + fileExtension := filepath.Ext(fileNameChecked) + validatorPubKeyHex := strings.TrimPrefix(strings.TrimSuffix(fileNameChecked, fileExtension), "exit-0x") + validatorPubKeyBytes, err := hex.DecodeString(validatorPubKeyHex) if err != nil { - return errors.Wrap(err, "could not serialize validator key bytes") + return "", errors.Wrap(err, "decode public key hex from file name", z.Str("public_key", validatorPubKeyHex)) } - - pubkey, err := tblsconv.PubkeyFromBytes(rawPkBytes) + validatorPubKey, err := core.PubKeyFromBytes(validatorPubKeyBytes) if err != nil { - return errors.Wrap(err, "could not convert validator key bytes to BLS public key") + return "", errors.Wrap(err, "decode core public key from hex") } - // parse signature - signature, err := tblsconv.SignatureFromBytes(fullExit.Signature[:]) - if err != nil { - return errors.Wrap(err, "could not parse BLS signature from bytes") - } + return validatorPubKey, nil +} - exitRoot, err := sigDataForExit( - ctx, - *fullExit.Message, - eth2Cl, - fullExit.Message.Epoch, - ) - if err != nil { - return errors.Wrap(err, "cannot calculate hash tree root for exit message for verification") +func fetchFullExit(ctx context.Context, exitFilePath string, config exitConfig, cl *manifestpb.Cluster, identityKey *k1.PrivateKey, validatorPubKey string) 
(eth2p0.SignedVoluntaryExit, error) { + var fullExit eth2p0.SignedVoluntaryExit + var err error + + if len(exitFilePath) != 0 { + log.Info(ctx, "Retrieving full exit message from path") + fullExit, err = exitFromPath(exitFilePath) + } else { + log.Info(ctx, "Retrieving full exit message from publish address") + fullExit, err = exitFromObolAPI(ctx, validatorPubKey, config.PublishAddress, config.PublishTimeout, cl, identityKey) } - if err := tbls.Verify(pubkey, exitRoot[:], signature); err != nil { - return errors.Wrap(err, "exit message signature not verified") + return fullExit, err +} + +func broadcastExitsToBeacon(ctx context.Context, eth2Cl eth2wrap.Client, exits map[core.PubKey]eth2p0.SignedVoluntaryExit) error { + for validator, fullExit := range exits { + valCtx := log.WithCtx(ctx, z.Str("validator", validator.String())) + + rawPkBytes, err := validator.Bytes() + if err != nil { + return errors.Wrap(err, "serialize validator key bytes", z.Str("validator", validator.String())) + } + + pubkey, err := tblsconv.PubkeyFromBytes(rawPkBytes) + if err != nil { + return errors.Wrap(err, "convert validator key bytes to BLS public key") + } + + // parse signature + signature, err := tblsconv.SignatureFromBytes(fullExit.Signature[:]) + if err != nil { + return errors.Wrap(err, "parse BLS signature from bytes", z.Str("exit_signature", fullExit.Signature.String())) + } + + exitRoot, err := sigDataForExit( + valCtx, + *fullExit.Message, + eth2Cl, + fullExit.Message.Epoch, + ) + if err != nil { + return errors.Wrap(err, "calculate hash tree root for exit message for verification") + } + + if err := tbls.Verify(pubkey, exitRoot[:], signature); err != nil { + return errors.Wrap(err, "exit message signature not verified") + } } - if err := eth2Cl.SubmitVoluntaryExit(ctx, &fullExit); err != nil { - return errors.Wrap(err, "could not submit voluntary exit") + for validator, fullExit := range exits { + valCtx := log.WithCtx(ctx, z.Str("validator", validator.String())) + if err := 
eth2Cl.SubmitVoluntaryExit(valCtx, &fullExit); err != nil { + return errors.Wrap(err, "submit voluntary exit") + } + log.Info(valCtx, "Successfully submitted voluntary exit for validator") } return nil @@ -144,17 +268,17 @@ func runBcastFullExit(ctx context.Context, config exitConfig) error { func exitFromObolAPI(ctx context.Context, validatorPubkey, publishAddr string, publishTimeout time.Duration, cl *manifestpb.Cluster, identityKey *k1.PrivateKey) (eth2p0.SignedVoluntaryExit, error) { oAPI, err := obolapi.New(publishAddr, obolapi.WithTimeout(publishTimeout)) if err != nil { - return eth2p0.SignedVoluntaryExit{}, errors.Wrap(err, "could not create obol api client") + return eth2p0.SignedVoluntaryExit{}, errors.Wrap(err, "create Obol API client", z.Str("publish_address", publishAddr)) } shareIdx, err := keystore.ShareIdxForCluster(cl, *identityKey.PubKey()) if err != nil { - return eth2p0.SignedVoluntaryExit{}, errors.Wrap(err, "could not determine operator index from cluster lock for supplied identity key") + return eth2p0.SignedVoluntaryExit{}, errors.Wrap(err, "determine operator index from cluster lock for supplied identity key") } fullExit, err := oAPI.GetFullExit(ctx, validatorPubkey, cl.GetInitialMutationHash(), shareIdx, identityKey) if err != nil { - return eth2p0.SignedVoluntaryExit{}, errors.Wrap(err, "could not load full exit data from Obol API") + return eth2p0.SignedVoluntaryExit{}, errors.Wrap(err, "load full exit data from Obol API", z.Str("publish_address", publishAddr)) } return fullExit.SignedExitMessage, nil @@ -164,7 +288,7 @@ func exitFromObolAPI(ctx context.Context, validatorPubkey, publishAddr string, p func exitFromPath(path string) (eth2p0.SignedVoluntaryExit, error) { f, err := os.Open(path) if err != nil { - return eth2p0.SignedVoluntaryExit{}, errors.Wrap(err, "can't open signed exit message from path") + return eth2p0.SignedVoluntaryExit{}, errors.Wrap(err, "open signed exit message from path") } var exit eth2p0.SignedVoluntaryExit 
diff --git a/cmd/exit_broadcast_internal_test.go b/cmd/exit_broadcast_internal_test.go index 5c2c7b3fe..83990d5d2 100644 --- a/cmd/exit_broadcast_internal_test.go +++ b/cmd/exit_broadcast_internal_test.go @@ -31,16 +31,24 @@ func Test_runBcastFullExitCmd(t *testing.T) { t.Parallel() t.Run("main flow from api", func(t *testing.T) { t.Parallel() - testRunBcastFullExitCmdFlow(t, false) + testRunBcastFullExitCmdFlow(t, false, false) }) t.Run("main flow from file", func(t *testing.T) { t.Parallel() - testRunBcastFullExitCmdFlow(t, true) + testRunBcastFullExitCmdFlow(t, true, false) + }) + t.Run("main flow from api for all", func(t *testing.T) { + t.Parallel() + testRunBcastFullExitCmdFlow(t, false, true) + }) + t.Run("main flow from file for all", func(t *testing.T) { + t.Parallel() + testRunBcastFullExitCmdFlow(t, true, true) }) t.Run("config", Test_runBcastFullExitCmd_Config) } -func testRunBcastFullExitCmdFlow(t *testing.T, fromFile bool) { +func testRunBcastFullExitCmdFlow(t *testing.T, fromFile bool, all bool) { t.Helper() ctx := context.Background() @@ -114,7 +122,6 @@ func testRunBcastFullExitCmdFlow(t *testing.T, fromFile bool) { config := exitConfig{ BeaconNodeEndpoints: []string{beaconMock.Address()}, - ValidatorPubkey: lock.Validators[0].PublicKeyHex(), PrivateKeyPath: filepath.Join(baseDir, "charon-enr-private-key"), ValidatorKeysDir: filepath.Join(baseDir, "validator_keys"), LockFilePath: filepath.Join(baseDir, "cluster-lock.json"), @@ -124,6 +131,12 @@ func testRunBcastFullExitCmdFlow(t *testing.T, fromFile bool) { PublishTimeout: 10 * time.Second, } + if all { + config.All = all + } else { + config.ValidatorPubkey = lock.Validators[0].PublicKeyHex() + } + require.NoError(t, runSignPartialExit(ctx, config), "operator index: %v", idx) } @@ -131,7 +144,6 @@ func testRunBcastFullExitCmdFlow(t *testing.T, fromFile bool) { config := exitConfig{ BeaconNodeEndpoints: []string{beaconMock.Address()}, - ValidatorPubkey: lock.Validators[0].PublicKeyHex(), 
PrivateKeyPath: filepath.Join(baseDir, "charon-enr-private-key"), ValidatorKeysDir: filepath.Join(baseDir, "validator_keys"), LockFilePath: filepath.Join(baseDir, "cluster-lock.json"), @@ -141,17 +153,39 @@ func testRunBcastFullExitCmdFlow(t *testing.T, fromFile bool) { PublishTimeout: 10 * time.Second, } + if all { + config.All = all + } else { + config.ValidatorPubkey = lock.Validators[0].PublicKeyHex() + } + if fromFile { - exit, err := exitFromObolAPI(ctx, lock.Validators[0].PublicKeyHex(), srv.URL, 10*time.Second, cl, enrs[0]) - require.NoError(t, err) + if all { + for _, validator := range lock.Validators { + validatorPublicKey := validator.PublicKeyHex() + exit, err := exitFromObolAPI(ctx, validatorPublicKey, srv.URL, 10*time.Second, cl, enrs[0]) + require.NoError(t, err) - exitBytes, err := json.Marshal(exit) - require.NoError(t, err) + exitBytes, err := json.Marshal(exit) + require.NoError(t, err) - exitPath := filepath.Join(baseDir, "exit.json") - require.NoError(t, os.WriteFile(exitPath, exitBytes, 0o755)) + exitPath := filepath.Join(baseDir, fmt.Sprintf("exit-%s.json", validatorPublicKey)) + require.NoError(t, os.WriteFile(exitPath, exitBytes, 0o755)) + } + config.ExitFromFileDir = baseDir + } else { + validatorPublicKey := lock.Validators[0].PublicKeyHex() + exit, err := exitFromObolAPI(ctx, validatorPublicKey, srv.URL, 10*time.Second, cl, enrs[0]) + require.NoError(t, err) + + exitBytes, err := json.Marshal(exit) + require.NoError(t, err) - config.ExitFromFilePath = exitPath + exitPath := filepath.Join(baseDir, fmt.Sprintf("exit-%s.json", validatorPublicKey)) + require.NoError(t, os.WriteFile(exitPath, exitBytes, 0o755)) + + config.ExitFromFilePath = exitPath + } } require.NoError(t, runBcastFullExit(ctx, config)) @@ -168,33 +202,34 @@ func Test_runBcastFullExitCmd_Config(t *testing.T) { badValidatorAddr bool badExistingExitPath bool errData string + all bool } tests := []test{ { name: "No identity key", noIdentity: true, - errData: "could not load 
identity key", + errData: "load identity key", }, { name: "No lock", noLock: true, - errData: "could not load cluster-lock.json", + errData: "load cluster lock", }, { name: "Bad Obol API URL", badOAPIURL: true, - errData: "could not create obol api client", + errData: "create Obol API client", }, { name: "Bad beacon node URLs", badBeaconNodeEndpoints: true, - errData: "cannot create eth2 client for specified beacon node", + errData: "create eth2 client for specified beacon node", }, { name: "Bad validator address", badValidatorAddr: true, - errData: "cannot convert validator pubkey to bytes", + errData: "validator pubkey to bytes", }, { name: "Bad existing exit file", @@ -289,10 +324,11 @@ func Test_runBcastFullExitCmd_Config(t *testing.T) { ExitEpoch: 0, BeaconNodeTimeout: 30 * time.Second, PublishTimeout: 10 * time.Second, + All: test.all, } if test.badExistingExitPath { - path := filepath.Join(baseDir, "exit.json") + path := filepath.Join(baseDir, fmt.Sprintf("exit-%s.json", lock.Validators[0].PublicKeyHex())) require.NoError(t, os.WriteFile(path, []byte("bad"), 0o755)) config.ExitFromFilePath = path } @@ -301,3 +337,92 @@ func Test_runBcastFullExitCmd_Config(t *testing.T) { }) } } + +func TestExitBroadcastCLI(t *testing.T) { + tests := []struct { + name string + expectedErr string + + flags []string + }{ + { + name: "check flags", + expectedErr: "load identity key: read private key from disk: open test: no such file or directory", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-keys-dir=test", + "--exit-epoch=1", + "--validator-public-key=test", // single exit + "--beacon-node-endpoints=test1,test2", + "--exit-from-file=test", // single exit + "--beacon-node-timeout=1ms", + "--publish-timeout=1ms", + "--all=false", // single exit + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + { 
+ name: "check flags all", + expectedErr: "load identity key: read private key from disk: open test: no such file or directory", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-keys-dir=test", // exit all + "--exit-epoch=1", + "--beacon-node-endpoints=test1,test2", + "--exit-from-dir=test", + "--beacon-node-timeout=1ms", + "--publish-timeout=1ms", + "--all", // exit all + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + { + name: "check flags all", + expectedErr: "load identity key: read private key from disk: open test: no such file or directory", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-keys-dir=test", // exit all + "--exit-epoch=1", + "--beacon-node-endpoints=test1,test2", + "--exit-from-dir=test", + "--beacon-node-timeout=1ms", + "--publish-timeout=1ms", + "--all", // exit all + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cmd := newExitCmd(newBcastFullExitCmd(runBcastFullExit)) + cmd.SetArgs(append([]string{"broadcast"}, test.flags...)) + + err := cmd.Execute() + if test.expectedErr != "" { + require.Error(t, err) + require.ErrorContains(t, err, test.expectedErr) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/cmd/exit_fetch.go b/cmd/exit_fetch.go index 67f956696..9a27a3974 100644 --- a/cmd/exit_fetch.go +++ b/cmd/exit_fetch.go @@ -18,6 +18,7 @@ import ( "github.com/obolnetwork/charon/app/obolapi" "github.com/obolnetwork/charon/app/z" "github.com/obolnetwork/charon/core" + "github.com/obolnetwork/charon/eth2util" "github.com/obolnetwork/charon/eth2util/keystore" ) @@ -45,67 +46,123 @@ func 
newFetchExitCmd(runFunc func(context.Context, exitConfig) error) *cobra.Com {publishAddress, false}, {privateKeyPath, false}, {lockFilePath, false}, - {validatorPubkey, true}, + {validatorPubkey, false}, + {all, false}, {fetchedExitPath, false}, {publishTimeout, false}, + {testnetName, false}, + {testnetForkVersion, false}, + {testnetChainID, false}, + {testnetGenesisTimestamp, false}, + {testnetCapellaHardFork, false}, }) bindLogFlags(cmd.Flags(), &config.Log) + wrapPreRunE(cmd, func(cmd *cobra.Command, _ []string) error { + valPubkPresent := cmd.Flags().Lookup(validatorPubkey.String()).Changed + + if !valPubkPresent && !config.All { + //nolint:revive,perfsprint // we use our own version of the errors package; keep consistency with other checks. + return errors.New(fmt.Sprintf("%s must be specified when exiting single validator.", validatorPubkey.String())) + } + + if config.All && valPubkPresent { + //nolint:revive // we use our own version of the errors package. + return errors.New(fmt.Sprintf("%s should not be specified when %s is, as it is obsolete and misleading.", validatorPubkey.String(), all.String())) + } + + return nil + }) + return cmd } func runFetchExit(ctx context.Context, config exitConfig) error { + // Check if custom testnet configuration is provided. + if config.testnetConfig.IsNonZero() { + // Add testnet config to supported networks. 
+ eth2util.AddTestNetwork(config.testnetConfig) + } + if _, err := os.Stat(config.FetchedExitPath); err != nil { - return errors.Wrap(err, "store exit path") + return errors.Wrap(err, "store exit path", z.Str("fetched_exit_path", config.FetchedExitPath)) } writeTestFile := filepath.Join(config.FetchedExitPath, ".write-test") if err := os.WriteFile(writeTestFile, []byte{}, 0o755); err != nil { //nolint:gosec // write test file - return errors.Wrap(err, "can't write to destination directory") + return errors.Wrap(err, "write to destination directory", z.Str("fetched_exit_path", config.FetchedExitPath)) } if err := os.Remove(writeTestFile); err != nil { - return errors.Wrap(err, "can't delete write test file") + return errors.Wrap(err, "delete write test file", z.Str("test_file_path", writeTestFile)) } identityKey, err := k1util.Load(config.PrivateKeyPath) if err != nil { - return errors.Wrap(err, "could not load identity key") + return errors.Wrap(err, "load identity key", z.Str("private_key_path", config.PrivateKeyPath)) } cl, err := loadClusterManifest("", config.LockFilePath) if err != nil { - return errors.Wrap(err, "could not load cluster-lock.json") - } - - validator := core.PubKey(config.ValidatorPubkey) - if _, err := validator.Bytes(); err != nil { - return errors.Wrap(err, "cannot convert validator pubkey to bytes") + return errors.Wrap(err, "load cluster lock", z.Str("lock_file_path", config.LockFilePath)) } - ctx = log.WithCtx(ctx, z.Str("validator", validator.String())) - oAPI, err := obolapi.New(config.PublishAddress, obolapi.WithTimeout(config.PublishTimeout)) if err != nil { - return errors.Wrap(err, "could not create obol api client") + return errors.Wrap(err, "create Obol API client", z.Str("publish_address", config.PublishAddress)) } - log.Info(ctx, "Retrieving full exit message") - shareIdx, err := keystore.ShareIdxForCluster(cl, *identityKey.PubKey()) if err != nil { - return errors.Wrap(err, "could not determine operator index from cluster lock 
for supplied identity key") + return errors.Wrap(err, "determine operator index from cluster lock for supplied identity key") } - fullExit, err := oAPI.GetFullExit(ctx, config.ValidatorPubkey, cl.GetInitialMutationHash(), shareIdx, identityKey) - if err != nil { - return errors.Wrap(err, "could not load full exit data from Obol API") + if config.All { + for _, validator := range cl.GetValidators() { + validatorPubKeyHex := fmt.Sprintf("0x%x", validator.GetPublicKey()) + + valCtx := log.WithCtx(ctx, z.Str("validator", validatorPubKeyHex)) + + log.Info(valCtx, "Retrieving full exit message") + + fullExit, err := oAPI.GetFullExit(valCtx, validatorPubKeyHex, cl.GetInitialMutationHash(), shareIdx, identityKey) + if err != nil { + return errors.Wrap(err, "load full exit data from Obol API", z.Str("validator_public_key", validatorPubKeyHex)) + } + + err = writeExitToFile(valCtx, validatorPubKeyHex, config.FetchedExitPath, fullExit) + if err != nil { + return err + } + } + } else { + validator := core.PubKey(config.ValidatorPubkey) + if _, err := validator.Bytes(); err != nil { + return errors.Wrap(err, "convert validator pubkey to bytes", z.Str("validator_public_key", config.ValidatorPubkey)) + } + + ctx = log.WithCtx(ctx, z.Str("validator", validator.String())) + + log.Info(ctx, "Retrieving full exit message") + + fullExit, err := oAPI.GetFullExit(ctx, config.ValidatorPubkey, cl.GetInitialMutationHash(), shareIdx, identityKey) + if err != nil { + return errors.Wrap(err, "load full exit data from Obol API", z.Str("validator_public_key", config.ValidatorPubkey)) + } + + err = writeExitToFile(ctx, config.ValidatorPubkey, config.FetchedExitPath, fullExit) + if err != nil { + return err + } } - fetchedExitFname := fmt.Sprintf("exit-%s.json", config.ValidatorPubkey) + return nil +} - fetchedExitPath := filepath.Join(config.FetchedExitPath, fetchedExitFname) +func writeExitToFile(ctx context.Context, valPubKey string, exitPath string, fullExit obolapi.ExitBlob) error { + 
fetchedExitFname := fmt.Sprintf("exit-%s.json", valPubKey) + fetchedExitPath := filepath.Join(exitPath, fetchedExitFname) exitData, err := json.Marshal(fullExit.SignedExitMessage) if err != nil { diff --git a/cmd/exit_fetch_internal_test.go b/cmd/exit_fetch_internal_test.go index f34d1015a..0496eb1da 100644 --- a/cmd/exit_fetch_internal_test.go +++ b/cmd/exit_fetch_internal_test.go @@ -26,12 +26,19 @@ import ( func Test_runFetchExit(t *testing.T) { t.Parallel() - t.Run("full flow", Test_runFetchExitFullFlow) + t.Run("full flow", func(t *testing.T) { + t.Parallel() + testRunFetchExitFullFlow(t, false) + }) + t.Run("full flow all", func(t *testing.T) { + t.Parallel() + testRunFetchExitFullFlow(t, true) + }) t.Run("bad out dir", Test_runFetchExitBadOutDir) } -func Test_runFetchExitFullFlow(t *testing.T) { - t.Parallel() +func testRunFetchExitFullFlow(t *testing.T, all bool) { + t.Helper() ctx := context.Background() valAmt := 100 @@ -106,6 +113,7 @@ func Test_runFetchExitFullFlow(t *testing.T) { ExitEpoch: 194048, BeaconNodeTimeout: 30 * time.Second, PublishTimeout: 10 * time.Second, + All: all, } require.NoError(t, runSignPartialExit(ctx, config), "operator index: %v", idx) @@ -120,6 +128,7 @@ func Test_runFetchExitFullFlow(t *testing.T) { PublishAddress: srv.URL, FetchedExitPath: root, PublishTimeout: 10 * time.Second, + All: all, } require.NoError(t, runFetchExit(ctx, config)) @@ -160,3 +169,80 @@ func Test_runFetchExitBadOutDir(t *testing.T) { require.ErrorContains(t, runFetchExit(context.Background(), config), "permission denied") } + +func TestExitFetchCLI(t *testing.T) { + tests := []struct { + name string + expectedErr string + flags []string + }{ + { + name: "check flags", + expectedErr: "store exit path: stat 1: no such file or directory", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-public-key=test", + "--fetched-exit-path=1", + "--publish-timeout=1ms", + "--all=false", + 
"--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + { + name: "no validator public key and not all", + expectedErr: "validator-public-key must be specified when exiting single validator.", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--fetched-exit-path=1", + "--publish-timeout=1ms", + "--all=false", + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + { + name: "validator public key and all", + expectedErr: "validator-public-key should not be specified when all is, as it is obsolete and misleading.", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-public-key=test", + "--fetched-exit-path=1", + "--publish-timeout=1ms", + "--all=true", + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cmd := newExitCmd(newFetchExitCmd(runFetchExit)) + cmd.SetArgs(append([]string{"fetch"}, test.flags...)) + + err := cmd.Execute() + if test.expectedErr != "" { + require.Error(t, err) + require.ErrorContains(t, err, test.expectedErr) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/cmd/exit_list.go b/cmd/exit_list.go index 79c753df4..e60959b0d 100644 --- a/cmd/exit_list.go +++ b/cmd/exit_list.go @@ -15,6 +15,7 @@ import ( "github.com/obolnetwork/charon/app/errors" "github.com/obolnetwork/charon/app/log" "github.com/obolnetwork/charon/app/z" + "github.com/obolnetwork/charon/eth2util" ) func newListActiveValidatorsCmd(runFunc func(context.Context, exitConfig) error) *cobra.Command { @@ -43,6 +44,11 @@ func 
newListActiveValidatorsCmd(runFunc func(context.Context, exitConfig) error) {lockFilePath, false}, {beaconNodeEndpoints, true}, {beaconNodeTimeout, false}, + {testnetName, false}, + {testnetForkVersion, false}, + {testnetChainID, false}, + {testnetGenesisTimestamp, false}, + {testnetCapellaHardFork, false}, }) bindLogFlags(cmd.Flags(), &config.Log) @@ -51,6 +57,12 @@ func newListActiveValidatorsCmd(runFunc func(context.Context, exitConfig) error) } func runListActiveValidatorsCmd(ctx context.Context, config exitConfig) error { + // Check if custom testnet configuration is provided. + if config.testnetConfig.IsNonZero() { + // Add testnet config to supported networks. + eth2util.AddTestNetwork(config.testnetConfig) + } + valList, err := listActiveVals(ctx, config) if err != nil { return err @@ -63,7 +75,7 @@ func runListActiveValidatorsCmd(ctx context.Context, config exitConfig) error { continue } - log.Info(ctx, "Validator", z.Str("pubkey", validator)) + log.Info(ctx, "Validator", z.Str("validator_public_key", validator)) } return nil @@ -72,12 +84,12 @@ func runListActiveValidatorsCmd(ctx context.Context, config exitConfig) error { func listActiveVals(ctx context.Context, config exitConfig) ([]string, error) { cl, err := loadClusterManifest("", config.LockFilePath) if err != nil { - return nil, errors.Wrap(err, "could not load cluster-lock.json") + return nil, errors.Wrap(err, "load cluster lock", z.Str("lock_file_path", config.LockFilePath)) } eth2Cl, err := eth2Client(ctx, config.BeaconNodeEndpoints, config.BeaconNodeTimeout, [4]byte{}) // fine to avoid initializing a fork version, we're just querying the BN if err != nil { - return nil, errors.Wrap(err, "cannot create eth2 client for specified beacon node") + return nil, errors.Wrap(err, "create eth2 client for specified beacon node(s)", z.Any("beacon_nodes_endpoints", config.BeaconNodeEndpoints)) } var allVals []eth2p0.BLSPubKey @@ -91,7 +103,7 @@ func listActiveVals(ctx context.Context, config exitConfig) 
([]string, error) { State: "head", }) if err != nil { - return nil, errors.Wrap(err, "cannot fetch validator list") + return nil, errors.Wrap(err, "fetch validator list from beacon", z.Str("beacon_address", eth2Cl.Address()), z.Any("validators", allVals)) } var ret []string diff --git a/cmd/exit_list_internal_test.go b/cmd/exit_list_internal_test.go index 05450b9b6..974e1f2c2 100644 --- a/cmd/exit_list_internal_test.go +++ b/cmd/exit_list_internal_test.go @@ -197,3 +197,41 @@ func Test_listActiveVals(t *testing.T) { require.Len(t, vals, len(lock.Validators)/2) }) } + +func TestExitListCLI(t *testing.T) { + tests := []struct { + name string + expectedErr string + flags []string + }{ + { + name: "check flags", + expectedErr: "load cluster lock: load cluster manifest from disk: load dag from disk: no file found", + flags: []string{ + "--lock-file=test", + "--beacon-node-endpoints=test1,test2", + "--beacon-node-timeout=1ms", + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cmd := newExitCmd(newListActiveValidatorsCmd(runListActiveValidatorsCmd)) + cmd.SetArgs(append([]string{"active-validator-list"}, test.flags...)) + + err := cmd.Execute() + if test.expectedErr != "" { + require.Error(t, err) + require.ErrorContains(t, err, test.expectedErr) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/cmd/exit_sign.go b/cmd/exit_sign.go index 2723f69b5..342d6d7cc 100644 --- a/cmd/exit_sign.go +++ b/cmd/exit_sign.go @@ -5,23 +5,25 @@ package cmd import ( "context" "fmt" - "strings" eth2api "github.com/attestantio/go-eth2-client/api" + eth2v1 "github.com/attestantio/go-eth2-client/api/v1" eth2p0 "github.com/attestantio/go-eth2-client/spec/phase0" libp2plog "github.com/ipfs/go-log/v2" "github.com/spf13/cobra" "github.com/obolnetwork/charon/app/errors" + 
"github.com/obolnetwork/charon/app/eth2wrap" "github.com/obolnetwork/charon/app/k1util" "github.com/obolnetwork/charon/app/log" "github.com/obolnetwork/charon/app/obolapi" "github.com/obolnetwork/charon/app/z" "github.com/obolnetwork/charon/core" + "github.com/obolnetwork/charon/eth2util" "github.com/obolnetwork/charon/eth2util/keystore" ) -func newSubmitPartialExitCmd(runFunc func(context.Context, exitConfig) error) *cobra.Command { +func newSignPartialExitCmd(runFunc func(context.Context, exitConfig) error) *cobra.Command { var config exitConfig cmd := &cobra.Command{ @@ -52,6 +54,12 @@ func newSubmitPartialExitCmd(runFunc func(context.Context, exitConfig) error) *c {beaconNodeEndpoints, true}, {beaconNodeTimeout, false}, {publishTimeout, false}, + {all, false}, + {testnetName, false}, + {testnetForkVersion, false}, + {testnetChainID, false}, + {testnetGenesisTimestamp, false}, + {testnetCapellaHardFork, false}, }) bindLogFlags(cmd.Flags(), &config.Log) @@ -60,13 +68,18 @@ func newSubmitPartialExitCmd(runFunc func(context.Context, exitConfig) error) *c valIdxPresent := cmd.Flags().Lookup(validatorIndex.String()).Changed valPubkPresent := cmd.Flags().Lookup(validatorPubkey.String()).Changed - if strings.TrimSpace(config.ValidatorPubkey) == "" && !valIdxPresent { + if !valPubkPresent && !valIdxPresent && !config.All { //nolint:revive // we use our own version of the errors package. - return errors.New(fmt.Sprintf("either %s or %s must be specified at least.", validatorIndex.String(), validatorPubkey.String())) + return errors.New(fmt.Sprintf("either %s or %s must be specified at least when exiting single validator.", validatorIndex.String(), validatorPubkey.String())) + } + + if config.All && (valIdxPresent || valPubkPresent) { + //nolint:revive // we use our own version of the errors package. 
+ return errors.New(fmt.Sprintf("%s or %s should not be specified when %s is, as they are obsolete and misleading.", validatorIndex.String(), validatorPubkey.String(), all.String())) } config.ValidatorIndexPresent = valIdxPresent - config.ExpertMode = valIdxPresent && valPubkPresent + config.SkipBeaconNodeCheck = valIdxPresent && valPubkPresent return nil }) @@ -75,135 +88,222 @@ func newSubmitPartialExitCmd(runFunc func(context.Context, exitConfig) error) *c } func runSignPartialExit(ctx context.Context, config exitConfig) error { + // Check if custom testnet configuration is provided. + if config.testnetConfig.IsNonZero() { + // Add testnet config to supported networks. + eth2util.AddTestNetwork(config.testnetConfig) + } + identityKey, err := k1util.Load(config.PrivateKeyPath) if err != nil { - return errors.Wrap(err, "could not load identity key") + return errors.Wrap(err, "load identity key", z.Str("private_key_path", config.PrivateKeyPath)) } cl, err := loadClusterManifest("", config.LockFilePath) if err != nil { - return errors.Wrap(err, "could not load cluster-lock.json") + return errors.Wrap(err, "load cluster lock", z.Str("lock_file_path", config.LockFilePath)) } rawValKeys, err := keystore.LoadFilesUnordered(config.ValidatorKeysDir) if err != nil { - return errors.Wrap(err, "could not load keystore, check if path exists", z.Str("path", config.ValidatorKeysDir)) + return errors.Wrap(err, "load keystore, check if path exists", z.Str("validator_keys_dir", config.ValidatorKeysDir)) } valKeys, err := rawValKeys.SequencedKeys() if err != nil { - return errors.Wrap(err, "could not load keystore") + return errors.Wrap(err, "load keystore") } shares, err := keystore.KeysharesToValidatorPubkey(cl, valKeys) if err != nil { - return errors.Wrap(err, "could not match local validator key shares with their counterparty in cluster lock") + return errors.Wrap(err, "match local validator key shares with their counterparty in cluster lock") } - validator := 
core.PubKey(config.ValidatorPubkey) + shareIdx, err := keystore.ShareIdxForCluster(cl, *identityKey.PubKey()) + if err != nil { + return errors.Wrap(err, "determine operator index from cluster lock for supplied identity key") + } - valEth2, err := validator.ToETH2() + oAPI, err := obolapi.New(config.PublishAddress, obolapi.WithTimeout(config.PublishTimeout)) if err != nil { - if (strings.TrimSpace(config.ValidatorPubkey) != "" && !config.ValidatorIndexPresent) || config.ExpertMode { - return errors.Wrap(err, "cannot convert validator pubkey to bytes") - } + return errors.Wrap(err, "create Obol API client", z.Str("publish_address", config.PublishAddress)) } - switch { - case config.ExpertMode: - ctx = log.WithCtx(ctx, z.U64("validator_index", config.ValidatorIndex), z.Str("validator", validator.String())) - case config.ValidatorIndexPresent && !config.ExpertMode: + eth2Cl, err := eth2Client(ctx, config.BeaconNodeEndpoints, config.BeaconNodeTimeout, [4]byte(cl.GetForkVersion())) + if err != nil { + return errors.Wrap(err, "create eth2 client for specified beacon node(s)", z.Any("beacon_nodes_endpoints", config.BeaconNodeEndpoints)) + } + + if config.ValidatorIndexPresent { ctx = log.WithCtx(ctx, z.U64("validator_index", config.ValidatorIndex)) - default: - ctx = log.WithCtx(ctx, z.Str("validator", validator.String())) + } + if config.ValidatorPubkey != "" { + ctx = log.WithCtx(ctx, z.Str("validator_pubkey", config.ValidatorPubkey)) } - shareIdx, err := keystore.ShareIdxForCluster(cl, *identityKey.PubKey()) + if config.SkipBeaconNodeCheck { + log.Info(ctx, "Both public key and index are specified, beacon node won't be checked for validator existence/liveness") + } + + var exitBlobs []obolapi.ExitBlob + if config.All { + exitBlobs, err = signAllValidatorsExits(ctx, config, eth2Cl, shares) + if err != nil { + return errors.Wrap(err, "sign exits for all validators") + } + } else { + exitBlobs, err = signSingleValidatorExit(ctx, config, eth2Cl, shares) + if err != nil { + 
return errors.Wrap(err, "sign exit for validator") + } + } + + if err := oAPI.PostPartialExits(ctx, cl.GetInitialMutationHash(), shareIdx, identityKey, exitBlobs...); err != nil { + return errors.Wrap(err, "http POST partial exit message to Obol API") + } + + return nil +} + +func signSingleValidatorExit(ctx context.Context, config exitConfig, eth2Cl eth2wrap.Client, shares keystore.ValidatorShares) ([]obolapi.ExitBlob, error) { + valEth2, err := fetchValidatorBLSPubKey(ctx, config, eth2Cl) if err != nil { - return errors.Wrap(err, "could not determine operator index from cluster lock for supplied identity key") + return nil, errors.Wrap(err, "fetch validator public key") } + validator := core.PubKeyFrom48Bytes(valEth2) + ourShare, ok := shares[validator] if !ok { - if (strings.TrimSpace(config.ValidatorPubkey) != "" && !config.ValidatorIndexPresent) || config.ExpertMode { - return errors.New("validator not present in cluster lock", z.Str("validator", validator.String())) - } + return nil, errors.New("validator not present in cluster lock", z.Str("validator", validator.String())) } - oAPI, err := obolapi.New(config.PublishAddress, obolapi.WithTimeout(config.PublishTimeout)) + valIndex, err := fetchValidatorIndex(ctx, config, eth2Cl) if err != nil { - return errors.Wrap(err, "could not create obol api client") + return nil, errors.Wrap(err, "fetch validator index") } - log.Info(ctx, "Signing exit message for validator") - - var valIndex eth2p0.ValidatorIndex - var valIndexFound bool + log.Info(ctx, "Signing partial exit message for validator", z.Str("validator_public_key", valEth2.String()), z.U64("validator_index", uint64(valIndex))) - valAPICallOpts := ð2api.ValidatorsOpts{ - State: "head", + exitMsg, err := signExit(ctx, eth2Cl, valIndex, ourShare.Share, eth2p0.Epoch(config.ExitEpoch)) + if err != nil { + return nil, errors.Wrap(err, "sign partial exit message", z.Str("validator_public_key", valEth2.String()), z.U64("validator_index", uint64(valIndex)), 
z.Int("exit_epoch", int(config.ExitEpoch))) } - if config.ValidatorIndexPresent { - valAPICallOpts.Indices = []eth2p0.ValidatorIndex{ - eth2p0.ValidatorIndex(config.ValidatorIndex), - } - valIndex = eth2p0.ValidatorIndex(config.ValidatorIndex) - } else { - valAPICallOpts.PubKeys = []eth2p0.BLSPubKey{ - valEth2, + return []obolapi.ExitBlob{ + { + PublicKey: valEth2.String(), + SignedExitMessage: exitMsg, + }, + }, nil +} + +func signAllValidatorsExits(ctx context.Context, config exitConfig, eth2Cl eth2wrap.Client, shares keystore.ValidatorShares) ([]obolapi.ExitBlob, error) { + var valsEth2 []eth2p0.BLSPubKey + for pk := range shares { + eth2PK, err := pk.ToETH2() + if err != nil { + return nil, errors.Wrap(err, "convert core pubkey to eth2 pubkey", z.Str("pub_key", eth2PK.String())) } + valsEth2 = append(valsEth2, eth2PK) } - eth2Cl, err := eth2Client(ctx, config.BeaconNodeEndpoints, config.BeaconNodeTimeout, [4]byte(cl.GetForkVersion())) + rawValData, err := queryBeaconForValidator(ctx, eth2Cl, valsEth2, nil) if err != nil { - return errors.Wrap(err, "cannot create eth2 client for specified beacon node") + return nil, errors.Wrap(err, "fetch all validators indices from beacon") } - if !config.ExpertMode { - rawValData, err := eth2Cl.Validators(ctx, valAPICallOpts) - if err != nil { - return errors.Wrap(err, "cannot fetch validator") + for _, val := range rawValData.Data { + share, ok := shares[core.PubKeyFrom48Bytes(val.Validator.PublicKey)] + if !ok { + return nil, errors.New("validator public key not found in cluster lock", z.Str("validator_public_key", val.Validator.PublicKey.String())) } + share.Index = int(val.Index) + shares[core.PubKeyFrom48Bytes(val.Validator.PublicKey)] = share + } - valData := rawValData.Data + log.Info(ctx, "Signing partial exit message for all active validators") - for _, val := range valData { - if val.Validator.PublicKey == valEth2 || val.Index == eth2p0.ValidatorIndex(config.ValidatorIndex) { - valIndex = val.Index - valIndexFound = 
true + var exitBlobs []obolapi.ExitBlob + for pk, share := range shares { + exitMsg, err := signExit(ctx, eth2Cl, eth2p0.ValidatorIndex(share.Index), share.Share, eth2p0.Epoch(config.ExitEpoch)) + if err != nil { + return nil, errors.Wrap(err, "sign partial exit message", z.Str("validator_public_key", pk.String()), z.Int("validator_index", share.Index), z.Int("exit_epoch", int(config.ExitEpoch))) + } + eth2PK, err := pk.ToETH2() + if err != nil { + return nil, errors.Wrap(err, "convert core pubkey to eth2 pubkey", z.Str("core_pubkey", pk.String())) + } + exitBlob := obolapi.ExitBlob{ + PublicKey: eth2PK.String(), + SignedExitMessage: exitMsg, + } + exitBlobs = append(exitBlobs, exitBlob) + log.Info(ctx, "Successfully signed exit message", z.Str("validator_public_key", pk.String()), z.Int("validator_index", share.Index)) + } - // re-initialize state variable after looking up all the necessary details, since user only provided a validator index - if config.ValidatorIndexPresent { - valEth2 = val.Validator.PublicKey - ourShare, ok = shares[core.PubKeyFrom48Bytes(valEth2)] - if !ok && !config.ValidatorIndexPresent { - return errors.New("validator not present in cluster lock", z.U64("validator_index", config.ValidatorIndex), z.Str("validator", validator.String())) - } - } + return exitBlobs, nil +} - break - } +func fetchValidatorBLSPubKey(ctx context.Context, config exitConfig, eth2Cl eth2wrap.Client) (eth2p0.BLSPubKey, error) { + if config.ValidatorPubkey != "" { + valEth2, err := core.PubKey(config.ValidatorPubkey).ToETH2() + if err != nil { + return eth2p0.BLSPubKey{}, errors.Wrap(err, "convert core pubkey to eth2 pubkey", z.Str("core_pubkey", config.ValidatorPubkey)) } - if !valIndexFound { - return errors.New("validator index not found in beacon node response") + return valEth2, nil + } + + rawValData, err := queryBeaconForValidator(ctx, eth2Cl, nil, []eth2p0.ValidatorIndex{eth2p0.ValidatorIndex(config.ValidatorIndex)}) + if err != nil { + return 
eth2p0.BLSPubKey{}, errors.Wrap(err, "fetch validator pubkey from beacon", z.Str("beacon_address", eth2Cl.Address()), z.U64("validator_index", config.ValidatorIndex)) + } + + for _, val := range rawValData.Data { + if val.Index == eth2p0.ValidatorIndex(config.ValidatorIndex) { + return val.Validator.PublicKey, nil } } - exitMsg, err := signExit(ctx, eth2Cl, valIndex, ourShare.Share, eth2p0.Epoch(config.ExitEpoch)) + return eth2p0.BLSPubKey{}, errors.New("validator index not found in beacon node response", z.Str("beacon_address", eth2Cl.Address()), z.U64("validator_index", config.ValidatorIndex), z.Any("raw_response", rawValData)) +} + +func fetchValidatorIndex(ctx context.Context, config exitConfig, eth2Cl eth2wrap.Client) (eth2p0.ValidatorIndex, error) { + if config.ValidatorIndexPresent { + return eth2p0.ValidatorIndex(config.ValidatorIndex), nil + } + + valEth2, err := core.PubKey(config.ValidatorPubkey).ToETH2() if err != nil { - return errors.Wrap(err, "cannot sign partial exit message") + return 0, errors.Wrap(err, "convert core pubkey to eth2 pubkey", z.Str("core_pubkey", config.ValidatorPubkey)) } - exitBlob := obolapi.ExitBlob{ - PublicKey: valEth2.String(), - SignedExitMessage: exitMsg, + rawValData, err := queryBeaconForValidator(ctx, eth2Cl, []eth2p0.BLSPubKey{valEth2}, nil) + if err != nil { + return 0, errors.Wrap(err, "fetch validator index from beacon", z.Str("beacon_address", eth2Cl.Address()), z.Str("validator_pubkey", valEth2.String())) } - if err := oAPI.PostPartialExit(ctx, cl.GetInitialMutationHash(), shareIdx, identityKey, exitBlob); err != nil { - return errors.Wrap(err, "could not POST partial exit message to Obol API") + for _, val := range rawValData.Data { + if val.Validator.PublicKey == valEth2 { + return val.Index, nil + } } - return nil + return 0, errors.New("validator public key not found in beacon node response", z.Str("beacon_address", eth2Cl.Address()), z.Str("validator_pubkey", valEth2.String()), z.Any("raw_response", 
rawValData)) +} + +func queryBeaconForValidator(ctx context.Context, eth2Cl eth2wrap.Client, pubKeys []eth2p0.BLSPubKey, indices []eth2p0.ValidatorIndex) (*eth2api.Response[map[eth2p0.ValidatorIndex]*eth2v1.Validator], error) { + valAPICallOpts := ð2api.ValidatorsOpts{ + State: "head", + PubKeys: pubKeys, + Indices: indices, + } + + rawValData, err := eth2Cl.Validators(ctx, valAPICallOpts) + if err != nil { + return nil, errors.Wrap(err, "fetch validators from beacon", z.Str("beacon_address", eth2Cl.Address()), z.Any("options", valAPICallOpts)) + } + + return rawValData, nil } diff --git a/cmd/exit_sign_internal_test.go b/cmd/exit_sign_internal_test.go index 772da2650..7ec08ad95 100644 --- a/cmd/exit_sign_internal_test.go +++ b/cmd/exit_sign_internal_test.go @@ -66,7 +66,8 @@ func Test_runSubmitPartialExit(t *testing.T) { false, "test", 0, - "cannot convert validator pubkey to bytes", + "convert core pubkey to eth2 pubkey", + false, ) }) @@ -78,6 +79,7 @@ func Test_runSubmitPartialExit(t *testing.T) { testutil.RandomEth2PubKey(t).String(), 0, "validator not present in cluster lock", + false, ) }) @@ -89,21 +91,23 @@ func Test_runSubmitPartialExit(t *testing.T) { "", 9999, "validator index not found in beacon node response", + false, ) }) - t.Run("main flow with expert mode with bad pubkey", func(t *testing.T) { + t.Run("main flow with skipBeaconNodeCheck mode with bad pubkey", func(t *testing.T) { runSubmitPartialExitFlowTest( t, true, true, "test", 9999, - "cannot convert validator pubkey to bytes", + "convert core pubkey to eth2 pubkey", + false, ) }) - t.Run("main flow with expert mode with pubkey not found in cluster lock", func(t *testing.T) { + t.Run("main flow with skipBeaconNodeCheck mode with pubkey not found in cluster lock", func(t *testing.T) { runSubmitPartialExitFlowTest( t, true, @@ -111,23 +115,27 @@ func Test_runSubmitPartialExit(t *testing.T) { testutil.RandomEth2PubKey(t).String(), 9999, "validator not present in cluster lock", + false, ) }) 
t.Run("main flow with pubkey", func(t *testing.T) { - runSubmitPartialExitFlowTest(t, false, false, "", 0, "") + runSubmitPartialExitFlowTest(t, false, false, "", 0, "", false) }) t.Run("main flow with validator index", func(t *testing.T) { - runSubmitPartialExitFlowTest(t, true, false, "", 0, "") + runSubmitPartialExitFlowTest(t, true, false, "", 0, "", false) + }) + t.Run("main flow with skipBeaconNodeCheck mode", func(t *testing.T) { + runSubmitPartialExitFlowTest(t, true, true, "", 0, "", false) }) - t.Run("main flow with expert mode", func(t *testing.T) { - runSubmitPartialExitFlowTest(t, true, true, "", 0, "") + t.Run("main flow with all mode", func(t *testing.T) { + runSubmitPartialExitFlowTest(t, false, false, "", 0, "", true) }) t.Run("config", Test_runSubmitPartialExit_Config) } -func runSubmitPartialExitFlowTest(t *testing.T, useValIdx bool, expertMode bool, valPubkey string, valIndex uint64, errString string) { +func runSubmitPartialExitFlowTest(t *testing.T, useValIdx bool, skipBeaconNodeCheck bool, valPubkey string, valIndex uint64, errString string, all bool) { t.Helper() t.Parallel() ctx := context.Background() @@ -202,6 +210,7 @@ func runSubmitPartialExitFlowTest(t *testing.T, useValIdx bool, expertMode bool, ExitEpoch: 194048, BeaconNodeTimeout: 30 * time.Second, PublishTimeout: 10 * time.Second, + All: all, } index := uint64(0) @@ -215,11 +224,11 @@ func runSubmitPartialExitFlowTest(t *testing.T, useValIdx bool, expertMode bool, pubkey = valPubkey } - if expertMode { + if skipBeaconNodeCheck { config.ValidatorIndex = index config.ValidatorIndexPresent = true config.ValidatorPubkey = pubkey - config.ExpertMode = true + config.SkipBeaconNodeCheck = true } else { if useValIdx { config.ValidatorIndex = index @@ -254,32 +263,32 @@ func Test_runSubmitPartialExit_Config(t *testing.T) { { name: "No identity key", noIdentity: true, - errData: "could not load identity key", + errData: "load identity key", }, { name: "No cluster lock", noLock: true, - 
errData: "could not load cluster-lock.json", + errData: "load cluster lock", }, { name: "No keystore", noKeystore: true, - errData: "could not load keystore", + errData: "load keystore", }, { name: "Bad Obol API URL", badOAPIURL: true, - errData: "could not create obol api client", + errData: "create Obol API client", }, { name: "Bad beacon node URL", badBeaconNodeEndpoints: true, - errData: "cannot create eth2 client for specified beacon node", + errData: "create eth2 client for specified beacon node", }, { name: "Bad validator address", badValidatorAddr: true, - errData: "cannot convert validator pubkey to bytes", + errData: "convert core pubkey to eth2 pubkey", }, } @@ -377,3 +386,111 @@ func Test_runSubmitPartialExit_Config(t *testing.T) { }) } } + +func TestExitSignCLI(t *testing.T) { + tests := []struct { + name string + expectedErr string + flags []string + }{ + { + name: "check flags", + expectedErr: "load identity key: read private key from disk: open test: no such file or directory", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-keys-dir=test", + "--exit-epoch=1", + "--validator-public-key=test", + "--validator-index=1", + "--beacon-node-endpoints=test1,test2", + "--beacon-node-timeout=1ms", + "--publish-timeout=1ms", + "--all=false", + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + { + name: "no pubkey, no index, single validator", + expectedErr: "either validator-index or validator-public-key must be specified at least when exiting single validator.", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-keys-dir=test", + "--exit-epoch=1", + "--beacon-node-endpoints=test1,test2", + "--beacon-node-timeout=1ms", + "--publish-timeout=1ms", + "--all=false", + "--testnet-name=test", + "--testnet-fork-version=test", + 
"--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + { + name: "pubkey present, all validators", + expectedErr: "validator-index or validator-public-key should not be specified when all is, as they are obsolete and misleading.", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-keys-dir=test", + "--exit-epoch=1", + "--validator-public-key=test", + "--beacon-node-endpoints=test1,test2", + "--beacon-node-timeout=1ms", + "--publish-timeout=1ms", + "--all=true", + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + { + name: "index present, all validators", + expectedErr: "validator-index or validator-public-key should not be specified when all is, as they are obsolete and misleading.", + flags: []string{ + "--publish-address=test", + "--private-key-file=test", + "--lock-file=test", + "--validator-keys-dir=test", + "--exit-epoch=1", + "--validator-index=1", + "--beacon-node-endpoints=test1,test2", + "--beacon-node-timeout=1ms", + "--publish-timeout=1ms", + "--all=true", + "--testnet-name=test", + "--testnet-fork-version=test", + "--testnet-chain-id=1", + "--testnet-genesis-timestamp=1", + "--testnet-capella-hard-fork=test", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cmd := newExitCmd(newSignPartialExitCmd(runSignPartialExit)) + cmd.SetArgs(append([]string{"sign"}, test.flags...)) + + err := cmd.Execute() + if test.expectedErr != "" { + require.Error(t, err) + require.ErrorContains(t, err, test.expectedErr) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/cmd/markdown_internal_test.go b/cmd/markdown_internal_test.go index b46a95a47..bc8626bf0 100644 --- a/cmd/markdown_internal_test.go +++ b/cmd/markdown_internal_test.go @@ -84,7 +84,7 @@ This document contains all the prometheus 
metrics exposed by a charon node. All metrics contain the following labels, so they are omitted from the table below: - 'cluster_hash': The cluster lock hash uniquely identifying the cluster. -- 'clustter_name': The cluster lock name. +- 'cluster_name': The cluster lock name. - 'cluster_network': The cluster network name; goerli, mainnet, etc. - 'cluster_peer': The name of this node in the cluster. It is determined from the operator ENR. @@ -205,6 +205,7 @@ func writeMarkdown(t *testing.T, file string, tpl *template.Template, data any) content, err := os.ReadFile(file) require.NoError(t, err) + //nolint:testifylint // don't remove fmt.Sprintf, it's not unnecessary require.Equal(t, string(content), result, fmt.Sprintf("%s doesn't contain latest metrics.\n"+ "To fix, run: go test github.com/obolnetwork/charon/cmd -update-markdown", file)) diff --git a/cmd/run.go b/cmd/run.go index 4ebaf210b..1bc174f05 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -119,7 +119,7 @@ func bindLogFlags(flags *pflag.FlagSet, config *log.Config) { } func bindP2PFlags(cmd *cobra.Command, config *p2p.Config) { - cmd.Flags().StringSliceVar(&config.Relays, "p2p-relays", []string{"https://0.relay.obol.tech", "https://1.relay.obol.tech"}, "Comma-separated list of libp2p relay URLs or multiaddrs.") + cmd.Flags().StringSliceVar(&config.Relays, "p2p-relays", []string{"https://0.relay.obol.tech", "https://2.relay.obol.dev", "https://1.relay.obol.tech"}, "Comma-separated list of libp2p relay URLs or multiaddrs.") cmd.Flags().StringVar(&config.ExternalIP, "p2p-external-ip", "", "The IP address advertised by libp2p. This may be used to advertise an external IP.") cmd.Flags().StringVar(&config.ExternalHost, "p2p-external-hostname", "", "The DNS hostname advertised by libp2p. This may be used to advertise an external DNS.") cmd.Flags().StringSliceVar(&config.TCPAddrs, "p2p-tcp-address", nil, "Comma-separated list of listening TCP addresses (ip and port) for libP2P traffic. 
Empty default doesn't bind to local port therefore only supports outgoing connections.") diff --git a/cmd/test.go b/cmd/test.go index c202c6696..dfc00901c 100644 --- a/cmd/test.go +++ b/cmd/test.go @@ -4,18 +4,23 @@ package cmd import ( "context" + "encoding/json" "fmt" "io" + "net/http" + "net/http/httptrace" "os" "os/signal" + "path/filepath" + "slices" "sort" "strings" "syscall" "time" "unicode/utf8" - "github.com/pelletier/go-toml/v2" "github.com/spf13/cobra" + "github.com/spf13/pflag" "golang.org/x/exp/maps" "github.com/obolnetwork/charon/app/errors" @@ -29,15 +34,22 @@ var ( ) const ( - peersTestCategory = "peers" - beaconTestCategory = "beacon" - validatorTestCategory = "validator" - mevTestCategory = "mev" - performanceTestCategory = "performance" + peersTestCategory = "peers" + beaconTestCategory = "beacon" + validatorTestCategory = "validator" + mevTestCategory = "mev" + infraTestCategory = "infra" + allTestCategory = "all" + + committeeSizePerSlot = 64 + subCommitteeSize = 4 + slotTime = 12 * time.Second + slotsInEpoch = 32 + epochTime = slotsInEpoch * slotTime ) type testConfig struct { - OutputToml string + OutputJSON string Quiet bool TestCases []string Timeout time.Duration @@ -47,7 +59,7 @@ func newTestCmd(cmds ...*cobra.Command) *cobra.Command { root := &cobra.Command{ Use: "test", Short: "Test subcommands provide test suite to evaluate current cluster setup", - Long: `Test subcommands provide test suite to evaluate current cluster setup. The full validator stack can be tested - charon peers, consensus layer, validator client, MEV. Current machine's performance can be examined as well.`, + Long: `Test subcommands provide test suite to evaluate current cluster setup. The full validator stack can be tested - charon peers, consensus layer, validator client, MEV. Current machine's infra can be examined as well.`, } root.AddCommand(cmds...) 
@@ -56,12 +68,19 @@ func newTestCmd(cmds ...*cobra.Command) *cobra.Command { } func bindTestFlags(cmd *cobra.Command, config *testConfig) { - cmd.Flags().StringVar(&config.OutputToml, "output-toml", "", "File path to which output can be written in TOML format.") + cmd.Flags().StringVar(&config.OutputJSON, "output-json", "", "File path to which output can be written in JSON format.") cmd.Flags().StringSliceVar(&config.TestCases, "test-cases", nil, fmt.Sprintf("List of comma separated names of tests to be exeucted. Available tests are: %v", listTestCases(cmd))) - cmd.Flags().DurationVar(&config.Timeout, "timeout", 5*time.Minute, "Execution timeout for all tests.") + cmd.Flags().DurationVar(&config.Timeout, "timeout", time.Hour, "Execution timeout for all tests.") cmd.Flags().BoolVar(&config.Quiet, "quiet", false, "Do not print test results to stdout.") } +func bindTestLogFlags(flags *pflag.FlagSet, config *log.Config) { + flags.StringVar(&config.Format, "log-format", "console", "Log format; console, logfmt or json") + flags.StringVar(&config.Level, "log-level", "info", "Log level; debug, info, warn or error") + flags.StringVar(&config.Color, "log-color", "auto", "Log color; auto, force, disable.") + flags.StringVar(&config.LogOutputPath, "log-output-path", "", "Path in which to write on-disk logs.") +} + func listTestCases(cmd *cobra.Command) []string { var testCaseNames []testCaseName switch cmd.Name() { @@ -74,8 +93,18 @@ func listTestCases(cmd *cobra.Command) []string { testCaseNames = maps.Keys(supportedValidatorTestCases()) case mevTestCategory: testCaseNames = maps.Keys(supportedMEVTestCases()) - case performanceTestCategory: - testCaseNames = maps.Keys(supportedPerformanceTestCases()) + case infraTestCategory: + testCaseNames = maps.Keys(supportedInfraTestCases()) + case allTestCategory: + testCaseNames = slices.Concat( + maps.Keys(supportedPeerTestCases()), + maps.Keys(supportedSelfTestCases()), + maps.Keys(supportedRelayTestCases()), + 
maps.Keys(supportedBeaconTestCases()), + maps.Keys(supportedValidatorTestCases()), + maps.Keys(supportedMEVTestCases()), + maps.Keys(supportedInfraTestCases()), + ) default: log.Warn(cmd.Context(), "Unknown command for listing test cases", nil, z.Str("name", cmd.Name())) } @@ -89,8 +118,8 @@ func listTestCases(cmd *cobra.Command) []string { } func mustOutputToFileOnQuiet(cmd *cobra.Command) error { - if cmd.Flag("quiet").Changed && !cmd.Flag("output-toml").Changed { - return errors.New("on --quiet, an --output-toml is required") + if cmd.Flag("quiet").Changed && !cmd.Flag("output-json").Changed { + return errors.New("on --quiet, an --output-json is required") } return nil @@ -122,16 +151,15 @@ const ( categoryScoreC categoryScore = "C" ) -// toml fails on marshaling errors to string, so we wrap the errors and add custom marshal type testResultError struct{ error } type testResult struct { - Name string - Verdict testVerdict - Measurement string - Suggestion string - Error testResultError - IsAcceptable bool + Name string `json:"name"` + Verdict testVerdict `json:"verdict"` + Measurement string `json:"measurement,omitempty"` + Suggestion string `json:"suggestion,omitempty"` + Error testResultError `json:"error,omitempty"` + IsAcceptable bool `json:"-"` } func failedTestResult(testRes testResult, err error) testResult { @@ -141,6 +169,10 @@ func failedTestResult(testRes testResult, err error) testResult { return testRes } +func httpStatusError(code int) string { + return fmt.Sprintf("HTTP status code %v", code) +} + func (s *testResultError) UnmarshalText(data []byte) error { if len(data) == 0 { return nil @@ -166,10 +198,10 @@ type testCaseName struct { } type testCategoryResult struct { - CategoryName string - Targets map[string][]testResult - ExecutionTime Duration - Score categoryScore + CategoryName string `json:"category_name,omitempty"` + Targets map[string][]testResult `json:"targets,omitempty"` + ExecutionTime Duration `json:"execution_time,omitempty"` + 
Score categoryScore `json:"score,omitempty"` } func appendScore(cat []string, score []string) []string { @@ -181,15 +213,73 @@ func appendScore(cat []string, score []string) []string { return res } +type fileResult struct { + Peers testCategoryResult `json:"charon_peers,omitempty"` + Beacon testCategoryResult `json:"beacon_node,omitempty"` + Validator testCategoryResult `json:"validator_client,omitempty"` + MEV testCategoryResult `json:"mev,omitempty"` + Infra testCategoryResult `json:"infra,omitempty"` +} + func writeResultToFile(res testCategoryResult, path string) error { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o444) + // open or create a file + existingFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644) if err != nil { return errors.Wrap(err, "create/open file") } - defer f.Close() - err = toml.NewEncoder(f).Encode(res) + defer existingFile.Close() + stat, err := existingFile.Stat() + if err != nil { + return errors.Wrap(err, "get file stat") + } + // read file contents or default to empty structure + var file fileResult + if stat.Size() == 0 { + file = fileResult{} + } else { + err = json.NewDecoder(existingFile).Decode(&file) + if err != nil { + return errors.Wrap(err, "decode fileResult from JSON") + } + } + + switch res.CategoryName { + case peersTestCategory: + file.Peers = res + case beaconTestCategory: + file.Beacon = res + case validatorTestCategory: + file.Validator = res + case mevTestCategory: + file.MEV = res + case infraTestCategory: + file.Infra = res + } + + // write data to temp file + tmpFile, err := os.CreateTemp(filepath.Dir(path), fmt.Sprintf("%v-tmp-*.json", filepath.Base(path))) + if err != nil { + return errors.Wrap(err, "create temp file") + } + defer tmpFile.Close() + err = tmpFile.Chmod(0o644) if err != nil { - return errors.Wrap(err, "encode testCategoryResult to TOML") + return errors.Wrap(err, "chmod temp file") + } + + fileContentJSON, err := json.Marshal(file) + if err != nil { + return 
errors.Wrap(err, "marshal fileResult to JSON") + } + + _, err = tmpFile.Write(fileContentJSON) + if err != nil { + return errors.Wrap(err, "write json to file") + } + + err = os.Rename(tmpFile.Name(), path) + if err != nil { + return errors.Wrap(err, "rename temp file") } return nil @@ -207,8 +297,8 @@ func writeResultToWriter(res testCategoryResult, w io.Writer) error { lines = append(lines, validatorASCII()...) case mevTestCategory: lines = append(lines, mevASCII()...) - case performanceTestCategory: - lines = append(lines, performanceASCII()...) + case infraTestCategory: + lines = append(lines, infraASCII()...) default: lines = append(lines, categoryDefaultASCII()...) } @@ -225,12 +315,14 @@ func writeResultToWriter(res testCategoryResult, w io.Writer) error { lines = append(lines, "") lines = append(lines, fmt.Sprintf("%-64s%s", "TEST NAME", "RESULT")) suggestions := []string{} - for target, testResults := range res.Targets { - if target != "" && len(testResults) > 0 { + targets := maps.Keys(res.Targets) + slices.Sort(targets) + for _, target := range targets { + if target != "" && len(res.Targets[target]) > 0 { lines = append(lines, "") lines = append(lines, target) } - for _, singleTestRes := range testResults { + for _, singleTestRes := range res.Targets[target] { testOutput := "" testOutput += fmt.Sprintf("%-64s", singleTestRes.Name) if singleTestRes.Measurement != "" { @@ -269,6 +361,30 @@ func writeResultToWriter(res testCategoryResult, w io.Writer) error { return nil } +func evaluateHighestRTTScores(testResCh chan time.Duration, testRes testResult, avg time.Duration, poor time.Duration) testResult { + highestRTT := time.Duration(0) + for rtt := range testResCh { + if rtt > highestRTT { + highestRTT = rtt + } + } + + return evaluateRTT(highestRTT, testRes, avg, poor) +} + +func evaluateRTT(rtt time.Duration, testRes testResult, avg time.Duration, poor time.Duration) testResult { + if rtt == 0 || rtt > poor { + testRes.Verdict = testVerdictPoor + } else if 
rtt > avg { + testRes.Verdict = testVerdictAvg + } else { + testRes.Verdict = testVerdictGood + } + testRes.Measurement = Duration{rtt}.String() + + return testRes +} + func calculateScore(results []testResult) categoryScore { // TODO(kalo): calculate score more elaborately (potentially use weights) avg := 0 @@ -344,3 +460,37 @@ func sleepWithContext(ctx context.Context, d time.Duration) { case <-timer.C: } } + +func requestRTT(ctx context.Context, url string, method string, body io.Reader, expectedStatus int) (time.Duration, error) { + var start time.Time + var firstByte time.Duration + + trace := &httptrace.ClientTrace{ + GotFirstResponseByte: func() { + firstByte = time.Since(start) + }, + } + + start = time.Now() + req, err := http.NewRequestWithContext(httptrace.WithClientTrace(ctx, trace), method, url, body) + if err != nil { + return 0, errors.Wrap(err, "create new request with trace and context") + } + + resp, err := http.DefaultTransport.RoundTrip(req) + if err != nil { + return 0, err + } + defer resp.Body.Close() + + if resp.StatusCode != expectedStatus { + data, err := io.ReadAll(resp.Body) + if err != nil { + log.Warn(ctx, "Unexpected status code", nil, z.Int("status_code", resp.StatusCode), z.Int("expected_status_code", expectedStatus), z.Str("endpoint", url)) + } else { + log.Warn(ctx, "Unexpected status code", nil, z.Int("status_code", resp.StatusCode), z.Int("expected_status_code", expectedStatus), z.Str("endpoint", url), z.Str("body", string(data))) + } + } + + return firstByte, nil +} diff --git a/cmd/testall.go b/cmd/testall.go new file mode 100644 index 000000000..995cc99cb --- /dev/null +++ b/cmd/testall.go @@ -0,0 +1,117 @@ +// Copyright © 2022-2024 Obol Labs Inc. 
Licensed under the terms of a Business Source License 1.1 + +package cmd + +import ( + "context" + "io" + + "github.com/spf13/cobra" + + "github.com/obolnetwork/charon/app/errors" +) + +type testAllConfig struct { + testConfig + Peers testPeersConfig + Beacon testBeaconConfig + Validator testValidatorConfig + MEV testMEVConfig + Infra testInfraConfig +} + +func newTestAllCmd(runFunc func(context.Context, io.Writer, testAllConfig) error) *cobra.Command { + var config testAllConfig + + cmd := &cobra.Command{ + Use: "all", + Short: "Run tests towards peer nodes, beacon nodes, validator client, MEV relays, own hardware and internet connectivity.", + Long: `Run tests towards peer nodes, beacon nodes, validator client, MEV relays, own hardware and internet connectivity. Verify that Charon can efficiently do its duties on the tested setup.`, + Args: cobra.NoArgs, + PreRunE: func(cmd *cobra.Command, _ []string) error { + return mustOutputToFileOnQuiet(cmd) + }, + RunE: func(cmd *cobra.Command, _ []string) error { + return runFunc(cmd.Context(), cmd.OutOrStdout(), config) + }, + } + + bindTestFlags(cmd, &config.testConfig) + + bindTestPeersFlags(cmd, &config.Peers, "peers-") + bindTestBeaconFlags(cmd, &config.Beacon, "beacon-") + bindTestValidatorFlags(cmd, &config.Validator, "validator-") + bindTestMEVFlags(cmd, &config.MEV, "mev-") + bindTestInfraFlags(cmd, &config.Infra, "infra-") + + bindP2PFlags(cmd, &config.Peers.P2P) + bindDataDirFlag(cmd.Flags(), &config.Peers.DataDir) + bindTestLogFlags(cmd.Flags(), &config.Peers.Log) + + wrapPreRunE(cmd, func(cmd *cobra.Command, _ []string) error { + testCasesPresent := cmd.Flags().Lookup("test-cases").Changed + + if testCasesPresent { + //nolint:revive // we use our own version of the errors package + return errors.New("test-cases cannot be specified when explicitly running all test cases.") + } + + return nil + }) + + return cmd +} + +func runTestAll(ctx context.Context, w io.Writer, cfg testAllConfig) (err error) { + 
cfg.Beacon.testConfig = cfg.testConfig + cfg.Beacon.Quiet = true + var results []testCategoryResult + beaconRes, err := runTestBeacon(ctx, w, cfg.Beacon) + if err != nil { + return err + } + results = append(results, beaconRes) + + cfg.Validator.testConfig = cfg.testConfig + cfg.Validator.Quiet = true + validatorRes, err := runTestValidator(ctx, w, cfg.Validator) + if err != nil { + return err + } + results = append(results, validatorRes) + + cfg.MEV.testConfig = cfg.testConfig + cfg.MEV.Quiet = true + mevRes, err := runTestMEV(ctx, w, cfg.MEV) + if err != nil { + return err + } + results = append(results, mevRes) + + cfg.Infra.testConfig = cfg.testConfig + cfg.Infra.Quiet = true + infraRes, err := runTestInfra(ctx, w, cfg.Infra) + if err != nil { + return err + } + results = append(results, infraRes) + + cfg.Peers.testConfig = cfg.testConfig + cfg.Peers.Quiet = true + peersRes, err := runTestPeers(ctx, w, cfg.Peers) + if err != nil { + return err + } + results = append(results, peersRes) + + if !cfg.Quiet { + for _, res := range results { + err = writeResultToWriter(res, w) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/cmd/testbeacon.go b/cmd/testbeacon.go index 78a14adb4..70b240961 100644 --- a/cmd/testbeacon.go +++ b/cmd/testbeacon.go @@ -10,8 +10,11 @@ import ( "math" "math/rand" "net/http" - "net/http/httptrace" + "os" + "path/filepath" + "sort" "strconv" + "strings" "sync" "time" @@ -27,23 +30,131 @@ import ( type testBeaconConfig struct { testConfig - Endpoints []string - EnableLoadTest bool - LoadTestDuration time.Duration + Endpoints []string + LoadTest bool + LoadTestDuration time.Duration + SimulationValidators int + SimulationFileDir string + SimulationDuration int + SimulationVerbose bool + SimulationCustom int } type testCaseBeacon func(context.Context, *testBeaconConfig, string) testResult +type simParams struct { + TotalValidatorsCount int + AttestationValidatorsCount int // attestation + aggregation + 
ProposalValidatorsCount int // attestation + aggregation + proposals + SyncCommitteeValidatorsCount int // attestation + aggregation + proposals + sync committee + RequestIntensity RequestsIntensity +} + +type SimulationValues struct { + Endpoint string `json:"endpoint,omitempty"` + All []Duration `json:"all,omitempty"` + Min Duration `json:"min"` + Max Duration `json:"max"` + Median Duration `json:"median"` + Avg Duration `json:"avg"` +} + +type RequestsIntensity struct { + AttestationDuty time.Duration + AggregatorDuty time.Duration + ProposalDuty time.Duration + SyncCommitteeSubmit time.Duration + SyncCommitteeContribution time.Duration + SyncCommitteeSubscribe time.Duration +} + +type DutiesPerformed struct { + Attestation bool + Aggregation bool + Proposal bool + SyncCommittee bool +} + +type Simulation struct { + GeneralClusterRequests SimulationCluster `json:"general_cluster_requests"` + ValidatorsRequests SimulationValidators `json:"validators_requests"` +} + +type SimulationValidators struct { + Averaged SimulationSingleValidator `json:"averaged"` + AllValidators []SimulationSingleValidator `json:"all_validators,omitempty"` +} + +type SimulationSingleValidator struct { + AttestationDuty SimulationAttestation `json:"attestation_duty"` + AggregationDuty SimulationAggregation `json:"aggregation_duty"` + ProposalDuty SimulationProposal `json:"proposal_duty"` + SyncCommitteeDuties SimulationSyncCommittee `json:"sync_committee_duties"` + SimulationValues +} + +type SimulationAttestation struct { + GetAttestationDataRequest SimulationValues `json:"get_attestation_data_request"` + PostAttestationsRequest SimulationValues `json:"post_attestations_request"` + SimulationValues +} + +type SimulationAggregation struct { + GetAggregateAttestationRequest SimulationValues `json:"get_aggregate_attestation_request"` + PostAggregateAndProofsRequest SimulationValues `json:"post_aggregate_and_proofs_request"` + SimulationValues +} + +type SimulationProposal struct { + 
ProduceBlockRequest SimulationValues `json:"produce_block_request"` + PublishBlindedBlockRequest SimulationValues `json:"publish_blinded_block_request"` + SimulationValues +} + +type SimulationSyncCommittee struct { + MessageDuty SyncCommitteeMessageDuty `json:"message_duty"` + ContributionDuty SyncCommitteeContributionDuty `json:"contribution_duty"` + SubscribeSyncCommitteeRequest SimulationValues `json:"subscribe_sync_committee_request"` + SimulationValues +} + +type SyncCommitteeContributionDuty struct { + ProduceSyncCommitteeContributionRequest SimulationValues `json:"produce_sync_committee_contribution_request"` + SubmitSyncCommitteeContributionRequest SimulationValues `json:"submit_sync_committee_contribution_request"` + SimulationValues +} + +type SyncCommitteeMessageDuty struct { + SubmitSyncCommitteeMessageRequest SimulationValues `json:"submit_sync_committee_message_request"` +} + +type SimulationCluster struct { + AttestationsForBlockRequest SimulationValues `json:"attestations_for_block_request"` + ProposalDutiesForEpochRequest SimulationValues `json:"proposal_duties_for_epoch_request"` + SyncingRequest SimulationValues `json:"syncing_request"` + PeerCountRequest SimulationValues `json:"peer_count_request"` + BeaconCommitteeSubscriptionRequest SimulationValues `json:"beacon_committee_subscription_request"` + DutiesAttesterForEpochRequest SimulationValues `json:"duties_attester_for_epoch_request"` + DutiesSyncCommitteeForEpochRequest SimulationValues `json:"duties_sync_committee_for_epoch_request"` + BeaconHeadValidatorsRequest SimulationValues `json:"beacon_head_validators_request"` + BeaconGenesisRequest SimulationValues `json:"beacon_genesis_request"` + PrepBeaconProposerRequest SimulationValues `json:"prep_beacon_proposer_request"` + ConfigSpecRequest SimulationValues `json:"config_spec_request"` + NodeVersionRequest SimulationValues `json:"node_version_request"` +} + const ( - thresholdBeaconMeasureAvg = 40 * time.Millisecond - 
thresholdBeaconMeasurePoor = 100 * time.Millisecond - thresholdBeaconLoadAvg = 40 * time.Millisecond - thresholdBeaconLoadPoor = 100 * time.Millisecond - thresholdBeaconPeersAvg = 50 - thresholdBeaconPeersPoor = 20 + thresholdBeaconMeasureAvg = 40 * time.Millisecond + thresholdBeaconMeasurePoor = 100 * time.Millisecond + thresholdBeaconLoadAvg = 40 * time.Millisecond + thresholdBeaconLoadPoor = 100 * time.Millisecond + thresholdBeaconPeersAvg = 50 + thresholdBeaconPeersPoor = 20 + thresholdBeaconSimulationAvg = 200 * time.Millisecond + thresholdBeaconSimulationPoor = 400 * time.Millisecond ) -func newTestBeaconCmd(runFunc func(context.Context, io.Writer, testBeaconConfig) error) *cobra.Command { +func newTestBeaconCmd(runFunc func(context.Context, io.Writer, testBeaconConfig) (testCategoryResult, error)) *cobra.Command { var config testBeaconConfig cmd := &cobra.Command{ @@ -55,39 +166,53 @@ func newTestBeaconCmd(runFunc func(context.Context, io.Writer, testBeaconConfig) return mustOutputToFileOnQuiet(cmd) }, RunE: func(cmd *cobra.Command, _ []string) error { - return runFunc(cmd.Context(), cmd.OutOrStdout(), config) + _, err := runFunc(cmd.Context(), cmd.OutOrStdout(), config) + return err }, } bindTestFlags(cmd, &config.testConfig) - bindTestBeaconFlags(cmd, &config) + bindTestBeaconFlags(cmd, &config, "") return cmd } -func bindTestBeaconFlags(cmd *cobra.Command, config *testBeaconConfig) { - const endpoints = "endpoints" - cmd.Flags().StringSliceVar(&config.Endpoints, endpoints, nil, "[REQUIRED] Comma separated list of one or more beacon node endpoint URLs.") - mustMarkFlagRequired(cmd, endpoints) - cmd.Flags().BoolVar(&config.EnableLoadTest, "enable-load-test", false, "Enable load test, not advisable when testing towards external beacon nodes.") - cmd.Flags().DurationVar(&config.LoadTestDuration, "load-test-duration", 5*time.Second, "Time to keep running the load tests in seconds. 
For each second a new continuous ping instance is spawned.") +func bindTestBeaconFlags(cmd *cobra.Command, config *testBeaconConfig, flagsPrefix string) { + cmd.Flags().StringSliceVar(&config.Endpoints, flagsPrefix+"endpoints", nil, "[REQUIRED] Comma separated list of one or more beacon node endpoint URLs.") + cmd.Flags().BoolVar(&config.LoadTest, flagsPrefix+"load-test", false, "Enable load test, not advisable when testing towards external beacon nodes.") + cmd.Flags().DurationVar(&config.LoadTestDuration, flagsPrefix+"load-test-duration", 5*time.Second, "Time to keep running the load tests in seconds. For each second a new continuous ping instance is spawned.") + cmd.Flags().IntVar(&config.SimulationDuration, flagsPrefix+"simulation-duration-in-slots", slotsInEpoch, "Time to keep running the simulation in slots.") + cmd.Flags().StringVar(&config.SimulationFileDir, flagsPrefix+"simulation-file-dir", "./", "Time to keep running the simulation in slots.") + cmd.Flags().BoolVar(&config.SimulationVerbose, flagsPrefix+"simulation-verbose", false, "Show results for each request and each validator.") + cmd.Flags().IntVar(&config.SimulationCustom, flagsPrefix+"simulation-custom", 0, "Run custom simulation with the specified amount of validators.") + mustMarkFlagRequired(cmd, flagsPrefix+"endpoints") } func supportedBeaconTestCases() map[testCaseName]testCaseBeacon { return map[testCaseName]testCaseBeacon{ - {name: "ping", order: 1}: beaconPingTest, - {name: "pingMeasure", order: 2}: beaconPingMeasureTest, - {name: "isSynced", order: 3}: beaconIsSyncedTest, - {name: "peerCount", order: 4}: beaconPeerCountTest, - {name: "pingLoad", order: 5}: beaconPingLoadTest, + {name: "Ping", order: 1}: beaconPingTest, + {name: "PingMeasure", order: 2}: beaconPingMeasureTest, + {name: "Version", order: 3}: beaconVersionTest, + {name: "Synced", order: 4}: beaconIsSyncedTest, + {name: "PeerCount", order: 5}: beaconPeerCountTest, + {name: "PingLoad", order: 6}: beaconPingLoadTest, + + 
{name: "Simulate1", order: 7}: beaconSimulation1Test, + {name: "Simulate10", order: 8}: beaconSimulation10Test, + {name: "Simulate100", order: 9}: beaconSimulation100Test, + {name: "Simulate500", order: 10}: beaconSimulation500Test, + {name: "Simulate1000", order: 11}: beaconSimulation1000Test, + {name: "SimulateCustom", order: 12}: beaconSimulationCustomTest, } } -func runTestBeacon(ctx context.Context, w io.Writer, cfg testBeaconConfig) (err error) { +func runTestBeacon(ctx context.Context, w io.Writer, cfg testBeaconConfig) (res testCategoryResult, err error) { + log.Info(ctx, "Starting beacon node test") + testCases := supportedBeaconTestCases() queuedTests := filterTests(maps.Keys(testCases), cfg.testConfig) if len(queuedTests) == 0 { - return errors.New("test case not supported") + return res, errors.New("test case not supported") } sortTests(queuedTests) @@ -116,7 +241,7 @@ func runTestBeacon(ctx context.Context, w io.Writer, cfg testBeaconConfig) (err } } - res := testCategoryResult{ + res = testCategoryResult{ CategoryName: beaconTestCategory, Targets: testResults, ExecutionTime: execTime, @@ -126,20 +251,22 @@ func runTestBeacon(ctx context.Context, w io.Writer, cfg testBeaconConfig) (err if !cfg.Quiet { err = writeResultToWriter(res, w) if err != nil { - return err + return res, err } } - if cfg.OutputToml != "" { - err = writeResultToFile(res, cfg.OutputToml) + if cfg.OutputJSON != "" { + err = writeResultToFile(res, cfg.OutputJSON) if err != nil { - return err + return res, err } } - return nil + return res, nil } +// beacon node tests + func testAllBeacons(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCaseBeacon, conf testBeaconConfig, allBeaconsResCh chan map[string][]testResult) { defer close(allBeaconsResCh) // run tests for all beacon nodes @@ -148,9 +275,8 @@ func testAllBeacons(ctx context.Context, queuedTestCases []testCaseName, allTest group, _ := errgroup.WithContext(ctx) for _, endpoint := range 
conf.Endpoints { - currEndpoint := endpoint // TODO: can be removed after go1.22 version bump group.Go(func() error { - return testSingleBeacon(ctx, queuedTestCases, allTestCases, conf, currEndpoint, singleBeaconResCh) + return testSingleBeacon(ctx, queuedTestCases, allTestCases, conf, endpoint, singleBeaconResCh) }) } @@ -192,9 +318,7 @@ func testSingleBeacon(ctx context.Context, queuedTestCases []testCaseName, allTe finished = true break } - testName = queuedTestCases[testCounter].name testCounter++ - result.Name = testName allTestRes = append(allTestRes, result) } } @@ -219,20 +343,19 @@ func runBeaconTest(ctx context.Context, queuedTestCases []testCaseName, allTestC func beaconPingTest(ctx context.Context, _ *testBeaconConfig, target string) testResult { testRes := testResult{Name: "Ping"} - client := http.Client{} targetEndpoint := fmt.Sprintf("%v/eth/v1/node/health", target) req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetEndpoint, nil) if err != nil { return failedTestResult(testRes, err) } - resp, err := client.Do(req) + resp, err := new(http.Client).Do(req) if err != nil { return failedTestResult(testRes, err) } defer resp.Body.Close() if resp.StatusCode > 399 { - return failedTestResult(testRes, errors.New("status code %v", z.Int("status_code", resp.StatusCode))) + return failedTestResult(testRes, errors.New(httpStatusError(resp.StatusCode))) } testRes.Verdict = testVerdictOk @@ -240,75 +363,71 @@ func beaconPingTest(ctx context.Context, _ *testBeaconConfig, target string) tes return testRes } -func beaconPingOnce(ctx context.Context, target string) (time.Duration, error) { - var start time.Time - var firstByte time.Duration +func beaconPingMeasureTest(ctx context.Context, _ *testBeaconConfig, target string) testResult { + testRes := testResult{Name: "PingMeasure"} - trace := &httptrace.ClientTrace{ - GotFirstResponseByte: func() { - firstByte = time.Since(start) - }, + rtt, err := beaconPingOnce(ctx, target) + if err != nil { + return 
failedTestResult(testRes, err) } - start = time.Now() - targetEndpoint := fmt.Sprintf("%v/eth/v1/node/health", target) - req, err := http.NewRequestWithContext(httptrace.WithClientTrace(ctx, trace), http.MethodGet, targetEndpoint, nil) - if err != nil { - return 0, errors.Wrap(err, "create new request with trace and context") + testRes = evaluateRTT(rtt, testRes, thresholdBeaconMeasureAvg, thresholdBeaconMeasurePoor) + + return testRes +} + +func beaconVersionTest(ctx context.Context, _ *testBeaconConfig, target string) testResult { + testRes := testResult{Name: "Version"} + + type versionData struct { + Version string `json:"version"` + } + type versionResponse struct { + Data versionData `json:"data"` } - resp, err := http.DefaultTransport.RoundTrip(req) + targetEndpoint := fmt.Sprintf("%v/eth/v1/node/version", target) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetEndpoint, nil) + if err != nil { + return failedTestResult(testRes, err) + } + resp, err := new(http.Client).Do(req) if err != nil { - return 0, err + return failedTestResult(testRes, err) } - defer resp.Body.Close() if resp.StatusCode > 399 { - return 0, errors.New("status code %v", z.Int("status_code", resp.StatusCode)) + return failedTestResult(testRes, errors.New(httpStatusError(resp.StatusCode))) } - return firstByte, nil -} - -func beaconPingMeasureTest(ctx context.Context, _ *testBeaconConfig, target string) testResult { - testRes := testResult{Name: "PingMeasure"} + b, err := io.ReadAll(resp.Body) + if err != nil { + return failedTestResult(testRes, err) + } + defer resp.Body.Close() - rtt, err := beaconPingOnce(ctx, target) + var versionResp versionResponse + err = json.Unmarshal(b, &versionResp) if err != nil { return failedTestResult(testRes, err) } - if rtt > thresholdBeaconMeasurePoor { - testRes.Verdict = testVerdictPoor - } else if rtt > thresholdBeaconMeasureAvg { - testRes.Verdict = testVerdictAvg - } else { - testRes.Verdict = testVerdictGood + // keep only 
provider, version and platform + splitVersion := strings.Split(versionResp.Data.Version, "/") + if len(splitVersion) > 3 { + splitVersion = splitVersion[:3] } - testRes.Measurement = Duration{rtt}.String() + version := strings.Join(splitVersion, "/") - return testRes -} + testRes.Measurement = version + testRes.Verdict = testVerdictOk -func pingBeaconContinuously(ctx context.Context, target string, resCh chan<- time.Duration) { - for { - rtt, err := beaconPingOnce(ctx, target) - if err != nil { - return - } - select { - case <-ctx.Done(): - return - case resCh <- rtt: - awaitTime := rand.Intn(100) //nolint:gosec // weak generator is not an issue here - sleepWithContext(ctx, time.Duration(awaitTime)*time.Millisecond) - } - } + return testRes } func beaconPingLoadTest(ctx context.Context, conf *testBeaconConfig, target string) testResult { - testRes := testResult{Name: "BeaconLoad"} - if !conf.EnableLoadTest { + testRes := testResult{Name: "PingLoad"} + if !conf.LoadTest { testRes.Verdict = testVerdictSkipped return testRes } @@ -339,44 +458,30 @@ func beaconPingLoadTest(ctx context.Context, conf *testBeaconConfig, target stri close(testResCh) log.Info(ctx, "Ping load tests finished", z.Any("target", target)) - highestRTT := time.Duration(0) - for rtt := range testResCh { - if rtt > highestRTT { - highestRTT = rtt - } - } - if highestRTT > thresholdBeaconLoadPoor { - testRes.Verdict = testVerdictPoor - } else if highestRTT > thresholdBeaconLoadAvg { - testRes.Verdict = testVerdictAvg - } else { - testRes.Verdict = testVerdictGood - } - testRes.Measurement = Duration{highestRTT}.String() + testRes = evaluateHighestRTTScores(testResCh, testRes, thresholdBeaconLoadAvg, thresholdBeaconLoadPoor) return testRes } func beaconIsSyncedTest(ctx context.Context, _ *testBeaconConfig, target string) testResult { - testRes := testResult{Name: "isSynced"} + testRes := testResult{Name: "Synced"} type isSyncedResponse struct { Data eth2v1.SyncState `json:"data"` } - client := 
http.Client{} targetEndpoint := fmt.Sprintf("%v/eth/v1/node/syncing", target) req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetEndpoint, nil) if err != nil { return failedTestResult(testRes, err) } - resp, err := client.Do(req) + resp, err := new(http.Client).Do(req) if err != nil { return failedTestResult(testRes, err) } if resp.StatusCode > 399 { - return failedTestResult(testRes, errors.New("status code %v", z.Int("status_code", resp.StatusCode))) + return failedTestResult(testRes, errors.New(httpStatusError(resp.StatusCode))) } b, err := io.ReadAll(resp.Body) @@ -402,7 +507,7 @@ func beaconIsSyncedTest(ctx context.Context, _ *testBeaconConfig, target string) } func beaconPeerCountTest(ctx context.Context, _ *testBeaconConfig, target string) testResult { - testRes := testResult{Name: "peerCount"} + testRes := testResult{Name: "PeerCount"} type peerCountResponseMeta struct { Count int `json:"count"` @@ -412,19 +517,18 @@ func beaconPeerCountTest(ctx context.Context, _ *testBeaconConfig, target string Meta peerCountResponseMeta `json:"meta"` } - client := http.Client{} targetEndpoint := fmt.Sprintf("%v/eth/v1/node/peers?state=connected", target) req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetEndpoint, nil) if err != nil { return failedTestResult(testRes, err) } - resp, err := client.Do(req) + resp, err := new(http.Client).Do(req) if err != nil { return failedTestResult(testRes, err) } if resp.StatusCode > 399 { - return failedTestResult(testRes, errors.New("status code %v", z.Int("status_code", resp.StatusCode))) + return failedTestResult(testRes, errors.New(httpStatusError(resp.StatusCode))) } b, err := io.ReadAll(resp.Body) @@ -451,3 +555,1324 @@ func beaconPeerCountTest(ctx context.Context, _ *testBeaconConfig, target string return testRes } + +// helper functions + +func beaconPingOnce(ctx context.Context, target string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/node/health", target), 
http.MethodGet, nil, 200) +} + +func pingBeaconContinuously(ctx context.Context, target string, resCh chan<- time.Duration) { + for { + rtt, err := beaconPingOnce(ctx, target) + if err != nil { + return + } + select { + case <-ctx.Done(): + return + case resCh <- rtt: + awaitTime := rand.Intn(100) //nolint:gosec // weak generator is not an issue here + sleepWithContext(ctx, time.Duration(awaitTime)*time.Millisecond) + } + } +} + +// beacon simulation tests + +func beaconSimulation1Test(ctx context.Context, conf *testBeaconConfig, target string) testResult { + testRes := testResult{Name: "Simulate1"} + if !conf.LoadTest { + testRes.Verdict = testVerdictSkipped + return testRes + } + + params := simParams{ + TotalValidatorsCount: 1, + AttestationValidatorsCount: 0, + ProposalValidatorsCount: 0, + SyncCommitteeValidatorsCount: 1, + RequestIntensity: RequestsIntensity{ + AttestationDuty: slotTime, + AggregatorDuty: slotTime * 2, + ProposalDuty: slotTime * 4, + SyncCommitteeSubmit: slotTime, + SyncCommitteeContribution: slotTime * 4, + SyncCommitteeSubscribe: epochTime, + }, + } + + return beaconSimulationTest(ctx, conf, target, testRes, params) +} + +func beaconSimulation10Test(ctx context.Context, conf *testBeaconConfig, target string) testResult { + testRes := testResult{Name: "Simulate10"} + if !conf.LoadTest { + testRes.Verdict = testVerdictSkipped + return testRes + } + + params := simParams{ + TotalValidatorsCount: 10, + AttestationValidatorsCount: 6, + ProposalValidatorsCount: 3, + SyncCommitteeValidatorsCount: 1, + RequestIntensity: RequestsIntensity{ + AttestationDuty: slotTime, + AggregatorDuty: slotTime * 2, + ProposalDuty: slotTime * 4, + SyncCommitteeSubmit: slotTime, + SyncCommitteeContribution: slotTime * 4, + SyncCommitteeSubscribe: epochTime, + }, + } + + return beaconSimulationTest(ctx, conf, target, testRes, params) +} + +func beaconSimulation100Test(ctx context.Context, conf *testBeaconConfig, target string) testResult { + testRes := 
testResult{Name: "Simulate100"} + if !conf.LoadTest { + testRes.Verdict = testVerdictSkipped + return testRes + } + + params := simParams{ + TotalValidatorsCount: 100, + AttestationValidatorsCount: 80, + ProposalValidatorsCount: 18, + SyncCommitteeValidatorsCount: 2, + RequestIntensity: RequestsIntensity{ + AttestationDuty: slotTime, + AggregatorDuty: slotTime * 2, + ProposalDuty: slotTime * 4, + SyncCommitteeSubmit: slotTime, + SyncCommitteeContribution: slotTime * 4, + SyncCommitteeSubscribe: epochTime, + }, + } + + return beaconSimulationTest(ctx, conf, target, testRes, params) +} + +func beaconSimulation500Test(ctx context.Context, conf *testBeaconConfig, target string) testResult { + testRes := testResult{Name: "Simulate500"} + if !conf.LoadTest { + testRes.Verdict = testVerdictSkipped + return testRes + } + + params := simParams{ + TotalValidatorsCount: 500, + AttestationValidatorsCount: 450, + ProposalValidatorsCount: 45, + SyncCommitteeValidatorsCount: 5, + RequestIntensity: RequestsIntensity{ + AttestationDuty: slotTime, + AggregatorDuty: slotTime * 2, + ProposalDuty: slotTime * 4, + SyncCommitteeSubmit: slotTime, + SyncCommitteeContribution: slotTime * 4, + SyncCommitteeSubscribe: epochTime, + }, + } + + return beaconSimulationTest(ctx, conf, target, testRes, params) +} + +func beaconSimulation1000Test(ctx context.Context, conf *testBeaconConfig, target string) testResult { + testRes := testResult{Name: "Simulate1000"} + if !conf.LoadTest { + testRes.Verdict = testVerdictSkipped + return testRes + } + + params := simParams{ + TotalValidatorsCount: 1000, + AttestationValidatorsCount: 930, + ProposalValidatorsCount: 65, + SyncCommitteeValidatorsCount: 5, + RequestIntensity: RequestsIntensity{ + AttestationDuty: slotTime, + AggregatorDuty: slotTime * 2, + ProposalDuty: slotTime * 4, + SyncCommitteeSubmit: slotTime, + SyncCommitteeContribution: slotTime * 4, + SyncCommitteeSubscribe: epochTime, + }, + } + + return beaconSimulationTest(ctx, conf, target, 
testRes, params) +} + +func beaconSimulationCustomTest(ctx context.Context, conf *testBeaconConfig, target string) testResult { + testRes := testResult{Name: "SimulateCustom"} + if conf.SimulationCustom < 1 { + testRes.Verdict = testVerdictSkipped + return testRes + } + testRes.Name = fmt.Sprintf("Simulate%v", conf.SimulationCustom) + + total := conf.SimulationCustom + syncCommittees := total / 100 + if syncCommittees == 0 { + syncCommittees++ + } + proposals := total / 15 + if proposals == 0 && (total-syncCommittees != 0) { + proposals++ + } + attestations := total - syncCommittees - proposals + + params := simParams{ + TotalValidatorsCount: total, + AttestationValidatorsCount: attestations, + ProposalValidatorsCount: proposals, + SyncCommitteeValidatorsCount: syncCommittees, + RequestIntensity: RequestsIntensity{ + AttestationDuty: slotTime, + AggregatorDuty: slotTime * 2, + ProposalDuty: slotTime * 4, + SyncCommitteeSubmit: slotTime, + SyncCommitteeContribution: slotTime * 4, + SyncCommitteeSubscribe: epochTime, + }, + } + + return beaconSimulationTest(ctx, conf, target, testRes, params) +} + +func beaconSimulationTest(ctx context.Context, conf *testBeaconConfig, target string, testRes testResult, params simParams) testResult { + duration := time.Duration(conf.SimulationDuration)*slotTime + time.Second + var wg sync.WaitGroup + + log.Info(ctx, "Running beacon node simulation...", + z.Any("validators_count", params.TotalValidatorsCount), + z.Any("target", target), + z.Any("duration_in_slots", conf.SimulationDuration), + z.Any("slot_duration", slotTime), + ) + + // start general cluster requests + simulationGeneralResCh := make(chan SimulationCluster, 1) + var simulationGeneralRes SimulationCluster + wg.Add(1) + log.Info(ctx, "Starting general cluster requests...") + go singleClusterSimulation(ctx, duration, target, simulationGeneralResCh, wg.Done) + + // start validator requests + simulationResCh := make(chan SimulationSingleValidator, 
params.TotalValidatorsCount) + simulationResAll := []SimulationSingleValidator{} + + log.Info(ctx, "Starting validators performing duties attestation, aggregation, proposal, sync committee...", + z.Any("validators", params.SyncCommitteeValidatorsCount), + ) + syncCommitteeValidatorsDuties := DutiesPerformed{Attestation: true, Aggregation: true, Proposal: true, SyncCommittee: true} + for range params.SyncCommitteeValidatorsCount { + wg.Add(1) + go singleValidatorSimulation(ctx, duration, target, simulationResCh, params.RequestIntensity, syncCommitteeValidatorsDuties, &wg) + } + + log.Info(ctx, "Starting validators performing duties attestation, aggregation, proposal...", + z.Any("validators", params.ProposalValidatorsCount), + ) + proposalValidatorsDuties := DutiesPerformed{Attestation: true, Aggregation: true, Proposal: true, SyncCommittee: false} + for range params.ProposalValidatorsCount { + wg.Add(1) + go singleValidatorSimulation(ctx, duration, target, simulationResCh, params.RequestIntensity, proposalValidatorsDuties, &wg) + } + + log.Info(ctx, "Starting validators performing duties attestation, aggregation...", + z.Any("validators", params.AttestationValidatorsCount), + ) + attesterValidatorsDuties := DutiesPerformed{Attestation: true, Aggregation: true, Proposal: false, SyncCommittee: false} + for range params.AttestationValidatorsCount { + wg.Add(1) + go singleValidatorSimulation(ctx, duration, target, simulationResCh, params.RequestIntensity, attesterValidatorsDuties, &wg) + } + + log.Info(ctx, "Waiting for simulation to complete...") + // evaluate results + wg.Wait() + close(simulationGeneralResCh) + close(simulationResCh) + log.Info(ctx, "Simulation finished, evaluating results...") + simulationGeneralRes = <-simulationGeneralResCh + for result := range simulationResCh { + simulationResAll = append(simulationResAll, result) + } + + averageValidatorResult := averageValidatorsResult(simulationResAll) + + finalSimulation := Simulation{ + 
GeneralClusterRequests: simulationGeneralRes, + ValidatorsRequests: SimulationValidators{ + Averaged: averageValidatorResult, + AllValidators: simulationResAll, + }, + } + + if !conf.SimulationVerbose { + finalSimulation = nonVerboseFinalSimulation(finalSimulation) + } + simulationResAllJSON, err := json.Marshal(finalSimulation) + if err != nil { + log.Error(ctx, "Failed to marshal simulation result", err) + } + err = os.WriteFile(filepath.Join(conf.SimulationFileDir, fmt.Sprintf("%v-validators.json", params.TotalValidatorsCount)), simulationResAllJSON, 0o644) //nolint:gosec + if err != nil { + log.Error(ctx, "Failed to write file", err) + } + + highestRTT := Duration{0} + for _, sim := range simulationResAll { + if sim.Max.Duration > highestRTT.Duration { + highestRTT = sim.Max + } + } + if highestRTT.Duration > thresholdBeaconSimulationPoor { + testRes.Verdict = testVerdictPoor + } else if highestRTT.Duration > thresholdBeaconSimulationAvg { + testRes.Verdict = testVerdictAvg + } else { + testRes.Verdict = testVerdictGood + } + testRes.Measurement = highestRTT.String() + + log.Info(ctx, "Validators simulation finished", + z.Any("validators_count", params.TotalValidatorsCount), + z.Any("target", target), + ) + + return testRes +} + +// requests per 1 cluster + +func singleClusterSimulation(ctx context.Context, simulationDuration time.Duration, target string, resultCh chan SimulationCluster, wgDone func()) { + defer wgDone() + // per slot requests + attestationsForBlockCh := make(chan time.Duration) + attestationsForBlockAll := []time.Duration{} + proposalDutiesForEpochCh := make(chan time.Duration) + proposalDutiesForEpochAll := []time.Duration{} + // per 10 sec requests + syncingCh := make(chan time.Duration) + syncingAll := []time.Duration{} + // per minute requests + peerCountCh := make(chan time.Duration) + peerCountAll := []time.Duration{} + // per 12 slots requests + beaconCommitteeSubCh := make(chan time.Duration) + beaconCommitteeSubAll := 
[]time.Duration{} + // 3 times per epoch - at first slot of the epoch, at the last but one and the last + dutiesAttesterCh := make(chan time.Duration) + dutiesAttesterAll := []time.Duration{} + // 3 times per epoch - 10 seconds before the epoch - call for the epoch, at the time of epoch - call for the epoch and call for the epoch+256 + dutiesSyncCommitteeCh := make(chan time.Duration) + dutiesSyncCommitteeAll := []time.Duration{} + // once per epoch, at the beginning of the epoch + beaconHeadValidatorsCh := make(chan time.Duration) + beaconHeadValidatorsAll := []time.Duration{} + beaconGenesisCh := make(chan time.Duration) + beaconGenesisAll := []time.Duration{} + prepBeaconProposerCh := make(chan time.Duration) + prepBeaconProposerAll := []time.Duration{} + configSpecCh := make(chan time.Duration) + configSpecAll := []time.Duration{} + nodeVersionCh := make(chan time.Duration) // 7 seconds after start of epoch + nodeVersionAll := []time.Duration{} + // two endpoints called are not included: + // 1. /eth/v1/config/fork_schedule - it seemed at random every 240-600 epochs, didn't seem worth to do it + // 2. 
/eth/v1/events?topics=head - it happened only once for 26 hours, it didn't seem related to anything + + go clusterGeneralRequests(ctx, target, slotTime, simulationDuration, + attestationsForBlockCh, proposalDutiesForEpochCh, syncingCh, + peerCountCh, beaconCommitteeSubCh, dutiesAttesterCh, + dutiesSyncCommitteeCh, beaconHeadValidatorsCh, beaconGenesisCh, + prepBeaconProposerCh, configSpecCh, nodeVersionCh) + + finished := false + for !finished { + select { + case <-ctx.Done(): + finished = true + case result, ok := <-attestationsForBlockCh: + if !ok { + finished = true + continue + } + attestationsForBlockAll = append(attestationsForBlockAll, result) + case result, ok := <-proposalDutiesForEpochCh: + if !ok { + finished = true + continue + } + proposalDutiesForEpochAll = append(proposalDutiesForEpochAll, result) + case result, ok := <-syncingCh: + if !ok { + finished = true + continue + } + syncingAll = append(syncingAll, result) + case result, ok := <-peerCountCh: + if !ok { + finished = true + continue + } + peerCountAll = append(peerCountAll, result) + case result, ok := <-beaconCommitteeSubCh: + if !ok { + finished = true + continue + } + beaconCommitteeSubAll = append(beaconCommitteeSubAll, result) + case result, ok := <-dutiesAttesterCh: + if !ok { + finished = true + continue + } + dutiesAttesterAll = append(dutiesAttesterAll, result) + case result, ok := <-dutiesSyncCommitteeCh: + if !ok { + finished = true + continue + } + dutiesSyncCommitteeAll = append(dutiesSyncCommitteeAll, result) + case result, ok := <-beaconHeadValidatorsCh: + if !ok { + finished = true + continue + } + beaconHeadValidatorsAll = append(beaconHeadValidatorsAll, result) + case result, ok := <-beaconGenesisCh: + if !ok { + finished = true + continue + } + beaconGenesisAll = append(beaconGenesisAll, result) + case result, ok := <-prepBeaconProposerCh: + if !ok { + finished = true + continue + } + prepBeaconProposerAll = append(prepBeaconProposerAll, result) + case result, ok := 
<-configSpecCh: + if !ok { + finished = true + continue + } + configSpecAll = append(configSpecAll, result) + case result, ok := <-nodeVersionCh: + if !ok { + finished = true + continue + } + nodeVersionAll = append(nodeVersionAll, result) + } + } + + attestationsForBlockValues := generateSimulationValues(attestationsForBlockAll, "GET /eth/v1/beacon/blocks/{BLOCK}/attestations") + proposalDutiesForEpochValues := generateSimulationValues(proposalDutiesForEpochAll, "GET /eth/v1/validator/duties/proposer/{EPOCH}") + syncingValues := generateSimulationValues(syncingAll, "GET /eth/v1/node/syncing") + peerCountValues := generateSimulationValues(peerCountAll, "GET /eth/v1/node/peer_count") + beaconCommitteeSubValues := generateSimulationValues(beaconCommitteeSubAll, "POST /eth/v1/validator/beacon_committee_subscriptions") + dutiesAttesterValues := generateSimulationValues(dutiesAttesterAll, "POST /eth/v1/validator/duties/attester/{EPOCH}") + dutiesSyncCommitteeValues := generateSimulationValues(dutiesSyncCommitteeAll, "POST /eth/v1/validator/duties/sync/{EPOCH}") + beaconHeadValidatorsValues := generateSimulationValues(beaconHeadValidatorsAll, "POST /eth/v1/beacon/states/head/validators") + beaconGenesisValues := generateSimulationValues(beaconGenesisAll, "GET /eth/v1/beacon/genesis") + prepBeaconProposerValues := generateSimulationValues(prepBeaconProposerAll, "POST /eth/v1/validator/prepare_beacon_proposer") + configSpecValues := generateSimulationValues(configSpecAll, "GET /eth/v1/config/spec") + nodeVersionValues := generateSimulationValues(nodeVersionAll, "GET /eth/v1/node/version") + + generalResults := SimulationCluster{ + AttestationsForBlockRequest: attestationsForBlockValues, + ProposalDutiesForEpochRequest: proposalDutiesForEpochValues, + SyncingRequest: syncingValues, + PeerCountRequest: peerCountValues, + BeaconCommitteeSubscriptionRequest: beaconCommitteeSubValues, + DutiesAttesterForEpochRequest: dutiesAttesterValues, + DutiesSyncCommitteeForEpochRequest: 
dutiesSyncCommitteeValues, + BeaconHeadValidatorsRequest: beaconHeadValidatorsValues, + BeaconGenesisRequest: beaconGenesisValues, + PrepBeaconProposerRequest: prepBeaconProposerValues, + ConfigSpecRequest: configSpecValues, + NodeVersionRequest: nodeVersionValues, + } + + resultCh <- generalResults +} + +func clusterGeneralRequests( + ctx context.Context, target string, slotTime time.Duration, simulationDuration time.Duration, + attestationsForBlockCh chan time.Duration, proposalDutiesForEpochCh chan time.Duration, syncingCh chan time.Duration, + peerCountCh chan time.Duration, beaconCommitteeSubCh chan time.Duration, dutiesAttesterCh chan time.Duration, + dutiesSyncCommitteeCh chan time.Duration, beaconHeadValidatorsCh chan time.Duration, beaconGenesisCh chan time.Duration, + prepBeaconProposerCh chan time.Duration, configSpecCh chan time.Duration, nodeVersionCh chan time.Duration, +) { + defer func() { + close(proposalDutiesForEpochCh) + close(attestationsForBlockCh) + close(syncingCh) + close(peerCountCh) + close(beaconCommitteeSubCh) + close(dutiesAttesterCh) + close(dutiesSyncCommitteeCh) + close(beaconHeadValidatorsCh) + close(beaconGenesisCh) + close(prepBeaconProposerCh) + close(configSpecCh) + close(nodeVersionCh) + }() + // slot ticker + tickerSlot := time.NewTicker(slotTime) + defer tickerSlot.Stop() + // 12 slots ticker + ticker12Slots := time.NewTicker(12 * slotTime) + defer ticker12Slots.Stop() + // 10 sec ticker + ticker10Sec := time.NewTicker(10 * time.Second) + defer ticker10Sec.Stop() + // minute ticker + tickerMinute := time.NewTicker(time.Minute) + defer tickerMinute.Stop() + + slot, err := getCurrentSlot(ctx, target) + if err != nil { + log.Error(ctx, "Failed to get current slot", err) + slot = 1 + } + + pingCtx, cancel := context.WithTimeout(ctx, simulationDuration) + defer cancel() + + for pingCtx.Err() == nil { + select { + case <-tickerSlot.C: + slot++ + epoch := slot / slotsInEpoch + + // requests executed at every slot + 
attestationsResult, err := getAttestationsForBlock(ctx, target, slot-6) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected getAttestationsForBlock failure", err) + } + attestationsForBlockCh <- attestationsResult + submitResult, err := getProposalDutiesForEpoch(ctx, target, epoch) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected getProposalDutiesForEpoch failure", err) + } + proposalDutiesForEpochCh <- submitResult + + // requests executed at the first slot of the epoch + if slot%slotsInEpoch == 0 { + dutiesAttesterResult, err := getAttesterDutiesForEpoch(ctx, target, epoch) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected getAttesterDutiesForEpoch failure", err) + } + dutiesAttesterCh <- dutiesAttesterResult + + dutiesSyncCommitteeResult, err := getSyncCommitteeDutiesForEpoch(ctx, target, epoch) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected getSyncCommitteeDutiesForEpoch failure", err) + } + dutiesSyncCommitteeCh <- dutiesSyncCommitteeResult + + beaconHeadValidatorsResult, err := beaconHeadValidators(ctx, target) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected beaconHeadValidators failure", err) + } + beaconHeadValidatorsCh <- beaconHeadValidatorsResult + + beaconGenesisResult, err := beaconGenesis(ctx, target) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected beaconGenesis failure", err) + } + beaconGenesisCh <- beaconGenesisResult + + prepBeaconProposerResult, err := prepBeaconProposer(ctx, target) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected prepBeaconProposer failure", err) + } + prepBeaconProposerCh <- prepBeaconProposerResult + + configSpecResult, err := configSpec(ctx, target) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected configSpec failure", err) + } + configSpecCh <- 
configSpecResult + + nodeVersionResult, err := nodeVersion(ctx, target) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected nodeVersion failure", err) + } + nodeVersionCh <- nodeVersionResult + } + + // requests executed at the last but one slot of the epoch + if slot%slotsInEpoch == slotsInEpoch-2 { + dutiesAttesterResult, err := getAttesterDutiesForEpoch(ctx, target, epoch) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected getAttesterDutiesForEpoch failure", err) + } + dutiesAttesterCh <- dutiesAttesterResult + } + + // requests executed at the last slot of the epoch + if slot%slotsInEpoch == slotsInEpoch-1 { + dutiesAttesterResult, err := getAttesterDutiesForEpoch(ctx, target, epoch) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected getAttesterDutiesForEpoch failure", err) + } + dutiesAttesterCh <- dutiesAttesterResult + + dutiesSyncCommitteeResult, err := getSyncCommitteeDutiesForEpoch(ctx, target, epoch) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected getSyncCommitteeDutiesForEpoch failure", err) + } + dutiesSyncCommitteeCh <- dutiesSyncCommitteeResult + + dutiesSyncCommitteeResultFuture, err := getSyncCommitteeDutiesForEpoch(ctx, target, epoch+256) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected getSyncCommitteeDutiesForEpoch for the future epoch failure", err) + } + dutiesSyncCommitteeCh <- dutiesSyncCommitteeResultFuture + } + case <-ticker12Slots.C: + beaconCommitteeSubResult, err := beaconCommitteeSub(ctx, target) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected beaconCommitteeSub failure", err) + } + beaconCommitteeSubCh <- beaconCommitteeSubResult + case <-ticker10Sec.C: + getSyncingResult, err := getSyncing(ctx, target) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected getSyncing failure", err) + } + syncingCh <- 
getSyncingResult + case <-tickerMinute.C: + peerCountResult, err := getPeerCount(ctx, target) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected getPeerCount failure", err) + } + peerCountCh <- peerCountResult + case <-pingCtx.Done(): + } + } +} + +// requests per 1 validator + +func singleValidatorSimulation(ctx context.Context, simulationDuration time.Duration, target string, resultCh chan SimulationSingleValidator, intensity RequestsIntensity, dutiesPerformed DutiesPerformed, wg *sync.WaitGroup) { + defer wg.Done() + // attestations + getAttestationDataCh := make(chan time.Duration) + getAttestationDataAll := []time.Duration{} + submitAttestationObjectCh := make(chan time.Duration) + submitAttestationObjectAll := []time.Duration{} + if dutiesPerformed.Attestation { + go attestationDuty(ctx, target, simulationDuration, intensity.AttestationDuty, getAttestationDataCh, submitAttestationObjectCh) + } + + // aggregations + getAggregateAttestationsCh := make(chan time.Duration) + getAggregateAttestationsAll := []time.Duration{} + submitAggregateAndProofsCh := make(chan time.Duration) + submitAggregateAndProofsAll := []time.Duration{} + if dutiesPerformed.Aggregation { + go aggregationDuty(ctx, target, simulationDuration, intensity.AggregatorDuty, getAggregateAttestationsCh, submitAggregateAndProofsCh) + } + + // proposals + produceBlockCh := make(chan time.Duration) + produceBlockAll := []time.Duration{} + publishBlindedBlockCh := make(chan time.Duration) + publishBlindedBlockAll := []time.Duration{} + if dutiesPerformed.Proposal { + go proposalDuty(ctx, target, simulationDuration, intensity.ProposalDuty, produceBlockCh, publishBlindedBlockCh) + } + + // sync_committee + syncCommitteeSubscriptionCh := make(chan time.Duration) + syncCommitteeSubscriptionAll := []time.Duration{} + submitSyncCommitteeMessageCh := make(chan time.Duration) + submitSyncCommitteeMessageAll := []time.Duration{} + produceSyncCommitteeContributionCh := make(chan 
time.Duration) + produceSyncCommitteeContributionAll := []time.Duration{} + submitSyncCommitteeContributionCh := make(chan time.Duration) + submitSyncCommitteeContributionAll := []time.Duration{} + if dutiesPerformed.SyncCommittee { + go syncCommitteeDuties(ctx, target, + simulationDuration, intensity.SyncCommitteeSubmit, intensity.SyncCommitteeSubscribe, intensity.SyncCommitteeContribution, + submitSyncCommitteeMessageCh, produceSyncCommitteeContributionCh, syncCommitteeSubscriptionCh, submitSyncCommitteeContributionCh) + } + + // capture results + finished := false + for !finished { + select { + case <-ctx.Done(): + finished = true + // attestations + case result, ok := <-getAttestationDataCh: + if !ok { + finished = true + continue + } + getAttestationDataAll = append(getAttestationDataAll, result) + case result, ok := <-submitAttestationObjectCh: + if !ok { + finished = true + continue + } + submitAttestationObjectAll = append(submitAttestationObjectAll, result) + // aggregations + case result, ok := <-getAggregateAttestationsCh: + if !ok { + finished = true + continue + } + getAggregateAttestationsAll = append(getAggregateAttestationsAll, result) + case result, ok := <-submitAggregateAndProofsCh: + if !ok { + finished = true + continue + } + submitAggregateAndProofsAll = append(submitAggregateAndProofsAll, result) + // proposals + case result, ok := <-produceBlockCh: + if !ok { + finished = true + continue + } + produceBlockAll = append(produceBlockAll, result) + case result, ok := <-publishBlindedBlockCh: + if !ok { + finished = true + continue + } + publishBlindedBlockAll = append(publishBlindedBlockAll, result) + // sync_committee + case result, ok := <-syncCommitteeSubscriptionCh: + if !ok { + finished = true + continue + } + syncCommitteeSubscriptionAll = append(syncCommitteeSubscriptionAll, result) + case result, ok := <-submitSyncCommitteeMessageCh: + if !ok { + finished = true + continue + } + submitSyncCommitteeMessageAll = 
append(submitSyncCommitteeMessageAll, result) + case result, ok := <-produceSyncCommitteeContributionCh: + if !ok { + finished = true + continue + } + produceSyncCommitteeContributionAll = append(produceSyncCommitteeContributionAll, result) + case result, ok := <-submitSyncCommitteeContributionCh: + + if !ok { + finished = true + continue + } + submitSyncCommitteeContributionAll = append(submitSyncCommitteeContributionAll, result) + } + } + + var allRequests []time.Duration + + // attestation results grouping + var attestationResult SimulationAttestation + if dutiesPerformed.Attestation { + getSimulationValues := generateSimulationValues(getAttestationDataAll, "GET /eth/v1/validator/attestation_data") + submitSimulationValues := generateSimulationValues(submitAttestationObjectAll, "POST /eth/v1/beacon/pool/attestations") + + cumulativeAttestation := []time.Duration{} + for i := range min(len(getAttestationDataAll), len(submitAttestationObjectAll)) { + cumulativeAttestation = append(cumulativeAttestation, getAttestationDataAll[i]+submitAttestationObjectAll[i]) + } + cumulativeSimulationValues := generateSimulationValues(cumulativeAttestation, "") + allRequests = append(allRequests, cumulativeAttestation...) 
+ + attestationResult = SimulationAttestation{ + GetAttestationDataRequest: getSimulationValues, + PostAttestationsRequest: submitSimulationValues, + SimulationValues: cumulativeSimulationValues, + } + } + + // aggregation results grouping + var aggregationResults SimulationAggregation + if dutiesPerformed.Aggregation { + getAggregateSimulationValues := generateSimulationValues(getAggregateAttestationsAll, "GET /eth/v1/validator/aggregate_attestation") + submitAggregateSimulationValues := generateSimulationValues(submitAggregateAndProofsAll, "POST /eth/v1/validator/aggregate_and_proofs") + + cumulativeAggregations := []time.Duration{} + for i := range min(len(getAggregateAttestationsAll), len(submitAggregateAndProofsAll)) { + cumulativeAggregations = append(cumulativeAggregations, getAggregateAttestationsAll[i]+submitAggregateAndProofsAll[i]) + } + cumulativeAggregationsSimulationValues := generateSimulationValues(cumulativeAggregations, "") + allRequests = append(allRequests, cumulativeAggregations...) + + aggregationResults = SimulationAggregation{ + GetAggregateAttestationRequest: getAggregateSimulationValues, + PostAggregateAndProofsRequest: submitAggregateSimulationValues, + SimulationValues: cumulativeAggregationsSimulationValues, + } + } + + // proposal results grouping + var proposalResults SimulationProposal + if dutiesPerformed.Proposal { + produceBlockValues := generateSimulationValues(produceBlockAll, "GET /eth/v3/validator/blocks/{SLOT}") + publishBlindedBlockValues := generateSimulationValues(publishBlindedBlockAll, "POST /eth/v2/beacon/blinded") + + cumulativeProposals := []time.Duration{} + for i := range min(len(produceBlockAll), len(publishBlindedBlockAll)) { + cumulativeProposals = append(cumulativeProposals, produceBlockAll[i]+publishBlindedBlockAll[i]) + } + cumulativeProposalsSimulationValues := generateSimulationValues(cumulativeProposals, "") + allRequests = append(allRequests, cumulativeProposals...) 
+ + proposalResults = SimulationProposal{ + ProduceBlockRequest: produceBlockValues, + PublishBlindedBlockRequest: publishBlindedBlockValues, + SimulationValues: cumulativeProposalsSimulationValues, + } + } + + // sync committee results grouping + var syncCommitteeResults SimulationSyncCommittee + if dutiesPerformed.SyncCommittee { + syncCommitteeAll := []time.Duration{} + syncCommitteeSubscriptionValues := generateSimulationValues(syncCommitteeSubscriptionAll, "POST /eth/v1/validator/sync_committee_subscriptions") + syncCommitteeAll = append(syncCommitteeAll, syncCommitteeSubscriptionAll...) + allRequests = append(allRequests, syncCommitteeSubscriptionAll...) + + submitSyncCommitteeMessageValues := generateSimulationValues(submitSyncCommitteeMessageAll, "POST /eth/v1/beacon/pool/sync_committees") + syncCommitteeAll = append(syncCommitteeAll, submitSyncCommitteeMessageAll...) + allRequests = append(allRequests, submitSyncCommitteeMessageAll...) + + produceSyncCommitteeContributionValues := generateSimulationValues(produceSyncCommitteeContributionAll, "GET /eth/v1/validator/sync_committee_contribution") + submitSyncCommitteeContributionValues := generateSimulationValues(submitSyncCommitteeContributionAll, "POST /eth/v1/validator/contribution_and_proofs") + + syncCommitteeContributionAll := []time.Duration{} + for i := range min(len(produceSyncCommitteeContributionAll), len(submitSyncCommitteeContributionAll)) { + syncCommitteeContributionAll = append(syncCommitteeContributionAll, produceSyncCommitteeContributionAll[i]+submitSyncCommitteeContributionAll[i]) + } + syncCommitteeContributionValues := generateSimulationValues(syncCommitteeContributionAll, "") + syncCommitteeAll = append(syncCommitteeAll, syncCommitteeContributionAll...) + allRequests = append(allRequests, syncCommitteeContributionAll...) 
+ + cumulativeSyncCommitteesSimulationValues := generateSimulationValues(syncCommitteeAll, "") + + syncCommitteeResults = SimulationSyncCommittee{ + MessageDuty: SyncCommitteeMessageDuty{ + SubmitSyncCommitteeMessageRequest: submitSyncCommitteeMessageValues, + }, + ContributionDuty: SyncCommitteeContributionDuty{ + ProduceSyncCommitteeContributionRequest: produceSyncCommitteeContributionValues, + SubmitSyncCommitteeContributionRequest: submitSyncCommitteeContributionValues, + SimulationValues: syncCommitteeContributionValues, + }, + SubscribeSyncCommitteeRequest: syncCommitteeSubscriptionValues, + SimulationValues: cumulativeSyncCommitteesSimulationValues, + } + } + + allResult := generateSimulationValues(allRequests, "") + + resultCh <- SimulationSingleValidator{ + AttestationDuty: attestationResult, + AggregationDuty: aggregationResults, + ProposalDuty: proposalResults, + SyncCommitteeDuties: syncCommitteeResults, + SimulationValues: allResult, + } +} + +func attestationDuty(ctx context.Context, target string, simulationDuration time.Duration, tickTime time.Duration, getAttestationDataCh chan time.Duration, submitAttestationObjectCh chan time.Duration) { + defer close(getAttestationDataCh) + defer close(submitAttestationObjectCh) + pingCtx, cancel := context.WithTimeout(ctx, simulationDuration) + defer cancel() + + time.Sleep(randomizeStart(tickTime)) + ticker := time.NewTicker(tickTime) + defer ticker.Stop() + slot, err := getCurrentSlot(ctx, target) + if err != nil { + log.Error(ctx, "Failed to get current slot", err) + slot = 1 + } + for pingCtx.Err() == nil { + getResult, err := getAttestationData(ctx, target, slot, rand.Intn(committeeSizePerSlot)) //nolint:gosec // weak generator is not an issue here + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected getAttestationData failure", err) + } + getAttestationDataCh <- getResult + + submitResult, err := submitAttestationObject(ctx, target) + if err != nil && !errors.Is(err, 
context.Canceled) { + log.Error(ctx, "Unexpected submitAttestationObject failure", err) + } + submitAttestationObjectCh <- submitResult + + select { + case <-pingCtx.Done(): + case <-ticker.C: + slot += int(tickTime.Seconds()) / int(slotTime.Seconds()) + } + } +} + +func aggregationDuty(ctx context.Context, target string, simulationDuration time.Duration, tickTime time.Duration, getAggregateAttestationsCh chan time.Duration, submitAggregateAndProofsCh chan time.Duration) { + defer close(getAggregateAttestationsCh) + defer close(submitAggregateAndProofsCh) + pingCtx, cancel := context.WithTimeout(ctx, simulationDuration) + defer cancel() + slot, err := getCurrentSlot(ctx, target) + if err != nil { + log.Error(ctx, "Failed to get current slot", err) + slot = 1 + } + + time.Sleep(randomizeStart(tickTime)) + ticker := time.NewTicker(tickTime) + defer ticker.Stop() + + for pingCtx.Err() == nil { + getResult, err := getAggregateAttestations(ctx, target, slot, "0x87db5c50a4586fa37662cf332382d56a0eeea688a7d7311a42735683dfdcbfa4") + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected getAggregateAttestations failure", err) + } + submitResult, err := postAggregateAndProofs(ctx, target) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected aggregateAndProofs failure", err) + } + getAggregateAttestationsCh <- getResult + submitAggregateAndProofsCh <- submitResult + select { + case <-pingCtx.Done(): + case <-ticker.C: + slot += int(tickTime.Seconds()) / int(slotTime.Seconds()) + } + } +} + +func proposalDuty(ctx context.Context, target string, simulationDuration time.Duration, tickTime time.Duration, produceBlockCh chan time.Duration, publishBlindedBlockCh chan time.Duration) { + defer close(produceBlockCh) + defer close(publishBlindedBlockCh) + pingCtx, cancel := context.WithTimeout(ctx, simulationDuration) + defer cancel() + + time.Sleep(randomizeStart(tickTime)) + ticker := time.NewTicker(tickTime) + defer 
ticker.Stop() + slot, err := getCurrentSlot(ctx, target) + if err != nil { + log.Error(ctx, "Failed to get current slot", err) + slot = 1 + } + for pingCtx.Err() == nil { + produceResult, err := produceBlock(ctx, target, slot, "0x1fe79e4193450abda94aec753895cfb2aac2c2a930b6bab00fbb27ef6f4a69f4400ad67b5255b91837982b4c511ae1d94eae1cf169e20c11bd417c1fffdb1f99f4e13e2de68f3b5e73f1de677d73cd43e44bf9b133a79caf8e5fad06738e1b0c") + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected produceBlock failure", err) + } + publishResult, err := publishBlindedBlock(ctx, target) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected publishBlindedBlock failure", err) + } + produceBlockCh <- produceResult + publishBlindedBlockCh <- publishResult + select { + case <-pingCtx.Done(): + case <-ticker.C: + slot += int(tickTime.Seconds())/int(slotTime.Seconds()) + 1 // produce block for the next slot, as the current one might have already been proposed + } + } +} + +func syncCommitteeDuties( + ctx context.Context, target string, + simulationDuration time.Duration, tickTimeSubmit time.Duration, tickTimeSubscribe time.Duration, tickTimeContribution time.Duration, + submitSyncCommitteesCh chan time.Duration, produceSyncCommitteeContributionCh chan time.Duration, syncCommitteeSubscriptionCh chan time.Duration, syncCommitteeContributionCh chan time.Duration, +) { + go syncCommitteeContributionDuty(ctx, target, simulationDuration, tickTimeContribution, produceSyncCommitteeContributionCh, syncCommitteeContributionCh) + go syncCommitteeMessageDuty(ctx, target, simulationDuration, tickTimeSubmit, submitSyncCommitteesCh) + + defer close(syncCommitteeSubscriptionCh) + pingCtx, cancel := context.WithTimeout(ctx, simulationDuration) + defer cancel() + + time.Sleep(randomizeStart(tickTimeSubscribe)) + ticker := time.NewTicker(tickTimeSubscribe) + defer ticker.Stop() + + for pingCtx.Err() == nil { + subscribeResult, err := 
syncCommitteeSubscription(ctx, target) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected syncCommitteeSubscription failure", err) + } + syncCommitteeSubscriptionCh <- subscribeResult + + select { + case <-pingCtx.Done(): + case <-ticker.C: + } + } +} + +func syncCommitteeContributionDuty(ctx context.Context, target string, simulationDuration time.Duration, tickTime time.Duration, produceSyncCommitteeContributionCh chan time.Duration, syncCommitteeContributionCh chan time.Duration) { + defer close(produceSyncCommitteeContributionCh) + defer close(syncCommitteeContributionCh) + pingCtx, cancel := context.WithTimeout(ctx, simulationDuration) + defer cancel() + + time.Sleep(randomizeStart(tickTime)) + ticker := time.NewTicker(tickTime) + defer ticker.Stop() + + slot, err := getCurrentSlot(ctx, target) + if err != nil { + log.Error(ctx, "Failed to get current slot", err) + slot = 1 + } + for pingCtx.Err() == nil { + produceResult, err := produceSyncCommitteeContribution(ctx, target, slot, rand.Intn(subCommitteeSize), "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2") //nolint:gosec // weak generator is not an issue here + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected produceSyncCommitteeContribution failure", err) + } + produceSyncCommitteeContributionCh <- produceResult + contributeResult, err := submitSyncCommitteeContribution(ctx, target) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected submitSyncCommitteeContribution failure", err) + } + syncCommitteeContributionCh <- contributeResult + select { + case <-pingCtx.Done(): + case <-ticker.C: + slot += int(tickTime.Seconds()) / int(slotTime.Seconds()) + } + } +} + +func syncCommitteeMessageDuty(ctx context.Context, target string, simulationDuration time.Duration, tickTime time.Duration, submitSyncCommitteesCh chan time.Duration) { + defer close(submitSyncCommitteesCh) + pingCtx, cancel := 
context.WithTimeout(ctx, simulationDuration) + defer cancel() + + time.Sleep(randomizeStart(tickTime)) + ticker := time.NewTicker(tickTime) + defer ticker.Stop() + + for pingCtx.Err() == nil { + submitResult, err := submitSyncCommittee(ctx, target) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Unexpected submitSyncCommittee failure", err) + } + submitSyncCommitteesCh <- submitResult + select { + case <-pingCtx.Done(): + case <-ticker.C: + } + } +} + +// simulation helper functions + +func getCurrentSlot(ctx context.Context, target string) (int, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, target+"/eth/v1/node/syncing", nil) + if err != nil { + return 0, errors.Wrap(err, "create new http request") + } + resp, err := new(http.Client).Do(req) + if err != nil { + return 0, errors.Wrap(err, "call /eth/v1/node/syncing endpoint") + } + defer resp.Body.Close() + + if resp.StatusCode/100 != 2 { + return 0, errors.New("post failed", z.Int("status", resp.StatusCode)) + } + + type syncingResponseData struct { + HeadSlot string `json:"head_slot"` + } + type syncingResponse struct { + Data syncingResponseData `json:"data"` + } + var sr syncingResponse + if err := json.NewDecoder(resp.Body).Decode(&sr); err != nil { + return 0, errors.Wrap(err, "json unmarshal error") + } + + head, err := strconv.Atoi(sr.Data.HeadSlot) + if err != nil { + return 0, errors.Wrap(err, "head slot string to int") + } + + return head, nil +} + +// if verbose flag is not passed, don't output `All` field and results per single validator +func nonVerboseFinalSimulation(s Simulation) Simulation { + s.ValidatorsRequests.AllValidators = []SimulationSingleValidator{} + + s.ValidatorsRequests.Averaged.All = []Duration{} + s.ValidatorsRequests.Averaged.AggregationDuty.All = []Duration{} + s.ValidatorsRequests.Averaged.AggregationDuty.GetAggregateAttestationRequest.All = []Duration{} + 
s.ValidatorsRequests.Averaged.AggregationDuty.PostAggregateAndProofsRequest.All = []Duration{} + s.ValidatorsRequests.Averaged.AttestationDuty.All = []Duration{} + s.ValidatorsRequests.Averaged.AttestationDuty.GetAttestationDataRequest.All = []Duration{} + s.ValidatorsRequests.Averaged.AttestationDuty.PostAttestationsRequest.All = []Duration{} + s.ValidatorsRequests.Averaged.ProposalDuty.All = []Duration{} + s.ValidatorsRequests.Averaged.ProposalDuty.ProduceBlockRequest.All = []Duration{} + s.ValidatorsRequests.Averaged.ProposalDuty.PublishBlindedBlockRequest.All = []Duration{} + s.ValidatorsRequests.Averaged.SyncCommitteeDuties.All = []Duration{} + s.ValidatorsRequests.Averaged.SyncCommitteeDuties.ContributionDuty.All = []Duration{} + s.ValidatorsRequests.Averaged.SyncCommitteeDuties.ContributionDuty.ProduceSyncCommitteeContributionRequest.All = []Duration{} + s.ValidatorsRequests.Averaged.SyncCommitteeDuties.ContributionDuty.SubmitSyncCommitteeContributionRequest.All = []Duration{} + s.ValidatorsRequests.Averaged.SyncCommitteeDuties.MessageDuty.SubmitSyncCommitteeMessageRequest.All = []Duration{} + s.ValidatorsRequests.Averaged.SyncCommitteeDuties.SubscribeSyncCommitteeRequest.All = []Duration{} + + s.GeneralClusterRequests.AttestationsForBlockRequest.All = []Duration{} + s.GeneralClusterRequests.ProposalDutiesForEpochRequest.All = []Duration{} + s.GeneralClusterRequests.SyncingRequest.All = []Duration{} + s.GeneralClusterRequests.PeerCountRequest.All = []Duration{} + s.GeneralClusterRequests.BeaconCommitteeSubscriptionRequest.All = []Duration{} + s.GeneralClusterRequests.DutiesAttesterForEpochRequest.All = []Duration{} + s.GeneralClusterRequests.DutiesSyncCommitteeForEpochRequest.All = []Duration{} + s.GeneralClusterRequests.BeaconHeadValidatorsRequest.All = []Duration{} + s.GeneralClusterRequests.BeaconGenesisRequest.All = []Duration{} + s.GeneralClusterRequests.PrepBeaconProposerRequest.All = []Duration{} + s.GeneralClusterRequests.ConfigSpecRequest.All = 
[]Duration{} + s.GeneralClusterRequests.NodeVersionRequest.All = []Duration{} + + return s +} + +func mapDurationToTime(dur []Duration) []time.Duration { + result := make([]time.Duration, len(dur)) + for i, e := range dur { + result[i] = e.Duration + } + + return result +} + +func generateSimulationValues(s []time.Duration, endpoint string) SimulationValues { + if len(s) == 0 { + return SimulationValues{ + Endpoint: endpoint, + All: []Duration{}, + Min: Duration{0}, + Max: Duration{0}, + Median: Duration{0}, + Avg: Duration{0}, + } + } + + sorted := make([]time.Duration, len(s)) + copy(sorted, s) + sort.Slice(sorted, func(i, j int) bool { + return sorted[i] < sorted[j] + }) + minVal := sorted[0] + maxVal := sorted[len(s)-1] + medianVal := sorted[len(s)/2] + var sum time.Duration + all := []Duration{} + for _, t := range s { + sum += t + all = append(all, Duration{t}) + } + avgVal := time.Duration(int(sum.Nanoseconds()) / len(s)) + + return SimulationValues{ + Endpoint: endpoint, + All: all, + Min: Duration{minVal}, + Max: Duration{maxVal}, + Median: Duration{medianVal}, + Avg: Duration{avgVal}, + } +} + +func averageValidatorsResult(s []SimulationSingleValidator) SimulationSingleValidator { + if len(s) == 0 { + return SimulationSingleValidator{} + } + + var attestation, attestationGetDuties, attestationPostData, + aggregation, aggregationGetAggregationAttestations, aggregationSubmitAggregateAndProofs, + proposal, proposalProduceBlock, proposalPublishBlindedBlock, + syncCommittee, syncCommitteeSubmitMessage, syncCommitteeProduceContribution, syncCommitteeSubmitContribution, syncCommitteeContribution, syncCommitteeSusbscription, + all []time.Duration + + for _, sim := range s { + attestationGetDuties = append(attestationGetDuties, mapDurationToTime(sim.AttestationDuty.GetAttestationDataRequest.All)...) + attestationPostData = append(attestationPostData, mapDurationToTime(sim.AttestationDuty.PostAttestationsRequest.All)...) 
+ attestation = append(attestation, mapDurationToTime(sim.AttestationDuty.All)...) + aggregationGetAggregationAttestations = append(aggregationGetAggregationAttestations, mapDurationToTime(sim.AggregationDuty.GetAggregateAttestationRequest.All)...) + aggregationSubmitAggregateAndProofs = append(aggregationSubmitAggregateAndProofs, mapDurationToTime(sim.AggregationDuty.PostAggregateAndProofsRequest.All)...) + aggregation = append(aggregation, mapDurationToTime(sim.AggregationDuty.All)...) + proposalProduceBlock = append(proposalProduceBlock, mapDurationToTime(sim.ProposalDuty.ProduceBlockRequest.All)...) + proposalPublishBlindedBlock = append(proposalPublishBlindedBlock, mapDurationToTime(sim.ProposalDuty.PublishBlindedBlockRequest.All)...) + proposal = append(proposal, mapDurationToTime(sim.ProposalDuty.All)...) + syncCommitteeSubmitMessage = append(syncCommitteeSubmitMessage, mapDurationToTime(sim.SyncCommitteeDuties.MessageDuty.SubmitSyncCommitteeMessageRequest.All)...) + syncCommitteeProduceContribution = append(syncCommitteeProduceContribution, mapDurationToTime(sim.SyncCommitteeDuties.ContributionDuty.ProduceSyncCommitteeContributionRequest.All)...) + syncCommitteeSubmitContribution = append(syncCommitteeSubmitContribution, mapDurationToTime(sim.SyncCommitteeDuties.ContributionDuty.SubmitSyncCommitteeContributionRequest.All)...) + syncCommitteeContribution = append(syncCommitteeContribution, mapDurationToTime(sim.SyncCommitteeDuties.ContributionDuty.All)...) + syncCommitteeSusbscription = append(syncCommitteeSusbscription, mapDurationToTime(sim.SyncCommitteeDuties.SubscribeSyncCommitteeRequest.All)...) + syncCommittee = append(syncCommittee, mapDurationToTime(sim.SyncCommitteeDuties.All)...) + all = append(all, mapDurationToTime(sim.All)...) 
+ } + + return SimulationSingleValidator{ + AttestationDuty: SimulationAttestation{ + GetAttestationDataRequest: generateSimulationValues(attestationGetDuties, "GET /eth/v1/validator/attestation_data"), + PostAttestationsRequest: generateSimulationValues(attestationPostData, "POST /eth/v1/beacon/pool/attestations"), + SimulationValues: generateSimulationValues(attestation, ""), + }, + AggregationDuty: SimulationAggregation{ + GetAggregateAttestationRequest: generateSimulationValues(aggregationGetAggregationAttestations, "GET /eth/v1/validator/aggregate_attestation"), + PostAggregateAndProofsRequest: generateSimulationValues(aggregationSubmitAggregateAndProofs, "POST /eth/v1/validator/aggregate_and_proofs"), + SimulationValues: generateSimulationValues(aggregation, ""), + }, + ProposalDuty: SimulationProposal{ + ProduceBlockRequest: generateSimulationValues(proposalProduceBlock, "GET /eth/v3/validator/blocks/{SLOT}"), + PublishBlindedBlockRequest: generateSimulationValues(proposalPublishBlindedBlock, "POST /eth/v2/beacon/blinded"), + SimulationValues: generateSimulationValues(proposal, ""), + }, + SyncCommitteeDuties: SimulationSyncCommittee{ + MessageDuty: SyncCommitteeMessageDuty{ + SubmitSyncCommitteeMessageRequest: generateSimulationValues(syncCommitteeSubmitMessage, "POST /eth/v1/beacon/pool/sync_committees"), + }, + ContributionDuty: SyncCommitteeContributionDuty{ + ProduceSyncCommitteeContributionRequest: generateSimulationValues(syncCommitteeProduceContribution, "GET /eth/v1/validator/sync_committee_contribution"), + SubmitSyncCommitteeContributionRequest: generateSimulationValues(syncCommitteeSubmitContribution, "POST /eth/v1/validator/contribution_and_proofs"), + SimulationValues: generateSimulationValues(syncCommitteeContribution, ""), + }, + SubscribeSyncCommitteeRequest: generateSimulationValues(syncCommitteeSusbscription, "POST /eth/v1/validator/sync_committee_subscriptions"), + SimulationValues: generateSimulationValues(syncCommittee, ""), + }, + 
SimulationValues: generateSimulationValues(all, ""), + } +} + +// randomize duty execution start to be in [0, n*slot), where n is the frequency of the request per slot +func randomizeStart(tickTime time.Duration) time.Duration { + return slotTime * time.Duration(rand.Intn(int((tickTime / slotTime)))) //nolint:gosec // weak generator is not an issue here +} + +// simulation http requests - cluster + +func getAttestationsForBlock(ctx context.Context, target string, block int) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/beacon/blocks/%v/attestations", target, block), http.MethodGet, nil, 200) +} + +func getProposalDutiesForEpoch(ctx context.Context, target string, epoch int) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/duties/proposer/%v", target, epoch), http.MethodGet, nil, 200) +} + +func getSyncing(ctx context.Context, target string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/node/syncing", target), http.MethodGet, nil, 200) +} + +func getPeerCount(ctx context.Context, target string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/node/peer_count", target), http.MethodGet, nil, 200) +} + +func beaconCommitteeSub(ctx context.Context, target string) (time.Duration, error) { + body := strings.NewReader(`[{"validator_index":"1","committee_index":"1","committees_at_slot":"1","slot":"1","is_aggregator":true}]`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/beacon_committee_subscriptions", target), http.MethodPost, body, 200) +} + +func getAttesterDutiesForEpoch(ctx context.Context, target string, epoch int) (time.Duration, error) { + body := strings.NewReader(`["1"]`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/duties/attester/%v", target, epoch), http.MethodPost, body, 200) +} + +func getSyncCommitteeDutiesForEpoch(ctx context.Context, target string, epoch int) (time.Duration, error) { + body := strings.NewReader(`["1"]`) + 
return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/duties/sync/%v", target, epoch), http.MethodPost, body, 200) +} + +func beaconHeadValidators(ctx context.Context, target string) (time.Duration, error) { + body := strings.NewReader(`{"ids":["0xb6066945aa87a1e0e4b55e347d3a8a0ef7f0d9f7ef2c46abebadb25d7de176b83c88547e5f8644b659598063c845719a"]}`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/beacon/states/head/validators", target), http.MethodPost, body, 200) +} + +func beaconGenesis(ctx context.Context, target string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/beacon/genesis", target), http.MethodGet, nil, 200) +} + +func prepBeaconProposer(ctx context.Context, target string) (time.Duration, error) { + body := strings.NewReader(`[{"validator_index":"1725802","fee_recipient":"0x74b1C2f5788510c9ecA5f56D367B0a3D8a15a430"}]`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/prepare_beacon_proposer", target), http.MethodPost, body, 200) +} + +func configSpec(ctx context.Context, target string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/config/spec", target), http.MethodGet, nil, 200) +} + +func nodeVersion(ctx context.Context, target string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/node/version", target), http.MethodGet, nil, 200) +} + +// simulation http requests - attestation duty + +func getAttestationData(ctx context.Context, target string, slot int, committeeIndex int) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/attestation_data?slot=%v&committee_index=%v", target, slot, committeeIndex), http.MethodGet, nil, 200) +} + +func submitAttestationObject(ctx context.Context, target string) (time.Duration, error) { + body := 
strings.NewReader(`{{"aggregation_bits":"0x01","signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505","data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}}`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/beacon/pool/attestations", target), http.MethodPost, body, 400) +} + +// simulation http requests - aggregation duty + +func getAggregateAttestations(ctx context.Context, target string, slot int, attestationDataRoot string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/aggregate_attestation?slot=%v&attestation_data_root=%v", target, slot, attestationDataRoot), http.MethodGet, nil, 404) +} + +func postAggregateAndProofs(ctx context.Context, target string) (time.Duration, error) { + body := 
strings.NewReader(`[{"message":{"aggregator_index":"1","aggregate":{"aggregation_bits":"0x01","signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505","data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}},"selection_proof":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}]`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/aggregate_and_proofs", target), http.MethodPost, body, 400) +} + +// simulation http requests - proposal duty + +func produceBlock(ctx context.Context, target string, slot int, randaoReveal string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v3/validator/blocks/%v?randao_reveal=%v", target, slot, randaoReveal), http.MethodGet, nil, 200) +} + +func publishBlindedBlock(ctx context.Context, target string) (time.Duration, error) { + body := 
strings.NewReader(`{"message":{"slot":"2872079","proposer_index":"1725813","parent_root":"0x05bea9b8e9cc28c4efa5586b4efac20b7a42c3112dbe144fb552b37ded249abd","state_root":"0x0138e6e8e956218aa534597a450a93c2c98f07da207077b4be05742279688da2","body":{"randao_reveal":"0x9880dad5a0e900906a1355da0697821af687b4c2cd861cd219f2d779c50a47d3c0335c08d840c86c167986ae0aaf50070b708fe93a83f66c99a4f931f9a520aebb0f5b11ca202c3d76343e30e49f43c0479e850af0e410333f7c59c4d37fa95a","eth1_data":{"deposit_root":"0x7dbea1a0af14d774da92d94a88d3bb1ae7abad16374da4db2c71dd086c84029e","deposit_count":"452100","block_hash":"0xc4bf450c9e362dcb2b50e76b45938c78d455acd1e1aec4e1ce4338ec023cd32a"},"graffiti":"0x636861726f6e2f76312e312e302d613139336638340000000000000000000000","proposer_slashings":[],"attester_slashings":[],"attestations":[{"aggregation_bits":"0xdbedbfa74eccaf3d7ef570bfdbbf84b4dffc5beede1c1f8b59feb8b3f2fbabdbdef3ceeb7b3dfdeeef8efcbdcd7bebbeff7adfff5ae3bf66bc5613feffef3deb987f7e7fff87ed6f8bbd1fffa57f1677efff646f0d3bd79fffdc5dfd78df6cf79fb7febff5dfdefb8e03","data":{"slot":"2872060","index":"12","beacon_block_root":"0x310506169f7f92dcd2bf00e8b4c2daac999566929395120fbbf4edd222e003eb","source":{"epoch":"89750","root":"0xcdb449d69e3e2d22378bfc2299ee1e9aeb1b2d15066022e854759dda73d1e219"},"target":{"epoch":"89751","root":"0x4ad0882f7adbb735c56b0b3f09d8e45dbd79db9528110f7117ec067f3a19eb0e"}},"signature":"0xa9d91d6cbc669ffcc8ba2435c633e0ec0eebecaa3acdcaa1454282ece1f816e8b853f00ba67ec1244703221efae4c834012819ca7b199354669f24ba8ab1c769f072c9f46b803082eac32e3611cd323eeb5b17fcd6201b41f3063834ff26ef53"}],"deposits":[],"voluntary_exits":[],"sync_aggregate":{"sync_committee_bits":"0xf9ff3ff7ffffb7dbfefddff5fffffefdbffffffffffedfefffffff7fbe9fdffffdb5feffffffbfdbefff3ffdf7f3fc6ff7fffbffff9df6fbbaf3beffefffffff","sync_committee_signature":"0xa9cf7d9f23a62e84f11851e2e4b3b929b1d03719a780b59ecba5daf57e21a0ceccaf13db4e1392a42e3603abeb839a2d16373dcdd5e696f11c5a809972c1e368d794f1c61d4d10b220df52616032f09b33912febf8
c7a64f3ce067ab771c7ddf"},"execution_payload_header":{"parent_hash":"0x71c564f4a0c1dea921e8063fc620ccfa39c1b073e4ac0845ce7e9e6f909752de","fee_recipient":"0x148914866080716b10D686F5570631Fbb2207002","state_root":"0x89e74be562cd4a10eb20cdf674f65b1b0e53b33a7c3f2df848eb4f7e226742e0","receipts_root":"0x55b494ee1bb919e7abffaab1d5be05a109612c59a77406d929d77c0ce714f21d","logs_bloom":"0x20500886140245d001002010680c10411a2540420182810440a108800fc008440801180020011008004045005a2007826802e102000005c0c04030590004044810d0d20745c0904a4d583008a01758018001082024e40046000410020042400100012260220299a8084415e20002891224c132220010003a00006010020ed0c108920a13c0e200a1a00251100888c01408008132414068c88b028920440248209a280581a0e10800c14ea63082c1781308208b130508d4000400802d1224521094260912473404012810001503417b4050141100c1103004000c8900644560080472688450710084088800c4c80000c02008931188204c008009011784488060","prev_randao":"0xf4e9a4a7b88a3d349d779e13118b6d099f7773ec5323921343ac212df19c620f","block_number":"2643688","gas_limit":"30000000","gas_used":"24445884","timestamp":"1730367348","extra_data":"0x546974616e2028746974616e6275696c6465722e78797a29","base_fee_per_gas":"122747440","block_hash":"0x7524d779d328159e4d9ee8a4b04c4b251261da9a6da1d1461243125faa447227","transactions_root":"0x7e8a3391a77eaea563bf4e0ca4cf3190425b591ed8572818924c38f7e423c257","withdrawals_root":"0x61a5653b614ec3db0745ae5568e6de683520d84bc3db2dedf6a5158049cee807","blob_gas_used":"0","excess_blob_gas":"0"},"bls_to_execution_changes":[],"blob_kzg_commitments":[]}},"signature":"0x94320e6aecd65da3ef3e55e45208978844b262fe21cacbb0a8448b2caf21e8619b205c830116d8aad0a2c55d879fb571123a3fcf31b515f9508eb346ecd3de2db07cea6700379c00831cfb439f4aeb3bfa164395367c8d8befb92aa6682eae51"}`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v2/beacon/blinded", target), http.MethodPost, body, 404) +} + +// simulation http requests - sync committee duty + +func submitSyncCommittee(ctx context.Context, target string) (time.Duration, error) { + body := 
strings.NewReader(`{{"aggregation_bits":"0x01","signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505","data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}}`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/beacon/pool/sync_committees", target), http.MethodPost, body, 400) +} + +func produceSyncCommitteeContribution(ctx context.Context, target string, slot int, subCommitteeIndex int, beaconBlockRoot string) (time.Duration, error) { + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/sync_committee_contribution?slot=%v&subcommittee_index=%v&beacon_block_root=%v", target, slot, subCommitteeIndex, beaconBlockRoot), http.MethodGet, nil, 404) +} + +func syncCommitteeSubscription(ctx context.Context, target string) (time.Duration, error) { + body := 
strings.NewReader(`[{"message":{"aggregator_index":"1","aggregate":{"aggregation_bits":"0x01","signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505","data":{"slot":"1","index":"1","beacon_block_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","source":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"target":{"epoch":"1","root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}}},"selection_proof":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}]`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/sync_committee_subscriptions", target), http.MethodPost, body, 400) +} + +func submitSyncCommitteeContribution(ctx context.Context, target string) (time.Duration, error) { + body := 
strings.NewReader(`[{"message":{"aggregator_index":"1","contribution":{"slot":"1","beacon_block_root":"0xace2cad95a1b113457ccc680372880694a3ef820584d04a165aa2bda0f261950","subcommittee_index":"3","aggregation_bits":"0xfffffbfff7ddffffbef3bfffebffff7f","signature":"0xaa4cf0db0677555025fe12223572e67b509b0b24a2b07dc162aed38522febb2a64ad293e6dbfa1b81481eec250a2cdb61619456291f8d0e3f86097a42a71985d6dabd256107af8b4dfc2982a7d67ac63e2d6b7d59d24a9e87546c71b9c68ca1f"},"selection_proof":"0xb177453ba19233da0625b354d6a43e8621b676243ec4aa5dbb269ac750079cc23fced007ea6cdc1bfb6cc0e2fc796fbb154abed04d9aac7c1171810085beff2b9e5cff961975dbdce4199f39d97b4c46339e26eb7946762394905dbdb9818afe"},"signature":"0x8f73f3185164454f6807549bcbf9d1b0b5516279f35ead1a97812da5db43088de344fdc46aaafd20650bd6685515fb4e18f9f053e9e3691065f8a87f6160456ef8aa550f969ef8260368aae3e450e8763c6317f40b09863ad9b265a0e618e472"}]`) + return requestRTT(ctx, fmt.Sprintf("%v/eth/v1/validator/contribution_and_proofs", target), http.MethodPost, body, 200) +} diff --git a/cmd/testbeacon_internal_test.go b/cmd/testbeacon_internal_test.go index 13d5d933a..38ec77e2b 100644 --- a/cmd/testbeacon_internal_test.go +++ b/cmd/testbeacon_internal_test.go @@ -41,7 +41,7 @@ func TestBeaconTest(t *testing.T) { name: "default scenario", config: testBeaconConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: nil, Timeout: time.Minute, @@ -51,11 +51,18 @@ func TestBeaconTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ mockedBeaconNode.URL: { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "isSynced", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "peerCount", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: 
"pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Version", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Synced", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PeerCount", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate1", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate10", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate100", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate500", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate1000", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "SimulateCustom", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, }, @@ -65,7 +72,7 @@ func TestBeaconTest(t *testing.T) { name: "connection refused", config: testBeaconConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: nil, Timeout: time.Minute, @@ -73,22 +80,7 @@ func TestBeaconTest(t *testing.T) { Endpoints: []string{endpoint1, endpoint2}, }, expected: testCategoryResult{ - Targets: map[string][]testResult{ - endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - 
{Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - }, - endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - }, - }, + Targets: defaultFailingBNTests(t, endpoint1, endpoint2, port1, port2), }, expectedErr: "", }, @@ -96,7 +88,7 @@ func TestBeaconTest(t *testing.T) { name: "timeout", config: testBeaconConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: nil, Timeout: 100 * time.Nanosecond, @@ -106,10 +98,10 @@ func TestBeaconTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: 
errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, }, }, @@ -119,7 +111,7 @@ func TestBeaconTest(t *testing.T) { name: "quiet", config: testBeaconConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: true, TestCases: nil, Timeout: time.Minute, @@ -127,22 +119,7 @@ func TestBeaconTest(t *testing.T) { Endpoints: []string{endpoint1, endpoint2}, }, expected: testCategoryResult{ - Targets: map[string][]testResult{ - endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - }, - endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: 
"", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - }, - }, + Targets: defaultFailingBNTests(t, endpoint1, endpoint2, port1, port2), }, expectedErr: "", }, @@ -150,7 +127,7 @@ func TestBeaconTest(t *testing.T) { name: "unsupported test", config: testBeaconConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: []string{"notSupportedTest"}, Timeout: time.Minute, @@ -164,9 +141,9 @@ func TestBeaconTest(t *testing.T) { name: "custom test cases", config: testBeaconConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, - TestCases: []string{"ping"}, + TestCases: []string{"Ping"}, Timeout: time.Minute, }, Endpoints: []string{endpoint1, endpoint2}, @@ -174,10 +151,10 @@ func TestBeaconTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, }, endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, }, }, }, @@ -187,7 +164,7 @@ func TestBeaconTest(t *testing.T) { name: "write to file", config: testBeaconConfig{ testConfig: 
testConfig{ - OutputToml: "./write-to-file-test.toml.tmp", + OutputJSON: "./write-to-file-test.json.tmp", Quiet: false, TestCases: nil, Timeout: time.Minute, @@ -195,22 +172,7 @@ func TestBeaconTest(t *testing.T) { Endpoints: []string{endpoint1, endpoint2}, }, expected: testCategoryResult{ - Targets: map[string][]testResult{ - endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - }, - endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "isSynced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "peerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, - }, - }, + Targets: 
defaultFailingBNTests(t, endpoint1, endpoint2, port1, port2), Score: categoryScoreC, CategoryName: beaconTestCategory, }, @@ -226,7 +188,7 @@ func TestBeaconTest(t *testing.T) { t.Run(test.name, func(t *testing.T) { var buf bytes.Buffer ctx := context.Background() - err := runTestBeacon(ctx, &buf, test.config) + _, err := runTestBeacon(ctx, &buf, test.config) if test.expectedErr != "" { require.ErrorContains(t, err, test.expectedErr) return @@ -235,7 +197,7 @@ func TestBeaconTest(t *testing.T) { } defer func() { if test.cleanup != nil { - test.cleanup(t, test.config.OutputToml) + test.cleanup(t, test.config.OutputJSON) } }() @@ -245,13 +207,46 @@ func TestBeaconTest(t *testing.T) { testWriteOut(t, test.expected, buf) } - if test.config.OutputToml != "" { - testWriteFile(t, test.expected, test.config.OutputToml) + if test.config.OutputJSON != "" { + testWriteFile(t, test.expected, test.config.OutputJSON) } }) } } +func defaultFailingBNTests(_ *testing.T, endpoint1 string, endpoint2 string, port1 int, port2 int) map[string][]testResult { + return map[string][]testResult{ + endpoint1: { + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "PingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "Version", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "Synced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "PeerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "PingLoad", Verdict: 
testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate1", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate10", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate100", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate500", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate1000", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "SimulateCustom", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + }, + endpoint2: { + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "PingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "Version", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "Synced", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "PeerCount", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "PingLoad", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate1", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate10", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate100", 
Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate500", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Simulate1000", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "SimulateCustom", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + }, + } +} + func startHealthyMockedBeaconNode(t *testing.T) *httptest.Server { t.Helper() @@ -264,6 +259,9 @@ func startHealthyMockedBeaconNode(t *testing.T) *httptest.Server { case "/eth/v1/node/peers": _, err := w.Write([]byte(`{"meta":{"count":500}}`)) require.NoError(t, err) + case "/eth/v1/node/version": + _, err := w.Write([]byte(`{"data":{"version":"BeaconNodeProvider/v1.0.0/linux_x86_64"}}`)) + require.NoError(t, err) } w.WriteHeader(http.StatusOK) })) @@ -286,15 +284,17 @@ func TestBeaconTestFlags(t *testing.T) { expectedErr: "required flag(s) \"endpoints\" not set", }, { - name: "no output toml on quiet", + name: "no output json on quiet", args: []string{"beacon", "--endpoints=\"test.endpoint\"", "--quiet"}, - expectedErr: "on --quiet, an --output-toml is required", + expectedErr: "on --quiet, an --output-json is required", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - cmd := newAlphaCmd(newTestBeaconCmd(func(context.Context, io.Writer, testBeaconConfig) error { return nil })) + cmd := newAlphaCmd(newTestBeaconCmd(func(context.Context, io.Writer, testBeaconConfig) (testCategoryResult, error) { + return testCategoryResult{}, nil + })) cmd.SetArgs(test.args) err := cmd.Execute() if test.expectedErr != "" { diff --git a/cmd/testperformance.go b/cmd/testinfra.go similarity index 79% rename from cmd/testperformance.go rename to cmd/testinfra.go index 7acd4b839..4eae88f39 100644 --- a/cmd/testperformance.go +++ b/cmd/testinfra.go @@ -26,7 +26,7 @@ import ( "github.com/obolnetwork/charon/app/z" ) -type 
testPerformanceConfig struct { +type testInfraConfig struct { testConfig DiskIOTestFileDir string DiskIOBlockSizeKb int @@ -75,54 +75,57 @@ const ( var errFioNotFound = errors.New("fio command not found, install fio from https://fio.readthedocs.io/en/latest/fio_doc.html#binary-packages or using the package manager of your choice (apt, yum, brew, etc.)") -func newTestPerformanceCmd(runFunc func(context.Context, io.Writer, testPerformanceConfig) error) *cobra.Command { - var config testPerformanceConfig +func newTestInfraCmd(runFunc func(context.Context, io.Writer, testInfraConfig) (res testCategoryResult, err error)) *cobra.Command { + var config testInfraConfig cmd := &cobra.Command{ - Use: "performance", - Short: "Run multiple hardware and connectivity performance tests", - Long: `Run multiple hardware and connectivity performance tests. Verify that Charon is running on host with sufficient capabilities.`, + Use: "infra", + Short: "Run multiple hardware and internet connectivity tests", + Long: `Run multiple hardware and internet connectivity tests. Verify that Charon is running on host with sufficient capabilities.`, Args: cobra.NoArgs, PreRunE: func(cmd *cobra.Command, _ []string) error { return mustOutputToFileOnQuiet(cmd) }, RunE: func(cmd *cobra.Command, _ []string) error { - return runFunc(cmd.Context(), cmd.OutOrStdout(), config) + _, err := runFunc(cmd.Context(), cmd.OutOrStdout(), config) + return err }, } bindTestFlags(cmd, &config.testConfig) - bindTestPerformanceFlags(cmd, &config) + bindTestInfraFlags(cmd, &config, "") return cmd } -func bindTestPerformanceFlags(cmd *cobra.Command, config *testPerformanceConfig) { - cmd.Flags().StringVar(&config.DiskIOTestFileDir, "disk-io-test-file-dir", "", "Directory at which disk performance will be measured. If none specified, current user's home directory will be used.") - cmd.Flags().IntVar(&config.DiskIOBlockSizeKb, "disk-io-block-size-kb", 4096, "The block size in kilobytes used for I/O units. 
Same value applies for both reads and writes.") - cmd.Flags().StringSliceVar(&config.InternetTestServersOnly, "internet-test-servers-only", []string{}, "List of specific server names to be included for the internet tests, the best performing one is chosen. If not provided, closest and best performing servers are chosen automatically.") - cmd.Flags().StringSliceVar(&config.InternetTestServersExclude, "internet-test-servers-exclude", []string{}, "List of server names to be excluded from the tests. To be specified only if you experience issues with a server that is wrongly considered best performing.") +func bindTestInfraFlags(cmd *cobra.Command, config *testInfraConfig, flagsPrefix string) { + cmd.Flags().StringVar(&config.DiskIOTestFileDir, flagsPrefix+"disk-io-test-file-dir", "", "Directory at which disk performance will be measured. If none specified, current user's home directory will be used.") + cmd.Flags().IntVar(&config.DiskIOBlockSizeKb, flagsPrefix+"disk-io-block-size-kb", 4096, "The block size in kilobytes used for I/O units. Same value applies for both reads and writes.") + cmd.Flags().StringSliceVar(&config.InternetTestServersOnly, flagsPrefix+"internet-test-servers-only", []string{}, "List of specific server names to be included for the internet tests, the best performing one is chosen. If not provided, closest and best performing servers are chosen automatically.") + cmd.Flags().StringSliceVar(&config.InternetTestServersExclude, flagsPrefix+"internet-test-servers-exclude", []string{}, "List of server names to be excluded from the tests. 
To be specified only if you experience issues with a server that is wrongly considered best performing.") } -func supportedPerformanceTestCases() map[testCaseName]func(context.Context, *testPerformanceConfig) testResult { - return map[testCaseName]func(context.Context, *testPerformanceConfig) testResult{ - {name: "diskWriteSpeed", order: 1}: performanceDiskWriteSpeedTest, - {name: "diskWriteIOPS", order: 2}: performanceDiskWriteIOPSTest, - {name: "diskReadSpeed", order: 3}: performanceDiskReadSpeedTest, - {name: "diskReadIOPS", order: 4}: performanceDiskReadIOPSTest, - {name: "availableMemory", order: 5}: performanceAvailableMemoryTest, - {name: "totalMemory", order: 6}: performanceTotalMemoryTest, - {name: "internetLatency", order: 7}: performanceInternetLatencyTest, - {name: "internetDownloadSpeed", order: 8}: performanceInternetDownloadSpeedTest, - {name: "internetUploadSpeed", order: 9}: performanceInternetUploadSpeedTest, +func supportedInfraTestCases() map[testCaseName]func(context.Context, *testInfraConfig) testResult { + return map[testCaseName]func(context.Context, *testInfraConfig) testResult{ + {name: "DiskWriteSpeed", order: 1}: infraDiskWriteSpeedTest, + {name: "DiskWriteIOPS", order: 2}: infraDiskWriteIOPSTest, + {name: "DiskReadSpeed", order: 3}: infraDiskReadSpeedTest, + {name: "DiskReadIOPS", order: 4}: infraDiskReadIOPSTest, + {name: "AvailableMemory", order: 5}: infraAvailableMemoryTest, + {name: "TotalMemory", order: 6}: infraTotalMemoryTest, + {name: "InternetLatency", order: 7}: infraInternetLatencyTest, + {name: "InternetDownloadSpeed", order: 8}: infraInternetDownloadSpeedTest, + {name: "InternetUploadSpeed", order: 9}: infraInternetUploadSpeedTest, } } -func runTestPerformance(ctx context.Context, w io.Writer, cfg testPerformanceConfig) (err error) { - testCases := supportedPerformanceTestCases() +func runTestInfra(ctx context.Context, w io.Writer, cfg testInfraConfig) (res testCategoryResult, err error) { + log.Info(ctx, "Starting hardware 
performance and network connectivity test") + + testCases := supportedInfraTestCases() queuedTests := filterTests(maps.Keys(testCases), cfg.testConfig) if len(queuedTests) == 0 { - return errors.New("test case not supported") + return res, errors.New("test case not supported") } sortTests(queuedTests) @@ -133,7 +136,7 @@ func runTestPerformance(ctx context.Context, w io.Writer, cfg testPerformanceCon testResults := make(map[string][]testResult) startTime := time.Now() - go testSinglePerformance(timeoutCtx, queuedTests, testCases, cfg, testResultsChan) + go testSingleInfra(timeoutCtx, queuedTests, testCases, cfg, testResultsChan) for result := range testResultsChan { maps.Copy(testResults, result) @@ -150,8 +153,8 @@ func runTestPerformance(ctx context.Context, w io.Writer, cfg testPerformanceCon } } - res := testCategoryResult{ - CategoryName: performanceTestCategory, + res = testCategoryResult{ + CategoryName: infraTestCategory, Targets: testResults, ExecutionTime: execTime, Score: score, @@ -160,26 +163,28 @@ func runTestPerformance(ctx context.Context, w io.Writer, cfg testPerformanceCon if !cfg.Quiet { err = writeResultToWriter(res, w) if err != nil { - return err + return res, err } } - if cfg.OutputToml != "" { - err = writeResultToFile(res, cfg.OutputToml) + if cfg.OutputJSON != "" { + err = writeResultToFile(res, cfg.OutputJSON) if err != nil { - return err + return res, err } } - return nil + return res, nil } -func testSinglePerformance(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]func(context.Context, *testPerformanceConfig) testResult, cfg testPerformanceConfig, resCh chan map[string][]testResult) { +// hardware and internet connectivity tests + +func testSingleInfra(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]func(context.Context, *testInfraConfig) testResult, cfg testInfraConfig, resCh chan map[string][]testResult) { defer close(resCh) singleTestResCh := make(chan testResult) 
allTestRes := []testResult{} - // run all performance tests for a performance client, pushing each completed test to the channel until all are complete or timeout occurs - go testPerformance(ctx, queuedTestCases, allTestCases, cfg, singleTestResCh) + // run all infra tests for a client, pushing each completed test to the channel until all are complete or timeout occurs + go testInfra(ctx, queuedTestCases, allTestCases, cfg, singleTestResCh) testCounter := 0 finished := false @@ -195,9 +200,7 @@ func testSinglePerformance(ctx context.Context, queuedTestCases []testCaseName, finished = true break } - testName = queuedTestCases[testCounter].name testCounter++ - result.Name = testName allTestRes = append(allTestRes, result) } } @@ -205,7 +208,7 @@ func testSinglePerformance(ctx context.Context, queuedTestCases []testCaseName, resCh <- map[string][]testResult{"local": allTestRes} } -func testPerformance(ctx context.Context, queuedTests []testCaseName, allTests map[testCaseName]func(context.Context, *testPerformanceConfig) testResult, cfg testPerformanceConfig, ch chan testResult) { +func testInfra(ctx context.Context, queuedTests []testCaseName, allTests map[testCaseName]func(context.Context, *testInfraConfig) testResult, cfg testInfraConfig, ch chan testResult) { defer close(ch) for _, t := range queuedTests { select { @@ -217,28 +220,7 @@ func testPerformance(ctx context.Context, queuedTests []testCaseName, allTests m } } -func fioCommand(ctx context.Context, filename string, blocksize int, operation string) ([]byte, error) { - //nolint:gosec - cmd, err := exec.CommandContext(ctx, "fio", - "--name=fioTest", - fmt.Sprintf("--filename=%v/fiotest", filename), - fmt.Sprintf("--size=%vMb", diskOpsMBsTotal/diskOpsNumOfJobs), - fmt.Sprintf("--blocksize=%vk", blocksize), - fmt.Sprintf("--numjobs=%v", diskOpsNumOfJobs), - fmt.Sprintf("--rw=%v", operation), - "--direct=1", - "--runtime=60s", - "--group_reporting", - "--output-format=json", - ).Output() - if err != nil { - 
return nil, errors.Wrap(err, "exec fio command") - } - - return cmd, nil -} - -func performanceDiskWriteSpeedTest(ctx context.Context, conf *testPerformanceConfig) testResult { +func infraDiskWriteSpeedTest(ctx context.Context, conf *testInfraConfig) testResult { testRes := testResult{Name: "DiskWriteSpeed"} var err error @@ -290,7 +272,7 @@ func performanceDiskWriteSpeedTest(ctx context.Context, conf *testPerformanceCon return testRes } -func performanceDiskWriteIOPSTest(ctx context.Context, conf *testPerformanceConfig) testResult { +func infraDiskWriteIOPSTest(ctx context.Context, conf *testInfraConfig) testResult { testRes := testResult{Name: "DiskWriteIOPS"} var err error @@ -341,7 +323,7 @@ func performanceDiskWriteIOPSTest(ctx context.Context, conf *testPerformanceConf return testRes } -func performanceDiskReadSpeedTest(ctx context.Context, conf *testPerformanceConfig) testResult { +func infraDiskReadSpeedTest(ctx context.Context, conf *testInfraConfig) testResult { testRes := testResult{Name: "DiskReadSpeed"} var err error @@ -393,7 +375,7 @@ func performanceDiskReadSpeedTest(ctx context.Context, conf *testPerformanceConf return testRes } -func performanceDiskReadIOPSTest(ctx context.Context, conf *testPerformanceConfig) testResult { +func infraDiskReadIOPSTest(ctx context.Context, conf *testInfraConfig) testResult { testRes := testResult{Name: "DiskReadIOPS"} var err error @@ -444,87 +426,7 @@ func performanceDiskReadIOPSTest(ctx context.Context, conf *testPerformanceConfi return testRes } -func availableMemoryLinux(context.Context) (int64, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return 0, errors.Wrap(err, "open /proc/meminfo") - } - scanner := bufio.NewScanner(file) - if scanner.Err() != nil { - return 0, errors.Wrap(err, "new scanner") - } - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, "MemAvailable") { - continue - } - splitText := strings.Split(line, ": ") - kbs := 
strings.Trim(strings.Split(splitText[1], "kB")[0], " ") - kbsInt, err := strconv.ParseInt(kbs, 10, 64) - if err != nil { - return 0, errors.Wrap(err, "parse MemAvailable int") - } - - return kbsInt * 1024, nil - } - - return 0, errors.New("memAvailable not found in /proc/meminfo") -} - -func availableMemoryMacos(ctx context.Context) (int64, error) { - pageSizeBytes, err := exec.CommandContext(ctx, "pagesize").Output() - if err != nil { - return 0, errors.Wrap(err, "run pagesize") - } - memorySizePerPage, err := strconv.ParseInt(strings.TrimSuffix(string(pageSizeBytes), "\n"), 10, 64) - if err != nil { - return 0, errors.Wrap(err, "parse memorySizePerPage int") - } - - out, err := exec.CommandContext(ctx, "vm_stat").Output() - if err != nil { - return 0, errors.Wrap(err, "run vm_stat") - } - outBuf := bytes.NewBuffer(out) - scanner := bufio.NewScanner(outBuf) - if scanner.Err() != nil { - return 0, errors.Wrap(err, "new scanner") - } - - var pagesFree, pagesInactive, pagesSpeculative int64 - for scanner.Scan() { - line := scanner.Text() - splitText := strings.Split(line, ": ") - - var bytes int64 - var err error - switch { - case strings.Contains(splitText[0], "Pages free"): - bytes, err = strconv.ParseInt(strings.Trim(strings.Split(splitText[1], ".")[0], " "), 10, 64) - if err != nil { - return 0, errors.Wrap(err, "parse Pages free int") - } - pagesFree = bytes - case strings.Contains(splitText[0], "Pages inactive"): - bytes, err = strconv.ParseInt(strings.Trim(strings.Split(splitText[1], ".")[0], " "), 10, 64) - if err != nil { - return 0, errors.Wrap(err, "parse Pages inactive int") - } - pagesInactive = bytes - case strings.Contains(splitText[0], "Pages speculative"): - bytes, err = strconv.ParseInt(strings.Trim(strings.Split(splitText[1], ".")[0], " "), 10, 64) - if err != nil { - return 0, errors.Wrap(err, "parse Pages speculative int") - } - pagesSpeculative = bytes - } - } - - return ((pagesFree + pagesInactive + pagesSpeculative) * memorySizePerPage), nil 
-} - -func performanceAvailableMemoryTest(ctx context.Context, _ *testPerformanceConfig) testResult { +func infraAvailableMemoryTest(ctx context.Context, _ *testInfraConfig) testResult { testRes := testResult{Name: "AvailableMemory"} var availableMemory int64 @@ -559,50 +461,7 @@ func performanceAvailableMemoryTest(ctx context.Context, _ *testPerformanceConfi return testRes } -func totalMemoryLinux(context.Context) (int64, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return 0, errors.Wrap(err, "open /proc/meminfo") - } - scanner := bufio.NewScanner(file) - if scanner.Err() != nil { - return 0, errors.Wrap(err, "new scanner") - } - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, "MemTotal") { - continue - } - splitText := strings.Split(line, ": ") - kbs := strings.Trim(strings.Split(splitText[1], "kB")[0], " ") - kbsInt, err := strconv.ParseInt(kbs, 10, 64) - if err != nil { - return 0, errors.Wrap(err, "parse MemTotal int") - } - - return kbsInt * 1024, nil - } - - return 0, errors.New("memTotal not found in /proc/meminfo") -} - -func totalMemoryMacos(ctx context.Context) (int64, error) { - out, err := exec.CommandContext(ctx, "sysctl", "hw.memsize").Output() - if err != nil { - return 0, errors.Wrap(err, "run sysctl hw.memsize") - } - - memSize := strings.TrimSuffix(strings.Split(string(out), ": ")[1], "\n") - memSizeInt, err := strconv.ParseInt(memSize, 10, 64) - if err != nil { - return 0, errors.Wrap(err, "parse memSize int") - } - - return memSizeInt, nil -} - -func performanceTotalMemoryTest(ctx context.Context, _ *testPerformanceConfig) testResult { +func infraTotalMemoryTest(ctx context.Context, _ *testInfraConfig) testResult { testRes := testResult{Name: "TotalMemory"} var totalMemory int64 @@ -637,46 +496,7 @@ func performanceTotalMemoryTest(ctx context.Context, _ *testPerformanceConfig) t return testRes } -func fetchOoklaServer(_ context.Context, conf *testPerformanceConfig) (speedtest.Server, error) { 
- speedtestClient := speedtest.New() - - serverList, err := speedtestClient.FetchServers() - if err != nil { - return speedtest.Server{}, errors.Wrap(err, "fetch Ookla servers") - } - - var targets speedtest.Servers - - if len(conf.InternetTestServersOnly) != 0 { - for _, server := range serverList { - if slices.Contains(conf.InternetTestServersOnly, server.Name) { - targets = append(targets, server) - } - } - } - - if len(conf.InternetTestServersExclude) != 0 { - var targets speedtest.Servers - for _, server := range serverList { - if !slices.Contains(conf.InternetTestServersExclude, server.Name) { - targets = append(targets, server) - } - } - } - - if targets == nil { - targets = serverList - } - - servers, err := targets.FindServer([]int{}) - if err != nil { - return speedtest.Server{}, errors.Wrap(err, "find Ookla server") - } - - return *servers[0], nil -} - -func performanceInternetLatencyTest(ctx context.Context, conf *testPerformanceConfig) testResult { +func infraInternetLatencyTest(ctx context.Context, conf *testInfraConfig) testResult { testRes := testResult{Name: "InternetLatency"} server, err := fetchOoklaServer(ctx, conf) @@ -708,7 +528,7 @@ func performanceInternetLatencyTest(ctx context.Context, conf *testPerformanceCo return testRes } -func performanceInternetDownloadSpeedTest(ctx context.Context, conf *testPerformanceConfig) testResult { +func infraInternetDownloadSpeedTest(ctx context.Context, conf *testInfraConfig) testResult { testRes := testResult{Name: "InternetDownloadSpeed"} server, err := fetchOoklaServer(ctx, conf) @@ -740,7 +560,7 @@ func performanceInternetDownloadSpeedTest(ctx context.Context, conf *testPerform return testRes } -func performanceInternetUploadSpeedTest(ctx context.Context, conf *testPerformanceConfig) testResult { +func infraInternetUploadSpeedTest(ctx context.Context, conf *testInfraConfig) testResult { testRes := testResult{Name: "InternetUploadSpeed"} server, err := fetchOoklaServer(ctx, conf) @@ -771,3 +591,187 @@ 
func performanceInternetUploadSpeedTest(ctx context.Context, conf *testPerforman return testRes } + +// helper functions + +func fioCommand(ctx context.Context, filename string, blocksize int, operation string) ([]byte, error) { + //nolint:gosec + cmd, err := exec.CommandContext(ctx, "fio", + "--name=fioTest", + fmt.Sprintf("--filename=%v/fiotest", filename), + fmt.Sprintf("--size=%vMb", diskOpsMBsTotal/diskOpsNumOfJobs), + fmt.Sprintf("--blocksize=%vk", blocksize), + fmt.Sprintf("--numjobs=%v", diskOpsNumOfJobs), + fmt.Sprintf("--rw=%v", operation), + "--direct=1", + "--runtime=60s", + "--group_reporting", + "--output-format=json", + ).Output() + if err != nil { + return nil, errors.Wrap(err, "exec fio command") + } + + return cmd, nil +} + +func availableMemoryLinux(context.Context) (int64, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return 0, errors.Wrap(err, "open /proc/meminfo") + } + scanner := bufio.NewScanner(file) + if scanner.Err() != nil { + return 0, errors.Wrap(err, "new scanner") + } + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, "MemAvailable") { + continue + } + splitText := strings.Split(line, ": ") + kbs := strings.Trim(strings.Split(splitText[1], "kB")[0], " ") + kbsInt, err := strconv.ParseInt(kbs, 10, 64) + if err != nil { + return 0, errors.Wrap(err, "parse MemAvailable int") + } + + return kbsInt * 1024, nil + } + + return 0, errors.New("memAvailable not found in /proc/meminfo") +} + +func availableMemoryMacos(ctx context.Context) (int64, error) { + pageSizeBytes, err := exec.CommandContext(ctx, "pagesize").Output() + if err != nil { + return 0, errors.Wrap(err, "run pagesize") + } + memorySizePerPage, err := strconv.ParseInt(strings.TrimSuffix(string(pageSizeBytes), "\n"), 10, 64) + if err != nil { + return 0, errors.Wrap(err, "parse memorySizePerPage int") + } + + out, err := exec.CommandContext(ctx, "vm_stat").Output() + if err != nil { + return 0, errors.Wrap(err, "run vm_stat") + } 
+ outBuf := bytes.NewBuffer(out) + scanner := bufio.NewScanner(outBuf) + if scanner.Err() != nil { + return 0, errors.Wrap(err, "new scanner") + } + + var pagesFree, pagesInactive, pagesSpeculative int64 + for scanner.Scan() { + line := scanner.Text() + splitText := strings.Split(line, ": ") + + var bytes int64 + var err error + switch { + case strings.Contains(splitText[0], "Pages free"): + bytes, err = strconv.ParseInt(strings.Trim(strings.Split(splitText[1], ".")[0], " "), 10, 64) + if err != nil { + return 0, errors.Wrap(err, "parse Pages free int") + } + pagesFree = bytes + case strings.Contains(splitText[0], "Pages inactive"): + bytes, err = strconv.ParseInt(strings.Trim(strings.Split(splitText[1], ".")[0], " "), 10, 64) + if err != nil { + return 0, errors.Wrap(err, "parse Pages inactive int") + } + pagesInactive = bytes + case strings.Contains(splitText[0], "Pages speculative"): + bytes, err = strconv.ParseInt(strings.Trim(strings.Split(splitText[1], ".")[0], " "), 10, 64) + if err != nil { + return 0, errors.Wrap(err, "parse Pages speculative int") + } + pagesSpeculative = bytes + } + } + + return ((pagesFree + pagesInactive + pagesSpeculative) * memorySizePerPage), nil +} + +func totalMemoryLinux(context.Context) (int64, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return 0, errors.Wrap(err, "open /proc/meminfo") + } + scanner := bufio.NewScanner(file) + if scanner.Err() != nil { + return 0, errors.Wrap(err, "new scanner") + } + + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, "MemTotal") { + continue + } + splitText := strings.Split(line, ": ") + kbs := strings.Trim(strings.Split(splitText[1], "kB")[0], " ") + kbsInt, err := strconv.ParseInt(kbs, 10, 64) + if err != nil { + return 0, errors.Wrap(err, "parse MemTotal int") + } + + return kbsInt * 1024, nil + } + + return 0, errors.New("memTotal not found in /proc/meminfo") +} + +func totalMemoryMacos(ctx context.Context) (int64, error) { + out, err := 
exec.CommandContext(ctx, "sysctl", "hw.memsize").Output() + if err != nil { + return 0, errors.Wrap(err, "run sysctl hw.memsize") + } + + memSize := strings.TrimSuffix(strings.Split(string(out), ": ")[1], "\n") + memSizeInt, err := strconv.ParseInt(memSize, 10, 64) + if err != nil { + return 0, errors.Wrap(err, "parse memSize int") + } + + return memSizeInt, nil +} + +func fetchOoklaServer(_ context.Context, conf *testInfraConfig) (speedtest.Server, error) { + speedtestClient := speedtest.New() + + serverList, err := speedtestClient.FetchServers() + if err != nil { + return speedtest.Server{}, errors.Wrap(err, "fetch Ookla servers") + } + + var targets speedtest.Servers + + if len(conf.InternetTestServersOnly) != 0 { + for _, server := range serverList { + if slices.Contains(conf.InternetTestServersOnly, server.Name) { + targets = append(targets, server) + } + } + } + + if len(conf.InternetTestServersExclude) != 0 { + for _, server := range serverList { + if !slices.Contains(conf.InternetTestServersExclude, server.Name) { + targets = append(targets, server) + } + } + } + + if targets == nil { + targets = serverList + } + + servers, err := targets.FindServer([]int{}) + if err != nil { + return speedtest.Server{}, errors.Wrap(err, "find Ookla server") + } + + return *servers[0], nil +} diff --git a/cmd/testperformance_internal_test.go b/cmd/testinfra_internal_test.go similarity index 64% rename from cmd/testperformance_internal_test.go rename to cmd/testinfra_internal_test.go index 80487491f..ffa732581 100644 --- a/cmd/testperformance_internal_test.go +++ b/cmd/testinfra_internal_test.go @@ -17,23 +17,23 @@ import ( "github.com/obolnetwork/charon/app/errors" ) -//go:generate go test . -run=TestPerformanceTest -update +//go:generate go test . 
-run=TestInfraTest -update -func TestPerformanceTest(t *testing.T) { +func TestInfraTest(t *testing.T) { tests := []struct { name string - config testPerformanceConfig + config testInfraConfig expected testCategoryResult expectedErr string cleanup func(*testing.T, string) }{ { name: "default scenario", - config: testPerformanceConfig{ + config: testInfraConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, - TestCases: []string{"availableMemory", "totalMemory", "internetLatency"}, + TestCases: []string{"AvailableMemory", "TotalMemory", "InternetLatency"}, Timeout: time.Minute, }, DiskIOBlockSizeKb: 1, @@ -41,21 +41,21 @@ func TestPerformanceTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ "local": { - {Name: "availableMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "totalMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "internetLatency", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "AvailableMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "TotalMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "InternetLatency", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreC, - CategoryName: performanceTestCategory, + CategoryName: infraTestCategory, }, expectedErr: "", }, { name: "timeout", - config: testPerformanceConfig{ + config: testInfraConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: nil, Timeout: 100 * time.Nanosecond, @@ -65,21 +65,21 @@ func TestPerformanceTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ "local": { - {Name: "diskWriteSpeed", Verdict: testVerdictFail, Measurement: "", Suggestion: "", 
Error: errTimeoutInterrupted}, + {Name: "DiskWriteSpeed", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, }, Score: categoryScoreC, - CategoryName: performanceTestCategory, + CategoryName: infraTestCategory, }, expectedErr: "", }, { name: "quiet", - config: testPerformanceConfig{ + config: testInfraConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: true, - TestCases: []string{"availableMemory", "totalMemory", "internetLatency"}, + TestCases: []string{"AvailableMemory", "TotalMemory", "InternetLatency"}, Timeout: time.Minute, }, DiskIOBlockSizeKb: 1, @@ -87,21 +87,21 @@ func TestPerformanceTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ "local": { - {Name: "availableMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "totalMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "internetLatency", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "AvailableMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "TotalMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "InternetLatency", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreC, - CategoryName: performanceTestCategory, + CategoryName: infraTestCategory, }, expectedErr: "", }, { name: "unsupported test", - config: testPerformanceConfig{ + config: testInfraConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: []string{"notSupportedTest"}, Timeout: time.Minute, @@ -110,17 +110,17 @@ func TestPerformanceTest(t *testing.T) { }, expected: testCategoryResult{ Score: categoryScoreC, - CategoryName: performanceTestCategory, + CategoryName: infraTestCategory, }, 
expectedErr: "test case not supported", }, { name: "custom test cases", - config: testPerformanceConfig{ + config: testInfraConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, - TestCases: []string{"totalMemory"}, + TestCases: []string{"TotalMemory"}, Timeout: time.Minute, }, DiskIOBlockSizeKb: 1, @@ -128,21 +128,21 @@ func TestPerformanceTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ "local": { - {Name: "totalMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "TotalMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreC, - CategoryName: performanceTestCategory, + CategoryName: infraTestCategory, }, expectedErr: "", }, { name: "write to file", - config: testPerformanceConfig{ + config: testInfraConfig{ testConfig: testConfig{ - OutputToml: "./write-to-file-test.toml.tmp", + OutputJSON: "./write-to-file-test.json.tmp", Quiet: false, - TestCases: []string{"availableMemory", "totalMemory", "internetLatency"}, + TestCases: []string{"AvailableMemory", "TotalMemory", "InternetLatency"}, Timeout: time.Minute, }, DiskIOBlockSizeKb: 1, @@ -150,13 +150,13 @@ func TestPerformanceTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ "local": { - {Name: "availableMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "totalMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "internetLatency", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "AvailableMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "TotalMemory", Verdict: testVerdictPoor, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "InternetLatency", Verdict: testVerdictPoor, Measurement: 
"", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreA, - CategoryName: performanceTestCategory, + CategoryName: infraTestCategory, }, expectedErr: "", cleanup: func(t *testing.T, p string) { @@ -170,7 +170,7 @@ func TestPerformanceTest(t *testing.T) { t.Run(test.name, func(t *testing.T) { var buf bytes.Buffer ctx := context.Background() - err := runTestPerformance(ctx, &buf, test.config) + _, err := runTestInfra(ctx, &buf, test.config) if test.expectedErr != "" { require.ErrorContains(t, err, test.expectedErr) return @@ -179,7 +179,7 @@ func TestPerformanceTest(t *testing.T) { } defer func() { if test.cleanup != nil { - test.cleanup(t, test.config.OutputToml) + test.cleanup(t, test.config.OutputJSON) } }() @@ -189,14 +189,14 @@ func TestPerformanceTest(t *testing.T) { testWriteOut(t, test.expected, buf) } - if test.config.OutputToml != "" { - testWriteFile(t, test.expected, test.config.OutputToml) + if test.config.OutputJSON != "" { + testWriteFile(t, test.expected, test.config.OutputJSON) } }) } } -func StartHealthyPerformanceClient(t *testing.T, port int, ready chan bool) error { +func StartHealthyInfraClient(t *testing.T, port int, ready chan bool) error { t.Helper() defer close(ready) @@ -215,7 +215,7 @@ func StartHealthyPerformanceClient(t *testing.T, port int, ready chan bool) erro } } -func TestPerformanceTestFlags(t *testing.T) { +func TestInfraTestFlags(t *testing.T) { tests := []struct { name string args []string @@ -223,19 +223,21 @@ func TestPerformanceTestFlags(t *testing.T) { }{ { name: "default scenario", - args: []string{"performance", "--disk-io-block-size-kb=1"}, + args: []string{"infra", "--disk-io-block-size-kb=1"}, expectedErr: "", }, { - name: "no output toml on quiet", - args: []string{"performance", "--disk-io-block-size-kb=1", "--quiet"}, - expectedErr: "on --quiet, an --output-toml is required", + name: "no output json on quiet", + args: []string{"infra", "--disk-io-block-size-kb=1", "--quiet"}, + expectedErr: "on 
--quiet, an --output-json is required", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - cmd := newAlphaCmd(newTestPerformanceCmd(func(context.Context, io.Writer, testPerformanceConfig) error { return nil })) + cmd := newAlphaCmd(newTestInfraCmd(func(context.Context, io.Writer, testInfraConfig) (testCategoryResult, error) { + return testCategoryResult{}, nil + })) cmd.SetArgs(test.args) err := cmd.Execute() if test.expectedErr != "" { diff --git a/cmd/testmev.go b/cmd/testmev.go index cb1ffd4ff..248f0646b 100644 --- a/cmd/testmev.go +++ b/cmd/testmev.go @@ -3,24 +3,37 @@ package cmd import ( + "bytes" "context" + "encoding/hex" + "encoding/json" "fmt" "io" "net/http" "net/http/httptrace" + "strconv" + "strings" "time" + builderspec "github.com/attestantio/go-builder-client/spec" + eth2deneb "github.com/attestantio/go-eth2-client/api/v1/deneb" + eth2a "github.com/attestantio/go-eth2-client/spec/altair" + eth2p0 "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/spf13/cobra" "golang.org/x/exp/maps" "golang.org/x/sync/errgroup" "github.com/obolnetwork/charon/app/errors" + "github.com/obolnetwork/charon/app/log" "github.com/obolnetwork/charon/app/z" ) type testMEVConfig struct { testConfig - Endpoints []string + Endpoints []string + BeaconNodeEndpoint string + LoadTest bool + LoadTestBlocks uint } type testCaseMEV func(context.Context, *testMEVConfig, string) testResult @@ -28,48 +41,70 @@ type testCaseMEV func(context.Context, *testMEVConfig, string) testResult const ( thresholdMEVMeasureAvg = 40 * time.Millisecond thresholdMEVMeasurePoor = 100 * time.Millisecond + thresholdMEVBlockAvg = 500 * time.Millisecond + thresholdMEVBlockPoor = 800 * time.Millisecond ) -func newTestMEVCmd(runFunc func(context.Context, io.Writer, testMEVConfig) error) *cobra.Command { +var errStatusCodeNot200 = errors.New("status code not 200 OK") + +func newTestMEVCmd(runFunc func(context.Context, io.Writer, testMEVConfig) (testCategoryResult, error)) 
*cobra.Command { var config testMEVConfig cmd := &cobra.Command{ Use: "mev", - Short: "Run multiple tests towards mev nodes", - Long: `Run multiple tests towards mev nodes. Verify that Charon can efficiently interact with MEV Node(s).`, + Short: "Run multiple tests towards MEV relays", + Long: `Run multiple tests towards MEV relays. Verify that Charon can efficiently interact with MEV relay(s).`, Args: cobra.NoArgs, PreRunE: func(cmd *cobra.Command, _ []string) error { return mustOutputToFileOnQuiet(cmd) }, RunE: func(cmd *cobra.Command, _ []string) error { - return runFunc(cmd.Context(), cmd.OutOrStdout(), config) + _, err := runFunc(cmd.Context(), cmd.OutOrStdout(), config) + return err }, } bindTestFlags(cmd, &config.testConfig) - bindTestMEVFlags(cmd, &config) + bindTestMEVFlags(cmd, &config, "") + + wrapPreRunE(cmd, func(cmd *cobra.Command, _ []string) error { + loadTest := cmd.Flags().Lookup("load-test").Value.String() + beaconNodeEndpoint := cmd.Flags().Lookup("beacon-node-endpoint").Value.String() + + if loadTest == "true" && beaconNodeEndpoint == "" { + return errors.New("beacon-node-endpoint should be specified when load-test is") + } + + return nil + }) return cmd } -func bindTestMEVFlags(cmd *cobra.Command, config *testMEVConfig) { - const endpoints = "endpoints" - cmd.Flags().StringSliceVar(&config.Endpoints, endpoints, nil, "[REQUIRED] Comma separated list of one or more MEV relay endpoint URLs.") - mustMarkFlagRequired(cmd, endpoints) +func bindTestMEVFlags(cmd *cobra.Command, config *testMEVConfig, flagsPrefix string) { + cmd.Flags().StringSliceVar(&config.Endpoints, flagsPrefix+"endpoints", nil, "[REQUIRED] Comma separated list of one or more MEV relay endpoint URLs.") + cmd.Flags().StringVar(&config.BeaconNodeEndpoint, flagsPrefix+"beacon-node-endpoint", "", "[REQUIRED] Beacon node endpoint URL used for block creation test.") + cmd.Flags().BoolVar(&config.LoadTest, flagsPrefix+"load-test", false, "Enable load test.") + 
cmd.Flags().UintVar(&config.LoadTestBlocks, flagsPrefix+"load-test-blocks", 3, "Amount of blocks the 'createMultipleBlocks' test will create.") + mustMarkFlagRequired(cmd, flagsPrefix+"endpoints") } func supportedMEVTestCases() map[testCaseName]testCaseMEV { return map[testCaseName]testCaseMEV{ - {name: "ping", order: 1}: mevPingTest, - {name: "pingMeasure", order: 2}: mevPingMeasureTest, + {name: "Ping", order: 1}: mevPingTest, + {name: "PingMeasure", order: 2}: mevPingMeasureTest, + {name: "CreateBlock", order: 3}: mevCreateBlockTest, + {name: "CreateMultipleBlocks", order: 4}: mevCreateMultipleBlocksTest, } } -func runTestMEV(ctx context.Context, w io.Writer, cfg testMEVConfig) (err error) { +func runTestMEV(ctx context.Context, w io.Writer, cfg testMEVConfig) (res testCategoryResult, err error) { + log.Info(ctx, "Starting MEV relays test") + testCases := supportedMEVTestCases() queuedTests := filterTests(maps.Keys(testCases), cfg.testConfig) if len(queuedTests) == 0 { - return errors.New("test case not supported") + return res, errors.New("test case not supported") } sortTests(queuedTests) @@ -98,7 +133,7 @@ func runTestMEV(ctx context.Context, w io.Writer, cfg testMEVConfig) (err error) } } - res := testCategoryResult{ + res = testCategoryResult{ CategoryName: mevTestCategory, Targets: testResults, ExecutionTime: execTime, @@ -108,20 +143,22 @@ func runTestMEV(ctx context.Context, w io.Writer, cfg testMEVConfig) (err error) if !cfg.Quiet { err = writeResultToWriter(res, w) if err != nil { - return err + return res, err } } - if cfg.OutputToml != "" { - err = writeResultToFile(res, cfg.OutputToml) + if cfg.OutputJSON != "" { + err = writeResultToFile(res, cfg.OutputJSON) if err != nil { - return err + return res, err } } - return nil + return res, nil } +// mev relays tests + func testAllMEVs(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCaseMEV, conf testMEVConfig, allMEVsResCh chan map[string][]testResult) { defer 
close(allMEVsResCh) // run tests for all mev nodes @@ -173,14 +210,13 @@ func testSingleMEV(ctx context.Context, queuedTestCases []testCaseName, allTestC finished = true break } - testName = queuedTestCases[testCounter].name testCounter++ - result.Name = testName allTestRes = append(allTestRes, result) } } - resCh <- map[string][]testResult{target: allTestRes} + relayName := formatMEVRelayName(target) + resCh <- map[string][]testResult{relayName: allTestRes} return nil } @@ -213,7 +249,7 @@ func mevPingTest(ctx context.Context, _ *testMEVConfig, target string) testResul defer resp.Body.Close() if resp.StatusCode > 399 { - return failedTestResult(testRes, errors.New("status code %v", z.Int("status_code", resp.StatusCode))) + return failedTestResult(testRes, errors.New(httpStatusError(resp.StatusCode))) } testRes.Verdict = testVerdictOk @@ -224,40 +260,372 @@ func mevPingTest(ctx context.Context, _ *testMEVConfig, target string) testResul func mevPingMeasureTest(ctx context.Context, _ *testMEVConfig, target string) testResult { testRes := testResult{Name: "PingMeasure"} + rtt, err := requestRTT(ctx, fmt.Sprintf("%v/eth/v1/builder/status", target), http.MethodGet, nil, 200) + if err != nil { + return failedTestResult(testRes, err) + } + + testRes = evaluateRTT(rtt, testRes, thresholdMEVMeasureAvg, thresholdMEVMeasurePoor) + + return testRes +} + +func mevCreateBlockTest(ctx context.Context, conf *testMEVConfig, target string) testResult { + testRes := testResult{Name: "CreateBlock"} + + if !conf.LoadTest { + testRes.Verdict = testVerdictSkipped + return testRes + } + + latestBlock, err := latestBeaconBlock(ctx, conf.BeaconNodeEndpoint) + if err != nil { + return failedTestResult(testRes, err) + } + + // wait for beginning of next slot, as the block for current one might have already been proposed + latestBlockTSUnix, err := strconv.ParseInt(latestBlock.Body.ExecutionPayload.Timestamp, 10, 64) + if err != nil { + return failedTestResult(testRes, err) + } + 
latestBlockTS := time.Unix(latestBlockTSUnix, 0) + nextBlockTS := latestBlockTS.Add(slotTime) + for time.Now().Before(nextBlockTS) && ctx.Err() == nil { + sleepWithContext(ctx, time.Millisecond) + } + + latestSlot, err := strconv.ParseInt(latestBlock.Slot, 10, 64) + if err != nil { + return failedTestResult(testRes, err) + } + nextSlot := latestSlot + 1 + epoch := nextSlot / slotsInEpoch + proposerDuties, err := fetchProposersForEpoch(ctx, conf, epoch) + if err != nil { + return failedTestResult(testRes, err) + } + + log.Info(ctx, "Starting attempts for block creation", z.Any("mev_relay", target)) + rtt, err := createMEVBlock(ctx, conf, target, nextSlot, latestBlock, proposerDuties) + if err != nil { + return failedTestResult(testRes, err) + } + testRes = evaluateRTT(rtt, testRes, thresholdMEVBlockAvg, thresholdMEVBlockPoor) + + return testRes +} + +func mevCreateMultipleBlocksTest(ctx context.Context, conf *testMEVConfig, target string) testResult { + testRes := testResult{Name: "CreateMultipleBlocks"} + + if !conf.LoadTest { + testRes.Verdict = testVerdictSkipped + return testRes + } + + latestBlock, err := latestBeaconBlock(ctx, conf.BeaconNodeEndpoint) + if err != nil { + return failedTestResult(testRes, err) + } + + // wait for beginning of next slot, as the block for current one might have already been proposed + latestBlockTSUnix, err := strconv.ParseInt(latestBlock.Body.ExecutionPayload.Timestamp, 10, 64) + if err != nil { + failedTestResult(testRes, err) + } + latestBlockTS := time.Unix(latestBlockTSUnix, 0) + nextBlockTS := latestBlockTS.Add(slotTime) + for time.Now().Before(nextBlockTS) && ctx.Err() == nil { + sleepWithContext(ctx, time.Millisecond) + } + + latestSlot, err := strconv.ParseInt(latestBlock.Slot, 10, 64) + if err != nil { + return failedTestResult(testRes, err) + } + nextSlot := latestSlot + 1 + epoch := nextSlot / slotsInEpoch + proposerDuties, err := fetchProposersForEpoch(ctx, conf, epoch) + if err != nil { + return 
failedTestResult(testRes, err) + } + + allBlocksRTT := []time.Duration{} + log.Info(ctx, "Starting attempts for multiple block creation", z.Any("mev_relay", target), z.Any("blocks", conf.LoadTestBlocks)) + for ctx.Err() == nil { + startIteration := time.Now() + rtt, err := createMEVBlock(ctx, conf, target, nextSlot, latestBlock, proposerDuties) + if err != nil { + return failedTestResult(testRes, err) + } + allBlocksRTT = append(allBlocksRTT, rtt) + if len(allBlocksRTT) == int(conf.LoadTestBlocks) { + break + } + // wait for the next slot - time it took createMEVBlock - 1 sec + sleepWithContext(ctx, slotTime-time.Since(startIteration)%slotTime-time.Second) + startBeaconBlockFetch := time.Now() + // get the new latest block, produced during 'nextSlot' + latestBlock, err = latestBeaconBlock(ctx, conf.BeaconNodeEndpoint) + if err != nil { + return failedTestResult(testRes, err) + } + latestSlot, err := strconv.ParseInt(latestBlock.Slot, 10, 64) + if err != nil { + return failedTestResult(testRes, err) + } + nextSlot = latestSlot + 1 + // wait 1 second - the time it took to fetch the latest block + sleepWithContext(ctx, time.Second-time.Since(startBeaconBlockFetch)) + } + + totalRTT := time.Duration(0) + for _, rtt := range allBlocksRTT { + totalRTT += rtt + } + averageRTT := totalRTT / time.Duration(len(allBlocksRTT)) + + testRes = evaluateRTT(averageRTT, testRes, thresholdMEVBlockAvg, thresholdMEVBlockPoor) + + return testRes +} + +// helper functions + +// Shorten the hash of the MEV relay endpoint +// Example: https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net +// to https://0xac6e...37ae@boost-relay.flashbots.net +func formatMEVRelayName(urlString string) string { + splitScheme := strings.Split(urlString, "://") + if len(splitScheme) == 1 { + return urlString + } + hashSplit := strings.Split(splitScheme[1], "@") + if len(hashSplit) == 1 { + return urlString + } + hash := hashSplit[0] 
+ if !strings.HasPrefix(hash, "0x") || len(hash) < 18 { + return urlString + } + hashShort := hash[:6] + "..." + hash[len(hash)-4:] + + return splitScheme[0] + "://" + hashShort + "@" + hashSplit[1] +} + +func getBlockHeader(ctx context.Context, target string, nextSlot int64, blockHash string, validatorPubKey string) (builderspec.VersionedSignedBuilderBid, time.Duration, error) { var start time.Time var firstByte time.Duration - trace := &httptrace.ClientTrace{ GotFirstResponseByte: func() { firstByte = time.Since(start) }, } - start = time.Now() - targetEndpoint := fmt.Sprintf("%v/eth/v1/builder/status", target) - req, err := http.NewRequestWithContext(httptrace.WithClientTrace(ctx, trace), http.MethodGet, targetEndpoint, nil) + req, err := http.NewRequestWithContext( + httptrace.WithClientTrace(ctx, trace), + http.MethodGet, + fmt.Sprintf("%v/eth/v1/builder/header/%v/%v/%v", target, nextSlot, blockHash, validatorPubKey), + nil) if err != nil { - return failedTestResult(testRes, err) + return builderspec.VersionedSignedBuilderBid{}, 0, errors.Wrap(err, "http request") } resp, err := http.DefaultTransport.RoundTrip(req) if err != nil { - return failedTestResult(testRes, err) + return builderspec.VersionedSignedBuilderBid{}, 0, errors.Wrap(err, "http request rtt") } defer resp.Body.Close() - if resp.StatusCode > 399 { - return failedTestResult(testRes, errors.New("status code %v", z.Int("status_code", resp.StatusCode))) + // the current proposer was not registered with the builder, wait for next block + if resp.StatusCode != http.StatusOK { + return builderspec.VersionedSignedBuilderBid{}, 0, errStatusCodeNot200 + } + rttGetHeader := firstByte + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return builderspec.VersionedSignedBuilderBid{}, 0, errors.Wrap(err, "http response body") } - if firstByte > thresholdMEVMeasurePoor { - testRes.Verdict = testVerdictPoor - } else if firstByte > thresholdMEVMeasureAvg { - testRes.Verdict = testVerdictAvg - } else { 
- testRes.Verdict = testVerdictGood + var builderBid builderspec.VersionedSignedBuilderBid + err = json.Unmarshal(bodyBytes, &builderBid) + if err != nil { + return builderspec.VersionedSignedBuilderBid{}, 0, errors.Wrap(err, "http response json") } - testRes.Measurement = Duration{firstByte}.String() - return testRes + return builderBid, rttGetHeader, nil +} + +func createMEVBlock(ctx context.Context, conf *testMEVConfig, target string, nextSlot int64, latestBlock BeaconBlockMessage, proposerDuties []ProposerDutiesData) (time.Duration, error) { + var rttGetHeader time.Duration + var builderBid builderspec.VersionedSignedBuilderBid + for ctx.Err() == nil { + startIteration := time.Now() + epoch := nextSlot / slotsInEpoch + + validatorPubKey, err := getValidatorPKForSlot(proposerDuties, nextSlot) + if err != nil { + // if no PK found, refresh the proposerDuties + proposerDuties, err = fetchProposersForEpoch(ctx, conf, epoch) + if err != nil { + return 0, err + } + validatorPubKey, err = getValidatorPKForSlot(proposerDuties, nextSlot) + if err != nil { + return 0, err + } + } + + builderBid, rttGetHeader, err = getBlockHeader(ctx, target, nextSlot, latestBlock.Body.ExecutionPayload.BlockHash, validatorPubKey) + if err != nil { + // the current proposer was not registered with the builder, wait for next block + if errors.Is(err, errStatusCodeNot200) { + sleepWithContext(ctx, slotTime-time.Since(startIteration)-time.Second) + startBeaconBlockFetch := time.Now() + latestBlock, err = latestBeaconBlock(ctx, conf.BeaconNodeEndpoint) + if err != nil { + return 0, err + } + nextSlot++ + // wait 1 second - the time it took to fetch the latest block + sleepWithContext(ctx, time.Second-time.Since(startBeaconBlockFetch)) + + continue + } + + return 0, err + } + log.Info(ctx, "Created block headers for slot", z.Any("slot", nextSlot), z.Any("target", target)) + + break + } + + blindedBeaconBlock := eth2deneb.BlindedBeaconBlock{ + Slot: 0, + ProposerIndex: 0, + ParentRoot: 
eth2p0.Root{}, + StateRoot: eth2p0.Root{}, + Body: ð2deneb.BlindedBeaconBlockBody{ + RANDAOReveal: eth2p0.BLSSignature{}, + ETH1Data: ð2p0.ETH1Data{}, + Graffiti: eth2p0.Hash32{}, + ProposerSlashings: []*eth2p0.ProposerSlashing{}, + AttesterSlashings: []*eth2p0.AttesterSlashing{}, + Attestations: []*eth2p0.Attestation{}, + Deposits: []*eth2p0.Deposit{}, + VoluntaryExits: []*eth2p0.SignedVoluntaryExit{}, + SyncAggregate: ð2a.SyncAggregate{}, + ExecutionPayloadHeader: builderBid.Deneb.Message.Header, + }, + } + + sig, err := hex.DecodeString("b9251a82040d4620b8c5665f328ee6c2eaa02d31d71d153f4abba31a7922a981e541e85283f0ced387d26e86aef9386d18c6982b9b5f8759882fe7f25a328180d86e146994ef19d28bc1432baf29751dec12b5f3d65dbbe224d72cf900c6831a") + if err != nil { + return 0, errors.Wrap(err, "decode signature") + } + + payload := eth2deneb.SignedBlindedBeaconBlock{ + Message: &blindedBeaconBlock, + Signature: eth2p0.BLSSignature(sig), + } + payloadJSON, err := json.Marshal(payload) + if err != nil { + return 0, errors.Wrap(err, "signed blinded beacon block json payload marshal") + } + rttSubmitBlock, err := requestRTT(ctx, target+"/eth/v1/builder/blinded_blocks", http.MethodPost, bytes.NewReader(payloadJSON), 400) + if err != nil { + return 0, err + } + + return rttGetHeader + rttSubmitBlock, nil +} + +type BeaconBlock struct { + Data BeaconBlockData `json:"data"` +} + +type BeaconBlockData struct { + Message BeaconBlockMessage `json:"message"` +} + +type BeaconBlockMessage struct { + Slot string `json:"slot"` + Body BeaconBlockBody `json:"body"` +} + +type BeaconBlockBody struct { + ExecutionPayload BeaconBlockExecPayload `json:"execution_payload"` +} + +type BeaconBlockExecPayload struct { + BlockHash string `json:"block_hash"` + Timestamp string `json:"timestamp"` +} + +func latestBeaconBlock(ctx context.Context, endpoint string) (BeaconBlockMessage, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%v/eth/v2/beacon/blocks/head", endpoint), 
nil) + if err != nil { + return BeaconBlockMessage{}, errors.Wrap(err, "http request") + } + resp, err := new(http.Client).Do(req) + if err != nil { + return BeaconBlockMessage{}, errors.Wrap(err, "http request do") + } + defer resp.Body.Close() + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return BeaconBlockMessage{}, errors.Wrap(err, "http response body") + } + + var beaconBlock BeaconBlock + err = json.Unmarshal(bodyBytes, &beaconBlock) + if err != nil { + return BeaconBlockMessage{}, errors.Wrap(err, "http response json") + } + + return beaconBlock.Data.Message, nil +} + +type ProposerDuties struct { + Data []ProposerDutiesData `json:"data"` +} + +type ProposerDutiesData struct { + PubKey string `json:"pubkey"` + Slot string `json:"slot"` +} + +func fetchProposersForEpoch(ctx context.Context, conf *testMEVConfig, epoch int64) ([]ProposerDutiesData, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%v/eth/v1/validator/duties/proposer/%v", conf.BeaconNodeEndpoint, epoch), nil) + if err != nil { + return nil, errors.Wrap(err, "http request") + } + resp, err := new(http.Client).Do(req) + if err != nil { + return nil, errors.Wrap(err, "http request do") + } + defer resp.Body.Close() + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "http response body") + } + + var proposerDuties ProposerDuties + err = json.Unmarshal(bodyBytes, &proposerDuties) + if err != nil { + return nil, errors.Wrap(err, "http response json") + } + + return proposerDuties.Data, nil +} + +func getValidatorPKForSlot(proposers []ProposerDutiesData, slot int64) (string, error) { + slotString := strconv.FormatInt(slot, 10) + for _, s := range proposers { + if s.Slot == slotString { + return s.PubKey, nil + } + } + + return "", errors.New("slot not found") } diff --git a/cmd/testmev_internal_test.go b/cmd/testmev_internal_test.go index 3b62aecb4..5032d8700 100644 --- a/cmd/testmev_internal_test.go +++ 
b/cmd/testmev_internal_test.go @@ -26,6 +26,8 @@ func TestMEVTest(t *testing.T) { endpoint1 := fmt.Sprintf("http://localhost:%v", port1) port2 := testutil.GetFreePort(t) endpoint2 := fmt.Sprintf("http://localhost:%v", port2) + port3 := testutil.GetFreePort(t) + endpoint3 := fmt.Sprintf("http://localhost:%v", port3) mockedMEVNode := StartHealthyMockedMEVNode(t) defer mockedMEVNode.Close() @@ -41,18 +43,46 @@ func TestMEVTest(t *testing.T) { name: "default scenario", config: testMEVConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: nil, Timeout: time.Minute, }, - Endpoints: []string{mockedMEVNode.URL}, + Endpoints: []string{mockedMEVNode.URL}, + BeaconNodeEndpoint: endpoint3, }, expected: testCategoryResult{ Targets: map[string][]testResult{ mockedMEVNode.URL: { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateBlock", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateMultipleBlocks", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + }, + }, + }, + expectedErr: "", + }, + { + name: "default load scenario", + config: testMEVConfig{ + testConfig: testConfig{ + OutputJSON: "", + Quiet: false, + TestCases: nil, + Timeout: time.Minute, + }, + Endpoints: []string{mockedMEVNode.URL}, + LoadTest: true, + BeaconNodeEndpoint: endpoint3, + }, + expected: testCategoryResult{ + Targets: map[string][]testResult{ + mockedMEVNode.URL: { + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasure", 
Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateBlock", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateMultipleBlocks", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, }, @@ -62,7 +92,7 @@ func TestMEVTest(t *testing.T) { name: "connection refused", config: testMEVConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: nil, Timeout: time.Minute, @@ -72,12 +102,16 @@ func TestMEVTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "PingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "CreateBlock", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateMultipleBlocks", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "Ping", Verdict: testVerdictFail, 
Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "PingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "CreateBlock", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateMultipleBlocks", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, }, @@ -87,7 +121,7 @@ func TestMEVTest(t *testing.T) { name: "timeout", config: testMEVConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: nil, Timeout: 100 * time.Nanosecond, @@ -97,10 +131,10 @@ func TestMEVTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, }, }, @@ -110,7 +144,7 @@ func TestMEVTest(t *testing.T) { name: "quiet", config: testMEVConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: true, TestCases: nil, Timeout: time.Minute, @@ -120,12 +154,16 @@ func TestMEVTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection 
refused`, port1))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "PingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "CreateBlock", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateMultipleBlocks", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "PingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "CreateBlock", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateMultipleBlocks", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, }, @@ -135,7 +173,7 @@ func TestMEVTest(t *testing.T) { name: "unsupported test", config: testMEVConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: []string{"notSupportedTest"}, Timeout: time.Minute, @@ -149,9 +187,9 @@ func TestMEVTest(t *testing.T) { name: "custom test cases", config: testMEVConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, - TestCases: []string{"ping"}, + 
TestCases: []string{"Ping"}, Timeout: time.Minute, }, Endpoints: []string{endpoint1, endpoint2}, @@ -159,10 +197,10 @@ func TestMEVTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, }, endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, }, }, }, @@ -172,7 +210,7 @@ func TestMEVTest(t *testing.T) { name: "write to file", config: testMEVConfig{ testConfig: testConfig{ - OutputToml: "./write-to-file-test.toml.tmp", + OutputJSON: "./write-to-file-test.json.tmp", Quiet: false, TestCases: nil, Timeout: time.Minute, @@ -182,12 +220,16 @@ func TestMEVTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ endpoint1: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port1))}}, + {Name: "PingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection 
refused`, port1))}}, + {Name: "CreateBlock", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateMultipleBlocks", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, endpoint2: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, - {Name: "pingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "PingMeasure", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: testResultError{errors.New(fmt.Sprintf(`%v: connect: connection refused`, port2))}}, + {Name: "CreateBlock", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "CreateMultipleBlocks", Verdict: testVerdictSkipped, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreC, @@ -205,7 +247,7 @@ func TestMEVTest(t *testing.T) { t.Run(test.name, func(t *testing.T) { var buf bytes.Buffer ctx := context.Background() - err := runTestMEV(ctx, &buf, test.config) + _, err := runTestMEV(ctx, &buf, test.config) if test.expectedErr != "" { require.ErrorContains(t, err, test.expectedErr) return @@ -214,7 +256,7 @@ func TestMEVTest(t *testing.T) { } defer func() { if test.cleanup != nil { - test.cleanup(t, test.config.OutputToml) + test.cleanup(t, test.config.OutputJSON) } }() @@ -224,8 +266,8 @@ func TestMEVTest(t *testing.T) { testWriteOut(t, test.expected, buf) } - if test.config.OutputToml != "" { - testWriteFile(t, test.expected, test.config.OutputToml) + if test.config.OutputJSON != "" { + testWriteFile(t, test.expected, test.config.OutputJSON) 
} }) } @@ -256,15 +298,22 @@ func TestMEVTestFlags(t *testing.T) { expectedErr: "required flag(s) \"endpoints\" not set", }, { - name: "no output toml on quiet", + name: "no output json on quiet", args: []string{"mev", "--endpoints=\"test.endpoint\"", "--quiet"}, - expectedErr: "on --quiet, an --output-toml is required", + expectedErr: "on --quiet, an --output-json is required", + }, + { + name: "no beacon node endpoint flag on load test", + args: []string{"mev", "--endpoints=\"test.endpoint\"", "--load-test"}, + expectedErr: "beacon-node-endpoint should be specified when load-test is", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - cmd := newAlphaCmd(newTestMEVCmd(func(context.Context, io.Writer, testMEVConfig) error { return nil })) + cmd := newAlphaCmd(newTestMEVCmd(func(context.Context, io.Writer, testMEVConfig) (testCategoryResult, error) { + return testCategoryResult{}, nil + })) cmd.SetArgs(test.args) err := cmd.Execute() if test.expectedErr != "" { diff --git a/cmd/testpeers.go b/cmd/testpeers.go index 8a771ef70..3843cfdd8 100644 --- a/cmd/testpeers.go +++ b/cmd/testpeers.go @@ -6,13 +6,14 @@ import ( "context" "crypto/sha256" "encoding/hex" + "encoding/json" "fmt" "io" "math" "math/rand" "net" "net/http" - "net/http/httptrace" + "os" "slices" "strings" "sync" @@ -24,26 +25,28 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/p2p/protocol/ping" "github.com/spf13/cobra" - "github.com/spf13/pflag" "golang.org/x/exp/maps" "golang.org/x/sync/errgroup" "github.com/obolnetwork/charon/app/errors" "github.com/obolnetwork/charon/app/log" "github.com/obolnetwork/charon/app/z" + "github.com/obolnetwork/charon/cluster" "github.com/obolnetwork/charon/eth2util/enr" "github.com/obolnetwork/charon/p2p" ) type testPeersConfig struct { testConfig - ENRs []string - P2P p2p.Config - Log log.Config - DataDir string - KeepAlive time.Duration - LoadTestDuration time.Duration - DirectConnectionTimeout time.Duration + ENRs 
[]string + P2P p2p.Config + Log log.Config + DataDir string + KeepAlive time.Duration + LoadTestDuration time.Duration + DirectConnectionTimeout time.Duration + ClusterLockFilePath string + ClusterDefinitionFilePath string } type ( @@ -61,7 +64,7 @@ const ( thresholdRelayMeasurePoor = 240 * time.Millisecond ) -func newTestPeersCmd(runFunc func(context.Context, io.Writer, testPeersConfig) error) *cobra.Command { +func newTestPeersCmd(runFunc func(context.Context, io.Writer, testPeersConfig) (testCategoryResult, error)) *cobra.Command { var config testPeersConfig cmd := &cobra.Command{ @@ -73,173 +76,79 @@ func newTestPeersCmd(runFunc func(context.Context, io.Writer, testPeersConfig) e return mustOutputToFileOnQuiet(cmd) }, RunE: func(cmd *cobra.Command, _ []string) error { - return runFunc(cmd.Context(), cmd.OutOrStdout(), config) + _, err := runFunc(cmd.Context(), cmd.OutOrStdout(), config) + return err }, } bindTestFlags(cmd, &config.testConfig) - bindTestPeersFlags(cmd, &config) + bindTestPeersFlags(cmd, &config, "") bindP2PFlags(cmd, &config.P2P) bindDataDirFlag(cmd.Flags(), &config.DataDir) bindTestLogFlags(cmd.Flags(), &config.Log) - return cmd -} + wrapPreRunE(cmd, func(cmd *cobra.Command, _ []string) error { + const ( + enrs = "enrs" + clusterLockFilePath = "cluster-lock-file-path" + clusterDefinitionFilePath = "cluster-definition-file-path" + ) + enrsValue := cmd.Flags().Lookup(enrs).Value.String() + clusterLockPathValue := cmd.Flags().Lookup(clusterLockFilePath).Value.String() + clusterDefinitionPathValue := cmd.Flags().Lookup(clusterDefinitionFilePath).Value.String() + + if enrsValue == "[]" && clusterLockPathValue == "" && clusterDefinitionPathValue == "" { + //nolint:revive // we use our own version of the errors package. 
+ return errors.New(fmt.Sprintf("--%v, --%v or --%v must be specified.", enrs, clusterLockFilePath, clusterDefinitionFilePath)) + } + + if (enrsValue != "[]" && clusterLockPathValue != "") || + (enrsValue != "[]" && clusterDefinitionPathValue != "") || + (clusterLockPathValue != "" && clusterDefinitionPathValue != "") { + //nolint:revive // we use our own version of the errors package. + return errors.New(fmt.Sprintf("Only one of --%v, --%v or --%v should be specified.", enrs, clusterLockFilePath, clusterDefinitionFilePath)) + } + + return nil + }) -func bindTestPeersFlags(cmd *cobra.Command, config *testPeersConfig) { - const enrs = "enrs" - cmd.Flags().StringSliceVar(&config.ENRs, enrs, nil, "[REQUIRED] Comma-separated list of each peer ENR address.") - cmd.Flags().DurationVar(&config.KeepAlive, "keep-alive", 30*time.Minute, "Time to keep TCP node alive after test completion, so connection is open for other peers to test on their end.") - cmd.Flags().DurationVar(&config.LoadTestDuration, "load-test-duration", 30*time.Second, "Time to keep running the load tests in seconds. 
For each second a new continuous ping instance is spawned.") - cmd.Flags().DurationVar(&config.DirectConnectionTimeout, "direct-connection-timeout", 2*time.Minute, "Time to keep trying to establish direct connection to peer.") - mustMarkFlagRequired(cmd, enrs) + return cmd } -func bindTestLogFlags(flags *pflag.FlagSet, config *log.Config) { - flags.StringVar(&config.Format, "log-format", "console", "Log format; console, logfmt or json") - flags.StringVar(&config.Level, "log-level", "info", "Log level; debug, info, warn or error") - flags.StringVar(&config.Color, "log-color", "auto", "Log color; auto, force, disable.") - flags.StringVar(&config.LogOutputPath, "log-output-path", "", "Path in which to write on-disk logs.") +func bindTestPeersFlags(cmd *cobra.Command, config *testPeersConfig, flagsPrefix string) { + cmd.Flags().StringSliceVar(&config.ENRs, flagsPrefix+"enrs", nil, "[REQUIRED] Comma-separated list of each peer ENR address.") + cmd.Flags().DurationVar(&config.KeepAlive, flagsPrefix+"keep-alive", 30*time.Minute, "Time to keep TCP node alive after test completion, so connection is open for other peers to test on their end.") + cmd.Flags().DurationVar(&config.LoadTestDuration, flagsPrefix+"load-test-duration", 30*time.Second, "Time to keep running the load tests in seconds. 
For each second a new continuous ping instance is spawned.") + cmd.Flags().DurationVar(&config.DirectConnectionTimeout, flagsPrefix+"direct-connection-timeout", 2*time.Minute, "Time to keep trying to establish direct connection to peer.") + cmd.Flags().StringVar(&config.ClusterLockFilePath, flagsPrefix+"cluster-lock-file-path", "", "Path to cluster lock file, used to fetch peers' ENR addresses.") + cmd.Flags().StringVar(&config.ClusterDefinitionFilePath, flagsPrefix+"cluster-definition-file-path", "", "Path to cluster definition file, used to fetch peers' ENR addresses.") } func supportedPeerTestCases() map[testCaseName]testCasePeer { return map[testCaseName]testCasePeer{ - {name: "ping", order: 1}: peerPingTest, - {name: "pingMeasure", order: 2}: peerPingMeasureTest, - {name: "pingLoad", order: 3}: peerPingLoadTest, - {name: "directConn", order: 4}: peerDirectConnTest, + {name: "Ping", order: 1}: peerPingTest, + {name: "PingMeasure", order: 2}: peerPingMeasureTest, + {name: "PingLoad", order: 3}: peerPingLoadTest, + {name: "DirectConn", order: 4}: peerDirectConnTest, } } func supportedRelayTestCases() map[testCaseName]testCaseRelay { return map[testCaseName]testCaseRelay{ - {name: "pingRelay", order: 1}: relayPingTest, - {name: "pingMeasureRelay", order: 2}: relayPingMeasureTest, + {name: "PingRelay", order: 1}: relayPingTest, + {name: "PingMeasureRelay", order: 2}: relayPingMeasureTest, } } func supportedSelfTestCases() map[testCaseName]testCasePeerSelf { return map[testCaseName]testCasePeerSelf{ - {name: "libp2pTCPPortOpenTest", order: 1}: libp2pTCPPortOpenTest, - } -} - -func startTCPNode(ctx context.Context, conf testPeersConfig) (host.Host, func(), error) { - var p2pPeers []p2p.Peer - for i, enrString := range conf.ENRs { - enrRecord, err := enr.Parse(enrString) - if err != nil { - return nil, nil, errors.Wrap(err, "decode enr", z.Str("enr", enrString)) - } - - p2pPeer, err := p2p.NewPeerFromENR(enrRecord, i) - if err != nil { - return nil, nil, err - } - - 
p2pPeers = append(p2pPeers, p2pPeer) - } - - p2pPrivKey, err := p2p.LoadPrivKey(conf.DataDir) - if err != nil { - return nil, nil, err - } - - meENR, err := enr.New(p2pPrivKey) - if err != nil { - return nil, nil, err - } - - mePeer, err := p2p.NewPeerFromENR(meENR, len(conf.ENRs)) - if err != nil { - return nil, nil, err - } - - log.Info(ctx, "Self p2p name resolved", z.Any("name", mePeer.Name)) - - p2pPeers = append(p2pPeers, mePeer) - - allENRs := conf.ENRs - allENRs = append(allENRs, meENR.String()) - slices.Sort(allENRs) - allENRsString := strings.Join(allENRs, ",") - allENRsHash := sha256.Sum256([]byte(allENRsString)) - - return setupP2P(ctx, p2pPrivKey, conf.P2P, p2pPeers, allENRsHash[:]) -} - -func setupP2P(ctx context.Context, privKey *k1.PrivateKey, conf p2p.Config, peers []p2p.Peer, enrsHash []byte) (host.Host, func(), error) { - var peerIDs []peer.ID - for _, peer := range peers { - peerIDs = append(peerIDs, peer.ID) - } - - if err := p2p.VerifyP2PKey(peers, privKey); err != nil { - return nil, nil, err - } - - relays, err := p2p.NewRelays(ctx, conf.Relays, hex.EncodeToString(enrsHash)) - if err != nil { - return nil, nil, err - } - - connGater, err := p2p.NewConnGater(peerIDs, relays) - if err != nil { - return nil, nil, err - } - - tcpNode, err := p2p.NewTCPNode(ctx, conf, privKey, connGater, false) - if err != nil { - return nil, nil, err - } - - p2p.RegisterConnectionLogger(ctx, tcpNode, peerIDs) - - for _, relay := range relays { - go p2p.NewRelayReserver(tcpNode, relay)(ctx) - } - - go p2p.NewRelayRouter(tcpNode, peerIDs, relays)(ctx) - - return tcpNode, func() { - err := tcpNode.Close() - if err != nil && !errors.Is(err, context.Canceled) { - log.Error(ctx, "Close TCP node", err) - } - }, nil -} - -func pingPeerOnce(ctx context.Context, tcpNode host.Host, peer p2p.Peer) (ping.Result, error) { - pingSvc := ping.NewPingService(tcpNode) - pingCtx, cancel := context.WithCancel(ctx) - defer cancel() - pingChan := pingSvc.Ping(pingCtx, peer.ID) - 
result, ok := <-pingChan - if !ok { - return ping.Result{}, errors.New("ping channel closed") + {name: "Libp2pTCPPortOpen", order: 1}: libp2pTCPPortOpenTest, } - - return result, nil } -func pingPeerContinuously(ctx context.Context, tcpNode host.Host, peer p2p.Peer, resCh chan<- ping.Result) { - for { - r, err := pingPeerOnce(ctx, tcpNode, peer) - if err != nil { - return - } - - select { - case <-ctx.Done(): - return - case resCh <- r: - awaitTime := rand.Intn(100) //nolint:gosec // weak generator is not an issue here - sleepWithContext(ctx, time.Duration(awaitTime)*time.Millisecond) - } - } -} +func runTestPeers(ctx context.Context, w io.Writer, conf testPeersConfig) (res testCategoryResult, err error) { + log.Info(ctx, "Starting charon peers and relays test") -func runTestPeers(ctx context.Context, w io.Writer, conf testPeersConfig) error { relayTestCases := supportedRelayTestCases() queuedTestsRelay := filterTests(maps.Keys(relayTestCases), conf.testConfig) sortTests(queuedTestsRelay) @@ -253,7 +162,8 @@ func runTestPeers(ctx context.Context, w io.Writer, conf testPeersConfig) error sortTests(queuedTestsSelf) if len(queuedTestsPeer) == 0 && len(queuedTestsSelf) == 0 { - return errors.New("test case not supported") + err = errors.New("test case not supported") + return res, err } timeoutCtx, cancel := context.WithTimeout(ctx, conf.Timeout) @@ -264,7 +174,7 @@ func runTestPeers(ctx context.Context, w io.Writer, conf testPeersConfig) error tcpNode, shutdown, err := startTCPNode(ctx, conf) if err != nil { - return err + return res, err } defer shutdown() @@ -292,7 +202,7 @@ func runTestPeers(ctx context.Context, w io.Writer, conf testPeersConfig) error err = group.Wait() execTime := Duration{time.Since(startTime)} if err != nil { - return errors.Wrap(err, "peers test errgroup") + return res, errors.Wrap(err, "peers test errgroup") } close(testResultsChan) <-doneReading @@ -306,7 +216,7 @@ func runTestPeers(ctx context.Context, w io.Writer, conf testPeersConfig) 
error } } - res := testCategoryResult{ + res = testCategoryResult{ CategoryName: peersTestCategory, Targets: testResults, ExecutionTime: execTime, @@ -316,103 +226,24 @@ func runTestPeers(ctx context.Context, w io.Writer, conf testPeersConfig) error if !conf.Quiet { err = writeResultToWriter(res, w) if err != nil { - return err + return res, err } } - if conf.OutputToml != "" { - err = writeResultToFile(res, conf.OutputToml) + if conf.OutputJSON != "" { + err = writeResultToFile(res, conf.OutputJSON) if err != nil { - return err + return res, err } } log.Info(ctx, "Keeping TCP node alive for peers until keep-alive time is reached...") blockAndWait(ctx, conf.KeepAlive) - return nil -} - -func testAllRelays(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCaseRelay, conf testPeersConfig, allRelaysResCh chan map[string][]testResult) error { - // run tests for all relays - allRelayRes := make(map[string][]testResult) - singleRelayResCh := make(chan map[string][]testResult) - group, _ := errgroup.WithContext(ctx) - - for _, relay := range conf.P2P.Relays { - group.Go(func() error { - return testSingleRelay(ctx, queuedTestCases, allTestCases, conf, relay, singleRelayResCh) - }) - } - - doneReading := make(chan bool) - go func() { - for singleRelayRes := range singleRelayResCh { - maps.Copy(allRelayRes, singleRelayRes) - } - doneReading <- true - }() - - err := group.Wait() - if err != nil { - return errors.Wrap(err, "relays test errgroup") - } - close(singleRelayResCh) - <-doneReading - - allRelaysResCh <- allRelayRes - - return nil -} - -func testSingleRelay(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCaseRelay, conf testPeersConfig, target string, allTestResCh chan map[string][]testResult) error { - singleTestResCh := make(chan testResult) - allTestRes := []testResult{} - relayName := fmt.Sprintf("relay %v", target) - if len(queuedTestCases) == 0 { - allTestResCh <- 
map[string][]testResult{relayName: allTestRes} - return nil - } - - // run all relay tests for a relay, pushing each completed test to the channel until all are complete or timeout occurs - go runRelayTest(ctx, queuedTestCases, allTestCases, conf, target, singleTestResCh) - testCounter := 0 - finished := false - for !finished { - var testName string - select { - case <-ctx.Done(): - testName = queuedTestCases[testCounter].name - allTestRes = append(allTestRes, testResult{Name: testName, Verdict: testVerdictFail, Error: errTimeoutInterrupted}) - finished = true - case result, ok := <-singleTestResCh: - if !ok { - finished = true - continue - } - testName = queuedTestCases[testCounter].name - testCounter++ - result.Name = testName - allTestRes = append(allTestRes, result) - } - } - - allTestResCh <- map[string][]testResult{relayName: allTestRes} - - return nil + return res, nil } -func runRelayTest(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCaseRelay, conf testPeersConfig, target string, testResCh chan testResult) { - defer close(testResCh) - for _, t := range queuedTestCases { - select { - case <-ctx.Done(): - return - default: - testResCh <- allTestCases[t](ctx, &conf, target) - } - } -} +// charon peers tests func testAllPeers(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCasePeer, conf testPeersConfig, tcpNode host.Host, allPeersResCh chan map[string][]testResult) error { // run tests for all peer nodes @@ -420,7 +251,11 @@ func testAllPeers(ctx context.Context, queuedTestCases []testCaseName, allTestCa singlePeerResCh := make(chan map[string][]testResult) group, _ := errgroup.WithContext(ctx) - for _, enr := range conf.ENRs { + enrs, err := fetchENRs(conf) + if err != nil { + return err + } + for _, enr := range enrs { currENR := enr // TODO: can be removed after go1.22 version bump group.Go(func() error { return testSinglePeer(ctx, queuedTestCases, allTestCases, conf, tcpNode, 
currENR, singlePeerResCh) @@ -435,7 +270,7 @@ func testAllPeers(ctx context.Context, queuedTestCases []testCaseName, allTestCa doneReading <- true }() - err := group.Wait() + err = group.Wait() if err != nil { return errors.Wrap(err, "peers test errgroup") } @@ -459,7 +294,8 @@ func testSinglePeer(ctx context.Context, queuedTestCases []testCaseName, allTest return err } - nameENR := fmt.Sprintf("peer %v %v", peerTarget.Name, target) + formatENR := target[:13] + "..." + target[len(target)-4:] // enr:- + first 8 chars + ... + last 4 chars + nameENR := fmt.Sprintf("peer %v %v", peerTarget.Name, formatENR) if len(queuedTestCases) == 0 { allTestResCh <- map[string][]testResult{nameENR: allTestRes} @@ -484,9 +320,7 @@ func testSinglePeer(ctx context.Context, queuedTestCases []testCaseName, allTest finished = true continue } - testName = queuedTestCases[testCounter].name testCounter++ - result.Name = testName allTestRes = append(allTestRes, result) } } @@ -509,53 +343,6 @@ func runPeerTest(ctx context.Context, queuedTestCases []testCaseName, allTestCas } } -func testSelf(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCasePeerSelf, conf testPeersConfig, allTestResCh chan map[string][]testResult) error { - singleTestResCh := make(chan testResult) - allTestRes := []testResult{} - if len(queuedTestCases) == 0 { - allTestResCh <- map[string][]testResult{"self": allTestRes} - return nil - } - go runSelfTest(ctx, queuedTestCases, allTestCases, conf, singleTestResCh) - - testCounter := 0 - finished := false - for !finished { - var testName string - select { - case <-ctx.Done(): - testName = queuedTestCases[testCounter].name - allTestRes = append(allTestRes, testResult{Name: testName, Verdict: testVerdictFail, Error: errTimeoutInterrupted}) - finished = true - case result, ok := <-singleTestResCh: - if !ok { - finished = true - continue - } - testName = queuedTestCases[testCounter].name - testCounter++ - result.Name = testName - allTestRes = 
append(allTestRes, result) - } - } - - allTestResCh <- map[string][]testResult{"self": allTestRes} - - return nil -} - -func runSelfTest(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCasePeerSelf, conf testPeersConfig, ch chan testResult) { - defer close(ch) - for _, t := range queuedTestCases { - select { - case <-ctx.Done(): - return - default: - ch <- allTestCases[t](ctx, &conf) - } - } -} - func peerPingTest(ctx context.Context, _ *testPeersConfig, tcpNode host.Host, peer p2p.Peer) testResult { testRes := testResult{Name: "Ping"} @@ -627,7 +414,7 @@ func peerPingLoadTest(ctx context.Context, conf *testPeersConfig, tcpNode host.H ) testRes := testResult{Name: "PingLoad"} - testResCh := make(chan ping.Result, math.MaxInt16) + testResCh := make(chan time.Duration, math.MaxInt16) pingCtx, cancel := context.WithTimeout(ctx, conf.LoadTestDuration) defer cancel() ticker := time.NewTicker(time.Second) @@ -649,48 +436,11 @@ func peerPingLoadTest(ctx context.Context, conf *testPeersConfig, tcpNode host.H close(testResCh) log.Info(ctx, "Ping load tests finished", z.Any("target", peer.Name)) - highestRTT := time.Duration(0) - for val := range testResCh { - if val.RTT > highestRTT { - highestRTT = val.RTT - } - } - if highestRTT > thresholdPeersLoadPoor { - testRes.Verdict = testVerdictPoor - } else if highestRTT > thresholdPeersLoadAvg { - testRes.Verdict = testVerdictAvg - } else { - testRes.Verdict = testVerdictGood - } - testRes.Measurement = Duration{highestRTT}.String() + testRes = evaluateHighestRTTScores(testResCh, testRes, thresholdPeersLoadAvg, thresholdPeersLoadPoor) return testRes } -func dialLibp2pTCPIP(ctx context.Context, address string) error { - d := net.Dialer{Timeout: time.Second} - conn, err := d.DialContext(ctx, "tcp", address) - if err != nil { - return errors.Wrap(err, "net dial") - } - defer conn.Close() - buf := new(strings.Builder) - _, err = io.CopyN(buf, conn, 19) - if err != nil { - return 
errors.Wrap(err, "io copy") - } - if !strings.Contains(buf.String(), "/multistream/1.0.0") { - return errors.New("multistream not found", z.Any("found", buf.String()), z.Any("address", address)) - } - - err = conn.Close() - if err != nil { - return errors.Wrap(err, "close conn") - } - - return nil -} - func peerDirectConnTest(ctx context.Context, conf *testPeersConfig, tcpNode host.Host, p2pPeer p2p.Peer) testResult { testRes := testResult{Name: "DirectConn"} @@ -721,6 +471,55 @@ func peerDirectConnTest(ctx context.Context, conf *testPeersConfig, tcpNode host return testRes } +// self tests + +func testSelf(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCasePeerSelf, conf testPeersConfig, allTestResCh chan map[string][]testResult) error { + singleTestResCh := make(chan testResult) + allTestRes := []testResult{} + if len(queuedTestCases) == 0 { + allTestResCh <- map[string][]testResult{"self": allTestRes} + return nil + } + go runSelfTest(ctx, queuedTestCases, allTestCases, conf, singleTestResCh) + + testCounter := 0 + finished := false + for !finished { + var testName string + select { + case <-ctx.Done(): + testName = queuedTestCases[testCounter].name + allTestRes = append(allTestRes, testResult{Name: testName, Verdict: testVerdictFail, Error: errTimeoutInterrupted}) + finished = true + case result, ok := <-singleTestResCh: + if !ok { + finished = true + continue + } + testName = queuedTestCases[testCounter].name + testCounter++ + result.Name = testName + allTestRes = append(allTestRes, result) + } + } + + allTestResCh <- map[string][]testResult{"self": allTestRes} + + return nil +} + +func runSelfTest(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCasePeerSelf, conf testPeersConfig, ch chan testResult) { + defer close(ch) + for _, t := range queuedTestCases { + select { + case <-ctx.Done(): + return + default: + ch <- allTestCases[t](ctx, &conf) + } + } +} + func 
libp2pTCPPortOpenTest(ctx context.Context, cfg *testPeersConfig) testResult { testRes := testResult{Name: "Libp2pTCPPortOpen"} @@ -740,6 +539,89 @@ func libp2pTCPPortOpenTest(ctx context.Context, cfg *testPeersConfig) testResult return testRes } +// charon relays tests + +func testAllRelays(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCaseRelay, conf testPeersConfig, allRelaysResCh chan map[string][]testResult) error { + // run tests for all relays + allRelayRes := make(map[string][]testResult) + singleRelayResCh := make(chan map[string][]testResult) + group, _ := errgroup.WithContext(ctx) + + for _, relay := range conf.P2P.Relays { + group.Go(func() error { + return testSingleRelay(ctx, queuedTestCases, allTestCases, conf, relay, singleRelayResCh) + }) + } + + doneReading := make(chan bool) + go func() { + for singleRelayRes := range singleRelayResCh { + maps.Copy(allRelayRes, singleRelayRes) + } + doneReading <- true + }() + + err := group.Wait() + if err != nil { + return errors.Wrap(err, "relays test errgroup") + } + close(singleRelayResCh) + <-doneReading + + allRelaysResCh <- allRelayRes + + return nil +} + +func testSingleRelay(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCaseRelay, conf testPeersConfig, target string, allTestResCh chan map[string][]testResult) error { + singleTestResCh := make(chan testResult) + allTestRes := []testResult{} + relayName := fmt.Sprintf("relay %v", target) + if len(queuedTestCases) == 0 { + allTestResCh <- map[string][]testResult{relayName: allTestRes} + return nil + } + + // run all relay tests for a relay, pushing each completed test to the channel until all are complete or timeout occurs + go runRelayTest(ctx, queuedTestCases, allTestCases, conf, target, singleTestResCh) + testCounter := 0 + finished := false + for !finished { + var testName string + select { + case <-ctx.Done(): + testName = queuedTestCases[testCounter].name + allTestRes = 
append(allTestRes, testResult{Name: testName, Verdict: testVerdictFail, Error: errTimeoutInterrupted}) + finished = true + case result, ok := <-singleTestResCh: + if !ok { + finished = true + continue + } + testName = queuedTestCases[testCounter].name + testCounter++ + result.Name = testName + allTestRes = append(allTestRes, result) + } + } + + allTestResCh <- map[string][]testResult{relayName: allTestRes} + + return nil +} + +func runRelayTest(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]testCaseRelay, conf testPeersConfig, target string, testResCh chan testResult) { + defer close(testResCh) + for _, t := range queuedTestCases { + select { + case <-ctx.Done(): + return + default: + testResCh <- allTestCases[t](ctx, &conf, target) + } + } +} + func relayPingTest(ctx context.Context, _ *testPeersConfig, target string) testResult { testRes := testResult{Name: "PingRelay"} @@ -755,7 +637,7 @@ func relayPingTest(ctx context.Context, _ *testPeersConfig, target string) testR defer resp.Body.Close() if resp.StatusCode > 399 { - return failedTestResult(testRes, errors.New("status code %v", z.Int("status_code", resp.StatusCode))) + return failedTestResult(testRes, errors.New(httpStatusError(resp.StatusCode))) } testRes.Verdict = testVerdictOk @@ -766,39 +648,227 @@ func relayPingTest(ctx context.Context, _ *testPeersConfig, target string) testR func relayPingMeasureTest(ctx context.Context, _ *testPeersConfig, target string) testResult { testRes := testResult{Name: "PingMeasureRelay"} - var start time.Time - var firstByte time.Duration + rtt, err := requestRTT(ctx, target, http.MethodGet, nil, 200) + if err != nil { + return failedTestResult(testRes, err) + } + + testRes = evaluateRTT(rtt, testRes, thresholdRelayMeasureAvg, thresholdRelayMeasurePoor) - trace := &httptrace.ClientTrace{ - GotFirstResponseByte: func() { - firstByte = time.Since(start) - }, + return testRes +} + +// helper functions + +func fetchPeersFromDefinition(path 
string) ([]string, error) { + f, err := os.ReadFile(path) + if err != nil { + return nil, errors.Wrap(err, "read definition file", z.Str("path", path)) } - start = time.Now() - req, err := http.NewRequestWithContext(httptrace.WithClientTrace(ctx, trace), http.MethodGet, target, nil) + var def cluster.Definition + err = json.Unmarshal(f, &def) if err != nil { - return failedTestResult(testRes, err) + return nil, errors.Wrap(err, "unmarshal definition json", z.Str("path", path)) } - resp, err := http.DefaultTransport.RoundTrip(req) + var enrs []string + for _, o := range def.Operators { + enrs = append(enrs, o.ENR) + } + + if len(enrs) == 0 { + return nil, errors.New("no peers found in lock", z.Str("path", path)) + } + + return enrs, nil +} + +func fetchPeersFromLock(path string) ([]string, error) { + f, err := os.ReadFile(path) if err != nil { - return failedTestResult(testRes, err) + return nil, errors.Wrap(err, "read lock file", z.Str("path", path)) } - defer resp.Body.Close() - if resp.StatusCode > 399 { - return failedTestResult(testRes, errors.New("status code %v", z.Int("status_code", resp.StatusCode))) + var lock cluster.Lock + err = json.Unmarshal(f, &lock) + if err != nil { + return nil, errors.Wrap(err, "unmarshal lock json", z.Str("path", path)) } - if firstByte > thresholdRelayMeasurePoor { - testRes.Verdict = testVerdictPoor - } else if firstByte > thresholdRelayMeasureAvg { - testRes.Verdict = testVerdictAvg - } else { - testRes.Verdict = testVerdictGood + var enrs []string + for _, o := range lock.Operators { + enrs = append(enrs, o.ENR) } - testRes.Measurement = Duration{firstByte}.String() - return testRes + if len(enrs) == 0 { + return nil, errors.New("no peers found in lock", z.Str("path", path)) + } + + return enrs, nil +} + +func fetchENRs(conf testPeersConfig) ([]string, error) { + var enrs []string + var err error + switch { + case len(conf.ENRs) != 0: + enrs = conf.ENRs + case conf.ClusterDefinitionFilePath != "": + enrs, err = 
fetchPeersFromDefinition(conf.ClusterDefinitionFilePath) + if err != nil { + return nil, err + } + case conf.ClusterLockFilePath != "": + enrs, err = fetchPeersFromLock(conf.ClusterLockFilePath) + if err != nil { + return nil, err + } + } + + return enrs, nil +} + +func startTCPNode(ctx context.Context, conf testPeersConfig) (host.Host, func(), error) { + enrs, err := fetchENRs(conf) + if err != nil { + return nil, nil, err + } + + var peers []p2p.Peer + for i, enrString := range enrs { + enrRecord, err := enr.Parse(enrString) + if err != nil { + return nil, nil, errors.Wrap(err, "decode enr", z.Str("enr", enrString)) + } + + p2pPeer, err := p2p.NewPeerFromENR(enrRecord, i) + if err != nil { + return nil, nil, err + } + + peers = append(peers, p2pPeer) + } + + p2pPrivKey, err := p2p.LoadPrivKey(conf.DataDir) + if err != nil { + return nil, nil, err + } + + meENR, err := enr.New(p2pPrivKey) + if err != nil { + return nil, nil, err + } + + mePeer, err := p2p.NewPeerFromENR(meENR, len(enrs)) + if err != nil { + return nil, nil, err + } + + log.Info(ctx, "Self p2p name resolved", z.Any("name", mePeer.Name)) + + peers = append(peers, mePeer) + + allENRs := enrs + allENRs = append(allENRs, meENR.String()) + slices.Sort(allENRs) + allENRsString := strings.Join(allENRs, ",") + allENRsHash := sha256.Sum256([]byte(allENRsString)) + + return setupP2P(ctx, p2pPrivKey, conf.P2P, peers, allENRsHash[:]) +} + +func setupP2P(ctx context.Context, privKey *k1.PrivateKey, conf p2p.Config, peers []p2p.Peer, enrsHash []byte) (host.Host, func(), error) { + var peerIDs []peer.ID + for _, peer := range peers { + peerIDs = append(peerIDs, peer.ID) + } + + if err := p2p.VerifyP2PKey(peers, privKey); err != nil { + return nil, nil, err + } + + relays, err := p2p.NewRelays(ctx, conf.Relays, hex.EncodeToString(enrsHash)) + if err != nil { + return nil, nil, err + } + + connGater, err := p2p.NewConnGater(peerIDs, relays) + if err != nil { + return nil, nil, err + } + + tcpNode, err := 
p2p.NewTCPNode(ctx, conf, privKey, connGater, false) + if err != nil { + return nil, nil, err + } + + p2p.RegisterConnectionLogger(ctx, tcpNode, peerIDs) + + for _, relay := range relays { + go p2p.NewRelayReserver(tcpNode, relay)(ctx) + } + + go p2p.NewRelayRouter(tcpNode, peerIDs, relays)(ctx) + + return tcpNode, func() { + err := tcpNode.Close() + if err != nil && !errors.Is(err, context.Canceled) { + log.Error(ctx, "Close TCP node", err) + } + }, nil +} + +func pingPeerOnce(ctx context.Context, tcpNode host.Host, peer p2p.Peer) (ping.Result, error) { + pingSvc := ping.NewPingService(tcpNode) + pingCtx, cancel := context.WithCancel(ctx) + defer cancel() + pingChan := pingSvc.Ping(pingCtx, peer.ID) + result, ok := <-pingChan + if !ok { + return ping.Result{}, errors.New("ping channel closed") + } + + return result, nil +} + +func pingPeerContinuously(ctx context.Context, tcpNode host.Host, peer p2p.Peer, resCh chan<- time.Duration) { + for { + r, err := pingPeerOnce(ctx, tcpNode, peer) + if err != nil { + return + } + + select { + case <-ctx.Done(): + return + case resCh <- r.RTT: + awaitTime := rand.Intn(100) //nolint:gosec // weak generator is not an issue here + sleepWithContext(ctx, time.Duration(awaitTime)*time.Millisecond) + } + } +} + +func dialLibp2pTCPIP(ctx context.Context, address string) error { + d := net.Dialer{Timeout: time.Second} + conn, err := d.DialContext(ctx, "tcp", address) + if err != nil { + return errors.Wrap(err, "net dial") + } + defer conn.Close() + buf := new(strings.Builder) + _, err = io.CopyN(buf, conn, 19) + if err != nil { + return errors.Wrap(err, "io copy") + } + if !strings.Contains(buf.String(), "/multistream/1.0.0") { + return errors.New("multistream not found", z.Any("found", buf.String()), z.Any("address", address)) + } + + err = conn.Close() + if err != nil { + return errors.Wrap(err, "close conn") + } + + return nil } diff --git a/cmd/testpeers_internal_test.go b/cmd/testpeers_internal_test.go index 1d9fb8e4a..9622c2a7d 
100644 --- a/cmd/testpeers_internal_test.go +++ b/cmd/testpeers_internal_test.go @@ -6,6 +6,7 @@ import ( "bytes" "context" "encoding/base64" + "encoding/json" "fmt" "io" "net/http" @@ -17,7 +18,6 @@ import ( k1 "github.com/decred/dcrd/dcrec/secp256k1/v4" "github.com/libp2p/go-libp2p/core/peer" - "github.com/pelletier/go-toml/v2" "github.com/stretchr/testify/require" "golang.org/x/exp/maps" @@ -50,7 +50,7 @@ func TestPeersTest(t *testing.T) { name: "default scenario", config: testPeersConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: nil, Timeout: 10 * time.Second, @@ -72,29 +72,29 @@ func TestPeersTest(t *testing.T) { CategoryName: peersTestCategory, Targets: map[string][]testResult{ "self": { - {Name: "libp2pTCPPortOpenTest", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Libp2pTCPPortOpen", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, }, fmt.Sprintf("relay %v", relayAddr): { - {Name: "pingRelay", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasureRelay", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingRelay", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasureRelay", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, }, - "peer inexpensive-farm enr:-HW4QBHlcyD3fYWUMADiOv4OxODaL5wJG0a7P7d_ltu4VZe1MibZ1N-twFaoaq0BoCtXcY71etxLJGeEZT5p3XCO6GOAgmlkgnY0iXNlY3AyNTZrMaEDI2HRUlVBag__njkOWEEQRLlC9ylIVCrIXOuNBSlrx6o": { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "directConn", 
Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + "peer inexpensive-farm enr:-HW4QBHlc...rx6o": { + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "DirectConn", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, }, - "peer anxious-pencil enr:-HW4QDwUF804f4WhUjwcp4JJ-PrRH0glQZv8s2cVHlBRPJ3SYcYO-dvJGsKhztffrski5eujJkl8oAc983MZy6-PqF2AgmlkgnY0iXNlY3AyNTZrMaECPEPryjkmUBnQFyjmMw9rl7DVtKL0243nN5iepqsvKDw": { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "directConn", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + "peer anxious-pencil enr:-HW4QDwUF...vKDw": { + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "DirectConn", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, }, - "peer important-pen enr:-HW4QPSBgUTag8oZs3zIsgWzlBUrSgT8pgZmFJa7HWwKXUcRLlISa68OJtp-JTzhUXsJ2vSGwKGACn0OTatWdJATxn-AgmlkgnY0iXNlY3AyNTZrMaECA3R_ffXLXCLJsfEwf6xeoAFgWnDIOdq8kS0Yqkhwbr0": { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasure", Verdict: testVerdictGood, 
Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "directConn", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + "peer important-pen enr:-HW4QPSBg...wbr0": { + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "DirectConn", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreC, @@ -118,7 +118,7 @@ func TestPeersTest(t *testing.T) { name: "quiet", config: testPeersConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: true, TestCases: nil, Timeout: 3 * time.Second, @@ -138,20 +138,20 @@ func TestPeersTest(t *testing.T) { CategoryName: peersTestCategory, Targets: map[string][]testResult{ "self": { - {Name: "libp2pTCPPortOpenTest", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Libp2pTCPPortOpen", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, }, fmt.Sprintf("relay %v", relayAddr): { - {Name: "pingRelay", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasureRelay", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingRelay", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasureRelay", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, }, - "peer inexpensive-farm 
enr:-HW4QBHlcyD3fYWUMADiOv4OxODaL5wJG0a7P7d_ltu4VZe1MibZ1N-twFaoaq0BoCtXcY71etxLJGeEZT5p3XCO6GOAgmlkgnY0iXNlY3AyNTZrMaEDI2HRUlVBag__njkOWEEQRLlC9ylIVCrIXOuNBSlrx6o": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + "peer inexpensive-farm enr:-HW4QBHlc...rx6o": { + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, - "peer anxious-pencil enr:-HW4QDwUF804f4WhUjwcp4JJ-PrRH0glQZv8s2cVHlBRPJ3SYcYO-dvJGsKhztffrski5eujJkl8oAc983MZy6-PqF2AgmlkgnY0iXNlY3AyNTZrMaECPEPryjkmUBnQFyjmMw9rl7DVtKL0243nN5iepqsvKDw": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + "peer anxious-pencil enr:-HW4QDwUF...vKDw": { + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, - "peer important-pen enr:-HW4QPSBgUTag8oZs3zIsgWzlBUrSgT8pgZmFJa7HWwKXUcRLlISa68OJtp-JTzhUXsJ2vSGwKGACn0OTatWdJATxn-AgmlkgnY0iXNlY3AyNTZrMaECA3R_ffXLXCLJsfEwf6xeoAFgWnDIOdq8kS0Yqkhwbr0": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + "peer important-pen enr:-HW4QPSBg...wbr0": { + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, }, Score: categoryScoreC, @@ -162,7 +162,7 @@ func TestPeersTest(t *testing.T) { name: "unsupported test", config: testPeersConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: []string{"notSupportedTest"}, Timeout: 200 * time.Millisecond, @@ -185,9 +185,9 @@ func TestPeersTest(t *testing.T) { name: "custom test cases", config: testPeersConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, - TestCases: []string{"ping"}, + TestCases: []string{"Ping"}, Timeout: 200 * time.Millisecond, }, ENRs: []string{ @@ -204,14 +204,14 @@ func TestPeersTest(t *testing.T) { expected: 
testCategoryResult{ CategoryName: peersTestCategory, Targets: map[string][]testResult{ - "peer inexpensive-farm enr:-HW4QBHlcyD3fYWUMADiOv4OxODaL5wJG0a7P7d_ltu4VZe1MibZ1N-twFaoaq0BoCtXcY71etxLJGeEZT5p3XCO6GOAgmlkgnY0iXNlY3AyNTZrMaEDI2HRUlVBag__njkOWEEQRLlC9ylIVCrIXOuNBSlrx6o": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + "peer inexpensive-farm enr:-HW4QBHlc...rx6o": { + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, - "peer anxious-pencil enr:-HW4QDwUF804f4WhUjwcp4JJ-PrRH0glQZv8s2cVHlBRPJ3SYcYO-dvJGsKhztffrski5eujJkl8oAc983MZy6-PqF2AgmlkgnY0iXNlY3AyNTZrMaECPEPryjkmUBnQFyjmMw9rl7DVtKL0243nN5iepqsvKDw": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + "peer anxious-pencil enr:-HW4QDwUF...vKDw": { + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, - "peer important-pen enr:-HW4QPSBgUTag8oZs3zIsgWzlBUrSgT8pgZmFJa7HWwKXUcRLlISa68OJtp-JTzhUXsJ2vSGwKGACn0OTatWdJATxn-AgmlkgnY0iXNlY3AyNTZrMaECA3R_ffXLXCLJsfEwf6xeoAFgWnDIOdq8kS0Yqkhwbr0": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + "peer important-pen enr:-HW4QPSBg...wbr0": { + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, }, Score: categoryScoreC, @@ -222,7 +222,7 @@ func TestPeersTest(t *testing.T) { name: "write to file", config: testPeersConfig{ testConfig: testConfig{ - OutputToml: "./write-to-file-test.toml.tmp", + OutputJSON: "./write-to-file-test.json.tmp", Quiet: false, Timeout: 3 * time.Second, }, @@ -241,20 +241,20 @@ func TestPeersTest(t *testing.T) { CategoryName: peersTestCategory, Targets: map[string][]testResult{ "self": { - {Name: "libp2pTCPPortOpenTest", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, 
+ {Name: "Libp2pTCPPortOpen", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, }, fmt.Sprintf("relay %v", relayAddr): { - {Name: "pingRelay", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasureRelay", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingRelay", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasureRelay", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, }, - "peer inexpensive-farm enr:-HW4QBHlcyD3fYWUMADiOv4OxODaL5wJG0a7P7d_ltu4VZe1MibZ1N-twFaoaq0BoCtXcY71etxLJGeEZT5p3XCO6GOAgmlkgnY0iXNlY3AyNTZrMaEDI2HRUlVBag__njkOWEEQRLlC9ylIVCrIXOuNBSlrx6o": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + "peer inexpensive-farm enr:-HW4QBHlc...rx6o": { + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, - "peer anxious-pencil enr:-HW4QDwUF804f4WhUjwcp4JJ-PrRH0glQZv8s2cVHlBRPJ3SYcYO-dvJGsKhztffrski5eujJkl8oAc983MZy6-PqF2AgmlkgnY0iXNlY3AyNTZrMaECPEPryjkmUBnQFyjmMw9rl7DVtKL0243nN5iepqsvKDw": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + "peer anxious-pencil enr:-HW4QDwUF...vKDw": { + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, - "peer important-pen enr:-HW4QPSBgUTag8oZs3zIsgWzlBUrSgT8pgZmFJa7HWwKXUcRLlISa68OJtp-JTzhUXsJ2vSGwKGACn0OTatWdJATxn-AgmlkgnY0iXNlY3AyNTZrMaECA3R_ffXLXCLJsfEwf6xeoAFgWnDIOdq8kS0Yqkhwbr0": { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + "peer important-pen enr:-HW4QPSBg...wbr0": { + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, }, Score: categoryScoreC, @@ 
-280,7 +280,7 @@ func TestPeersTest(t *testing.T) { } var buf bytes.Buffer - err = runTestPeers(ctx, &buf, conf) + _, err = runTestPeers(ctx, &buf, conf) if test.expectedErr != "" { require.ErrorContains(t, err, test.expectedErr) return @@ -289,7 +289,7 @@ func TestPeersTest(t *testing.T) { } defer func() { if test.cleanup != nil { - test.cleanup(t, conf.OutputToml) + test.cleanup(t, conf.OutputJSON) } }() @@ -299,8 +299,8 @@ func TestPeersTest(t *testing.T) { testWriteOut(t, test.expected, buf) } - if test.config.OutputToml != "" { - testWriteFile(t, test.expected, test.config.OutputToml) + if test.config.OutputJSON != "" { + testWriteFile(t, test.expected, test.config.OutputJSON) } }) } @@ -320,18 +320,20 @@ func TestPeersTestFlags(t *testing.T) { { name: "no enrs flag", args: []string{"peers"}, - expectedErr: "required flag(s) \"enrs\" not set", + expectedErr: "--enrs, --cluster-lock-file-path or --cluster-definition-file-path must be specified.", }, { - name: "no output toml on quiet", + name: "no output json on quiet", args: []string{"peers", "--enrs=\"test.endpoint\"", "--quiet"}, - expectedErr: "on --quiet, an --output-toml is required", + expectedErr: "on --quiet, an --output-json is required", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - cmd := newAlphaCmd(newTestPeersCmd(func(context.Context, io.Writer, testPeersConfig) error { return nil })) + cmd := newAlphaCmd(newTestPeersCmd(func(context.Context, io.Writer, testPeersConfig) (testCategoryResult, error) { + return testCategoryResult{}, nil + })) cmd.SetArgs(test.args) err := cmd.Execute() if test.expectedErr != "" { @@ -379,18 +381,36 @@ func testWriteFile(t *testing.T, expectedRes testCategoryResult, path string) { t.Helper() file, err := os.ReadFile(path) require.NoError(t, err) - var res testCategoryResult - err = toml.Unmarshal(file, &res) + var res fileResult + err = json.Unmarshal(file, &res) require.NoError(t, err) - require.Equal(t, expectedRes.CategoryName, 
res.CategoryName) - require.Equal(t, expectedRes.Score, res.Score) - require.Equal(t, len(expectedRes.Targets), len(res.Targets)) - for targetName, testResults := range res.Targets { + var actualRes testCategoryResult + switch expectedRes.CategoryName { + case peersTestCategory: + actualRes = res.Peers + case beaconTestCategory: + actualRes = res.Beacon + case validatorTestCategory: + actualRes = res.Validator + case mevTestCategory: + actualRes = res.MEV + case infraTestCategory: + actualRes = res.Infra + default: + t.Error("unknown category") + } + + require.Equal(t, expectedRes.CategoryName, actualRes.CategoryName) + require.Equal(t, len(expectedRes.Targets), len(actualRes.Targets)) + checkFinalScore := true + for targetName, testResults := range actualRes.Targets { for idx, testRes := range testResults { // do not test verdicts based on measurements if expectedRes.Targets[targetName][idx].Verdict == testVerdictOk || expectedRes.Targets[targetName][idx].Verdict == testVerdictFail { require.Equal(t, expectedRes.Targets[targetName][idx].Verdict, testRes.Verdict) + } else { + checkFinalScore = false } require.Equal(t, expectedRes.Targets[targetName][idx].IsAcceptable, testRes.IsAcceptable) if expectedRes.Targets[targetName][idx].Error.error != nil { @@ -402,6 +422,10 @@ func testWriteFile(t *testing.T, expectedRes testCategoryResult, path string) { require.Equal(t, expectedRes.Targets[targetName][idx].Suggestion, testRes.Suggestion) } } + // check final score only if there are no tests based on actual measurement + if checkFinalScore { + require.Equal(t, expectedRes.Score, actualRes.Score) + } } func startPeer(t *testing.T, conf testPeersConfig, peerPrivKey *k1.PrivateKey) enr.Record { diff --git a/cmd/testvalidator.go b/cmd/testvalidator.go index d60a19b29..42c734223 100644 --- a/cmd/testvalidator.go +++ b/cmd/testvalidator.go @@ -32,7 +32,7 @@ const ( thresholdValidatorLoadPoor = 240 * time.Millisecond ) -func newTestValidatorCmd(runFunc func(context.Context, 
io.Writer, testValidatorConfig) error) *cobra.Command { +func newTestValidatorCmd(runFunc func(context.Context, io.Writer, testValidatorConfig) (testCategoryResult, error)) *cobra.Command { var config testValidatorConfig cmd := &cobra.Command{ @@ -44,34 +44,38 @@ func newTestValidatorCmd(runFunc func(context.Context, io.Writer, testValidatorC return mustOutputToFileOnQuiet(cmd) }, RunE: func(cmd *cobra.Command, _ []string) error { - return runFunc(cmd.Context(), cmd.OutOrStdout(), config) + _, err := runFunc(cmd.Context(), cmd.OutOrStdout(), config) + return err }, } bindTestFlags(cmd, &config.testConfig) - bindTestValidatorFlags(cmd, &config) + bindTestValidatorFlags(cmd, &config, "") return cmd } -func bindTestValidatorFlags(cmd *cobra.Command, config *testValidatorConfig) { - cmd.Flags().StringVar(&config.APIAddress, "validator-api-address", "127.0.0.1:3600", "Listening address (ip and port) for validator-facing traffic proxying the beacon-node API.") - cmd.Flags().DurationVar(&config.LoadTestDuration, "load-test-duration", 5*time.Second, "Time to keep running the load tests in seconds. For each second a new continuous ping instance is spawned.") +func bindTestValidatorFlags(cmd *cobra.Command, config *testValidatorConfig, flagsPrefix string) { + cmd.Flags().StringVar(&config.APIAddress, flagsPrefix+"validator-api-address", "127.0.0.1:3600", "Listening address (ip and port) for validator-facing traffic proxying the beacon-node API.") + cmd.Flags().DurationVar(&config.LoadTestDuration, flagsPrefix+"load-test-duration", 5*time.Second, "Time to keep running the load tests in seconds. 
For each second a new continuous ping instance is spawned.") } func supportedValidatorTestCases() map[testCaseName]func(context.Context, *testValidatorConfig) testResult { return map[testCaseName]func(context.Context, *testValidatorConfig) testResult{ - {name: "ping", order: 1}: validatorPingTest, - {name: "pingMeasure", order: 2}: validatorPingMeasureTest, - {name: "pingLoad", order: 3}: validatorPingLoadTest, + {name: "Ping", order: 1}: validatorPingTest, + {name: "PingMeasure", order: 2}: validatorPingMeasureTest, + {name: "PingLoad", order: 3}: validatorPingLoadTest, } } -func runTestValidator(ctx context.Context, w io.Writer, cfg testValidatorConfig) (err error) { +func runTestValidator(ctx context.Context, w io.Writer, cfg testValidatorConfig) (res testCategoryResult, err error) { + log.Info(ctx, "Starting validator client test") + testCases := supportedValidatorTestCases() queuedTests := filterTests(maps.Keys(testCases), cfg.testConfig) if len(queuedTests) == 0 { - return errors.New("test case not supported") + err = errors.New("test case not supported") + return res, err } sortTests(queuedTests) @@ -100,7 +104,7 @@ func runTestValidator(ctx context.Context, w io.Writer, cfg testValidatorConfig) } } - res := testCategoryResult{ + res = testCategoryResult{ CategoryName: validatorTestCategory, Targets: testResults, ExecutionTime: execTime, @@ -110,20 +114,22 @@ func runTestValidator(ctx context.Context, w io.Writer, cfg testValidatorConfig) if !cfg.Quiet { err = writeResultToWriter(res, w) if err != nil { - return err + return res, err } } - if cfg.OutputToml != "" { - err = writeResultToFile(res, cfg.OutputToml) + if cfg.OutputJSON != "" { + err = writeResultToFile(res, cfg.OutputJSON) if err != nil { - return err + return res, err } } - return nil + return res, nil } +// validator client tests + func testSingleValidator(ctx context.Context, queuedTestCases []testCaseName, allTestCases map[testCaseName]func(context.Context, *testValidatorConfig) testResult, 
cfg testValidatorConfig, resCh chan map[string][]testResult) { defer close(resCh) singleTestResCh := make(chan testResult) @@ -145,9 +151,7 @@ func testSingleValidator(ctx context.Context, queuedTestCases []testCaseName, al finished = true break } - testName = queuedTestCases[testCounter].name testCounter++ - result.Name = testName allTestRes = append(allTestRes, result) } } @@ -194,47 +198,17 @@ func validatorPingMeasureTest(ctx context.Context, conf *testValidatorConfig) te defer conn.Close() rtt := time.Since(before) - if rtt > thresholdValidatorMeasurePoor { - testRes.Verdict = testVerdictPoor - } else if rtt > thresholdValidatorMeasureAvg { - testRes.Verdict = testVerdictAvg - } else { - testRes.Verdict = testVerdictGood - } - testRes.Measurement = Duration{rtt}.String() + testRes = evaluateRTT(rtt, testRes, thresholdValidatorMeasureAvg, thresholdValidatorMeasurePoor) return testRes } -func pingValidatorContinuously(ctx context.Context, address string, resCh chan<- time.Duration) { - d := net.Dialer{Timeout: time.Second} - for { - before := time.Now() - conn, err := d.DialContext(ctx, "tcp", address) - if err != nil { - return - } - rtt := time.Since(before) - err = conn.Close() - if err != nil { - return - } - select { - case <-ctx.Done(): - return - case resCh <- rtt: - awaitTime := rand.Intn(100) //nolint:gosec // weak generator is not an issue here - sleepWithContext(ctx, time.Duration(awaitTime)*time.Millisecond) - } - } -} - func validatorPingLoadTest(ctx context.Context, conf *testValidatorConfig) testResult { log.Info(ctx, "Running ping load tests...", z.Any("duration", conf.LoadTestDuration), z.Any("target", conf.APIAddress), ) - testRes := testResult{Name: "ValidatorLoad"} + testRes := testResult{Name: "PingLoad"} testResCh := make(chan time.Duration, math.MaxInt16) pingCtx, cancel := context.WithTimeout(ctx, conf.LoadTestDuration) @@ -258,20 +232,32 @@ func validatorPingLoadTest(ctx context.Context, conf *testValidatorConfig) testR close(testResCh) 
log.Info(ctx, "Ping load tests finished", z.Any("target", conf.APIAddress)) - highestRTT := time.Duration(0) - for rtt := range testResCh { - if rtt > highestRTT { - highestRTT = rtt - } - } - if highestRTT > thresholdValidatorLoadPoor { - testRes.Verdict = testVerdictPoor - } else if highestRTT > thresholdValidatorLoadAvg { - testRes.Verdict = testVerdictAvg - } else { - testRes.Verdict = testVerdictGood - } - testRes.Measurement = Duration{highestRTT}.String() + testRes = evaluateHighestRTTScores(testResCh, testRes, thresholdValidatorLoadAvg, thresholdValidatorLoadPoor) return testRes } + +// helper functions + +func pingValidatorContinuously(ctx context.Context, address string, resCh chan<- time.Duration) { + d := net.Dialer{Timeout: time.Second} + for { + before := time.Now() + conn, err := d.DialContext(ctx, "tcp", address) + if err != nil { + return + } + rtt := time.Since(before) + err = conn.Close() + if err != nil { + return + } + select { + case <-ctx.Done(): + return + case resCh <- rtt: + awaitTime := rand.Intn(100) //nolint:gosec // weak generator is not an issue here + sleepWithContext(ctx, time.Duration(awaitTime)*time.Millisecond) + } + } +} diff --git a/cmd/testvalidator_internal_test.go b/cmd/testvalidator_internal_test.go index 88ff1a8fb..578dcaac9 100644 --- a/cmd/testvalidator_internal_test.go +++ b/cmd/testvalidator_internal_test.go @@ -42,9 +42,8 @@ func TestValidatorTest(t *testing.T) { name: "default scenario", config: testValidatorConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, - TestCases: nil, Timeout: time.Minute, }, APIAddress: validatorAPIAddress, @@ -52,9 +51,9 @@ func TestValidatorTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ validatorAPIAddress: { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - 
{Name: "pingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreA, @@ -66,9 +65,8 @@ func TestValidatorTest(t *testing.T) { name: "timeout", config: testValidatorConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, - TestCases: nil, Timeout: 100 * time.Nanosecond, }, APIAddress: validatorAPIAddress, @@ -76,7 +74,7 @@ func TestValidatorTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ validatorAPIAddress: { - {Name: "ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, + {Name: "Ping", Verdict: testVerdictFail, Measurement: "", Suggestion: "", Error: errTimeoutInterrupted}, }, }, Score: categoryScoreC, @@ -88,9 +86,8 @@ func TestValidatorTest(t *testing.T) { name: "quiet", config: testValidatorConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: true, - TestCases: nil, Timeout: time.Minute, }, APIAddress: validatorAPIAddress, @@ -98,9 +95,9 @@ func TestValidatorTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ validatorAPIAddress: { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", 
Error: testResultError{}}, + {Name: "PingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreA, @@ -112,7 +109,7 @@ func TestValidatorTest(t *testing.T) { name: "unsupported test", config: testValidatorConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, TestCases: []string{"notSupportedTest"}, Timeout: time.Minute, @@ -129,9 +126,9 @@ func TestValidatorTest(t *testing.T) { name: "custom test cases", config: testValidatorConfig{ testConfig: testConfig{ - OutputToml: "", + OutputJSON: "", Quiet: false, - TestCases: []string{"ping"}, + TestCases: []string{"Ping"}, Timeout: time.Minute, }, APIAddress: validatorAPIAddress, @@ -139,7 +136,7 @@ func TestValidatorTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ validatorAPIAddress: { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreA, @@ -151,9 +148,8 @@ func TestValidatorTest(t *testing.T) { name: "write to file", config: testValidatorConfig{ testConfig: testConfig{ - OutputToml: "./write-to-file-test.toml.tmp", + OutputJSON: "./write-to-file-test.json.tmp", Quiet: false, - TestCases: nil, Timeout: time.Minute, }, APIAddress: validatorAPIAddress, @@ -161,9 +157,9 @@ func TestValidatorTest(t *testing.T) { expected: testCategoryResult{ Targets: map[string][]testResult{ validatorAPIAddress: { - {Name: "ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, - {Name: "pingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "Ping", Verdict: testVerdictOk, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: 
"PingMeasure", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, + {Name: "PingLoad", Verdict: testVerdictGood, Measurement: "", Suggestion: "", Error: testResultError{}}, }, }, Score: categoryScoreA, @@ -181,7 +177,7 @@ func TestValidatorTest(t *testing.T) { t.Run(test.name, func(t *testing.T) { var buf bytes.Buffer ctx := context.Background() - err := runTestValidator(ctx, &buf, test.config) + _, err := runTestValidator(ctx, &buf, test.config) if test.expectedErr != "" { require.ErrorContains(t, err, test.expectedErr) return @@ -190,7 +186,7 @@ func TestValidatorTest(t *testing.T) { } defer func() { if test.cleanup != nil { - test.cleanup(t, test.config.OutputToml) + test.cleanup(t, test.config.OutputJSON) } }() @@ -200,8 +196,8 @@ func TestValidatorTest(t *testing.T) { testWriteOut(t, test.expected, buf) } - if test.config.OutputToml != "" { - testWriteFile(t, test.expected, test.config.OutputToml) + if test.config.OutputJSON != "" { + testWriteFile(t, test.expected, test.config.OutputJSON) } }) } @@ -238,15 +234,17 @@ func TestValidatorTestFlags(t *testing.T) { expectedErr: "", }, { - name: "no output toml on quiet", + name: "no output json on quiet", args: []string{"validator", "--validator-api-address=\"test.endpoint\"", "--quiet"}, - expectedErr: "on --quiet, an --output-toml is required", + expectedErr: "on --quiet, an --output-json is required", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - cmd := newAlphaCmd(newTestValidatorCmd(func(context.Context, io.Writer, testValidatorConfig) error { return nil })) + cmd := newAlphaCmd(newTestValidatorCmd(func(context.Context, io.Writer, testValidatorConfig) (testCategoryResult, error) { + return testCategoryResult{}, nil + })) cmd.SetArgs(test.args) err := cmd.Execute() if test.expectedErr != "" { diff --git a/core/aggsigdb/memory_v2_internal_test.go b/core/aggsigdb/memory_v2_internal_test.go index 1b15cb946..6bd30a5be 100644 --- 
a/core/aggsigdb/memory_v2_internal_test.go +++ b/core/aggsigdb/memory_v2_internal_test.go @@ -35,8 +35,8 @@ func TestDutyExpirationV2(t *testing.T) { deadliner.Expire() - require.Zero(t, len(db.data)) - require.Zero(t, len(db.keysByDuty)) + require.Empty(t, db.data) + require.Empty(t, db.keysByDuty) } func TestCancelledQueryV2(t *testing.T) { diff --git a/core/consensus/component.go b/core/consensus/component.go index fe5ffb1dd..c98f07f89 100644 --- a/core/consensus/component.go +++ b/core/consensus/component.go @@ -432,15 +432,31 @@ func (c *Component) runInstance(ctx context.Context, duty core.Duty) (err error) } // Instrument consensus instance. - var decided bool + var ( + decided bool + nodes = len(c.peers) + ) + decideCallback := func(qcommit []qbft.Msg[core.Duty, [32]byte]) { + round := qcommit[0].Round() decided = true - decidedRoundsGauge.WithLabelValues(duty.Type.String(), string(roundTimer.Type())).Set(float64(qcommit[0].Round())) + decidedRoundsGauge.WithLabelValues(duty.Type.String(), string(roundTimer.Type())).Set(float64(round)) inst.decidedAtCh <- time.Now() + + leaderIndex := leader(duty, round, nodes) + leaderName := c.peers[leaderIndex].Name + log.Debug(ctx, "QBFT consensus decided", + z.Str("duty", duty.Type.String()), + z.U64("slot", duty.Slot), + z.I64("round", round), + z.I64("leader_index", leaderIndex), + z.Str("leader_name", leaderName)) + + decidedLeaderGauge.WithLabelValues(duty.Type.String()).Set(float64(leaderIndex)) } // Create a new qbft definition for this instance. - def := newDefinition(len(c.peers), c.subscribers, roundTimer, decideCallback) + def := newDefinition(nodes, c.subscribers, roundTimer, decideCallback) // Create a new transport that handles sending and receiving for this instance. t := transport{ @@ -466,7 +482,7 @@ func (c *Component) runInstance(ctx context.Context, duty core.Duty) (err error) } // Run the algo, blocking until the context is cancelled. 
- err = qbft.Run[core.Duty, [32]byte](ctx, def, qt, duty, peerIdx, inst.hashCh) + err = qbft.Run(ctx, def, qt, duty, peerIdx, inst.hashCh) if err != nil && !isContextErr(err) { consensusError.Inc() return err // Only return non-context errors. diff --git a/core/consensus/metrics.go b/core/consensus/metrics.go index 0c9b98648..8a6eee996 100644 --- a/core/consensus/metrics.go +++ b/core/consensus/metrics.go @@ -9,12 +9,21 @@ import ( ) var ( + // Using gauge since the value changes slowly, once per slot. decidedRoundsGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "core", Subsystem: "consensus", Name: "decided_rounds", Help: "Number of rounds it took to decide consensus instances by duty and timer type.", - }, []string{"duty", "timer"}) // Using gauge since the value changes slowly, once per slot. + }, []string{"duty", "timer"}) + + // Using gauge since the value changes slowly, once per slot. + decidedLeaderGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "core", + Subsystem: "consensus", + Name: "decided_leader_index", + Help: "Leader node index of the decision round by duty.", + }, []string{"duty"}) consensusDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ Namespace: "core", diff --git a/core/corepb/v1/consensus.pb.go b/core/corepb/v1/consensus.pb.go index 82ceec09a..0bbf9708f 100644 --- a/core/corepb/v1/consensus.pb.go +++ b/core/corepb/v1/consensus.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: core/corepb/v1/consensus.proto @@ -39,11 +39,9 @@ type QBFTMsg struct { func (x *QBFTMsg) Reset() { *x = QBFTMsg{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_consensus_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_consensus_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *QBFTMsg) String() string { @@ -54,7 +52,7 @@ func (*QBFTMsg) ProtoMessage() {} func (x *QBFTMsg) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_consensus_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -137,11 +135,9 @@ type ConsensusMsg struct { func (x *ConsensusMsg) Reset() { *x = ConsensusMsg{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_consensus_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_consensus_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ConsensusMsg) String() string { @@ -152,7 +148,7 @@ func (*ConsensusMsg) ProtoMessage() {} func (x *ConsensusMsg) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_consensus_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -199,11 +195,9 @@ type SniffedConsensusMsg struct { func (x *SniffedConsensusMsg) Reset() { *x = SniffedConsensusMsg{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_consensus_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } 
+ mi := &file_core_corepb_v1_consensus_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SniffedConsensusMsg) String() string { @@ -214,7 +208,7 @@ func (*SniffedConsensusMsg) ProtoMessage() {} func (x *SniffedConsensusMsg) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_consensus_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -256,11 +250,9 @@ type SniffedConsensusInstance struct { func (x *SniffedConsensusInstance) Reset() { *x = SniffedConsensusInstance{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_consensus_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_consensus_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SniffedConsensusInstance) String() string { @@ -271,7 +263,7 @@ func (*SniffedConsensusInstance) ProtoMessage() {} func (x *SniffedConsensusInstance) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_consensus_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -325,11 +317,9 @@ type SniffedConsensusInstances struct { func (x *SniffedConsensusInstances) Reset() { *x = SniffedConsensusInstances{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_consensus_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_consensus_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SniffedConsensusInstances) String() string { @@ -340,7 +330,7 @@ func (*SniffedConsensusInstances) 
ProtoMessage() {} func (x *SniffedConsensusInstances) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_consensus_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -490,68 +480,6 @@ func file_core_corepb_v1_consensus_proto_init() { return } file_core_corepb_v1_core_proto_init() - if !protoimpl.UnsafeEnabled { - file_core_corepb_v1_consensus_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*QBFTMsg); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_consensus_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ConsensusMsg); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_consensus_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*SniffedConsensusMsg); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_consensus_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*SniffedConsensusInstance); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_consensus_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*SniffedConsensusInstances); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/core/corepb/v1/core.pb.go b/core/corepb/v1/core.pb.go index 7b472d6fd..baeb54a46 100644 --- a/core/corepb/v1/core.pb.go +++ b/core/corepb/v1/core.pb.go @@ 
-1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: core/corepb/v1/core.proto @@ -31,11 +31,9 @@ type Duty struct { func (x *Duty) Reset() { *x = Duty{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_core_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_core_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Duty) String() string { @@ -46,7 +44,7 @@ func (*Duty) ProtoMessage() {} func (x *Duty) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_core_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -85,11 +83,9 @@ type UnsignedDataSet struct { func (x *UnsignedDataSet) Reset() { *x = UnsignedDataSet{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_core_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_core_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UnsignedDataSet) String() string { @@ -100,7 +96,7 @@ func (*UnsignedDataSet) ProtoMessage() {} func (x *UnsignedDataSet) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_core_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -132,11 +128,9 @@ type ParSignedDataSet struct { func (x *ParSignedDataSet) Reset() { *x = ParSignedDataSet{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_core_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_core_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ParSignedDataSet) String() string { @@ -147,7 +141,7 @@ func (*ParSignedDataSet) ProtoMessage() {} func (x *ParSignedDataSet) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_core_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -181,11 +175,9 @@ type ParSignedData struct { func (x *ParSignedData) Reset() { *x = ParSignedData{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_core_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_core_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ParSignedData) String() string { @@ -196,7 +188,7 @@ func (*ParSignedData) ProtoMessage() {} func (x *ParSignedData) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_core_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -309,56 +301,6 @@ func file_core_corepb_v1_core_proto_init() { if File_core_corepb_v1_core_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_core_corepb_v1_core_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Duty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_core_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*UnsignedDataSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_core_corepb_v1_core_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ParSignedDataSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_core_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ParSignedData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/core/corepb/v1/parsigex.pb.go b/core/corepb/v1/parsigex.pb.go index aa767fd59..aabb6d5ae 100644 --- a/core/corepb/v1/parsigex.pb.go +++ b/core/corepb/v1/parsigex.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: core/corepb/v1/parsigex.proto @@ -31,11 +31,9 @@ type ParSigExMsg struct { func (x *ParSigExMsg) Reset() { *x = ParSigExMsg{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_parsigex_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_parsigex_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ParSigExMsg) String() string { @@ -46,7 +44,7 @@ func (*ParSigExMsg) ProtoMessage() {} func (x *ParSigExMsg) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_parsigex_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -130,20 +128,6 @@ func file_core_corepb_v1_parsigex_proto_init() { return } file_core_corepb_v1_core_proto_init() - if !protoimpl.UnsafeEnabled { - file_core_corepb_v1_parsigex_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := 
v.(*ParSigExMsg); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/core/corepb/v1/priority.pb.go b/core/corepb/v1/priority.pb.go index e40e12cc8..1db8a4788 100644 --- a/core/corepb/v1/priority.pb.go +++ b/core/corepb/v1/priority.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: core/corepb/v1/priority.proto @@ -33,11 +33,9 @@ type PriorityResult struct { func (x *PriorityResult) Reset() { *x = PriorityResult{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_priority_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_priority_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PriorityResult) String() string { @@ -48,7 +46,7 @@ func (*PriorityResult) ProtoMessage() {} func (x *PriorityResult) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_priority_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -91,11 +89,9 @@ type PriorityMsg struct { func (x *PriorityMsg) Reset() { *x = PriorityMsg{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_priority_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_priority_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PriorityMsg) String() string { @@ -106,7 +102,7 @@ func (*PriorityMsg) ProtoMessage() {} func (x *PriorityMsg) ProtoReflect() protoreflect.Message { mi := 
&file_core_corepb_v1_priority_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -161,11 +157,9 @@ type PriorityTopicProposal struct { func (x *PriorityTopicProposal) Reset() { *x = PriorityTopicProposal{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_priority_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_priority_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PriorityTopicProposal) String() string { @@ -176,7 +170,7 @@ func (*PriorityTopicProposal) ProtoMessage() {} func (x *PriorityTopicProposal) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_priority_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -219,11 +213,9 @@ type PriorityTopicResult struct { func (x *PriorityTopicResult) Reset() { *x = PriorityTopicResult{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_priority_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_priority_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PriorityTopicResult) String() string { @@ -234,7 +226,7 @@ func (*PriorityTopicResult) ProtoMessage() {} func (x *PriorityTopicResult) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_priority_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -275,11 +267,9 @@ type PriorityScoredResult struct { func (x 
*PriorityScoredResult) Reset() { *x = PriorityScoredResult{} - if protoimpl.UnsafeEnabled { - mi := &file_core_corepb_v1_priority_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_corepb_v1_priority_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PriorityScoredResult) String() string { @@ -290,7 +280,7 @@ func (*PriorityScoredResult) ProtoMessage() {} func (x *PriorityScoredResult) ProtoReflect() protoreflect.Message { mi := &file_core_corepb_v1_priority_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -421,68 +411,6 @@ func file_core_corepb_v1_priority_proto_init() { return } file_core_corepb_v1_core_proto_init() - if !protoimpl.UnsafeEnabled { - file_core_corepb_v1_priority_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*PriorityResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_priority_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*PriorityMsg); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_priority_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*PriorityTopicProposal); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_corepb_v1_priority_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*PriorityTopicResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_core_corepb_v1_priority_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*PriorityScoredResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/core/tracker/tracker.go b/core/tracker/tracker.go index f9bf3d695..728c0e6f7 100644 --- a/core/tracker/tracker.go +++ b/core/tracker/tracker.go @@ -196,6 +196,7 @@ func dutyFailedStep(es []event) (bool, step, error) { } // Final step was successful. + //nolint:gosec // false positive slice index out of range if lastEvent.step == lastStep(es[0].duty.Type) && lastEvent.stepErr == nil { return false, zero, nil } diff --git a/core/validatorapi/router.go b/core/validatorapi/router.go index 6de62dbb9..8abd8af58 100644 --- a/core/validatorapi/router.go +++ b/core/validatorapi/router.go @@ -1144,7 +1144,6 @@ func writeError(ctx context.Context, w http.ResponseWriter, endpoint string, err } } - //nolint:usestdlibvars // we should not replace 100 with http.StatusContinue, it makes it less readable if aerr.StatusCode/100 == 4 { // 4xx status codes are client errors (not server), so log as debug only. 
log.Debug(ctx, "Validator api 4xx response", diff --git a/dkg/dkg_test.go b/dkg/dkg_test.go index b830c3d8c..97e1dd2bf 100644 --- a/dkg/dkg_test.go +++ b/dkg/dkg_test.go @@ -6,6 +6,7 @@ import ( "context" "encoding/hex" "encoding/json" + "errors" "fmt" "math/rand" "net/http" @@ -537,7 +538,9 @@ func TestSyncFlow(t *testing.T) { var disconnectedCount int for err := range dkgErrChan { testutil.SkipIfBindErr(t, err) - require.NoError(t, err) + if !errors.Is(err, context.Canceled) { + require.NoError(t, err) + } disconnectedCount++ if disconnectedCount == test.nodes { break diff --git a/dkg/dkgpb/v1/bcast.pb.go b/dkg/dkgpb/v1/bcast.pb.go index 2b31dc35e..1f8063c3e 100644 --- a/dkg/dkgpb/v1/bcast.pb.go +++ b/dkg/dkgpb/v1/bcast.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: dkg/dkgpb/v1/bcast.proto @@ -32,11 +32,9 @@ type BCastSigRequest struct { func (x *BCastSigRequest) Reset() { *x = BCastSigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BCastSigRequest) String() string { @@ -47,7 +45,7 @@ func (*BCastSigRequest) ProtoMessage() {} func (x *BCastSigRequest) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -87,11 +85,9 @@ type BCastSigResponse struct { func (x *BCastSigResponse) Reset() { *x = BCastSigResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BCastSigResponse) String() string { @@ -102,7 +98,7 @@ func (*BCastSigResponse) ProtoMessage() {} func (x *BCastSigResponse) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -143,11 +139,9 @@ type BCastMessage struct { func (x *BCastMessage) Reset() { *x = BCastMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BCastMessage) String() string { @@ -158,7 +152,7 @@ func (*BCastMessage) ProtoMessage() {} func (x *BCastMessage) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_bcast_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -258,44 +252,6 @@ func file_dkg_dkgpb_v1_bcast_proto_init() { if File_dkg_dkgpb_v1_bcast_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_dkg_dkgpb_v1_bcast_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*BCastSigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_dkg_dkgpb_v1_bcast_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*BCastSigResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_dkg_dkgpb_v1_bcast_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*BCastMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/dkg/dkgpb/v1/frost.pb.go b/dkg/dkgpb/v1/frost.pb.go index 84e3f2028..3a5f6d52e 100644 --- a/dkg/dkgpb/v1/frost.pb.go +++ b/dkg/dkgpb/v1/frost.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: dkg/dkgpb/v1/frost.proto @@ -32,11 +32,9 @@ type FrostMsgKey struct { func (x *FrostMsgKey) Reset() { *x = FrostMsgKey{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FrostMsgKey) String() string { @@ -47,7 +45,7 @@ func (*FrostMsgKey) ProtoMessage() {} func (x *FrostMsgKey) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -93,11 +91,9 @@ type FrostRound1Casts struct { func (x *FrostRound1Casts) Reset() { *x = FrostRound1Casts{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FrostRound1Casts) String() string { @@ -108,7 +104,7 @@ func (*FrostRound1Casts) ProtoMessage() {} func (x *FrostRound1Casts) 
ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -143,11 +139,9 @@ type FrostRound1Cast struct { func (x *FrostRound1Cast) Reset() { *x = FrostRound1Cast{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FrostRound1Cast) String() string { @@ -158,7 +152,7 @@ func (*FrostRound1Cast) ProtoMessage() {} func (x *FrostRound1Cast) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -211,11 +205,9 @@ type FrostRound1P2P struct { func (x *FrostRound1P2P) Reset() { *x = FrostRound1P2P{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FrostRound1P2P) String() string { @@ -226,7 +218,7 @@ func (*FrostRound1P2P) ProtoMessage() {} func (x *FrostRound1P2P) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -260,11 +252,9 @@ type FrostRound1ShamirShare struct { func (x *FrostRound1ShamirShare) Reset() { *x = FrostRound1ShamirShare{} - if 
protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FrostRound1ShamirShare) String() string { @@ -275,7 +265,7 @@ func (*FrostRound1ShamirShare) ProtoMessage() {} func (x *FrostRound1ShamirShare) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -321,11 +311,9 @@ type FrostRound2Casts struct { func (x *FrostRound2Casts) Reset() { *x = FrostRound2Casts{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FrostRound2Casts) String() string { @@ -336,7 +324,7 @@ func (*FrostRound2Casts) ProtoMessage() {} func (x *FrostRound2Casts) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -370,11 +358,9 @@ type FrostRound2Cast struct { func (x *FrostRound2Cast) Reset() { *x = FrostRound2Cast{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FrostRound2Cast) String() string { @@ -385,7 +371,7 
@@ func (*FrostRound2Cast) ProtoMessage() {} func (x *FrostRound2Cast) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_frost_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -517,92 +503,6 @@ func file_dkg_dkgpb_v1_frost_proto_init() { if File_dkg_dkgpb_v1_frost_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_dkg_dkgpb_v1_frost_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FrostMsgKey); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_dkg_dkgpb_v1_frost_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FrostRound1Casts); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_dkg_dkgpb_v1_frost_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*FrostRound1Cast); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_dkg_dkgpb_v1_frost_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*FrostRound1P2P); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_dkg_dkgpb_v1_frost_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*FrostRound1ShamirShare); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_dkg_dkgpb_v1_frost_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*FrostRound2Casts); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_dkg_dkgpb_v1_frost_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*FrostRound2Cast); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/dkg/dkgpb/v1/nodesigs.pb.go b/dkg/dkgpb/v1/nodesigs.pb.go index c76d912c8..f6ac5a1e6 100644 --- a/dkg/dkgpb/v1/nodesigs.pb.go +++ b/dkg/dkgpb/v1/nodesigs.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: dkg/dkgpb/v1/nodesigs.proto @@ -31,11 +31,9 @@ type MsgNodeSig struct { func (x *MsgNodeSig) Reset() { *x = MsgNodeSig{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_nodesigs_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_nodesigs_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MsgNodeSig) String() string { @@ -46,7 +44,7 @@ func (*MsgNodeSig) ProtoMessage() {} func (x *MsgNodeSig) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_nodesigs_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -120,20 +118,6 @@ func file_dkg_dkgpb_v1_nodesigs_proto_init() { if File_dkg_dkgpb_v1_nodesigs_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_dkg_dkgpb_v1_nodesigs_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*MsgNodeSig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/dkg/dkgpb/v1/sync.pb.go 
b/dkg/dkgpb/v1/sync.pb.go index 53d1a1e2f..e6218b050 100644 --- a/dkg/dkgpb/v1/sync.pb.go +++ b/dkg/dkgpb/v1/sync.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: dkg/dkgpb/v1/sync.proto @@ -35,11 +35,9 @@ type MsgSync struct { func (x *MsgSync) Reset() { *x = MsgSync{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_sync_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_sync_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MsgSync) String() string { @@ -50,7 +48,7 @@ func (*MsgSync) ProtoMessage() {} func (x *MsgSync) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_sync_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -111,11 +109,9 @@ type MsgSyncResponse struct { func (x *MsgSyncResponse) Reset() { *x = MsgSyncResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_dkg_dkgpb_v1_sync_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_dkg_dkgpb_v1_sync_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MsgSyncResponse) String() string { @@ -126,7 +122,7 @@ func (*MsgSyncResponse) ProtoMessage() {} func (x *MsgSyncResponse) ProtoReflect() protoreflect.Message { mi := &file_dkg_dkgpb_v1_sync_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -220,32 +216,6 @@ func file_dkg_dkgpb_v1_sync_proto_init() { if File_dkg_dkgpb_v1_sync_proto != nil { return } - if 
!protoimpl.UnsafeEnabled { - file_dkg_dkgpb_v1_sync_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*MsgSync); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_dkg_dkgpb_v1_sync_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*MsgSyncResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/docs/README.md b/docs/README.md index 8bc6bddde..b32cc817e 100644 --- a/docs/README.md +++ b/docs/README.md @@ -8,6 +8,6 @@ This page acts as an index for the charon (pronounced 'kharon') markdown documen - [Architecture](architecture.md): Overview of charon cluster and node architecture - [Project Structure](structure.md): Project folder structure - [Branching and Release Model](branching.md): Git branching and release model -- [Go Guidelines](goguidelines.md): Guidelines and principals relating to go development +- [Go Guidelines](goguidelines.md): Guidelines and principles relating to go development - [Contributing](contributing.md): How to contribute to charon; githooks, PR templates, etc. - [Distributed Key Generation](dkg.md): How charon can create distributed validator key shares remotely from a cluster-definition file. diff --git a/docs/architecture.md b/docs/architecture.md index ce5421088..61fb86721 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -151,7 +151,7 @@ Therefore, Charon v1.x will not work together with Charon v0.x. See *Version com ### Scheduler -The scheduler is the initiator of a duty in the core workflow. It resolves the which DVs in the cluster are active and +The scheduler is the initiator of a duty in the core workflow. 
It resolves which DVs in the cluster are active and is then responsible for starting a duty at the optimal time by calling the `fetcher`. DVs are identified by their root public key `PubKey`. @@ -325,7 +325,7 @@ type Entry struct { ValCommIdx int64 // validator committee index (0 for DutyProposer) } ``` -> ℹ️ Database entry fields are persistence friendly types and are not exported or used outside this component +> ℹ️ Database entry fields are persistence-friendly types and are not exported or used outside this component The database has the following indexes: - `Slot,DutyType,PubKey`: unique index for deduplication and idempotent inserts @@ -361,7 +361,7 @@ type DutyDB interface { ### Validator API The validator API provides a [beacon-node API](https://ethereum.github.io/beacon-APIs/#/ValidatorRequiredApi) to downstream VCs, intercepting some calls and proxying others directly to the upstream beacon node. -It mostly serves unsigned duty data requests from the `DutyDB` and sends the resulting partial signed duty objects to the `ParSigDB`. +It mostly serves unsigned duty data requests from the `DutyDB` and sends the resulting partially signed duty objects to the `ParSigDB`. Partial signed duty data values are defined as `ParSignedData` which extend `SignedData` values: ```go diff --git a/docs/branching.md b/docs/branching.md index 112681b6f..8138026e7 100644 --- a/docs/branching.md +++ b/docs/branching.md @@ -19,11 +19,11 @@ We follow [Trunk Based Development](https://trunkbaseddevelopment.com/) as a bra ## Controlled introduction of change: -- Since a feature cannot be added as a single big merge of a big feature branch, tools and patterns are required that allow gradual controlled introduction of increment changes without breaking. +- Since a feature cannot be added as a single big merge of a big feature branch, tools and patterns are required that allow gradual controlled introduction of incremental changes without breaking. 
- New code can be added as “dead code”. So, it has not been integrated into the actual program yet. Once it is properly complete, it can be integrated in a single PR. - Some features should however not be enabled straight into prod/mainnet, but should be rolled-out slowly being first tested in `alpha` (internal devnet only), then `beta` (internal and external testnet), and then only `stable` (enabled everywhere). This can be achieved by simple [feature switches](https://trunkbaseddevelopment.com/feature-flags/) (if statements) that enable features based on their `feature_set` status. - Another powerful pattern to gradually introduce change is [branching by abstraction](https://trunkbaseddevelopment.com/branch-by-abstraction/). This basically introduces an abstraction layer at the point where a new feature has to replace an old feature (like an interface). Using dependency injection, the new feature can be integrated during testing/staging while the old feature is still being used in production. -- Note that both feature switches and/or abstraction layers used to roll out a feature should be removed once released to prod/main-net. +- Note that both feature switches and/or abstraction layers used to roll out a feature should be removed once released to prod/mainnet. ### Release Process @@ -33,7 +33,7 @@ The important aspects of the release process are: - Releases are cut from release branches, not the main branch. Release branches are named `main-v0.X`. - Release candidates, `v0.X.Y-rc[1-99]`, are created for each patch release from commits in the release branch. They are thoroughly tested both internally and externally before a release is created. - Critical patches and fixes to releases are cherry-picked from main to the release branch. -- The Charon binary version, `charon version`, are inferred from git tags at build time using `ldflags`, not hardcoded app/version versions. 
+- The Charon binary version, `charon version`, is inferred from git tags at build time using `ldflags`, not hardcoded app/version versions. - Hardcoded Charon app/version is only used to indicate branch type and major version, `v0.X-rc` for release branches or `v0.Y-dev` for main branch. The process to follow for the next v0.16.0 release is the following: @@ -43,8 +43,8 @@ The process to follow for the next v0.16.0 release is the following: 4. The dev team also avoids adding risky or large changes during this “pre-release” period. 5. When all relevant changes have been included in main, a new “release branch”. It must be called `main-v0.16`. - Release branches are called `main-v0.X` - - Release branches are high risk branches, and must be treated with the same security mindset as the `main` branch. - - Note that github branch matching doesn’t support OR logic, so we chose a common `main*` prefix to identify all protected branches. + - Release branches are high-risk branches, and must be treated with the same security mindset as the `main` branch. + - Note that Github branch matching doesn’t support OR logic, so we chose a common `main*` prefix to identify all protected branches. 6. After the release branch has been created, the `main` branch app/version is manually updated to `v0.17-dev` and add `v0.17` to `version.Supported()` versions. - `v0.X-dev` indicates that the code is in the main branch. - It also indicates this is development only code not an official release. @@ -57,7 +57,7 @@ The process to follow for the next v0.16.0 release is the following: - Note that the `build-push-release` action should dynamically update the app/version to the value of the git tag when building the docker image. 9. Before a `v0.16.X` release is created, a `v0.16.X-rc[1-99]` release candidate needs to be created and thoroughly tested both internally and externally. 10. After a `v0.16.X` release was created, the release notes need to be created. 
- - The release github action does auto generate release notes. + - The release Github action does auto-generate release notes. - If they are incorrect, manual release notes can be created via: `go run testutil/genchangelog/main.go --range=v0.15.0..v0.16.0`. Note that images are built and tagged for each commit on the main and release branch using the app/version tag, e.g. `v0.X-dev` for `main`, and `v0.X-rc` for release branches. Main branch commits are also tagged with `latest`. diff --git a/docs/configuration.md b/docs/configuration.md index f41e32cb9..4d30b14ef 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -76,7 +76,7 @@ The `cluster-lock.json` has the following schema: "16000000000" ], "lock_hash": "0xabcdef...abcedef", // Hash of the cluster definition and distributed validators. Uniquely identifies a cluster lock. - "signature_aggregate": "0xabcdef...abcedef", // BLS aggregate signature of the lock hash signed by all the key shares of all the distributed validators. Proves that the key shares exist and attested to being part of this cluster. + "signature_aggregate": "0xabcdef...abcedef", // BLS aggregate signature of the lock hash signed by all the key shares of all the distributed validators. Proves that the key shares exist and attest to being part of this cluster. "node_signatures": ["0xabcdef...abcedef"] // Signatures of the lock hash by each operator. Proves that this lock file (and the validators) was generated by all the operators } ``` @@ -173,7 +173,7 @@ Flags: --p2p-disable-reuseport Disables TCP port reuse for outgoing libp2p connections. --p2p-external-hostname string The DNS hostname advertised by libp2p. This may be used to advertise an external DNS. --p2p-external-ip string The IP address advertised by libp2p. This may be used to advertise an external IP. - --p2p-relays strings Comma-separated list of libp2p relay URLs or multiaddrs. 
(default [https://0.relay.obol.tech,https://1.relay.obol.tech]) + --p2p-relays strings Comma-separated list of libp2p relay URLs or multiaddrs. (default [https://0.relay.obol.tech,https://2.relay.obol.dev,https://1.relay.obol.tech]) --p2p-tcp-address strings Comma-separated list of listening TCP addresses (ip and port) for libP2P traffic. Empty default doesn't bind to local port therefore only supports outgoing connections. --private-key-file string The path to the charon enr private key file. (default ".charon/charon-enr-private-key") --private-key-file-lock Enables private key locking to prevent multiple instances using the same key. diff --git a/docs/contributing.md b/docs/contributing.md index a52846782..4b5d6a177 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -18,12 +18,12 @@ instead of opening a public issue or PR on GitHub. - If you have found a bug... - Check for existing bug reports of the same issue in GitHub. - Do not post about it publicly if it is a suspected vulnerability to protect Obol's users; - instead use `security@obol.tech`. + instead, use `security@obol.tech`. - Maybe send a message in relevant community channels if you are unsure whether you are seeing a technical issue. - Open a GitHub issue if everything else checks out 🤓 - Are you thinking of a small change that just makes sense? Feel free to submit a PR. - If you're envisioning a larger feature or are just looking for a discussion, - let's chat in the [Obol Discord](https://discord.com/invite/n6ebKsX46w)under `#dev-community`. + let's chat in the [Obol Discord](https://discord.com/invite/n6ebKsX46w) under `#dev-community`. - A quick sync before coding avoids conflicting work and makes large PRs much more likely to be accepted. - 👀 The Discord channel is currently _invite-only_ to prevent spam. Please ping a team member to get access. @@ -42,7 +42,7 @@ an associated issue with a design discussed and decided upon. Small bug fixes an improvements don't need issues. 
New features and bug fixes must have tests. Documentation may need to be updated. If you're -unsure what to update, open the PR, and we'll discuss during review. +unsure what to update, open the PR and we'll discuss during review. Note that PRs updating dependencies and new Go versions are not accepted. Please file an issue instead. @@ -72,15 +72,15 @@ Note: PRs can only be merged by obol-bulldozer bot. It is author's responsibilit ### PR Template - **PRs are always squash merged into main**. -- The PR title and body is used as the final squash-merged git commit message. +- The PR title and body are used as the final squash-merged git commit message. - The PR's original git commits are therefore lost (so naming isn't specified) - **PR title format** is defined as: - - Following the [go team's commit format](https://github.com/golang/go/commits/master): `package[/path]: concise overview of change` + - Following the [Go team's commit format](https://github.com/golang/go/commits/master): `package[/path]: concise overview of change` - Prefix identifies the primary package affected by the change. - Prefix can be a single or double hierarchical package name, but not three or more. E.g. `app` , or `app/tracer`. - The rest of the title must be a concise high-level overview in the present tense and starting with lower case. - **PR body format** is defined as: - - Start with detailed description of the change. + - Start with a detailed description of the change. - Description should use proper grammar in present tense. - Ends with a list of tags (some required, others optional) (`^tag: value of this tag\n`): - `category`: required; one of: `refactor`, `bug`, `feature`, `docs`, `release`, `tidy`, `fixbuild`. @@ -90,7 +90,7 @@ Note: PRs can only be merged by obol-bulldozer bot. It is author's responsibilit ``` runner/tracer: add jaeger otel exporter -Adds the jaeger exporter to our opentelemetery infra. +Adds the jaeger exporter to our opentelemetry infra. 
category: feature ticket: #206 @@ -113,7 +113,7 @@ for each PR commit. But it is highly recommended running the githooks locally wh To install githooks: - Follow installation instructions [here](https://pre-commit.com/#installation) to install the `pre-commit` tool. -- Once installed, run `pre-commit install` in the project's root directory. This will setup the hooks. +- Once installed, run `pre-commit install` in the project's root directory. This will set up the hooks. - Note you can skip the hooks by committing with `-n`: `git commit -n -m "look mom no githooks"` To update githooks: @@ -123,7 +123,7 @@ pre-commit clean The **linter** used is [golangci-lint](https://golangci-lint.run/). It runs as part of the githooks and is configured in [.golangci.yml](../.golangci.yml) -Different **dev tools** are used in throughout the code base and are defined and installed from [tools.go](../tools.go). To install the dev tools run: `go generate tools.go` +Different **dev tools** are used throughout the code base and are defined and installed from [tools.go](../tools.go). To install the dev tools run: `go generate tools.go` ## Code Review We tend to closely follow the following code review structure: diff --git a/docs/dkg.md b/docs/dkg.md index 876df4967..2a2053a8f 100644 --- a/docs/dkg.md +++ b/docs/dkg.md @@ -20,7 +20,7 @@ The charon client has the responsibility of securely completing a distributed ke A distributed key generation ceremony involves `Operators` and their `Charon clients`. -- An `Operator` is identified by their Ethereum address. They will sign with this address's private key to authenticate their charon client ahead of the ceremony. 
The signature will be of; a hash of the charon clients ENR public key, the `cluster_definition_hash`, and an incrementing `nonce`, allowing for a direct linkage between a user, their charon client, and the cluster this client is intended to service, while retaining the ability to update the charon client by incrementing the nonce value and re-signing like the standard ENR spec. +- An `Operator` is identified by their Ethereum address. They will sign with this address's private key to authenticate their charon client ahead of the ceremony. The signature will be of a hash of the charon client’s ENR public key, the `cluster_definition_hash`, and an incrementing `nonce`, allowing for a direct linkage between a user, their charon client, and the cluster this client is intended to service, while retaining the ability to update the charon client by incrementing the nonce value and re-signing like the standard ENR spec. - A `Charon client` is also identified by a public/private key pair, in this instance, the public key is represented as an [Ethereum Node Record](https://eips.ethereum.org/EIPS/eip-778) (ENR). This is a standard identity format for both EL and CL clients. These ENRs are used by each charon node to identify its cluster peers over the internet, and to communicate with one another in an [end to end encrypted manner](https://github.com/libp2p/go-libp2p-noise). These keys need to be created by each operator before they can participate in a cluster creation. @@ -36,8 +36,8 @@ This cluster-definition file is created with the help of the [Distributed Valida - The list of participants in the cluster specified by Ethereum address(/ENS) - The threshold of fault tolerance required (if not choosing the safe default) - The network (fork_version/chainId) that this cluster will validate on -- These key pieces of information form the basis of the cluster configuration. 
These fields (and some technical fields like DKG algorithm to use) are serialised and merklised to produce the manifests `cluster_definition_hash`. This merkle root will be used to confirm that their is no ambiguity or deviation between manifests when they are provided to charon nodes. -- Once the leader is satisfied with the configuration they publish it to the launchpad's data availability layer for the other participants to access. (For early development the launchpad will use a centralised backend db to store the cluster configuration. Near production, solutions like IPFS or arweave may be more suitable for the long term decentralisation of the launchpad.) +- These key pieces of information form the basis of the cluster configuration. These fields (and some technical fields like DKG algorithm to use) are serialised and merklised to produce the manifests `cluster_definition_hash`. This merkle root will be used to confirm that there is no ambiguity or deviation between manifests when they are provided to charon nodes. +- Once the leader is satisfied with the configuration, they publish it to the launchpad's data availability layer for the other participants to access. (For early development the launchpad will use a centralised backend db to store the cluster configuration. Near production, solutions like IPFS or arweave may be more suitable for the long term decentralisation of the launchpad.) - The leader will then share the URL to this ceremony with their intended participants. - Anyone that clicks the ceremony url, or inputs the `config_hash` when prompted on the landing page will be brought to the ceremony status page. 
(After completing all disclaimers and advisories) - A "Connect Wallet" button will be visible beneath the ceremony status container, a participant can click on it to connect their wallet to the site @@ -53,7 +53,7 @@ This cluster-definition file is created with the help of the [Distributed Valida ## Carrying out the DKG ceremony -Once participant has their cluster-definition file prepared, they will pass the file to charon's `dkg` command. Charon will read the ENRs in the cluster-definition, confirm that its ENR is present, and then will reach out to bootnodes that are deployed to find the other ENRs on the network. (Fresh ENRs just have a public key and an IP address of 0.0.0.0 until they are loaded into a live charon client, which will update the IP address and increment the ENRs nonce and resign with the clients private key. If an ENR with a higher nonce is seen to be a charon client, they will update the IP address of that ENR in their address book.) +Once participants has their cluster-definition file prepared, they will pass the file to charon's `dkg` command. Charon will read the ENRs in the cluster-definition, confirm that its ENR is present, and then will reach out to bootnodes that are deployed to find the other ENRs on the network. (Fresh ENRs just have a public key and an IP address of 0.0.0.0 until they are loaded into a live charon client, which will update the IP address and increment the ENRs nonce and resign with the clients private key. If an ENR with a higher nonce is seen to be a charon client, they will update the IP address of that ENR in their address book.) Once all clients in the cluster can establish a connection with one another and they each complete a handshake (confirm everyone has a matching `cluster_definition_hash`), the ceremony begins. 
@@ -74,7 +74,7 @@ Once the ceremony is complete, all participants should take a backup of the crea ## Preparing for validator activation -Once the ceremony is complete, and secure backups of key shares have been made by each operator. They must now load these key shares into their validator clients, and run the `charon run` command to turn it into operational mode. +Once the ceremony is complete and secure backups of key shares have been made by each operator, they must now load these key shares into their validator clients, and run the `charon run` command to turn it into operational mode. All operators should confirm that their charon client logs indicate all nodes are online and connected. They should also verify the readiness of their beacon clients and validator clients. Charon's grafana dashboard is a good way to see the readiness of the full cluster from its perspective. diff --git a/docs/goguidelines.md b/docs/goguidelines.md index 78eabeb91..574233ad0 100644 --- a/docs/goguidelines.md +++ b/docs/goguidelines.md @@ -1,6 +1,6 @@ # Charon Go Guidelines -This page contains guidelines, principals and best practices relating to how we write go code. +This page contains guidelines, principles and best practices relating to how we write go code. As an open source project, we need to aim for high code quality, consistency and canonical go. ## Required Knowledge @@ -29,7 +29,7 @@ thousand ways to approach a problem. The Charon codebase doesn't follow the comm Instead, it follows a more procedural style for a focus on *functions and values*, [#AlgorthimsAndDataStructuresOverTypes](https://en.wikipedia.org/wiki/Object-oriented_programming#cite_note-48). This style can be summarized by the following tradeoffs: - Prefer **unexported over exported** types and functions. 
[#WriteShyCode](https://dave.cheney.net/practical-go/presentations/qcon-china.html#_package_design) -- Prefer **functions over methods** as methods lends itself to stateful code while functions are stateless. [#FunctionsOverMethods](https://kellysutton.com/2018/07/13/simple-made-easy-methods-vs-functions.html) +- Prefer **functions over methods** as methods lend themselves to stateful code while functions are stateless. [#FunctionsOverMethods](https://kellysutton.com/2018/07/13/simple-made-easy-methods-vs-functions.html) - Prefer **structs over objects** as structs tend to be more on the immutable data side while “objects” tend to be mutable and combine data with logic. [#TheValueOfValues](https://www.youtube.com/watch?v=-I-VpPMzG7c) - Prefer **explicit over implement** as explicit code doesn’t hide anything while implicit code does. - Prefer **immutability over mutability** as that results in code that is easier to reason about and debug and compose. @@ -146,7 +146,7 @@ Please try to inform your decisions by the following style for improved consiste - Note that passing pointers around is in general not faster than non-pointers (except in some edge cases). ### Naming: - - Data labels should be snake_case. This include json fields, structured logging fields, prometheus labels etc. + - Data labels should be snake_case. This includes json fields, structured logging fields, prometheus labels etc. - Go package names should be concise; aim for a single noun (`validator`) or two concatenated nouns (`validatorapi`). Avoid underscores or three word nouns. ### Declarations: diff --git a/docs/metrics.md b/docs/metrics.md index 4bcfcbe96..6ad754a90 100644 --- a/docs/metrics.md +++ b/docs/metrics.md @@ -4,7 +4,7 @@ This document contains all the prometheus metrics exposed by a charon node. All metrics contain the following labels, so they are omitted from the table below: - `cluster_hash`: The cluster lock hash uniquely identifying the cluster. 
-- `clustter_name`: The cluster lock name. +- `cluster_name`: The cluster lock name. - `cluster_network`: The cluster network name; goerli, mainnet, etc. - `cluster_peer`: The name of this node in the cluster. It is determined from the operator ENR. @@ -43,6 +43,7 @@ when storing metrics from multiple nodes or clusters in one Prometheus instance. | `core_bcast_recast_errors_total` | Counter | The total count of failed recasted registrations by source; `pregen` vs `downstream` | `source` | | `core_bcast_recast_registration_total` | Counter | The total number of unique validator registration stored in recaster per pubkey | `pubkey` | | `core_bcast_recast_total` | Counter | The total count of recasted registrations by source; `pregen` vs `downstream` | `source` | +| `core_consensus_decided_leader_index` | Gauge | Leader node index of the decision round by duty. | `duty` | | `core_consensus_decided_rounds` | Gauge | Number of rounds it took to decide consensus instances by duty and timer type. | `duty, timer` | | `core_consensus_duration_seconds` | Histogram | Duration of a consensus instance in seconds by duty and timer type. | `duty, timer` | | `core_consensus_error_total` | Counter | Total count of consensus errors | | diff --git a/docs/structure.md b/docs/structure.md index c7ab166d2..9d99bddfb 100644 --- a/docs/structure.md +++ b/docs/structure.md @@ -78,7 +78,7 @@ charon/ # project root - `version`: Print charon version - Defines and parses [viper](https://github.com/spf13/viper) configuration parameters for required by each command. - `cluster/`: Cluster config definition and files formats - - `cluster-definition.json` defines the intended cluster including confutation including operators. + - `cluster-definition.json` defines the intended cluster configuration, including operators. - `cluster-lock.json` extends cluster definition adding distributed validator public keys and public shares. 
- `dkg/`: Distributed Key Generation command - Runs the dkg command that takes a cluster definition as input and generates a cluster lock file and private shares as output. diff --git a/eth2util/keymanager/keymanager.go b/eth2util/keymanager/keymanager.go index fdfaed76b..124f0dfab 100644 --- a/eth2util/keymanager/keymanager.go +++ b/eth2util/keymanager/keymanager.go @@ -110,7 +110,6 @@ func postKeys(ctx context.Context, addr, authToken string, reqBody keymanagerReq } _ = resp.Body.Close() - //nolint:usestdlibvars // we should not replace 100 with http.StatusContinue, it makes it less readable if resp.StatusCode/100 != 2 { return errors.New("failed posting keys", z.Int("status", resp.StatusCode), z.Str("body", string(data))) } diff --git a/go.mod b/go.mod index d21da46e7..2d23396f7 100644 --- a/go.mod +++ b/go.mod @@ -1,53 +1,53 @@ module github.com/obolnetwork/charon -go 1.22 +go 1.23 require ( - github.com/attestantio/go-eth2-client v0.21.9 + github.com/attestantio/go-builder-client v0.5.1 + github.com/attestantio/go-eth2-client v0.21.10 github.com/bufbuild/buf v1.35.1 github.com/coinbase/kryptology v1.5.6-0.20220316191335-269410e1b06b github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 - github.com/ferranbt/fastssz v0.1.3 - github.com/golang/snappy v0.0.4 + github.com/ferranbt/fastssz v0.1.4 + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/google/gofuzz v1.2.0 github.com/gorilla/mux v1.8.1 - github.com/herumi/bls-eth-go-binary v1.35.0 + github.com/herumi/bls-eth-go-binary v1.36.1 github.com/holiman/uint256 v1.3.1 github.com/ipfs/go-log/v2 v2.5.1 github.com/jonboulle/clockwork v0.4.0 github.com/jsternberg/zap-logfmt v1.3.0 github.com/libp2p/go-libp2p v0.33.2 github.com/libp2p/go-msgio v0.3.0 - github.com/multiformats/go-multiaddr v0.13.0 - github.com/pelletier/go-toml/v2 v2.2.2 - github.com/prometheus/client_golang v1.19.1 + github.com/multiformats/go-multiaddr v0.14.0 + github.com/prometheus/client_golang v1.20.5 
github.com/prometheus/client_model v0.6.1 github.com/protolambda/eth2-shuffle v1.1.0 github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e github.com/r3labs/sse/v2 v2.10.0 github.com/rs/zerolog v1.33.0 - github.com/showwin/speedtest-go v1.7.8 + github.com/showwin/speedtest-go v1.7.9 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.4.1 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 - go.opentelemetry.io/otel v1.28.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 + go.opentelemetry.io/otel v1.32.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 - go.opentelemetry.io/otel/sdk v1.28.0 - go.opentelemetry.io/otel/trace v1.28.0 - go.uber.org/automaxprocs v1.5.3 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 + go.opentelemetry.io/otel/sdk v1.32.0 + go.opentelemetry.io/otel/trace v1.32.0 + go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.25.0 + golang.org/x/crypto v0.29.0 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 - golang.org/x/sync v0.7.0 - golang.org/x/term v0.22.0 - golang.org/x/time v0.5.0 - golang.org/x/tools v0.23.0 - google.golang.org/protobuf v1.34.2 + golang.org/x/sync v0.9.0 + golang.org/x/term v0.26.0 + golang.org/x/time v0.8.0 + golang.org/x/tools v0.27.0 + google.golang.org/protobuf v1.35.2 gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) @@ -59,18 +59,18 @@ require ( connectrpc.com/otelconnect v0.7.0 // indirect filippo.io/edwards25519 v1.0.0-rc.1 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/benbjohnson/clock v1.3.5 
// indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.7.0 // indirect + github.com/bits-and-blooms/bitset v1.13.0 // indirect github.com/btcsuite/btcd v0.22.3 // indirect github.com/bufbuild/protocompile v0.14.0 // indirect github.com/bufbuild/protoplugin v0.0.0-20240323223605-e2735f6c31ee // indirect github.com/bufbuild/protovalidate-go v0.6.2 // indirect github.com/bufbuild/protoyaml-go v0.1.9 // indirect github.com/bwesterb/go-ristretto v1.2.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/gnark-crypto v0.12.1 // indirect github.com/containerd/cgroups v1.1.0 // indirect @@ -82,11 +82,12 @@ require ( github.com/distribution/reference v0.6.0 // indirect github.com/docker/cli v26.1.4+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v27.1.0+incompatible // indirect + github.com/docker/docker v27.1.1+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/elastic/gosigar v0.14.2 // indirect + github.com/emicklei/dot v1.6.2 // indirect github.com/fatih/color v1.16.0 // indirect github.com/felixge/fgprof v0.9.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -120,6 +121,7 @@ require ( github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/koron/go-ssdp v0.0.4 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect @@ -151,18 +153,20 @@ require ( github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-multistream v0.5.0 // indirect 
github.com/multiformats/go-varint v0.0.7 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/onsi/ginkgo/v2 v2.15.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pk910/dynamic-ssz v0.0.3 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pkg/profile v1.7.0 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/common v0.48.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/quic-go/qpack v0.4.0 // indirect github.com/quic-go/quic-go v0.42.0 // indirect github.com/quic-go/webtransport-go v0.6.0 // indirect @@ -181,16 +185,16 @@ require ( github.com/subosito/gotenv v1.6.0 // indirect github.com/vbatts/tar-split v0.11.5 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.17.1 // indirect go.uber.org/fx v1.20.1 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/mod v0.19.0 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/net v0.31.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/text v0.20.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect 
google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 // indirect diff --git a/go.sum b/go.sum index 8d31047ec..1c6188763 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGy github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/ObolNetwork/go-eth2-client v0.21.11-0.20240822135044-f0a5b21e02c6 h1:VEBrga7Dn5SwvJQEG3i2K7IAUQQvEmUulWxXoBDimnM= github.com/ObolNetwork/go-eth2-client v0.21.11-0.20240822135044-f0a5b21e02c6/go.mod h1:d7ZPNrMX8jLfIgML5u7QZxFo2AukLM+5m08iMaLdqb8= github.com/ObolNetwork/kryptology v0.0.0-20231016091344-eed023b6cac8 h1:IXoKQKGzebwtIzKADtZyAjL3MIr0m3zQFxlSxxWIdCU= @@ -31,6 +31,8 @@ github.com/ObolNetwork/kryptology v0.0.0-20231016091344-eed023b6cac8/go.mod h1:q github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/attestantio/go-builder-client v0.5.1 h1:zFeiWZrKBj43HkpaGchImkAjvarWdFv0gACLkBY0Pbs= +github.com/attestantio/go-builder-client v0.5.1/go.mod 
h1:1/ewo8zF6++C6Fldvtq5hjhp9ZAafIK91Vp7XrmUZsE= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= @@ -38,8 +40,8 @@ github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= -github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= +github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd v0.22.3 h1:kYNaWFvOw6xvqP0vR20RP1Zq1DVMBxEO8QN5d1/EfNg= github.com/btcsuite/btcd v0.22.3/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= @@ -60,8 +62,8 @@ github.com/bwesterb/go-ristretto v1.2.0 h1:xxWOVbN5m8NNKiSDZXE1jtZvZnC6JSJ9cYFAD github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= @@ -110,8 +112,8 @@ github.com/docker/cli v26.1.4+incompatible h1:I8PHdc0MtxEADqYJZvhBrW9bo8gawKwwen github.com/docker/cli v26.1.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.1.0+incompatible h1:rEHVQc4GZ0MIQKifQPHSFGV/dVgaZafgRf8fCPtDYBs= -github.com/docker/docker v27.1.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= +github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= @@ -123,6 +125,8 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= +github.com/emicklei/dot v1.6.2/go.mod 
h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= @@ -133,8 +137,8 @@ github.com/felixge/fgprof v0.9.4 h1:ocDNwMFlnA0NU0zSB3I52xkO4sFXk80VK9lXjLClu88= github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/ferranbt/fastssz v0.1.3 h1:ZI+z3JH05h4kgmFXdHuR1aWYsgrg7o+Fw7/NCzM16Mo= -github.com/ferranbt/fastssz v0.1.3/go.mod h1:0Y9TEd/9XuFlh7mskMPfXiI2Dkw4Ddg9EyXt1W7MRvE= +github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY= +github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= @@ -189,8 +193,8 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy 
v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84= github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= @@ -228,8 +232,8 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1 github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/herumi/bls-eth-go-binary v1.35.0 h1:4CgrKurBK4g0ZMKBdHq5CwK9slYe7Ei+HF+/n6RSkOI= -github.com/herumi/bls-eth-go-binary v1.35.0/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U= +github.com/herumi/bls-eth-go-binary v1.36.1 h1:SfLjxbO1fWkKtKS7J3Ezd1/5QXrcaTZgWynxdSe10hQ= +github.com/herumi/bls-eth-go-binary v1.36.1/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U= github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs= github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= @@ -284,6 +288,8 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= 
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= @@ -364,8 +370,8 @@ github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9 github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.13.0 h1:BCBzs61E3AGHcYYTv8dqRH43ZfyrqM8RXVPT8t13tLQ= -github.com/multiformats/go-multiaddr v0.13.0/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= +github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU= +github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= @@ -382,6 +388,8 @@ github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dy github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod 
h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= @@ -416,21 +424,23 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= -github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.12.0 
h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/protolambda/eth2-shuffle v1.1.0 h1:gixIBI84IeugTwwHXm8vej1bSSEhueBCSryA4lAKRLU= github.com/protolambda/eth2-shuffle v1.1.0/go.mod h1:FhA2c0tN15LTC+4T9DNVm+55S7uXTTjQ8TQnBuXlkF8= github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e h1:ATgOe+abbzfx9kCPeXIW4fiWyDdxlwHw07j8UGhdTd4= github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e/go.mod h1:wmuf/mdK4VMD+jA9ThwcUKjg3a2XWM9cVfFYjDyY4j4= +github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4= +github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= github.com/quic-go/quic-go v0.42.0 h1:uSfdap0eveIl8KXnipv9K7nlwZ5IqLlYOpJ58u5utpM= @@ -457,8 +467,8 @@ github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgY github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/showwin/speedtest-go v1.7.8 h1:UZbFQ/ArVgPvkR03egSeTM2FXBd6qJsLp8lzt9aeod0= -github.com/showwin/speedtest-go v1.7.8/go.mod h1:uLgdWCNarXxlYsL2E5TOZpCIwpgSWnEANZp7gfHXHu0= +github.com/showwin/speedtest-go v1.7.9 h1:5b3T3U3WSppVXFqsIqF1zdHRYKKVuPNpzFU71HnYNEY= +github.com/showwin/speedtest-go v1.7.9/go.mod h1:uLgdWCNarXxlYsL2E5TOZpCIwpgSWnEANZp7gfHXHu0= github.com/shurcooL/component 
v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= @@ -522,8 +532,6 @@ github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8 github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/umbracle/gohashtree v0.0.2-alpha.0.20230207094856-5b775a815c10 h1:CQh33pStIp/E30b7TxDlXfM0145bn2e8boI30IxAhTg= -github.com/umbracle/gohashtree v0.0.2-alpha.0.20230207094856-5b775a815c10/go.mod h1:x/Pa0FF5Te9kdrlZKJK82YmAkvL8+f989USgz6Jiw7M= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= @@ -539,33 +547,33 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod 
h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= +go.opentelemetry.io/otel/metric v1.32.0 
h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k= go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= go.uber.org/fx v1.20.1 h1:zVwVQGS8zYvhh9Xxcu4w1M6ESyeMzebzj2NbSayZ4Mk= @@ -591,8 +599,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto 
v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= @@ -605,8 +613,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -624,8 +632,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -639,8 +647,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -668,21 +676,21 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -695,8 +703,8 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -725,8 +733,8 @@ google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= google.golang.org/grpc v1.64.0/go.mod 
h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/Knetic/govaluate.v3 v3.0.0 h1:18mUyIt4ZlRlFZAAfVetz4/rzlJs9yhN+U02F4u1AOc= gopkg.in/Knetic/govaluate.v3 v3.0.0/go.mod h1:csKLBORsPbafmSCGTEh3U7Ozmsuq8ZSIlKk1bcqph0E= gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= @@ -749,6 +757,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= diff --git a/p2p/bootnode.go b/p2p/bootnode.go index 53d7f3380..4ebd4495b 100644 --- a/p2p/bootnode.go +++ b/p2p/bootnode.go @@ -153,7 +153,7 @@ func queryRelayAddrs(ctx context.Context, relayURL string, backoff func(), lockH if err != nil { log.Warn(ctx, "Failure querying relay addresses (will try again)", err) continue - } else if resp.StatusCode/100 != 2 { //nolint:usestdlibvars // we should not replace 100 with http.StatusContinue, it makes it less readable + } else if resp.StatusCode/100 != 2 { log.Warn(ctx, "Non-200 response querying relay 
addresses (will try again)", nil, z.Int("status_code", resp.StatusCode)) continue } diff --git a/tbls/herumi.go b/tbls/herumi.go index 0be2ef2a9..0c50c1599 100644 --- a/tbls/herumi.go +++ b/tbls/herumi.go @@ -84,6 +84,10 @@ func (Herumi) ThresholdSplitInsecure(t *testing.T, secret PrivateKey, total uint t.Helper() var p bls.SecretKey + if threshold <= 1 { + return nil, errors.New("threshold has to be greater than 1") + } + if err := p.Deserialize(secret[:]); err != nil { return nil, errors.Wrap(err, "cannot unmarshal bytes into Herumi secret key") } @@ -133,6 +137,10 @@ func (Herumi) ThresholdSplitInsecure(t *testing.T, secret PrivateKey, total uint func (Herumi) ThresholdSplit(secret PrivateKey, total uint, threshold uint) (map[int]PrivateKey, error) { var p bls.SecretKey + if threshold <= 1 { + return nil, errors.New("threshold has to be greater than 1") + } + if err := p.Deserialize(secret[:]); err != nil { return nil, errors.Wrap(err, "cannot unmarshal bytes into Herumi secret key") } diff --git a/testutil/compose/smoke/smoke_test.go b/testutil/compose/smoke/smoke_test.go index 449634b48..b4a921e42 100644 --- a/testutil/compose/smoke/smoke_test.go +++ b/testutil/compose/smoke/smoke_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/obolnetwork/charon/app/version" "github.com/obolnetwork/charon/testutil" "github.com/obolnetwork/charon/testutil/compose" ) @@ -84,26 +85,26 @@ func TestSmoke(t *testing.T) { }, Timeout: time.Minute * 2, }, - // TODO: https://github.com/ObolNetwork/charon/issues/3004 - // { - // Name: "run_version_matrix_with_dkg", - // PrintYML: true, - // ConfigFunc: func(conf *compose.Config) { - // conf.KeyGen = compose.KeyGenDKG - // // NOTE: Add external VCs when supported versions include minimal preset. 
- // conf.VCs = []compose.VCType{compose.VCMock} - // }, - // DefineTmplFunc: func(data *compose.TmplData) { - // // Use oldest supported version for cluster lock - // pegImageTag(data.Nodes, 0, last(version.Supported()[1:])+"-rc") - // }, - // RunTmplFunc: func(data *compose.TmplData) { - // // Node 0 is local build - // pegImageTag(data.Nodes, 1, nth(version.Supported(), 0)+"-dev") // Node 1 is previous commit on this branch (v0.X-dev/rc) Note this will fail for first commit on new branch version. - // pegImageTag(data.Nodes, 2, nth(version.Supported()[1:], 1)+"-rc") - // pegImageTag(data.Nodes, 3, nth(version.Supported()[1:], 2)+"-rc") - // }, - // }, + { + Name: "run_version_matrix_with_dkg", + PrintYML: true, + ConfigFunc: func(conf *compose.Config) { + conf.KeyGen = compose.KeyGenDKG + // NOTE: Add external VCs when supported versions include minimal preset. + conf.VCs = []compose.VCType{compose.VCMock} + }, + DefineTmplFunc: func(data *compose.TmplData) { + // Use oldest supported version for cluster lock + pegImageTag(data.Nodes, 0, last(version.Supported()[1:])+".0-rc1") + }, + RunTmplFunc: func(data *compose.TmplData) { + // Node 0 is local build + // Nodeы 1-3 use the previous release; ensure better diversity in the matrix when more releases are added. 
+ pegImageTag(data.Nodes, 1, nth(version.Supported(), 1)+".0-rc1") + pegImageTag(data.Nodes, 2, nth(version.Supported(), 1)+".0-rc1") + pegImageTag(data.Nodes, 3, nth(version.Supported(), 1)+".0-rc1") + }, + }, { Name: "teku_versions", ConfigFunc: func(conf *compose.Config) { @@ -171,8 +172,7 @@ func TestSmoke(t *testing.T) { for _, test := range tests { t.Run(test.Name, func(t *testing.T) { - dir, err := os.MkdirTemp("", "") - require.NoError(t, err) + dir := t.TempDir() conf := compose.NewDefaultConfig() conf.Monitoring = false @@ -204,7 +204,7 @@ func TestSmoke(t *testing.T) { autoConfig.LogFile = path.Join(*logDir, test.Name+".log") } - err = compose.Auto(context.Background(), autoConfig) + err := compose.Auto(context.Background(), autoConfig) testutil.RequireNoError(t, err) }) } @@ -212,17 +212,17 @@ func TestSmoke(t *testing.T) { // pegImageTag pegs the charon docker image tag for one of the nodes. // It overrides the default that uses locally built latest version. -// func pegImageTag(nodes []compose.TmplNode, index int, imageTag string) { -// nodes[index].ImageTag = imageTag -// nodes[index].Entrypoint = "/usr/local/bin/charon" // Use contains binary, not locally built latest version. -// } - -// // last returns the last element of a slice. -// func last(s []version.SemVer) string { -// return s[len(s)-1].String() -// } - -// // nth returns the nth element of a slice, wrapping if n > len(s). -// func nth(s []version.SemVer, n int) string { -// return s[n%len(s)].String() -// } +func pegImageTag(nodes []compose.TmplNode, index int, imageTag string) { + nodes[index].ImageTag = imageTag + nodes[index].Entrypoint = "/usr/local/bin/charon" // Use contains binary, not locally built latest version. +} + +// last returns the last element of a slice. +func last(s []version.SemVer) string { + return s[len(s)-1].String() +} + +// nth returns the nth element of a slice, wrapping if n > len(s). 
+func nth(s []version.SemVer, n int) string { + return s[n%len(s)].String() +} diff --git a/testutil/compose/static/lighthouse/Dockerfile b/testutil/compose/static/lighthouse/Dockerfile index 683787c43..4883967b8 100644 --- a/testutil/compose/static/lighthouse/Dockerfile +++ b/testutil/compose/static/lighthouse/Dockerfile @@ -1,4 +1,4 @@ -FROM sigp/lighthouse:v5.2.1 +FROM sigp/lighthouse:v5.3.0 ENV YQ_VERSION=v4.42.1 diff --git a/testutil/compose/static/lodestar/Dockerfile b/testutil/compose/static/lodestar/Dockerfile index 5745c4528..68e4d9077 100644 --- a/testutil/compose/static/lodestar/Dockerfile +++ b/testutil/compose/static/lodestar/Dockerfile @@ -1,6 +1,6 @@ -FROM chainsafe/lodestar:v1.20.2 +FROM chainsafe/lodestar:v1.23.0 -RUN apk update && apk add curl jq wget +RUN apt-get update && apt-get install -y curl jq wget ENV YQ_VERSION=v4.23.1 ENV YQ_BINARY=yq_linux_amd64 diff --git a/testutil/compose/static/lodestar/run.sh b/testutil/compose/static/lodestar/run.sh index b8350d0e1..ea04d5b97 100755 --- a/testutil/compose/static/lodestar/run.sh +++ b/testutil/compose/static/lodestar/run.sh @@ -17,15 +17,13 @@ for f in /compose/"${NODE}"/validator_keys/keystore-*.json; do node /usr/app/packages/cli/bin/lodestar validator import \ --network="dev" \ --importKeystores="$f" \ - --importKeystoresPassword="${f//json/txt}" + --importKeystoresPassword="${f%.json}.txt" done echo "Imported all keys" node /usr/app/packages/cli/bin/lodestar validator \ --network="dev" \ - --presetFile="/tmp/testnet/config.yaml" \ - --paramsFile="/tmp/testnet/config.yaml" \ --metrics=true \ --metrics.address="0.0.0.0" \ --metrics.port=5064 \ diff --git a/testutil/compose/static/vouch/Dockerfile b/testutil/compose/static/vouch/Dockerfile index 614157d3b..641530884 100644 --- a/testutil/compose/static/vouch/Dockerfile +++ b/testutil/compose/static/vouch/Dockerfile @@ -1,6 +1,6 @@ FROM wealdtech/ethdo:1.35.2 as ethdo -FROM attestant/vouch:1.8.2 +FROM attestant/vouch:1.9.0 COPY --from=ethdo 
/app/ethdo /app/ethdo diff --git a/testutil/integration/nightly_dkg_test.go b/testutil/integration/nightly_dkg_test.go index ed333f3f0..ab0ad6b10 100644 --- a/testutil/integration/nightly_dkg_test.go +++ b/testutil/integration/nightly_dkg_test.go @@ -148,8 +148,7 @@ func mimicDKGNode(parentCtx context.Context, t *testing.T, dkgConf dkg.Config, w t.Helper() var ( - ctx context.Context - cancelFunc context.CancelFunc + stopNode context.CancelFunc firstNode bool // True if node index is 0 allStarted bool // True if all nodes have started DKG firstTime = true // True if the node is starting for the first time @@ -158,13 +157,9 @@ func mimicDKGNode(parentCtx context.Context, t *testing.T, dkgConf dkg.Config, w firstNode = nodeIdx == 0 // runDKG runs a new instance of DKG. If a DKG is already running, it stops it before starting a new one. - runDKG := func() { - // If there's an instance already running, stop it - if ctx != nil { - cancelFunc() - } + runDKG := func() context.CancelFunc { + ctx, cancelFunc := context.WithCancel(parentCtx) - ctx, cancelFunc = context.WithCancel(parentCtx) log.Debug(ctx, "Starting DKG node", z.Int("node", nodeIdx), z.Bool("first_time", firstTime)) errCh := make(chan error, 1) @@ -178,6 +173,8 @@ func mimicDKGNode(parentCtx context.Context, t *testing.T, dkgConf dkg.Config, w }(ctx) err := <-errCh require.ErrorContains(t, err, ctxCanceledErr) + + return cancelFunc } for { @@ -186,12 +183,15 @@ func mimicDKGNode(parentCtx context.Context, t *testing.T, dkgConf dkg.Config, w allStarted = true case <-newWindowStarted: if firstNode && !firstTime { // Node 0 never restarts (is always up) - log.Debug(ctx, "Not restarting node", z.Int("node", nodeIdx)) + log.Debug(parentCtx, "Not restarting node", z.Int("node", nodeIdx)) continue } // Start the node - runDKG() + if stopNode != nil { + stopNode() + } + stopNode = runDKG() firstTime = false if firstNode { continue @@ -199,7 +199,7 @@ func mimicDKGNode(parentCtx context.Context, t *testing.T, dkgConf 
dkg.Config, w // Wait for some random duration before stopping the node stopDelay := calcStopDelay(t, window, nodeDownPeriod) - log.Debug(ctx, "Stopping node after delay", z.Int("node", nodeIdx), z.Str("delay", stopDelay.String())) + log.Debug(parentCtx, "Stopping node after delay", z.Int("node", nodeIdx), z.Str("delay", stopDelay.String())) select { case <-time.After(stopDelay): case <-allNodesStarted: @@ -207,8 +207,8 @@ func mimicDKGNode(parentCtx context.Context, t *testing.T, dkgConf dkg.Config, w } // Stop the node - cancelFunc() - log.Debug(ctx, "Node stopped", z.Int("node", nodeIdx)) + stopNode() + log.Debug(parentCtx, "Node stopped", z.Int("node", nodeIdx)) // If all nodes have started, there's no point in restarting the node if allStarted { @@ -216,10 +216,10 @@ func mimicDKGNode(parentCtx context.Context, t *testing.T, dkgConf dkg.Config, w } // Wait nodeDownPeriod before restarting the node - log.Debug(ctx, "Waiting before restarting node", z.Int("node", nodeIdx), z.Str("delay", nodeDownPeriod.String())) + log.Debug(parentCtx, "Waiting before restarting node", z.Int("node", nodeIdx), z.Str("delay", nodeDownPeriod.String())) select { case <-time.After(nodeDownPeriod): - runDKG() + stopNode = runDKG() case <-allNodesStarted: allStarted = true } @@ -231,8 +231,8 @@ func mimicDKGNode(parentCtx context.Context, t *testing.T, dkgConf dkg.Config, w } // Stop any existing running DKG and run the final DKG since all nodes are up now - if ctx != nil { - cancelFunc() + if stopNode != nil { + stopNode() } log.Debug(parentCtx, "Running final DKG", z.Int("node", nodeIdx)) diff --git a/testutil/obolapimock/obolapi_exit.go b/testutil/obolapimock/obolapi_exit.go index 4d321c03c..86f2de156 100644 --- a/testutil/obolapimock/obolapi_exit.go +++ b/testutil/obolapimock/obolapi_exit.go @@ -314,7 +314,7 @@ func cleanTmpl(tmpl string) string { "").Replace(tmpl) } -// MockServer returns a obol API mock test server. +// MockServer returns a Obol API mock test server. 
// It returns a http.Handler to be served over HTTP, and a function to add cluster lock files to its database. func MockServer(dropOnePsig bool, beacon eth2wrap.Client) (http.Handler, func(lock cluster.Lock)) { ts := testServer{ diff --git a/testutil/promrated/Dockerfile b/testutil/promrated/Dockerfile index d75382daf..4e490c82d 100644 --- a/testutil/promrated/Dockerfile +++ b/testutil/promrated/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.5-alpine AS builder +FROM golang:1.23.3-alpine AS builder # Install dependencies RUN apk add --no-cache build-base git diff --git a/testutil/promrated/rated.go b/testutil/promrated/rated.go index 68fb76182..ed14c384f 100644 --- a/testutil/promrated/rated.go +++ b/testutil/promrated/rated.go @@ -35,7 +35,7 @@ func getNetworkStatistics(ctx context.Context, ratedEndpoint string, ratedAuth s return networkEffectivenessData{}, errors.Wrap(err, "parse rated endpoint") } - url.Path = "/v0/eth/network/stats" + url.Path = "/v0/eth/network/overview" body, err := queryRatedAPI(ctx, url, ratedAuth, network) if err != nil { @@ -106,7 +106,7 @@ func queryRatedAPI(ctx context.Context, url *url.URL, ratedAuth string, network backoff() continue - } else if res.StatusCode/100 != 2 { //nolint:usestdlibvars // we should not replace 100 with http.StatusContinue, it makes it less readable + } else if res.StatusCode/100 != 2 { incRatedErrors(res.StatusCode) return nil, errors.New("not ok http response", z.Str("body", string(body))) diff --git a/testutil/promrated/rated_internal_test.go b/testutil/promrated/rated_internal_test.go index 04a72d458..4ac3cfbe0 100644 --- a/testutil/promrated/rated_internal_test.go +++ b/testutil/promrated/rated_internal_test.go @@ -17,7 +17,7 @@ import ( func TestGetNetworkStatistics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.Equal(t, "/v0/eth/network/stats", r.URL.Path) + require.Equal(t, "/v0/eth/network/overview", r.URL.Path) require.Equal(t, "Bearer 
auth", r.Header.Get("Authorization")) require.Equal(t, "prater", r.Header.Get("X-Rated-Network"))