From ebd64072e4080667ef6c08afa91d9cd8ae21bb67 Mon Sep 17 00:00:00 2001 From: "Derrick J. Wippler" Date: Thu, 17 Aug 2023 14:38:57 -0500 Subject: [PATCH] Introduced Release Candidate v3.0 CHANGES: * Introduced HTTPClient * Integrating DUH-RPC * Updated tests with new client * Fixed some lint issues * GRPC is no more * OTEL tracing for new client and handler * Updated benchmarks * Isolate the trace benchmark --- .gitignore | 2 + Makefile | 2 +- README.md | 50 +- algorithms.go | 21 +- benchmark_cache_test.go | 2 +- benchmark_test.go | 272 ++++++--- buf.gen.yaml | 11 +- buf.yaml | 8 +- client.go | 171 +++++- cluster/cluster.go | 28 +- cluster/cluster_test.go | 22 +- cmd/gubernator-cli/main.go | 78 ++- cmd/gubernator-cluster/main.go | 16 +- cmd/gubernator/main.go | 4 +- cmd/healthcheck/main.go | 6 +- config.go | 93 ++- .../main.tf | 1 - .../charts/gubernator/templates/_helpers.tpl | 19 +- .../gubernator/templates/deployment.yaml | 6 +- .../charts/gubernator/templates/service.yaml | 4 - contrib/charts/gubernator/values.yaml | 5 - contrib/k8s-deployment.yaml | 17 +- daemon.go | 475 +++++++--------- dns.go | 15 +- docker-compose-tls.yaml | 16 +- docker-compose.yaml | 20 +- docs/tracing.md | 33 +- etcd.go | 14 +- example.conf | 13 +- functional_test.go | 336 +++++------ global.go | 108 ++-- go.mod | 36 +- go.sum | 64 ++- grpc_stats.go | 145 ----- gubernator.go | 323 ++++++----- gubernator.pb.go | 411 +++++++------- gubernator.pb.gw.go | 240 -------- gubernator.proto | 53 +- gubernator_grpc.pb.go | 168 ------ handler.go | 168 ++++++ interval_test.go | 2 +- kubernetes.go | 4 +- lrucache_test.go | 2 +- memberlist.go | 4 +- metadata_carrier_test.go | 2 +- mock_cache_test.go | 2 +- mock_loader_test.go | 2 +- mock_store_test.go | 6 +- peer.go | 441 +++++++++++++++ peer.pb.go | 391 +++++++++++++ peers.proto => peer.proto | 30 +- peer_client.go | 534 ------------------ peer_client_test.go => peer_test.go | 37 +- peers.pb.go | 462 --------------- peers.pb.gw.go | 256 --------- peers_grpc.pb.go | 166 ------ python/gubernator/__init__.py | 21 - python/gubernator/gubernator_pb2.py | 57 -- python/gubernator/gubernator_pb2_grpc.py | 102 ---- python/gubernator/peers_pb2.py | 37 -- python/gubernator/peers_pb2_grpc.py | 104 ---- python/requirements-py2.txt | 15 - python/requirements-py3.txt | 11 - python/setup.py | 57 -- python/tests/__init__.py | 0 python/tests/test_client.py | 60 -- region_picker.go | 28 +- replicated_hash.go | 32 +- replicated_hash_test.go | 14 +- store.go | 12 +- store_test.go | 164 +++--- tls.go | 1 - tls_test.go | 72 ++- workers.go | 10 +- workers_test.go | 2 +- 75 files changed, 2549 insertions(+), 4067 deletions(-) delete mode 100644 grpc_stats.go delete mode 100644 gubernator.pb.gw.go delete mode 100644 gubernator_grpc.pb.go create mode 100644 handler.go create mode 100644 peer.go create mode 100644 peer.pb.go rename peers.proto => peer.proto (55%) delete mode 100644 peer_client.go rename peer_client_test.go => peer_test.go (73%) delete mode 100644 peers.pb.go delete mode 100644 peers.pb.gw.go delete mode 100644 peers_grpc.pb.go delete mode 100644 python/gubernator/__init__.py delete mode 100644 python/gubernator/gubernator_pb2.py delete mode 100644 python/gubernator/gubernator_pb2_grpc.py delete mode 100644 python/gubernator/peers_pb2.py delete mode 100644 python/gubernator/peers_pb2_grpc.py delete mode 100644 python/requirements-py2.txt delete mode 100644 python/requirements-py3.txt delete mode 100755 python/setup.py delete mode 100644 python/tests/__init__.py delete mode 100644 
python/tests/test_client.py diff --git a/.gitignore b/.gitignore index 976cfb4f..899d827b 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,5 @@ coverage.out coverage.html /gubernator /gubernator-cli +.run/ +tmp/ diff --git a/Makefile b/Makefile index fde55c98..1792a1c1 100644 --- a/Makefile +++ b/Makefile @@ -37,7 +37,7 @@ clean: .PHONY: proto proto: - scripts/proto.sh + buf build .PHONY: certs certs: diff --git a/README.md b/README.md index e60b5a9d..61286620 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,6 @@ Gubernator is a distributed, high performance, cloud native and stateless rate-l kubernetes or nomad trivial. * Gubernator holds no state on disk, It’s configuration is passed to it by the client on a per-request basis. -* Gubernator provides both GRPC and HTTP access to the API. * It Can be run as a sidecar to services that need rate limiting or as a separate service. * It Can be used as a library to implement a domain-specific rate limiting service. * Supports optional eventually consistent rate limit distribution for extremely @@ -38,11 +37,13 @@ $ docker-compose up -d ``` Now you can make rate limit requests via CURL ``` -# Hit the HTTP API at localhost:9080 (GRPC is at 9081) -$ curl http://localhost:9080/v1/HealthCheck +# Hit the HTTP API at localhost:9080 +$ curl http://localhost:9080/v1/health.check + +# TODO: Update this example # Make a rate limit request -$ curl http://localhost:9080/v1/GetRateLimits \ +$ curl http://localhost:9080/v1/rate-limit.check \ --header 'Content-Type: application/json' \ --data '{ "requests": [ @@ -59,7 +60,7 @@ $ curl http://localhost:9080/v1/GetRateLimits \ ### ProtoBuf Structure -An example rate limit request sent via GRPC might look like the following +An example rate limit request sent with protobuf might look like the following ```yaml rate_limits: # Scopes the request to a specific rate limit @@ -189,7 +190,7 @@ limiting service. When you use the library, your service becomes a full member of the cluster participating in the same consistent hashing and caching as a stand alone -Gubernator server would. All you need to do is provide the GRPC server instance +Gubernator server would. All you need to do is provide the server instance and tell Gubernator where the peers in your cluster are located. The `cmd/gubernator/main.go` is a great example of how to use Gubernator as a library. @@ -213,21 +214,14 @@ to support rate limit durations longer than a minute, day or month, calls to those rate limits that have durations over a self determined limit. ### API -All methods are accessed via GRPC but are also exposed via HTTP using the -[GRPC Gateway](https://github.com/grpc-ecosystem/grpc-gateway) #### Health Check Health check returns `unhealthy` in the event a peer is reported by etcd or kubernetes as `up` but the server instance is unable to contact that peer via it's advertised address. -###### GRPC -```grpc -rpc HealthCheck (HealthCheckReq) returns (HealthCheckResp) -``` - ###### HTTP ``` -GET /v1/HealthCheck +GET /v1/health.check ``` Example response: @@ -235,7 +229,7 @@ Example response: ```json { "status": "healthy", - "peer_count": 3 + "peerCount": 3 } ``` @@ -244,14 +238,9 @@ Rate limits can be applied or retrieved using this interface. If the client makes a request to the server with `hits: 0` then current state of the rate limit is retrieved but not incremented.
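+For example, the request below peeks at a rate limit without consuming a hit.
+This is a sketch: the endpoint is the `POST /v1/rate-limit.check` route documented
+below, the name, key, and limit values are placeholders, and `duration` is
+expressed in milliseconds (60000 = one minute).
+```
+# Peek at the current state of a rate limit without incrementing it
+$ curl http://localhost:9080/v1/rate-limit.check \
+    --header 'Content-Type: application/json' \
+    --data '{
+        "requests": [
+            {
+                "name": "requests_per_sec",
+                "unique_key": "account:12345",
+                "hits": 0,
+                "duration": 60000,
+                "limit": 100
+            }
+        ]
+    }'
+```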
-###### GRPC -```grpc -rpc GetRateLimits (GetRateLimitsReq) returns (GetRateLimitsResp) -``` - ###### HTTP ``` -POST /v1/GetRateLimits +POST /v1/rate-limit.check ``` Example Payload @@ -289,20 +278,10 @@ Example response: ``` ### Deployment -NOTE: Gubernator uses `etcd`, Kubernetes or round-robin DNS to discover peers and +NOTE: Gubernator uses `memberlist`, Kubernetes, or round-robin DNS to discover peers and establish a cluster. If you don't have either, the docker-compose method is the simplest way to try gubernator out. - -##### Docker with existing etcd cluster -```bash -$ docker run -p 8081:81 -p 9080:80 -e GUBER_ETCD_ENDPOINTS=etcd1:2379,etcd2:2379 \ - ghcr.io/mailgun/gubernator:latest - -# Hit the HTTP API at localhost:9080 -$ curl http://localhost:9080/v1/HealthCheck -``` - ##### Kubernetes ```bash # Download the kubernetes deployment spec @@ -321,14 +300,13 @@ you can use same fully-qualified domain name to both let your business logic con instances to find `gubernator` and for `gubernator` containers/instances to find each other. ##### TLS -Gubernator supports TLS for both HTTP and GRPC connections. You can see an example with -self signed certs by running `docker-compose-tls.yaml` +Gubernator supports TLS. You can see an example with self-signed certs by running `docker-compose-tls.yaml` ```bash # Run docker compose $ docker-compose -f docker-compose-tls.yaml up -d -# Hit the HTTP API at localhost:9080 (GRPC is at 9081) -$ curl --cacert certs/ca.cert --cert certs/gubernator.pem --key certs/gubernator.key https://localhost:9080/v1/HealthCheck +# Hit the HTTP API at localhost:9080 +$ curl -X POST --cacert certs/ca.cert --cert certs/gubernator.pem --key certs/gubernator.key https://localhost:9080/v1/health.check ``` ### Configuration diff --git a/algorithms.go b/algorithms.go index c7e6315c..cd0598f9 100644 --- a/algorithms.go +++ b/algorithms.go @@ -27,7 +27,7 @@ import ( ) // Implements token bucket algorithm for rate limiting. https://en.wikipedia.org/wiki/Token_bucket -func tokenBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq) (resp *RateLimitResp, err error) { +func tokenBucket(ctx context.Context, s Store, c Cache, r *RateLimitRequest) (resp *RateLimitResponse, err error) { tokenBucketTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("tokenBucket")) defer tokenBucketTimer.ObserveDuration() @@ -75,7 +75,7 @@ func tokenBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq) (resp * if s != nil { s.Remove(ctx, hashKey) } - return &RateLimitResp{ + return &RateLimitResponse{ Status: Status_UNDER_LIMIT, Limit: r.Limit, Remaining: r.Limit, @@ -112,7 +112,7 @@ func tokenBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq) (resp * t.Limit = r.Limit } - rl := &RateLimitResp{ + rl := &RateLimitResponse{ Status: t.Status, Limit: r.Limit, Remaining: t.Remaining, @@ -194,7 +194,7 @@ func tokenBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq) (resp * } // Called by tokenBucket() when adding a new item in the store.
-func tokenBucketNewItem(ctx context.Context, s Store, c Cache, r *RateLimitReq) (resp *RateLimitResp, err error) { +func tokenBucketNewItem(ctx context.Context, s Store, c Cache, r *RateLimitRequest) (resp *RateLimitResponse, err error) { now := MillisecondNow() expire := now + r.Duration @@ -220,7 +220,7 @@ func tokenBucketNewItem(ctx context.Context, s Store, c Cache, r *RateLimitReq) ExpireAt: expire, } - rl := &RateLimitResp{ + rl := &RateLimitResponse{ Status: Status_UNDER_LIMIT, Limit: r.Limit, Remaining: t.Remaining, @@ -246,10 +246,7 @@ func tokenBucketNewItem(ctx context.Context, s Store, c Cache, r *RateLimitReq) } // Implements leaky bucket algorithm for rate limiting https://en.wikipedia.org/wiki/Leaky_bucket -func leakyBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq) (resp *RateLimitResp, err error) { - leakyBucketTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("V1Instance.getRateLimit_leakyBucket")) - defer leakyBucketTimer.ObserveDuration() - +func leakyBucket(ctx context.Context, s Store, c Cache, r *RateLimitRequest) (resp *RateLimitResponse, err error) { if r.Burst == 0 { r.Burst = r.Limit } @@ -359,7 +356,7 @@ func leakyBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq) (resp * b.Remaining = float64(b.Burst) } - rl := &RateLimitResp{ + rl := &RateLimitResponse{ Limit: b.Limit, Remaining: int64(b.Remaining), Status: Status_UNDER_LIMIT, @@ -412,7 +409,7 @@ func leakyBucket(ctx context.Context, s Store, c Cache, r *RateLimitReq) (resp * } // Called by leakyBucket() when adding a new item in the store. -func leakyBucketNewItem(ctx context.Context, s Store, c Cache, r *RateLimitReq) (resp *RateLimitResp, err error) { +func leakyBucketNewItem(ctx context.Context, s Store, c Cache, r *RateLimitRequest) (resp *RateLimitResponse, err error) { now := MillisecondNow() duration := r.Duration rate := float64(duration) / float64(r.Limit) @@ -436,7 +433,7 @@ func leakyBucketNewItem(ctx context.Context, s Store, c Cache, r *RateLimitReq) Burst: r.Burst, } - rl := RateLimitResp{ + rl := RateLimitResponse{ Status: Status_UNDER_LIMIT, Limit: b.Limit, Remaining: r.Burst - r.Hits, diff --git a/benchmark_cache_test.go b/benchmark_cache_test.go index 1152366b..79a2401f 100644 --- a/benchmark_cache_test.go +++ b/benchmark_cache_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - gubernator "github.com/mailgun/gubernator/v2" + "github.com/mailgun/gubernator/v3" "github.com/mailgun/holster/v4/clock" ) diff --git a/benchmark_test.go b/benchmark_test.go index 56d0fe57..6a812a36 100644 --- a/benchmark_test.go +++ b/benchmark_test.go @@ -18,52 +18,51 @@ package gubernator_test import ( "context" + "fmt" + "os" "testing" - guber "github.com/mailgun/gubernator/v2" - "github.com/mailgun/gubernator/v2/cluster" - "github.com/mailgun/holster/v4/syncutil" + guber "github.com/mailgun/gubernator/v3" + "github.com/mailgun/gubernator/v3/cluster" "github.com/stretchr/testify/require" ) -func BenchmarkServer(b *testing.B) { +// go test benchmark_test.go -bench=BenchmarkTrace -benchtime=20s -trace=trace.out +// go tool trace trace.out +func BenchmarkTrace(b *testing.B) { + if err := cluster.StartWith([]guber.PeerInfo{ + {HTTPAddress: "127.0.0.1:9980", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9981", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9982", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9983", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9984", DataCenter: cluster.DataCenterNone}, + 
{HTTPAddress: "127.0.0.1:9985", DataCenter: cluster.DataCenterNone}, + + // DataCenterOne + {HTTPAddress: "127.0.0.1:9880", DataCenter: cluster.DataCenterOne}, + {HTTPAddress: "127.0.0.1:9881", DataCenter: cluster.DataCenterOne}, + {HTTPAddress: "127.0.0.1:9882", DataCenter: cluster.DataCenterOne}, + {HTTPAddress: "127.0.0.1:9883", DataCenter: cluster.DataCenterOne}, + }); err != nil { + fmt.Println(err) + os.Exit(1) + } + defer cluster.Stop() ctx := context.Background() conf := guber.Config{} err := conf.SetDefaults() require.NoError(b, err, "Error in conf.SetDefaults") - b.Run("GetPeerRateLimit() with no batching", func(b *testing.B) { - client := guber.NewPeerClient(guber.PeerConfig{ - Info: cluster.GetRandomPeer(cluster.DataCenterNone), - Behavior: conf.Behaviors, - }) + b.Run("CheckRateLimits() BATCHING", func(b *testing.B) { + client, err := guber.NewClient(guber.WithNoTLS(cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress)) + require.NoError(b, err, "Error in guber.NewClient") b.ResetTimer() for n := 0; n < b.N; n++ { - _, err := client.GetPeerRateLimit(context.Background(), &guber.RateLimitReq{ - Name: "get_peer_rate_limits_benchmark", - UniqueKey: guber.RandomString(10), - Behavior: guber.Behavior_NO_BATCHING, - Limit: 10, - Duration: 5, - Hits: 1, - }) - if err != nil { - b.Errorf("Error in client.GetPeerRateLimit: %s", err) - } - } - }) - - b.Run("GetRateLimit()", func(b *testing.B) { - client, err := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) - require.NoError(b, err, "Error in guber.DialV1Server") - - b.ResetTimer() - - for n := 0; n < b.N; n++ { - _, err := client.GetRateLimits(ctx, &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "get_rate_limit_benchmark", UniqueKey: guber.RandomString(10), @@ -72,77 +71,186 @@ func BenchmarkServer(b *testing.B) { Hits: 1, }, }, - }) + }, &resp) if err != nil { - b.Errorf("Error in client.GetRateLimits(): %s", err) + b.Errorf("Error in client.CheckRateLimits(): %s", err) } } }) +} + +// +// go test -bench=BenchmarkServer -benchmem=1 -benchtime=20s +// + +func BenchmarkServer(b *testing.B) { + if err := cluster.StartWith([]guber.PeerInfo{ + {HTTPAddress: "127.0.0.1:9980", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9981", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9982", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9983", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9984", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9985", DataCenter: cluster.DataCenterNone}, - b.Run("GetRateLimitGlobal()", func(b *testing.B) { - client, err := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) - require.NoError(b, err, "Error in guber.DialV1Server") + // DataCenterOne + {HTTPAddress: "127.0.0.1:9880", DataCenter: cluster.DataCenterOne}, + {HTTPAddress: "127.0.0.1:9881", DataCenter: cluster.DataCenterOne}, + {HTTPAddress: "127.0.0.1:9882", DataCenter: cluster.DataCenterOne}, + {HTTPAddress: "127.0.0.1:9883", DataCenter: cluster.DataCenterOne}, + }); err != nil { + fmt.Println(err) + os.Exit(1) + } + defer cluster.Stop() + ctx := context.Background() + conf := guber.Config{} + err := conf.SetDefaults() + require.NoError(b, err, "Error in conf.SetDefaults") + + //b.Run("Forward() NO_BATCHING", func(b *testing.B) { + 
// client, err := guber.NewPeer(guber.PeerConfig{ + // Info: cluster.GetRandomPeerInfo(cluster.DataCenterNone), + // Behavior: conf.Behaviors, + // }) + // if err != nil { + // b.Errorf("during guber.NewPeer(): %s", err) + // } + // + // b.ResetTimer() + // + // for n := 0; n < b.N; n++ { + // _, err := client.Forward(context.Background(), &guber.RateLimitRequest{ + // Name: "get_peer_rate_limits_benchmark", + // UniqueKey: guber.RandomString(10), + // Behavior: guber.Behavior_NO_BATCHING, + // Limit: 10, + // Duration: 5, + // Hits: 1, + // }) + // if err != nil { + // b.Errorf("Error in client.Forward: %s", err) + // } + // } + //}) + + //b.Run("CheckRateLimits() BATCHING", func(b *testing.B) { + // client, err := guber.NewClient(guber.WithNoTLS(cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress)) + // require.NoError(b, err, "Error in guber.NewClient") + // + // b.ResetTimer() + // + // for n := 0; n < b.N; n++ { + // var resp guber.CheckRateLimitsResponse + // err := client.CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + // Requests: []*guber.RateLimitRequest{ + // { + // Name: "get_rate_limit_benchmark", + // UniqueKey: guber.RandomString(10), + // Limit: 10, + // Duration: guber.Second * 5, + // Hits: 1, + // }, + // }, + // }, &resp) + // if err != nil { + // b.Errorf("Error in client.CheckRateLimits(): %s", err) + // } + // } + //}) + + b.Run("CheckRateLimits() NO_BATCHING", func(b *testing.B) { + client, err := guber.NewClient(guber.WithNoTLS(cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress)) + require.NoError(b, err, "Error in guber.NewClient") b.ResetTimer() for n := 0; n < b.N; n++ { - _, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "get_rate_limit_benchmark", UniqueKey: guber.RandomString(10), - Behavior: guber.Behavior_GLOBAL, + Behavior: guber.Behavior_NO_BATCHING, Limit: 10, Duration: guber.Second * 5, Hits: 1, }, }, - }) + }, &resp) if err != nil { - b.Errorf("Error in client.GetRateLimits: %s", err) - } - } - }) - - b.Run("HealthCheck", func(b *testing.B) { - client, err := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) - require.NoError(b, err, "Error in guber.DialV1Server") - - b.ResetTimer() - - for n := 0; n < b.N; n++ { - if _, err := client.HealthCheck(context.Background(), &guber.HealthCheckReq{}); err != nil { - b.Errorf("Error in client.HealthCheck: %s", err) + b.Errorf("Error in client.CheckRateLimits(): %s", err) } } }) - b.Run("Thundering herd", func(b *testing.B) { - client, err := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) - require.NoError(b, err, "Error in guber.DialV1Server") - - b.ResetTimer() - - fan := syncutil.NewFanOut(100) + //b.Run("CheckRateLimits() GLOBAL", func(b *testing.B) { + // client, err := guber.NewClient(guber.WithNoTLS(cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress)) + // require.NoError(b, err, "Error in guber.NewClient") + // + // b.ResetTimer() + // + // for n := 0; n < b.N; n++ { + // var resp guber.CheckRateLimitsResponse + // err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + // Requests: []*guber.RateLimitRequest{ + // { + // Name: "get_rate_limit_benchmark", + // UniqueKey: guber.RandomString(10), + // Behavior: guber.Behavior_GLOBAL, + // Limit: 10, + // 
Duration: guber.Second * 5, + // Hits: 1, + // }, + // }, + // }, &resp) + // if err != nil { + // b.Errorf("Error in client.CheckRateLimits: %s", err) + // } + // } + //}) - for n := 0; n < b.N; n++ { - fan.Run(func(o interface{}) error { - _, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ - { - Name: "get_rate_limit_benchmark", - UniqueKey: guber.RandomString(10), - Limit: 10, - Duration: guber.Second * 5, - Hits: 1, - }, - }, - }) - if err != nil { - b.Errorf("Error in client.GetRateLimits: %s", err) - } - return nil - }, nil) - } - }) + //b.Run("HealthCheck", func(b *testing.B) { + // client, err := guber.NewClient(guber.WithNoTLS(cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress)) + // require.NoError(b, err, "Error in guber.NewClient") + // + // b.ResetTimer() + // + // for n := 0; n < b.N; n++ { + // var resp guber.HealthCheckResponse + // if err := client.HealthCheck(context.Background(), &resp); err != nil { + // b.Errorf("Error in client.HealthCheck: %s", err) + // } + // } + //}) + // + //b.Run("Thundering herd", func(b *testing.B) { + // client, err := guber.NewClient(guber.WithNoTLS(cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress)) + // require.NoError(b, err, "Error in guber.NewClient") + // + // b.ResetTimer() + // + // fan := syncutil.NewFanOut(100) + // + // for n := 0; n < b.N; n++ { + // fan.Run(func(o interface{}) error { + // var resp guber.CheckRateLimitsResponse + // err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + // Requests: []*guber.RateLimitRequest{ + // { + // Name: "get_rate_limit_benchmark", + // UniqueKey: guber.RandomString(10), + // Limit: 10, + // Duration: guber.Second * 5, + // Hits: 1, + // }, + // }, + // }, &resp) + // if err != nil { + // b.Errorf("Error in client.CheckRateLimits: %s", err) + // } + // return nil + // }, nil) + // } + //}) } diff --git a/buf.gen.yaml b/buf.gen.yaml index 65a065c5..a1128cde 100644 --- a/buf.gen.yaml +++ b/buf.gen.yaml @@ -2,13 +2,4 @@ version: v1 plugins: - name: go out: ./ - opt: paths=source_relative - - plugin: buf.build/grpc/go:v1.3.0 - out: ./ - opt: - - paths=source_relative - - require_unimplemented_servers=false - - plugin: buf.build/grpc/python:v1.57.0 - out: ./python/gubernator - - plugin: buf.build/protocolbuffers/python - out: ./python/gubernator + opt: paths=source_relative \ No newline at end of file diff --git a/buf.yaml b/buf.yaml index b6d13510..41ece4f1 100644 --- a/buf.yaml +++ b/buf.yaml @@ -1,6 +1,5 @@ version: v1 -deps: - - buf.build/googleapis/googleapis +deps: [] build: excludes: [] breaking: @@ -8,7 +7,4 @@ breaking: - FILE lint: use: - - DEFAULT - rpc_allow_same_request_response: false - rpc_allow_google_protobuf_empty_requests: true - rpc_allow_google_protobuf_empty_responses: true \ No newline at end of file + - DEFAULT \ No newline at end of file diff --git a/client.go b/client.go index f989669b..6112f5c8 100644 --- a/client.go +++ b/client.go @@ -17,17 +17,24 @@ limitations under the License. 
package gubernator import ( + "bytes" + "context" crand "crypto/rand" "crypto/tls" + "fmt" "math/rand" + "net" + "net/http" "time" + "github.com/duh-rpc/duh-go" + v1 "github.com/duh-rpc/duh-go/proto/v1" "github.com/mailgun/holster/v4/clock" + "github.com/mailgun/holster/v4/setter" "github.com/pkg/errors" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/insecure" + "go.opentelemetry.io/otel/propagation" + "golang.org/x/net/http2" + "google.golang.org/protobuf/proto" ) const ( @@ -36,32 +43,158 @@ const ( Minute = 60 * Second ) -func (m *RateLimitReq) HashKey() string { +type Client interface { + CheckRateLimits(context.Context, *CheckRateLimitsRequest, *CheckRateLimitsResponse) error + HealthCheck(context.Context, *HealthCheckResponse) error +} + +func (m *RateLimitRequest) HashKey() string { return m.Name + "_" + m.UniqueKey } -// DialV1Server is a convenience function for dialing gubernator instances -func DialV1Server(server string, tls *tls.Config) (V1Client, error) { - if len(server) == 0 { - return nil, errors.New("server is empty; must provide a server") +type ClientOptions struct { + // Users can provide their own http client with TLS config if needed + Client *http.Client + // The address of endpoint in the format `://:` + Endpoint string +} + +type client struct { + *duh.Client + prop propagation.TraceContext + opts ClientOptions +} + +// NewClient creates a new instance of the Gubernator user client +func NewClient(opts ClientOptions) (Client, error) { + setter.SetDefault(&opts.Client, DefaultHTTPClient) + + if len(opts.Endpoint) == 0 { + return nil, errors.New("opts.Endpoint is empty; must provide an address") + } + + return &client{ + Client: &duh.Client{ + Client: opts.Client, + }, + opts: opts, + }, nil +} + +func NewPeerClient(opts ClientOptions) PeerClient { + return &client{ + Client: &duh.Client{ + Client: opts.Client, + }, + opts: opts, + } +} + +func (c *client) CheckRateLimits(ctx context.Context, req *CheckRateLimitsRequest, resp *CheckRateLimitsResponse) error { + payload, err := proto.Marshal(req) + if err != nil { + return duh.NewClientError(fmt.Errorf("while marshaling request payload: %w", err), nil) } - // Setup OpenTelemetry interceptor to propagate spans. - opts := []grpc.DialOption{ - grpc.WithStatsHandler(otelgrpc.NewClientHandler()), + r, err := http.NewRequestWithContext(ctx, http.MethodPost, + fmt.Sprintf("%s%s", c.opts.Endpoint, RPCRateLimitCheck), bytes.NewReader(payload)) + if err != nil { + return duh.NewClientError(err, nil) } - if tls != nil { - opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(tls))) - } else { - opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + + r.Header.Set("Content-Type", duh.ContentTypeProtoBuf) + return c.Do(r, resp) +} + +func (c *client) HealthCheck(ctx context.Context, resp *HealthCheckResponse) error { + payload, err := proto.Marshal(&HealthCheckRequest{}) + if err != nil { + return duh.NewClientError(fmt.Errorf("while marshaling request payload: %w", err), nil) } - conn, err := grpc.Dial(server, opts...) 
+ r, err := http.NewRequestWithContext(ctx, http.MethodPost, + fmt.Sprintf("%s%s", c.opts.Endpoint, RPCHealthCheck), bytes.NewReader(payload)) if err != nil { - return nil, errors.Wrapf(err, "failed to dial server %s", server) + return duh.NewClientError(err, nil) } - return NewV1Client(conn), nil + r.Header.Set("Content-Type", duh.ContentTypeProtoBuf) + return c.Do(r, resp) +} + +func (c *client) Forward(ctx context.Context, req *ForwardRequest, resp *ForwardResponse) error { + payload, err := proto.Marshal(req) + if err != nil { + return duh.NewClientError(fmt.Errorf("while marshaling request payload: %w", err), nil) + } + + r, err := http.NewRequestWithContext(ctx, http.MethodPost, + fmt.Sprintf("%s%s", c.opts.Endpoint, RPCPeerForward), bytes.NewReader(payload)) + if err != nil { + return duh.NewClientError(err, nil) + } + + c.prop.Inject(ctx, propagation.HeaderCarrier(r.Header)) + r.Header.Set("Content-Type", duh.ContentTypeProtoBuf) + return c.Do(r, resp) +} + +func (c *client) Update(ctx context.Context, req *UpdateRequest) error { + payload, err := proto.Marshal(req) + if err != nil { + return duh.NewClientError(fmt.Errorf("while marshaling request payload: %w", err), nil) + } + r, err := http.NewRequestWithContext(ctx, http.MethodPost, + fmt.Sprintf("%s%s", c.opts.Endpoint, RPCPeerUpdate), bytes.NewReader(payload)) + if err != nil { + return duh.NewClientError(err, nil) + } + + r.Header.Set("Content-Type", duh.ContentTypeProtoBuf) + return c.Do(r, &v1.Reply{}) +} + +var ( + // DefaultHTTPClient enables H2C (HTTP/2 over Cleartext) + DefaultHTTPClient = &http.Client{ + Transport: &http2.Transport{ + // So http2.Transport doesn't complain the URL scheme isn't 'https' + AllowHTTP: true, + // Pretend we are dialing a TLS endpoint. (Note, we ignore the passed tls.Config) + DialTLSContext: func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, network, addr) + }, + }, + } +) + +// WithNoTLS returns ClientOptions suitable for use with NON-TLS clients with H2C enabled. +func WithNoTLS(address string) ClientOptions { + return ClientOptions{ + Endpoint: fmt.Sprintf("http://%s", address), + Client: DefaultHTTPClient, + } +} + +// WithTLS returns ClientOptions suitable for use with TLS clients over HTTP/2.
+func WithTLS(tls *tls.Config, address string) ClientOptions { + return ClientOptions{ + Endpoint: fmt.Sprintf("https://%s", address), + Client: &http.Client{ + Transport: &http2.Transport{ + TLSClientConfig: tls, + }, + }, + } +} + +// WithDaemonConfig returns ClientOptions suitable for use by the Daemon +func WithDaemonConfig(conf DaemonConfig, address string) ClientOptions { + if conf.ClientTLS() == nil { + return WithNoTLS(address) + } + return WithTLS(conf.ClientTLS(), address) } // ToTimeStamp is a convenience function to convert a time.Duration diff --git a/cluster/cluster.go b/cluster/cluster.go index 493aa71c..647c00ba 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -21,9 +21,8 @@ import ( "fmt" "math/rand" - gubernator "github.com/mailgun/gubernator/v2" + "github.com/mailgun/gubernator/v3" "github.com/mailgun/holster/v4/clock" - "github.com/mailgun/holster/v4/errors" "github.com/sirupsen/logrus" ) @@ -36,8 +35,14 @@ const ( var daemons []*gubernator.Daemon var peers []gubernator.PeerInfo -// GetRandomPeer returns a random peer from the cluster -func GetRandomPeer(dc string) gubernator.PeerInfo { +// GetRandomClientOptions returns gubernator.ClientOptions for a random peer in the cluster +func GetRandomClientOptions(dc string) gubernator.ClientOptions { + info := GetRandomPeerInfo(dc) + return gubernator.WithNoTLS(info.HTTPAddress) +} + +// GetRandomPeerInfo returns a random peer from the cluster +func GetRandomPeerInfo(dc string) gubernator.PeerInfo { var local []gubernator.PeerInfo for _, p := range peers { @@ -80,14 +85,13 @@ func NumOfDaemons() int { // Start a local cluster of gubernator servers func Start(numInstances int) error { - // Ideally we should let the socket choose the port, but then + // Ideally, we should let the socket choose the port, but then // some things like the logger will not be set correctly. 
var peers []gubernator.PeerInfo port := 1111 for i := 0; i < numInstances; i++ { peers = append(peers, gubernator.PeerInfo{ HTTPAddress: fmt.Sprintf("localhost:%d", port), - GRPCAddress: fmt.Sprintf("localhost:%d", port+1), }) port += 2 } @@ -97,7 +101,7 @@ func Start(numInstances int) error { // Restart the cluster func Restart(ctx context.Context) error { for i := 0; i < len(daemons); i++ { - daemons[i].Close() + daemons[i].Close(context.Background()) if err := daemons[i].Start(ctx); err != nil { return err } @@ -111,8 +115,7 @@ func StartWith(localPeers []gubernator.PeerInfo) error { for _, peer := range localPeers { ctx, cancel := context.WithTimeout(context.Background(), clock.Second*10) d, err := gubernator.SpawnDaemon(ctx, gubernator.DaemonConfig{ - Logger: logrus.WithField("instance", peer.GRPCAddress), - GRPCListenAddress: peer.GRPCAddress, + Logger: logrus.WithField("instance", peer.HTTPAddress), HTTPListenAddress: peer.HTTPAddress, DataCenter: peer.DataCenter, Behaviors: gubernator.BehaviorConfig{ @@ -124,13 +127,12 @@ func StartWith(localPeers []gubernator.PeerInfo) error { }) cancel() if err != nil { - return errors.Wrapf(err, "while starting server for addr '%s'", peer.GRPCAddress) + return fmt.Errorf("while starting server for addr '%s': %w", peer.HTTPAddress, err) } // Add the peers and daemons to the package level variables peers = append(peers, gubernator.PeerInfo{ - GRPCAddress: d.GRPCListeners[0].Addr().String(), - HTTPAddress: d.HTTPListener.Addr().String(), + HTTPAddress: d.Listener.Addr().String(), DataCenter: peer.DataCenter, }) daemons = append(daemons, d) @@ -146,7 +148,7 @@ func StartWith(localPeers []gubernator.PeerInfo) error { // Stop all daemons in the cluster func Stop() { for _, d := range daemons { - d.Close() + d.Close(context.Background()) } peers = nil daemons = nil diff --git a/cluster/cluster_test.go b/cluster/cluster_test.go index 900d0ced..e6135cd5 100644 --- a/cluster/cluster_test.go +++ b/cluster/cluster_test.go @@ -19,8 +19,8 @@ package cluster_test import ( "testing" - gubernator "github.com/mailgun/gubernator/v2" - "github.com/mailgun/gubernator/v2/cluster" + "github.com/mailgun/gubernator/v3" + "github.com/mailgun/gubernator/v3/cluster" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -45,28 +45,28 @@ func TestStartOneInstance(t *testing.T) { func TestStartMultipleDaemons(t *testing.T) { peers := []gubernator.PeerInfo{ - {GRPCAddress: "localhost:1111", HTTPAddress: "localhost:1112"}, - {GRPCAddress: "localhost:2222", HTTPAddress: "localhost:2221"}} + {HTTPAddress: "localhost:1111"}, + {HTTPAddress: "localhost:2222"}} err := cluster.StartWith(peers) require.NoError(t, err) defer cluster.Stop() wantPeers := []gubernator.PeerInfo{ - {GRPCAddress: "127.0.0.1:1111", HTTPAddress: "127.0.0.1:1112"}, - {GRPCAddress: "127.0.0.1:2222", HTTPAddress: "127.0.0.1:2221"}, + {HTTPAddress: "127.0.0.1:1111"}, + {HTTPAddress: "127.0.0.1:2222"}, } daemons := cluster.GetDaemons() assert.Equal(t, wantPeers, cluster.GetPeers()) assert.Equal(t, 2, len(daemons)) - assert.Equal(t, "127.0.0.1:1111", daemons[0].GRPCListeners[0].Addr().String()) - assert.Equal(t, "127.0.0.1:2222", daemons[1].GRPCListeners[0].Addr().String()) - assert.Equal(t, "127.0.0.1:2222", cluster.DaemonAt(1).GRPCListeners[0].Addr().String()) - assert.Equal(t, "127.0.0.1:2222", cluster.PeerAt(1).GRPCAddress) + assert.Equal(t, "127.0.0.1:1111", daemons[0].Listener.Addr().String()) + assert.Equal(t, "127.0.0.1:2222", daemons[1].Listener.Addr().String()) + assert.Equal(t, 
"127.0.0.1:2222", cluster.DaemonAt(1).Listener.Addr().String()) + assert.Equal(t, "127.0.0.1:2222", cluster.PeerAt(1).HTTPAddress) } func TestStartWithInvalidPeer(t *testing.T) { - err := cluster.StartWith([]gubernator.PeerInfo{{GRPCAddress: "1111"}}) + err := cluster.StartWith([]gubernator.PeerInfo{{HTTPAddress: "1111"}}) assert.NotNil(t, err) assert.Nil(t, cluster.GetPeers()) assert.Nil(t, cluster.GetDaemons()) diff --git a/cmd/gubernator-cli/main.go b/cmd/gubernator-cli/main.go index 4e0a96b2..b52989c6 100644 --- a/cmd/gubernator-cli/main.go +++ b/cmd/gubernator-cli/main.go @@ -26,9 +26,8 @@ import ( "time" "github.com/davecgh/go-spew/spew" - guber "github.com/mailgun/gubernator/v2" + guber "github.com/mailgun/gubernator/v3" "github.com/mailgun/holster/v4/clock" - "github.com/mailgun/holster/v4/errors" "github.com/mailgun/holster/v4/setter" "github.com/mailgun/holster/v4/syncutil" "github.com/mailgun/holster/v4/tracing" @@ -40,7 +39,7 @@ import ( var ( log *logrus.Logger - configFile, grpcAddress string + configFile, httpAddress string concurrency uint64 timeout time.Duration checksPerRequest uint64 @@ -51,7 +50,7 @@ var ( func main() { log = logrus.StandardLogger() flag.StringVar(&configFile, "config", "", "Environment config file") - flag.StringVar(&grpcAddress, "e", "", "Gubernator GRPC endpoint address") + flag.StringVar(&httpAddress, "e", "", "Gubernator HTTP endpoint address") flag.Uint64Var(&concurrency, "concurrency", 1, "Concurrent threads (default 1)") flag.DurationVar(&timeout, "timeout", 100*time.Millisecond, "Request timeout (default 100ms)") flag.Uint64Var(&checksPerRequest, "checks", 1, "Rate checks per request (default 1)") @@ -78,45 +77,35 @@ func main() { } // Print startup message. - startCtx := tracing.StartScope(ctx) argsMsg := fmt.Sprintf("Command line: %s", strings.Join(os.Args[1:], " ")) log.Info(argsMsg) - tracing.EndScope(startCtx, nil) - var client guber.V1Client - err = tracing.CallScope(ctx, func(ctx context.Context) error { - // Print startup message. - cmdLine := strings.Join(os.Args[1:], " ") - logrus.WithContext(ctx).Info("Command line: " + cmdLine) - - conf, err := guber.SetupDaemonConfig(log, configFile) - if err != nil { - return err - } - setter.SetOverride(&conf.GRPCListenAddress, grpcAddress) + var client guber.Client + // Print startup message. + cmdLine := strings.Join(os.Args[1:], " ") + logrus.WithContext(ctx).Info("Command line: " + cmdLine) - if configFile == "" && grpcAddress == "" && os.Getenv("GUBER_GRPC_ADDRESS") == "" { - return errors.New("please provide a GRPC endpoint via -e or from a config " + - "file via -config or set the env GUBER_GRPC_ADDRESS") - } + conf, err := guber.SetupDaemonConfig(log, configFile) + checkErr(err) + setter.SetOverride(&conf.HTTPListenAddress, httpAddress) - err = guber.SetupTLS(conf.TLS) - if err != nil { - return err - } + if configFile == "" && httpAddress == "" && os.Getenv("GUBER_HTTP_ADDRESS") == "" { + log.Fatal("please provide a endpoint via -e or from a config " + + "file via -config or set the env GUBER_HTTP_ADDRESS") + } - log.WithContext(ctx).Infof("Connecting to '%s'...", conf.GRPCListenAddress) - client, err = guber.DialV1Server(conf.GRPCListenAddress, conf.ClientTLS()) - return err - }) + err = guber.SetupTLS(conf.TLS) + checkErr(err) + log.WithContext(ctx).Infof("Connecting to '%s'...", conf.HTTPListenAddress) + client, err = guber.NewClient(guber.WithDaemonConfig(conf, conf.HTTPListenAddress)) checkErr(err) // Generate a selection of rate limits with random limits. 
- var rateLimits []*guber.RateLimitReq + var rateLimits []*guber.RateLimitRequest for i := 0; i < 2000; i++ { - rateLimits = append(rateLimits, &guber.RateLimitReq{ + rateLimits = append(rateLimits, &guber.RateLimitRequest{ Name: fmt.Sprintf("gubernator-cli-%d", i), UniqueKey: guber.RandomString(10), Hits: 1, @@ -138,12 +127,12 @@ func main() { // Replay requests in endless loop. for { for i := int(0); i < len(rateLimits); i += int(checksPerRequest) { - req := &guber.GetRateLimitsReq{ + req := &guber.CheckRateLimitsRequest{ Requests: rateLimits[i:min(i+int(checksPerRequest), len(rateLimits))], } fan.Run(func(obj interface{}) error { - req := obj.(*guber.GetRateLimitsReq) + req := obj.(*guber.CheckRateLimitsRequest) if reqRate > 0 { _ = limiter.Wait(ctx) @@ -174,49 +163,46 @@ func randInt(min, max int) int { return rand.Intn(max-min) + min } -func sendRequest(ctx context.Context, client guber.V1Client, req *guber.GetRateLimitsReq) { +func sendRequest(ctx context.Context, client guber.Client, req *guber.CheckRateLimitsRequest) { ctx = tracing.StartScope(ctx) defer tracing.EndScope(ctx, nil) ctx, cancel := context.WithTimeout(ctx, timeout) // Now hit our cluster with the rate limits - resp, err := client.GetRateLimits(ctx, req) + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, req, &resp) cancel() if err != nil { - log.WithContext(ctx).WithError(err).Error("Error in client.GetRateLimits") + log.WithContext(ctx).WithError(err).Error("Error in client.CheckRateLimits") return } - // Sanity checks. - if resp == nil { - log.WithContext(ctx).Error("Response object is unexpectedly nil") - return - } + // Sanity check if resp.Responses == nil { log.WithContext(ctx).Error("Responses array is unexpectedly nil") return } - // Check for overlimit response. - overlimit := false + // Check for over limit response. + overLimit := false for itemNum, resp := range resp.Responses { if resp.Status == guber.Status_OVER_LIMIT { - overlimit = true + overLimit = true log.WithContext(ctx).WithField("name", req.Requests[itemNum].Name). 
Info("Overlimit!") } } - if overlimit { + if overLimit { span := trace.SpanFromContext(ctx) span.SetAttributes( attribute.Bool("overlimit", true), ) if !quiet { - dumpResp := spew.Sdump(resp) + dumpResp := spew.Sdump(&resp) log.WithContext(ctx).Info(dumpResp) } } diff --git a/cmd/gubernator-cluster/main.go b/cmd/gubernator-cluster/main.go index 8b4eb54f..64ca8047 100644 --- a/cmd/gubernator-cluster/main.go +++ b/cmd/gubernator-cluster/main.go @@ -21,8 +21,8 @@ import ( "os" "os/signal" - gubernator "github.com/mailgun/gubernator/v2" - "github.com/mailgun/gubernator/v2/cluster" + "github.com/mailgun/gubernator/v3" + "github.com/mailgun/gubernator/v3/cluster" "github.com/sirupsen/logrus" ) @@ -31,12 +31,12 @@ func main() { logrus.SetLevel(logrus.InfoLevel) // Start a local cluster err := cluster.StartWith([]gubernator.PeerInfo{ - {GRPCAddress: "127.0.0.1:9990", HTTPAddress: "127.0.0.1:9980"}, - {GRPCAddress: "127.0.0.1:9991", HTTPAddress: "127.0.0.1:9981"}, - {GRPCAddress: "127.0.0.1:9992", HTTPAddress: "127.0.0.1:9982"}, - {GRPCAddress: "127.0.0.1:9993", HTTPAddress: "127.0.0.1:9983"}, - {GRPCAddress: "127.0.0.1:9994", HTTPAddress: "127.0.0.1:9984"}, - {GRPCAddress: "127.0.0.1:9995", HTTPAddress: "127.0.0.1:9985"}, + {HTTPAddress: "127.0.0.1:9980"}, + {HTTPAddress: "127.0.0.1:9981"}, + {HTTPAddress: "127.0.0.1:9982"}, + {HTTPAddress: "127.0.0.1:9983"}, + {HTTPAddress: "127.0.0.1:9984"}, + {HTTPAddress: "127.0.0.1:9985"}, }) if err != nil { panic(err) diff --git a/cmd/gubernator/main.go b/cmd/gubernator/main.go index 2d3d6fe8..50540307 100644 --- a/cmd/gubernator/main.go +++ b/cmd/gubernator/main.go @@ -25,7 +25,7 @@ import ( "runtime" "syscall" - gubernator "github.com/mailgun/gubernator/v2" + gubernator "github.com/mailgun/gubernator/v3" "github.com/mailgun/holster/v4/clock" "github.com/mailgun/holster/v4/tracing" "github.com/sirupsen/logrus" @@ -89,7 +89,7 @@ func main() { signal.Notify(c, os.Interrupt, syscall.SIGTERM) for range c { log.Info("caught signal; shutting down") - daemon.Close() + daemon.Close(context.Background()) _ = tracing.CloseTracing(context.Background()) exit(0) } diff --git a/cmd/healthcheck/main.go b/cmd/healthcheck/main.go index a98259d9..83d29a28 100644 --- a/cmd/healthcheck/main.go +++ b/cmd/healthcheck/main.go @@ -23,7 +23,7 @@ import ( "net/http" "os" - guber "github.com/mailgun/gubernator/v2" + guber "github.com/mailgun/gubernator/v3" ) func main() { @@ -31,7 +31,7 @@ func main() { if url == "" { url = "localhost:80" } - resp, err := http.DefaultClient.Get(fmt.Sprintf("http://%s/v1/HealthCheck", url)) + resp, err := http.DefaultClient.Get(fmt.Sprintf("http://%s/healthz", url)) if err != nil { panic(err) } @@ -42,7 +42,7 @@ func main() { panic(err) } - var hc guber.HealthCheckResp + var hc guber.HealthCheckResponse if err := json.Unmarshal(body, &hc); err != nil { panic(err) } diff --git a/config.go b/config.go index c46e2fa3..48a66fdf 100644 --- a/config.go +++ b/config.go @@ -41,8 +41,6 @@ import ( "github.com/segmentio/fasthash/fnv1a" "github.com/sirupsen/logrus" etcd "go.etcd.io/etcd/client/v3" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "google.golang.org/grpc" ) // BehaviorConfig controls the handling of rate limits in the cluster @@ -71,18 +69,18 @@ type BehaviorConfig struct { // Config for a gubernator instance type Config struct { - // (Required) A list of GRPC servers to register our instance with - GRPCServers []*grpc.Server - // (Optional) Adjust how gubernator behaviors are configured Behaviors BehaviorConfig + 
// (Optional) The PeerClient gubernator should use when making requests to other peers in the cluster. + PeerClientFactory func(PeerInfo) PeerClient + // (Optional) The cache implementation CacheFactory func(maxSize int) Cache // (Optional) A persistent store implementation. Allows the implementor the ability to store the rate limits this // instance of gubernator owns. It's up to the implementor to decide what rate limits to persist. - // For instance an implementor might only persist rate limits that have an expiration of + // For instance, an implementor might only persist rate limits that have an expiration of // longer than 1 hour. Store Store @@ -107,10 +105,7 @@ type Config struct { Logger FieldLogger // (Optional) The TLS config used when connecting to gubernator peers - PeerTLS *tls.Config - - // (Optional) If true, will emit traces for GRPC client requests to other peers - PeerTraceGRPC bool + //PeerTLS *tls.Config // (Optional) The number of go routine workers used to process concurrent rate limit requests // Default is set to number of CPUs. @@ -144,15 +139,15 @@ func (c *Config) SetDefaults() error { } } + if c.PeerClientFactory == nil { + c.PeerClientFactory = func(info PeerInfo) PeerClient { + return NewPeerClient(WithNoTLS(info.HTTPAddress)) + } + } if c.Behaviors.BatchLimit > maxBatchSize { return fmt.Errorf("Behaviors.BatchLimit cannot exceed '%d'", maxBatchSize) } - // Make a copy of the TLS config in case our caller decides to make changes - if c.PeerTLS != nil { - c.PeerTLS = c.PeerTLS.Clone() - } - return nil } @@ -161,15 +156,13 @@ type PeerInfo struct { DataCenter string `json:"data-center"` // (Optional) The http address:port of the peer HTTPAddress string `json:"http-address"` - // (Required) The grpc address:port of the peer - GRPCAddress string `json:"grpc-address"` // (Optional) Is true if PeerInfo is for this instance of gubernator IsOwner bool `json:"is-owner,omitempty"` } // HashKey returns the hash key used to identify this peer in the Picker. func (p PeerInfo) HashKey() string { - return p.GRPCAddress + return p.HTTPAddress } type UpdateFunc func([]PeerInfo) @@ -177,9 +170,6 @@ type UpdateFunc func([]PeerInfo) var DebugEnabled = false type DaemonConfig struct { - // (Required) The `address:port` that will accept GRPC requests - GRPCListenAddress string - // (Required) The `address:port` that will accept HTTP requests HTTPListenAddress string @@ -190,12 +180,8 @@ type DaemonConfig struct { // provide client certificate but you want to enforce mTLS in other RPCs (like in K8s) HTTPStatusListenAddress string - // (Optional) Defines the max age connection from client in seconds. - // Default is infinity - GRPCMaxConnectionAgeSeconds int - // (Optional) The `address:port` that is advertised to other Gubernator peers. - // Defaults to `GRPCListenAddress` + // Defaults to `HTTPListenAddress` AdvertiseAddress string // (Optional) The number of items in the cache. Defaults to 50,000 @@ -234,6 +220,16 @@ type DaemonConfig struct { // (Optional) A Logger which implements the declared logger interface (typically *logrus.Entry) Logger FieldLogger + // (Optional) A loader from a persistent store. Allows the implementor the ability to load and save + // the contents of the cache when the gubernator instance is started and stopped + Loader Loader + + // (Optional) A persistent store implementation. Allows the implementor the ability to store the rate limits this + // instance of gubernator owns. It's up to the implementor to decide what rate limits to persist. 
+ // For instance, an implementor might only persist rate limits that have an expiration of + // longer than 1 hour. + Store Store + // (Optional) TLS Configuration; SpawnDaemon() will modify the passed TLS config in an // attempt to build a complete TLS config if one is not provided. TLS *TLSConfig @@ -241,7 +237,7 @@ type DaemonConfig struct { // (Optional) Metrics Flags which enable or disable a collection of some metric types MetricFlags MetricFlags - // (Optional) Instance ID which is a unique id that identifies this instance of gubernator + // (Optional) Service ID which is a unique id that identifies this instance of gubernator InstanceID string // (Optional) TraceLevel sets the tracing level, this controls the number of spans included in a single trace. @@ -308,16 +304,23 @@ func SetupDaemonConfig(logger *logrus.Logger, configFile string) (DaemonConfig, } // Main config - setter.SetDefault(&conf.GRPCListenAddress, os.Getenv("GUBER_GRPC_ADDRESS"), - fmt.Sprintf("%s:81", LocalHost())) setter.SetDefault(&conf.HTTPListenAddress, os.Getenv("GUBER_HTTP_ADDRESS"), fmt.Sprintf("%s:80", LocalHost())) setter.SetDefault(&conf.InstanceID, GetInstanceID()) setter.SetDefault(&conf.HTTPStatusListenAddress, os.Getenv("GUBER_STATUS_HTTP_ADDRESS"), "") - setter.SetDefault(&conf.GRPCMaxConnectionAgeSeconds, getEnvInteger(log, "GUBER_GRPC_MAX_CONN_AGE_SEC"), 0) setter.SetDefault(&conf.CacheSize, getEnvInteger(log, "GUBER_CACHE_SIZE"), 50_000) setter.SetDefault(&conf.Workers, getEnvInteger(log, "GUBER_WORKER_COUNT"), 0) - setter.SetDefault(&conf.AdvertiseAddress, os.Getenv("GUBER_ADVERTISE_ADDRESS"), conf.GRPCListenAddress) + + conf.HTTPListenAddress, err = ResolveHostIP(conf.HTTPListenAddress) + if err != nil { + return conf, errors.Wrap(err, "while identifying the actual GUBER_HTTP_ADDRESS address") + } + conf.HTTPStatusListenAddress, err = ResolveHostIP(conf.HTTPStatusListenAddress) + if err != nil { + return conf, errors.Wrap(err, "while identifying the actual GUBER_STATUS_HTTP_ADDRESS address") + } + + setter.SetDefault(&conf.AdvertiseAddress, os.Getenv("GUBER_ADVERTISE_ADDRESS"), conf.HTTPListenAddress) setter.SetDefault(&conf.DataCenter, os.Getenv("GUBER_DATA_CENTER"), "") setter.SetDefault(&conf.MetricFlags, getEnvMetricFlags(log, "GUBER_METRIC_FLAGS")) @@ -390,10 +393,10 @@ func SetupDaemonConfig(logger *logrus.Logger, configFile string) (DaemonConfig, setter.SetDefault(&conf.EtcdPoolConf.EtcdConfig.DialTimeout, getEnvDuration(log, "GUBER_ETCD_DIAL_TIMEOUT"), clock.Second*5) setter.SetDefault(&conf.EtcdPoolConf.EtcdConfig.Username, os.Getenv("GUBER_ETCD_USER")) setter.SetDefault(&conf.EtcdPoolConf.EtcdConfig.Password, os.Getenv("GUBER_ETCD_PASSWORD")) - setter.SetDefault(&conf.EtcdPoolConf.Advertise.GRPCAddress, os.Getenv("GUBER_ETCD_ADVERTISE_ADDRESS"), conf.AdvertiseAddress) + setter.SetDefault(&conf.EtcdPoolConf.Advertise.HTTPAddress, os.Getenv("GUBER_ETCD_ADVERTISE_ADDRESS"), conf.AdvertiseAddress) setter.SetDefault(&conf.EtcdPoolConf.Advertise.DataCenter, os.Getenv("GUBER_ETCD_DATA_CENTER"), conf.DataCenter) - setter.SetDefault(&conf.MemberListPoolConf.Advertise.GRPCAddress, os.Getenv("GUBER_MEMBERLIST_ADVERTISE_ADDRESS"), conf.AdvertiseAddress) + setter.SetDefault(&conf.MemberListPoolConf.Advertise.HTTPAddress, os.Getenv("GUBER_MEMBERLIST_ADVERTISE_ADDRESS"), conf.AdvertiseAddress) setter.SetDefault(&conf.MemberListPoolConf.MemberListAddress, os.Getenv("GUBER_MEMBERLIST_ADDRESS"), fmt.Sprintf("%s:7946", advAddr)) setter.SetDefault(&conf.MemberListPoolConf.KnownNodes, 
getEnvSlice("GUBER_MEMBERLIST_KNOWN_NODES"), []string{}) setter.SetDefault(&conf.MemberListPoolConf.Advertise.DataCenter, conf.DataCenter) @@ -729,27 +732,3 @@ func GetTracingLevel() tracing.Level { } return tracing.InfoLevel } - -// TraceLevelInfoFilter is used with otelgrpc.WithInterceptorFilter() to -// reduce noise by filtering trace propagation on some gRPC methods. -// otelgrpc deprecated use of interceptors in v0.45.0 in favor of stats -// handlers to propagate trace context. -// However, stats handlers do not have a filter feature. -// See: https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4575 -var TraceLevelInfoFilter = otelgrpc.Filter(func(info *otelgrpc.InterceptorInfo) bool { - if info.UnaryServerInfo != nil { - if info.UnaryServerInfo.FullMethod == "/pb.gubernator.PeersV1/GetPeerRateLimits" { - return false - } - if info.UnaryServerInfo.FullMethod == "/pb.gubernator.V1/HealthCheck" { - return false - } - } - if info.Method == "/pb.gubernator.PeersV1/GetPeerRateLimits" { - return false - } - if info.Method == "/pb.gubernator.V1/HealthCheck" { - return false - } - return true -}) diff --git a/contrib/aws-ecs-service-discovery-deployment/main.tf b/contrib/aws-ecs-service-discovery-deployment/main.tf index 3b7faaa1..1661d708 100644 --- a/contrib/aws-ecs-service-discovery-deployment/main.tf +++ b/contrib/aws-ecs-service-discovery-deployment/main.tf @@ -3,7 +3,6 @@ locals { gubernator_service_discovery = "app" gubernator_service_host = "${local.gubernator_service_discovery}.${local.service_namespace}" gubernator_env_vars = { - GUBER_GRPC_ADDRESS = "0.0.0.0:81" GUBER_HTTP_ADDRESS = "0.0.0.0:80" GUBER_PEER_DISCOVERY_TYPE = "dns" GUBER_DNS_FQDN = local.gubernator_service_host diff --git a/contrib/charts/gubernator/templates/_helpers.tpl b/contrib/charts/gubernator/templates/_helpers.tpl index 881a4fca..51b28f4a 100644 --- a/contrib/charts/gubernator/templates/_helpers.tpl +++ b/contrib/charts/gubernator/templates/_helpers.tpl @@ -60,17 +60,6 @@ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end }} -{{/* -GRPC Port -*/}} -{{- define "gubernator.grpc.port" -}} -{{- if .Values.gubernator.server.grpc.port }} -{{- .Values.gubernator.server.grpc.port}} -{{- else }} -{{- print "81" }} -{{- end }} -{{- end }} - {{/* HTTP Port */}} @@ -92,14 +81,12 @@ HTTP Port valueFrom: fieldRef: fieldPath: status.podIP -- name: GUBER_GRPC_ADDRESS - value: "0.0.0.0:{{ include "gubernator.grpc.port" . }}" - name: GUBER_HTTP_ADDRESS value: "0.0.0.0:{{ include "gubernator.http.port" . }}" - name: GUBER_PEER_DISCOVERY_TYPE value: "k8s" - name: GUBER_K8S_POD_PORT - value: "{{ include "gubernator.grpc.port" . }}" + value: "{{ include "gubernator.http.port" . 
}}" - name: GUBER_K8S_ENDPOINTS_SELECTOR value: "app=gubernator" {{- if .Values.gubernator.debug }} @@ -112,8 +99,4 @@ HTTP Port {{- else }} value: "endpoints" {{- end }} -{{- if .Values.gubernator.server.grpc.maxConnAgeSeconds }} -- name: GUBER_GRPC_MAX_CONN_AGE_SEC - value: {{ .Values.gubernator.server.grpc.maxConnAgeSeconds }} -{{- end }} {{- end }} diff --git a/contrib/charts/gubernator/templates/deployment.yaml b/contrib/charts/gubernator/templates/deployment.yaml index 5dbc19b8..f21bd686 100644 --- a/contrib/charts/gubernator/templates/deployment.yaml +++ b/contrib/charts/gubernator/templates/deployment.yaml @@ -39,14 +39,12 @@ spec: image: {{ .Values.gubernator.image.repository }}:{{ .Values.gubernator.image.tag | default .Chart.AppVersion }} imagePullPolicy: {{ .Values.gubernator.image.pullPolicy }} ports: - - name: grpc - containerPort: {{ include "gubernator.grpc.port" . }} - name: http containerPort: {{ include "gubernator.http.port" . }} livenessProbe: failureThreshold: 2 tcpSocket: - port: {{ include "gubernator.grpc.port" . }} + port: {{ include "gubernator.http.port" . }} initialDelaySeconds: 30 periodSeconds: 10 successThreshold: 1 @@ -54,7 +52,7 @@ spec: readinessProbe: failureThreshold: 20 tcpSocket: - port: {{ include "gubernator.grpc.port" . }} + port: {{ include "gubernator.http.port" . }} periodSeconds: 1 successThreshold: 1 timeoutSeconds: 1 diff --git a/contrib/charts/gubernator/templates/service.yaml b/contrib/charts/gubernator/templates/service.yaml index 995dcc60..7453aeb1 100644 --- a/contrib/charts/gubernator/templates/service.yaml +++ b/contrib/charts/gubernator/templates/service.yaml @@ -10,10 +10,6 @@ spec: type: ClusterIP clusterIP: None ports: - - port: {{ include "gubernator.grpc.port" . }} - targetPort: {{ include "gubernator.grpc.port" . }} - protocol: TCP - name: grpc - port: {{ include "gubernator.http.port" . }} targetPort: {{ include "gubernator.http.port" . 
}} protocol: TCP diff --git a/contrib/charts/gubernator/values.yaml b/contrib/charts/gubernator/values.yaml index b62ee70a..327a0f73 100644 --- a/contrib/charts/gubernator/values.yaml +++ b/contrib/charts/gubernator/values.yaml @@ -32,11 +32,6 @@ gubernator: server: http: port: "80" - grpc: - port: "81" - # Defines the max age of a client connection - # default is infinity - # maxConnAgeSeconds: 30 annotations: { } diff --git a/contrib/k8s-deployment.yaml b/contrib/k8s-deployment.yaml index 5e40746a..799209ef 100644 --- a/contrib/k8s-deployment.yaml +++ b/contrib/k8s-deployment.yaml @@ -19,8 +19,6 @@ spec: - image: ghcr.io/mailgun/gubernator:latest imagePullPolicy: IfNotPresent ports: - - name: grpc-port - containerPort: 81 - name: http-port containerPort: 80 name: gubernator @@ -33,16 +31,14 @@ spec: valueFrom: fieldRef: fieldPath: status.podIP - # Must set the GRPC and HTTP addresses, as gubernator + # Must set the HTTP addresses, as gubernator # defaults to listening on localhost only - - name: GUBER_GRPC_ADDRESS - value: 0.0.0.0:81 - name: GUBER_HTTP_ADDRESS value: 0.0.0.0:80 # Use the k8s API for peer discovery - name: GUBER_PEER_DISCOVERY_TYPE value: "k8s" - # This should match the port number GRPC is listening on + # This should match the port number HTTP is listening on # as defined by `containerPort` - name: GUBER_K8S_POD_PORT value: "81" @@ -57,10 +53,6 @@ spec: # Enable debug for diagnosing issues - name: GUBER_DEBUG value: "true" - # Defines the max age of a client connection - # Default is infinity - # - name: GUBER_GRPC_MAX_CONN_AGE_SEC - # value: "30" restartPolicy: Always --- @@ -72,11 +64,6 @@ metadata: app: gubernator spec: clusterIP: None - #ports: - #- name: grpc-port - #targetPort: 81 - #protocol: TCP - #port: 81 #- name: http-port #targetPort: 80 #protocol: TCP diff --git a/daemon.go b/daemon.go index a220136b..88c48355 100644 --- a/daemon.go +++ b/daemon.go @@ -19,53 +19,40 @@ package gubernator import ( "context" "crypto/tls" + "fmt" "log" "net" "net/http" "strings" "time" - "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" - "github.com/mailgun/holster/v4/errors" + "github.com/mailgun/errors" "github.com/mailgun/holster/v4/etcdutil" "github.com/mailgun/holster/v4/setter" "github.com/mailgun/holster/v4/syncutil" - "github.com/mailgun/holster/v4/tracing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/sirupsen/logrus" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/keepalive" - "google.golang.org/protobuf/encoding/protojson" + "golang.org/x/net/http2" + "golang.org/x/net/http2/h2c" + "golang.org/x/net/proxy" ) type Daemon struct { - GRPCListeners []net.Listener - HTTPListener net.Listener - V1Server *V1Instance - - log FieldLogger - pool PoolInterface - conf DaemonConfig - httpSrv *http.Server - httpSrvNoMTLS *http.Server - grpcSrvs []*grpc.Server - wg syncutil.WaitGroup - statsHandler *GRPCStatsHandler - promRegister *prometheus.Registry - gwCancel context.CancelFunc - instanceConf Config + wg syncutil.WaitGroup + httpServers []*http.Server + pool PoolInterface + conf DaemonConfig + Listener net.Listener + HealthListener net.Listener + log FieldLogger + Service *Service } // SpawnDaemon starts a new gubernator daemon according to the provided DaemonConfig. 
-// This function will block until the daemon responds to connections as specified -// by GRPCListenAddress and HTTPListenAddress +// This function will block until the daemon responds to connections on HTTPListenAddress func SpawnDaemon(ctx context.Context, conf DaemonConfig) (*Daemon, error) { - s := &Daemon{ log: conf.Logger, conf: conf, @@ -76,137 +63,58 @@ func SpawnDaemon(ctx context.Context, conf DaemonConfig) (*Daemon, error) { func (s *Daemon) Start(ctx context.Context) error { var err error + // TODO: Then setup benchmarks and go trace + setter.SetDefault(&s.log, logrus.WithFields(logrus.Fields{ - "instance-id": s.conf.InstanceID, - "category": "gubernator", + "service-id": s.conf.InstanceID, + "category": "gubernator", })) - s.promRegister = prometheus.NewRegistry() + registry := prometheus.NewRegistry() // The LRU cache for storing rate limits. cacheCollector := NewLRUCacheCollector() - if err := s.promRegister.Register(cacheCollector); err != nil { - return errors.Wrap(err, "during call to promRegister.Register()") - } - - cacheFactory := func(maxSize int) Cache { - cache := NewLRUCache(maxSize) - cacheCollector.AddCache(cache) - return cache - } - - // Handler to collect duration and API access metrics for GRPC - s.statsHandler = NewGRPCStatsHandler() - _ = s.promRegister.Register(s.statsHandler) - - var filters []otelgrpc.Option - // otelgrpc deprecated use of interceptors in v0.45.0 in favor of stats - // handlers to propagate trace context. - // However, stats handlers do not have a filter feature. - // See: https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4575 - // if s.conf.TraceLevel != tracing.DebugLevel { - // filters = []otelgrpc.Option{ - // otelgrpc.WithInterceptorFilter(TraceLevelInfoFilter), - // } - // } - - opts := []grpc.ServerOption{ - grpc.StatsHandler(s.statsHandler), - grpc.MaxRecvMsgSize(1024 * 1024), - - // OpenTelemetry instrumentation on gRPC endpoints. 
- grpc.StatsHandler(otelgrpc.NewServerHandler(filters...)), - } - - if s.conf.GRPCMaxConnectionAgeSeconds > 0 { - opts = append(opts, grpc.KeepaliveParams(keepalive.ServerParameters{ - MaxConnectionAge: time.Second * time.Duration(s.conf.GRPCMaxConnectionAgeSeconds), - MaxConnectionAgeGrace: time.Second * time.Duration(s.conf.GRPCMaxConnectionAgeSeconds), - })) - } + registry.MustRegister(cacheCollector) if err := SetupTLS(s.conf.TLS); err != nil { return err } - if s.conf.ServerTLS() != nil { - // Create two GRPC server instances, one for TLS and the other for the API Gateway - opts2 := append(opts, grpc.Creds(credentials.NewTLS(s.conf.ServerTLS()))) - s.grpcSrvs = append(s.grpcSrvs, grpc.NewServer(opts2...)) - } - s.grpcSrvs = append(s.grpcSrvs, grpc.NewServer(opts...)) - - // Registers a new gubernator instance with the GRPC server - s.instanceConf = Config{ - PeerTraceGRPC: s.conf.TraceLevel >= tracing.DebugLevel, - PeerTLS: s.conf.ClientTLS(), - DataCenter: s.conf.DataCenter, - LocalPicker: s.conf.Picker, - GRPCServers: s.grpcSrvs, - Logger: s.log, - CacheFactory: cacheFactory, - Behaviors: s.conf.Behaviors, - CacheSize: s.conf.CacheSize, - Workers: s.conf.Workers, - } - - s.V1Server, err = NewV1Instance(s.instanceConf) - if err != nil { - return errors.Wrap(err, "while creating new gubernator instance") - } - - // V1Server instance also implements prometheus.Collector interface - _ = s.promRegister.Register(s.V1Server) - - l, err := net.Listen("tcp", s.conf.GRPCListenAddress) + s.Service, err = NewService(Config{ + PeerClientFactory: func(info PeerInfo) PeerClient { + return NewPeerClient(WithDaemonConfig(s.conf, info.HTTPAddress)) + }, + CacheFactory: func(maxSize int) Cache { + cache := NewLRUCache(maxSize) + cacheCollector.AddCache(cache) + return cache + }, + DataCenter: s.conf.DataCenter, + CacheSize: s.conf.CacheSize, + Behaviors: s.conf.Behaviors, + Workers: s.conf.Workers, + LocalPicker: s.conf.Picker, + Loader: s.conf.Loader, + Store: s.conf.Store, + Logger: s.log, + }) if err != nil { - return errors.Wrap(err, "while starting GRPC listener") + return errors.Wrap(err, "while creating new gubernator service") } - s.GRPCListeners = append(s.GRPCListeners, l) - - // Start serving GRPC Requests - s.wg.Go(func() { - s.log.Infof("GRPC Listening on %s ...", l.Addr().String()) - if err := s.grpcSrvs[0].Serve(l); err != nil { - s.log.WithError(err).Error("while starting GRPC server") - } - }) - var gatewayAddr string - if s.conf.ServerTLS() != nil { - // We start a new local GRPC instance because we can't guarantee the TLS cert provided by the - // user has localhost or the local interface included in the certs' valid hostnames. If they are not - // included, it means the local gateway connections will not be able to connect. 
- l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - return errors.Wrap(err, "while starting GRPC Gateway listener") - } - s.GRPCListeners = append(s.GRPCListeners, l) - - s.wg.Go(func() { - s.log.Infof("GRPC Gateway Listening on %s ...", l.Addr()) - if err := s.grpcSrvs[1].Serve(l); err != nil { - s.log.WithError(err).Error("while starting GRPC Gateway server") - } - }) - gatewayAddr = l.Addr().String() - } else { - gatewayAddr, err = ResolveHostIP(s.conf.GRPCListenAddress) - if err != nil { - return errors.Wrap(err, "while resolving GRPC gateway client address") - } - } + // Service implements prometheus.Collector interface + registry.MustRegister(s.Service) switch s.conf.PeerDiscoveryType { case "k8s": // Source our list of peers from kubernetes endpoint API - s.conf.K8PoolConf.OnUpdate = s.V1Server.SetPeers + s.conf.K8PoolConf.OnUpdate = s.Service.SetPeers s.pool, err = NewK8sPool(s.conf.K8PoolConf) if err != nil { return errors.Wrap(err, "while querying kubernetes API") } case "etcd": - s.conf.EtcdPoolConf.OnUpdate = s.V1Server.SetPeers + s.conf.EtcdPoolConf.OnUpdate = s.Service.SetPeers // Register ourselves with other peers via ETCD s.conf.EtcdPoolConf.Client, err = etcdutil.NewClient(s.conf.EtcdPoolConf.EtcdConfig) if err != nil { @@ -218,13 +126,13 @@ func (s *Daemon) Start(ctx context.Context) error { return errors.Wrap(err, "while creating etcd pool") } case "dns": - s.conf.DNSPoolConf.OnUpdate = s.V1Server.SetPeers + s.conf.DNSPoolConf.OnUpdate = s.Service.SetPeers s.pool, err = NewDNSPool(s.conf.DNSPoolConf) if err != nil { return errors.Wrap(err, "while creating the DNS pool") } case "member-list": - s.conf.MemberListPoolConf.OnUpdate = s.V1Server.SetPeers + s.conf.MemberListPoolConf.OnUpdate = s.Service.SetPeers s.conf.MemberListPoolConf.Logger = s.log // Register peer on the member list @@ -234,40 +142,10 @@ func (s *Daemon) Start(ctx context.Context) error { } } - // We override the default Marshaller to enable the `UseProtoNames` option. - // We do this is because the default JSONPb in 2.5.0 marshals proto structs using - // `camelCase`, while all the JSON annotations are `under_score`. - // Our protobuf files follow the convention described here - // https://developers.google.com/protocol-buffers/docs/style#message-and-field-names - // Camel case breaks unmarshalling our GRPC gateway responses with protobuf structs. 
- gateway := runtime.NewServeMux( - runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{ - MarshalOptions: protojson.MarshalOptions{ - UseProtoNames: true, - EmitUnpopulated: true, - }, - UnmarshalOptions: protojson.UnmarshalOptions{ - DiscardUnknown: true, - }, - }), - ) - - // Set up an JSON Gateway API for our GRPC methods - var gwCtx context.Context - gwCtx, s.gwCancel = context.WithCancel(context.Background()) - err = RegisterV1HandlerFromEndpoint(gwCtx, gateway, gatewayAddr, - []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}) - if err != nil { - return errors.Wrap(err, "while registering GRPC gateway handler") - } - - // Serve the JSON Gateway and metrics handlers via standard HTTP/1 - mux := http.NewServeMux() - // Optionally collect process metrics if s.conf.MetricFlags.Has(FlagOSMetrics) { s.log.Debug("Collecting OS Metrics") - s.promRegister.MustRegister(collectors.NewProcessCollector( + registry.MustRegister(collectors.NewProcessCollector( collectors.ProcessCollectorOpts{Namespace: "gubernator"}, )) } @@ -275,113 +153,161 @@ func (s *Daemon) Start(ctx context.Context) error { // Optionally collect golang internal metrics if s.conf.MetricFlags.Has(FlagGolangMetrics) { s.log.Debug("Collecting Golang Metrics") - s.promRegister.MustRegister(collectors.NewGoCollector()) + registry.MustRegister(collectors.NewGoCollector()) + } + + handler := NewHandler(s.Service, promhttp.InstrumentMetricHandler( + registry, promhttp.HandlerFor(registry, promhttp.HandlerOpts{}), + )) + registry.MustRegister(handler) + + if s.conf.ServerTLS() != nil { + if err := s.spawnHTTPS(ctx, handler); err != nil { + return err + } + if s.conf.HTTPStatusListenAddress != "" { + if err := s.spawnHTTPHealthCheck(ctx, handler, registry); err != nil { + return err + } + } + } else { + if err := s.spawnHTTP(ctx, handler); err != nil { + return err + } } + return nil +} +// spawnHTTPHealthCheck spawns a plain HTTP listener for use by orchestration systems to perform health checks and +// collect metrics when TLS and client certs are in use. 
+func (s *Daemon) spawnHTTPHealthCheck(ctx context.Context, h *Handler, r *prometheus.Registry) error { + mux := http.NewServeMux() + mux.HandleFunc("/healthz", h.HealthZ) mux.Handle("/metrics", promhttp.InstrumentMetricHandler( - s.promRegister, promhttp.HandlerFor(s.promRegister, promhttp.HandlerOpts{}), + r, promhttp.HandlerFor(r, promhttp.HandlerOpts{}), )) - mux.Handle("/", gateway) - log := log.New(newLogWriter(s.log), "", 0) - s.httpSrv = &http.Server{Addr: s.conf.HTTPListenAddress, Handler: mux, ErrorLog: log} - s.HTTPListener, err = net.Listen("tcp", s.conf.HTTPListenAddress) + srv := &http.Server{ + ErrorLog: log.New(newLogWriter(s.log), "", 0), + Addr: s.conf.HTTPStatusListenAddress, + TLSConfig: s.conf.ServerTLS().Clone(), + Handler: mux, + } + + srv.TLSConfig.ClientAuth = tls.NoClientCert + var err error + s.HealthListener, err = net.Listen("tcp", s.conf.HTTPStatusListenAddress) if err != nil { - return errors.Wrap(err, "while starting HTTP listener") + return errors.Wrap(err, "while starting HTTP listener for health metric") + } + + s.wg.Go(func() { + s.log.Infof("HTTPS Health Check Listening on %s ...", s.conf.HTTPStatusListenAddress) + if err := srv.ServeTLS(s.HealthListener, "", ""); err != nil { + if !errors.Is(err, http.ErrServerClosed) { + s.log.WithError(err).Error("while starting TLS Status HTTP server") + } + } + }) + + if err := WaitForConnect(ctx, s.HealthListener.Addr().String(), nil); err != nil { + return err } - httpListenerAddr := s.HTTPListener.Addr().String() - addrs := []string{httpListenerAddr} + s.httpServers = append(s.httpServers, srv) - if s.conf.ServerTLS() != nil { + return nil +} - // If configured, start another listener at configured address and server only - // /v1/HealthCheck while not requesting or verifying client certificate. - if s.conf.HTTPStatusListenAddress != "" { - muxNoMTLS := http.NewServeMux() - muxNoMTLS.Handle("/v1/HealthCheck", gateway) - s.httpSrvNoMTLS = &http.Server{ - Addr: s.conf.HTTPStatusListenAddress, - Handler: muxNoMTLS, - ErrorLog: log, - TLSConfig: s.conf.ServerTLS().Clone(), - } - s.httpSrvNoMTLS.TLSConfig.ClientAuth = tls.NoClientCert - httpListener, err := net.Listen("tcp", s.conf.HTTPStatusListenAddress) - if err != nil { - return errors.Wrap(err, "while starting HTTP listener for health metric") +func (s *Daemon) spawnHTTPS(ctx context.Context, mux http.Handler) error { + srv := &http.Server{ + ErrorLog: log.New(newLogWriter(s.log), "", 0), + TLSConfig: s.conf.ServerTLS().Clone(), + Addr: s.conf.HTTPListenAddress, + Handler: mux, + } + + var err error + s.Listener, err = net.Listen("tcp", s.conf.HTTPListenAddress) + if err != nil { + return errors.Wrap(err, "while starting HTTPS listener") + } + + s.wg.Go(func() { + s.log.Infof("HTTPS Listening on %s ...", s.conf.HTTPListenAddress) + if err := srv.ServeTLS(s.Listener, "", ""); err != nil { + if !errors.Is(err, http.ErrServerClosed) { + s.log.WithError(err).Error("while starting TLS HTTP server") } - httpAddr := httpListener.Addr().String() - addrs = append(addrs, httpAddr) - s.wg.Go(func() { - s.log.Infof("HTTPS Status Handler Listening on %s ...", httpAddr) - if err := s.httpSrvNoMTLS.ServeTLS(httpListener, "", ""); err != nil { - if !errors.Is(err, http.ErrServerClosed) { - s.log.WithError(err).Error("while starting TLS Status HTTP server") - } - } - }) } + }) - // This is to avoid any race conditions that might occur - // since the tls config is a shared pointer. 
- s.httpSrv.TLSConfig = s.conf.ServerTLS().Clone() - s.wg.Go(func() { - s.log.Infof("HTTPS Gateway Listening on %s ...", httpListenerAddr) - if err := s.httpSrv.ServeTLS(s.HTTPListener, "", ""); err != nil { - if !errors.Is(err, http.ErrServerClosed) { - s.log.WithError(err).Error("while starting TLS HTTP server") - } - } - }) - } else { - s.wg.Go(func() { - s.log.Infof("HTTP Gateway Listening on %s ...", httpListenerAddr) - if err := s.httpSrv.Serve(s.HTTPListener); err != nil { - if !errors.Is(err, http.ErrServerClosed) { - s.log.WithError(err).Error("while starting HTTP server") - } - } - }) + if err := WaitForConnect(ctx, s.Listener.Addr().String(), s.conf.ClientTLS()); err != nil { + return err + } + + s.httpServers = append(s.httpServers, srv) + + return nil +} + +func (s *Daemon) spawnHTTP(ctx context.Context, h http.Handler) error { + // Support H2C (HTTP/2 ClearText) + // See https://github.com/thrawn01/h2c-golang-example + h2s := &http2.Server{} + + srv := &http.Server{ + ErrorLog: log.New(newLogWriter(s.log), "", 0), + Addr: s.conf.HTTPListenAddress, + Handler: h2c.NewHandler(h, h2s), } - // Validate we can reach the GRPC and HTTP endpoints before returning - for _, l := range s.GRPCListeners { - addrs = append(addrs, l.Addr().String()) + var err error + s.Listener, err = net.Listen("tcp", s.conf.HTTPListenAddress) + if err != nil { + return errors.Wrap(err, "while starting HTTP listener") } - if err := WaitForConnect(ctx, addrs); err != nil { + + s.wg.Go(func() { + s.log.Infof("HTTP Listening on %s ...", s.conf.HTTPListenAddress) + if err := srv.Serve(s.Listener); err != nil { + if !errors.Is(err, http.ErrServerClosed) { + s.log.WithError(err).Error("while starting HTTP server") + } + } + }) + + if err := WaitForConnect(ctx, s.Listener.Addr().String(), nil); err != nil { return err } + s.httpServers = append(s.httpServers, srv) + return nil } // Close gracefully closes all server connections and listening sockets -func (s *Daemon) Close() { - if s.httpSrv == nil && s.httpSrvNoMTLS == nil { - return +func (s *Daemon) Close(ctx context.Context) error { + if len(s.httpServers) == 0 { + return nil } if s.pool != nil { s.pool.Close() } - s.log.Infof("HTTP Gateway close for %s ...", s.conf.HTTPListenAddress) - _ = s.httpSrv.Shutdown(context.Background()) - if s.httpSrvNoMTLS != nil { - s.log.Infof("HTTP Status Gateway close for %s ...", s.conf.HTTPStatusListenAddress) - _ = s.httpSrvNoMTLS.Shutdown(context.Background()) + for _, srv := range s.httpServers { + s.log.Infof("Shutting down server %s ...", srv.Addr) + _ = srv.Shutdown(ctx) } - for i, srv := range s.grpcSrvs { - s.log.Infof("GRPC close for %s ...", s.GRPCListeners[i].Addr()) - srv.GracefulStop() + + if err := s.Service.Close(ctx); err != nil { + return err } + s.wg.Stop() - s.statsHandler.Close() - s.gwCancel() - s.httpSrv = nil - s.httpSrvNoMTLS = nil - s.grpcSrvs = nil + s.httpServers = nil + return nil } // SetPeers sets the peers for this daemon @@ -390,11 +316,11 @@ func (s *Daemon) SetPeers(in []PeerInfo) { copy(peers, in) for i, p := range peers { - if s.conf.GRPCListenAddress == p.GRPCAddress { + if s.conf.AdvertiseAddress == p.HTTPAddress { peers[i].IsOwner = true } } - s.V1Server.SetPeers(peers) + s.Service.SetPeers(peers) } // Config returns the current config for this Daemon @@ -405,49 +331,38 @@ func (s *Daemon) Config() DaemonConfig { // Peers returns the peers this daemon knows about func (s *Daemon) Peers() []PeerInfo { var peers []PeerInfo - for _, client := range s.V1Server.GetPeerList() { + for _, client 
:= range s.Service.GetPeerList() { peers = append(peers, client.Info()) } return peers } -// WaitForConnect returns nil if the list of addresses is listening -// for connections; will block until context is cancelled. -func WaitForConnect(ctx context.Context, addresses []string) error { - var d net.Dialer - var errs []error - for { - errs = nil - for _, addr := range addresses { - if addr == "" { - continue - } +// WaitForConnect waits until the passed address is accepting connections. +// It will continue to attempt a connection until context is canceled. +func WaitForConnect(ctx context.Context, address string, cfg *tls.Config) error { + if address == "" { + return fmt.Errorf("WaitForConnect() requires a valid address") + } - // TODO: golang 1.15.3 introduces tls.DialContext(). When we are ready to drop - // support for older versions we can detect tls and use the tls.DialContext to - // avoid the `http: TLS handshake error` we get when using TLS. - conn, err := d.DialContext(ctx, "tcp", addr) - if err != nil { - errs = append(errs, err) - continue - } - _ = conn.Close() + var errs []string + for { + var d proxy.ContextDialer + if cfg != nil { + d = &tls.Dialer{Config: cfg} + } else { + d = &net.Dialer{} } - - if len(errs) == 0 { - break + conn, err := d.DialContext(ctx, "tcp", address) + if err == nil { + _ = conn.Close() + return nil } - - <-ctx.Done() - return ctx.Err() - } - - if len(errs) != 0 { - var errStrings []string - for _, err := range errs { - errStrings = append(errStrings, err.Error()) + errs = append(errs, err.Error()) + if ctx.Err() != nil { + errs = append(errs, ctx.Err().Error()) + return errors.New(strings.Join(errs, "\n")) } - return errors.New(strings.Join(errStrings, "\n")) + time.Sleep(time.Millisecond * 100) + continue } - return nil } diff --git a/dns.go b/dns.go index 024b1282..05b9a311 100644 --- a/dns.go +++ b/dns.go @@ -118,12 +118,15 @@ type DNSPoolConfig struct { // (Required) Filesystem path to "/etc/resolv.conf", override for testing ResolvConf string - // (Required) Own GRPC address + // (Required) Own HTTP address OwnAddress string // (Required) Called when the list of gubernators in the pool updates OnUpdate UpdateFunc + // (Required) The default client used to contact peers discovered + Client PeerClient + Logger FieldLogger } @@ -138,7 +141,7 @@ func NewDNSPool(conf DNSPoolConfig) (*DNSPool, error) { setter.SetDefault(&conf.Logger, logrus.WithField("category", "gubernator")) if conf.OwnAddress == "" { - return nil, errors.New("Advertise.GRPCAddress is required") + return nil, errors.New("Advertise.OwnAddress is required") } ctx, cancel := context.WithCancel(context.Background()) @@ -157,14 +160,12 @@ func peer(ip string, self string, ipv6 bool) PeerInfo { if ipv6 { ip = "[" + ip + "]" } - grpc := ip + ":81" + http := ip + ":80" return PeerInfo{ DataCenter: "", - HTTPAddress: ip + ":80", - GRPCAddress: grpc, - IsOwner: grpc == self, + HTTPAddress: http, + IsOwner: http == self, } - } func min(a uint32, b uint32) uint32 { diff --git a/docker-compose-tls.yaml b/docker-compose-tls.yaml index ce2ac118..5ec51773 100644 --- a/docker-compose-tls.yaml +++ b/docker-compose-tls.yaml @@ -5,9 +5,8 @@ services: entrypoint: "/gubernator" environment: # Basic member-list config - - GUBER_GRPC_ADDRESS=0.0.0.0:81 - GUBER_HTTP_ADDRESS=0.0.0.0:80 - - GUBER_ADVERTISE_ADDRESS=gubernator-1:81 + - GUBER_ADVERTISE_ADDRESS=gubernator-1:80 - GUBER_MEMBERLIST_KNOWN_NODES=gubernator-1 # TLS config - GUBER_TLS_CA=/etc/tls/ca.pem @@ -15,7 +14,6 @@ services: - 
GUBER_TLS_CERT=/etc/tls/gubernator.pem - GUBER_TLS_CLIENT_AUTH=require-and-verify ports: - - "9081:81" - "9080:80" volumes: - ${PWD}/contrib/certs:/etc/tls @@ -25,9 +23,8 @@ services: entrypoint: "/gubernator" environment: # Basic member-list config - - GUBER_GRPC_ADDRESS=0.0.0.0:81 - GUBER_HTTP_ADDRESS=0.0.0.0:80 - - GUBER_ADVERTISE_ADDRESS=gubernator-2:81 + - GUBER_ADVERTISE_ADDRESS=gubernator-2:80 - GUBER_MEMBERLIST_KNOWN_NODES=gubernator-1 # TLS config - GUBER_TLS_CA=/etc/tls/ca.pem @@ -35,7 +32,6 @@ services: - GUBER_TLS_CERT=/etc/tls/gubernator.pem - GUBER_TLS_CLIENT_AUTH=require-and-verify ports: - - "9181:81" - "9180:80" volumes: - ${PWD}/contrib/certs:/etc/tls @@ -45,9 +41,8 @@ services: entrypoint: "/gubernator" environment: # Basic member-list config - - GUBER_GRPC_ADDRESS=0.0.0.0:81 - GUBER_HTTP_ADDRESS=0.0.0.0:80 - - GUBER_ADVERTISE_ADDRESS=gubernator-3:81 + - GUBER_ADVERTISE_ADDRESS=gubernator-3:80 - GUBER_MEMBERLIST_KNOWN_NODES=gubernator-1 # TLS config - GUBER_TLS_CA=/etc/tls/ca.pem @@ -55,7 +50,6 @@ services: - GUBER_TLS_CERT=/etc/tls/gubernator.pem - GUBER_TLS_CLIENT_AUTH=require-and-verify ports: - - "9281:81" - "9280:80" volumes: - ${PWD}/contrib/certs:/etc/tls @@ -66,9 +60,8 @@ services: environment: # Basic member-list config - GUBER_DEBUG=true - - GUBER_GRPC_ADDRESS=0.0.0.0:81 - GUBER_HTTP_ADDRESS=0.0.0.0:80 - - GUBER_ADVERTISE_ADDRESS=gubernator-4:81 + - GUBER_ADVERTISE_ADDRESS=gubernator-4:80 - GUBER_MEMBERLIST_KNOWN_NODES=gubernator-1 # TLS config - GUBER_TLS_CA=/etc/tls/ca.pem @@ -76,7 +69,6 @@ services: - GUBER_TLS_CERT=/etc/tls/gubernator.pem - GUBER_TLS_CLIENT_AUTH=require-and-verify ports: - - "9381:81" - "9380:80" volumes: - ${PWD}/contrib/certs:/etc/tls diff --git a/docker-compose.yaml b/docker-compose.yaml index d746c11d..27007bf6 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -4,51 +4,42 @@ services: image: ghcr.io/mailgun/gubernator:latest entrypoint: "/gubernator" environment: - # The address GRPC requests will listen on - - GUBER_GRPC_ADDRESS=0.0.0.0:81 # The address HTTP requests will listen on - GUBER_HTTP_ADDRESS=0.0.0.0:80 # The address that is advertised to other peers - - GUBER_ADVERTISE_ADDRESS=gubernator-1:81 + - GUBER_ADVERTISE_ADDRESS=gubernator-1:80 # A comma separated list of known gubernator nodes - GUBER_MEMBERLIST_KNOWN_NODES=gubernator-1 #- GUBER_DATA_CENTER=us-east-1 ports: - - "9081:81" - "9080:80" gubernator-2: image: ghcr.io/mailgun/gubernator:latest entrypoint: "/gubernator" environment: - # The address GRPC requests will listen on - - GUBER_GRPC_ADDRESS=0.0.0.0:81 # The address HTTP requests will listen on - GUBER_HTTP_ADDRESS=0.0.0.0:80 # The address that is advertised to other peers - - GUBER_ADVERTISE_ADDRESS=gubernator-2:81 + - GUBER_ADVERTISE_ADDRESS=gubernator-2:80 # A comma separated list of known gubernator nodes - GUBER_MEMBERLIST_KNOWN_NODES=gubernator-1 #- GUBER_DATA_CENTER=us-east-1 ports: - - "9181:81" - "9180:80" gubernator-3: image: ghcr.io/mailgun/gubernator:latest entrypoint: "/gubernator" environment: - # The address GRPC requests will listen on - - GUBER_GRPC_ADDRESS=0.0.0.0:81 # The address HTTP requests will listen on - GUBER_HTTP_ADDRESS=0.0.0.0:80 # The address that is advertised to other peers - - GUBER_ADVERTISE_ADDRESS=gubernator-3:81 + - GUBER_ADVERTISE_ADDRESS=gubernator-3:80 # A comma separated list of known gubernator nodes - GUBER_MEMBERLIST_KNOWN_NODES=gubernator-1 #- GUBER_DATA_CENTER=us-west-2 ports: - - "9281:81" - "9280:80" gubernator-4: @@ -56,16 +47,13 @@ services: entrypoint: 
"/gubernator" environment: - GUBER_DEBUG=true - # The address GRPC requests will listen on - - GUBER_GRPC_ADDRESS=0.0.0.0:81 # The address HTTP requests will listen on - GUBER_HTTP_ADDRESS=0.0.0.0:80 # The address that is advertised to other peers - - GUBER_ADVERTISE_ADDRESS=gubernator-4:81 + - GUBER_ADVERTISE_ADDRESS=gubernator-4:80 # A Comma separated list of known gubernator nodes - GUBER_MEMBERLIST_KNOWN_NODES=gubernator-1,gubernator-2 #- GUBER_DATA_CENTER=us-west-2 #- GUBER_METRIC_FLAGS=golang,os ports: - - "9381:81" - "9380:80" diff --git a/docs/tracing.md b/docs/tracing.md index dad9a2e9..bba1c760 100644 --- a/docs/tracing.md +++ b/docs/tracing.md @@ -59,44 +59,15 @@ services. That service will link the client span with the server span. When the client and server both send traces to the same Jaeger server, the trace will appear with the two spans linked in the same view. -When sending gRPC requests to Gubernator, be sure to use the [`otelgrpc` -interceptor](https://github.com/open-telemetry/opentelemetry-go-contrib) to -propagate the client's trace context to the server so it can add spans. - -See [gRPC](#gRPC) section and `cmd/gubernator-cli/main.go` for usage examples. - - - -## gRPC -If using Gubernator's Golang gRPC client, the client must be created like so: - -```go - import ( - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "google.golang.org/grpc" - ) - - // ... - - opts := []grpc.DialOption{ - grpc.WithBlock(), - grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()), - grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()), - } - - endpoint := "" - conn, err := grpc.DialContext(ctx, endpoint, opts...) -``` - ## HTTP -If using HTTP, the tracing ids must be propagated in HTTP headers. This is +When making HTTP requests, the tracing ids must be propagated in HTTP headers. This is typically done using OpenTelemetry instrumentation, such as [`otelhttp`](https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp). See [OpenTelemetry registry](https://opentelemetry.io/registry/?language=go) for instrumentation using many other HTTP frameworks. ## Gubernator Standlone -When deployed as a standalone daemon, Gubernator's gRPC service will receive +When deployed as a standalone daemon, the Gubernator service will receive embedded trace ids in requests from the client's `context` object. For this to work, the client must be configured to embed tracing ids. 
diff --git a/etcd.go b/etcd.go index 336dfb11..676718e2 100644 --- a/etcd.go +++ b/etcd.go @@ -20,8 +20,8 @@ import ( "context" "encoding/json" + "github.com/mailgun/errors" "github.com/mailgun/holster/v4/clock" - "github.com/mailgun/holster/v4/errors" "github.com/mailgun/holster/v4/setter" "github.com/mailgun/holster/v4/syncutil" "github.com/sirupsen/logrus" @@ -74,8 +74,8 @@ func NewEtcdPool(conf EtcdPoolConfig) (*EtcdPool, error) { setter.SetDefault(&conf.KeyPrefix, defaultBaseKey) setter.SetDefault(&conf.Logger, logrus.WithField("category", "gubernator")) - if conf.Advertise.GRPCAddress == "" { - return nil, errors.New("Advertise.GRPCAddress is required") + if conf.Advertise.HTTPAddress == "" { + return nil, errors.New("Advertise.HTTPAddress is required") } if conf.Client == nil { @@ -150,7 +150,7 @@ func (e *EtcdPool) collectPeers(revision *int64) error { // Collect all the peers for _, v := range resp.Kvs { p := e.unMarshallValue(v.Value) - peers[p.GRPCAddress] = p + peers[p.HTTPAddress] = p } e.peers = peers @@ -165,7 +165,7 @@ func (e *EtcdPool) unMarshallValue(v []byte) PeerInfo { // for backward compatible with older gubernator versions if err := json.Unmarshal(v, &p); err != nil { e.log.WithError(err).Errorf("while unmarshalling peer info from key value") - return PeerInfo{GRPCAddress: string(v)} + return PeerInfo{HTTPAddress: string(v)} } return p } @@ -219,7 +219,7 @@ func (e *EtcdPool) watch() error { } func (e *EtcdPool) register(peer PeerInfo) error { - instanceKey := e.conf.KeyPrefix + peer.GRPCAddress + instanceKey := e.conf.KeyPrefix + peer.HTTPAddress e.log.Infof("Registering peer '%#v' with etcd", peer) b, err := json.Marshal(peer) @@ -323,7 +323,7 @@ func (e *EtcdPool) callOnUpdate() { var peers []PeerInfo for _, p := range e.peers { - if p.GRPCAddress == e.conf.Advertise.GRPCAddress { + if p.HTTPAddress == e.conf.Advertise.HTTPAddress { p.IsOwner = true } peers = append(peers, p) diff --git a/example.conf b/example.conf index 3396a405..8ccabe67 100644 --- a/example.conf +++ b/example.conf @@ -2,16 +2,13 @@ # Basic Config ############################ -# The address GRPC requests will listen on -GUBER_GRPC_ADDRESS=0.0.0.0:9990 - # The address HTTP requests will listen on GUBER_HTTP_ADDRESS=0.0.0.0:9980 # The address gubernator peers will connect to. Ignored if using k8s peer # discovery method. # -# Should be the same as GUBER_GRPC_ADDRESS unless you are running behind a NAT +# Should be the same as GUBER_HTTP_ADDRESS unless you are running behind a NAT # or running in a docker container without host networking. # # If unset, will default to the hostname or if that fails will attempt @@ -32,10 +29,6 @@ GUBER_INSTANCE_ID= # The name of the datacenter this gubernator instance is in. # GUBER_DATA_CENTER=datacenter1 -# Time in seconds that the GRPC server will keep a client connection alive. -# If value is zero (default) time is infinity -# GUBER_GRPC_MAX_CONN_AGE_SEC=30 - # A list of optional prometheus metric collection # os - collect process metrics # See https://pkg.go.dev/github.com/prometheus/client_golang@v1.11.0/prometheus/collectors#NewProcessCollector @@ -122,7 +115,7 @@ GUBER_INSTANCE_ID= # gubernator instance and any host name in that certificate. # GUBER_TLS_INSECURE_SKIP_VERIFY=false -# Configures the tls client used to make peer GRPC requests to verify that peer certificates +# Configures the tls client used to make peer requests to verify that peer certificates
# Useful if your peer certificates do not contain IP SANs, but all contain a common SAN. # GUBER_TLS_CLIENT_AUTH_SERVER_NAME=gubernator @@ -246,7 +239,7 @@ GUBER_INSTANCE_ID= # OTEL_TRACES_SAMPLER_ARG=1.0 # Choose the transport protocol (otlp, grpc or http/protobuf) -# OTEL_EXPORTER_OTLP_PROTOCOL=grpc +# OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf # The endpoint to send traces to # OTEL_EXPORTER_OTLP_ENDPOINT=https://collector: diff --git a/functional_test.go b/functional_test.go index 6225fc3e..ab113961 100644 --- a/functional_test.go +++ b/functional_test.go @@ -17,7 +17,6 @@ limitations under the License. package gubernator_test import ( - "bytes" "context" "fmt" "io" @@ -26,8 +25,8 @@ import ( "strings" "testing" - guber "github.com/mailgun/gubernator/v2" - "github.com/mailgun/gubernator/v2/cluster" + guber "github.com/mailgun/gubernator/v3" + "github.com/mailgun/gubernator/v3/cluster" "github.com/mailgun/holster/v4/clock" "github.com/mailgun/holster/v4/testutil" "github.com/prometheus/common/expfmt" @@ -40,18 +39,18 @@ import ( // Setup and shutdown the mock gubernator cluster for the entire test suite func TestMain(m *testing.M) { if err := cluster.StartWith([]guber.PeerInfo{ - {GRPCAddress: "127.0.0.1:9990", HTTPAddress: "127.0.0.1:9980", DataCenter: cluster.DataCenterNone}, - {GRPCAddress: "127.0.0.1:9991", HTTPAddress: "127.0.0.1:9981", DataCenter: cluster.DataCenterNone}, - {GRPCAddress: "127.0.0.1:9992", HTTPAddress: "127.0.0.1:9982", DataCenter: cluster.DataCenterNone}, - {GRPCAddress: "127.0.0.1:9993", HTTPAddress: "127.0.0.1:9983", DataCenter: cluster.DataCenterNone}, - {GRPCAddress: "127.0.0.1:9994", HTTPAddress: "127.0.0.1:9984", DataCenter: cluster.DataCenterNone}, - {GRPCAddress: "127.0.0.1:9995", HTTPAddress: "127.0.0.1:9985", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9980", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9981", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9982", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9983", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9984", DataCenter: cluster.DataCenterNone}, + {HTTPAddress: "127.0.0.1:9985", DataCenter: cluster.DataCenterNone}, // DataCenterOne - {GRPCAddress: "127.0.0.1:9890", HTTPAddress: "127.0.0.1:9880", DataCenter: cluster.DataCenterOne}, - {GRPCAddress: "127.0.0.1:9891", HTTPAddress: "127.0.0.1:9881", DataCenter: cluster.DataCenterOne}, - {GRPCAddress: "127.0.0.1:9892", HTTPAddress: "127.0.0.1:9882", DataCenter: cluster.DataCenterOne}, - {GRPCAddress: "127.0.0.1:9893", HTTPAddress: "127.0.0.1:9883", DataCenter: cluster.DataCenterOne}, + {HTTPAddress: "127.0.0.1:9880", DataCenter: cluster.DataCenterOne}, + {HTTPAddress: "127.0.0.1:9881", DataCenter: cluster.DataCenterOne}, + {HTTPAddress: "127.0.0.1:9882", DataCenter: cluster.DataCenterOne}, + {HTTPAddress: "127.0.0.1:9883", DataCenter: cluster.DataCenterOne}, }); err != nil { fmt.Println(err) os.Exit(1) @@ -61,7 +60,7 @@ func TestMain(m *testing.M) { } func TestOverTheLimit(t *testing.T) { - client, errs := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) + client, errs := guber.NewClient(cluster.GetRandomClientOptions(cluster.DataCenterNone)) require.Nil(t, errs) tests := []struct { @@ -83,8 +82,9 @@ func TestOverTheLimit(t *testing.T) { } for _, test := range tests { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err 
:= client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_over_limit", UniqueKey: "account:1234", @@ -95,11 +95,12 @@ func TestOverTheLimit(t *testing.T) { Behavior: 0, }, }, - }) + }, &resp) require.Nil(t, err) rl := resp.Responses[0] + assert.Equal(t, "", rl.Error) assert.Equal(t, test.Status, rl.Status) assert.Equal(t, test.Remaining, rl.Remaining) assert.Equal(t, int64(2), rl.Limit) @@ -114,12 +115,13 @@ func TestMultipleAsync(t *testing.T) { // need to be changed. We want the test to forward both rate limits to other // nodes in the cluster. - t.Logf("Asking Peer: %s", cluster.GetPeers()[0].GRPCAddress) - client, errs := guber.DialV1Server(cluster.GetPeers()[0].GRPCAddress, nil) + t.Logf("Asking Peer: %s", cluster.GetPeers()[0].HTTPAddress) + client, errs := guber.NewClient(guber.WithNoTLS(cluster.GetPeers()[0].HTTPAddress)) require.Nil(t, errs) - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_multiple_async", UniqueKey: "account:9234", @@ -139,7 +141,7 @@ func TestMultipleAsync(t *testing.T) { Behavior: 0, }, }, - }) + }, &resp) require.Nil(t, err) require.Len(t, resp.Responses, 2) @@ -158,8 +160,8 @@ func TestMultipleAsync(t *testing.T) { func TestTokenBucket(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - addr := cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress - client, errs := guber.DialV1Server(addr, nil) + addr := cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress + client, errs := guber.NewClient(guber.WithNoTLS(addr)) require.Nil(t, errs) tests := []struct { @@ -190,8 +192,9 @@ func TestTokenBucket(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_token_bucket", UniqueKey: "account:1234", @@ -201,7 +204,7 @@ func TestTokenBucket(t *testing.T) { Hits: 1, }, }, - }) + }, &resp) require.Nil(t, err) rl := resp.Responses[0] @@ -219,7 +222,7 @@ func TestTokenBucket(t *testing.T) { func TestTokenBucketGregorian(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - client, errs := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) + client, errs := guber.NewClient(guber.WithNoTLS(cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress)) require.Nil(t, errs) tests := []struct { @@ -264,8 +267,9 @@ func TestTokenBucketGregorian(t *testing.T) { for _, test := range tests { t.Run(test.Name, func(t *testing.T) { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_token_bucket_greg", UniqueKey: "account:12345", @@ -276,7 +280,7 @@ func TestTokenBucketGregorian(t *testing.T) { Limit: 60, }, }, - }) + }, &resp) require.Nil(t, err) rl := resp.Responses[0] @@ -294,8 +298,8 @@ func TestTokenBucketGregorian(t *testing.T) { func 
TestTokenBucketNegativeHits(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - addr := cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress - client, errs := guber.DialV1Server(addr, nil) + addr := cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress + client, errs := guber.NewClient(guber.WithNoTLS(addr)) require.Nil(t, errs) tests := []struct { @@ -337,8 +341,9 @@ func TestTokenBucketNegativeHits(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_token_bucket_negative", UniqueKey: "account:12345", @@ -348,7 +353,7 @@ func TestTokenBucketNegativeHits(t *testing.T) { Hits: tt.Hits, }, }, - }) + }, &resp) require.Nil(t, err) rl := resp.Responses[0] @@ -366,7 +371,7 @@ func TestTokenBucketNegativeHits(t *testing.T) { func TestLeakyBucket(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - client, errs := guber.DialV1Server(cluster.PeerAt(0).GRPCAddress, nil) + client, errs := guber.NewClient(guber.WithNoTLS(cluster.PeerAt(0).HTTPAddress)) require.Nil(t, errs) tests := []struct { @@ -464,8 +469,9 @@ func TestLeakyBucket(t *testing.T) { for _, test := range tests { t.Run(test.Name, func(t *testing.T) { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_leaky_bucket", UniqueKey: "account:1234", @@ -475,7 +481,7 @@ func TestLeakyBucket(t *testing.T) { Limit: 10, }, }, - }) + }, &resp) require.NoError(t, err) require.Len(t, resp.Responses, 1) @@ -493,7 +499,7 @@ func TestLeakyBucket(t *testing.T) { func TestLeakyBucketWithBurst(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - client, errs := guber.DialV1Server(cluster.PeerAt(0).GRPCAddress, nil) + client, errs := guber.NewClient(guber.WithNoTLS(cluster.PeerAt(0).HTTPAddress)) require.Nil(t, errs) tests := []struct { @@ -570,8 +576,9 @@ func TestLeakyBucketWithBurst(t *testing.T) { for _, test := range tests { t.Run(test.Name, func(t *testing.T) { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_leaky_bucket_with_burst", UniqueKey: "account:1234", @@ -582,7 +589,7 @@ func TestLeakyBucketWithBurst(t *testing.T) { Burst: 20, }, }, - }) + }, &resp) require.NoError(t, err) require.Len(t, resp.Responses, 1) @@ -600,7 +607,7 @@ func TestLeakyBucketWithBurst(t *testing.T) { func TestLeakyBucketGregorian(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - client, errs := guber.DialV1Server(cluster.PeerAt(0).GRPCAddress, nil) + client, errs := guber.NewClient(guber.WithNoTLS(cluster.PeerAt(0).HTTPAddress)) require.Nil(t, errs) tests := []struct { @@ -635,8 +642,9 @@ func TestLeakyBucketGregorian(t *testing.T) { now := clock.Now() for _, test := range tests { t.Run(test.Name, func(t *testing.T) { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: 
[]*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_leaky_bucket_greg", UniqueKey: "account:12345", @@ -647,7 +655,7 @@ func TestLeakyBucketGregorian(t *testing.T) { Limit: 60, }, }, - }) + }, &resp) clock.Freeze(clock.Now()) require.NoError(t, err) @@ -665,7 +673,7 @@ func TestLeakyBucketGregorian(t *testing.T) { func TestLeakyBucketNegativeHits(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - client, errs := guber.DialV1Server(cluster.PeerAt(0).GRPCAddress, nil) + client, errs := guber.NewClient(guber.WithNoTLS(cluster.PeerAt(0).HTTPAddress)) require.Nil(t, errs) tests := []struct { @@ -707,8 +715,9 @@ func TestLeakyBucketNegativeHits(t *testing.T) { for _, test := range tests { t.Run(test.Name, func(t *testing.T) { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_leaky_bucket_negative", UniqueKey: "account:12345", @@ -718,7 +727,7 @@ func TestLeakyBucketNegativeHits(t *testing.T) { Limit: 10, }, }, - }) + }, &resp) require.NoError(t, err) require.Len(t, resp.Responses, 1) @@ -734,16 +743,16 @@ func TestLeakyBucketNegativeHits(t *testing.T) { } func TestMissingFields(t *testing.T) { - client, errs := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) + client, errs := guber.NewClient(guber.WithNoTLS(cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress)) require.Nil(t, errs) tests := []struct { - Req *guber.RateLimitReq + Req *guber.RateLimitRequest Status guber.Status Error string }{ { - Req: &guber.RateLimitReq{ + Req: &guber.RateLimitRequest{ Name: "test_missing_fields", UniqueKey: "account:1234", Hits: 1, @@ -754,7 +763,7 @@ func TestMissingFields(t *testing.T) { Status: guber.Status_UNDER_LIMIT, }, { - Req: &guber.RateLimitReq{ + Req: &guber.RateLimitRequest{ Name: "test_missing_fields", UniqueKey: "account:12345", Hits: 1, @@ -765,7 +774,7 @@ func TestMissingFields(t *testing.T) { Status: guber.Status_OVER_LIMIT, }, { - Req: &guber.RateLimitReq{ + Req: &guber.RateLimitRequest{ UniqueKey: "account:1234", Hits: 1, Duration: 10000, @@ -775,7 +784,7 @@ func TestMissingFields(t *testing.T) { Status: guber.Status_UNDER_LIMIT, }, { - Req: &guber.RateLimitReq{ + Req: &guber.RateLimitRequest{ Name: "test_missing_fields", Hits: 1, Duration: 10000, @@ -787,9 +796,10 @@ func TestMissingFields(t *testing.T) { } for i, test := range tests { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{test.Req}, - }) + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{test.Req}, + }, &resp) require.Nil(t, err) assert.Equal(t, test.Error, resp.Responses[0].Error, i) assert.Equal(t, test.Status, resp.Responses[0].Status, i) @@ -797,15 +807,16 @@ func TestMissingFields(t *testing.T) { } func TestGlobalRateLimits(t *testing.T) { - peer := cluster.PeerAt(0).GRPCAddress - client, errs := guber.DialV1Server(peer, nil) + address := cluster.PeerAt(0).HTTPAddress + client, errs := guber.NewClient(guber.WithNoTLS(address)) require.NoError(t, errs) sendHit := func(status guber.Status, remain int64, i 
int) string { ctx, cancel := context.WithTimeout(context.Background(), clock.Second*5) defer cancel() - resp, err := client.GetRateLimits(ctx, &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(ctx, &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_global", UniqueKey: "account:12345", @@ -816,7 +827,7 @@ func TestGlobalRateLimits(t *testing.T) { Limit: 5, }, }, - }) + }, &resp) require.NoError(t, err, i) assert.Equal(t, "", resp.Responses[0].Error, i) assert.Equal(t, status, resp.Responses[0].Status, i) @@ -828,7 +839,7 @@ func TestGlobalRateLimits(t *testing.T) { // name/key should ensure our connected peer is NOT the owner, // the peer we are connected to should forward requests asynchronously to the owner. - assert.NotEqual(t, peer, resp.Responses[0].Metadata["owner"]) + assert.NotEqual(t, address, resp.Responses[0].Metadata["owner"]) return resp.Responses[0].Metadata["owner"] } @@ -839,6 +850,7 @@ func TestGlobalRateLimits(t *testing.T) { // Our second should be processed as if we own it since the async forward hasn't occurred yet sendHit(guber.Status_UNDER_LIMIT, 3, 2) + log := t.Logf testutil.UntilPass(t, 20, clock.Millisecond*200, func(t testutil.TestingT) { // Inspect our metrics, ensure they collected the counts we expected during this test d := cluster.DaemonAt(0) @@ -856,6 +868,21 @@ func TestGlobalRateLimits(t *testing.T) { } assert.Equal(t, 1, broadcastCount) + log("Global Send Duration Count: %d\n", broadcastCount) + + // Service 5 should be the owner of our global rate limit + d = cluster.DaemonAt(5) + config := d.Config() + resp, err := http.Get(fmt.Sprintf("http://%s/metrics", config.HTTPListenAddress)) + if !assert.NoError(t, err) { + return + } + defer resp.Body.Close() + + assert.Equal(t, http.StatusOK, resp.StatusCode) + m = getMetric(t, resp.Body, "gubernator_broadcast_duration_count") + assert.Equal(t, 1, int(m.Value)) + log("Broadcast Duration Count: %d\n", int(m.Value)) }) } @@ -867,7 +894,7 @@ func getMetricRequest(t testutil.TestingT, url string, name string) *model.Sampl } func TestChangeLimit(t *testing.T) { - client, errs := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) + client, errs := guber.NewClient(guber.WithNoTLS(cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress)) require.Nil(t, errs) tests := []struct { @@ -937,8 +964,9 @@ func TestChangeLimit(t *testing.T) { for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_change_limit", UniqueKey: "account:1234", @@ -948,7 +976,7 @@ func TestChangeLimit(t *testing.T) { Hits: 1, }, }, - }) + }, &resp) require.Nil(t, err) rl := resp.Responses[0] @@ -962,7 +990,7 @@ func TestChangeLimit(t *testing.T) { } func TestResetRemaining(t *testing.T) { - client, errs := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) + client, errs := guber.NewClient(guber.WithNoTLS(cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress)) require.Nil(t, errs) tests := []struct { @@ -1009,8 +1037,9 @@ func TestResetRemaining(t *testing.T) { for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { - resp, err := 
client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_reset_remaining", UniqueKey: "account:1234", @@ -1021,7 +1050,7 @@ func TestResetRemaining(t *testing.T) { Hits: 1, }, }, - }) + }, &resp) require.Nil(t, err) rl := resp.Responses[0] @@ -1034,63 +1063,71 @@ func TestResetRemaining(t *testing.T) { } func TestHealthCheck(t *testing.T) { - client, err := guber.DialV1Server(cluster.DaemonAt(0).GRPCListeners[0].Addr().String(), nil) + client, err := guber.NewClient(guber.WithNoTLS(cluster.DaemonAt(0).Listener.Addr().String())) require.NoError(t, err) // Check that the cluster is healthy to start with - healthResp, err := client.HealthCheck(context.Background(), &guber.HealthCheckReq{}) + var resp guber.HealthCheckResponse + err = client.HealthCheck(context.Background(), &resp) require.NoError(t, err) - require.Equal(t, "healthy", healthResp.GetStatus()) + require.Equal(t, "healthy", resp.GetStatus()) // Create a global rate limit that will need to be sent to all peers in the cluster - _, err = client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ - { - Name: "test_health_check", - UniqueKey: "account:12345", - Algorithm: guber.Algorithm_TOKEN_BUCKET, - Behavior: guber.Behavior_BATCHING, - Duration: guber.Second * 3, - Hits: 1, - Limit: 5, + { + var resp guber.CheckRateLimitsResponse + err = client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ + { + Name: "test_health_check", + UniqueKey: "account:12345", + Algorithm: guber.Algorithm_TOKEN_BUCKET, + Behavior: guber.Behavior_BATCHING, + Duration: guber.Second * 3, + Hits: 1, + Limit: 5, + }, }, - }, - }) - require.Nil(t, err) + }, &resp) + require.NoError(t, err) + } // Stop the rest of the cluster to ensure errors occur on our instance for i := 1; i < cluster.NumOfDaemons(); i++ { d := cluster.DaemonAt(i) require.NotNil(t, d) - d.Close() + d.Close(context.Background()) } // Hit the global rate limit again this time causing a connection error - _, err = client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ - { - Name: "test_health_check", - UniqueKey: "account:12345", - Algorithm: guber.Algorithm_TOKEN_BUCKET, - Behavior: guber.Behavior_GLOBAL, - Duration: guber.Second * 3, - Hits: 1, - Limit: 5, + { + var resp guber.CheckRateLimitsResponse + err = client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ + { + Name: "test_health_check", + UniqueKey: "account:12345", + Algorithm: guber.Algorithm_TOKEN_BUCKET, + Behavior: guber.Behavior_GLOBAL, + Duration: guber.Second * 3, + Hits: 1, + Limit: 5, + }, }, - }, - }) - require.Nil(t, err) + }, &resp) + require.NoError(t, err) + } testutil.UntilPass(t, 20, clock.Millisecond*300, func(t testutil.TestingT) { // Check the health again to get back the connection error - healthResp, err = client.HealthCheck(context.Background(), &guber.HealthCheckReq{}) + var resp guber.HealthCheckResponse + err = client.HealthCheck(context.Background(), &resp) if assert.Nil(t, err) { return } - assert.Equal(t, "unhealthy", healthResp.GetStatus()) - assert.Contains(t, healthResp.GetMessage(), "connect: connection refused") + assert.Equal(t, "unhealthy", resp.GetStatus()) + 
assert.Contains(t, resp.GetMessage(), "connect: connection refused") }) // Restart stopped instances @@ -1102,11 +1139,12 @@ func TestHealthCheck(t *testing.T) { func TestLeakyBucketDivBug(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - client, err := guber.DialV1Server(cluster.GetRandomPeer(cluster.DataCenterNone).GRPCAddress, nil) + client, err := guber.NewClient(guber.WithNoTLS(cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress)) require.NoError(t, err) - resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + var resp guber.CheckRateLimitsResponse + err = client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_leaky_bucket_div", UniqueKey: "account:12345", @@ -1116,7 +1154,7 @@ func TestLeakyBucketDivBug(t *testing.T) { Limit: 2000, }, }, - }) + }, &resp) require.NoError(t, err) assert.Equal(t, "", resp.Responses[0].Error) assert.Equal(t, guber.Status_UNDER_LIMIT, resp.Responses[0].Status) @@ -1124,8 +1162,8 @@ func TestLeakyBucketDivBug(t *testing.T) { assert.Equal(t, int64(2000), resp.Responses[0].Limit) // Should result in a rate of 0.5 - resp, err = client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ + err = client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{ + Requests: []*guber.RateLimitRequest{ { Name: "test_leaky_bucket_div", UniqueKey: "account:12345", @@ -1135,7 +1173,7 @@ func TestLeakyBucketDivBug(t *testing.T) { Limit: 2000, }, }, - }) + }, &resp) require.NoError(t, err) assert.Equal(t, int64(1899), resp.Responses[0].Remaining) assert.Equal(t, int64(2000), resp.Responses[0].Limit) @@ -1151,59 +1189,32 @@ func TestMultiRegion(t *testing.T) { // TODO: Wait until both rate limit count show up on all datacenters } -func TestGRPCGateway(t *testing.T) { - address := cluster.GetRandomPeer(cluster.DataCenterNone).HTTPAddress - resp, err := http.DefaultClient.Get("http://" + address + "/v1/HealthCheck") +func TestDefaultHealthZ(t *testing.T) { + address := cluster.GetRandomPeerInfo(cluster.DataCenterNone).HTTPAddress + resp, err := http.DefaultClient.Get("http://" + address + "/healthz") require.NoError(t, err) defer resp.Body.Close() assert.Equal(t, http.StatusOK, resp.StatusCode) b, err := io.ReadAll(resp.Body) - // This test ensures future upgrades don't accidentally change `under_score` to `camelCase` again. - assert.Contains(t, string(b), "peer_count") + assert.Contains(t, string(b), "peerCount") - var hc guber.HealthCheckResp + var hc guber.HealthCheckResponse require.NoError(t, json.Unmarshal(b, &hc)) assert.Equal(t, int32(10), hc.PeerCount) require.NoError(t, err) - - payload, err := json.Marshal(&guber.GetRateLimitsReq{ - Requests: []*guber.RateLimitReq{ - { - Name: "requests_per_sec", - UniqueKey: "account:12345", - Duration: guber.Millisecond * 1000, - Hits: 1, - Limit: 10, - }, - }, - }) - require.NoError(t, err) - - resp, err = http.DefaultClient.Post("http://"+address+"/v1/GetRateLimits", - "application/json", bytes.NewReader(payload)) - require.NoError(t, err) - defer resp.Body.Close() - - assert.Equal(t, http.StatusOK, resp.StatusCode) - b, err = io.ReadAll(resp.Body) - require.NoError(t, err) - var r guber.GetRateLimitsResp - - // NOTE: It is important to use 'protojson' instead of the standard 'json' package - // else the enums will not be converted properly and json.Unmarshal() will return an - // error. 
- require.NoError(t, json.Unmarshal(b, &r)) - require.Equal(t, 1, len(r.Responses)) - assert.Equal(t, guber.Status_UNDER_LIMIT, r.Responses[0].Status) } func TestGetPeerRateLimits(t *testing.T) { ctx := context.Background() - peerClient := guber.NewPeerClient(guber.PeerConfig{ - Info: cluster.GetRandomPeer(cluster.DataCenterNone), + info := cluster.GetRandomPeerInfo(cluster.DataCenterNone) + + peerClient, err := guber.NewPeer(guber.PeerConfig{ + PeerClient: guber.NewPeerClient(guber.WithNoTLS(info.HTTPAddress)), + Info: info, }) + require.NoError(t, err) t.Run("Stable rate check request order", func(t *testing.T) { // Ensure response order matches rate check request order. @@ -1213,11 +1224,11 @@ func TestGetPeerRateLimits(t *testing.T) { for _, n := range testCases { t.Run(fmt.Sprintf("Batch size %d", n), func(t *testing.T) { // Build request. - req := &guber.GetPeerRateLimitsReq{ - Requests: make([]*guber.RateLimitReq, n), + req := &guber.ForwardRequest{ + Requests: make([]*guber.RateLimitRequest, n), } for i := 0; i < n; i++ { - req.Requests[i] = &guber.RateLimitReq{ + req.Requests[i] = &guber.RateLimitRequest{ Name: "Foobar", UniqueKey: fmt.Sprintf("%08x", i), Hits: 0, @@ -1229,11 +1240,11 @@ func TestGetPeerRateLimits(t *testing.T) { } // Send request. - resp, err := peerClient.GetPeerRateLimits(ctx, req) + var resp guber.ForwardResponse + err := peerClient.ForwardBatch(ctx, req, &resp) // Verify. require.NoError(t, err) - require.NotNil(t, resp) assert.Len(t, resp.RateLimits, n) for i, item := range resp.RateLimits { @@ -1245,7 +1256,14 @@ func TestGetPeerRateLimits(t *testing.T) { }) } -// TODO: Add a test for sending no rate limits RateLimitReqList.RateLimits = nil +func TestNoRateLimits(t *testing.T) { + client, errs := guber.NewClient(cluster.GetRandomClientOptions(cluster.DataCenterNone)) + require.Nil(t, errs) + + var resp guber.CheckRateLimitsResponse + err := client.CheckRateLimits(context.Background(), &guber.CheckRateLimitsRequest{}, &resp) + require.Error(t, err) +} func getMetric(t testutil.TestingT, in io.Reader, name string) *model.Sample { dec := expfmt.SampleDecoder{ diff --git a/global.go b/global.go index 78431960..5ba49ff5 100644 --- a/global.go +++ b/global.go @@ -19,6 +19,8 @@ package gubernator import ( "context" + "github.com/mailgun/holster/v4/ctxutil" + "github.com/mailgun/holster/v4/syncutil" "github.com/prometheus/client_golang/prometheus" "google.golang.org/protobuf/proto" @@ -27,23 +29,24 @@ import ( // globalManager manages async hit queue and updates peers in // the cluster periodically when a global rate limit we own updates. 
type globalManager struct { - asyncQueue chan *RateLimitReq - broadcastQueue chan *RateLimitReq - wg syncutil.WaitGroup - conf BehaviorConfig - log FieldLogger - instance *V1Instance + asyncQueue chan *RateLimitRequest + broadcastQueue chan *RateLimitRequest + wg syncutil.WaitGroup + conf BehaviorConfig + log FieldLogger + instance *Service + metricGlobalSendDuration prometheus.Summary metricBroadcastDuration prometheus.Summary metricBroadcastCounter *prometheus.CounterVec metricGlobalQueueLength prometheus.Gauge } -func newGlobalManager(conf BehaviorConfig, instance *V1Instance) *globalManager { +func newGlobalManager(conf BehaviorConfig, instance *Service) *globalManager { gm := globalManager{ log: instance.log, - asyncQueue: make(chan *RateLimitReq, conf.GlobalBatchLimit), - broadcastQueue: make(chan *RateLimitReq, conf.GlobalBatchLimit), + asyncQueue: make(chan *RateLimitRequest, conf.GlobalBatchLimit), + broadcastQueue: make(chan *RateLimitRequest, conf.GlobalBatchLimit), instance: instance, conf: conf, metricGlobalSendDuration: prometheus.NewSummary(prometheus.SummaryOpts{ @@ -70,11 +73,11 @@ func newGlobalManager(conf BehaviorConfig, instance *V1Instance) *globalManager return &gm } -func (gm *globalManager) QueueHit(r *RateLimitReq) { +func (gm *globalManager) QueueHit(r *RateLimitRequest) { gm.asyncQueue <- r } -func (gm *globalManager) QueueUpdate(r *RateLimitReq) { +func (gm *globalManager) QueueUpdate(r *RateLimitRequest) { gm.broadcastQueue <- r } @@ -82,7 +85,7 @@ func (gm *globalManager) QueueUpdate(r *RateLimitReq) { // be sent to their owning peers. func (gm *globalManager) runAsyncHits() { var interval = NewInterval(gm.conf.GlobalSyncWait) - hits := make(map[string]*RateLimitReq) + hits := make(map[string]*RateLimitRequest) gm.wg.Until(func(done chan struct{}) bool { @@ -100,7 +103,7 @@ func (gm *globalManager) runAsyncHits() { // Send the hits if we reached our batch limit if len(hits) == gm.conf.GlobalBatchLimit { gm.sendHits(hits) - hits = make(map[string]*RateLimitReq) + hits = make(map[string]*RateLimitRequest) return true } @@ -113,7 +116,7 @@ func (gm *globalManager) runAsyncHits() { case <-interval.C: if len(hits) != 0 { gm.sendHits(hits) - hits = make(map[string]*RateLimitReq) + hits = make(map[string]*RateLimitRequest) } case <-done: return false @@ -124,10 +127,10 @@ func (gm *globalManager) runAsyncHits() { // sendHits takes the hits collected by runAsyncHits and sends them to their // owning peers -func (gm *globalManager) sendHits(hits map[string]*RateLimitReq) { +func (gm *globalManager) sendHits(hits map[string]*RateLimitRequest) { type pair struct { - client *PeerClient - req GetPeerRateLimitsReq + client *Peer + req ForwardRequest } defer prometheus.NewTimer(gm.metricGlobalSendDuration).ObserveDuration() peerRequests := make(map[string]*pair) @@ -140,13 +143,13 @@ func (gm *globalManager) sendHits(hits map[string]*RateLimitReq) { continue } - p, ok := peerRequests[peer.Info().GRPCAddress] + p, ok := peerRequests[peer.Info().HTTPAddress] if ok { p.req.Requests = append(p.req.Requests, r) } else { - peerRequests[peer.Info().GRPCAddress] = &pair{ + peerRequests[peer.Info().HTTPAddress] = &pair{ client: peer, - req: GetPeerRateLimitsReq{Requests: []*RateLimitReq{r}}, + req: ForwardRequest{Requests: []*RateLimitRequest{r}}, } } } @@ -154,18 +157,16 @@ func (gm *globalManager) sendHits(hits map[string]*RateLimitReq) { fan := syncutil.NewFanOut(gm.conf.GlobalPeerRequestsConcurrency) // Send the rate limit requests to their respective owning peers. 
for _, p := range peerRequests { - fan.Run(func(in interface{}) error { - p := in.(*pair) - ctx, cancel := context.WithTimeout(context.Background(), gm.conf.GlobalTimeout) - _, err := p.client.GetPeerRateLimits(ctx, &p.req) - cancel() - - if err != nil { - gm.log.WithError(err). - Errorf("error sending global hits to '%s'", p.client.Info().GRPCAddress) - } - return nil - }, p) + ctx, cancel := ctxutil.WithTimeout(context.Background(), gm.conf.GlobalTimeout) + var resp ForwardResponse + err := p.client.ForwardBatch(ctx, &p.req, &resp) + cancel() + + if err != nil { + gm.log.WithError(err). + Errorf("error sending global hits to '%s'", p.client.Info().HTTPAddress) + continue + } } fan.Wait() } @@ -173,7 +174,7 @@ func (gm *globalManager) sendHits(hits map[string]*RateLimitReq) { // runBroadcasts collects status changes for global rate limits and broadcasts the changes to each peer in the cluster. func (gm *globalManager) runBroadcasts() { var interval = NewInterval(gm.conf.GlobalSyncWait) - updates := make(map[string]*RateLimitReq) + updates := make(map[string]*RateLimitRequest) gm.wg.Until(func(done chan struct{}) bool { select { @@ -184,7 +185,7 @@ func (gm *globalManager) runBroadcasts() { if len(updates) >= gm.conf.GlobalBatchLimit { gm.metricBroadcastCounter.WithLabelValues("queue_full").Inc() gm.broadcastPeers(context.Background(), updates) - updates = make(map[string]*RateLimitReq) + updates = make(map[string]*RateLimitRequest) return true } @@ -198,7 +199,7 @@ func (gm *globalManager) runBroadcasts() { if len(updates) != 0 { gm.metricBroadcastCounter.WithLabelValues("timer").Inc() gm.broadcastPeers(context.Background(), updates) - updates = make(map[string]*RateLimitReq) + updates = make(map[string]*RateLimitRequest) } else { gm.metricGlobalQueueLength.Set(0) } @@ -210,30 +211,30 @@ func (gm *globalManager) runBroadcasts() { } // broadcastPeers broadcasts global rate limit statuses to all other peers -func (gm *globalManager) broadcastPeers(ctx context.Context, updates map[string]*RateLimitReq) { +func (gm *globalManager) broadcastPeers(ctx context.Context, updates map[string]*RateLimitRequest) { defer prometheus.NewTimer(gm.metricBroadcastDuration).ObserveDuration() - var req UpdatePeerGlobalsReq + var req UpdateRequest gm.metricGlobalQueueLength.Set(float64(len(updates))) for _, r := range updates { // Copy the original since we are removing the GLOBAL behavior - rl := proto.Clone(r).(*RateLimitReq) + rl := proto.Clone(r).(*RateLimitRequest) // We are only sending the status of the rate limit so, we // clear the behavior flag, so we don't get queued for update again. 
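+		// Behavior is a bit field, so this clears only the GLOBAL flag on the
+		// clone; any other behavior flags from the original request are kept.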
SetBehavior(&rl.Behavior, Behavior_GLOBAL, false) rl.Hits = 0 - status, err := gm.instance.getLocalRateLimit(ctx, rl) + status, err := gm.instance.checkLocalRateLimit(ctx, rl) if err != nil { gm.log.WithError(err).Errorf("while broadcasting update to peers for: '%s'", rl.HashKey()) continue } - // Build an UpdatePeerGlobalsReq - req.Globals = append(req.Globals, &UpdatePeerGlobal{ + // Build an UpdateRateLimitsRequest + req.Globals = append(req.Globals, &UpdateRateLimit{ Algorithm: rl.Algorithm, Key: rl.HashKey(), - Status: status, + Update: status, }) } @@ -244,20 +245,17 @@ func (gm *globalManager) broadcastPeers(ctx context.Context, updates map[string] continue } - fan.Run(func(in interface{}) error { - peer := in.(*PeerClient) - ctx, cancel := context.WithTimeout(ctx, gm.conf.GlobalTimeout) - _, err := peer.UpdatePeerGlobals(ctx, &req) - cancel() - - if err != nil { - // Skip peers that are not in a ready state - if !IsNotReady(err) { - gm.log.WithError(err).Errorf("while broadcasting global updates to '%s'", peer.Info().GRPCAddress) - } + ctx, cancel := ctxutil.WithTimeout(ctx, gm.conf.GlobalTimeout) + err := peer.Update(ctx, &req) + cancel() + + if err != nil { + // Skip peers that are not in a ready state + if !IsNotReady(err) { + gm.log.WithError(err).Errorf("while broadcasting global updates to '%s'", + peer.Info().HTTPAddress) } - return nil - }, peer) + } } fan.Wait() } diff --git a/go.mod b/go.mod index 0f5275c6..22f42309 100644 --- a/go.mod +++ b/go.mod @@ -1,14 +1,16 @@ -module github.com/mailgun/gubernator/v2 +module github.com/mailgun/gubernator/v3 -go 1.20 +go 1.21 + +toolchain go1.21.0 require ( github.com/OneOfOne/xxhash v1.2.8 github.com/davecgh/go-spew v1.1.1 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 + github.com/duh-rpc/duh-go v0.0.2-0.20230929155108-5d641b0c008a github.com/hashicorp/memberlist v0.5.0 - github.com/mailgun/errors v0.1.5 - github.com/mailgun/holster/v4 v4.16.2-0.20231121154636-69040cb71a3b + github.com/mailgun/errors v0.2.0 + github.com/mailgun/holster/v4 v4.16.1 github.com/miekg/dns v1.1.50 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.13.0 @@ -18,14 +20,11 @@ require ( github.com/sirupsen/logrus v1.9.2 github.com/stretchr/testify v1.8.4 go.etcd.io/etcd/client/v3 v3.5.5 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 - go.opentelemetry.io/otel v1.21.0 - go.opentelemetry.io/otel/sdk v1.21.0 - go.opentelemetry.io/otel/trace v1.21.0 + go.opentelemetry.io/otel v1.19.0 + go.opentelemetry.io/otel/sdk v1.19.0 + go.opentelemetry.io/otel/trace v1.19.0 golang.org/x/net v0.18.0 golang.org/x/time v0.3.0 - google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b - google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.31.0 k8s.io/api v0.23.3 k8s.io/apimachinery v0.23.3 @@ -40,7 +39,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect - github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -49,6 +48,7 @@ require ( github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.1.0 // indirect github.com/googleapis/gnostic v0.5.5 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect 
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-msgpack v1.1.5 // indirect @@ -69,12 +69,12 @@ require ( github.com/uptrace/opentelemetry-go-extra/otelutil v0.2.1 // indirect go.etcd.io/etcd/api/v3 v3.5.5 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect @@ -87,7 +87,9 @@ require ( golang.org/x/tools v0.6.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect + google.golang.org/grpc v1.59.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 3ff2e93e..f07d5074 100644 --- a/go.sum +++ b/go.sum @@ -18,15 +18,12 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -55,7 +52,9 @@ github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdII github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod 
h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/ahmetb/go-linq v3.0.0+incompatible h1:qQkjjOXKrKOTy83X8OpRmnKflXKQIL/mC/gMVVDMhOA= +github.com/ahmetb/go-linq v3.0.0+incompatible/go.mod h1:PFffvbdbtw+QTB0WKRP0cNht7vnCfnGlEpak/DVg5cY= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -91,7 +90,6 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= @@ -101,6 +99,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/duh-rpc/duh-go v0.0.2-0.20230929155108-5d641b0c008a h1:v/NQEfHHOY/huFECKxKZnEkY5jVD8Yix8TPa0FjgKbg= +github.com/duh-rpc/duh-go v0.0.2-0.20230929155108-5d641b0c008a/go.mod h1:OoCoGsZkeED84v8TAE86m2NM5ZfNLNlqUUm7tYO+h+k= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -112,11 +112,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.3 
h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -154,6 +153,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -223,6 +223,7 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= @@ -285,14 +286,15 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mailgun/errors v0.1.5 h1:riRpZqfUKTdc8saXvoEg2tYkbRyZESU1KvQ3UxPbdus= -github.com/mailgun/errors v0.1.5/go.mod h1:lw+Nh4r/aoUTz6uK915FdfZJo3yq60gPiflFHNpK4NQ= -github.com/mailgun/holster/v4 v4.16.2-0.20231121154636-69040cb71a3b h1:ohMhrwmmA4JbXNukFpriztFWEVLlMuL90Cssg2Vl2TU= -github.com/mailgun/holster/v4 v4.16.2-0.20231121154636-69040cb71a3b/go.mod h1:phAg61z7LZ1PBfedyt2GXkGSlHhuVKK9AcVJO+Cm0/U= +github.com/mailgun/errors v0.2.0 h1:/Og2m/hbtl5U80zLP/grchU+dvjtl+8kemFr1nd42WA= +github.com/mailgun/errors v0.2.0/go.mod h1:lw+Nh4r/aoUTz6uK915FdfZJo3yq60gPiflFHNpK4NQ= +github.com/mailgun/holster/v4 v4.16.1 h1:blSeB3xJo37dvYimiNduOxZboWsM9Xyk09J5AHhsJjc= +github.com/mailgun/holster/v4 v4.16.1/go.mod h1:2YSKKbCOGCfqPCvAHrYX2W/N3m+nN7pUlgYW6ycsDHU= github.com/mailru/easyjson 
v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -372,6 +374,7 @@ github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0ua github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -424,26 +427,24 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= 
-go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= @@ -451,7 +452,8 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= @@ -579,6 +581,7 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -847,6 +850,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= diff --git a/grpc_stats.go b/grpc_stats.go deleted file mode 100644 index 39cc662a..00000000 --- a/grpc_stats.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2018-2022 Mailgun Technologies Inc - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gubernator - -import ( - "context" - - "github.com/mailgun/holster/v4/clock" - "github.com/mailgun/holster/v4/syncutil" - "github.com/prometheus/client_golang/prometheus" - "google.golang.org/grpc/stats" -) - -type GRPCStats struct { - Duration clock.Duration - Method string - Failed float64 - Success float64 -} - -type contextKey struct{} - -var statsContextKey = contextKey{} - -// Implements the Prometheus collector interface. 
Such that when the /metrics handler is -// called this collector pulls all the stats from -type GRPCStatsHandler struct { - reqCh chan *GRPCStats - wg syncutil.WaitGroup - - grpcRequestCount *prometheus.CounterVec - grpcRequestDuration *prometheus.SummaryVec -} - -func NewGRPCStatsHandler() *GRPCStatsHandler { - c := &GRPCStatsHandler{ - grpcRequestCount: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "gubernator_grpc_request_counts", - Help: "The count of gRPC requests.", - }, []string{"status", "method"}), - grpcRequestDuration: prometheus.NewSummaryVec(prometheus.SummaryOpts{ - Name: "gubernator_grpc_request_duration", - Help: "The timings of gRPC requests in seconds", - Objectives: map[float64]float64{ - 0.5: 0.05, - 0.99: 0.001, - }, - }, []string{"method"}), - } - c.run() - return c -} - -func (c *GRPCStatsHandler) run() { - c.reqCh = make(chan *GRPCStats, 10000) - - c.wg.Until(func(done chan struct{}) bool { - select { - case stat := <-c.reqCh: - c.grpcRequestCount.With(prometheus.Labels{"status": "failed", "method": stat.Method}).Add(stat.Failed) - c.grpcRequestCount.With(prometheus.Labels{"status": "success", "method": stat.Method}).Add(stat.Success) - c.grpcRequestDuration.With(prometheus.Labels{"method": stat.Method}).Observe(stat.Duration.Seconds()) - case <-done: - return false - } - return true - }) -} - -func (c *GRPCStatsHandler) Describe(ch chan<- *prometheus.Desc) { - c.grpcRequestCount.Describe(ch) - c.grpcRequestDuration.Describe(ch) -} - -func (c *GRPCStatsHandler) Collect(ch chan<- prometheus.Metric) { - c.grpcRequestCount.Collect(ch) - c.grpcRequestDuration.Collect(ch) -} - -func (c *GRPCStatsHandler) Close() { - c.wg.Stop() -} - -func (c *GRPCStatsHandler) HandleRPC(ctx context.Context, s stats.RPCStats) { - rs := StatsFromContext(ctx) - if rs == nil { - return - } - - switch t := s.(type) { - // case *stats.Begin: - // case *stats.InPayload: - // case *stats.InHeader: - // case *stats.InTrailer: - // case *stats.OutPayload: - // case *stats.OutHeader: - // case *stats.OutTrailer: - case *stats.End: - rs.Duration = t.EndTime.Sub(t.BeginTime) - if t.Error != nil { - rs.Failed = 1 - } else { - rs.Success = 1 - } - c.reqCh <- rs - } -} - -func (c *GRPCStatsHandler) HandleConn(ctx context.Context, s stats.ConnStats) {} - -func (c *GRPCStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { - return ctx -} - -func (c *GRPCStatsHandler) TagRPC(ctx context.Context, tagInfo *stats.RPCTagInfo) context.Context { - return ContextWithStats(ctx, &GRPCStats{Method: tagInfo.FullMethodName}) -} - -// Returns a new `context.Context` that holds a reference to `GRPCStats`. -func ContextWithStats(ctx context.Context, stats *GRPCStats) context.Context { - return context.WithValue(ctx, statsContextKey, stats) -} - -// Returns the `GRPCStats` previously associated with `ctx`. 
-func StatsFromContext(ctx context.Context) *GRPCStats { - val := ctx.Value(statsContextKey) - if rs, ok := val.(*GRPCStats); ok { - return rs - } - return nil -} diff --git a/gubernator.go b/gubernator.go index 59c26eca..c8d0f1de 100644 --- a/gubernator.go +++ b/gubernator.go @@ -22,15 +22,17 @@ import ( "strings" "sync" - "github.com/mailgun/errors" + "github.com/duh-rpc/duh-go" + v1 "github.com/duh-rpc/duh-go/proto/v1" + "github.com/pkg/errors" + + "github.com/mailgun/holster/v4/ctxutil" "github.com/mailgun/holster/v4/syncutil" "github.com/mailgun/holster/v4/tracing" "github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" ) @@ -40,15 +42,14 @@ const ( UnHealthy = "unhealthy" ) -type V1Instance struct { - UnimplementedV1Server - UnimplementedPeersV1Server +type Service struct { + propagator propagation.TraceContext global *globalManager peerMutex sync.RWMutex + workerPool *WorkerPool log FieldLogger conf Config isClosed bool - workerPool *WorkerPool } var ( @@ -104,18 +105,16 @@ var ( }, []string{"peerAddr"}) ) -// NewV1Instance instantiate a single instance of a gubernator peer and register this -// instance with the provided GRPCServer. -func NewV1Instance(conf Config) (s *V1Instance, err error) { - ctx := context.Background() - if conf.GRPCServers == nil { - return nil, errors.New("at least one GRPCServer instance is required") - } +// NewService instantiate a single instance of a gubernator service +func NewService(conf Config) (s *Service, err error) { + ctx := tracing.StartNamedScopeDebug(context.Background(), "gubernator.NewService") + defer func() { tracing.EndScope(ctx, err) }() + if err := conf.SetDefaults(); err != nil { return nil, err } - s = &V1Instance{ + s = &Service{ log: conf.Logger, conf: conf, } @@ -123,12 +122,6 @@ func NewV1Instance(conf Config) (s *V1Instance, err error) { s.workerPool = NewWorkerPool(&conf) s.global = newGlobalManager(conf.Behaviors, s) - // Register our instance with all GRPC servers - for _, srv := range conf.GRPCServers { - RegisterV1Server(srv, s) - RegisterPeersV1Server(srv, s) - } - if s.conf.Loader == nil { return s, nil } @@ -142,9 +135,7 @@ func NewV1Instance(conf Config) (s *V1Instance, err error) { return s, nil } -func (s *V1Instance) Close() (err error) { - ctx := context.Background() - +func (s *Service) Close(ctx context.Context) (err error) { if s.isClosed { return nil } @@ -173,42 +164,47 @@ func (s *V1Instance) Close() (err error) { return nil } -// GetRateLimits is the public interface used by clients to request rate limits from the system. If the +// CheckRateLimits is the public interface used by clients to request rate limits from the system. If the // rate limit `Name` and `UniqueKey` is not owned by this instance, then we forward the request to the // peer that does. 
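+//
+// A minimal caller-side sketch (the client and field values here are
+// illustrative; NewClient and the request types appear in the functional
+// tests, and Duration is assumed to be in milliseconds):
+//
+//	var resp CheckRateLimitsResponse
+//	err := client.CheckRateLimits(ctx, &CheckRateLimitsRequest{
+//		Requests: []*RateLimitRequest{{
+//			Name:      "Foobar",
+//			UniqueKey: "account:12345",
+//			Algorithm: Algorithm_TOKEN_BUCKET,
+//			Duration:  60000,
+//			Limit:     100,
+//			Hits:      1,
+//		}},
+//	}, &resp)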
-func (s *V1Instance) GetRateLimits(ctx context.Context, r *GetRateLimitsReq) (*GetRateLimitsResp, error) {
-	funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("V1Instance.GetRateLimits"))
+func (s *Service) CheckRateLimits(ctx context.Context, req *CheckRateLimitsRequest, resp *CheckRateLimitsResponse) (err error) {
+
+	funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("Service.CheckRateLimits"))
 	defer funcTimer.ObserveDuration()
 	metricConcurrentChecks.Inc()
 	defer metricConcurrentChecks.Dec()

-	if len(r.Requests) > maxBatchSize {
-		metricCheckErrorCounter.WithLabelValues("Request too large").Inc()
-		return nil, status.Errorf(codes.OutOfRange,
-			"Requests.RateLimits list too large; max size is '%d'", maxBatchSize)
+	if len(req.Requests) > maxBatchSize {
+		metricCheckErrorCounter.WithLabelValues("Request too large").Add(1)
+		return duh.NewServiceError(duh.CodeBadRequest,
+			fmt.Errorf("CheckRateLimitsRequest.RateLimits list too large; max size is '%d'", maxBatchSize), nil)
 	}

-	resp := GetRateLimitsResp{
-		Responses: make([]*RateLimitResp, len(r.Requests)),
+	if len(req.Requests) == 0 {
+		return duh.NewServiceError(duh.CodeBadRequest,
+			errors.New("CheckRateLimitsRequest.RateLimits list is empty; provide at least one rate limit"), nil)
 	}
+
+	resp.Responses = make([]*RateLimitResponse, len(req.Requests))
+
 	var wg sync.WaitGroup
-	asyncCh := make(chan AsyncResp, len(r.Requests))
+	asyncCh := make(chan AsyncResp, len(req.Requests))

 	// For each item in the request body
-	for i, req := range r.Requests {
+	for i, req := range req.Requests {
 		key := req.Name + "_" + req.UniqueKey
-		var peer *PeerClient
+		var peer *Peer
 		var err error

 		if len(req.UniqueKey) == 0 {
-			metricCheckErrorCounter.WithLabelValues("Invalid request").Inc()
-			resp.Responses[i] = &RateLimitResp{Error: "field 'unique_key' cannot be empty"}
+			metricCheckErrorCounter.WithLabelValues("Invalid request").Add(1)
+			resp.Responses[i] = &RateLimitResponse{Error: "field 'unique_key' cannot be empty"}
 			continue
 		}

 		if len(req.Name) == 0 {
-			metricCheckErrorCounter.WithLabelValues("Invalid request").Inc()
-			resp.Responses[i] = &RateLimitResp{Error: "field 'namespace' cannot be empty"}
+			metricCheckErrorCounter.WithLabelValues("Invalid request").Add(1)
+			resp.Responses[i] = &RateLimitResponse{Error: "field 'namespace' cannot be empty"}
 			continue
 		}

@@ -216,7 +212,7 @@ func (s *V1Instance) GetRateLimits(ctx context.Context, r *GetRateLimitsReq) (*G
 			err = errors.Wrap(ctx.Err(), "Error while iterating request items")
 			span := trace.SpanFromContext(ctx)
 			span.RecordError(err)
-			resp.Responses[i] = &RateLimitResp{
+			resp.Responses[i] = &RateLimitResponse{
 				Error: err.Error(),
 			}
 			continue
@@ -230,7 +226,7 @@ func (s *V1Instance) GetRateLimits(ctx context.Context, r *GetRateLimitsReq) (*G
 		if err != nil {
 			countError(err, "Error in GetPeer")
 			err = errors.Wrapf(err, "Error in GetPeer, looking up peer that owns rate limit '%s'", key)
-			resp.Responses[i] = &RateLimitResp{
+			resp.Responses[i] = &RateLimitResponse{
 				Error: err.Error(),
 			}
 			continue
@@ -239,12 +235,15 @@ func (s *V1Instance) GetRateLimits(ctx context.Context, r *GetRateLimitsReq) (*G
 		// If our server instance is the owner of this rate limit
 		if peer.Info().IsOwner {
 			// Apply our rate limit algorithm to the request
-			resp.Responses[i], err = s.getLocalRateLimit(ctx, req)
+			metricGetRateLimitCounter.WithLabelValues("local").Add(1)
+			funcTimer1 := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("Service.checkLocalRateLimit (local)"))
+
resp.Responses[i], err = s.checkLocalRateLimit(ctx, req) + funcTimer1.ObserveDuration() if err != nil { err = errors.Wrapf(err, "Error while apply rate limit for '%s'", key) span := trace.SpanFromContext(ctx) span.RecordError(err) - resp.Responses[i] = &RateLimitResp{Error: err.Error()} + resp.Responses[i] = &RateLimitResponse{Error: err.Error()} } } else { if HasBehavior(req.Behavior, Behavior_GLOBAL) { @@ -253,11 +252,11 @@ func (s *V1Instance) GetRateLimits(ctx context.Context, r *GetRateLimitsReq) (*G err = errors.Wrap(err, "Error in getGlobalRateLimit") span := trace.SpanFromContext(ctx) span.RecordError(err) - resp.Responses[i] = &RateLimitResp{Error: err.Error()} + resp.Responses[i] = &RateLimitResponse{Error: err.Error()} } // Inform the client of the owner key of the key - resp.Responses[i].Metadata = map[string]string{"owner": peer.Info().GRPCAddress} + resp.Responses[i].Metadata = map[string]string{"owner": peer.Info().HTTPAddress} continue } @@ -283,31 +282,30 @@ func (s *V1Instance) GetRateLimits(ctx context.Context, r *GetRateLimitsReq) (*G resp.Responses[a.Idx] = a.Resp } - return &resp, nil + return nil } type AsyncResp struct { Idx int - Resp *RateLimitResp + Resp *RateLimitResponse } type AsyncReq struct { WG *sync.WaitGroup AsyncCh chan AsyncResp - Req *RateLimitReq - Peer *PeerClient + Req *RateLimitRequest + Peer *Peer Key string Idx int } -func (s *V1Instance) asyncRequest(ctx context.Context, req *AsyncReq) { +func (s *Service) asyncRequest(ctx context.Context, req *AsyncReq) { + ctx = tracing.StartNamedScope(ctx, "Service.asyncRequest") + defer tracing.EndScope(ctx, nil) var attempts int var err error - ctx = tracing.StartNamedScope(ctx, "V1Instance.asyncRequest") - defer tracing.EndScope(ctx, nil) - - funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("V1Instance.asyncRequest")) + funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("Service.asyncRequest")) defer funcTimer.ObserveDuration() resp := AsyncResp{ @@ -322,28 +320,30 @@ func (s *V1Instance) asyncRequest(ctx context.Context, req *AsyncReq) { Error("GetPeer() returned peer that is not connected") countError(err, "Peer not connected") err = errors.Wrapf(err, "GetPeer() keeps returning peers that are not connected for '%s'", req.Key) - resp.Resp = &RateLimitResp{Error: err.Error()} + resp.Resp = &RateLimitResponse{Error: err.Error()} break } // If we are attempting again, the owner of this rate limit might have changed to us! if attempts != 0 { if req.Peer.Info().IsOwner { - resp.Resp, err = s.getLocalRateLimit(ctx, req.Req) + metricGetRateLimitCounter.WithLabelValues("local").Add(1) + resp.Resp, err = s.checkLocalRateLimit(ctx, req.Req) if err != nil { s.log.WithContext(ctx). WithError(err). WithField("key", req.Key). 
Error("Error applying rate limit") - err = errors.Wrapf(err, "Error in getLocalRateLimit for '%s'", req.Key) - resp.Resp = &RateLimitResp{Error: err.Error()} + err = errors.Wrapf(err, "Error in checkLocalRateLimit for '%s'", req.Key) + resp.Resp = &RateLimitResponse{Error: err.Error()} } break } } // Make an RPC call to the peer that owns this rate limit - r, err := req.Peer.GetPeerRateLimit(ctx, req.Req) + metricGetRateLimitCounter.WithLabelValues("forward").Add(1) + r, err := req.Peer.Forward(ctx, req.Req) if err != nil { if IsNotReady(err) { attempts++ @@ -354,7 +354,7 @@ func (s *V1Instance) asyncRequest(ctx context.Context, req *AsyncReq) { s.log.WithContext(ctx).WithError(err).WithField("key", req.Key).Error(errPart) countError(err, "Error in GetPeer") err = errors.Wrap(err, errPart) - resp.Resp = &RateLimitResp{Error: err.Error()} + resp.Resp = &RateLimitResponse{Error: err.Error()} break } continue @@ -363,13 +363,13 @@ func (s *V1Instance) asyncRequest(ctx context.Context, req *AsyncReq) { // Not calling `countError()` because we expect the remote end to // report this error. err = errors.Wrap(err, fmt.Sprintf("Error while fetching rate limit '%s' from peer", req.Key)) - resp.Resp = &RateLimitResp{Error: err.Error()} + resp.Resp = &RateLimitResponse{Error: err.Error()} break } // Inform the client of the owner key of the key resp.Resp = r - resp.Resp.Metadata = map[string]string{"owner": req.Peer.Info().GRPCAddress} + resp.Resp.Metadata = map[string]string{"owner": req.Peer.Info().HTTPAddress} break } @@ -383,18 +383,17 @@ func (s *V1Instance) asyncRequest(ctx context.Context, req *AsyncReq) { // getGlobalRateLimit handles rate limits that are marked as `Behavior = GLOBAL`. Rate limit responses // are returned from the local cache and the hits are queued to be sent to the owning peer. -func (s *V1Instance) getGlobalRateLimit(ctx context.Context, req *RateLimitReq) (resp *RateLimitResp, err error) { - ctx = tracing.StartNamedScope(ctx, "V1Instance.getGlobalRateLimit", trace.WithAttributes( - attribute.String("ratelimit.key", req.UniqueKey), - attribute.String("ratelimit.name", req.Name), - )) - defer prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("V1Instance.getGlobalRateLimit")).ObserveDuration() - defer func() { - if err == nil { - s.global.QueueHit(req) - } - tracing.EndScope(ctx, err) - }() +func (s *Service) getGlobalRateLimit(ctx context.Context, req *RateLimitRequest) (resp *RateLimitResponse, err error) { + ctx = tracing.StartNamedScopeDebug(ctx, "getGlobalRateLimit") + defer func() { tracing.EndScope(ctx, err) }() + + funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("Service.getGlobalRateLimit")) + defer funcTimer.ObserveDuration() + // Queue the hit for async update after we have prepared our response. + // NOTE: The defer here avoids a race condition where we queue the req to + // be forwarded to the owning peer in a separate goroutine but simultaneously + // access and possibly copy the req in this method. 
+	defer s.global.QueueHit(req)

 	item, ok, err := s.workerPool.GetCacheItem(ctx, req.HashKey())
 	if err != nil {
@@ -402,8 +401,8 @@
-		return nil, errors.Wrap(err, "during in workerPool.GetCacheItem")
+		return nil, errors.Wrap(err, "Error in workerPool.GetCacheItem")
 	}
 	if ok {
-		// Global rate limits are always stored as RateLimitResp regardless of algorithm
-		rl, ok := item.Value.(*RateLimitResp)
+		// Global rate limits are always stored as RateLimitResponse regardless of algorithm
+		rl, ok := item.Value.(*RateLimitResponse)
 		if ok {
 			return rl, nil
 		}
@@ -411,59 +410,64 @@
 		// our cache still holds the rate limit we created on the first hit.
 	}

-	cpy := proto.Clone(req).(*RateLimitReq)
+	cpy := proto.Clone(req).(*RateLimitRequest)
 	cpy.Behavior = Behavior_NO_BATCHING

 	// Process the rate limit like we own it
-	resp, err = s.getLocalRateLimit(ctx, cpy)
+	metricGetRateLimitCounter.WithLabelValues("global").Add(1)
+	resp, err = s.checkLocalRateLimit(ctx, cpy)
 	if err != nil {
-		return nil, errors.Wrap(err, "during in getLocalRateLimit")
+		return nil, errors.Wrap(err, "Error in checkLocalRateLimit")
 	}

-	metricGetRateLimitCounter.WithLabelValues("global").Inc()
 	return resp, nil
 }

-// UpdatePeerGlobals updates the local cache with a list of global rate limits. This method should only
-// be called by a peer who is the owner of a global rate limit.
-func (s *V1Instance) UpdatePeerGlobals(ctx context.Context, r *UpdatePeerGlobalsReq) (*UpdatePeerGlobalsResp, error) {
+// Update updates the local cache with a list of rate limits.
+// This method should only be called by a peer.
+func (s *Service) Update(ctx context.Context, r *UpdateRequest, resp *v1.Reply) (err error) {
+	ctx = tracing.StartNamedScopeDebug(ctx, "Service.Update")
+	defer func() { tracing.EndScope(ctx, err) }()
+
 	for _, g := range r.Globals {
 		item := &CacheItem{
-			ExpireAt:  g.Status.ResetTime,
+			ExpireAt:  g.Update.ResetTime,
 			Algorithm: g.Algorithm,
-			Value:     g.Status,
+			Value:     g.Update,
 			Key:       g.Key,
 		}
 		err := s.workerPool.AddCacheItem(ctx, g.Key, item)
 		if err != nil {
-			return nil, errors.Wrap(err, "Error in workerPool.AddCacheItem")
+			return errors.Wrap(err, "Error in workerPool.AddCacheItem")
 		}
 	}
-
-	return &UpdatePeerGlobalsResp{}, nil
+	resp.Code = duh.CodeOK
+	return nil
 }

-// GetPeerRateLimits is called by other peers to get the rate limits owned by this peer.
-func (s *V1Instance) GetPeerRateLimits(ctx context.Context, r *GetPeerRateLimitsReq) (resp *GetPeerRateLimitsResp, err error) {
-	if len(r.Requests) > maxBatchSize {
-		err := fmt.Errorf("'PeerRequest.rate_limits' list too large; max size is '%d'", maxBatchSize)
-		metricCheckErrorCounter.WithLabelValues("Request too large").Inc()
-		return nil, status.Error(codes.OutOfRange, err.Error())
+// Forward is called by other peers when forwarding rate limits to this peer
+func (s *Service) Forward(ctx context.Context, req *ForwardRequest, resp *ForwardResponse) (err error) {
+	ctx = tracing.StartNamedScopeDebug(ctx, "Service.Forward")
+	defer func() { tracing.EndScope(ctx, err) }()
+
+	if len(req.Requests) > maxBatchSize {
+		metricCheckErrorCounter.WithLabelValues("Request too large").Add(1)
+		return duh.NewServiceError(duh.CodeBadRequest,
+			fmt.Errorf("'PeerRequest.rate_limits' list too large; max size is '%d'", maxBatchSize), nil)
 	}

 	// Invoke each rate limit request.
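+	// Each request below is checked concurrently on a fan-out sized by
+	// conf.Workers; results come back over respChan tagged with their index,
+	// so the response order always matches the request order.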
type reqIn struct { idx int - req *RateLimitReq + req *RateLimitRequest } type respOut struct { idx int - rl *RateLimitResp + rl *RateLimitResponse } - resp = &GetPeerRateLimitsResp{ - RateLimits: make([]*RateLimitResp, len(r.Requests)), - } + resp.RateLimits = make([]*RateLimitResponse, len(req.Requests)) respChan := make(chan respOut) var respWg sync.WaitGroup respWg.Add(1) @@ -479,18 +483,17 @@ func (s *V1Instance) GetPeerRateLimits(ctx context.Context, r *GetPeerRateLimits // Fan out requests. fan := syncutil.NewFanOut(s.conf.Workers) - for idx, req := range r.Requests { + for idx, req := range req.Requests { fan.Run(func(in interface{}) error { rin := in.(reqIn) // Extract the propagated context from the metadata in the request - prop := propagation.TraceContext{} - ctx := prop.Extract(ctx, &MetadataCarrier{Map: rin.req.Metadata}) - rl, err := s.getLocalRateLimit(ctx, rin.req) + ctx := s.propagator.Extract(ctx, &MetadataCarrier{Map: rin.req.Metadata}) + rl, err := s.checkLocalRateLimit(ctx, rin.req) if err != nil { // Return the error for this request - err = errors.Wrap(err, "Error in getLocalRateLimit") - rl = &RateLimitResp{Error: err.Error()} - // metricCheckErrorCounter is updated within getLocalRateLimit(), not in GetPeerRateLimits. + err = errors.Wrap(err, "Error in checkLocalRateLimit") + rl = &RateLimitResponse{Error: err.Error()} + // metricCheckErrorCounter is updated within checkLocalRateLimit(). } respChan <- respOut{rin.idx, rl} @@ -503,13 +506,11 @@ func (s *V1Instance) GetPeerRateLimits(ctx context.Context, r *GetPeerRateLimits close(respChan) respWg.Wait() - return resp, nil + return nil } // HealthCheck Returns the health of our instance. -func (s *V1Instance) HealthCheck(ctx context.Context, r *HealthCheckReq) (health *HealthCheckResp, err error) { - span := trace.SpanFromContext(ctx) - +func (s *Service) HealthCheck(ctx context.Context, _ *HealthCheckRequest, resp *HealthCheckResponse) (err error) { var errs []string s.peerMutex.RLock() @@ -519,8 +520,7 @@ func (s *V1Instance) HealthCheck(ctx context.Context, r *HealthCheckReq) (health localPeers := s.conf.LocalPicker.Peers() for _, peer := range localPeers { for _, errMsg := range peer.GetLastErr() { - err := fmt.Errorf("Error returned from local peer.GetLastErr: %s", errMsg) - span.RecordError(err) + err := fmt.Errorf("error returned from local peer.GetLastErr: %s", errMsg) errs = append(errs, err.Error()) } } @@ -529,39 +529,33 @@ func (s *V1Instance) HealthCheck(ctx context.Context, r *HealthCheckReq) (health regionPeers := s.conf.RegionPicker.Peers() for _, peer := range regionPeers { for _, errMsg := range peer.GetLastErr() { - err := fmt.Errorf("Error returned from region peer.GetLastErr: %s", errMsg) - span.RecordError(err) + err := fmt.Errorf("error returned from region peer.GetLastErr: %s", errMsg) errs = append(errs, err.Error()) } } - health = &HealthCheckResp{ - PeerCount: int32(len(localPeers) + len(regionPeers)), - Status: Healthy, - } + resp.PeerCount = int32(len(localPeers) + len(regionPeers)) + resp.Status = Healthy if len(errs) != 0 { - health.Status = UnHealthy - health.Message = strings.Join(errs, "|") + resp.Status = UnHealthy + resp.Message = strings.Join(errs, "|") } + return nil +} + +func (s *Service) checkLocalRateLimit(ctx context.Context, r *RateLimitRequest) (*RateLimitResponse, error) { + ctx = tracing.StartNamedScope(ctx, "Service.checkLocalRateLimit") + span := trace.SpanFromContext(ctx) span.SetAttributes( - attribute.Int64("health.peerCount", int64(health.PeerCount)), - 
attribute.String("health.status", health.Status), + attribute.String("request.key", r.UniqueKey), + attribute.String("request.name", r.Name), + attribute.Int64("request.limit", r.Limit), + attribute.Int64("request.hits", r.Hits), ) - return health, nil -} - -func (s *V1Instance) getLocalRateLimit(ctx context.Context, r *RateLimitReq) (_ *RateLimitResp, err error) { - ctx = tracing.StartNamedScope(ctx, "V1Instance.getLocalRateLimit", trace.WithAttributes( - attribute.String("ratelimit.key", r.UniqueKey), - attribute.String("ratelimit.name", r.Name), - attribute.Int64("ratelimit.limit", r.Limit), - attribute.Int64("ratelimit.hits", r.Hits), - )) - defer func() { tracing.EndScope(ctx, err) }() - defer prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("V1Instance.getLocalRateLimit")).ObserveDuration() + defer prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("Service.checkLocalRateLimit")).ObserveDuration() resp, err := s.workerPool.GetRateLimit(ctx, r) if err != nil { @@ -580,37 +574,43 @@ func (s *V1Instance) getLocalRateLimit(ctx context.Context, r *RateLimitReq) (_ } // SetPeers is called by the implementor to indicate the pool of peers has changed -func (s *V1Instance) SetPeers(peerInfo []PeerInfo) { +func (s *Service) SetPeers(peerInfo []PeerInfo) { localPicker := s.conf.LocalPicker.New() regionPicker := s.conf.RegionPicker.New() for _, info := range peerInfo { // Add peers that are not in our local DC to the RegionPicker if info.DataCenter != s.conf.DataCenter { + var err error peer := s.conf.RegionPicker.GetByPeerInfo(info) - // If we don't have an existing PeerClient create a new one + // If we don't have an existing Peer create a new one if peer == nil { - peer = NewPeerClient(PeerConfig{ - TraceGRPC: s.conf.PeerTraceGRPC, - Behavior: s.conf.Behaviors, - TLS: s.conf.PeerTLS, - Log: s.log, - Info: info, + peer, err = NewPeer(PeerConfig{ + PeerClient: s.conf.PeerClientFactory(info), + Behavior: s.conf.Behaviors, + Log: s.log, + Info: info, }) + if err != nil { + s.log.WithError(err).Error("while adding new peer; skipping..") + } } regionPicker.Add(peer) continue } - // If we don't have an existing PeerClient create a new one + var err error + // If we don't have an existing Peer create a new one peer := s.conf.LocalPicker.GetByPeerInfo(info) if peer == nil { - peer = NewPeerClient(PeerConfig{ - TraceGRPC: s.conf.PeerTraceGRPC, - Behavior: s.conf.Behaviors, - TLS: s.conf.PeerTLS, - Log: s.log, - Info: info, + peer, err = NewPeer(PeerConfig{ + PeerClient: s.conf.PeerClientFactory(info), + Behavior: s.conf.Behaviors, + Log: s.log, + Info: info, }) + if err != nil { + s.log.WithError(err).Error("while adding new peer; skipping..") + } } localPicker.Add(peer) } @@ -626,11 +626,11 @@ func (s *V1Instance) SetPeers(peerInfo []PeerInfo) { s.log.WithField("peers", peerInfo).Debug("peers updated") - // Shutdown any old peers we no longer need - ctx, cancel := context.WithTimeout(context.Background(), s.conf.Behaviors.BatchTimeout) + // Close any old peers we no longer need + ctx, cancel := ctxutil.WithTimeout(context.Background(), s.conf.Behaviors.BatchTimeout) defer cancel() - var shutdownPeers []*PeerClient + var shutdownPeers []*Peer for _, peer := range oldLocalPicker.Peers() { if peerInfo := s.conf.LocalPicker.GetByPeerInfo(peer.Info()); peerInfo == nil { shutdownPeers = append(shutdownPeers, peer) @@ -648,8 +648,8 @@ func (s *V1Instance) SetPeers(peerInfo []PeerInfo) { var wg syncutil.WaitGroup for _, p := range shutdownPeers { wg.Run(func(obj interface{}) error { - pc := 
obj.(*PeerClient) - err := pc.Shutdown(ctx) + pc := obj.(*Peer) + err := pc.Close(ctx) if err != nil { s.log.WithError(err).WithField("peer", pc).Error("while shutting down peer") } @@ -661,18 +661,17 @@ func (s *V1Instance) SetPeers(peerInfo []PeerInfo) { if len(shutdownPeers) > 0 { var peers []string for _, p := range shutdownPeers { - peers = append(peers, p.Info().GRPCAddress) + peers = append(peers, p.Info().HTTPAddress) } s.log.WithField("peers", peers).Debug("peers shutdown") } } // GetPeer returns a peer client for the hash key provided -func (s *V1Instance) GetPeer(ctx context.Context, key string) (p *PeerClient, err error) { - defer prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("V1Instance.GetPeer")).ObserveDuration() - +func (s *Service) GetPeer(ctx context.Context, key string) (p *Peer, err error) { s.peerMutex.RLock() defer s.peerMutex.RUnlock() + p, err = s.conf.LocalPicker.Get(key) if err != nil { return nil, errors.Wrap(err, "Error in conf.LocalPicker.Get") @@ -681,20 +680,20 @@ func (s *V1Instance) GetPeer(ctx context.Context, key string) (p *PeerClient, er return p, nil } -func (s *V1Instance) GetPeerList() []*PeerClient { +func (s *Service) GetPeerList() []*Peer { s.peerMutex.RLock() defer s.peerMutex.RUnlock() return s.conf.LocalPicker.Peers() } -func (s *V1Instance) GetRegionPickers() map[string]PeerPicker { +func (s *Service) GetRegionPickers() map[string]PeerPicker { s.peerMutex.RLock() defer s.peerMutex.RUnlock() return s.conf.RegionPicker.Pickers() } // Describe fetches prometheus metrics to be registered -func (s *V1Instance) Describe(ch chan<- *prometheus.Desc) { +func (s *Service) Describe(ch chan<- *prometheus.Desc) { metricBatchQueueLength.Describe(ch) metricBatchSendDuration.Describe(ch) metricBatchSendRetries.Describe(ch) @@ -712,7 +711,7 @@ func (s *V1Instance) Describe(ch chan<- *prometheus.Desc) { } // Collect fetches metrics from the server for use by prometheus -func (s *V1Instance) Collect(ch chan<- prometheus.Metric) { +func (s *Service) Collect(ch chan<- prometheus.Metric) { metricBatchQueueLength.Collect(ch) metricBatchSendDuration.Collect(ch) metricBatchSendRetries.Collect(ch) diff --git a/gubernator.pb.go b/gubernator.pb.go index 1e902315..6fa5f4c0 100644 --- a/gubernator.pb.go +++ b/gubernator.pb.go @@ -15,19 +15,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.7 +// protoc-gen-go v1.30.0 +// protoc (unknown) // source: gubernator.proto package gubernator import ( - reflect "reflect" - sync "sync" - - _ "google.golang.org/genproto/googleapis/api/annotations" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) const ( @@ -144,21 +142,16 @@ const ( // algorithm chosen. For instance, if used with `TOKEN_BUCKET` it will immediately expire the // cache value. For `LEAKY_BUCKET` it sets the `Remaining` to `Limit`. Behavior_RESET_REMAINING Behavior = 8 - // Enables rate limits to be pushed to other regions. Currently this is only implemented when using - // 'member-list' peer discovery. Also requires GUBER_DATA_CENTER to be set to different values on at - // least 2 instances of Gubernator. - Behavior_MULTI_REGION Behavior = 16 ) // Enum value maps for Behavior. 
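+// Non-zero behaviors are powers of two and can be combined on a request as
+// bit flags, e.g. Behavior_NO_BATCHING|Behavior_RESET_REMAINING (an
+// illustrative combination).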
var ( Behavior_name = map[int32]string{ - 0: "BATCHING", - 1: "NO_BATCHING", - 2: "GLOBAL", - 4: "DURATION_IS_GREGORIAN", - 8: "RESET_REMAINING", - 16: "MULTI_REGION", + 0: "BATCHING", + 1: "NO_BATCHING", + 2: "GLOBAL", + 4: "DURATION_IS_GREGORIAN", + 8: "RESET_REMAINING", } Behavior_value = map[string]int32{ "BATCHING": 0, @@ -166,7 +159,6 @@ var ( "GLOBAL": 2, "DURATION_IS_GREGORIAN": 4, "RESET_REMAINING": 8, - "MULTI_REGION": 16, } ) @@ -243,17 +235,17 @@ func (Status) EnumDescriptor() ([]byte, []int) { return file_gubernator_proto_rawDescGZIP(), []int{2} } -// Must specify at least one Request -type GetRateLimitsReq struct { +// Must specify at least one RateLimitRequest +type CheckRateLimitsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Requests []*RateLimitReq `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` + Requests []*RateLimitRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` } -func (x *GetRateLimitsReq) Reset() { - *x = GetRateLimitsReq{} +func (x *CheckRateLimitsRequest) Reset() { + *x = CheckRateLimitsRequest{} if protoimpl.UnsafeEnabled { mi := &file_gubernator_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -261,13 +253,13 @@ func (x *GetRateLimitsReq) Reset() { } } -func (x *GetRateLimitsReq) String() string { +func (x *CheckRateLimitsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetRateLimitsReq) ProtoMessage() {} +func (*CheckRateLimitsRequest) ProtoMessage() {} -func (x *GetRateLimitsReq) ProtoReflect() protoreflect.Message { +func (x *CheckRateLimitsRequest) ProtoReflect() protoreflect.Message { mi := &file_gubernator_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -279,29 +271,29 @@ func (x *GetRateLimitsReq) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetRateLimitsReq.ProtoReflect.Descriptor instead. -func (*GetRateLimitsReq) Descriptor() ([]byte, []int) { +// Deprecated: Use CheckRateLimitsRequest.ProtoReflect.Descriptor instead. 
+func (*CheckRateLimitsRequest) Descriptor() ([]byte, []int) { return file_gubernator_proto_rawDescGZIP(), []int{0} } -func (x *GetRateLimitsReq) GetRequests() []*RateLimitReq { +func (x *CheckRateLimitsRequest) GetRequests() []*RateLimitRequest { if x != nil { return x.Requests } return nil } -// RateLimits returned are in the same order as the Requests -type GetRateLimitsResp struct { +// RateLimits returned are in the same order provided in CheckRateLimitsRequest +type CheckRateLimitsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Responses []*RateLimitResp `protobuf:"bytes,1,rep,name=responses,proto3" json:"responses,omitempty"` + Responses []*RateLimitResponse `protobuf:"bytes,1,rep,name=responses,proto3" json:"responses,omitempty"` } -func (x *GetRateLimitsResp) Reset() { - *x = GetRateLimitsResp{} +func (x *CheckRateLimitsResponse) Reset() { + *x = CheckRateLimitsResponse{} if protoimpl.UnsafeEnabled { mi := &file_gubernator_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -309,13 +301,13 @@ func (x *GetRateLimitsResp) Reset() { } } -func (x *GetRateLimitsResp) String() string { +func (x *CheckRateLimitsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetRateLimitsResp) ProtoMessage() {} +func (*CheckRateLimitsResponse) ProtoMessage() {} -func (x *GetRateLimitsResp) ProtoReflect() protoreflect.Message { +func (x *CheckRateLimitsResponse) ProtoReflect() protoreflect.Message { mi := &file_gubernator_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -327,19 +319,19 @@ func (x *GetRateLimitsResp) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetRateLimitsResp.ProtoReflect.Descriptor instead. -func (*GetRateLimitsResp) Descriptor() ([]byte, []int) { +// Deprecated: Use CheckRateLimitsResponse.ProtoReflect.Descriptor instead. +func (*CheckRateLimitsResponse) Descriptor() ([]byte, []int) { return file_gubernator_proto_rawDescGZIP(), []int{1} } -func (x *GetRateLimitsResp) GetResponses() []*RateLimitResp { +func (x *CheckRateLimitsResponse) GetResponses() []*RateLimitResponse { if x != nil { return x.Responses } return nil } -type RateLimitReq struct { +type RateLimitRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -360,9 +352,9 @@ type RateLimitReq struct { Duration int64 `protobuf:"varint,5,opt,name=duration,proto3" json:"duration,omitempty"` // The algorithm used to calculate the rate limit. The algorithm may change on // subsequent requests, when this occurs any previous rate limit hit counts are reset. - Algorithm Algorithm `protobuf:"varint,6,opt,name=algorithm,proto3,enum=pb.gubernator.Algorithm" json:"algorithm,omitempty"` + Algorithm Algorithm `protobuf:"varint,6,opt,name=algorithm,proto3,enum=gubernator.v3.Algorithm" json:"algorithm,omitempty"` // Behavior is a set of int32 flags that control the behavior of the rate limit in gubernator - Behavior Behavior `protobuf:"varint,7,opt,name=behavior,proto3,enum=pb.gubernator.Behavior" json:"behavior,omitempty"` + Behavior Behavior `protobuf:"varint,7,opt,name=behavior,proto3,enum=gubernator.v3.Behavior" json:"behavior,omitempty"` // Maximum burst size that the limit can accept. Burst int64 `protobuf:"varint,8,opt,name=burst,proto3" json:"burst,omitempty"` // This is metadata that is associated with this rate limit. 
Peer to Peer communication will use @@ -371,8 +363,8 @@ type RateLimitReq struct { Metadata map[string]string `protobuf:"bytes,9,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *RateLimitReq) Reset() { - *x = RateLimitReq{} +func (x *RateLimitRequest) Reset() { + *x = RateLimitRequest{} if protoimpl.UnsafeEnabled { mi := &file_gubernator_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -380,13 +372,13 @@ func (x *RateLimitReq) Reset() { } } -func (x *RateLimitReq) String() string { +func (x *RateLimitRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RateLimitReq) ProtoMessage() {} +func (*RateLimitRequest) ProtoMessage() {} -func (x *RateLimitReq) ProtoReflect() protoreflect.Message { +func (x *RateLimitRequest) ProtoReflect() protoreflect.Message { mi := &file_gubernator_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -398,81 +390,81 @@ func (x *RateLimitReq) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RateLimitReq.ProtoReflect.Descriptor instead. -func (*RateLimitReq) Descriptor() ([]byte, []int) { +// Deprecated: Use RateLimitRequest.ProtoReflect.Descriptor instead. +func (*RateLimitRequest) Descriptor() ([]byte, []int) { return file_gubernator_proto_rawDescGZIP(), []int{2} } -func (x *RateLimitReq) GetName() string { +func (x *RateLimitRequest) GetName() string { if x != nil { return x.Name } return "" } -func (x *RateLimitReq) GetUniqueKey() string { +func (x *RateLimitRequest) GetUniqueKey() string { if x != nil { return x.UniqueKey } return "" } -func (x *RateLimitReq) GetHits() int64 { +func (x *RateLimitRequest) GetHits() int64 { if x != nil { return x.Hits } return 0 } -func (x *RateLimitReq) GetLimit() int64 { +func (x *RateLimitRequest) GetLimit() int64 { if x != nil { return x.Limit } return 0 } -func (x *RateLimitReq) GetDuration() int64 { +func (x *RateLimitRequest) GetDuration() int64 { if x != nil { return x.Duration } return 0 } -func (x *RateLimitReq) GetAlgorithm() Algorithm { +func (x *RateLimitRequest) GetAlgorithm() Algorithm { if x != nil { return x.Algorithm } return Algorithm_TOKEN_BUCKET } -func (x *RateLimitReq) GetBehavior() Behavior { +func (x *RateLimitRequest) GetBehavior() Behavior { if x != nil { return x.Behavior } return Behavior_BATCHING } -func (x *RateLimitReq) GetBurst() int64 { +func (x *RateLimitRequest) GetBurst() int64 { if x != nil { return x.Burst } return 0 } -func (x *RateLimitReq) GetMetadata() map[string]string { +func (x *RateLimitRequest) GetMetadata() map[string]string { if x != nil { return x.Metadata } return nil } -type RateLimitResp struct { +type RateLimitResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // The status of the rate limit. - Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=pb.gubernator.Status" json:"status,omitempty"` + Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=gubernator.v3.Status" json:"status,omitempty"` // The currently configured request limit (Identical to RateLimitRequest.rate_limit_config.limit). Limit int64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` // This is the number of requests remaining before the limit is hit. 
@@ -481,12 +473,12 @@ type RateLimitResp struct { ResetTime int64 `protobuf:"varint,4,opt,name=reset_time,json=resetTime,proto3" json:"reset_time,omitempty"` // Contains the error; If set all other values should be ignored Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` - // This is additional metadata that a client might find useful. (IE: Additional headers, corrdinator ownership, etc..) + // This is additional metadata that a client might find useful. (IE: Additional headers, coordinator ownership, etc..) Metadata map[string]string `protobuf:"bytes,6,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *RateLimitResp) Reset() { - *x = RateLimitResp{} +func (x *RateLimitResponse) Reset() { + *x = RateLimitResponse{} if protoimpl.UnsafeEnabled { mi := &file_gubernator_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -494,13 +486,13 @@ func (x *RateLimitResp) Reset() { } } -func (x *RateLimitResp) String() string { +func (x *RateLimitResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RateLimitResp) ProtoMessage() {} +func (*RateLimitResponse) ProtoMessage() {} -func (x *RateLimitResp) ProtoReflect() protoreflect.Message { +func (x *RateLimitResponse) ProtoReflect() protoreflect.Message { mi := &file_gubernator_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -512,61 +504,61 @@ func (x *RateLimitResp) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RateLimitResp.ProtoReflect.Descriptor instead. -func (*RateLimitResp) Descriptor() ([]byte, []int) { +// Deprecated: Use RateLimitResponse.ProtoReflect.Descriptor instead. 
+func (*RateLimitResponse) Descriptor() ([]byte, []int) { return file_gubernator_proto_rawDescGZIP(), []int{3} } -func (x *RateLimitResp) GetStatus() Status { +func (x *RateLimitResponse) GetStatus() Status { if x != nil { return x.Status } return Status_UNDER_LIMIT } -func (x *RateLimitResp) GetLimit() int64 { +func (x *RateLimitResponse) GetLimit() int64 { if x != nil { return x.Limit } return 0 } -func (x *RateLimitResp) GetRemaining() int64 { +func (x *RateLimitResponse) GetRemaining() int64 { if x != nil { return x.Remaining } return 0 } -func (x *RateLimitResp) GetResetTime() int64 { +func (x *RateLimitResponse) GetResetTime() int64 { if x != nil { return x.ResetTime } return 0 } -func (x *RateLimitResp) GetError() string { +func (x *RateLimitResponse) GetError() string { if x != nil { return x.Error } return "" } -func (x *RateLimitResp) GetMetadata() map[string]string { +func (x *RateLimitResponse) GetMetadata() map[string]string { if x != nil { return x.Metadata } return nil } -type HealthCheckReq struct { +type HealthCheckRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } -func (x *HealthCheckReq) Reset() { - *x = HealthCheckReq{} +func (x *HealthCheckRequest) Reset() { + *x = HealthCheckRequest{} if protoimpl.UnsafeEnabled { mi := &file_gubernator_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -574,13 +566,13 @@ func (x *HealthCheckReq) Reset() { } } -func (x *HealthCheckReq) String() string { +func (x *HealthCheckRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*HealthCheckReq) ProtoMessage() {} +func (*HealthCheckRequest) ProtoMessage() {} -func (x *HealthCheckReq) ProtoReflect() protoreflect.Message { +func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { mi := &file_gubernator_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -592,12 +584,12 @@ func (x *HealthCheckReq) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use HealthCheckReq.ProtoReflect.Descriptor instead. -func (*HealthCheckReq) Descriptor() ([]byte, []int) { +// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead. 
+func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return file_gubernator_proto_rawDescGZIP(), []int{4} } -type HealthCheckResp struct { +type HealthCheckResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -610,8 +602,8 @@ type HealthCheckResp struct { PeerCount int32 `protobuf:"varint,3,opt,name=peer_count,json=peerCount,proto3" json:"peer_count,omitempty"` } -func (x *HealthCheckResp) Reset() { - *x = HealthCheckResp{} +func (x *HealthCheckResponse) Reset() { + *x = HealthCheckResponse{} if protoimpl.UnsafeEnabled { mi := &file_gubernator_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -619,13 +611,13 @@ func (x *HealthCheckResp) Reset() { } } -func (x *HealthCheckResp) String() string { +func (x *HealthCheckResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*HealthCheckResp) ProtoMessage() {} +func (*HealthCheckResponse) ProtoMessage() {} -func (x *HealthCheckResp) ProtoReflect() protoreflect.Message { +func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { mi := &file_gubernator_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -637,26 +629,26 @@ func (x *HealthCheckResp) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use HealthCheckResp.ProtoReflect.Descriptor instead. -func (*HealthCheckResp) Descriptor() ([]byte, []int) { +// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead. +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return file_gubernator_proto_rawDescGZIP(), []int{5} } -func (x *HealthCheckResp) GetStatus() string { +func (x *HealthCheckResponse) GetStatus() string { if x != nil { return x.Status } return "" } -func (x *HealthCheckResp) GetMessage() string { +func (x *HealthCheckResponse) GetMessage() string { if x != nil { return x.Message } return "" } -func (x *HealthCheckResp) GetPeerCount() int32 { +func (x *HealthCheckResponse) GetPeerCount() int32 { if x != nil { return x.PeerCount } @@ -667,101 +659,86 @@ var File_gubernator_proto protoreflect.FileDescriptor var file_gubernator_proto_rawDesc = []byte{ 0x0a, 0x10, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x0d, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, - 0x72, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0x4b, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, - 0x52, 0x65, 0x71, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, - 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, - 0x65, 0x71, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, 0x4f, 0x0a, 0x11, - 0x47, 0x65, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x12, 0x3a, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, - 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x73, 0x22, 0x8e, 0x03, - 0x0a, 0x0c, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65, - 0x79, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x69, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x04, 0x68, 0x69, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, - 0x69, 0x74, 0x68, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, - 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x6c, 0x67, 0x6f, 0x72, - 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, - 0x33, 0x0a, 0x08, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x17, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, - 0x72, 0x2e, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x52, 0x08, 0x62, 0x65, 0x68, 0x61, - 0x76, 0x69, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x75, 0x72, 0x73, 0x74, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x05, 0x62, 0x75, 0x72, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x08, 0x6d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, - 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x52, 0x61, 0x74, - 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xac, - 0x02, 0x0a, 0x0d, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x12, 0x2d, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, - 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, - 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x73, 0x65, 0x74, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x46, 0x0a, 0x08, 0x6d, 
0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x62, 0x2e, - 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x10, 0x0a, - 0x0e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x22, - 0x62, 0x0a, 0x0f, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, - 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x2a, 0x2f, 0x0a, 0x09, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, - 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, - 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x45, 0x41, 0x4b, 0x59, 0x5f, 0x42, 0x55, 0x43, 0x4b, - 0x45, 0x54, 0x10, 0x01, 0x2a, 0x77, 0x0a, 0x08, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, - 0x12, 0x0c, 0x0a, 0x08, 0x42, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0f, - 0x0a, 0x0b, 0x4e, 0x4f, 0x5f, 0x42, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, - 0x0a, 0x0a, 0x06, 0x47, 0x4c, 0x4f, 0x42, 0x41, 0x4c, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x44, - 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x49, 0x53, 0x5f, 0x47, 0x52, 0x45, 0x47, 0x4f, - 0x52, 0x49, 0x41, 0x4e, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45, 0x53, 0x45, 0x54, 0x5f, - 0x52, 0x45, 0x4d, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x08, 0x12, 0x10, 0x0a, 0x0c, 0x4d, - 0x55, 0x4c, 0x54, 0x49, 0x5f, 0x52, 0x45, 0x47, 0x49, 0x4f, 0x4e, 0x10, 0x10, 0x2a, 0x29, 0x0a, - 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x44, 0x45, 0x52, - 0x5f, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x56, 0x45, 0x52, - 0x5f, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x10, 0x01, 0x32, 0xdd, 0x01, 0x0a, 0x02, 0x56, 0x31, 0x12, - 0x70, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, - 0x12, 0x1f, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, - 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x52, 0x65, - 0x71, 0x1a, 0x20, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, - 0x72, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x22, 0x1c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x16, 0x22, 0x11, 0x2f, 0x76, 0x31, - 0x2f, 0x47, 0x65, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x3a, 0x01, - 0x2a, 
0x12, 0x65, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x12, 0x1d, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, - 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x1a, - 0x1e, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x22, - 0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x11, 0x12, 0x0f, 0x2f, 0x76, 0x31, 0x2f, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, 0x22, 0x5a, 0x1d, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x61, 0x69, 0x6c, 0x67, 0x75, 0x6e, 0x2f, 0x67, - 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x80, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x6f, 0x12, 0x0d, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x76, + 0x33, 0x22, 0x55, 0x0a, 0x16, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x08, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, 0x59, 0x0a, 0x17, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, + 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x73, 0x22, 0x96, 0x03, 0x0a, 0x10, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x68, + 0x69, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x68, 0x69, 0x74, 0x73, 0x12, + 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x36, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, + 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x33, 0x0a, 0x08, 0x62, 0x65, 0x68, + 0x61, 0x76, 0x69, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x75, + 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x65, 0x68, 0x61, + 0x76, 
0x69, 0x6f, 0x72, 0x52, 0x08, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x14, + 0x0a, 0x05, 0x62, 0x75, 0x72, 0x73, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x62, + 0x75, 0x72, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, + 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, + 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb4, 0x02, 0x0a, + 0x11, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x6d, 0x61, 0x69, + 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x6d, 0x61, + 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x73, 0x65, 0x74, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x4a, 0x0a, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, + 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, + 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x14, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, 0x0a, 0x13, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x1d, 0x0a, 
0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x2a, 0x2f, 0x0a, 0x09, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x10, + 0x0a, 0x0c, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x00, + 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x45, 0x41, 0x4b, 0x59, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, + 0x10, 0x01, 0x2a, 0x65, 0x0a, 0x08, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x0c, + 0x0a, 0x08, 0x42, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, + 0x4e, 0x4f, 0x5f, 0x42, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a, 0x0a, + 0x06, 0x47, 0x4c, 0x4f, 0x42, 0x41, 0x4c, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x55, 0x52, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x49, 0x53, 0x5f, 0x47, 0x52, 0x45, 0x47, 0x4f, 0x52, 0x49, + 0x41, 0x4e, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45, 0x53, 0x45, 0x54, 0x5f, 0x52, 0x45, + 0x4d, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x08, 0x2a, 0x29, 0x0a, 0x06, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x4c, 0x49, 0x4d, + 0x49, 0x54, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x56, 0x45, 0x52, 0x5f, 0x4c, 0x49, 0x4d, + 0x49, 0x54, 0x10, 0x01, 0x42, 0x1f, 0x5a, 0x1d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x6d, 0x61, 0x69, 0x6c, 0x67, 0x75, 0x6e, 0x2f, 0x67, 0x75, 0x62, 0x65, 0x72, + 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -779,32 +756,28 @@ func file_gubernator_proto_rawDescGZIP() []byte { var file_gubernator_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_gubernator_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_gubernator_proto_goTypes = []interface{}{ - (Algorithm)(0), // 0: pb.gubernator.Algorithm - (Behavior)(0), // 1: pb.gubernator.Behavior - (Status)(0), // 2: pb.gubernator.Status - (*GetRateLimitsReq)(nil), // 3: pb.gubernator.GetRateLimitsReq - (*GetRateLimitsResp)(nil), // 4: pb.gubernator.GetRateLimitsResp - (*RateLimitReq)(nil), // 5: pb.gubernator.RateLimitReq - (*RateLimitResp)(nil), // 6: pb.gubernator.RateLimitResp - (*HealthCheckReq)(nil), // 7: pb.gubernator.HealthCheckReq - (*HealthCheckResp)(nil), // 8: pb.gubernator.HealthCheckResp - nil, // 9: pb.gubernator.RateLimitReq.MetadataEntry - nil, // 10: pb.gubernator.RateLimitResp.MetadataEntry + (Algorithm)(0), // 0: gubernator.v3.Algorithm + (Behavior)(0), // 1: gubernator.v3.Behavior + (Status)(0), // 2: gubernator.v3.Status + (*CheckRateLimitsRequest)(nil), // 3: gubernator.v3.CheckRateLimitsRequest + (*CheckRateLimitsResponse)(nil), // 4: gubernator.v3.CheckRateLimitsResponse + (*RateLimitRequest)(nil), // 5: gubernator.v3.RateLimitRequest + (*RateLimitResponse)(nil), // 6: gubernator.v3.RateLimitResponse + (*HealthCheckRequest)(nil), // 7: gubernator.v3.HealthCheckRequest + (*HealthCheckResponse)(nil), // 8: gubernator.v3.HealthCheckResponse + nil, // 9: gubernator.v3.RateLimitRequest.MetadataEntry + nil, // 10: gubernator.v3.RateLimitResponse.MetadataEntry } var file_gubernator_proto_depIdxs = []int32{ - 5, // 0: pb.gubernator.GetRateLimitsReq.requests:type_name -> pb.gubernator.RateLimitReq - 6, // 1: pb.gubernator.GetRateLimitsResp.responses:type_name -> pb.gubernator.RateLimitResp - 0, // 2: pb.gubernator.RateLimitReq.algorithm:type_name -> pb.gubernator.Algorithm - 1, // 3: 
pb.gubernator.RateLimitReq.behavior:type_name -> pb.gubernator.Behavior - 9, // 4: pb.gubernator.RateLimitReq.metadata:type_name -> pb.gubernator.RateLimitReq.MetadataEntry - 2, // 5: pb.gubernator.RateLimitResp.status:type_name -> pb.gubernator.Status - 10, // 6: pb.gubernator.RateLimitResp.metadata:type_name -> pb.gubernator.RateLimitResp.MetadataEntry - 3, // 7: pb.gubernator.V1.GetRateLimits:input_type -> pb.gubernator.GetRateLimitsReq - 7, // 8: pb.gubernator.V1.HealthCheck:input_type -> pb.gubernator.HealthCheckReq - 4, // 9: pb.gubernator.V1.GetRateLimits:output_type -> pb.gubernator.GetRateLimitsResp - 8, // 10: pb.gubernator.V1.HealthCheck:output_type -> pb.gubernator.HealthCheckResp - 9, // [9:11] is the sub-list for method output_type - 7, // [7:9] is the sub-list for method input_type + 5, // 0: gubernator.v3.CheckRateLimitsRequest.requests:type_name -> gubernator.v3.RateLimitRequest + 6, // 1: gubernator.v3.CheckRateLimitsResponse.responses:type_name -> gubernator.v3.RateLimitResponse + 0, // 2: gubernator.v3.RateLimitRequest.algorithm:type_name -> gubernator.v3.Algorithm + 1, // 3: gubernator.v3.RateLimitRequest.behavior:type_name -> gubernator.v3.Behavior + 9, // 4: gubernator.v3.RateLimitRequest.metadata:type_name -> gubernator.v3.RateLimitRequest.MetadataEntry + 2, // 5: gubernator.v3.RateLimitResponse.status:type_name -> gubernator.v3.Status + 10, // 6: gubernator.v3.RateLimitResponse.metadata:type_name -> gubernator.v3.RateLimitResponse.MetadataEntry + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type 7, // [7:7] is the sub-list for extension type_name 7, // [7:7] is the sub-list for extension extendee 0, // [0:7] is the sub-list for field type_name @@ -817,7 +790,7 @@ func file_gubernator_proto_init() { } if !protoimpl.UnsafeEnabled { file_gubernator_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetRateLimitsReq); i { + switch v := v.(*CheckRateLimitsRequest); i { case 0: return &v.state case 1: @@ -829,7 +802,7 @@ func file_gubernator_proto_init() { } } file_gubernator_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetRateLimitsResp); i { + switch v := v.(*CheckRateLimitsResponse); i { case 0: return &v.state case 1: @@ -841,7 +814,7 @@ func file_gubernator_proto_init() { } } file_gubernator_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RateLimitReq); i { + switch v := v.(*RateLimitRequest); i { case 0: return &v.state case 1: @@ -853,7 +826,7 @@ func file_gubernator_proto_init() { } } file_gubernator_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RateLimitResp); i { + switch v := v.(*RateLimitResponse); i { case 0: return &v.state case 1: @@ -865,7 +838,7 @@ func file_gubernator_proto_init() { } } file_gubernator_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckReq); i { + switch v := v.(*HealthCheckRequest); i { case 0: return &v.state case 1: @@ -877,7 +850,7 @@ func file_gubernator_proto_init() { } } file_gubernator_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckResp); i { + switch v := v.(*HealthCheckResponse); i { case 0: return &v.state case 1: @@ -897,7 +870,7 @@ func file_gubernator_proto_init() { NumEnums: 3, NumMessages: 8, NumExtensions: 0, - NumServices: 1, + NumServices: 0, }, GoTypes: file_gubernator_proto_goTypes, DependencyIndexes: 
file_gubernator_proto_depIdxs, diff --git a/gubernator.pb.gw.go b/gubernator.pb.gw.go deleted file mode 100644 index 1c67924b..00000000 --- a/gubernator.pb.gw.go +++ /dev/null @@ -1,240 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: gubernator.proto - -/* -Package gubernator is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package gubernator - -import ( - "context" - "io" - "net/http" - - "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" - "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = metadata.Join - -func request_V1_GetRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, client V1Client, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetRateLimitsReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.GetRateLimits(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_V1_GetRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, server V1Server, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetRateLimitsReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.GetRateLimits(ctx, &protoReq) - return msg, metadata, err - -} - -func request_V1_HealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, client V1Client, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq HealthCheckReq - var metadata runtime.ServerMetadata - - msg, err := client.HealthCheck(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_V1_HealthCheck_0(ctx context.Context, marshaler runtime.Marshaler, server V1Server, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq HealthCheckReq - var metadata runtime.ServerMetadata - - msg, err := server.HealthCheck(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterV1HandlerServer registers the http handlers for service V1 to "mux". -// UnaryRPC :call V1Server directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterV1HandlerFromEndpoint instead. 
-func RegisterV1HandlerServer(ctx context.Context, mux *runtime.ServeMux, server V1Server) error { - - mux.Handle("POST", pattern_V1_GetRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pb.gubernator.V1/GetRateLimits", runtime.WithHTTPPathPattern("/v1/GetRateLimits")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_V1_GetRateLimits_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_V1_GetRateLimits_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_V1_HealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pb.gubernator.V1/HealthCheck", runtime.WithHTTPPathPattern("/v1/HealthCheck")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_V1_HealthCheck_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_V1_HealthCheck_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterV1HandlerFromEndpoint is same as RegisterV1Handler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterV1HandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterV1Handler(ctx, mux, conn) -} - -// RegisterV1Handler registers the http handlers for service V1 to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
-func RegisterV1Handler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterV1HandlerClient(ctx, mux, NewV1Client(conn)) -} - -// RegisterV1HandlerClient registers the http handlers for service V1 -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "V1Client". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "V1Client" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "V1Client" to call the correct interceptors. -func RegisterV1HandlerClient(ctx context.Context, mux *runtime.ServeMux, client V1Client) error { - - mux.Handle("POST", pattern_V1_GetRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pb.gubernator.V1/GetRateLimits", runtime.WithHTTPPathPattern("/v1/GetRateLimits")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_V1_GetRateLimits_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_V1_GetRateLimits_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_V1_HealthCheck_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pb.gubernator.V1/HealthCheck", runtime.WithHTTPPathPattern("/v1/HealthCheck")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_V1_HealthCheck_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_V1_HealthCheck_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_V1_GetRateLimits_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "GetRateLimits"}, "")) - - pattern_V1_HealthCheck_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "HealthCheck"}, "")) -) - -var ( - forward_V1_GetRateLimits_0 = runtime.ForwardResponseMessage - - forward_V1_HealthCheck_0 = runtime.ForwardResponseMessage -) diff --git a/gubernator.proto b/gubernator.proto index c8f237e8..3773255b 100644 --- a/gubernator.proto +++ b/gubernator.proto @@ -18,40 +18,16 @@ syntax = "proto3"; option go_package = "github.com/mailgun/gubernator"; -option cc_generic_services = true; +package gubernator.v3; -package pb.gubernator; - -import "google/api/annotations.proto"; - -service V1 { - - // Given a list of rate limit requests, return the rate limits of each. 
- rpc GetRateLimits (GetRateLimitsReq) returns (GetRateLimitsResp) { - option (google.api.http) = { - post: "/v1/GetRateLimits" - body: "*" - }; - } - - - // This method is for round trip benchmarking and can be used by - // the client to determine connectivity to the server - rpc HealthCheck (HealthCheckReq) returns (HealthCheckResp) { - option (google.api.http) = { - get: "/v1/HealthCheck" - }; - } -} - -// Must specify at least one Request -message GetRateLimitsReq { - repeated RateLimitReq requests = 1; +// Must specify at least one RateLimitRequest +message CheckRateLimitsRequest { + repeated RateLimitRequest requests = 1; } -// RateLimits returned are in the same order as the Requests -message GetRateLimitsResp { - repeated RateLimitResp responses = 1; +// RateLimits returned are in the same order provided in CheckRateLimitsRequest +message CheckRateLimitsResponse { + repeated RateLimitResponse responses = 1; } enum Algorithm { @@ -122,15 +98,10 @@ enum Behavior { // cache value. For `LEAKY_BUCKET` it sets the `Remaining` to `Limit`. RESET_REMAINING = 8; - // Enables rate limits to be pushed to other regions. Currently this is only implemented when using - // 'member-list' peer discovery. Also requires GUBER_DATA_CENTER to be set to different values on at - // least 2 instances of Gubernator. - MULTI_REGION = 16; - // TODO: Add support for LOCAL. Which would force the rate limit to be handled by the local instance } -message RateLimitReq { +message RateLimitRequest { // The name of the rate limit IE: 'requests_per_second', 'gets_per_minute` string name = 1; @@ -171,7 +142,7 @@ enum Status { OVER_LIMIT = 1; } -message RateLimitResp { +message RateLimitResponse { // The status of the rate limit. Status status = 1; // The currently configured request limit (Identical to RateLimitRequest.rate_limit_config.limit). @@ -182,12 +153,12 @@ message RateLimitResp { int64 reset_time = 4; // Contains the error; If set all other values should be ignored string error = 5; - // This is additional metadata that a client might find useful. (IE: Additional headers, corrdinator ownership, etc..) + // This is additional metadata that a client might find useful. (IE: Additional headers, coordinator ownership, etc..) map metadata = 6; } -message HealthCheckReq {} -message HealthCheckResp { +message HealthCheckRequest {} +message HealthCheckResponse { // Valid entries are 'healthy' or 'unhealthy' string status = 1; // If 'unhealthy', message indicates the problem diff --git a/gubernator_grpc.pb.go b/gubernator_grpc.pb.go deleted file mode 100644 index 20b4c442..00000000 --- a/gubernator_grpc.pb.go +++ /dev/null @@ -1,168 +0,0 @@ -// -//Copyright 2018-2022 Mailgun Technologies Inc -// -//Licensed under the Apache License, Version 2.0 (the "License"); -//you may not use this file except in compliance with the License. -//You may obtain a copy of the License at -// -//http://www.apache.org/licenses/LICENSE-2.0 -// -//Unless required by applicable law or agreed to in writing, software -//distributed under the License is distributed on an "AS IS" BASIS, -//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -//See the License for the specific language governing permissions and -//limitations under the License. - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
-// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v3.21.7 -// source: gubernator.proto - -package gubernator - -import ( - context "context" - - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - V1_GetRateLimits_FullMethodName = "/pb.gubernator.V1/GetRateLimits" - V1_HealthCheck_FullMethodName = "/pb.gubernator.V1/HealthCheck" -) - -// V1Client is the client API for V1 service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type V1Client interface { - // Given a list of rate limit requests, return the rate limits of each. - GetRateLimits(ctx context.Context, in *GetRateLimitsReq, opts ...grpc.CallOption) (*GetRateLimitsResp, error) - // This method is for round trip benchmarking and can be used by - // the client to determine connectivity to the server - HealthCheck(ctx context.Context, in *HealthCheckReq, opts ...grpc.CallOption) (*HealthCheckResp, error) -} - -type v1Client struct { - cc grpc.ClientConnInterface -} - -func NewV1Client(cc grpc.ClientConnInterface) V1Client { - return &v1Client{cc} -} - -func (c *v1Client) GetRateLimits(ctx context.Context, in *GetRateLimitsReq, opts ...grpc.CallOption) (*GetRateLimitsResp, error) { - out := new(GetRateLimitsResp) - err := c.cc.Invoke(ctx, V1_GetRateLimits_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *v1Client) HealthCheck(ctx context.Context, in *HealthCheckReq, opts ...grpc.CallOption) (*HealthCheckResp, error) { - out := new(HealthCheckResp) - err := c.cc.Invoke(ctx, V1_HealthCheck_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// V1Server is the server API for V1 service. -// All implementations must embed UnimplementedV1Server -// for forward compatibility -type V1Server interface { - // Given a list of rate limit requests, return the rate limits of each. - GetRateLimits(context.Context, *GetRateLimitsReq) (*GetRateLimitsResp, error) - // This method is for round trip benchmarking and can be used by - // the client to determine connectivity to the server - HealthCheck(context.Context, *HealthCheckReq) (*HealthCheckResp, error) - mustEmbedUnimplementedV1Server() -} - -// UnimplementedV1Server must be embedded to have forward compatible implementations. -type UnimplementedV1Server struct { -} - -func (UnimplementedV1Server) GetRateLimits(context.Context, *GetRateLimitsReq) (*GetRateLimitsResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetRateLimits not implemented") -} -func (UnimplementedV1Server) HealthCheck(context.Context, *HealthCheckReq) (*HealthCheckResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method HealthCheck not implemented") -} -func (UnimplementedV1Server) mustEmbedUnimplementedV1Server() {} - -// UnsafeV1Server may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to V1Server will -// result in compilation errors. 
-type UnsafeV1Server interface { - mustEmbedUnimplementedV1Server() -} - -func RegisterV1Server(s grpc.ServiceRegistrar, srv V1Server) { - s.RegisterService(&V1_ServiceDesc, srv) -} - -func _V1_GetRateLimits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetRateLimitsReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(V1Server).GetRateLimits(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: V1_GetRateLimits_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(V1Server).GetRateLimits(ctx, req.(*GetRateLimitsReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _V1_HealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HealthCheckReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(V1Server).HealthCheck(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: V1_HealthCheck_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(V1Server).HealthCheck(ctx, req.(*HealthCheckReq)) - } - return interceptor(ctx, in, info, handler) -} - -// V1_ServiceDesc is the grpc.ServiceDesc for V1 service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var V1_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "pb.gubernator.V1", - HandlerType: (*V1Server)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetRateLimits", - Handler: _V1_GetRateLimits_Handler, - }, - { - MethodName: "HealthCheck", - Handler: _V1_HealthCheck_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "gubernator.proto", -} diff --git a/handler.go b/handler.go new file mode 100644 index 00000000..f2c9cc7c --- /dev/null +++ b/handler.go @@ -0,0 +1,168 @@ +package gubernator + +import ( + "context" + "fmt" + "net/http" + + "github.com/duh-rpc/duh-go" + v1 "github.com/duh-rpc/duh-go/proto/v1" + "github.com/prometheus/client_golang/prometheus" + "go.opentelemetry.io/otel/propagation" +) + +const ( + RPCPeerForward = "/v1/peer.forward" + RPCPeerUpdate = "/v1/peer.update" + RPCRateLimitCheck = "/v1/rate-limit.check" + RPCHealthCheck = "/v1/health.check" +) + +type Handler struct { + prop propagation.TraceContext + duration *prometheus.SummaryVec + metrics http.Handler + service *Service +} + +func NewHandler(s *Service, metrics http.Handler) *Handler { + return &Handler{ + duration: prometheus.NewSummaryVec(prometheus.SummaryOpts{ + Name: "gubernator_http_handler_duration", + Help: "The timings of http requests handled by the service", + Objectives: map[float64]float64{ + 0.5: 0.05, + 0.99: 0.001, + }, + }, []string{"path"}), + metrics: metrics, + service: s, + } +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + defer prometheus.NewTimer(h.duration.WithLabelValues(r.URL.Path)).ObserveDuration() + ctx := h.prop.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) + + switch r.URL.Path { + case RPCPeerForward: + h.PeerForward(ctx, w, r) + return + case RPCPeerUpdate: + h.PeerUpdate(ctx, w, r) + return + case RPCRateLimitCheck: + h.CheckRateLimit(ctx, w, r) + return + case RPCHealthCheck: + h.HealthCheck(w, r) + return + case "/metrics": + 
h.metrics.ServeHTTP(w, r) + return + case "/healthz": + h.HealthZ(w, r) + return + } + duh.ReplyWithCode(w, r, duh.CodeNotImplemented, nil, "no such method; "+r.URL.Path) +} + +func (h *Handler) PeerForward(ctx context.Context, w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + duh.ReplyWithCode(w, r, duh.CodeBadRequest, nil, + fmt.Sprintf("http method '%s' not allowed; only POST", r.Method)) + return + } + + var req ForwardRequest + if err := duh.ReadRequest(r, &req); err != nil { + duh.ReplyError(w, r, err) + return + } + var resp ForwardResponse + if err := h.service.Forward(ctx, &req, &resp); err != nil { + duh.ReplyError(w, r, err) + return + } + duh.Reply(w, r, duh.CodeOK, &resp) +} + +func (h *Handler) PeerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + duh.ReplyWithCode(w, r, duh.CodeBadRequest, nil, + fmt.Sprintf("http method '%s' not allowed; only POST", r.Method)) + return + } + + var req UpdateRequest + if err := duh.ReadRequest(r, &req); err != nil { + duh.ReplyError(w, r, err) + return + } + var resp v1.Reply + if err := h.service.Update(ctx, &req, &resp); err != nil { + duh.ReplyError(w, r, err) + return + } + duh.Reply(w, r, duh.CodeOK, &resp) +} + +func (h *Handler) CheckRateLimit(ctx context.Context, w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + duh.ReplyWithCode(w, r, duh.CodeBadRequest, nil, + fmt.Sprintf("http method '%s' not allowed; only POST", r.Method)) + return + } + + var req CheckRateLimitsRequest + if err := duh.ReadRequest(r, &req); err != nil { + duh.ReplyError(w, r, err) + return + } + + var resp CheckRateLimitsResponse + if err := h.service.CheckRateLimits(ctx, &req, &resp); err != nil { + duh.ReplyError(w, r, err) + return + } + duh.Reply(w, r, duh.CodeOK, &resp) +} + +func (h *Handler) HealthCheck(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + duh.ReplyWithCode(w, r, duh.CodeBadRequest, nil, + fmt.Sprintf("http method '%s' not allowed; only POST", r.Method)) + return + } + + var req HealthCheckRequest + if err := duh.ReadRequest(r, &req); err != nil { + duh.ReplyError(w, r, err) + return + } + var resp HealthCheckResponse + if err := h.service.HealthCheck(r.Context(), &req, &resp); err != nil { + duh.ReplyError(w, r, err) + return + } + duh.Reply(w, r, duh.CodeOK, &resp) +} + +func (h *Handler) HealthZ(w http.ResponseWriter, r *http.Request) { + var resp HealthCheckResponse + if err := h.service.HealthCheck(r.Context(), nil, &resp); err != nil { + duh.ReplyError(w, r, err) + return + } + duh.Reply(w, r, duh.CodeOK, &resp) +} + +// Describe fetches prometheus metrics to be registered +func (h *Handler) Describe(ch chan<- *prometheus.Desc) { + h.duration.Describe(ch) +} + +// Collect fetches metrics from the server for use by prometheus +func (h *Handler) Collect(ch chan<- prometheus.Metric) { + h.duration.Collect(ch) +} diff --git a/interval_test.go b/interval_test.go index 89642c3e..7e8ecaba 100644 --- a/interval_test.go +++ b/interval_test.go @@ -20,7 +20,7 @@ import ( "testing" "time" - gubernator "github.com/mailgun/gubernator/v2" + "github.com/mailgun/gubernator/v3" "github.com/mailgun/holster/v4/clock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/kubernetes.go b/kubernetes.go index e25ebf7f..818e116e 100644 --- a/kubernetes.go +++ b/kubernetes.go @@ -195,7 +195,7 @@ main: e.log.Errorf("expected type v1.Endpoints got '%s' instead", 
reflect.TypeOf(obj).String()) } - peer := PeerInfo{GRPCAddress: fmt.Sprintf("%s:%s", pod.Status.PodIP, e.conf.PodPort)} + peer := PeerInfo{HTTPAddress: fmt.Sprintf("%s:%s", pod.Status.PodIP, e.conf.PodPort)} // if containers are not ready or not running then skip this peer for _, status := range pod.Status.ContainerStatuses { @@ -228,7 +228,7 @@ func (e *K8sPool) updatePeersFromEndpoints() { // TODO(thrawn01): Might consider using the `namespace` as the `DataCenter`. We should // do what ever k8s convention is for identifying a k8s cluster within a federated multi-data // center setup. - peer := PeerInfo{GRPCAddress: fmt.Sprintf("%s:%s", addr.IP, e.conf.PodPort)} + peer := PeerInfo{HTTPAddress: fmt.Sprintf("%s:%s", addr.IP, e.conf.PodPort)} if addr.IP == e.conf.PodIP { peer.IsOwner = true diff --git a/lrucache_test.go b/lrucache_test.go index 9609cc37..d89dfdb6 100644 --- a/lrucache_test.go +++ b/lrucache_test.go @@ -24,7 +24,7 @@ import ( "testing" "time" - gubernator "github.com/mailgun/gubernator/v2" + "github.com/mailgun/gubernator/v3" "github.com/mailgun/holster/v4/clock" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" diff --git a/memberlist.go b/memberlist.go index 1d65434d..c9e2e16a 100644 --- a/memberlist.go +++ b/memberlist.go @@ -224,7 +224,7 @@ func (e *memberListEventHandler) callOnUpdate() { var peers []PeerInfo for _, p := range e.peers { - if p.GRPCAddress == e.conf.Advertise.GRPCAddress { + if p.HTTPAddress == e.conf.Advertise.HTTPAddress { p.IsOwner = true } peers = append(peers, p) @@ -260,7 +260,7 @@ func unmarshallPeer(b []byte, ip string) (PeerInfo, error) { if metadata.AdvertiseAddress == "" { metadata.AdvertiseAddress = makeAddress(ip, metadata.GubernatorPort) } - return PeerInfo{GRPCAddress: metadata.AdvertiseAddress, DataCenter: metadata.DataCenter}, nil + return PeerInfo{HTTPAddress: metadata.AdvertiseAddress, DataCenter: metadata.DataCenter}, nil } return peer, nil } diff --git a/metadata_carrier_test.go b/metadata_carrier_test.go index 48f565b6..3ef33b63 100644 --- a/metadata_carrier_test.go +++ b/metadata_carrier_test.go @@ -19,7 +19,7 @@ package gubernator_test import ( "testing" - gubernator "github.com/mailgun/gubernator/v2" + "github.com/mailgun/gubernator/v3" "github.com/stretchr/testify/assert" ) diff --git a/mock_cache_test.go b/mock_cache_test.go index d2e02e66..3cd58a28 100644 --- a/mock_cache_test.go +++ b/mock_cache_test.go @@ -19,7 +19,7 @@ package gubernator_test // Mock implementation of Cache. import ( - guber "github.com/mailgun/gubernator/v2" + guber "github.com/mailgun/gubernator/v3" "github.com/stretchr/testify/mock" ) diff --git a/mock_loader_test.go b/mock_loader_test.go index 3a52c686..268b66f7 100644 --- a/mock_loader_test.go +++ b/mock_loader_test.go @@ -19,7 +19,7 @@ package gubernator_test // Mock implementation of Loader. 
import ( - guber "github.com/mailgun/gubernator/v2" + guber "github.com/mailgun/gubernator/v3" "github.com/stretchr/testify/mock" ) diff --git a/mock_store_test.go b/mock_store_test.go index 8d3d1bdd..206a6800 100644 --- a/mock_store_test.go +++ b/mock_store_test.go @@ -21,7 +21,7 @@ package gubernator_test import ( "context" - guber "github.com/mailgun/gubernator/v2" + guber "github.com/mailgun/gubernator/v3" "github.com/stretchr/testify/mock" ) @@ -31,11 +31,11 @@ type MockStore2 struct { var _ guber.Store = &MockStore2{} -func (m *MockStore2) OnChange(ctx context.Context, r *guber.RateLimitReq, item *guber.CacheItem) { +func (m *MockStore2) OnChange(ctx context.Context, r *guber.RateLimitRequest, item *guber.CacheItem) { m.Called(ctx, r, item) } -func (m *MockStore2) Get(ctx context.Context, r *guber.RateLimitReq) (*guber.CacheItem, bool) { +func (m *MockStore2) Get(ctx context.Context, r *guber.RateLimitRequest) (*guber.CacheItem, bool) { args := m.Called(ctx, r) retval, _ := args.Get(0).(*guber.CacheItem) return retval, args.Bool(1) diff --git a/peer.go b/peer.go new file mode 100644 index 00000000..ffb8855e --- /dev/null +++ b/peer.go @@ -0,0 +1,441 @@ +/* +Copyright 2018-2023 Mailgun Technologies Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gubernator + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + + "github.com/mailgun/errors" + "github.com/mailgun/holster/v4/clock" + "github.com/mailgun/holster/v4/collections" + "github.com/mailgun/holster/v4/ctxutil" + "github.com/mailgun/holster/v4/setter" + "github.com/mailgun/holster/v4/tracing" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +type Peer struct { + lastErrs *collections.LRUCache + wg sync.WaitGroup + queue chan *request + mutex sync.RWMutex + client PeerClient + conf PeerConfig + inShutdown int64 +} + +type response struct { + rl *RateLimitResponse + err error +} + +type request struct { + request *RateLimitRequest + resp chan *response + ctx context.Context +} + +type PeerConfig struct { + PeerClient PeerClient + Behavior BehaviorConfig + Info PeerInfo + Log FieldLogger +} + +type PeerClient interface { + Forward(context.Context, *ForwardRequest, *ForwardResponse) error + Update(context.Context, *UpdateRequest) error +} + +func NewPeer(conf PeerConfig) (*Peer, error) { + if len(conf.Info.HTTPAddress) == 0 { + return nil, errors.New("Peer.Info.HTTPAddress is empty; must provide an address") + } + + setter.SetDefault(&conf.PeerClient, NewPeerClient(WithNoTLS(conf.Info.HTTPAddress))) + setter.SetDefault(&conf.Log, logrus.WithField("category", "Peer")) + + p := &Peer{ + lastErrs: collections.NewLRUCache(100), + queue: make(chan *request, 1000), + client: conf.PeerClient, + conf: conf, + } + go p.run() + return p, nil +} + +// Info returns PeerInfo struct that describes this Peer +func (p *Peer) Info() PeerInfo { + return p.conf.Info +} + +var ( + // TODO: Should retry in 
this case
+	ErrPeerShutdown = errors.New("peer is shutdown; try a different peer")
+)
+
+// Forward forwards a rate limit request to a peer.
+// If the rate limit has `behavior == BATCHING` configured, this method will
+// attempt to batch the rate limits.
+func (p *Peer) Forward(ctx context.Context, r *RateLimitRequest) (resp *RateLimitResponse, err error) {
+	ctx = tracing.StartNamedScope(ctx, "Peer.Forward")
+	defer func() { tracing.EndScope(ctx, err) }()
+	span := trace.SpanFromContext(ctx)
+	span.SetAttributes(
+		attribute.String("peer.HTTPAddress", p.conf.Info.HTTPAddress),
+		attribute.String("peer.Datacenter", p.conf.Info.DataCenter),
+		attribute.String("request.key", r.UniqueKey),
+		attribute.String("request.name", r.Name),
+		attribute.Int64("request.algorithm", int64(r.Algorithm)),
+		attribute.Int64("request.behavior", int64(r.Behavior)),
+		attribute.Int64("request.duration", r.Duration),
+		attribute.Int64("request.limit", r.Limit),
+		attribute.Int64("request.hits", r.Hits),
+		attribute.Int64("request.burst", r.Burst),
+	)
+
+	if atomic.LoadInt64(&p.inShutdown) == 1 {
+		return nil, ErrPeerShutdown
+	}
+
+	// NOTE: Add() must be done within the RLock since we must ensure all in-flight
+	// Forward() requests are done before calls to Close() can complete. We can't
+	// simply wg.Wait(), because a Forward() call could be executing at this very
+	// spot when Close() is called; in that scenario wg.Add() and wg.Wait() race.
+	p.mutex.RLock()
+	p.wg.Add(1)
+	defer func() {
+		p.mutex.RUnlock()
+		defer p.wg.Done()
+	}()
+
+	// If config asked for no batching
+	if HasBehavior(r.Behavior, Behavior_NO_BATCHING) {
+		// If no metadata is provided
+		if r.Metadata == nil {
+			r.Metadata = make(map[string]string)
+		}
+		// Propagate the trace context along with the rate limit so
+		// peers can continue to report traces for this rate limit.
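+		// (A sketch of the effect, assuming the standard W3C propagator: after
+		// Inject() below, r.Metadata gains a "traceparent" entry of the form
+		// "00-<trace-id>-<span-id>-<flags>", which the owning peer can extract
+		// to join this trace.)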
+		prop := propagation.TraceContext{}
+		prop.Inject(ctx, &MetadataCarrier{Map: r.Metadata})
+
+		// Forward a single rate limit
+		var fr ForwardResponse
+		err = p.ForwardBatch(ctx, &ForwardRequest{
+			Requests: []*RateLimitRequest{r},
+		}, &fr)
+		if err != nil {
+			err = errors.Wrap(err, "Error in forward")
+			return nil, p.setLastErr(err)
+		}
+		return fr.RateLimits[0], nil
+	}
+
+	resp, err = p.forwardBatch(ctx, r)
+	if err != nil {
+		err = errors.Wrap(err, "Error in forwardBatch")
+		return nil, p.setLastErr(err)
+	}
+
+	return resp, nil
+}
+
+// ForwardBatch requests a list of rate limit statuses from a peer
+func (p *Peer) ForwardBatch(ctx context.Context, req *ForwardRequest, resp *ForwardResponse) (err error) {
+	ctx = tracing.StartNamedScopeDebug(ctx, "Peer.ForwardBatch")
+	defer func() { tracing.EndScope(ctx, err) }()
+
+	if err = p.client.Forward(ctx, req, resp); err != nil {
+		return p.setLastErr(errors.Wrap(err, "Error in client.Forward()"))
+	}
+
+	// Unlikely, but this avoids a panic if something wonky happens
+	if len(resp.RateLimits) != len(req.Requests) {
+		return p.setLastErr(
+			errors.New("number of rate limits in peer response does not match request"))
+	}
+	return nil
+}
+
+// Update sends rate limit status updates to a peer
+func (p *Peer) Update(ctx context.Context, req *UpdateRequest) (err error) {
+	ctx = tracing.StartNamedScope(ctx, "Peer.Update")
+	defer func() { tracing.EndScope(ctx, err) }()
+
+	err = p.client.Update(ctx, req)
+	if err != nil {
+		_ = p.setLastErr(err)
+	}
+	return err
+}
+
+func (p *Peer) GetLastErr() []string {
+	var errs []string
+	keys := p.lastErrs.Keys()
+
+	// Get errors from each key in the cache
+	for _, key := range keys {
+		err, ok := p.lastErrs.Get(key)
+		if ok {
+			errs = append(errs, err.(error).Error())
+		}
+	}
+
+	return errs
+}
+
+// Close waits for all in-flight requests to complete and closes the request
+// queue; it returns early with ctx.Err() if the context is canceled first.
+func (p *Peer) Close(ctx context.Context) error {
+	if atomic.LoadInt64(&p.inShutdown) == 1 {
+		return nil
+	}
+
+	atomic.AddInt64(&p.inShutdown, 1)
+
+	// This allows us to wait on the wait group, or until the context
+	// has been canceled.
+	waitChan := make(chan struct{})
+	go func() {
+		p.mutex.Lock()
+		p.wg.Wait()
+		close(p.queue)
+		p.mutex.Unlock()
+		close(waitChan)
+	}()
+
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-waitChan:
+		return nil
+	}
+}
+
+func (p *Peer) forwardBatch(ctx context.Context, r *RateLimitRequest) (resp *RateLimitResponse, err error) {
+	ctx = tracing.StartNamedScopeDebug(ctx, "Peer.forwardBatch")
+	defer func() { tracing.EndScope(ctx, err) }()
+
+	funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("Peer.forwardBatch"))
+	defer funcTimer.ObserveDuration()
+
+	if atomic.LoadInt64(&p.inShutdown) == 1 {
+		return nil, p.setLastErr(&ErrNotReady{err: errors.New("already disconnecting")})
+	}
+
+	// Wait for a response or context cancel
+	ctx2 := tracing.StartNamedScopeDebug(ctx, "Wait for response")
+	defer tracing.EndScope(ctx2, nil)
+
+	req := request{
+		resp:    make(chan *response, 1),
+		ctx:     ctx2,
+		request: r,
+	}
+
+	// Enqueue the request to be sent
+	peerAddr := p.Info().HTTPAddress
+	metricBatchQueueLength.WithLabelValues(peerAddr).Set(float64(len(p.queue)))
+
+	select {
+	case p.queue <- &req:
+		// Successfully enqueued request.
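+		// The run() goroutine owns the queue; it will fold this request into
+		// the next batch and deliver the result on req.resp below.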
+	case <-ctx2.Done():
+		return nil, errors.Wrap(ctx2.Err(), "Context error while enqueuing request")
+	}
+
+	p.wg.Add(1)
+	defer func() {
+		p.wg.Done()
+	}()
+
+	select {
+	case re := <-req.resp:
+		if re.err != nil {
+			err := errors.Wrap(re.err, "Request error")
+			return nil, p.setLastErr(err)
+		}
+		return re.rl, nil
+	case <-ctx2.Done():
+		return nil, errors.Wrap(ctx2.Err(), "Context error while waiting for response")
+	}
+}
+
+// run waits for requests to be queued and sends the accumulated queue as a
+// single batch when either the BatchWait interval has elapsed or the queue
+// reaches BatchLimit.
+func (p *Peer) run() {
+	var interval = NewInterval(p.conf.Behavior.BatchWait)
+	defer interval.Stop()
+
+	var queue []*request
+
+	for {
+		select {
+		case r, ok := <-p.queue:
+			// If the queue has closed, we need to send the rest of the queue
+			if !ok {
+				if len(queue) > 0 {
+					p.sendBatch(queue)
+				}
+				return
+			}
+
+			queue = append(queue, r)
+			// Send the queue if we reached our batch limit
+			if len(queue) >= p.conf.Behavior.BatchLimit {
+				p.conf.Log.WithFields(logrus.Fields{
+					"queueLen":   len(queue),
+					"batchLimit": p.conf.Behavior.BatchLimit,
+				}).Debug("run() reached batch limit")
+				ref := queue
+				queue = nil
+				go p.sendBatch(ref)
+				continue
+			}
+
+			// If this is our first enqueued item since last
+			// sendBatch, reset interval timer.
+			if len(queue) == 1 {
+				interval.Next()
+			}
+			continue
+
+		case <-interval.C:
+			queue2 := queue
+
+			if len(queue2) > 0 {
+				queue = nil
+				go p.sendBatch(queue2)
+			}
+		}
+	}
+}
+
+// sendBatch sends the queue provided and returns the responses to
+// waiting goroutines
+func (p *Peer) sendBatch(queue []*request) {
+	ctx := tracing.StartNamedScopeDebug(context.Background(), "Peer.sendBatch")
+	defer tracing.EndScope(ctx, nil)
+
+	batchSendTimer := prometheus.NewTimer(metricBatchSendDuration.WithLabelValues(p.conf.Info.HTTPAddress))
+	defer batchSendTimer.ObserveDuration()
+	funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("Peer.sendBatch"))
+	defer funcTimer.ObserveDuration()
+
+	var req ForwardRequest
+	for _, r := range queue {
+		// NOTE: This trace has the same name because it's in a separate trace from the one above.
+		// We link the two traces, so we can relate our rate limit trace back to the above trace.
+		r.ctx = tracing.StartNamedScopeDebug(r.ctx, "Peer.sendBatch",
+			trace.WithLinks(trace.LinkFromContext(ctx)))
+		// If no metadata is provided
+		if r.request.Metadata == nil {
+			r.request.Metadata = make(map[string]string)
+		}
+		// Propagate the trace context along with the batched rate limit so
+		// peers can continue to report traces for this rate limit.
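+		// Note that the context injected below is r.ctx (the originating
+		// caller's span), not the batch ctx, so each rate limit in the batch
+		// carries its own trace back to the peer that owns it.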
+ prop := propagation.TraceContext{} + prop.Inject(r.ctx, &MetadataCarrier{Map: r.request.Metadata}) + req.Requests = append(req.Requests, r.request) + tracing.EndScope(r.ctx, nil) + + } + + ctx, cancel := ctxutil.WithTimeout(ctx, p.conf.Behavior.BatchTimeout) + var resp ForwardResponse + err := p.client.Forward(ctx, &req, &resp) + cancel() + + // An error here indicates the entire request failed + if err != nil { + err = errors.Wrap(err, "Error in client.forward") + p.conf.Log.WithFields(logrus.Fields{ + "batchTimeout": p.conf.Behavior.BatchTimeout.String(), + "queueLen": len(queue), + "error": err, + }).Error("Error in client.forward") + _ = p.setLastErr(err) + + for _, r := range queue { + r.resp <- &response{err: err} + } + return + } + + // Unlikely, but this avoids a panic if something wonky happens + if len(resp.RateLimits) != len(queue) { + for _, r := range queue { + r.resp <- &response{err: errors.New("server responded with incorrect rate limit list size")} + } + return + } + + // Provide responses to channels waiting in the queue + for i, r := range queue { + r.resp <- &response{rl: resp.RateLimits[i]} + } +} + +func (p *Peer) setLastErr(err error) error { + // If we get a nil error return without caching it + if err == nil { + return err + } + + // Add error to the cache with a TTL of 5 minutes + p.lastErrs.AddWithTTL(err.Error(), + errors.Wrap(err, fmt.Sprintf("from host %s", p.conf.Info.HTTPAddress)), + clock.Minute*5) + + return err +} + +// TODO: Replace this with modern error handling + +// ErrNotReady is returned if the peer is not connected or is in a closing state +type ErrNotReady struct { + err error +} + +func (p *ErrNotReady) NotReady() bool { + return true +} + +func (p *ErrNotReady) Error() string { + return p.err.Error() +} + +func (p *ErrNotReady) Cause() error { + return p.err +} + +type notReadyErr interface { + NotReady() bool +} + +// IsNotReady returns true if the err is because the peer is not connected or in a closing state +func IsNotReady(err error) bool { + te, ok := err.(notReadyErr) + return ok && te.NotReady() +} diff --git a/peer.pb.go b/peer.pb.go new file mode 100644 index 00000000..6e2be138 --- /dev/null +++ b/peer.pb.go @@ -0,0 +1,391 @@ +// +//Copyright 2018-2022 Mailgun Technologies Inc +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: peer.proto + +package gubernator + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ForwardRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Must specify at least one RateLimit. The peer that receives this request MUST be authoritative for + // each rate_limit[x].unique_key provided, as the peer will not forward the request to any other peers + Requests []*RateLimitRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` +} + +func (x *ForwardRequest) Reset() { + *x = ForwardRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_peer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ForwardRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForwardRequest) ProtoMessage() {} + +func (x *ForwardRequest) ProtoReflect() protoreflect.Message { + mi := &file_peer_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForwardRequest.ProtoReflect.Descriptor instead. +func (*ForwardRequest) Descriptor() ([]byte, []int) { + return file_peer_proto_rawDescGZIP(), []int{0} +} + +func (x *ForwardRequest) GetRequests() []*RateLimitRequest { + if x != nil { + return x.Requests + } + return nil +} + +type ForwardResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Responses are in the same order as they appeared in the PeerRateLimitRequests + RateLimits []*RateLimitResponse `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits,omitempty"` +} + +func (x *ForwardResponse) Reset() { + *x = ForwardResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_peer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ForwardResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForwardResponse) ProtoMessage() {} + +func (x *ForwardResponse) ProtoReflect() protoreflect.Message { + mi := &file_peer_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForwardResponse.ProtoReflect.Descriptor instead. 
+func (*ForwardResponse) Descriptor() ([]byte, []int) { + return file_peer_proto_rawDescGZIP(), []int{1} +} + +func (x *ForwardResponse) GetRateLimits() []*RateLimitResponse { + if x != nil { + return x.RateLimits + } + return nil +} + +type UpdateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Must specify at least one RateLimit + Globals []*UpdateRateLimit `protobuf:"bytes,1,rep,name=globals,proto3" json:"globals,omitempty"` +} + +func (x *UpdateRequest) Reset() { + *x = UpdateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_peer_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateRequest) ProtoMessage() {} + +func (x *UpdateRequest) ProtoReflect() protoreflect.Message { + mi := &file_peer_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateRequest.ProtoReflect.Descriptor instead. +func (*UpdateRequest) Descriptor() ([]byte, []int) { + return file_peer_proto_rawDescGZIP(), []int{2} +} + +func (x *UpdateRequest) GetGlobals() []*UpdateRateLimit { + if x != nil { + return x.Globals + } + return nil +} + +type UpdateRateLimit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Update *RateLimitResponse `protobuf:"bytes,2,opt,name=update,proto3" json:"update,omitempty"` + Algorithm Algorithm `protobuf:"varint,3,opt,name=algorithm,proto3,enum=gubernator.v3.Algorithm" json:"algorithm,omitempty"` +} + +func (x *UpdateRateLimit) Reset() { + *x = UpdateRateLimit{} + if protoimpl.UnsafeEnabled { + mi := &file_peer_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateRateLimit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateRateLimit) ProtoMessage() {} + +func (x *UpdateRateLimit) ProtoReflect() protoreflect.Message { + mi := &file_peer_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateRateLimit.ProtoReflect.Descriptor instead. 
+func (*UpdateRateLimit) Descriptor() ([]byte, []int) { + return file_peer_proto_rawDescGZIP(), []int{3} +} + +func (x *UpdateRateLimit) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *UpdateRateLimit) GetUpdate() *RateLimitResponse { + if x != nil { + return x.Update + } + return nil +} + +func (x *UpdateRateLimit) GetAlgorithm() Algorithm { + if x != nil { + return x.Algorithm + } + return Algorithm_TOKEN_BUCKET +} + +var File_peer_proto protoreflect.FileDescriptor + +var file_peer_proto_rawDesc = []byte{ + 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, 0x75, + 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x10, 0x67, 0x75, 0x62, + 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4d, 0x0a, + 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x3b, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, 0x54, 0x0a, 0x0f, + 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x41, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0a, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x73, 0x22, 0x49, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x07, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x61, 0x74, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x52, 0x07, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x73, 0x22, 0x95, 0x01, + 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x38, 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x36, 0x0a, + 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, + 0x72, 0x69, 0x74, 0x68, 0x6d, 0x42, 0x22, 0x5a, 0x1d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x61, 0x69, 0x6c, 0x67, 0x75, 0x6e, 0x2f, 0x67, 0x75, 0x62, 0x65, + 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x80, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_peer_proto_rawDescOnce sync.Once + 
file_peer_proto_rawDescData = file_peer_proto_rawDesc +) + +func file_peer_proto_rawDescGZIP() []byte { + file_peer_proto_rawDescOnce.Do(func() { + file_peer_proto_rawDescData = protoimpl.X.CompressGZIP(file_peer_proto_rawDescData) + }) + return file_peer_proto_rawDescData +} + +var file_peer_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_peer_proto_goTypes = []interface{}{ + (*ForwardRequest)(nil), // 0: gubernator.v3.ForwardRequest + (*ForwardResponse)(nil), // 1: gubernator.v3.ForwardResponse + (*UpdateRequest)(nil), // 2: gubernator.v3.UpdateRequest + (*UpdateRateLimit)(nil), // 3: gubernator.v3.UpdateRateLimit + (*RateLimitRequest)(nil), // 4: gubernator.v3.RateLimitRequest + (*RateLimitResponse)(nil), // 5: gubernator.v3.RateLimitResponse + (Algorithm)(0), // 6: gubernator.v3.Algorithm +} +var file_peer_proto_depIdxs = []int32{ + 4, // 0: gubernator.v3.ForwardRequest.requests:type_name -> gubernator.v3.RateLimitRequest + 5, // 1: gubernator.v3.ForwardResponse.rate_limits:type_name -> gubernator.v3.RateLimitResponse + 3, // 2: gubernator.v3.UpdateRequest.globals:type_name -> gubernator.v3.UpdateRateLimit + 5, // 3: gubernator.v3.UpdateRateLimit.update:type_name -> gubernator.v3.RateLimitResponse + 6, // 4: gubernator.v3.UpdateRateLimit.algorithm:type_name -> gubernator.v3.Algorithm + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_peer_proto_init() } +func file_peer_proto_init() { + if File_peer_proto != nil { + return + } + file_gubernator_proto_init() + if !protoimpl.UnsafeEnabled { + file_peer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ForwardRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_peer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ForwardResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_peer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_peer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateRateLimit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_peer_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_peer_proto_goTypes, + DependencyIndexes: file_peer_proto_depIdxs, + MessageInfos: file_peer_proto_msgTypes, + }.Build() + File_peer_proto = out.File + file_peer_proto_rawDesc = nil + file_peer_proto_goTypes = nil + file_peer_proto_depIdxs = nil +} diff --git a/peers.proto b/peer.proto similarity index 55% rename from peers.proto rename to peer.proto index 5caefae4..75862b17 100644 --- a/peers.proto +++ b/peer.proto @@ -20,38 +20,28 @@ option go_package = "github.com/mailgun/gubernator"; option cc_generic_services = 
true; -package pb.gubernator; +package gubernator.v3; import "gubernator.proto"; -// NOTE: For use by gubernator peers only -service PeersV1 { - // Used by peers to relay batches of requests to an authoritative peer - rpc GetPeerRateLimits (GetPeerRateLimitsReq) returns (GetPeerRateLimitsResp) {} - - // Used by peers send global rate limit updates to other peers - rpc UpdatePeerGlobals (UpdatePeerGlobalsReq) returns (UpdatePeerGlobalsResp) {} -} - -message GetPeerRateLimitsReq { - // Must specify at least one RateLimit. The peer that recives this request MUST be authoritative for +message ForwardRequest { + // Must specify at least one RateLimit. The peer that receives this request MUST be authoritative for // each rate_limit[x].unique_key provided, as the peer will not forward the request to any other peers - repeated RateLimitReq requests = 1; + repeated RateLimitRequest requests = 1; } -message GetPeerRateLimitsResp { +message ForwardResponse { // Responses are in the same order as they appeared in the PeerRateLimitRequests - repeated RateLimitResp rate_limits = 1; + repeated RateLimitResponse rate_limits = 1; } -message UpdatePeerGlobalsReq { +message UpdateRequest { // Must specify at least one RateLimit - repeated UpdatePeerGlobal globals = 1; + repeated UpdateRateLimit globals = 1; } -message UpdatePeerGlobal { +message UpdateRateLimit { string key = 1; - RateLimitResp status = 2; + RateLimitResponse update = 2; Algorithm algorithm = 3; } -message UpdatePeerGlobalsResp {} diff --git a/peer_client.go b/peer_client.go deleted file mode 100644 index a39d9f02..00000000 --- a/peer_client.go +++ /dev/null @@ -1,534 +0,0 @@ -/* -Copyright 2018-2022 Mailgun Technologies Inc - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package gubernator - -import ( - "context" - "crypto/tls" - "fmt" - "sync" - - "github.com/mailgun/holster/v4/clock" - "github.com/mailgun/holster/v4/collections" - "github.com/mailgun/holster/v4/errors" - "github.com/mailgun/holster/v4/tracing" - "github.com/prometheus/client_golang/prometheus" - "github.com/sirupsen/logrus" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/insecure" -) - -type PeerPicker interface { - GetByPeerInfo(PeerInfo) *PeerClient - Peers() []*PeerClient - Get(string) (*PeerClient, error) - New() PeerPicker - Add(*PeerClient) -} - -type peerStatus int - -const ( - peerNotConnected peerStatus = iota - peerConnected - peerClosing -) - -type PeerClient struct { - client PeersV1Client - conn *grpc.ClientConn - conf PeerConfig - queue chan *request - lastErrs *collections.LRUCache - - mutex sync.RWMutex // This mutex is for verifying the closing state of the client - status peerStatus // Keep the current status of the peer - wg sync.WaitGroup // This wait group is to monitor the number of in-flight requests -} - -type response struct { - rl *RateLimitResp - err error -} - -type request struct { - request *RateLimitReq - resp chan *response - ctx context.Context -} - -type PeerConfig struct { - TLS *tls.Config - Behavior BehaviorConfig - Info PeerInfo - Log FieldLogger - TraceGRPC bool -} - -func NewPeerClient(conf PeerConfig) *PeerClient { - return &PeerClient{ - queue: make(chan *request, 1000), - status: peerNotConnected, - conf: conf, - lastErrs: collections.NewLRUCache(100), - } -} - -// Connect establishes a GRPC connection to a peer -func (c *PeerClient) connect(ctx context.Context) (err error) { - // NOTE: To future self, this mutex is used here because we need to know if the peer is disconnecting and - // handle ErrClosing. Since this mutex MUST be here we take this opportunity to also see if we are connected. - // Doing this here encapsulates managing the connected state to the PeerClient struct. Previously a PeerClient - // was connected when `NewPeerClient()` was called however, when adding support for multi data centers having a - // PeerClient connected to every Peer in every data center continuously is not desirable. - - funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("PeerClient.connect")) - defer funcTimer.ObserveDuration() - lockTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("PeerClient.connect_RLock")) - - c.mutex.RLock() - lockTimer.ObserveDuration() - - if c.status == peerClosing { - c.mutex.RUnlock() - return &PeerErr{err: errors.New("already disconnecting")} - } - - if c.status == peerNotConnected { - // This mutex stuff looks wonky, but it allows us to use RLock() 99% of the time, while the 1% where we - // actually need to connect uses a full Lock(), using RLock() most of which should reduce the over head - // of a full lock on every call - - // Yield the read lock so we can get the RW lock - c.mutex.RUnlock() - c.mutex.Lock() - defer c.mutex.Unlock() - - // Now that we have the RW lock, ensure no else got here ahead of us. - if c.status == peerConnected { - return nil - } - - // Setup OpenTelemetry interceptor to propagate spans. 
- var opts []grpc.DialOption - - if c.conf.TraceGRPC { - opts = []grpc.DialOption{ - grpc.WithStatsHandler(otelgrpc.NewClientHandler()), - } - } - - if c.conf.TLS != nil { - opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(c.conf.TLS))) - } else { - opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } - - var err error - c.conn, err = grpc.Dial(c.conf.Info.GRPCAddress, opts...) - if err != nil { - return c.setLastErr(&PeerErr{err: errors.Wrapf(err, "failed to dial peer %s", c.conf.Info.GRPCAddress)}) - } - c.client = NewPeersV1Client(c.conn) - c.status = peerConnected - - if !c.conf.Behavior.DisableBatching { - go c.runBatch() - } - return nil - } - c.mutex.RUnlock() - return nil -} - -// Info returns PeerInfo struct that describes this PeerClient -func (c *PeerClient) Info() PeerInfo { - return c.conf.Info -} - -// GetPeerRateLimit forwards a rate limit request to a peer. If the rate limit has `behavior == BATCHING` configured, -// this method will attempt to batch the rate limits -func (c *PeerClient) GetPeerRateLimit(ctx context.Context, r *RateLimitReq) (resp *RateLimitResp, err error) { - span := trace.SpanFromContext(ctx) - span.SetAttributes( - attribute.String("ratelimit.key", r.UniqueKey), - attribute.String("ratelimit.name", r.Name), - ) - - // If config asked for no batching - if c.conf.Behavior.DisableBatching || HasBehavior(r.Behavior, Behavior_NO_BATCHING) { - // If no metadata is provided - if r.Metadata == nil { - r.Metadata = make(map[string]string) - } - // Propagate the trace context along with the rate limit so - // peers can continue to report traces for this rate limit. - prop := propagation.TraceContext{} - prop.Inject(ctx, &MetadataCarrier{Map: r.Metadata}) - - // Send a single low latency rate limit request - resp, err := c.GetPeerRateLimits(ctx, &GetPeerRateLimitsReq{ - Requests: []*RateLimitReq{r}, - }) - if err != nil { - err = errors.Wrap(err, "Error in GetPeerRateLimits") - return nil, c.setLastErr(err) - } - return resp.RateLimits[0], nil - } - - resp, err = c.getPeerRateLimitsBatch(ctx, r) - if err != nil { - err = errors.Wrap(err, "Error in getPeerRateLimitsBatch") - return nil, c.setLastErr(err) - } - - return resp, nil -} - -// GetPeerRateLimits requests a list of rate limit statuses from a peer -func (c *PeerClient) GetPeerRateLimits(ctx context.Context, r *GetPeerRateLimitsReq) (resp *GetPeerRateLimitsResp, err error) { - if err := c.connect(ctx); err != nil { - err = errors.Wrap(err, "Error in connect") - metricCheckErrorCounter.WithLabelValues("Connect error").Add(1) - return nil, c.setLastErr(err) - } - - // NOTE: This must be done within the RLock since calling Wait() in Shutdown() causes - // a race condition if called within a separate go routine if the internal wg is `0` - // when Wait() is called then Add(1) is called concurrently. - c.mutex.RLock() - c.wg.Add(1) - defer func() { - c.mutex.RUnlock() - defer c.wg.Done() - }() - - resp, err = c.client.GetPeerRateLimits(ctx, r) - if err != nil { - err = errors.Wrap(err, "Error in client.GetPeerRateLimits") - // metricCheckErrorCounter is updated within client.GetPeerRateLimits(). 
- return nil, c.setLastErr(err) - } - - // Unlikely, but this avoids a panic if something wonky happens - if len(resp.RateLimits) != len(r.Requests) { - err = errors.New("number of rate limits in peer response does not match request") - metricCheckErrorCounter.WithLabelValues("Item mismatch").Add(1) - return nil, c.setLastErr(err) - } - return resp, nil -} - -// UpdatePeerGlobals sends global rate limit status updates to a peer -func (c *PeerClient) UpdatePeerGlobals(ctx context.Context, r *UpdatePeerGlobalsReq) (resp *UpdatePeerGlobalsResp, err error) { - if err := c.connect(ctx); err != nil { - return nil, c.setLastErr(err) - } - - // See NOTE above about RLock and wg.Add(1) - c.mutex.RLock() - c.wg.Add(1) - defer func() { - c.mutex.RUnlock() - defer c.wg.Done() - }() - - resp, err = c.client.UpdatePeerGlobals(ctx, r) - if err != nil { - _ = c.setLastErr(err) - } - - return resp, err -} - -func (c *PeerClient) setLastErr(err error) error { - // If we get a nil error return without caching it - if err == nil { - return err - } - - // Prepend client address to error - errWithHostname := errors.Wrap(err, fmt.Sprintf("from host %s", c.conf.Info.GRPCAddress)) - key := err.Error() - - // Add error to the cache with a TTL of 5 minutes - c.lastErrs.AddWithTTL(key, errWithHostname, clock.Minute*5) - - return err -} - -func (c *PeerClient) GetLastErr() []string { - var errs []string - keys := c.lastErrs.Keys() - - // Get errors from each key in the cache - for _, key := range keys { - err, ok := c.lastErrs.Get(key) - if ok { - errs = append(errs, err.(error).Error()) - } - } - - return errs -} - -func (c *PeerClient) getPeerRateLimitsBatch(ctx context.Context, r *RateLimitReq) (resp *RateLimitResp, err error) { - funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("PeerClient.getPeerRateLimitsBatch")) - defer funcTimer.ObserveDuration() - - if err := c.connect(ctx); err != nil { - err = errors.Wrap(err, "Error in connect") - return nil, c.setLastErr(err) - } - - // See NOTE above about RLock and wg.Add(1) - c.mutex.RLock() - if c.status == peerClosing { - err := &PeerErr{err: errors.New("already disconnecting")} - return nil, c.setLastErr(err) - } - - // Wait for a response or context cancel - req := request{ - resp: make(chan *response, 1), - ctx: ctx, - request: r, - } - - // Enqueue the request to be sent - peerAddr := c.Info().GRPCAddress - metricBatchQueueLength.WithLabelValues(peerAddr).Set(float64(len(c.queue))) - - select { - case c.queue <- &req: - // Successfully enqueued request. - case <-ctx.Done(): - return nil, errors.Wrap(ctx.Err(), "Context error while enqueuing request") - } - - c.wg.Add(1) - defer func() { - c.mutex.RUnlock() - c.wg.Done() - }() - - select { - case re := <-req.resp: - if re.err != nil { - err := errors.Wrap(c.setLastErr(re.err), "Request error") - return nil, c.setLastErr(err) - } - return re.rl, nil - case <-ctx.Done(): - return nil, errors.Wrap(ctx.Err(), "Context error while waiting for response") - } -} - -// run processes batching requests by waiting for requests to be queued. Send -// the queue as a batch when either c.batchWait time has elapsed or the queue -// reaches c.batchLimit. 
-func (c *PeerClient) runBatch() { - var interval = NewInterval(c.conf.Behavior.BatchWait) - defer interval.Stop() - - var queue []*request - - for { - ctx := context.Background() - - select { - case r, ok := <-c.queue: - // If the queue has shutdown, we need to send the rest of the queue - if !ok { - if len(queue) > 0 { - c.sendBatch(ctx, queue) - } - return - } - - queue = append(queue, r) - // Send the queue if we reached our batch limit - if len(queue) >= c.conf.Behavior.BatchLimit { - c.conf.Log.WithContext(ctx). - WithFields(logrus.Fields{ - "queueLen": len(queue), - "batchLimit": c.conf.Behavior.BatchLimit, - }). - Debug("runBatch() reached batch limit") - ref := queue - queue = nil - go c.sendBatch(ctx, ref) - continue - } - - // If this is our first enqueued item since last - // sendBatch, reset interval timer. - if len(queue) == 1 { - interval.Next() - } - continue - - case <-interval.C: - queue2 := queue - - if len(queue2) > 0 { - queue = nil - - go func() { - c.sendBatch(ctx, queue2) - }() - } - } - } -} - -// sendBatch sends the queue provided and returns the responses to -// waiting go routines -func (c *PeerClient) sendBatch(ctx context.Context, queue []*request) { - batchSendTimer := prometheus.NewTimer(metricBatchSendDuration.WithLabelValues(c.conf.Info.GRPCAddress)) - defer batchSendTimer.ObserveDuration() - funcTimer := prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("PeerClient.sendBatch")) - defer funcTimer.ObserveDuration() - - var req GetPeerRateLimitsReq - for _, r := range queue { - // NOTE: This trace has the same name because it's in a separate trace than the one above. - // We link the two traces, so we can relate our rate limit trace back to the above trace. - r.ctx = tracing.StartNamedScope(r.ctx, "PeerClient.sendBatch", - trace.WithLinks(trace.LinkFromContext(ctx))) - // If no metadata is provided - if r.request.Metadata == nil { - r.request.Metadata = make(map[string]string) - } - // Propagate the trace context along with the batched rate limit so - // peers can continue to report traces for this rate limit. - prop := propagation.TraceContext{} - prop.Inject(r.ctx, &MetadataCarrier{Map: r.request.Metadata}) - req.Requests = append(req.Requests, r.request) - tracing.EndScope(r.ctx, nil) - - } - - timeoutCtx, timeoutCancel := context.WithTimeout(ctx, c.conf.Behavior.BatchTimeout) - resp, err := c.client.GetPeerRateLimits(timeoutCtx, &req) - timeoutCancel() - - // An error here indicates the entire request failed - if err != nil { - logPart := "Error in client.GetPeerRateLimits" - c.conf.Log.WithContext(ctx). - WithError(err). - WithFields(logrus.Fields{ - "queueLen": len(queue), - "batchTimeout": c.conf.Behavior.BatchTimeout.String(), - }). - Error(logPart) - err = errors.Wrap(err, logPart) - _ = c.setLastErr(err) - // metricCheckErrorCounter is updated within client.GetPeerRateLimits(). 
- - for _, r := range queue { - r.resp <- &response{err: err} - } - return - } - - // Unlikely, but this avoids a panic if something wonky happens - if len(resp.RateLimits) != len(queue) { - err = errors.New("server responded with incorrect rate limit list size") - - for _, r := range queue { - metricCheckErrorCounter.WithLabelValues("Item mismatch").Add(1) - r.resp <- &response{err: err} - } - return - } - - // Provide responses to channels waiting in the queue - for i, r := range queue { - r.resp <- &response{rl: resp.RateLimits[i]} - } -} - -// Shutdown will gracefully shutdown the client connection, until the context is cancelled -func (c *PeerClient) Shutdown(ctx context.Context) error { - // Take the write lock since we're going to modify the closing state - c.mutex.Lock() - if c.status == peerClosing || c.status == peerNotConnected { - c.mutex.Unlock() - return nil - } - defer c.mutex.Unlock() - - c.status = peerClosing - - defer func() { - if c.conn != nil { - c.conn.Close() - } - }() - - // This allows us to wait on the waitgroup, or until the context - // has been cancelled. This doesn't leak goroutines, because - // closing the connection will kill any outstanding requests. - waitChan := make(chan struct{}) - go func() { - c.wg.Wait() - close(c.queue) - close(waitChan) - }() - - select { - case <-ctx.Done(): - return ctx.Err() - case <-waitChan: - return nil - } -} - -// PeerErr is returned if the peer is not connected or is in a closing state -type PeerErr struct { - err error -} - -func (p *PeerErr) NotReady() bool { - return true -} - -func (p *PeerErr) Error() string { - return p.err.Error() -} - -func (p *PeerErr) Cause() error { - return p.err -} - -type notReadyErr interface { - NotReady() bool -} - -// IsNotReady returns true if the err is because the peer is not connected or in a closing state -func IsNotReady(err error) bool { - te, ok := err.(notReadyErr) - return ok && te.NotReady() -} diff --git a/peer_client_test.go b/peer_test.go similarity index 73% rename from peer_client_test.go rename to peer_test.go index 99924bed..3b307c5c 100644 --- a/peer_client_test.go +++ b/peer_test.go @@ -22,21 +22,21 @@ import ( "sync" "testing" - gubernator "github.com/mailgun/gubernator/v2" - "github.com/mailgun/gubernator/v2/cluster" + "github.com/stretchr/testify/require" + + "github.com/mailgun/gubernator/v3" + "github.com/mailgun/gubernator/v3/cluster" "github.com/mailgun/holster/v4/clock" "github.com/stretchr/testify/assert" ) func TestPeerClientShutdown(t *testing.T) { - type test struct { - Name string - Behavior gubernator.Behavior - } - const threads = 10 - cases := []test{ + cases := []struct { + Name string + Behavior gubernator.Behavior + }{ {"No batching", gubernator.Behavior_NO_BATCHING}, {"Batching", gubernator.Behavior_BATCHING}, {"Global", gubernator.Behavior_GLOBAL}, @@ -56,28 +56,29 @@ func TestPeerClientShutdown(t *testing.T) { c := cases[i] t.Run(c.Name, func(t *testing.T) { - client := gubernator.NewPeerClient(gubernator.PeerConfig{ - Info: cluster.GetRandomPeer(cluster.DataCenterNone), + client, err := gubernator.NewPeer(gubernator.PeerConfig{ + Info: cluster.GetRandomPeerInfo(cluster.DataCenterNone), Behavior: config, }) + require.NoError(t, err) wg := sync.WaitGroup{} wg.Add(threads) - // Spawn a whole bunch of concurrent requests to test shutdown in various states - for j := 0; j < threads; j++ { - go func() { + // Spawn a bunch of concurrent requests to test shutdown in various states + for i := 0; i < threads; i++ { + go func(client *gubernator.Peer, 
behavior gubernator.Behavior) { defer wg.Done() ctx := context.Background() - _, err := client.GetPeerRateLimit(ctx, &gubernator.RateLimitReq{ + _, err := client.Forward(ctx, &gubernator.RateLimitRequest{ Hits: 1, Limit: 100, - Behavior: c.Behavior, + Behavior: behavior, }) isExpectedErr := false switch err.(type) { - case *gubernator.PeerErr: + case *gubernator.ErrNotReady: isExpectedErr = true case nil: isExpectedErr = true @@ -85,13 +86,13 @@ func TestPeerClientShutdown(t *testing.T) { assert.True(t, true, isExpectedErr) - }() + }(client, c.Behavior) } // yield the processor that way we allow other goroutines to start their request runtime.Gosched() - err := client.Shutdown(context.Background()) + err = client.Close(context.Background()) assert.NoError(t, err) wg.Wait() diff --git a/peers.pb.go b/peers.pb.go deleted file mode 100644 index 597cd02a..00000000 --- a/peers.pb.go +++ /dev/null @@ -1,462 +0,0 @@ -// -//Copyright 2018-2022 Mailgun Technologies Inc -// -//Licensed under the Apache License, Version 2.0 (the "License"); -//you may not use this file except in compliance with the License. -//You may obtain a copy of the License at -// -//http://www.apache.org/licenses/LICENSE-2.0 -// -//Unless required by applicable law or agreed to in writing, software -//distributed under the License is distributed on an "AS IS" BASIS, -//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -//See the License for the specific language governing permissions and -//limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.7 -// source: peers.proto - -package gubernator - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type GetPeerRateLimitsReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Must specify at least one RateLimit. The peer that recives this request MUST be authoritative for - // each rate_limit[x].unique_key provided, as the peer will not forward the request to any other peers - Requests []*RateLimitReq `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` -} - -func (x *GetPeerRateLimitsReq) Reset() { - *x = GetPeerRateLimitsReq{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetPeerRateLimitsReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetPeerRateLimitsReq) ProtoMessage() {} - -func (x *GetPeerRateLimitsReq) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetPeerRateLimitsReq.ProtoReflect.Descriptor instead. 
-func (*GetPeerRateLimitsReq) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{0} -} - -func (x *GetPeerRateLimitsReq) GetRequests() []*RateLimitReq { - if x != nil { - return x.Requests - } - return nil -} - -type GetPeerRateLimitsResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Responses are in the same order as they appeared in the PeerRateLimitRequests - RateLimits []*RateLimitResp `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits,omitempty"` -} - -func (x *GetPeerRateLimitsResp) Reset() { - *x = GetPeerRateLimitsResp{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetPeerRateLimitsResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetPeerRateLimitsResp) ProtoMessage() {} - -func (x *GetPeerRateLimitsResp) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetPeerRateLimitsResp.ProtoReflect.Descriptor instead. -func (*GetPeerRateLimitsResp) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{1} -} - -func (x *GetPeerRateLimitsResp) GetRateLimits() []*RateLimitResp { - if x != nil { - return x.RateLimits - } - return nil -} - -type UpdatePeerGlobalsReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Must specify at least one RateLimit - Globals []*UpdatePeerGlobal `protobuf:"bytes,1,rep,name=globals,proto3" json:"globals,omitempty"` -} - -func (x *UpdatePeerGlobalsReq) Reset() { - *x = UpdatePeerGlobalsReq{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdatePeerGlobalsReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdatePeerGlobalsReq) ProtoMessage() {} - -func (x *UpdatePeerGlobalsReq) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdatePeerGlobalsReq.ProtoReflect.Descriptor instead. 
-func (*UpdatePeerGlobalsReq) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{2} -} - -func (x *UpdatePeerGlobalsReq) GetGlobals() []*UpdatePeerGlobal { - if x != nil { - return x.Globals - } - return nil -} - -type UpdatePeerGlobal struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Status *RateLimitResp `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` - Algorithm Algorithm `protobuf:"varint,3,opt,name=algorithm,proto3,enum=pb.gubernator.Algorithm" json:"algorithm,omitempty"` -} - -func (x *UpdatePeerGlobal) Reset() { - *x = UpdatePeerGlobal{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdatePeerGlobal) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdatePeerGlobal) ProtoMessage() {} - -func (x *UpdatePeerGlobal) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdatePeerGlobal.ProtoReflect.Descriptor instead. -func (*UpdatePeerGlobal) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{3} -} - -func (x *UpdatePeerGlobal) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *UpdatePeerGlobal) GetStatus() *RateLimitResp { - if x != nil { - return x.Status - } - return nil -} - -func (x *UpdatePeerGlobal) GetAlgorithm() Algorithm { - if x != nil { - return x.Algorithm - } - return Algorithm_TOKEN_BUCKET -} - -type UpdatePeerGlobalsResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *UpdatePeerGlobalsResp) Reset() { - *x = UpdatePeerGlobalsResp{} - if protoimpl.UnsafeEnabled { - mi := &file_peers_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdatePeerGlobalsResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdatePeerGlobalsResp) ProtoMessage() {} - -func (x *UpdatePeerGlobalsResp) ProtoReflect() protoreflect.Message { - mi := &file_peers_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdatePeerGlobalsResp.ProtoReflect.Descriptor instead. 
-func (*UpdatePeerGlobalsResp) Descriptor() ([]byte, []int) { - return file_peers_proto_rawDescGZIP(), []int{4} -} - -var File_peers_proto protoreflect.FileDescriptor - -var file_peers_proto_rawDesc = []byte{ - 0x0a, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x70, - 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x1a, 0x10, 0x67, 0x75, - 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4f, - 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, - 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x52, 0x65, 0x71, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, - 0x56, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3d, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, - 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x52, 0x61, - 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x52, 0x0a, 0x72, 0x61, 0x74, - 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x22, 0x51, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x12, - 0x39, 0x0a, 0x07, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x47, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x52, 0x07, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x73, 0x22, 0x92, 0x01, 0x0a, 0x10, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x34, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, - 0x72, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x52, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x36, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, - 0x69, 0x74, 0x68, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, - 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x6c, 0x67, 0x6f, 0x72, - 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x22, - 0x17, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x47, 0x6c, 0x6f, - 0x62, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x32, 0xcd, 0x01, 0x0a, 0x07, 0x50, 0x65, 0x65, - 0x72, 0x73, 0x56, 0x31, 0x12, 0x60, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x52, - 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x23, 0x2e, 0x70, 0x62, 0x2e, 0x67, - 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, - 0x72, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x24, - 0x2e, 0x70, 
0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x47, - 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x50, 0x65, 0x65, 0x72, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x73, 0x12, 0x23, 0x2e, 0x70, 0x62, - 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x71, - 0x1a, 0x24, 0x2e, 0x70, 0x62, 0x2e, 0x67, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x47, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x42, 0x22, 0x5a, 0x1d, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x61, 0x69, 0x6c, 0x67, 0x75, 0x6e, 0x2f, 0x67, - 0x75, 0x62, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x80, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_peers_proto_rawDescOnce sync.Once - file_peers_proto_rawDescData = file_peers_proto_rawDesc -) - -func file_peers_proto_rawDescGZIP() []byte { - file_peers_proto_rawDescOnce.Do(func() { - file_peers_proto_rawDescData = protoimpl.X.CompressGZIP(file_peers_proto_rawDescData) - }) - return file_peers_proto_rawDescData -} - -var file_peers_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_peers_proto_goTypes = []interface{}{ - (*GetPeerRateLimitsReq)(nil), // 0: pb.gubernator.GetPeerRateLimitsReq - (*GetPeerRateLimitsResp)(nil), // 1: pb.gubernator.GetPeerRateLimitsResp - (*UpdatePeerGlobalsReq)(nil), // 2: pb.gubernator.UpdatePeerGlobalsReq - (*UpdatePeerGlobal)(nil), // 3: pb.gubernator.UpdatePeerGlobal - (*UpdatePeerGlobalsResp)(nil), // 4: pb.gubernator.UpdatePeerGlobalsResp - (*RateLimitReq)(nil), // 5: pb.gubernator.RateLimitReq - (*RateLimitResp)(nil), // 6: pb.gubernator.RateLimitResp - (Algorithm)(0), // 7: pb.gubernator.Algorithm -} -var file_peers_proto_depIdxs = []int32{ - 5, // 0: pb.gubernator.GetPeerRateLimitsReq.requests:type_name -> pb.gubernator.RateLimitReq - 6, // 1: pb.gubernator.GetPeerRateLimitsResp.rate_limits:type_name -> pb.gubernator.RateLimitResp - 3, // 2: pb.gubernator.UpdatePeerGlobalsReq.globals:type_name -> pb.gubernator.UpdatePeerGlobal - 6, // 3: pb.gubernator.UpdatePeerGlobal.status:type_name -> pb.gubernator.RateLimitResp - 7, // 4: pb.gubernator.UpdatePeerGlobal.algorithm:type_name -> pb.gubernator.Algorithm - 0, // 5: pb.gubernator.PeersV1.GetPeerRateLimits:input_type -> pb.gubernator.GetPeerRateLimitsReq - 2, // 6: pb.gubernator.PeersV1.UpdatePeerGlobals:input_type -> pb.gubernator.UpdatePeerGlobalsReq - 1, // 7: pb.gubernator.PeersV1.GetPeerRateLimits:output_type -> pb.gubernator.GetPeerRateLimitsResp - 4, // 8: pb.gubernator.PeersV1.UpdatePeerGlobals:output_type -> pb.gubernator.UpdatePeerGlobalsResp - 7, // [7:9] is the sub-list for method output_type - 5, // [5:7] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name -} - -func init() { file_peers_proto_init() } -func file_peers_proto_init() { - if File_peers_proto != nil { - return - } - file_gubernator_proto_init() - if !protoimpl.UnsafeEnabled { - file_peers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*GetPeerRateLimitsReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_peers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetPeerRateLimitsResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_peers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdatePeerGlobalsReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_peers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdatePeerGlobal); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_peers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdatePeerGlobalsResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_peers_proto_rawDesc, - NumEnums: 0, - NumMessages: 5, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_peers_proto_goTypes, - DependencyIndexes: file_peers_proto_depIdxs, - MessageInfos: file_peers_proto_msgTypes, - }.Build() - File_peers_proto = out.File - file_peers_proto_rawDesc = nil - file_peers_proto_goTypes = nil - file_peers_proto_depIdxs = nil -} diff --git a/peers.pb.gw.go b/peers.pb.gw.go deleted file mode 100644 index 41f7d6e5..00000000 --- a/peers.pb.gw.go +++ /dev/null @@ -1,256 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: peers.proto - -/* -Package gubernator is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package gubernator - -import ( - "context" - "io" - "net/http" - - "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" - "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = metadata.Join - -func request_PeersV1_GetPeerRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, client PeersV1Client, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPeerRateLimitsReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.GetPeerRateLimits(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_PeersV1_GetPeerRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, server PeersV1Server, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GetPeerRateLimitsReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.GetPeerRateLimits(ctx, &protoReq) - return msg, metadata, err - -} - -func request_PeersV1_UpdatePeerGlobals_0(ctx context.Context, marshaler runtime.Marshaler, client PeersV1Client, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq UpdatePeerGlobalsReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.UpdatePeerGlobals(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_PeersV1_UpdatePeerGlobals_0(ctx context.Context, marshaler runtime.Marshaler, server PeersV1Server, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq UpdatePeerGlobalsReq - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.UpdatePeerGlobals(ctx, &protoReq) - return msg, metadata, err - -} - -// 
RegisterPeersV1HandlerServer registers the http handlers for service PeersV1 to "mux". -// UnaryRPC :call PeersV1Server directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterPeersV1HandlerFromEndpoint instead. -func RegisterPeersV1HandlerServer(ctx context.Context, mux *runtime.ServeMux, server PeersV1Server) error { - - mux.Handle("POST", pattern_PeersV1_GetPeerRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pb.gubernator.PeersV1/GetPeerRateLimits", runtime.WithHTTPPathPattern("/pb.gubernator.PeersV1/GetPeerRateLimits")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_PeersV1_GetPeerRateLimits_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_PeersV1_GetPeerRateLimits_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_PeersV1_UpdatePeerGlobals_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pb.gubernator.PeersV1/UpdatePeerGlobals", runtime.WithHTTPPathPattern("/pb.gubernator.PeersV1/UpdatePeerGlobals")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_PeersV1_UpdatePeerGlobals_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_PeersV1_UpdatePeerGlobals_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterPeersV1HandlerFromEndpoint is same as RegisterPeersV1Handler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterPeersV1HandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterPeersV1Handler(ctx, mux, conn) -} - -// RegisterPeersV1Handler registers the http handlers for service PeersV1 to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterPeersV1Handler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterPeersV1HandlerClient(ctx, mux, NewPeersV1Client(conn)) -} - -// RegisterPeersV1HandlerClient registers the http handlers for service PeersV1 -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "PeersV1Client". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "PeersV1Client" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "PeersV1Client" to call the correct interceptors. -func RegisterPeersV1HandlerClient(ctx context.Context, mux *runtime.ServeMux, client PeersV1Client) error { - - mux.Handle("POST", pattern_PeersV1_GetPeerRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pb.gubernator.PeersV1/GetPeerRateLimits", runtime.WithHTTPPathPattern("/pb.gubernator.PeersV1/GetPeerRateLimits")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_PeersV1_GetPeerRateLimits_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_PeersV1_GetPeerRateLimits_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_PeersV1_UpdatePeerGlobals_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pb.gubernator.PeersV1/UpdatePeerGlobals", runtime.WithHTTPPathPattern("/pb.gubernator.PeersV1/UpdatePeerGlobals")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_PeersV1_UpdatePeerGlobals_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - - forward_PeersV1_UpdatePeerGlobals_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_PeersV1_GetPeerRateLimits_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"pb.gubernator.PeersV1", "GetPeerRateLimits"}, "")) - - pattern_PeersV1_UpdatePeerGlobals_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"pb.gubernator.PeersV1", "UpdatePeerGlobals"}, "")) -) - -var ( - forward_PeersV1_GetPeerRateLimits_0 = runtime.ForwardResponseMessage - - forward_PeersV1_UpdatePeerGlobals_0 = runtime.ForwardResponseMessage -) diff --git a/peers_grpc.pb.go b/peers_grpc.pb.go deleted file mode 100644 index 355c43d6..00000000 --- a/peers_grpc.pb.go +++ /dev/null @@ -1,166 +0,0 @@ -// -//Copyright 2018-2022 Mailgun Technologies Inc -// -//Licensed under the Apache License, Version 2.0 (the "License"); -//you may not use this file except in compliance with the License. -//You may obtain a copy of the License at -// -//http://www.apache.org/licenses/LICENSE-2.0 -// -//Unless required by applicable law or agreed to in writing, software -//distributed under the License is distributed on an "AS IS" BASIS, -//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -//See the License for the specific language governing permissions and -//limitations under the License. - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v3.21.7 -// source: peers.proto - -package gubernator - -import ( - context "context" - - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - PeersV1_GetPeerRateLimits_FullMethodName = "/pb.gubernator.PeersV1/GetPeerRateLimits" - PeersV1_UpdatePeerGlobals_FullMethodName = "/pb.gubernator.PeersV1/UpdatePeerGlobals" -) - -// PeersV1Client is the client API for PeersV1 service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type PeersV1Client interface { - // Used by peers to relay batches of requests to an authoritative peer - GetPeerRateLimits(ctx context.Context, in *GetPeerRateLimitsReq, opts ...grpc.CallOption) (*GetPeerRateLimitsResp, error) - // Used by peers send global rate limit updates to other peers - UpdatePeerGlobals(ctx context.Context, in *UpdatePeerGlobalsReq, opts ...grpc.CallOption) (*UpdatePeerGlobalsResp, error) -} - -type peersV1Client struct { - cc grpc.ClientConnInterface -} - -func NewPeersV1Client(cc grpc.ClientConnInterface) PeersV1Client { - return &peersV1Client{cc} -} - -func (c *peersV1Client) GetPeerRateLimits(ctx context.Context, in *GetPeerRateLimitsReq, opts ...grpc.CallOption) (*GetPeerRateLimitsResp, error) { - out := new(GetPeerRateLimitsResp) - err := c.cc.Invoke(ctx, PeersV1_GetPeerRateLimits_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *peersV1Client) UpdatePeerGlobals(ctx context.Context, in *UpdatePeerGlobalsReq, opts ...grpc.CallOption) (*UpdatePeerGlobalsResp, error) { - out := new(UpdatePeerGlobalsResp) - err := c.cc.Invoke(ctx, PeersV1_UpdatePeerGlobals_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// PeersV1Server is the server API for PeersV1 service. 
-// All implementations must embed UnimplementedPeersV1Server -// for forward compatibility -type PeersV1Server interface { - // Used by peers to relay batches of requests to an authoritative peer - GetPeerRateLimits(context.Context, *GetPeerRateLimitsReq) (*GetPeerRateLimitsResp, error) - // Used by peers send global rate limit updates to other peers - UpdatePeerGlobals(context.Context, *UpdatePeerGlobalsReq) (*UpdatePeerGlobalsResp, error) - mustEmbedUnimplementedPeersV1Server() -} - -// UnimplementedPeersV1Server must be embedded to have forward compatible implementations. -type UnimplementedPeersV1Server struct { -} - -func (UnimplementedPeersV1Server) GetPeerRateLimits(context.Context, *GetPeerRateLimitsReq) (*GetPeerRateLimitsResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPeerRateLimits not implemented") -} -func (UnimplementedPeersV1Server) UpdatePeerGlobals(context.Context, *UpdatePeerGlobalsReq) (*UpdatePeerGlobalsResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdatePeerGlobals not implemented") -} -func (UnimplementedPeersV1Server) mustEmbedUnimplementedPeersV1Server() {} - -// UnsafePeersV1Server may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to PeersV1Server will -// result in compilation errors. -type UnsafePeersV1Server interface { - mustEmbedUnimplementedPeersV1Server() -} - -func RegisterPeersV1Server(s grpc.ServiceRegistrar, srv PeersV1Server) { - s.RegisterService(&PeersV1_ServiceDesc, srv) -} - -func _PeersV1_GetPeerRateLimits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPeerRateLimitsReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PeersV1Server).GetPeerRateLimits(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: PeersV1_GetPeerRateLimits_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PeersV1Server).GetPeerRateLimits(ctx, req.(*GetPeerRateLimitsReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _PeersV1_UpdatePeerGlobals_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdatePeerGlobalsReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PeersV1Server).UpdatePeerGlobals(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: PeersV1_UpdatePeerGlobals_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PeersV1Server).UpdatePeerGlobals(ctx, req.(*UpdatePeerGlobalsReq)) - } - return interceptor(ctx, in, info, handler) -} - -// PeersV1_ServiceDesc is the grpc.ServiceDesc for PeersV1 service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var PeersV1_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "pb.gubernator.PeersV1", - HandlerType: (*PeersV1Server)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetPeerRateLimits", - Handler: _PeersV1_GetPeerRateLimits_Handler, - }, - { - MethodName: "UpdatePeerGlobals", - Handler: _PeersV1_UpdatePeerGlobals_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "peers.proto", -} diff --git a/python/gubernator/__init__.py b/python/gubernator/__init__.py deleted file mode 100644 index b90c1a38..00000000 --- a/python/gubernator/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# This code is py3.7 and py2.7 compatible - -import gubernator.ratelimit_pb2_grpc as pb_grpc -from datetime import datetime - -import time -import grpc - -MILLISECOND = 1 -SECOND = MILLISECOND * 1000 -MINUTE = SECOND * 60 - - -def sleep_until_reset(reset_time): - now = datetime.now() - time.sleep((reset_time - now).seconds) - - -def V1Client(endpoint='127.0.0.1:9090'): - channel = grpc.insecure_channel(endpoint) - return pb_grpc.RateLimitServiceV1Stub(channel) diff --git a/python/gubernator/gubernator_pb2.py b/python/gubernator/gubernator_pb2.py deleted file mode 100644 index ed3b2695..00000000 --- a/python/gubernator/gubernator_pb2.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: gubernator.proto -"""Generated protocol buffer code.""" -from google.protobuf.internal import builder as _builder -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import symbol_database as _symbol_database -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10gubernator.proto\x12\rpb.gubernator\x1a\x1cgoogle/api/annotations.proto\"A\n\x10GetRateLimitsReq\x12-\n\x08requests\x18\x01 \x03(\x0b\x32\x1b.pb.gubernator.RateLimitReq\"D\n\x11GetRateLimitsResp\x12/\n\tresponses\x18\x01 \x03(\x0b\x32\x1c.pb.gubernator.RateLimitResp\"\xb4\x02\n\x0cRateLimitReq\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nunique_key\x18\x02 \x01(\t\x12\x0c\n\x04hits\x18\x03 \x01(\x03\x12\r\n\x05limit\x18\x04 \x01(\x03\x12\x10\n\x08\x64uration\x18\x05 \x01(\x03\x12+\n\talgorithm\x18\x06 \x01(\x0e\x32\x18.pb.gubernator.Algorithm\x12)\n\x08\x62\x65havior\x18\x07 \x01(\x0e\x32\x17.pb.gubernator.Behavior\x12\r\n\x05\x62urst\x18\x08 \x01(\x03\x12;\n\x08metadata\x18\t \x03(\x0b\x32).pb.gubernator.RateLimitReq.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xea\x01\n\rRateLimitResp\x12%\n\x06status\x18\x01 \x01(\x0e\x32\x15.pb.gubernator.Status\x12\r\n\x05limit\x18\x02 \x01(\x03\x12\x11\n\tremaining\x18\x03 \x01(\x03\x12\x12\n\nreset_time\x18\x04 \x01(\x03\x12\r\n\x05\x65rror\x18\x05 \x01(\t\x12<\n\x08metadata\x18\x06 \x03(\x0b\x32*.pb.gubernator.RateLimitResp.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x10\n\x0eHealthCheckReq\"F\n\x0fHealthCheckResp\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x12\n\npeer_count\x18\x03 
\x01(\x05*/\n\tAlgorithm\x12\x10\n\x0cTOKEN_BUCKET\x10\x00\x12\x10\n\x0cLEAKY_BUCKET\x10\x01*w\n\x08\x42\x65havior\x12\x0c\n\x08\x42\x41TCHING\x10\x00\x12\x0f\n\x0bNO_BATCHING\x10\x01\x12\n\n\x06GLOBAL\x10\x02\x12\x19\n\x15\x44URATION_IS_GREGORIAN\x10\x04\x12\x13\n\x0fRESET_REMAINING\x10\x08\x12\x10\n\x0cMULTI_REGION\x10\x10*)\n\x06Status\x12\x0f\n\x0bUNDER_LIMIT\x10\x00\x12\x0e\n\nOVER_LIMIT\x10\x01\x32\xdd\x01\n\x02V1\x12p\n\rGetRateLimits\x12\x1f.pb.gubernator.GetRateLimitsReq\x1a .pb.gubernator.GetRateLimitsResp\"\x1c\x82\xd3\xe4\x93\x02\x16\"\x11/v1/GetRateLimits:\x01*\x12\x65\n\x0bHealthCheck\x12\x1d.pb.gubernator.HealthCheckReq\x1a\x1e.pb.gubernator.HealthCheckResp\"\x17\x82\xd3\xe4\x93\x02\x11\x12\x0f/v1/HealthCheckB\"Z\x1dgithub.com/mailgun/gubernator\x80\x01\x01\x62\x06proto3') - -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'gubernator_pb2', globals()) -if _descriptor._USE_C_DESCRIPTORS == False: - - DESCRIPTOR._options = None - DESCRIPTOR._serialized_options = b'Z\035github.com/mailgun/gubernator\200\001\001' - _RATELIMITREQ_METADATAENTRY._options = None - _RATELIMITREQ_METADATAENTRY._serialized_options = b'8\001' - _RATELIMITRESP_METADATAENTRY._options = None - _RATELIMITRESP_METADATAENTRY._serialized_options = b'8\001' - _V1.methods_by_name['GetRateLimits']._options = None - _V1.methods_by_name['GetRateLimits']._serialized_options = b'\202\323\344\223\002\026\"\021/v1/GetRateLimits:\001*' - _V1.methods_by_name['HealthCheck']._options = None - _V1.methods_by_name['HealthCheck']._serialized_options = b'\202\323\344\223\002\021\022\017/v1/HealthCheck' - _ALGORITHM._serialized_start=840 - _ALGORITHM._serialized_end=887 - _BEHAVIOR._serialized_start=889 - _BEHAVIOR._serialized_end=1008 - _STATUS._serialized_start=1010 - _STATUS._serialized_end=1051 - _GETRATELIMITSREQ._serialized_start=65 - _GETRATELIMITSREQ._serialized_end=130 - _GETRATELIMITSRESP._serialized_start=132 - _GETRATELIMITSRESP._serialized_end=200 - _RATELIMITREQ._serialized_start=203 - _RATELIMITREQ._serialized_end=511 - _RATELIMITREQ_METADATAENTRY._serialized_start=464 - _RATELIMITREQ_METADATAENTRY._serialized_end=511 - _RATELIMITRESP._serialized_start=514 - _RATELIMITRESP._serialized_end=748 - _RATELIMITRESP_METADATAENTRY._serialized_start=464 - _RATELIMITRESP_METADATAENTRY._serialized_end=511 - _HEALTHCHECKREQ._serialized_start=750 - _HEALTHCHECKREQ._serialized_end=766 - _HEALTHCHECKRESP._serialized_start=768 - _HEALTHCHECKRESP._serialized_end=838 - _V1._serialized_start=1054 - _V1._serialized_end=1275 -# @@protoc_insertion_point(module_scope) diff --git a/python/gubernator/gubernator_pb2_grpc.py b/python/gubernator/gubernator_pb2_grpc.py deleted file mode 100644 index 02dd7792..00000000 --- a/python/gubernator/gubernator_pb2_grpc.py +++ /dev/null @@ -1,102 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -import gubernator_pb2 as gubernator__pb2 - - -class V1Stub(object): - """Missing associated documentation comment in .proto file.""" - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.GetRateLimits = channel.unary_unary( - '/pb.gubernator.V1/GetRateLimits', - request_serializer=gubernator__pb2.GetRateLimitsReq.SerializeToString, - response_deserializer=gubernator__pb2.GetRateLimitsResp.FromString, - ) - self.HealthCheck = channel.unary_unary( - '/pb.gubernator.V1/HealthCheck', - request_serializer=gubernator__pb2.HealthCheckReq.SerializeToString, - response_deserializer=gubernator__pb2.HealthCheckResp.FromString, - ) - - -class V1Servicer(object): - """Missing associated documentation comment in .proto file.""" - - def GetRateLimits(self, request, context): - """Given a list of rate limit requests, return the rate limits of each. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def HealthCheck(self, request, context): - """This method is for round trip benchmarking and can be used by - the client to determine connectivity to the server - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_V1Servicer_to_server(servicer, server): - rpc_method_handlers = { - 'GetRateLimits': grpc.unary_unary_rpc_method_handler( - servicer.GetRateLimits, - request_deserializer=gubernator__pb2.GetRateLimitsReq.FromString, - response_serializer=gubernator__pb2.GetRateLimitsResp.SerializeToString, - ), - 'HealthCheck': grpc.unary_unary_rpc_method_handler( - servicer.HealthCheck, - request_deserializer=gubernator__pb2.HealthCheckReq.FromString, - response_serializer=gubernator__pb2.HealthCheckResp.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'pb.gubernator.V1', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - - # This class is part of an EXPERIMENTAL API. -class V1(object): - """Missing associated documentation comment in .proto file.""" - - @staticmethod - def GetRateLimits(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/pb.gubernator.V1/GetRateLimits', - gubernator__pb2.GetRateLimitsReq.SerializeToString, - gubernator__pb2.GetRateLimitsResp.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def HealthCheck(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/pb.gubernator.V1/HealthCheck', - gubernator__pb2.HealthCheckReq.SerializeToString, - gubernator__pb2.HealthCheckResp.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/python/gubernator/peers_pb2.py b/python/gubernator/peers_pb2.py deleted file mode 100644 index 98ade704..00000000 --- a/python/gubernator/peers_pb2.py +++ /dev/null @@ -1,37 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: peers.proto -"""Generated protocol buffer code.""" -from google.protobuf.internal import builder as _builder -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import symbol_database as _symbol_database -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -import gubernator_pb2 as gubernator__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0bpeers.proto\x12\rpb.gubernator\x1a\x10gubernator.proto\"E\n\x14GetPeerRateLimitsReq\x12-\n\x08requests\x18\x01 \x03(\x0b\x32\x1b.pb.gubernator.RateLimitReq\"J\n\x15GetPeerRateLimitsResp\x12\x31\n\x0brate_limits\x18\x01 \x03(\x0b\x32\x1c.pb.gubernator.RateLimitResp\"H\n\x14UpdatePeerGlobalsReq\x12\x30\n\x07globals\x18\x01 \x03(\x0b\x32\x1f.pb.gubernator.UpdatePeerGlobal\"z\n\x10UpdatePeerGlobal\x12\x0b\n\x03key\x18\x01 \x01(\t\x12,\n\x06status\x18\x02 \x01(\x0b\x32\x1c.pb.gubernator.RateLimitResp\x12+\n\talgorithm\x18\x03 \x01(\x0e\x32\x18.pb.gubernator.Algorithm\"\x17\n\x15UpdatePeerGlobalsResp2\xcd\x01\n\x07PeersV1\x12`\n\x11GetPeerRateLimits\x12#.pb.gubernator.GetPeerRateLimitsReq\x1a$.pb.gubernator.GetPeerRateLimitsResp\"\x00\x12`\n\x11UpdatePeerGlobals\x12#.pb.gubernator.UpdatePeerGlobalsReq\x1a$.pb.gubernator.UpdatePeerGlobalsResp\"\x00\x42\"Z\x1dgithub.com/mailgun/gubernator\x80\x01\x01\x62\x06proto3') - -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'peers_pb2', globals()) -if _descriptor._USE_C_DESCRIPTORS == False: - - DESCRIPTOR._options = None - DESCRIPTOR._serialized_options = b'Z\035github.com/mailgun/gubernator\200\001\001' - _GETPEERRATELIMITSREQ._serialized_start=48 - _GETPEERRATELIMITSREQ._serialized_end=117 - _GETPEERRATELIMITSRESP._serialized_start=119 - _GETPEERRATELIMITSRESP._serialized_end=193 - _UPDATEPEERGLOBALSREQ._serialized_start=195 - _UPDATEPEERGLOBALSREQ._serialized_end=267 - _UPDATEPEERGLOBAL._serialized_start=269 - _UPDATEPEERGLOBAL._serialized_end=391 - _UPDATEPEERGLOBALSRESP._serialized_start=393 - _UPDATEPEERGLOBALSRESP._serialized_end=416 - _PEERSV1._serialized_start=419 - _PEERSV1._serialized_end=624 -# @@protoc_insertion_point(module_scope) diff --git a/python/gubernator/peers_pb2_grpc.py b/python/gubernator/peers_pb2_grpc.py deleted file mode 100644 index 7b8f4c99..00000000 --- a/python/gubernator/peers_pb2_grpc.py +++ /dev/null @@ -1,104 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -import peers_pb2 as peers__pb2 - - -class PeersV1Stub(object): - """NOTE: For use by gubernator peers only - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.GetPeerRateLimits = channel.unary_unary( - '/pb.gubernator.PeersV1/GetPeerRateLimits', - request_serializer=peers__pb2.GetPeerRateLimitsReq.SerializeToString, - response_deserializer=peers__pb2.GetPeerRateLimitsResp.FromString, - ) - self.UpdatePeerGlobals = channel.unary_unary( - '/pb.gubernator.PeersV1/UpdatePeerGlobals', - request_serializer=peers__pb2.UpdatePeerGlobalsReq.SerializeToString, - response_deserializer=peers__pb2.UpdatePeerGlobalsResp.FromString, - ) - - -class PeersV1Servicer(object): - """NOTE: For use by gubernator peers only - """ - - def GetPeerRateLimits(self, request, context): - """Used by peers to relay batches of requests to an authoritative peer - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def UpdatePeerGlobals(self, request, context): - """Used by peers send global rate limit updates to other peers - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_PeersV1Servicer_to_server(servicer, server): - rpc_method_handlers = { - 'GetPeerRateLimits': grpc.unary_unary_rpc_method_handler( - servicer.GetPeerRateLimits, - request_deserializer=peers__pb2.GetPeerRateLimitsReq.FromString, - response_serializer=peers__pb2.GetPeerRateLimitsResp.SerializeToString, - ), - 'UpdatePeerGlobals': grpc.unary_unary_rpc_method_handler( - servicer.UpdatePeerGlobals, - request_deserializer=peers__pb2.UpdatePeerGlobalsReq.FromString, - response_serializer=peers__pb2.UpdatePeerGlobalsResp.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'pb.gubernator.PeersV1', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - - # This class is part of an EXPERIMENTAL API. 
-class PeersV1(object): - """NOTE: For use by gubernator peers only - """ - - @staticmethod - def GetPeerRateLimits(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/pb.gubernator.PeersV1/GetPeerRateLimits', - peers__pb2.GetPeerRateLimitsReq.SerializeToString, - peers__pb2.GetPeerRateLimitsResp.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) - - @staticmethod - def UpdatePeerGlobals(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary(request, target, '/pb.gubernator.PeersV1/UpdatePeerGlobals', - peers__pb2.UpdatePeerGlobalsReq.SerializeToString, - peers__pb2.UpdatePeerGlobalsResp.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/python/requirements-py2.txt b/python/requirements-py2.txt deleted file mode 100644 index 7beae473..00000000 --- a/python/requirements-py2.txt +++ /dev/null @@ -1,15 +0,0 @@ -atomicwrites==1.3.0 -attrs==18.2.0 -enum34==1.1.6 -funcsigs==1.0.2 -futures==3.2.0 -googleapis-common-protos==1.5.8 -grpcio==1.53.0 -more-itertools==5.0.0 -pathlib2==2.3.3 -pluggy==0.8.1 -protobuf==3.18.3 -py==1.10.0 -pytest==7.2.0 -scandir==1.9.0 -six==1.12.0 diff --git a/python/requirements-py3.txt b/python/requirements-py3.txt deleted file mode 100644 index 03e84cbf..00000000 --- a/python/requirements-py3.txt +++ /dev/null @@ -1,11 +0,0 @@ -atomicwrites==1.3.0 -attrs==18.2.0 -googleapis-common-protos==1.5.8 -grpcio==1.53.0 -grpcio-tools==1.19.0 -more-itertools==6.0.0 -pluggy==0.8.1 -protobuf==3.18.3 -py==1.10.0 -pytest==7.2.0 -six==1.12.0 diff --git a/python/setup.py b/python/setup.py deleted file mode 100755 index eb8126c7..00000000 --- a/python/setup.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2018-2022 Mailgun Technologies Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -try: # for pip >= 10 - from pip._internal.req import parse_requirements -except ImportError: # for pip <= 9.0.3 - from pip.req import parse_requirements -from setuptools import setup, find_packages -import platform - -with open('version', 'r') as version_file: - version = version_file.readline().strip() - -if platform.python_version_tuple()[0] == '2': - reqs = parse_requirements('requirements-py2.txt', session='') -else: - reqs = parse_requirements('requirements-py3.txt', session='') - -requirements = [str(r.req) for r in reqs] - -setup( - name='gubernator', - version='0.1.0', - description="Python client for gubernator", - author="Derrick J. 
Wippler", - author_email='thrawn01@gmail.com', - url='https://github.com/mailgun/gubernator', - package_dir={'': '.'}, - packages=find_packages('.', exclude=['tests']), - install_requires=requirements, - license="Apache Software License 2.0", - python_requires='>=2.7', - classifiers=[ - 'Development Status :: 4 - Beta', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: Apache Software License', - 'Natural Language :: English', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - ], -) diff --git a/python/tests/__init__.py b/python/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/python/tests/test_client.py b/python/tests/test_client.py deleted file mode 100644 index 28efabfb..00000000 --- a/python/tests/test_client.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2018-2022 Mailgun Technologies Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from gubernator import ratelimit_pb2 as pb - -import pytest -import subprocess -import os -import gubernator - - -@pytest.fixture(scope='module') -def cluster(): - args = ["/bin/sh", "-c", - "go run ./cmd/gubernator-cluster/main.go"] - - os.chdir("golang") - proc = subprocess.Popen(args, stdout=subprocess.PIPE) - os.chdir("..") - - while True: - line = proc.stdout.readline() - if b'Ready' in line: - break - yield proc - proc.kill() - - -def test_health_check(cluster): - client = gubernator.V1Client() - resp = client.health_check() - print("Health:", resp) - - -def test_get_rate_limit(cluster): - req = pb.Requests() - rate_limit = req.requests.add() - - rate_limit.algorithm = pb.TOKEN_BUCKET - rate_limit.duration = gubernator.SECOND * 2 - rate_limit.limit = 10 - rate_limit.namespace = 'test-ns' - rate_limit.unique_key = 'domain-id-0001' - rate_limit.hits = 1 - - client = gubernator.V1Client() - resp = client.GetRateLimits(req, timeout=0.5) - print("RateLimit: {}".format(resp)) diff --git a/region_picker.go b/region_picker.go index 4bef59db..ca5f8df0 100644 --- a/region_picker.go +++ b/region_picker.go @@ -17,11 +17,11 @@ limitations under the License. 
 package gubernator
 
 type RegionPeerPicker interface {
-	GetClients(string) ([]*PeerClient, error)
-	GetByPeerInfo(PeerInfo) *PeerClient
+	GetClients(string) ([]*Peer, error)
+	GetByPeerInfo(PeerInfo) *Peer
 	Pickers() map[string]PeerPicker
-	Peers() []*PeerClient
-	Add(*PeerClient)
+	Peers() []*Peer
+	Add(*Peer)
 	New() RegionPeerPicker
 }
 
@@ -32,13 +32,13 @@ type RegionPicker struct {
 	// A map of all the pickers by region
 	regions map[string]PeerPicker
 	// The implementation of picker we will use for each region
-	reqQueue chan *RateLimitReq
+	reqQueue chan *RateLimitRequest
 }
 
 func NewRegionPicker(fn HashString64) *RegionPicker {
 	rp := &RegionPicker{
 		regions:                  make(map[string]PeerPicker),
-		reqQueue:                 make(chan *RateLimitReq),
+		reqQueue:                 make(chan *RateLimitRequest),
 		ReplicatedConsistentHash: NewReplicatedConsistentHash(fn, defaultReplicas),
 	}
 	return rp
@@ -48,14 +48,14 @@ func (rp *RegionPicker) New() RegionPeerPicker {
 	hash := rp.ReplicatedConsistentHash.New().(*ReplicatedConsistentHash)
 	return &RegionPicker{
 		regions:                  make(map[string]PeerPicker),
-		reqQueue:                 make(chan *RateLimitReq),
+		reqQueue:                 make(chan *RateLimitRequest),
 		ReplicatedConsistentHash: hash,
 	}
 }
 
 // GetClients returns all the PeerClients that match this key in all regions
-func (rp *RegionPicker) GetClients(key string) ([]*PeerClient, error) {
-	result := make([]*PeerClient, len(rp.regions))
+func (rp *RegionPicker) GetClients(key string) ([]*Peer, error) {
+	result := make([]*Peer, len(rp.regions))
 	var i int
 	for _, picker := range rp.regions {
 		peer, err := picker.Get(key)
@@ -68,8 +68,8 @@ func (rp *RegionPicker) GetClients(key string) ([]*PeerClient, error) {
 	return result, nil
 }
 
-// GetByPeerInfo returns the first PeerClient the PeerInfo.HasKey() matches
-func (rp *RegionPicker) GetByPeerInfo(info PeerInfo) *PeerClient {
+// GetByPeerInfo returns the first Peer the PeerInfo.HasKey() matches
+func (rp *RegionPicker) GetByPeerInfo(info PeerInfo) *Peer {
 	for _, picker := range rp.regions {
 		if client := picker.GetByPeerInfo(info); client != nil {
 			return client
@@ -83,8 +83,8 @@ func (rp *RegionPicker) Pickers() map[string]PeerPicker {
 	return rp.regions
 }
 
-func (rp *RegionPicker) Peers() []*PeerClient {
-	var peers []*PeerClient
+func (rp *RegionPicker) Peers() []*Peer {
+	var peers []*Peer
 
 	for _, picker := range rp.regions {
 		peers = append(peers, picker.Peers()...)
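For readers tracking the rename, the hunks here mechanically replace `PeerClient` with `Peer` and `RateLimitReq` with `RateLimitRequest` as part of the move off gRPC. Below is a minimal illustrative sketch of the renamed surface, not part of the patch itself, written as if inside `package gubernator`; the `printRegionOwners` helper is hypothetical and assumes `fmt` is imported:

```go
// printRegionOwners resolves the authoritative *Peer for a rate-limit key in
// every configured region and prints where the request would be forwarded.
// Note that PeerInfo now advertises HTTPAddress in place of the removed
// GRPCAddress (see the replicated_hash.go hunks that follow).
func printRegionOwners(rp *RegionPicker, key string) {
	peers, err := rp.GetClients(key) // one *Peer per region for this key
	if err != nil {
		fmt.Printf("no peer for %q: %v\n", key, err)
		return
	}
	for _, p := range peers {
		fmt.Println(p.Info().DataCenter, p.Info().HTTPAddress)
	}
}
```

The consistent hash itself is unchanged by the migration; it simply keys each peer on its advertised HTTP address instead of its gRPC address.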
@@ -93,7 +93,7 @@ func (rp *RegionPicker) Peers() []*PeerClient {
 	return peers
 }
 
-func (rp *RegionPicker) Add(peer *PeerClient) {
+func (rp *RegionPicker) Add(peer *Peer) {
 	picker, ok := rp.regions[peer.Info().DataCenter]
 	if !ok {
 		picker = rp.ReplicatedConsistentHash.New()
diff --git a/replicated_hash.go b/replicated_hash.go
index c53504e1..2f78fa4e 100644
--- a/replicated_hash.go
+++ b/replicated_hash.go
@@ -26,6 +26,14 @@ import (
 	"github.com/segmentio/fasthash/fnv1"
 )
 
+type PeerPicker interface {
+	GetByPeerInfo(PeerInfo) *Peer
+	Peers() []*Peer
+	Get(string) (*Peer, error)
+	New() PeerPicker
+	Add(*Peer)
+}
+
 const defaultReplicas = 512
 
 type HashString64 func(data string) uint64
@@ -36,19 +44,19 @@ var defaultHashString64 HashString64 = fnv1.HashString64
 
 type ReplicatedConsistentHash struct {
 	hashFunc HashString64
 	peerKeys []peerInfo
-	peers    map[string]*PeerClient
+	peers    map[string]*Peer
 	replicas int
 }
 
 type peerInfo struct {
 	hash uint64
-	peer *PeerClient
+	peer *Peer
 }
 
 func NewReplicatedConsistentHash(fn HashString64, replicas int) *ReplicatedConsistentHash {
 	ch := &ReplicatedConsistentHash{
 		hashFunc: fn,
-		peers:    make(map[string]*PeerClient),
+		peers:    make(map[string]*Peer),
 		replicas: replicas,
 	}
 
@@ -61,13 +69,13 @@ func NewReplicatedConsistentHash(fn HashString64, replicas int) *ReplicatedConsi
 func (ch *ReplicatedConsistentHash) New() PeerPicker {
 	return &ReplicatedConsistentHash{
 		hashFunc: ch.hashFunc,
-		peers:    make(map[string]*PeerClient),
+		peers:    make(map[string]*Peer),
 		replicas: ch.replicas,
 	}
 }
 
-func (ch *ReplicatedConsistentHash) Peers() []*PeerClient {
-	var results []*PeerClient
+func (ch *ReplicatedConsistentHash) Peers() []*Peer {
+	var results []*Peer
 	for _, v := range ch.peers {
 		results = append(results, v)
 	}
@@ -75,10 +83,10 @@ func (ch *ReplicatedConsistentHash) Peers() []*PeerClient {
 }
 
 // Adds a peer to the hash
-func (ch *ReplicatedConsistentHash) Add(peer *PeerClient) {
-	ch.peers[peer.Info().GRPCAddress] = peer
+func (ch *ReplicatedConsistentHash) Add(peer *Peer) {
+	ch.peers[peer.Info().HTTPAddress] = peer
 
-	key := fmt.Sprintf("%x", md5.Sum([]byte(peer.Info().GRPCAddress)))
+	key := fmt.Sprintf("%x", md5.Sum([]byte(peer.Info().HTTPAddress)))
 	for i := 0; i < ch.replicas; i++ {
 		hash := ch.hashFunc(strconv.Itoa(i) + key)
 		ch.peerKeys = append(ch.peerKeys, peerInfo{
@@ -96,12 +104,12 @@ func (ch *ReplicatedConsistentHash) Size() int {
 }
 
 // Returns the peer by hostname
-func (ch *ReplicatedConsistentHash) GetByPeerInfo(peer PeerInfo) *PeerClient {
-	return ch.peers[peer.GRPCAddress]
+func (ch *ReplicatedConsistentHash) GetByPeerInfo(peer PeerInfo) *Peer {
+	return ch.peers[peer.HTTPAddress]
}
 
 // Given a key, return the peer that key is assigned too
-func (ch *ReplicatedConsistentHash) Get(key string) (*PeerClient, error) {
+func (ch *ReplicatedConsistentHash) Get(key string) (*Peer, error) {
 	if ch.Size() == 0 {
 		return nil, errors.New("unable to pick a peer; pool is empty")
 	}
diff --git a/replicated_hash_test.go b/replicated_hash_test.go
index 699808bd..52b8a44d 100644
--- a/replicated_hash_test.go
+++ b/replicated_hash_test.go
@@ -32,7 +32,7 @@ func TestReplicatedConsistentHash(t *testing.T) {
 		hash := NewReplicatedConsistentHash(nil, defaultReplicas)
 
 		for _, h := range hosts {
-			hash.Add(&PeerClient{conf: PeerConfig{Info: PeerInfo{GRPCAddress: h}}})
+			hash.Add(&Peer{conf: PeerConfig{Info: PeerInfo{HTTPAddress: h}}})
 		}
 
 		assert.Equal(t, len(hosts), hash.Size())
@@ -40,16 +40,16 @@
 	t.Run("Host", func(t *testing.T) {
 		hash := NewReplicatedConsistentHash(nil, defaultReplicas)
 
-		hostMap := map[string]*PeerClient{}
+		hostMap := map[string]*Peer{}
 
 		for _, h := range hosts {
-			peer := &PeerClient{conf: PeerConfig{Info: PeerInfo{GRPCAddress: h}}}
+			peer := &Peer{conf: PeerConfig{Info: PeerInfo{HTTPAddress: h}}}
 			hash.Add(peer)
 			hostMap[h] = peer
 		}
 
 		for host, peer := range hostMap {
-			assert.Equal(t, peer, hash.GetByPeerInfo(PeerInfo{GRPCAddress: host}))
+			assert.Equal(t, peer, hash.GetByPeerInfo(PeerInfo{HTTPAddress: host}))
 		}
 	})
 
@@ -87,13 +87,13 @@ func TestReplicatedConsistentHash(t *testing.T) {
 			distribution := make(map[string]int)
 
 			for _, h := range hosts {
-				hash.Add(&PeerClient{conf: PeerConfig{Info: PeerInfo{GRPCAddress: h}}})
+				hash.Add(&Peer{conf: PeerConfig{Info: PeerInfo{HTTPAddress: h}}})
 				distribution[h] = 0
 			}
 
 			for i := range strings {
 				peer, _ := hash.Get(strings[i])
-				distribution[peer.Info().GRPCAddress]++
+				distribution[peer.Info().HTTPAddress]++
 			}
 			assert.Equal(t, tc.outDistribution, distribution)
 		})
@@ -118,7 +118,7 @@ func BenchmarkReplicatedConsistantHash(b *testing.B) {
 		hash := NewReplicatedConsistentHash(hashFunc, defaultReplicas)
 		hosts := []string{"a.svc.local", "b.svc.local", "c.svc.local"}
 		for _, h := range hosts {
-			hash.Add(&PeerClient{conf: PeerConfig{Info: PeerInfo{GRPCAddress: h}}})
+			hash.Add(&Peer{conf: PeerConfig{Info: PeerInfo{HTTPAddress: h}}})
 		}
 
 		b.ResetTimer()
diff --git a/store.go b/store.go
index 1c234610..faa2940e 100644
--- a/store.go
+++ b/store.go
@@ -47,16 +47,16 @@ type TokenBucketItem struct {
 // to maximize performance of gubernator.
 // Implementations MUST be threadsafe.
 type Store interface {
-	// Called by gubernator *after* a rate limit item is updated. It's up to the store to
+	// OnChange is called by gubernator *after* a rate limit item is updated. It's up to the store to
 	// decide if this rate limit item should be persisted in the store. It's up to the
 	// store to expire old rate limit items. The CacheItem represents the current state of
-	// the rate limit item *after* the RateLimitReq has been applied.
-	OnChange(ctx context.Context, r *RateLimitReq, item *CacheItem)
+	// the rate limit item *after* the RateLimitRequest has been applied.
+	OnChange(ctx context.Context, r *RateLimitRequest, item *CacheItem)
 
 	// Called by gubernator when a rate limit is missing from the cache. It's up to the store
 	// to decide if this request is fulfilled. Should return true if the request is fulfilled
 	// and false if the request is not fulfilled or doesn't exist in the store.
-	Get(ctx context.Context, r *RateLimitReq) (*CacheItem, bool)
+	Get(ctx context.Context, r *RateLimitRequest) (*CacheItem, bool)
 
 	// Called by gubernator when an existing rate limit should be removed from the store.
// NOTE: This is NOT called when an rate limit expires from the cache, store implementors @@ -95,12 +95,12 @@ type MockStore struct { var _ Store = &MockStore{} -func (ms *MockStore) OnChange(ctx context.Context, r *RateLimitReq, item *CacheItem) { +func (ms *MockStore) OnChange(ctx context.Context, r *RateLimitRequest, item *CacheItem) { ms.Called["OnChange()"] += 1 ms.CacheItems[item.Key] = item } -func (ms *MockStore) Get(ctx context.Context, r *RateLimitReq) (*CacheItem, bool) { +func (ms *MockStore) Get(ctx context.Context, r *RateLimitRequest) (*CacheItem, bool) { ms.Called["Get()"] += 1 item, ok := ms.CacheItems[r.HashKey()] return item, ok diff --git a/store_test.go b/store_test.go index 6d3cbe06..8d9f537c 100644 --- a/store_test.go +++ b/store_test.go @@ -18,81 +18,42 @@ package gubernator_test import ( "context" - "fmt" - "net" "testing" - gubernator "github.com/mailgun/gubernator/v2" + "github.com/mailgun/gubernator/v3" "github.com/mailgun/holster/v4/clock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "google.golang.org/grpc" ) -type v1Server struct { - conf gubernator.Config - listener net.Listener - srv *gubernator.V1Instance -} - -func (s *v1Server) Close() error { - s.conf.GRPCServers[0].GracefulStop() - return s.srv.Close() -} - -// Start a single instance of V1Server with the provided config and listening address. -func newV1Server(t *testing.T, address string, conf gubernator.Config) *v1Server { - t.Helper() - conf.GRPCServers = append(conf.GRPCServers, grpc.NewServer()) - - srv, err := gubernator.NewV1Instance(conf) - require.NoError(t, err) - - listener, err := net.Listen("tcp", address) - require.NoError(t, err) - - go func() { - if err := conf.GRPCServers[0].Serve(listener); err != nil { - fmt.Printf("while serving: %s\n", err) - } - }() - - srv.SetPeers([]gubernator.PeerInfo{{GRPCAddress: listener.Addr().String(), IsOwner: true}}) - - ctx, cancel := context.WithTimeout(context.Background(), clock.Second*10) - - err = gubernator.WaitForConnect(ctx, []string{listener.Addr().String()}) - require.NoError(t, err) - cancel() - - return &v1Server{ - conf: conf, - listener: listener, - srv: srv, - } -} - func TestLoader(t *testing.T) { loader := gubernator.NewMockLoader() - srv := newV1Server(t, "localhost:0", gubernator.Config{ + d, err := gubernator.SpawnDaemon(context.Background(), gubernator.DaemonConfig{ + HTTPListenAddress: "localhost:0", Behaviors: gubernator.BehaviorConfig{ + // Suitable for testing but not production GlobalSyncWait: clock.Millisecond * 50, // Suitable for testing but not production GlobalTimeout: clock.Second, }, Loader: loader, }) + assert.NoError(t, err) + conf := d.Config() + d.SetPeers([]gubernator.PeerInfo{{HTTPAddress: conf.HTTPListenAddress, IsOwner: true}}) + // loader.Load() should have been called for gubernator startup assert.Equal(t, 1, loader.Called["Load()"]) assert.Equal(t, 0, loader.Called["Save()"]) - client, err := gubernator.DialV1Server(srv.listener.Addr().String(), nil) - assert.Nil(t, err) + client, err := gubernator.NewClient(gubernator.WithNoTLS(d.Listener.Addr().String())) + assert.NoError(t, err) - resp, err := client.GetRateLimits(context.Background(), &gubernator.GetRateLimitsReq{ - Requests: []*gubernator.RateLimitReq{ + var resp gubernator.CheckRateLimitsResponse + err = client.CheckRateLimits(context.Background(), &gubernator.CheckRateLimitsRequest{ + Requests: []*gubernator.RateLimitRequest{ { Name: "test_over_limit", UniqueKey: "account:1234", @@ 
diff --git a/store_test.go b/store_test.go
index 6d3cbe06..8d9f537c 100644
--- a/store_test.go
+++ b/store_test.go
@@ -18,81 +18,42 @@ package gubernator_test
 
 import (
 	"context"
-	"fmt"
-	"net"
 	"testing"
 
-	gubernator "github.com/mailgun/gubernator/v2"
+	"github.com/mailgun/gubernator/v3"
 	"github.com/mailgun/holster/v4/clock"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
-	"google.golang.org/grpc"
 )
 
-type v1Server struct {
-	conf     gubernator.Config
-	listener net.Listener
-	srv      *gubernator.V1Instance
-}
-
-func (s *v1Server) Close() error {
-	s.conf.GRPCServers[0].GracefulStop()
-	return s.srv.Close()
-}
-
-// Start a single instance of V1Server with the provided config and listening address.
-func newV1Server(t *testing.T, address string, conf gubernator.Config) *v1Server {
-	t.Helper()
-	conf.GRPCServers = append(conf.GRPCServers, grpc.NewServer())
-
-	srv, err := gubernator.NewV1Instance(conf)
-	require.NoError(t, err)
-
-	listener, err := net.Listen("tcp", address)
-	require.NoError(t, err)
-
-	go func() {
-		if err := conf.GRPCServers[0].Serve(listener); err != nil {
-			fmt.Printf("while serving: %s\n", err)
-		}
-	}()
-
-	srv.SetPeers([]gubernator.PeerInfo{{GRPCAddress: listener.Addr().String(), IsOwner: true}})
-
-	ctx, cancel := context.WithTimeout(context.Background(), clock.Second*10)
-
-	err = gubernator.WaitForConnect(ctx, []string{listener.Addr().String()})
-	require.NoError(t, err)
-	cancel()
-
-	return &v1Server{
-		conf:     conf,
-		listener: listener,
-		srv:      srv,
-	}
-}
-
 func TestLoader(t *testing.T) {
 	loader := gubernator.NewMockLoader()
 
-	srv := newV1Server(t, "localhost:0", gubernator.Config{
+	d, err := gubernator.SpawnDaemon(context.Background(), gubernator.DaemonConfig{
+		HTTPListenAddress: "localhost:0",
 		Behaviors: gubernator.BehaviorConfig{
+			// Suitable for testing but not production
 			GlobalSyncWait: clock.Millisecond * 50, // Suitable for testing but not production
 			GlobalTimeout:  clock.Second,
 		},
 		Loader: loader,
 	})
+	assert.NoError(t, err)
+	conf := d.Config()
+	d.SetPeers([]gubernator.PeerInfo{{HTTPAddress: conf.HTTPListenAddress, IsOwner: true}})
 
 	// loader.Load() should have been called for gubernator startup
 	assert.Equal(t, 1, loader.Called["Load()"])
 	assert.Equal(t, 0, loader.Called["Save()"])
 
-	client, err := gubernator.DialV1Server(srv.listener.Addr().String(), nil)
-	assert.Nil(t, err)
+	client, err := gubernator.NewClient(gubernator.WithNoTLS(d.Listener.Addr().String()))
+	assert.NoError(t, err)
 
-	resp, err := client.GetRateLimits(context.Background(), &gubernator.GetRateLimitsReq{
-		Requests: []*gubernator.RateLimitReq{
+	var resp gubernator.CheckRateLimitsResponse
+	err = client.CheckRateLimits(context.Background(), &gubernator.CheckRateLimitsRequest{
+		Requests: []*gubernator.RateLimitRequest{
 			{
 				Name:      "test_over_limit",
 				UniqueKey: "account:1234",
@@ -102,14 +63,12 @@ func TestLoader(t *testing.T) {
 				Hits:      1,
 			},
 		},
-	})
-	require.Nil(t, err)
-	require.NotNil(t, resp)
+	}, &resp)
+	require.NoError(t, err)
 	require.Equal(t, 1, len(resp.Responses))
 	require.Equal(t, "", resp.Responses[0].Error)
 
-	err = srv.Close()
-	require.NoError(t, err, "Error in srv.Close")
+	d.Close(context.Background())
 
 	// Loader.Save() should been called during gubernator shutdown
 	assert.Equal(t, 1, loader.Called["Load()"])
@@ -126,31 +85,34 @@ func TestStore(t *testing.T) {
 	ctx := context.Background()
 
-	setup := func() (*MockStore2, *v1Server, gubernator.V1Client) {
+	setup := func() (*MockStore2, *gubernator.Daemon, gubernator.Client) {
 		store := &MockStore2{}
 
-		srv := newV1Server(t, "localhost:0", gubernator.Config{
+		d, err := gubernator.SpawnDaemon(context.Background(), gubernator.DaemonConfig{
+			HTTPListenAddress: "localhost:0",
 			Behaviors: gubernator.BehaviorConfig{
-				GlobalSyncWait: clock.Millisecond * 50, // Suitable for testing but not production
+				GlobalSyncWait: clock.Millisecond * 50,
 				GlobalTimeout:  clock.Second,
 			},
 			Store: store,
 		})
+		assert.NoError(t, err)
+		conf := d.Config()
+		d.SetPeers([]gubernator.PeerInfo{{HTTPAddress: conf.HTTPListenAddress, IsOwner: true}})
 
-		client, err := gubernator.DialV1Server(srv.listener.Addr().String(), nil)
+		client, err := gubernator.NewClient(gubernator.WithNoTLS(d.Listener.Addr().String()))
 		require.NoError(t, err)
 
-		return store, srv, client
+		return store, d, client
 	}
 
-	tearDown := func(srv *v1Server) {
-		err := srv.Close()
-		require.NoError(t, err)
+	tearDown := func(d *gubernator.Daemon) {
+		d.Close(context.Background())
 	}
 
 	// Create a mock argument matcher for a request by name/key.
-	matchReq := func(req *gubernator.RateLimitReq) interface{} {
-		return mock.MatchedBy(func(req2 *gubernator.RateLimitReq) bool {
+	matchReq := func(req *gubernator.RateLimitRequest) interface{} {
+		return mock.MatchedBy(func(req2 *gubernator.RateLimitRequest) bool {
 			return req2.Name == req.Name &&
 				req2.UniqueKey == req.UniqueKey
 		})
@@ -158,7 +120,7 @@ func TestStore(t *testing.T) {
 
 	// Create a mock argument matcher for CacheItem input.
 	// Verify item matches expected algorithm, limit, and duration.
-	matchItem := func(req *gubernator.RateLimitReq) interface{} {
+	matchItem := func(req *gubernator.RateLimitRequest) interface{} {
 		switch req.Algorithm {
 		case gubernator.Algorithm_TOKEN_BUCKET:
 			return mock.MatchedBy(func(item *gubernator.CacheItem) bool {
@@ -193,7 +155,7 @@ func TestStore(t *testing.T) {
 	}
 
 	// Create a bucket item matching the request.
-	createBucketItem := func(req *gubernator.RateLimitReq) interface{} {
+	createBucketItem := func(req *gubernator.RateLimitRequest) interface{} {
 		switch req.Algorithm {
 		case gubernator.Algorithm_TOKEN_BUCKET:
 			return &gubernator.TokenBucketItem{
@@ -230,7 +192,7 @@ func TestStore(t *testing.T) {
 		store, srv, client := setup()
 		defer tearDown(srv)
 
-		req := &gubernator.RateLimitReq{
+		req := &gubernator.RateLimitRequest{
 			Name:      "test_over_limit",
 			UniqueKey: "account:1234",
 			Algorithm: testCase.Algorithm,
@@ -244,12 +206,13 @@ func TestStore(t *testing.T) {
 		store.On("OnChange", mock.Anything, matchReq(req), matchItem(req)).Once()
 
 		// Call code.
-		resp, err := client.GetRateLimits(ctx, &gubernator.GetRateLimitsReq{
-			Requests: []*gubernator.RateLimitReq{req},
-		})
+		var resp gubernator.CheckRateLimitsResponse
+		err := client.CheckRateLimits(ctx, &gubernator.CheckRateLimitsRequest{
+			Requests: []*gubernator.RateLimitRequest{req},
+		}, &resp)
 		require.NoError(t, err)
-		require.NotNil(t, resp)
 		assert.Len(t, resp.Responses, 1)
+		assert.Equal(t, "", resp.Responses[0].Error)
 		assert.Equal(t, req.Limit, resp.Responses[0].Limit)
 		assert.Equal(t, gubernator.Status_UNDER_LIMIT, resp.Responses[0].Status)
 		store.AssertExpectations(t)
@@ -259,12 +222,13 @@ func TestStore(t *testing.T) {
 		store.On("OnChange", mock.Anything, matchReq(req), matchItem(req)).Once()
 
 		// Call code.
-		resp, err := client.GetRateLimits(ctx, &gubernator.GetRateLimitsReq{
-			Requests: []*gubernator.RateLimitReq{req},
-		})
+		var resp gubernator.CheckRateLimitsResponse
+		err := client.CheckRateLimits(ctx, &gubernator.CheckRateLimitsRequest{
+			Requests: []*gubernator.RateLimitRequest{req},
+		}, &resp)
 		require.NoError(t, err)
-		require.NotNil(t, resp)
 		assert.Len(t, resp.Responses, 1)
+		assert.Equal(t, "", resp.Responses[0].Error)
 		assert.Equal(t, req.Limit, resp.Responses[0].Limit)
 		assert.Equal(t, gubernator.Status_UNDER_LIMIT, resp.Responses[0].Status)
 		store.AssertExpectations(t)
@@ -275,7 +239,7 @@ func TestStore(t *testing.T) {
 		store, srv, client := setup()
 		defer tearDown(srv)
 
-		req := &gubernator.RateLimitReq{
+		req := &gubernator.RateLimitRequest{
 			Name:      "test_over_limit",
 			UniqueKey: "account:1234",
 			Algorithm: testCase.Algorithm,
@@ -298,12 +262,13 @@ func TestStore(t *testing.T) {
 		store.On("OnChange", mock.Anything, matchReq(req), matchItem(req)).Once()
 
 		// Call code.
-		resp, err := client.GetRateLimits(ctx, &gubernator.GetRateLimitsReq{
-			Requests: []*gubernator.RateLimitReq{req},
-		})
+		var resp gubernator.CheckRateLimitsResponse
+		err := client.CheckRateLimits(ctx, &gubernator.CheckRateLimitsRequest{
+			Requests: []*gubernator.RateLimitRequest{req},
+		}, &resp)
 		require.NoError(t, err)
-		require.NotNil(t, resp)
 		assert.Len(t, resp.Responses, 1)
+		assert.Equal(t, "", resp.Responses[0].Error)
 		assert.Equal(t, req.Limit, resp.Responses[0].Limit)
 		assert.Equal(t, gubernator.Status_UNDER_LIMIT, resp.Responses[0].Status)
 		store.AssertExpectations(t)
@@ -314,7 +279,7 @@ func TestStore(t *testing.T) {
 		store, srv, client := setup()
 		defer tearDown(srv)
 
-		req := &gubernator.RateLimitReq{
+		req := &gubernator.RateLimitRequest{
 			Name:      "test_over_limit",
 			UniqueKey: "account:1234",
 			Algorithm: testCase.Algorithm,
@@ -338,12 +303,13 @@ func TestStore(t *testing.T) {
 		store.On("OnChange", mock.Anything, matchReq(req), matchItem(req)).Once()
 
 		// Call code.
-		resp, err := client.GetRateLimits(ctx, &gubernator.GetRateLimitsReq{
-			Requests: []*gubernator.RateLimitReq{req},
-		})
+		var resp gubernator.CheckRateLimitsResponse
+		err := client.CheckRateLimits(ctx, &gubernator.CheckRateLimitsRequest{
+			Requests: []*gubernator.RateLimitRequest{req},
+		}, &resp)
 		require.NoError(t, err)
-		require.NotNil(t, resp)
 		assert.Len(t, resp.Responses, 1)
+		assert.Equal(t, "", resp.Responses[0].Error)
 		assert.Equal(t, req.Limit, resp.Responses[0].Limit)
 		assert.Equal(t, gubernator.Status_UNDER_LIMIT, resp.Responses[0].Status)
 		store.AssertExpectations(t)
@@ -360,7 +326,7 @@ func TestStore(t *testing.T) {
 		oldDuration := int64(5000)
 		newDuration := int64(8000)
-		req := &gubernator.RateLimitReq{
+		req := &gubernator.RateLimitRequest{
 			Name:      "test_over_limit",
 			UniqueKey: "account:1234",
 			Algorithm: testCase.Algorithm,
@@ -427,12 +393,13 @@ func TestStore(t *testing.T) {
 			Once()
 
 		// Call code.
-		resp, err := client.GetRateLimits(ctx, &gubernator.GetRateLimitsReq{
-			Requests: []*gubernator.RateLimitReq{req},
-		})
+		var resp gubernator.CheckRateLimitsResponse
+		err := client.CheckRateLimits(ctx, &gubernator.CheckRateLimitsRequest{
+			Requests: []*gubernator.RateLimitRequest{req},
+		}, &resp)
 		require.NoError(t, err)
-		require.NotNil(t, resp)
 		assert.Len(t, resp.Responses, 1)
+		assert.Equal(t, "", resp.Responses[0].Error)
 		assert.Equal(t, req.Limit, resp.Responses[0].Limit)
 		assert.Equal(t, gubernator.Status_UNDER_LIMIT, resp.Responses[0].Status)
 		store.AssertExpectations(t)
@@ -447,7 +414,7 @@ func TestStore(t *testing.T) {
 		oldDuration := int64(500000)
 		newDuration := int64(8000)
-		req := &gubernator.RateLimitReq{
+		req := &gubernator.RateLimitRequest{
 			Name:      "test_over_limit",
 			UniqueKey: "account:1234",
 			Algorithm: testCase.Algorithm,
@@ -517,12 +484,13 @@ func TestStore(t *testing.T) {
 			Once()
 
 		// Call code.
-		resp, err := client.GetRateLimits(ctx, &gubernator.GetRateLimitsReq{
-			Requests: []*gubernator.RateLimitReq{req},
-		})
+		var resp gubernator.CheckRateLimitsResponse
+		err := client.CheckRateLimits(ctx, &gubernator.CheckRateLimitsRequest{
+			Requests: []*gubernator.RateLimitRequest{req},
+		}, &resp)
 		require.NoError(t, err)
-		require.NotNil(t, resp)
 		assert.Len(t, resp.Responses, 1)
+		assert.Equal(t, "", resp.Responses[0].Error)
 		assert.Equal(t, req.Limit, resp.Responses[0].Limit)
 		assert.Equal(t, gubernator.Status_UNDER_LIMIT, resp.Responses[0].Status)
 		store.AssertExpectations(t)
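These test changes all follow the same v3 client pattern: build a client with `NewClient`, then pass the response struct as an out-parameter to `CheckRateLimits`. Extracted from the test harness, a standalone sketch looks roughly like this (the address and limit values are illustrative; `Duration` is in milliseconds):

```go
package main

import (
	"context"
	"fmt"

	"github.com/mailgun/gubernator/v3"
)

func main() {
	// Address is illustrative; any reachable gubernator HTTP endpoint works.
	client, err := gubernator.NewClient(gubernator.WithNoTLS("localhost:9080"))
	if err != nil {
		panic(err)
	}

	// v3 fills the response through an out-parameter instead of returning it.
	var resp gubernator.CheckRateLimitsResponse
	err = client.CheckRateLimits(context.Background(), &gubernator.CheckRateLimitsRequest{
		Requests: []*gubernator.RateLimitRequest{{
			Name:      "requests_per_sec",
			UniqueKey: "account:1234",
			Algorithm: gubernator.Algorithm_TOKEN_BUCKET,
			Duration:  60000, // milliseconds
			Limit:     10,
			Hits:      1,
		}},
	}, &resp)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Responses[0].Status, resp.Responses[0].Remaining)
}
```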
diff --git a/tls.go b/tls.go
index 5a050479..f6ee1f14 100644
--- a/tls.go
+++ b/tls.go
@@ -269,7 +269,6 @@ func SetupTLS(conf *TLSConfig) error {
 	}
 
 	// error if neither was provided
-	//nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated because cert does not come from SystemCertPool.
 	if len(clientPool.Subjects()) == 0 {
 		return errors.New("client auth enabled, but no CA's provided")
 	}
diff --git a/tls_test.go b/tls_test.go
index 0b2cc3e5..26ce2640 100644
--- a/tls_test.go
+++ b/tls_test.go
@@ -25,7 +25,7 @@ import (
 	"strings"
 	"testing"
 
-	gubernator "github.com/mailgun/gubernator/v2"
+	"github.com/mailgun/gubernator/v3"
 	"github.com/mailgun/holster/v4/clock"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -39,18 +39,19 @@ func spawnDaemon(t *testing.T, conf gubernator.DaemonConfig) *gubernator.Daemon
 	d, err := gubernator.SpawnDaemon(ctx, conf)
 	cancel()
 	require.NoError(t, err)
-	d.SetPeers([]gubernator.PeerInfo{{GRPCAddress: conf.GRPCListenAddress, IsOwner: true}})
+	d.SetPeers([]gubernator.PeerInfo{{HTTPAddress: conf.HTTPListenAddress, IsOwner: true}})
 	return d
 }
 
 func makeRequest(t *testing.T, conf gubernator.DaemonConfig) error {
 	t.Helper()
 
-	client, err := gubernator.DialV1Server(conf.GRPCListenAddress, conf.TLS.ClientTLS)
+	client, err := gubernator.NewClient(gubernator.WithTLS(conf.ClientTLS(), conf.HTTPListenAddress))
 	require.NoError(t, err)
 
-	resp, err := client.GetRateLimits(context.Background(), &gubernator.GetRateLimitsReq{
-		Requests: []*gubernator.RateLimitReq{
+	var resp gubernator.CheckRateLimitsResponse
+	err = client.CheckRateLimits(context.Background(), &gubernator.CheckRateLimitsRequest{
+		Requests: []*gubernator.RateLimitRequest{
 			{
 				Name:      "test_tls",
 				UniqueKey: "account:995",
@@ -60,7 +61,7 @@ func makeRequest(t *testing.T, conf gubernator.DaemonConfig) error {
 				Hits:      1,
 			},
 		},
-	})
+	}, &resp)
 
 	if err != nil {
 		return err
@@ -120,18 +121,18 @@ func TestSetupTLS(t *testing.T) {
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			conf := gubernator.DaemonConfig{
-				GRPCListenAddress: "127.0.0.1:9695",
 				HTTPListenAddress: "127.0.0.1:9685",
 				TLS:               tt.tls,
 			}
 
 			d := spawnDaemon(t, conf)
 
-			client, err := gubernator.DialV1Server(conf.GRPCListenAddress, tt.tls.ClientTLS)
+			client, err := gubernator.NewClient(gubernator.WithTLS(conf.ClientTLS(), conf.HTTPListenAddress))
 			require.NoError(t, err)
 
-			resp, err := client.GetRateLimits(context.Background(), &gubernator.GetRateLimitsReq{
-				Requests: []*gubernator.RateLimitReq{
+			var resp gubernator.CheckRateLimitsResponse
+			err = client.CheckRateLimits(context.Background(), &gubernator.CheckRateLimitsRequest{
+				Requests: []*gubernator.RateLimitRequest{
 					{
 						Name:      "test_tls",
 						UniqueKey: "account:995",
@@ -141,21 +142,20 @@ func TestSetupTLS(t *testing.T) {
 						Hits:      1,
 					},
 				},
-			})
+			}, &resp)
 			require.NoError(t, err)
 
 			rl := resp.Responses[0]
 			assert.Equal(t, "", rl.Error)
 			assert.Equal(t, gubernator.Status_UNDER_LIMIT, rl.Status)
 			assert.Equal(t, int64(99), rl.Remaining)
-			d.Close()
+			d.Close(context.Background())
 		})
 	}
 }
 
 func TestSetupTLSSkipVerify(t *testing.T) {
 	conf := gubernator.DaemonConfig{
-		GRPCListenAddress: "127.0.0.1:9695",
 		HTTPListenAddress: "127.0.0.1:9685",
 		TLS: &gubernator.TLSConfig{
 			CaFile:   "contrib/certs/ca.cert",
@@ -165,7 +165,7 @@ func TestSetupTLSSkipVerify(t *testing.T) {
 	}
 
 	d := spawnDaemon(t, conf)
-	defer d.Close()
+	defer d.Close(context.Background())
 
 	tls := &gubernator.TLSConfig{
 		AutoTLS:            true,
@@ -190,13 +190,12 @@ func TestSetupTLSClientAuth(t *testing.T) {
 	}
 
 	conf := gubernator.DaemonConfig{
-		GRPCListenAddress: "127.0.0.1:9695",
 		HTTPListenAddress: "127.0.0.1:9685",
 		TLS:               &serverTLS,
 	}
 
 	d := spawnDaemon(t, conf)
-	defer d.Close()
+	defer d.Close(context.Background())
 
 	// Given generated client certs
 	tls := &gubernator.TLSConfig{
@@ -211,7 +210,7 @@ func TestSetupTLSClientAuth(t *testing.T) {
 	// Should not be allowed without a cert signed by the client CA
 	err = makeRequest(t, conf)
 	require.Error(t, err)
-	assert.Contains(t, err.Error(), "code = Unavailable desc")
+	assert.Contains(t, err.Error(), "tls: certificate required")
 
 	// Given the client auth certs
 	tls = &gubernator.TLSConfig{
@@ -238,27 +237,23 @@ func TestTLSClusterWithClientAuthentication(t *testing.T) {
 	}
 
 	d1 := spawnDaemon(t, gubernator.DaemonConfig{
-		GRPCListenAddress: "127.0.0.1:9695",
 		HTTPListenAddress: "127.0.0.1:9685",
 		TLS:               &serverTLS,
 	})
-	defer d1.Close()
+	defer d1.Close(context.Background())
 
 	d2 := spawnDaemon(t, gubernator.DaemonConfig{
-		GRPCListenAddress: "127.0.0.1:9696",
 		HTTPListenAddress: "127.0.0.1:9686",
 		TLS:               &serverTLS,
 	})
-	defer d2.Close()
+	defer d2.Close(context.Background())
 
 	peers := []gubernator.PeerInfo{
 		{
-			GRPCAddress: d1.GRPCListeners[0].Addr().String(),
-			HTTPAddress: d1.HTTPListener.Addr().String(),
+			HTTPAddress: d1.Listener.Addr().String(),
 		},
 		{
-			GRPCAddress: d2.GRPCListeners[0].Addr().String(),
-			HTTPAddress: d2.HTTPListener.Addr().String(),
+			HTTPAddress: d2.Listener.Addr().String(),
 		},
 	}
 	d1.SetPeers(peers)
@@ -281,13 +276,12 @@ func TestTLSClusterWithClientAuthentication(t *testing.T) {
 
 	b, err := io.ReadAll(resp.Body)
 	require.NoError(t, err)
 
-	// Should have called GetPeerRateLimits on d2
-	assert.Contains(t, string(b), `{method="/pb.gubernator.PeersV1/GetPeerRateLimits"} 1`)
+	// Should have called /v1/peer.forward on d2
+	assert.Contains(t, string(b), `{path="`+gubernator.RPCPeerForward+`"} 1`)
 }
 
 func TestHTTPSClientAuth(t *testing.T) {
 	conf := gubernator.DaemonConfig{
-		GRPCListenAddress:       "127.0.0.1:9695",
 		HTTPListenAddress:       "127.0.0.1:9685",
 		HTTPStatusListenAddress: "127.0.0.1:9686",
 		TLS: &gubernator.TLSConfig{
@@ -299,7 +293,7 @@ func TestHTTPSClientAuth(t *testing.T) {
 	}
 
 	d := spawnDaemon(t, conf)
-	defer d.Close()
+	defer d.Close(context.Background())
 
 	clientWithCert := &http.Client{
 		Transport: &http.Transport{
@@ -315,9 +309,9 @@ func TestHTTPSClientAuth(t *testing.T) {
 		},
 	}
 
-	reqCertRequired, err := http.NewRequest(http.MethodGet, fmt.Sprintf("https://%s/v1/HealthCheck", conf.HTTPListenAddress), nil)
+	reqCertRequired, err := http.NewRequest(http.MethodGet, fmt.Sprintf("https://%s/healthz", conf.HTTPListenAddress), nil)
 	require.NoError(t, err)
-	reqNoClientCertRequired, err := http.NewRequest(http.MethodGet, fmt.Sprintf("https://%s/v1/HealthCheck", conf.HTTPStatusListenAddress), nil)
+	reqNoClientCertRequired, err := http.NewRequest(http.MethodGet, fmt.Sprintf("https://%s/healthz", conf.HTTPStatusListenAddress), nil)
 	require.NoError(t, err)
 
 	// Test that a client without a cert can access /v1/HealthCheck at status address
@@ -326,18 +320,18 @@ func TestHTTPSClientAuth(t *testing.T) {
 	defer resp.Body.Close()
 	b, err := io.ReadAll(resp.Body)
 	require.NoError(t, err)
-	assert.Equal(t, `{"status":"healthy","message":"","peer_count":1}`, strings.ReplaceAll(string(b), " ", ""))
+	assert.Equal(t, `{"status":"healthy","peerCount":1}`, strings.ReplaceAll(string(b), " ", ""))
 
 	// Verify we get an error when we try to access existing HTTPListenAddress without cert
-	//nolint:bodyclose // Expect error, no body to close.
-	_, err = clientWithoutCert.Do(reqCertRequired)
+	_, err = clientWithoutCert.Do(reqCertRequired) //nolint:all
 	require.Error(t, err)
+	assert.Contains(t, err.Error(), "remote error: tls: certificate required")
 
-	// Check that with a valid client cert we can access /v1/HealthCheck at existing HTTPListenAddress
-	resp2, err := clientWithCert.Do(reqCertRequired)
+	// Check that with a valid client cert we can access /healthz at existing HTTPListenAddress
+	resp3, err := clientWithCert.Do(reqCertRequired)
 	require.NoError(t, err)
-	defer resp2.Body.Close()
-	b, err = io.ReadAll(resp2.Body)
+	defer resp3.Body.Close()
+	b, err = io.ReadAll(resp3.Body)
 	require.NoError(t, err)
-	assert.Equal(t, `{"status":"healthy","message":"","peer_count":1}`, strings.ReplaceAll(string(b), " ", ""))
+	assert.Equal(t, `{"status":"healthy","peerCount":1}`, strings.ReplaceAll(string(b), " ", ""))
 }
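The TLS tests above capture the whole v3 handshake story: a single HTTP listener serves client, peer, and health traffic, and auth failures now surface as `tls: certificate required` rather than a gRPC status. A condensed, hypothetical setup using only calls visible in this diff (`SpawnDaemon`, `ClientTLS()`, `WithTLS`, and the context-aware `Close`); the `CertFile`/`KeyFile` paths are placeholders:

```go
package main

import (
	"context"

	"github.com/mailgun/gubernator/v3"
)

func main() {
	ctx := context.Background()
	conf := gubernator.DaemonConfig{
		HTTPListenAddress: "127.0.0.1:9685",
		TLS: &gubernator.TLSConfig{
			// CaFile appears verbatim in this diff; the cert/key paths are placeholders.
			CaFile:   "contrib/certs/ca.cert",
			CertFile: "contrib/certs/gubernator.pem",
			KeyFile:  "contrib/certs/gubernator.key",
		},
	}

	d, err := gubernator.SpawnDaemon(ctx, conf)
	if err != nil {
		panic(err)
	}
	defer d.Close(ctx) // v3: Close takes a context

	// ClientTLS() derives the client-side TLS config from the daemon config.
	client, err := gubernator.NewClient(gubernator.WithTLS(conf.ClientTLS(), conf.HTTPListenAddress))
	if err != nil {
		panic(err)
	}
	_ = client // ready for CheckRateLimits calls as shown earlier
}
```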
diff --git a/workers.go b/workers.go
index 07ba177f..e42a307f 100644
--- a/workers.go
+++ b/workers.go
@@ -44,13 +44,14 @@ import (
 	"sync/atomic"
 
 	"github.com/OneOfOne/xxhash"
-	"github.com/mailgun/holster/v4/errors"
+	"github.com/mailgun/errors"
 	"github.com/mailgun/holster/v4/setter"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/sirupsen/logrus"
 	"go.opentelemetry.io/otel/trace"
 )
 
+// TODO: Make this Private
 type WorkerPool struct {
 	hasher  workerHasher
 	workers []*Worker
@@ -60,6 +61,7 @@ type WorkerPool struct {
 	done chan struct{}
 }
 
+// TODO: Make this Private
 type Worker struct {
 	name string
 	conf *Config
@@ -258,7 +260,7 @@ func (p *WorkerPool) dispatch(worker *Worker) {
 }
 
 // GetRateLimit sends a GetRateLimit request to worker pool.
-func (p *WorkerPool) GetRateLimit(ctx context.Context, rlRequest *RateLimitReq) (retval *RateLimitResp, reterr error) {
+func (p *WorkerPool) GetRateLimit(ctx context.Context, rlRequest *RateLimitRequest) (retval *RateLimitResponse, reterr error) {
 	// Delegate request to assigned channel based on request key.
 	worker := p.getWorker(rlRequest.HashKey())
 	queueGauge := metricWorkerQueue.WithLabelValues("GetRateLimit", worker.name)
@@ -289,9 +291,9 @@ func (p *WorkerPool) GetRateLimit(ctx context.Context, rlRequest *RateLimitReq)
 }
 
 // Handle request received by worker.
-func (worker *Worker) handleGetRateLimit(ctx context.Context, req *RateLimitReq, cache Cache) (*RateLimitResp, error) {
+func (worker *Worker) handleGetRateLimit(ctx context.Context, req *RateLimitRequest, cache Cache) (*RateLimitResponse, error) {
 	defer prometheus.NewTimer(metricFuncTimeDuration.WithLabelValues("Worker.handleGetRateLimit")).ObserveDuration()
-	var rlResponse *RateLimitResp
+	var rlResponse *RateLimitResponse
 	var err error
 
 	switch req.Algorithm {
diff --git a/workers_test.go b/workers_test.go
index 5a14c941..dd4b855e 100644
--- a/workers_test.go
+++ b/workers_test.go
@@ -22,7 +22,7 @@ import (
 	"sort"
 	"testing"
 
-	guber "github.com/mailgun/gubernator/v2"
+	guber "github.com/mailgun/gubernator/v3"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
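A note on the `WorkerPool` hunks: `GetRateLimit` routes each request to a worker chosen by hashing `rlRequest.HashKey()`, so a given key is always serialized through the same goroutine and the cache needs no per-key locking. The standalone sketch below only illustrates that hash-then-modulo idea; the pool's actual `workerHasher` may select workers differently:

```go
package main

import (
	"fmt"

	"github.com/OneOfOne/xxhash" // the same hashing package workers.go imports
)

func main() {
	const workers = 4
	for _, key := range []string{"account:1234_requests_per_sec", "account:5678_requests_per_sec"} {
		// Hash the rate-limit key and pick a worker by modulo; identical
		// keys always land on the same worker.
		idx := xxhash.ChecksumString64(key) % workers
		fmt.Printf("%s -> worker %d\n", key, idx)
	}
}
```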