diff --git a/.github/workflows/goreleaser.yml b/.github/workflows/goreleaser.yml new file mode 100644 index 0000000..79dbdb0 --- /dev/null +++ b/.github/workflows/goreleaser.yml @@ -0,0 +1,38 @@ +name: goreleaser + +on: + push: + tags: + - '*' + +permissions: + contents: write + +jobs: + goreleaser: + runs-on: ubuntu-latest + env: + DOCKER_CLI_EXPERIMENTAL: "enabled" + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + - name: Docker Login + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.23.0 + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v2 + with: + version: latest + args: release --clean + env: + GITHUB_TOKEN: ${{ secrets.GH_PAT }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..fdeb986 --- /dev/null +++ b/.gitignore @@ -0,0 +1,25 @@ +### Go template +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + + +dist/ diff --git a/.goreleaser.yaml b/.goreleaser.yaml new file mode 100644 index 0000000..5e1becc --- /dev/null +++ b/.goreleaser.yaml @@ -0,0 +1,69 @@ +# This is an example .goreleaser.yml file with some sensible defaults. +# Make sure to check the documentation at https://goreleaser.com + +# The lines below are called `modelines`. See `:help modeline` +# Feel free to remove those if you don't want/need to use them. 
+# yaml-language-server: $schema=https://goreleaser.com/static/schema.json +# vim: set ts=2 sw=2 tw=0 fo=cnqoj + +version: 2 + +before: + hooks: + # You may remove this if you don't use go modules. + - go mod tidy + # you may remove this if you don't need go generate + - go generate ./... + - go test ./... + +builds: + - env: + - CGO_ENABLED=0 + goarch: + - amd64 + - arm64 + goos: + - linux + - windows + - darwin + +archives: + - format: tar.gz + # this name template makes the OS and Arch compatible with the results of `uname`. + name_template: >- + {{ .ProjectName }}_ + {{- title .Os }}_ + {{- if eq .Arch "amd64" }}x86_64 + {{- else if eq .Arch "386" }}i386 + {{- else }}{{ .Arch }}{{ end }} + {{- if .Arm }}v{{ .Arm }}{{ end }} + # use zip for windows archives + format_overrides: + - goos: windows + format: zip + +changelog: + sort: asc + filters: + exclude: + - "^docs:" + - "^test:" + +dockers: + - image_templates: + - "paragor/simple_cdn:{{ .Tag }}-amd64" + goarch: amd64 + use: buildx + build_flag_templates: + - "--builder=default" + - image_templates: + - "paragor/simple_cdn:{{ .Tag }}-arm64" + goarch: arm64 + use: buildx + build_flag_templates: + - "--builder=default" +docker_manifests: + - name_template: "paragor/simple_cdn:{{ .Tag }}" + image_templates: + - "paragor/simple_cdn:{{ .Tag }}-amd64" + - "paragor/simple_cdn:{{ .Tag }}-arm64" diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..1881cfb --- /dev/null +++ b/Dockerfile @@ -0,0 +1,6 @@ +FROM alpine:3.20.2 + +WORKDIR /app + +COPY simple_cdn /usr/bin/ +ENTRYPOINT ["/usr/bin/simple_cdn"] diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..b82a19e --- /dev/null +++ b/Makefile @@ -0,0 +1,2 @@ +build_local: + goreleaser release --snapshot --clean diff --git a/examples/all_params.yaml b/examples/all_params.yaml new file mode 100644 index 0000000..60bd9ae --- /dev/null +++ b/examples/all_params.yaml @@ -0,0 +1,59 @@ +listen_addr: :8080 +diagnostic_addr: :7070 
+can_persist_cache: + and: + - not: + any: + - cookie: + exists: value + - user_agent: + pattern: regexp_value + - header: + exists: value + - header: + pattern: + name: value + pattern: regexp_value + - query: + count: + gte: 1 + lte: 10 + - path: + pattern: regexp_value + - always: true + - never: true + +can_load_cache: + always: true + +can_force_emit_debug_logging: + never: true + +cache: + type: redis + redis: + addr: 127.0.0.1:6379 + get_timeout: 3s + set_timeout: 3s + connection_timeout: 100ms + +cache_key_config: + headers: [] + cookies: [] + query: [] + not_headers: [] + all_cookies: true + all_query: true + all_headers: true + +upstream: + host: "www.google.com" + scheme: "https" + transport_pool_config: + size: 5 + max_idle_conns_per_host: 2 + idle_conn_timeout: 15s + keep_alive_timeout: 15s + conn_timeout: 5s + max_life_time: 10s + diff --git a/examples/embed.go b/examples/embed.go new file mode 100644 index 0000000..f43e589 --- /dev/null +++ b/examples/embed.go @@ -0,0 +1,6 @@ +package examples + +import "embed" + +//go:embed *.yaml +var ExampleConfigs embed.FS diff --git a/examples/yandex.yaml b/examples/yandex.yaml new file mode 100644 index 0000000..a29065d --- /dev/null +++ b/examples/yandex.yaml @@ -0,0 +1,43 @@ +listen_addr: :8080 +diagnostic_addr: :7070 +can_persist_cache: + and: + - not: + header: + exists: authorization + - not: + cookie: + exists: token + +can_load_cache: + user_agent: + pattern: ".*http.?://yandex.com/bots.*" + +can_force_emit_debug_logging: + header: + exists: x-with-debug-log + +cache: + type: redis + redis: + addr: 127.0.0.1:6379 + get_timeout: 3s + set_timeout: 3s + connection_timeout: 100ms + +cache_key_config: + cookies: [] + all_query: true + headers: ["host"] + +upstream: + host: "www.google.com" + scheme: "https" + transport_pool_config: + size: 5 + max_idle_conns_per_host: 2 + idle_conn_timeout: 15s + keep_alive_timeout: 15s + conn_timeout: 5s + max_life_time: 10s + diff --git a/go.mod b/go.mod new file mode 
100644 index 0000000..c5c4523 --- /dev/null +++ b/go.mod @@ -0,0 +1,26 @@ +module github.com/paragor/simple_cdn + +go 1.23.0 + +require ( + github.com/felixge/httpsnoop v1.0.4 + github.com/google/uuid v1.6.0 + github.com/klauspost/compress v1.17.9 + github.com/prometheus/client_golang v1.20.2 + github.com/redis/go-redis/v9 v9.6.1 + go.uber.org/zap v1.27.0 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + go.uber.org/multierr v1.10.0 // indirect + golang.org/x/sys v0.22.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..6a2f025 --- /dev/null +++ b/go.sum @@ -0,0 +1,59 @@ +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f 
h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= +github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= 
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= +github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= +go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/main.go b/main.go new file mode 100644 index 0000000..7da12b1 --- /dev/null +++ b/main.go @@ -0,0 +1,203 @@ +package main + +import ( + "bytes" + "context" + "errors" + "flag" + "fmt" + "github.com/paragor/simple_cdn/pkg/cache" + "github.com/paragor/simple_cdn/pkg/cachebehavior" + "github.com/paragor/simple_cdn/pkg/logger" + "github.com/paragor/simple_cdn/pkg/metrics" + "github.com/paragor/simple_cdn/pkg/upstream" + "github.com/paragor/simple_cdn/pkg/user" + "github.com/prometheus/client_golang/prometheus/promhttp" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "gopkg.in/yaml.v3" + "net/http" + "net/http/pprof" + "os" + "strings" + "time" +) + +func getLogLevel() zapcore.Level { + switch strings.ToLower(os.Getenv("LOG_LEVEL")) { + case "info", "": + return zapcore.InfoLevel + case "debug": + return zapcore.DebugLevel + case "error": + return zapcore.ErrorLevel + case "warn": + return zapcore.WarnLevel + default: + panic("unknown logger level") + } +} + +var app = "simple_cdn" + +func main() { + logLevel := getLogLevel() + logger.Init(app, logLevel) + metrics.Init(app) + log := logger.Logger() + configPath := flag.String("config", "", "config path in yaml format") + checkConfig := flag.Bool("check-config", false, "only check validation and exit") + flag.Parse() + data, err := os.ReadFile(*configPath) + if err != nil { + panic(err) + } + + config, err := ParseConfig(data) + if err != nil { + log.With(zap.Error(err)).Fatal("cant parse config") + } + log.With(zap.String("description", config.CanPersistCache.ToUser().String())).Info("can persist cache config") + log.With(zap.String("description", 
config.CanLoadCache.ToUser().String())).Info("can load cache config") + log.With(zap.String("description", config.CanForceEmitDebugLogging.ToUser().String())).Info("can force emit debug logging") + if *checkConfig { + log.Info("check-config is set, config is valid") + os.Exit(0) + } + + cacheDb := config.Cache.Cache() + handler := cachebehavior.NewCacheBehavior( + config.CanPersistCache.ToUser(), + config.CanLoadCache.ToUser(), + &config.CacheKeyConfig, + config.Upstream.CreateUpstream(), + cacheDb, + ) + handler = logger.HttpRecoveryMiddleware(handler) + handler = logger.HttpLoggingMiddleware(handler) + handler = logger.HttpSetLoggerMiddleware(config.CanForceEmitDebugLogging.ToUser(), handler) + + mainServer := http.Server{ + Addr: config.ListenAddr, + Handler: handler, + } + diagnosticServer := http.Server{ + Addr: config.DiagnosticAddr, + Handler: GetDiagnosticServerHandler(cacheDb), + } + + diagnosticServer.RegisterOnShutdown(func() { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*60) + defer cancel() + _ = mainServer.Shutdown(ctx) + }) + mainServer.RegisterOnShutdown(func() { + _ = diagnosticServer.Close() + }) + + go func() { + time.Sleep(time.Second * 5) + log.Debug("starting diagnostic server") + if err = diagnosticServer.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { + log.With(zap.Error(err)).Error("on listen diagnostic server") + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second*60) + defer cancel() + _ = mainServer.Shutdown(ctx) + }() + log.Debug("starting main server") + if err = mainServer.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { + log.With(zap.Error(err)).Fatal("on listen main server") + } + log.Info("good bye") +} + +type Config struct { + ListenAddr string `yaml:"listen_addr"` + DiagnosticAddr string `yaml:"diagnostic_addr"` + CanPersistCache user.Config `yaml:"can_persist_cache"` + CanLoadCache user.Config `yaml:"can_load_cache"` + 
CanForceEmitDebugLogging user.Config `yaml:"can_force_emit_debug_logging"` + CacheKeyConfig cache.KeyConfig `yaml:"cache_key_config"` + Upstream upstream.Config `yaml:"upstream"` + Cache cache.Config `yaml:"cache"` +} + +func (c *Config) Validate() error { + if c.ListenAddr == "" { + c.ListenAddr = ":8080" + } + if c.DiagnosticAddr == "" { + c.DiagnosticAddr = ":7070" + } + if err := c.CanForceEmitDebugLogging.Validate(); err != nil { + return fmt.Errorf("when_collect_debug_logging invalid: %w", err) + } + if err := c.CanPersistCache.Validate(); err != nil { + return fmt.Errorf("can_persist_cache invalid: %w", err) + } + if err := c.CanLoadCache.Validate(); err != nil { + return fmt.Errorf("can_load_cache invalid: %w", err) + } + if err := c.CacheKeyConfig.Validate(); err != nil { + return fmt.Errorf("cache_key_config invalid: %w", err) + } + if err := c.Upstream.Validate(); err != nil { + return fmt.Errorf("upstream invalid: %w", err) + } + if err := c.Cache.Validate(); err != nil { + return fmt.Errorf("cache invalid: %w", err) + } + return nil +} + +func ParseConfig(data []byte) (*Config, error) { + config := &Config{} + decoder := yaml.NewDecoder(bytes.NewReader(data)) + decoder.KnownFields(true) + if err := decoder.Decode(config); err != nil { + return nil, fmt.Errorf("error on unmarshal: %w", err) + } + if err := config.Validate(); err != nil { + return nil, fmt.Errorf("error on validation: %w", err) + } + return config, nil +} + +func GetDiagnosticServerHandler(cacheDb cache.Cache) http.Handler { + mux := http.NewServeMux() + mux.HandleFunc("/readyz", func(writer http.ResponseWriter, request *http.Request) { + writer.WriteHeader(200) + _, _ = writer.Write([]byte("ok")) + }) + mux.HandleFunc("/healthz", func(writer http.ResponseWriter, request *http.Request) { + writer.WriteHeader(200) + _, _ = writer.Write([]byte("ok")) + }) + mux.HandleFunc("/invalidate", func(writer http.ResponseWriter, request *http.Request) { + ctx := request.Context() + if ctx == nil { + 
var cancel func() + ctx, cancel = context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + } + keyPattern := request.URL.Query().Get("pattern") + if keyPattern == "" { + http.Error(writer, "query 'pattern' is empty", 400) + return + } + if err := cacheDb.Invalidate(ctx, keyPattern); err != nil { + http.Error(writer, "cant invalidate cache:"+err.Error(), 500) + return + } + + writer.WriteHeader(200) + _, _ = writer.Write([]byte("ok")) + }) + mux.Handle("/metrics", promhttp.Handler()) + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + return mux +} diff --git a/main_test.go b/main_test.go new file mode 100644 index 0000000..48cd4e7 --- /dev/null +++ b/main_test.go @@ -0,0 +1,26 @@ +package main + +import ( + "github.com/paragor/simple_cdn/examples" + "testing" +) + +func TestParseConfig(t *testing.T) { + files, err := examples.ExampleConfigs.ReadDir(".") + if err != nil { + panic(err) + } + for _, file := range files { + t.Run(file.Name(), func(t *testing.T) { + content, err := examples.ExampleConfigs.ReadFile(file.Name()) + if err != nil { + panic(err) + } + _, err = ParseConfig(content) + if err != nil { + t.Errorf("ParseConfig() error = %v", err) + return + } + }) + } +} diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go new file mode 100644 index 0000000..1f5f075 --- /dev/null +++ b/pkg/cache/cache.go @@ -0,0 +1,48 @@ +package cache + +import ( + "context" + "fmt" + "sync" +) + +type Config struct { + Type string `yaml:"type"` + Redis RedisConfig `yaml:"redis"` +} + +func (c *Config) Validate() error { + if c.Type != "redis" { + return fmt.Errorf("type should be redis") + } + if err := c.Redis.Validate(); err != nil { + return fmt.Errorf("redis is invalid: %w", err) + } + return nil +} + +func (c *Config) Cache() Cache { + if c.Type == 
"redis" { + return c.Redis.Cache() + } + panic("only redis is supported") +} + +type Cache interface { + Get(ctx context.Context, key string) *Item + Set(ctx context.Context, key string, value *Item) + Invalidate(ctx context.Context, keyPattern string) error +} + +var bufferPool = sync.Pool{ + New: func() any { + return make([]byte, 8*1024) + }, +} + +func getBytesBuffer() ([]byte, func()) { + responseBytesBuffer := bufferPool.Get().([]byte)[:0] + return responseBytesBuffer, func() { + bufferPool.Put(responseBytesBuffer[:0]) + } +} diff --git a/pkg/cache/cache_control.go b/pkg/cache/cache_control.go new file mode 100644 index 0000000..f022c71 --- /dev/null +++ b/pkg/cache/cache_control.go @@ -0,0 +1,79 @@ +package cache + +import ( + "strconv" + "strings" + "time" + "unicode" +) + +type CacheControl struct { + Public bool + MaxAge time.Duration + SMaxAge time.Duration + StaleWhileInvalidation time.Duration + StaleIfError time.Duration +} + +func (cc *CacheControl) ttl() time.Duration { + return max(cc.SMaxAge, cc.StaleIfError, cc.StaleWhileInvalidation) +} + +func (cc *CacheControl) ShouldCDNPersist() bool { + return cc.Public && (cc.SMaxAge > 0 || cc.StaleWhileInvalidation > 0 || cc.StaleIfError > 0) +} + +func ParseCacheControlHeader(str string) CacheControl { + result := CacheControl{} + for _, token := range strings.Split(strings.TrimSpace(strings.ToLower(str)), " ") { + token = strings.TrimFunc(token, func(r rune) bool { + return unicode.IsSpace(r) || r == ',' + }) + if token == "" { + continue + } + if token == "public" { + result.Public = true + continue + } + if strings.HasPrefix(token, "max-age=") { + value := strings.TrimPrefix(token, "max-age=") + duration, err := strconv.Atoi(value) + if err != nil { + continue + } + result.MaxAge = time.Second * time.Duration(duration) + continue + } + if strings.HasPrefix(token, "s-maxage=") { + value := strings.TrimPrefix(token, "s-maxage=") + duration, err := strconv.Atoi(value) + if err != nil { + continue + } + 
result.SMaxAge = time.Second * time.Duration(duration) + continue + } + if strings.HasPrefix(token, "stale-while-revalidate=") { + value := strings.TrimPrefix(token, "stale-while-revalidate=") + duration, err := strconv.Atoi(value) + if err != nil { + continue + } + result.StaleWhileInvalidation = time.Second * time.Duration(duration) + continue + } + if strings.HasPrefix(token, "stale-if-error=") { + value := strings.TrimPrefix(token, "stale-if-error=") + duration, err := strconv.Atoi(value) + if err != nil { + continue + } + result.StaleIfError = time.Second * time.Duration(duration) + continue + } + + } + + return result +} diff --git a/pkg/cache/cache_control_test.go b/pkg/cache/cache_control_test.go new file mode 100644 index 0000000..03a0e99 --- /dev/null +++ b/pkg/cache/cache_control_test.go @@ -0,0 +1,52 @@ +package cache + +import ( + "reflect" + "testing" + "time" +) + +func TestParseCacheHeader(t *testing.T) { + type args struct { + str string + } + tests := []struct { + name string + args args + want CacheControl + }{ + { + name: "public, max-age=100, s-maxage=200, stale-while-revalidate=300, stale-if-error=400", + args: args{ + str: "public, max-age=100, s-maxage=200, stale-while-revalidate=300, stale-if-error=400", + }, + want: CacheControl{ + Public: true, + MaxAge: 100 * time.Second, + SMaxAge: 200 * time.Second, + StaleWhileInvalidation: 300 * time.Second, + StaleIfError: 400 * time.Second, + }, + }, + { + name: "nothing", + args: args{ + str: "nothing", + }, + want: CacheControl{ + Public: false, + MaxAge: 0, + SMaxAge: 0, + StaleWhileInvalidation: 0, + StaleIfError: 0, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ParseCacheControlHeader(tt.args.str); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ParseCacheControlHeader() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/cache/cache_item.go b/pkg/cache/cache_item.go new file mode 100644 index 0000000..fcfe46a --- /dev/null +++ 
b/pkg/cache/cache_item.go @@ -0,0 +1,67 @@ +package cache + +import ( + "net/http" + "strings" + "time" +) + +type Item struct { + SavedAt time.Time + CacheHeader CacheControl + Headers map[string][]string + Body []byte +} + +func (item *Item) CanUseCache(now time.Time) bool { + return item.CacheHeader.Public && now.Sub(item.SavedAt) < item.CacheHeader.SMaxAge +} + +func (item *Item) CanStaleIfError(now time.Time) bool { + return item.CacheHeader.Public && now.Sub(item.SavedAt) < item.CacheHeader.StaleIfError +} + +func (item *Item) CanStaleWhileRevalidation(now time.Time) bool { + return item.CacheHeader.Public && now.Sub(item.SavedAt) < item.CacheHeader.StaleWhileInvalidation +} + +func ShouldPersist(response *http.Response) bool { + cacheControlHeader := response.Header.Get("Cache-Control") + if len(cacheControlHeader) == 0 { + return false + } + cacheControl := ParseCacheControlHeader(cacheControlHeader) + return cacheControl.ShouldCDNPersist() +} + +func ItemFromResponse(response *http.Response, body []byte) *Item { + cacheControlHeader := response.Header.Get("Cache-Control") + if len(cacheControlHeader) == 0 { + return nil + } + cacheControl := ParseCacheControlHeader(cacheControlHeader) + if !cacheControl.ShouldCDNPersist() { + return nil + } + return &Item{ + SavedAt: time.Now(), + Headers: response.Header.Clone(), + Body: body, + CacheHeader: cacheControl, + } +} + +func (item *Item) Write(w http.ResponseWriter) error { + for k, values := range item.Headers { + lowerHeader := strings.ToLower(k) + if lowerHeader == "x-cache-status" || lowerHeader == "set-cookie" { + continue + } + for _, v := range values { + w.Header().Add(k, v) + } + } + w.WriteHeader(200) + _, err := w.Write(item.Body) + return err +} diff --git a/pkg/cache/key_config.go b/pkg/cache/key_config.go new file mode 100644 index 0000000..cfe370a --- /dev/null +++ b/pkg/cache/key_config.go @@ -0,0 +1,215 @@ +package cache + +import ( + "crypto/md5" + "encoding/hex" + "fmt" + "hash" + "net/http" 
+ "sort" + "strings" + "sync" + "sync/atomic" +) + +var keySpecDelimiter = "|" + +type KeyConfig struct { + Headers []string `yaml:"headers,omitempty"` + Cookies []string `yaml:"cookies,omitempty"` + Query []string `yaml:"query,omitempty"` + + NotHeaders []string `yaml:"not_headers,omitempty"` + + AllCookies bool `yaml:"all_cookies"` + AllQuery bool `yaml:"all_query"` + AllHeaders bool `yaml:"all_headers"` + + notHeadersMap map[string]struct{} + headersMap map[string]struct{} + cookiesMap map[string]struct{} + queryMap map[string]struct{} + m sync.Mutex + compiled atomic.Bool +} + +func (kc *KeyConfig) Validate() error { + if len(kc.Headers) > 0 && kc.AllHeaders { + return fmt.Errorf("only on of two field must be specified: headers, all_headers") + } + if len(kc.Cookies) > 0 && kc.AllCookies { + return fmt.Errorf("only on of two field must be specified: cookies, all_cookies") + } + if len(kc.Query) > 0 && kc.AllQuery { + return fmt.Errorf("only on of two field must be specified: query, all_query") + } + return nil +} + +func (kc *KeyConfig) compile() { + if kc.compiled.Load() { + return + } + kc.m.Lock() + defer kc.m.Unlock() + if kc.compiled.Load() { + return + } + + kc.headersMap = make(map[string]struct{}) + kc.cookiesMap = make(map[string]struct{}) + kc.queryMap = make(map[string]struct{}) + kc.notHeadersMap = make(map[string]struct{}) + + for _, k := range kc.NotHeaders { + kc.notHeadersMap[strings.ToLower(k)] = struct{}{} + } + if !kc.AllHeaders { + for _, k := range kc.Headers { + kc.headersMap[strings.ToLower(k)] = struct{}{} + } + } + if !kc.AllCookies { + for _, k := range kc.Cookies { + kc.cookiesMap[k] = struct{}{} + } + } + if !kc.AllQuery { + for _, k := range kc.Query { + kc.queryMap[k] = struct{}{} + } + } + kc.compiled.Store(true) +} + +func sortedKeys[T any](hashtable map[string]T) []string { + keys := make([]string, 0, len(hashtable)) + for k := range hashtable { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func (kc 
*KeyConfig) generateRawKeyForHash(r *http.Request) string { + kc.compile() + key := &strings.Builder{} + key.Grow(512) + key.WriteString("headers" + keySpecDelimiter) + if len(kc.Headers) > 0 || kc.AllHeaders { + headersMap := make(map[string]string, len(r.Header)) + for k := range r.Header { + _, inAllowList := kc.headersMap[strings.ToLower(k)] + _, inBlackList := kc.notHeadersMap[strings.ToLower(k)] + if !inBlackList && !inAllowList && !(kc.AllHeaders && !isBlacklistHeader(k)) { + continue + } + headersMap[k] = strings.Join(r.Header[k], keySpecDelimiter) // not sortable :( + } + kc.addMapToKey(key, headersMap) + } + key.WriteString(keySpecDelimiter + "query" + keySpecDelimiter) + if len(kc.queryMap) > 0 || kc.AllQuery { + query := r.URL.Query() + queryMap := make(map[string]string, len(query)) + for k := range query { + if _, exists := kc.queryMap[k]; kc.AllQuery || exists { + queryMap[k] = strings.Join(query[k], keySpecDelimiter) // not sortable :( + } + } + kc.addMapToKey(key, queryMap) + } + key.WriteString(keySpecDelimiter + "cookies" + keySpecDelimiter) + if len(kc.cookiesMap) > 0 || kc.AllCookies { + cookies := r.Cookies() + cookiesMap := make(map[string]string, len(cookies)) + for _, cookie := range cookies { + if err := cookie.Valid(); err != nil { + continue + } + if _, exists := kc.cookiesMap[cookie.Name]; kc.AllCookies || exists { + cookiesMap[cookie.Name] = cookie.Value + } + } + kc.addMapToKey(key, cookiesMap) + } + return key.String() +} + +func (kc *KeyConfig) addMapToKey(key *strings.Builder, addMap map[string]string) { + lenMap := len(addMap) + for i, k := range sortedKeys(addMap) { + key.WriteString(k + "=" + addMap[k]) + if i != lenMap-1 { + key.WriteString(keySpecDelimiter) + } + } +} + +func (kc *KeyConfig) Apply(r *http.Request) string { + return r.URL.Path + "|" + getMD5Hash(kc.generateRawKeyForHash(r)) +} + +var notCachableHttpHeadersSource = map[string][]string{ + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers + "caching": 
{"age", "cache-control", "clear-site-data", "expires", "no-vary-search"}, + "conditionals": { + "last-modified", "etag", "if-match", "if-none-match", + "if-modified-since", "if-unmodified-since", "vary", + }, + "connection management": {"connection", "keep-alive"}, + "content negotiation": {"accept-encoding"}, + "controls": {"max-forwards"}, + "proxies": {"forwarded", "via"}, + "other": {"upgrade"}, + + //https://en.wikipedia.org/wiki/list_of_http_header_fields + "common non-standard request fields": { + "x-requested-with", "x-forwarded-for", "x-forwarded-host", + "x-forwarded-proto", "proxy-connection", "x-csrf-token", + "x-request-id", "x-correlation-id", "correlation-id", + "x-forwarded-port", "x-forwarded-proto", "x-forwarded-scheme", + "save-data", "x-real-ip", "sec-ch-ua", "sec-ch-ua-platform", + "dnt", "upgrade-insecure-requests", "sec-fetch-site", + "sec-fetch-mode", "sec-fetch-user", "sec-fetch-dest", + "accept-language", "priority", + }, + "my": {"cookie"}, +} + +var notCachableHttpHeaders = map[string]struct{}{} + +func init() { + headersList := []string{} + for _, headers := range notCachableHttpHeadersSource { + for _, header := range headers { + headersList = append(headersList, header) + } + } + sort.Strings(headersList) + notCachableHttpHeaders = make(map[string]struct{}, len(headersList)) + for _, header := range headersList { + notCachableHttpHeaders[header] = struct{}{} + } +} + +func isBlacklistHeader(key string) bool { + _, isBlacklisted := notCachableHttpHeaders[strings.ToLower(key)] + return isBlacklisted +} + +var md5pool = sync.Pool{New: func() any { return md5.New() }} + +func getMD5Hash(text string) string { + hasher := md5pool.Get().(hash.Hash) + if hasher == nil { + hasher = md5.New() + } + defer func() { + hasher.Reset() + md5pool.Put(hasher) + }() + hasher.Reset() + hasher.Write([]byte(text)) + return hex.EncodeToString(hasher.Sum(nil)) +} diff --git a/pkg/cache/key_config_test.go b/pkg/cache/key_config_test.go new file mode 100644 
index 0000000..2c69522 --- /dev/null +++ b/pkg/cache/key_config_test.go @@ -0,0 +1,138 @@ +package cache + +import ( + "net/http" + "testing" + "time" +) + +func TestKeyConfig_generateRawKeyForHash(t *testing.T) { + type fields struct { + Headers []string + Cookies []string + Query []string + NotHeaders []string + AllCookies bool + AllQuery bool + AllHeaders bool + } + type args struct { + r *http.Request + } + tests := []struct { + name string + fields fields + args args + want string + }{ + { + name: "all true", + fields: fields{ + Headers: nil, + Cookies: nil, + Query: nil, + NotHeaders: nil, + AllCookies: true, + AllQuery: true, + AllHeaders: true, + }, + args: args{ + r: createRequest( + "a=1&b=2&d&c=4", + http.Header{ + "h1": []string{"hv1"}, + "h2": []string{"hv2"}, + }, + []http.Cookie{ + { + Name: "c1", + Value: "cv1", + Expires: time.Now().Add(time.Hour), + }, + { + Name: "c3", + Value: "cv3", + Expires: time.Now().Add(time.Hour), + }, + { + Name: "c1", + Value: "cv2", + Expires: time.Now().Add(time.Hour), + }, + { + Name: "Without time", + Value: "is invalid", + Expires: time.Time{}, + }, + }, + ), + }, + want: "headers|H1=hv1|H2=hv2|query|a=1|b=2|c=4|d=|cookies|c1=cv2|c3=cv3", + }, + { + name: "example yandex.yaml", + fields: fields{ + Headers: []string{"host"}, + Cookies: nil, + Query: nil, + NotHeaders: nil, + AllCookies: false, + AllQuery: true, + AllHeaders: false, + }, + args: args{ + r: createRequest( + "1_three&0_one=two", + http.Header{ + "host": []string{"www.google.com"}, + "another": []string{"one"}, + }, + []http.Cookie{ + { + Name: "c1", + Value: "cv2", + }, + { + Name: "c1", + Value: "cv2", + }, + }, + ), + }, + want: "headers|Host=www.google.com|query|0_one=two|1_three=|cookies|", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + kc := &KeyConfig{ + Headers: tt.fields.Headers, + Cookies: tt.fields.Cookies, + Query: tt.fields.Query, + NotHeaders: tt.fields.NotHeaders, + AllCookies: tt.fields.AllCookies, + 
AllQuery: tt.fields.AllQuery, + AllHeaders: tt.fields.AllHeaders, + } + if got := kc.generateRawKeyForHash(tt.args.r); got != tt.want { + t.Errorf("generateRawKeyForHash() = %v, want %v", got, tt.want) + } + }) + } +} + +func createRequest(query string, header http.Header, cookies []http.Cookie) *http.Request { + request, err := http.NewRequest("GET", "http://127.0.0.1/?"+query, nil) + if err != nil { + panic(err) + } + for k, values := range header { + for _, v := range values { + request.Header.Add(k, v) + } + + } + for _, c := range cookies { + request.AddCookie(&c) + } + return request +} diff --git a/pkg/cache/redis.go b/pkg/cache/redis.go new file mode 100644 index 0000000..8455f1a --- /dev/null +++ b/pkg/cache/redis.go @@ -0,0 +1,173 @@ +package cache + +import ( + "context" + "encoding/json" + "fmt" + "github.com/klauspost/compress/zstd" + "github.com/paragor/simple_cdn/pkg/logger" + "github.com/paragor/simple_cdn/pkg/metrics" + "github.com/redis/go-redis/v9" + "go.uber.org/zap" + "time" +) + +type RedisConfig struct { + Addr string `yaml:"addr"` + Username string `yaml:"username"` + Password string `yaml:"password"` + DB int `yaml:"db"` + GetTimeout time.Duration `yaml:"get_timeout"` + SetTimeout time.Duration `yaml:"set_timeout"` + ConnectionTimeout time.Duration `yaml:"connection_timeout"` +} + +func (c *RedisConfig) Validate() error { + if c.Addr == "" { + return fmt.Errorf("addr shoud not be empty") + } + if c.DB < 0 { + return fmt.Errorf("db shoud not < 0") + } + if c.GetTimeout <= 0 { + return fmt.Errorf("get_timeout shoud not <= 0") + } + if c.SetTimeout <= 0 { + return fmt.Errorf("get_timeout shoud not <= 0") + } + return nil +} + +func (c *RedisConfig) Cache() Cache { + return newRedisCache( + redis.NewClient(&redis.Options{ + Addr: c.Addr, + Username: c.Username, + Password: c.Password, + DB: c.DB, + WriteTimeout: c.SetTimeout, + ReadTimeout: c.GetTimeout, + PoolTimeout: c.ConnectionTimeout, + ContextTimeoutEnabled: true, + DialTimeout: 
c.ConnectionTimeout, + MinIdleConns: 5, + }), + c.SetTimeout, + c.GetTimeout, + ) +} + +type redisCache struct { + getTimeout time.Duration + setTimeout time.Duration + client *redis.Client +} + +func newRedisCache( + client *redis.Client, + setTimeout time.Duration, + getTimeout time.Duration, +) Cache { + return &redisCache{ + client: client, + setTimeout: setTimeout, + getTimeout: getTimeout, + } +} + +func (c *redisCache) Get(ctx context.Context, key string) *Item { + log := logger.FromCtx(ctx). + With(zap.String("component", "cache.redis")). + With(zap.String("cache_key", key)) + ctx, cancel := context.WithTimeout(context.Background(), c.getTimeout) + defer cancel() + redisValueCompressed, err := c.client.Get(ctx, key).Bytes() + if err != nil { + log.With(zap.Error(err)).Error("cant get cache") + metrics.CacheErrors.Inc() + return nil + } + bytesBuffer, bytesBufferClean := getBytesBuffer() + defer bytesBufferClean() + + bytesBuffer, err = zstdDecoder.DecodeAll(redisValueCompressed, bytesBuffer) + if err != nil { + log.With(zap.Error(err)).Error("cant decompress cache") + metrics.CacheErrors.Inc() + return nil + } + item := &Item{} + if err := json.Unmarshal(bytesBuffer, item); err != nil { + log.With(zap.Error(err)).Error("cant unmarshal cache") + metrics.CacheErrors.Inc() + return nil + } + if !item.CacheHeader.ShouldCDNPersist() { + return nil + } + return item +} + +func (c *redisCache) Set(ctx context.Context, key string, value *Item) { + log := logger.FromCtx(ctx). + With(zap.String("component", "cache.redis")). 
+ With(zap.String("cache_key", key)) + ttl := value.CacheHeader.ttl() + if ttl <= 0 { + return + } + data, err := json.Marshal(value) + if err != nil { + log.With(zap.Error(err)).Error("cant marshal cache") + metrics.CacheErrors.Inc() + return + } + bytesBuffer, bytesBufferClean := getBytesBuffer() + defer bytesBufferClean() + bytesBuffer = zstdEncoder.EncodeAll(data, bytesBuffer) + ctx, cancel := context.WithTimeout(context.Background(), c.setTimeout) + defer cancel() + _, err = c.client.SetNX(ctx, key, bytesBuffer, ttl).Result() + if err != nil { + log.With(zap.Error(err)).Error("cant save cache") + metrics.CacheErrors.Inc() + } +} + +var zstdEncoder *zstd.Encoder +var zstdDecoder *zstd.Decoder + +func init() { + var err error + zstdEncoder, err = zstd.NewWriter(nil) + if err != nil { + panic("cant init zstd encoder " + err.Error()) + } + zstdDecoder, err = zstd.NewReader(nil) + if err != nil { + panic("cant init zstd decoder " + err.Error()) + } +} + +func (c *redisCache) Invalidate(ctx context.Context, keyPattern string) error { + log := logger.FromCtx(ctx) + metrics.CacheInvalidations.Inc() + itemsCount := 0 + defer func() { + log. + With(zap.String("invalidate_key", keyPattern)). + With(zap.Int("items_count", itemsCount)). 
+ Info("invalidate cache") + }() + iter := c.client.Scan(ctx, 0, keyPattern, 0).Iterator() + for iter.Next(ctx) { + if err := c.client.Del(ctx, iter.Val()).Err(); err != nil { + itemsCount++ + metrics.CacheInvalidatedItems.Inc() + } + } + if err := iter.Err(); err != nil { + return err + } + return nil +} diff --git a/pkg/cachebehavior/cache.go b/pkg/cachebehavior/cache.go new file mode 100644 index 0000000..0dda05c --- /dev/null +++ b/pkg/cachebehavior/cache.go @@ -0,0 +1,248 @@ +package cachebehavior + +import ( + "bytes" + "github.com/paragor/simple_cdn/pkg/cache" + "github.com/paragor/simple_cdn/pkg/logger" + "github.com/paragor/simple_cdn/pkg/metrics" + "github.com/paragor/simple_cdn/pkg/upstream" + "github.com/paragor/simple_cdn/pkg/user" + "go.uber.org/zap" + "io" + "net/http" + "strings" + "sync" + "time" +) + +const bufferSize = 32 * 1024 + +type cacheBehavior struct { + upstream upstream.Upstream + cacheKeyConfig *cache.KeyConfig + cache cache.Cache + + canPersistCache user.User + canLoadCache user.User +} + +func NewCacheBehavior( + canPersistCache user.User, + canLoadCache user.User, + cacheKeyConfig *cache.KeyConfig, + upstream upstream.Upstream, + cache cache.Cache, +) http.Handler { + return &cacheBehavior{ + upstream: upstream, + cacheKeyConfig: cacheKeyConfig, + cache: cache, + canPersistCache: canPersistCache, + canLoadCache: canLoadCache, + } +} +func (b *cacheBehavior) ServeHTTP(w http.ResponseWriter, r *http.Request) { + canPersistCache := b.canPersistCache.IsUser(r) + canLoadCache := b.canLoadCache.IsUser(r) + log := + logger.FromCtx(r.Context()). + With(zap.String("component", "cacheBehavior")). + With(zap.Bool("can_persist_cache", canPersistCache)). 
+ With(zap.Bool("can_load_cache", canLoadCache)) + + now := time.Now() + if r.Method != http.MethodGet || (!canPersistCache && !canLoadCache) { + log.Debug("just proxy pass") + response, err := b.upstream.Do(r) + if err != nil { + log.With(zap.Error(err)).Error("cant send request to upstream") + http.Error(w, "service unavailable", http.StatusServiceUnavailable) + return + } + defer response.Body.Close() + copyHeaders(response.Header, w.Header()) + w.Header().Set("X-Cache-Status", "MISS") + w.WriteHeader(response.StatusCode) + if err := ioCopy(w, response.Body); err != nil { + log.With(zap.Error(err)).Warn("cant write response body") + } + return + } + var cacheItem *cache.Item + if canLoadCache { + start := time.Now() + cacheItem = b.cache.Get(r.Context(), b.cacheKeyConfig.Apply(r)) + cacheStatus := metrics.BoolToString(cacheItem != nil, "HIT", "MISS") + metrics.CacheLoadTime. + WithLabelValues(cacheStatus). + Observe(time.Now().Sub(start).Seconds()) + log.With(zap.Bool("found", cacheItem != nil)). + With(zap.String("cache_status", cacheStatus)). 
+ Debug("load cache item") + } + if cacheItem != nil && cacheItem.CanUseCache(now) { + log.Debug("response from cache") + w.Header().Set("X-Cache-Status", "HIT") + if err := cacheItem.Write(w); err != nil { + log.With(zap.Error(err)).Warn("cant write cache response") + } + return + } + + if cacheItem != nil && cacheItem.CanStaleWhileRevalidation(now) { + log.Debug("response from stale") + w.Header().Set("X-Cache-Status", "HIT-STALE") + if err := cacheItem.Write(w); err != nil { + log.With(zap.Error(err)).Warn("cant write cache response") + } + if !canPersistCache { + return + } + go func() { + cacheIsInvalidated := false + log = log.With(zap.String("goroutine", "invalidation")) + defer func() { + log.With(zap.Bool("is_invalidated", cacheIsInvalidated)).Debug("stale cache invalidated") + }() + response, err := b.upstream.Do(r) + if err != nil { + log.With(zap.Error(err)).Error("upstream error") + return + } + defer response.Body.Close() + log = log.With(zap.Int("upstream_status", response.StatusCode)) + if response.StatusCode != 200 { + log.Warn("not cachable status code") + return + } + cacheBytesBuffer, cacheBytesBufferClean := getBytesBuffer() + defer cacheBytesBufferClean() + buffer := bytes.NewBuffer(cacheBytesBuffer) + if _, err := buffer.ReadFrom(response.Body); err != nil { + log.With(zap.Error(err)).Error("cant read upstream body") + return + } + item := cache.ItemFromResponse(response, buffer.Bytes()) + if item == nil { + return + } + b.cache.Set(r.Context(), b.cacheKeyConfig.Apply(r), item) + cacheIsInvalidated = true + }() + return + } + + response, err := b.upstream.Do(r) + if err != nil { + if cacheItem != nil && cacheItem.CanStaleIfError(now) { + log.With(zap.Error(err)).Debug("use stale cache") + w.Header().Set("X-Cache-Status", "HIT-ERROR") + if err := cacheItem.Write(w); err != nil { + log.With(zap.Error(err)).Warn("cant write cache response") + } + return + } + log.With(zap.Error(err)).Error("cant send request to upstream") + http.Error(w, 
"service unavailable", http.StatusServiceUnavailable) + return + } + + defer response.Body.Close() + log = log.With(zap.Int("upstream_status", response.StatusCode)) + if response.StatusCode != 200 { + if response.StatusCode >= 500 && cacheItem != nil && cacheItem.CanStaleIfError(now) { + log.Info("response from cache due code >= 500") + w.Header().Set("X-Cache-Status", "HIT-ERROR") + if err := cacheItem.Write(w); err != nil { + log.With(zap.Error(err)).Warn("cant write cache response") + } + return + } + log.Debug("response to client with not 200 status") + copyHeaders(response.Header, w.Header()) + w.Header().Set("X-Cache-Status", "ERROR") + w.WriteHeader(response.StatusCode) + if err := ioCopy(w, response.Body); err != nil { + log.With(zap.Error(err)).Warn("cant write response body") + } + return + } + copyHeaders(response.Header, w.Header()) + w.Header().Set("X-Cache-Status", "MISS") + w.WriteHeader(response.StatusCode) + if !canPersistCache || !cache.ShouldPersist(response) { + log.Debug("response to client without cache save") + if err = ioCopy(w, response.Body); err != nil { + log.With(zap.Error(err)).Warn("cant write response body") + } + return + } + + bodyBytes, bodyBytesClean := getBytesBuffer() + bodyBuffer := bytes.NewBuffer(bodyBytes) + if err := ioCopyWithPersist(w, response.Body, bodyBuffer); err != nil { + log.With(zap.Error(err)).Error("cant read all body from upstream") + bodyBytesClean() + return + } + ctx := r.Context() + go func() { + cacheIsSaved := false + log = log.With(zap.String("goroutine", "cache_saving")) + defer func() { + log.With(zap.Bool("is_saved", cacheIsSaved)).Debug("persist cache") + }() + defer bodyBytesClean() + cacheItem = cache.ItemFromResponse(response, bodyBuffer.Bytes()) + if cacheItem == nil { + return + } + b.cache.Set(ctx, b.cacheKeyConfig.Apply(r), cacheItem) + cacheIsSaved = true + }() +} + +var bufferPool = sync.Pool{ + New: func() any { + return make([]byte, 8*1024) + }, +} + +func getBytesBuffer() ([]byte, 
func()) { + responseBytesBuffer := bufferPool.Get().([]byte)[:0] + return responseBytesBuffer, func() { + bufferPool.Put(responseBytesBuffer[:0]) + } +} + +func ioCopy(dst io.Writer, src io.Reader) error { + responseBytesBuffer, responseBytesBufferClean := getBytesBuffer() + defer responseBytesBufferClean() + var buffer []byte + buffer = responseBytesBuffer[:cap(responseBytesBuffer)] + if len(buffer) < bufferSize { + buffer = make([]byte, bufferSize) + } + _, err := io.CopyBuffer(dst, src, nil) + return err +} + +func ioCopyWithPersist(dst io.Writer, src io.Reader, buffer *bytes.Buffer) error { + if _, err := buffer.ReadFrom(src); err != nil { + return err + } + _, err := dst.Write(buffer.Bytes()) + return err +} + +func copyHeaders(from, to http.Header) { + for k, values := range from { + lowerHeader := strings.ToLower(k) + if lowerHeader == "x-cache-status" { + continue + } + for _, v := range values { + to.Add(k, v) + } + } +} diff --git a/pkg/cachebehavior/cache_test.go b/pkg/cachebehavior/cache_test.go new file mode 100644 index 0000000..5896475 --- /dev/null +++ b/pkg/cachebehavior/cache_test.go @@ -0,0 +1,340 @@ +package cachebehavior + +import ( + "bytes" + "context" + "fmt" + "github.com/paragor/simple_cdn/pkg/cache" + "github.com/paragor/simple_cdn/pkg/logger" + "github.com/paragor/simple_cdn/pkg/metrics" + "github.com/paragor/simple_cdn/pkg/user" + "go.uber.org/zap/zapcore" + "io" + "net/http" + "net/http/httptest" + "net/textproto" + "regexp" + "sort" + "strings" + "sync" + "testing" + "time" +) + +// newInMemoryCache only for tests +func newInMemoryCache() *inMemoryCache { + return &inMemoryCache{data: make(map[string]*cache.Item)} +} + +// inMemoryCache only for tests +type inMemoryCache struct { + m sync.Mutex + data map[string]*cache.Item + savingCount int + wait sync.Cond +} + +func (c *inMemoryCache) With(r *http.Request, keyConfig *cache.KeyConfig, value *cache.Item) *inMemoryCache { + c.Set(context.Background(), keyConfig.Apply(r), value) + 
return c +} + +func (c *inMemoryCache) Get(_ context.Context, key string) *cache.Item { + c.m.Lock() + value, ok := c.data[key] + c.m.Unlock() + if !ok { + return nil + } + return value +} + +func (c *inMemoryCache) Len() int { + c.m.Lock() + defer c.m.Unlock() + return len(c.data) +} +func (c *inMemoryCache) SavingCount() int { + c.m.Lock() + defer c.m.Unlock() + return c.savingCount +} + +func (c *inMemoryCache) Set(_ context.Context, key string, value *cache.Item) { + c.m.Lock() + c.savingCount++ + c.data[key] = value + c.m.Unlock() +} + +func (c *inMemoryCache) Invalidate(_ context.Context, keyPattern string) error { + c.m.Lock() + defer c.m.Unlock() + re, err := regexp.Compile(strings.ReplaceAll(regexp.QuoteMeta(keyPattern), "\\*", ".*")) + if err != nil { + return fmt.Errorf("cant compile patter: %w", err) + } + for k := range c.data { + if re.MatchString(k) { + delete(c.data, k) + } + } + return nil +} + +type fakeUpstream struct { + m sync.Mutex + ordered []func(*http.Request) (*http.Response, error) + any func(*http.Request) (*http.Response, error) +} + +func newFakeUpstream() *fakeUpstream { + return &fakeUpstream{} +} + +func (u *fakeUpstream) WithOrdered(do func(*http.Request) (*http.Response, error)) *fakeUpstream { + u.m.Lock() + defer u.m.Unlock() + u.ordered = append(u.ordered, do) + return u +} +func (u *fakeUpstream) WithAny(do func(*http.Request) (*http.Response, error)) *fakeUpstream { + u.m.Lock() + defer u.m.Unlock() + u.any = do + return u +} + +func (u *fakeUpstream) Do(originRequest *http.Request) (*http.Response, error) { + do := u.any + u.m.Lock() + if len(u.ordered) > 0 { + do, u.ordered = u.ordered[0], u.ordered[1:] + } + u.m.Unlock() + if do == nil { + return nil, fmt.Errorf("fake upstream have no 'any' or 'ordered' functions") + } + return do(originRequest) +} + +func createRequest(method string, url string, headers http.Header, cookies []*http.Cookie, body []byte) *http.Request { + var bodyReader io.Reader + if body != nil { + b := 
bytes.NewBuffer(nil) + b.Write(body) + bodyReader = b + } + request, err := http.NewRequest(method, url, bodyReader) + if err != nil { + panic(err) + } + for k, values := range headers { + for _, v := range values { + request.Header.Add(k, v) + } + } + for _, c := range cookies { + request.AddCookie(c) + } + return request +} +func createResponse(status int, headers http.Header, body []byte) *http.Response { + response := httptest.NewRecorder() + for k, values := range headers { + for _, v := range values { + response.Header().Add(k, v) + } + } + response.WriteHeader(status) + response.Write(body) + return response.Result() +} + +var once sync.Once + +func initMetricsAndLogs() { + once.Do(func() { + logger.Init("testing", zapcore.DebugLevel) + metrics.Init("testing") + }) +} + +func Test_cacheBehavior_ServeHTTP_ProxyPass(t *testing.T) { + initMetricsAndLogs() + fUpstream := newFakeUpstream() + keyConfig := &cache.KeyConfig{ + Headers: []string{"host"}, + Cookies: []string{}, + AllQuery: true, + } + fCache := newInMemoryCache() + canPersistCache := user.Any( + user.Not(user.HeaderExists("Authorization")), + user.Not(user.CookieExists("token")), + ) + canLoadCache := must1(user.UserAgentPattern(".*http.?://yandex.com/bots.*")) + + cachebehavior := NewCacheBehavior( + canPersistCache, + canLoadCache, + keyConfig, + fUpstream, + fCache, + ) + fBody := bytes.NewBuffer(nil) + fBody.WriteString("this is body") + + testingRequest := createRequest( + http.MethodGet, + "http://127.0.0.1/testing?query=queryValue", + http.Header{"test": []string{"one"}}, + nil, + nil, + ) + + fUpstream.WithOrdered(func(request *http.Request) (*http.Response, error) { + if err := requestIsEqualWithoutBody(testingRequest, request); err != nil { + t.Errorf("enxpected upstream request: %s", err.Error()) + } + return createResponse(200, http.Header{"test": []string{"one"}}, fBody.Bytes()), nil + }).WithOrdered(func(request *http.Request) (*http.Response, error) { + if err := 
requestIsEqualWithoutBody(testingRequest, request); err != nil { + t.Errorf("enxpected upstream request: %s", err.Error()) + } + return createResponse(400, http.Header{"test": []string{"two"}}, fBody.Bytes()), nil + }).WithOrdered(func(request *http.Request) (*http.Response, error) { + if err := requestIsEqualWithoutBody(testingRequest, request); err != nil { + t.Errorf("enxpected upstream request: %s", err.Error()) + } + return createResponse(200, http.Header{"test": []string{"one"}, "Cache-Control": []string{"public, s-maxage=60"}}, fBody.Bytes()), nil + }).WithAny(func(request *http.Request) (*http.Response, error) { + t.Error("unexpected call upstream") + return nil, fmt.Errorf("unexpected call upstream") + }) + + recorder := httptest.NewRecorder() + cachebehavior.ServeHTTP(recorder, testingRequest) + if recorder.Code != 200 { + t.Errorf("r1 wrong status code: expected %d, got %d", 200, recorder.Code) + } + expectedHeader := http.Header{} + expectedHeader.Set("test", "one") + expectedHeader.Set("x-cache-status", "MISS") + if err := compareHeaders(expectedHeader, recorder.Header()); err != nil { + t.Errorf("r1 wrong headers: %s", err.Error()) + } + if recorder.Body.String() != fBody.String() { + t.Errorf("r1 wrong body: expected '%s', got '%s'", fBody.String(), recorder.Body.String()) + } + + recorder = httptest.NewRecorder() + cachebehavior.ServeHTTP(recorder, testingRequest) + if recorder.Code != 400 { + t.Errorf("r2 wrong status code: expected %d, got %d", 200, recorder.Code) + } + expectedHeader = http.Header{} + expectedHeader.Set("test", "two") + expectedHeader.Set("x-cache-status", "ERROR") + if err := compareHeaders(expectedHeader, recorder.Header()); err != nil { + t.Errorf("r2 wrong headers: %s", err.Error()) + } + if recorder.Body.String() != fBody.String() { + t.Errorf("r2 wrong body: expected '%s', got '%s'", fBody.String(), recorder.Body.String()) + } + if fCache.SavingCount() != 0 { + t.Errorf("cache saving count should %d, but have %d", 0, 
fCache.SavingCount()) + } + if fCache.Len() != 0 { + t.Errorf("cache items count should %d, but have %d", 0, fCache.Len()) + } + + recorder = httptest.NewRecorder() + cachebehavior.ServeHTTP(recorder, testingRequest) + if recorder.Code != 200 { + t.Errorf("r3 wrong status code: expected %d, got %d", 200, recorder.Code) + } + expectedHeader = http.Header{} + expectedHeader.Set("test", "one") + expectedHeader.Set("x-cache-status", "MISS") + expectedHeader.Set("cache-control", "public, s-maxage=60") + if err := compareHeaders(expectedHeader, recorder.Header()); err != nil { + t.Errorf("r3 wrong headers: %s", err.Error()) + } + if recorder.Body.String() != fBody.String() { + t.Errorf("r3 wrong body: expected '%s', got '%s'", fBody.String(), recorder.Body.String()) + } + + start := fCache.SavingCount() + timeout := time.NewTimer(time.Second) + for { + time.Sleep(time.Millisecond * 100) + if fCache.SavingCount() > start { + break + } + select { + case <-timeout.C: + t.Fatal("no expected cache savings") + } + } + testingRequestYandex := testingRequest.Clone(context.Background()) + testingRequestYandex.Header.Set("user-agent", "Yandex Bot (http://yandex.com/bots)") + recorder = httptest.NewRecorder() + cachebehavior.ServeHTTP(recorder, testingRequestYandex) + if recorder.Code != 200 { + t.Errorf("r4 wrong status code: expected %d, got %d", 200, recorder.Code) + } + expectedHeader = http.Header{} + expectedHeader.Set("test", "one") + expectedHeader.Set("x-cache-status", "HIT") + expectedHeader.Set("cache-control", "public, s-maxage=60") + if err := compareHeaders(expectedHeader, recorder.Header()); err != nil { + t.Errorf("r4 wrong headers: %s", err.Error()) + } + if recorder.Body.String() != fBody.String() { + t.Errorf("r4 wrong body: expected '%s', got '%s'", fBody.String(), recorder.Body.String()) + } + if fCache.SavingCount() != 1 { + t.Errorf("cache saving count should %d, but have %d", 1, fCache.SavingCount()) + } + if fCache.Len() != 1 { + t.Errorf("cache items count 
should %d, but have %d", 1, fCache.Len()) + } +} + +func requestIsEqualWithoutBody(expected *http.Request, got *http.Request) error { + if expected.Method != got.Method { + return fmt.Errorf("method invalid: expected %s, got %s", expected.Method, got.Method) + } + if expected.URL.String() != got.URL.String() { + return fmt.Errorf("url invalid: expected %s, got %s", expected.URL.String(), got.URL.String()) + } + return compareHeaders(expected.Header, got.Header) +} +func compareHeaders(expected, got http.Header) error { + fmtHeader := func(header http.Header) string { + keys := []string{} + for k := range header { + keys = append(keys, textproto.CanonicalMIMEHeaderKey(k)) + } + sort.Strings(keys) + result := []string{} + for _, k := range keys { + result = append(result, k+" : "+strings.Join(header.Values(k), ", ")) + } + return strings.Join(result, "; ") + } + fmtExpected := fmtHeader(expected) + fmtGot := fmtHeader(got) + if fmtExpected != fmtGot { + return fmt.Errorf("headers not equal: expected \n%s\ngot\n%s", fmtExpected, fmtGot) + } + return nil +} + +func must1[T any](value T, err error) T { + if err != nil { + panic(err) + } + return value +} diff --git a/pkg/logger/http.go b/pkg/logger/http.go new file mode 100644 index 0000000..c0a5556 --- /dev/null +++ b/pkg/logger/http.go @@ -0,0 +1,91 @@ +package logger + +import ( + "context" + "fmt" + "github.com/felixge/httpsnoop" + "github.com/google/uuid" + "github.com/paragor/simple_cdn/pkg/user" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "net/http" + "time" +) + +func HttpSetLoggerMiddleware(forceEmitDebugLogging user.User, handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestId := r.Header.Get("X-Request-ID") + if len(requestId) == 0 { + requestId = uuid.NewString() + r.Header.Set("X-Request-ID", requestId) + } + ctx := r.Context() + if ctx == nil { + ctx = context.Background() + } + var logger *zap.Logger + if 
forceEmitDebugLogging.IsUser(r) { + logger = DebugLogger() + } else { + logger = Logger() + } + r = r.WithContext(ToCtx(logger.With(zap.String("request_id", requestId)), ctx)) + handler.ServeHTTP(w, r) + }) +} + +func HttpLoggingMiddleware(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + log := FromCtx(r.Context()) + if !log.Level().Enabled(zapcore.DebugLevel) { + handler.ServeHTTP(w, r) + return + } + status := 0 + sentByte := 0 + w = httpsnoop.Wrap(w, httpsnoop.Hooks{ + WriteHeader: func(headerFunc httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { + return func(code int) { + status = code + headerFunc(code) + } + }, + Write: func(writeFunc httpsnoop.WriteFunc) httpsnoop.WriteFunc { + return func(b []byte) (int, error) { + s, err := writeFunc(b) + sentByte += s + return s, err + } + }, + }) + start := time.Now() + handler.ServeHTTP(w, r) + log.With( + zap.Int("status_code", status), + zap.Duration("request_duration", time.Now().Sub(start)), + zap.String("request_path", r.URL.Path), + zap.String("method", r.Method), + zap.String("host", r.URL.Host), + zap.String("remote_addr", r.Header.Get("X-Real-Ip")), + zap.String("request_query", r.URL.RawQuery), + zap.String("user_agent", r.Header.Get("User-Agent")), + zap.Int("response_size", sentByte), + ).Debug("handle request") + }) +} + +func HttpRecoveryMiddleware(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if err := recover(); err != nil { + FromCtx(r.Context()). + WithOptions(zap.AddStacktrace(zapcore.ErrorLevel)). + With(zap.Error(fmt.Errorf("%v", err))). 
+ Error("panic on request handler") + w.WriteHeader(http.StatusInternalServerError) + } + }() + + handler.ServeHTTP(w, r) + }) +} diff --git a/pkg/logger/init.go b/pkg/logger/init.go new file mode 100644 index 0000000..abfb833 --- /dev/null +++ b/pkg/logger/init.go @@ -0,0 +1,68 @@ +package logger + +import ( + "context" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "sync" + "sync/atomic" +) + +var doOnce sync.Once +var commonLogger *zap.Logger +var debugLogger *zap.Logger +var inited atomic.Bool + +func Logger() *zap.Logger { + if !inited.Load() { + panic("loger not inited") + } + return commonLogger +} + +// DebugLogger instead of Logger always have debug level +func DebugLogger() *zap.Logger { + if !inited.Load() { + panic("loger not inited") + } + return debugLogger +} + +func Init(app string, loglevel zapcore.Level) { + doOnce.Do(func() { + commonLogger = initLogger(app, loglevel) + debugLogger = initLogger(app, zap.DebugLevel) + inited.Store(true) + }) +} + +func initLogger(app string, loglevel zapcore.Level) *zap.Logger { + config := zap.NewProductionConfig() + config.EncoderConfig.MessageKey = "message" + config.EncoderConfig.TimeKey = "@timestamp" + config.Level.SetLevel(loglevel) + logger, err := config.Build() + if err != nil { + panic("cant build logger: " + err.Error()) + } + logger = logger.WithOptions(zap.AddStacktrace(zapcore.PanicLevel)) + logger = logger.With(zap.String("app", app)) + return logger +} + +type buildXContextKey struct{} + +func ToCtx(logger *zap.Logger, ctx context.Context) context.Context { + return context.WithValue(ctx, buildXContextKey{}, logger) +} + +func FromCtx(ctx context.Context) *zap.Logger { + if ctx == nil { + return Logger() + } + value := ctx.Value(buildXContextKey{}) + if logger, ok := value.(*zap.Logger); ok && logger != nil { + return logger + } + return Logger() +} diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go new file mode 100644 index 0000000..541f7e5 --- /dev/null +++ b/pkg/metrics/metrics.go 
@@ -0,0 +1,47 @@
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+	CacheLoadTime         *prometheus.HistogramVec
+	CacheErrors           prometheus.Counter
+	CacheInvalidations    prometheus.Counter
+	CacheInvalidatedItems prometheus.Counter
+)
+
+func Init(app string) {
+	CacheInvalidations = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: app,
+		Name:      "cache_invalidations",
+		Help:      "cache_invalidations",
+	})
+	prometheus.MustRegister(CacheInvalidations)
+
+	CacheInvalidatedItems = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: app,
+		Name:      "cache_invalidated_items",
+		Help:      "cache_invalidated_items",
+	})
+	prometheus.MustRegister(CacheInvalidatedItems)
+
+	CacheErrors = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace: app,
+		Name:      "cache_errors",
+		Help:      "cache_errors",
+	})
+	prometheus.MustRegister(CacheErrors)
+
+	CacheLoadTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+		Namespace: app,
+		Name:      "cache_load_time",
+		Help:      "cache_load_time",
+	}, []string{"cache_status"})
+	prometheus.MustRegister(CacheLoadTime)
+}
+
+func BoolToString(value bool, trueString, falseString string) string {
+	if value {
+		return trueString
+	}
+	return falseString
+}
diff --git a/pkg/upstream/transport_pool.go b/pkg/upstream/transport_pool.go
new file mode 100644
index 0000000..703cf6f
--- /dev/null
+++ b/pkg/upstream/transport_pool.go
@@ -0,0 +1,95 @@
+package upstream
+
+import (
+	"fmt"
+	"math/rand"
+	"net"
+	"net/http"
+	"sync"
+	"time"
+)
+
+type TransportPoolConfig struct {
+	Size                int           `yaml:"size,omitempty"`
+	MaxIdleConnsPerHost int           `yaml:"max_idle_conns_per_host,omitempty"`
+	IdleConnTimeout     time.Duration `yaml:"idle_conn_timeout"`
+	ConnTimeout         time.Duration `yaml:"conn_timeout"`
+	KeepAliveTimeout    time.Duration `yaml:"keep_alive_timeout"`
+	MaxLifeTime         time.Duration `yaml:"max_life_time"`
+}
+
+func (c *TransportPoolConfig) Validate() error {
+	if c.Size <= 0 {
+		return fmt.Errorf("size should be > 0")
+	}
+	if c.MaxIdleConnsPerHost <= 0 {
+		return fmt.Errorf("max_idle_conns_per_host should be > 0")
+	}
+	if c.IdleConnTimeout <= 0 {
+		return fmt.Errorf("idle_conn_timeout should be > 0")
+	}
+	if c.ConnTimeout <= 0 {
+		return fmt.Errorf("conn_timeout should be > 0")
+	}
+	if c.KeepAliveTimeout <= 0 {
+		return fmt.Errorf("keep_alive_timeout should be > 0")
+	}
+	if c.MaxLifeTime <= 0 {
+		return fmt.Errorf("max_life_time should be > 0")
+	}
+	return nil
+}
+
+type TransportPool struct {
+	config TransportPoolConfig
+
+	sync.Mutex
+	transports []*Transport
+	curr       int
+}
+
+type Transport struct {
+	http.Transport
+	poolDeadLine time.Time
+}
+
+func NewTransportPool(config TransportPoolConfig) *TransportPool {
+	pool := &TransportPool{config: config}
+	for i := 0; i < config.Size; i++ {
+		pool.transports = append(pool.transports, pool.newTransport())
+	}
+
+	return pool
+}
+
+func (pool *TransportPool) RoundTrip(req *http.Request) (*http.Response, error) {
+	return pool.Next().RoundTrip(req)
+}
+
+func (pool *TransportPool) Next() http.RoundTripper {
+	pool.Lock()
+	defer pool.Unlock()
+	pool.curr = (pool.curr + 1) % len(pool.transports)
+
+	// Replace an expired transport BEFORE handing one out, so callers never get
+	// a transport that is already past its deadline (the old code returned it once).
+	if old := pool.transports[pool.curr]; time.Now().After(old.poolDeadLine) {
+		pool.transports[pool.curr] = pool.newTransport()
+		go old.CloseIdleConnections() // release the retired transport's pooled connections; in-flight requests are unaffected
+	}
+
+	return pool.transports[pool.curr]
+}
+
+func (pool *TransportPool) newTransport() *Transport {
+	return &Transport{
+		Transport: http.Transport{
+			DialContext: (&net.Dialer{
+				Timeout:   pool.config.ConnTimeout,
+				KeepAlive: pool.config.KeepAliveTimeout,
+			}).DialContext,
+			MaxIdleConnsPerHost: pool.config.MaxIdleConnsPerHost,
+			IdleConnTimeout:     pool.config.IdleConnTimeout, // BUG FIX: was pool.config.ConnTimeout, silently ignoring the idle_conn_timeout setting
+		},
+		poolDeadLine: time.Now().Add(pool.config.MaxLifeTime + time.Duration(rand.Intn(max(int(pool.config.MaxLifeTime)/10, 1)))), // up to +10% jitter; max(..,1) avoids rand.Intn(0) panic for sub-10ns lifetimes
+	}
+}
diff --git a/pkg/upstream/upstream.go b/pkg/upstream/upstream.go
new file mode 100644
index 0000000..4f3860e
--- /dev/null
+++ b/pkg/upstream/upstream.go
@@ -0,0 +1,150 @@
+package upstream
+
+import (
+	
"bytes" + "context" + "fmt" + "io" + "net/http" + "sync" + "time" +) + +type Config struct { + TransportPoolConfig TransportPoolConfig `yaml:"transport_pool_config"` + Host string `yaml:"host"` + Scheme string `yaml:"scheme"` + RequestTimeout time.Duration `yaml:"request_timeout"` +} + +func (c *Config) Validate() error { + if c.Host == "" { + return fmt.Errorf("host can not be empty") + } + if c.Scheme != "http" && c.Scheme != "https" { + return fmt.Errorf("scheme should have value http or https") + } + if c.RequestTimeout < 0 { + return fmt.Errorf("request_timeout should be >= 0") + } + return c.TransportPoolConfig.Validate() +} + +func (c *Config) CreateUpstream() Upstream { + requestTimeout := c.RequestTimeout + if requestTimeout <= 0 { + requestTimeout = time.Second * 360 + } + + return newSingleHostUpstream( + NewTransportPool(c.TransportPoolConfig), + requestTimeout, + c.Scheme, + c.Host, + ) +} + +type Upstream interface { + Do(originRequest *http.Request) (*http.Response, error) +} + +type singleHostUpstream struct { + pool http.RoundTripper + requestTimeout time.Duration + targetScheme string + targetHost string +} + +func newSingleHostUpstream( + roundTripper http.RoundTripper, + requestTimeout time.Duration, + targetScheme string, + targetHost string, +) Upstream { + return &singleHostUpstream{ + pool: roundTripper, + requestTimeout: requestTimeout, + targetScheme: targetScheme, + targetHost: targetHost, + } +} + +func (u *singleHostUpstream) Do(originRequest *http.Request) (*http.Response, error) { + bufferData, bufferDataClean := getBytesBuffer() + defer bufferDataClean() + + requestBody := bytes.NewBuffer(bufferData) + _, _ = requestBody.ReadFrom(originRequest.Body) + + request := originRequest.Clone(context.Background()) + request.URL.Scheme = u.targetScheme + request.URL.Host = u.targetHost + request.Host = u.targetHost + if requestBody.Len() == 0 { + request.Body = nil + } else { + request.Body = io.NopCloser(requestBody) + } + 
removeHopByHopHeaders(request.Header)
+	if ua := request.Header.Get("User-Agent"); len(ua) == 0 {
+		// If the outbound request doesn't have a User-Agent header set,
+		// don't send the default Go HTTP client User-Agent.
+		request.Header.Set("User-Agent", "")
+	}
+	done := make(chan bool, 1)
+	var response *http.Response
+	var err error
+	timer := time.NewTimer(u.requestTimeout)
+	defer timer.Stop()
+	go func() { // NOTE(review): after a timeout this goroutine may still read requestBody while Do's defer returns the buffer to the pool — TODO: detach the buffer on timeout
+		response, err = u.pool.RoundTrip(request)
+		done <- true
+	}()
+	select {
+	case <-done:
+		return response, err
+	case <-timer.C:
+		go func() { <-done; if response != nil { _ = response.Body.Close() } }(); return nil, fmt.Errorf("request timeout") // drain the late response so its body/connection are not leaked (one line to keep the hunk's line count)
+	}
+}
+
+var bufferPool = sync.Pool{
+	New: func() any {
+		return make([]byte, 0) // NOTE(review): pool hands out zero-cap slices and grown buffers are never Put back — effectively a no-op pool; consider pooling *bytes.Buffer
+	},
+}
+
+func getBytesBuffer() ([]byte, func()) {
+	responseBytesBuffer := bufferPool.Get().([]byte)[:0]
+	return responseBytesBuffer, func() {
+		bufferPool.Put(responseBytesBuffer[:0])
+	}
+}
+
+// Hop-by-hop headers. These are removed when sent to the backend.
+// As of RFC 7230, hop-by-hop headers are required to appear in the
+// Connection header field. These are the headers defined by the
+// obsoleted RFC 2616 (section 13.5.1) and are used for backward
+// compatibility.
+var hopHeaders = []string{
+	"Connection",
+	"Proxy-Connection", // non-standard but still sent by libcurl and rejected by e.g. google
+	"Keep-Alive",
+	"Proxy-Authenticate",
+	"Proxy-Authorization",
+	"Te",      // canonicalized version of "TE"
+	"Trailer", // not Trailers per URL above; https://www.rfc-editor.org/errata_search.php?eid=4522
+	"Transfer-Encoding",
+	"Upgrade",
+
+	"Accept-Encoding",
+}
+
+func removeHopByHopHeaders(h http.Header) {
+	// RFC 2616, section 13.5.1: Remove a set of known hop-by-hop headers.
+	// This behavior is superseded by the RFC 7230 Connection header, but
+	// preserve it for backwards compatibility.
+ for _, f := range hopHeaders { + h.Del(f) + } +} diff --git a/pkg/user/config.go b/pkg/user/config.go new file mode 100644 index 0000000..d545199 --- /dev/null +++ b/pkg/user/config.go @@ -0,0 +1,246 @@ +package user + +import ( + "fmt" +) + +type Config struct { + Any []Config `yaml:"any,omitempty"` + And []Config `yaml:"and,omitempty"` + Not *Config `yaml:"not,omitempty"` + Cookie *struct { + Exists *string `yaml:"exists,omitempty"` + } `yaml:"cookie,omitempty"` + UserAgent *struct { + Pattern *string `yaml:"pattern,omitempty"` + } `yaml:"user_agent,omitempty"` + Header *struct { + Exists *string `yaml:"exists,omitempty"` + Pattern *struct { + Name string `yaml:"name"` + Pattern string `yaml:"pattern"` + } `yaml:"pattern,omitempty"` + } `yaml:"header,omitempty"` + Query *struct { + Count *struct { + Gte int `yaml:"gte"` + Lte int `yaml:"lte"` + } `yaml:"count"` + } `yaml:"query"` + Path *struct { + Pattern *string `yaml:"pattern,omitempty"` + } `yaml:"path,omitempty"` + Always *bool `yaml:"always,omitempty"` + Never *bool `yaml:"never,omitempty"` +} + +func (c *Config) ToUser() User { + if c.Always != nil { + return Always() + } + if c.Never != nil { + return Never() + } + if c.Not != nil { + return Not(c.Not.ToUser()) + } + if c.UserAgent != nil { + if c.UserAgent.Pattern != nil { + return must(UserAgentPattern(*c.UserAgent.Pattern)) + } + } + if c.Cookie != nil { + if c.Cookie.Exists != nil { + return CookieExists(*c.Cookie.Exists) + } + } + if c.Header != nil { + if c.Header.Exists != nil { + return HeaderExists(*c.Header.Exists) + } + if c.Header.Pattern != nil { + return must(HeaderPattern(c.Header.Pattern.Name, c.Header.Pattern.Pattern)) + } + } + if c.And != nil { + users := []User{} + for _, subconfig := range c.And { + users = append(users, subconfig.ToUser()) + } + return And(users...) + } + if c.Any != nil { + users := []User{} + for _, subconfig := range c.Any { + users = append(users, subconfig.ToUser()) + } + return Any(users...) 
+ } + if c.Query != nil { + if c.Query.Count != nil { + return must(QueryCount(c.Query.Count.Gte, c.Query.Count.Lte)) + } + } + if c.Path != nil { + if c.Path.Pattern != nil { + return must(PathPattern(*c.Path.Pattern)) + } + } + + panic("config to user: empty config") +} + +func must[T any](value T, err error) T { + if err != nil { + panic(fmt.Errorf("user: %w", err)) + } + return value +} + +func (c *Config) Validate() error { + foundField := "" + if c.Any != nil { + field := "any" + foundField = field + + for _, item := range c.Any { + if err := item.Validate(); err != nil { + return fmt.Errorf("%s: %w", field, err) + } + } + } + if c.Always != nil { + field := "always" + if len(foundField) > 0 { + return fmt.Errorf("need specify only 1 field, found 2: %s and %s", field, foundField) + } + foundField = field + } + if c.Not != nil { + field := "not" + if len(foundField) > 0 { + return fmt.Errorf("need specify only 1 field, found 2: %s and %s", field, foundField) + } + foundField = field + + if err := c.Not.Validate(); err != nil { + return fmt.Errorf("not: %w", err) + } + } + if c.Never != nil { + field := "never" + if len(foundField) > 0 { + return fmt.Errorf("need specify only 1 field, found 2: %s and %s", field, foundField) + } + foundField = field + } + + if c.And != nil { + field := "and" + if len(foundField) > 0 { + return fmt.Errorf("need specify only 1 field, found 2: %s and %s", field, foundField) + } + foundField = field + + for _, item := range c.And { + if err := item.Validate(); err != nil { + return fmt.Errorf("%s: %w", field, err) + } + } + } + if c.Cookie != nil { + field := "cookie" + if len(foundField) > 0 { + return fmt.Errorf("need specify only 1 field, found 2: %s and %s", field, foundField) + } + foundField = field + + optionFound := false + if c.Cookie.Exists != nil && len(*c.Cookie.Exists) != 0 { + optionFound = true + } + if !optionFound { + return fmt.Errorf("field %s required set 'exists' field", field) + } + } + if c.Header != nil { + 
field := "header" + if len(foundField) > 0 { + return fmt.Errorf("need specify only 1 field, found 2: %s and %s", field, foundField) + } + foundField = field + + optionFound := false + if c.Header.Exists != nil && len(*c.Header.Exists) != 0 { + optionFound = true + } + if c.Header.Pattern != nil && len(c.Header.Pattern.Name) != 0 { + optionFound = true + if _, err := HeaderPattern(c.Header.Pattern.Name, c.Header.Pattern.Pattern); err != nil { + return fmt.Errorf("field %s: %w", field, err) + } + } + if !optionFound { + return fmt.Errorf("field %s required set 'exists' field", field) + } + } + if c.UserAgent != nil { + field := "user_agent" + if len(foundField) > 0 { + return fmt.Errorf("need specify only 1 field, found 2: %s and %s", field, foundField) + } + foundField = field + + optionFound := false + if c.UserAgent.Pattern != nil && len(*c.UserAgent.Pattern) != 0 { + optionFound = true + if _, err := UserAgentPattern(*c.UserAgent.Pattern); err != nil { + return fmt.Errorf("field %s: %w", field, err) + } + } + if !optionFound { + return fmt.Errorf("field %s required set 'pattern' field", field) + } + } + if c.Query != nil { + field := "query" + if len(foundField) > 0 { + return fmt.Errorf("need specify only 1 field, found 2: %s and %s", field, foundField) + } + foundField = field + + optionFound := false + if c.Query.Count != nil { + optionFound = true + if _, err := QueryCount(c.Query.Count.Gte, c.Query.Count.Lte); err != nil { + return fmt.Errorf("field %s: %w", field, err) + } + } + if !optionFound { + return fmt.Errorf("field %s required set 'count' field", field) + } + } + if c.Path != nil { + field := "path" + if len(foundField) > 0 { + return fmt.Errorf("need specify only 1 field, found 2: %s and %s", field, foundField) + } + foundField = field + + optionFound := false + if c.Path.Pattern != nil && len(*c.Path.Pattern) != 0 { + optionFound = true + if _, err := PathPattern(*c.Path.Pattern); err != nil { + return fmt.Errorf("field %s: %w", field, err) + } + 
} + if !optionFound { + return fmt.Errorf("field %s required set 'pattern' field", field) + } + } + + if len(foundField) == 0 { + return fmt.Errorf("empty config") + } + return nil +} diff --git a/pkg/user/cookie.go b/pkg/user/cookie.go new file mode 100644 index 0000000..fc1770e --- /dev/null +++ b/pkg/user/cookie.go @@ -0,0 +1,26 @@ +package user + +import ( + "net/http" + "strings" +) + +type cookieExists struct { + name string +} + +func CookieExists(name string) User { + return &cookieExists{name: strings.ToLower(name)} +} + +func (u *cookieExists) IsUser(r *http.Request) bool { + for _, cookie := range r.Cookies() { + if strings.ToLower(cookie.Name) == u.name { + return true + } + } + return false +} +func (u *cookieExists) String() string { + return "cookie.exists = " + u.name +} diff --git a/pkg/user/header.go b/pkg/user/header.go new file mode 100644 index 0000000..e27bd0a --- /dev/null +++ b/pkg/user/header.go @@ -0,0 +1,53 @@ +package user + +import ( + "fmt" + "net/http" + "regexp" + "strings" +) + +type headerExists struct { + name string +} + +func HeaderExists(name string) User { + return &headerExists{name: strings.ToLower(name)} +} + +func (u *headerExists) IsUser(r *http.Request) bool { + for header := range r.Header { + if u.name == strings.ToLower(header) { + return true + } + } + return false +} +func (u *headerExists) String() string { + return "header.exists = " + u.name +} + +type headerPattern struct { + name string + re *regexp.Regexp +} + +func HeaderPattern(name string, pattern string) (User, error) { + re, err := regexp.Compile(pattern) + if err != nil { + return nil, fmt.Errorf("invalid pattern: %w", err) + } + return &headerPattern{name: strings.ToLower(name), re: re}, nil +} + +func (u *headerPattern) IsUser(r *http.Request) bool { + for header := range r.Header { + if u.name == strings.ToLower(header) && u.re.MatchString(r.Header.Get(u.name)) { + return true + } + } + return false +} +func (u *headerPattern) String() string { + 
return fmt.Sprintf("header.pattern = %s match '%s'", u.name, u.re.String()) +} diff --git a/pkg/user/logical.go b/pkg/user/logical.go new file mode 100644 index 0000000..3a567ba --- /dev/null +++ b/pkg/user/logical.go @@ -0,0 +1,109 @@ +package user + +import ( + "net/http" + "strings" +) + +type anyChain struct { + users []User +} + +func Any(users ...User) User { + return &anyChain{users: users} +} + +func (u *anyChain) IsUser(r *http.Request) bool { + for _, user := range u.users { + if user.IsUser(r) { + return true + } + } + return false +} + +func (u *anyChain) String() string { + children := make([]string, 0, len(u.users)) + for _, user := range u.users { + children = append(children, addIndentToOutput(user.String())) + } + return "any = \n" + strings.Join(children, "\n") +} + +type andChain struct { + users []User +} + +func And(users ...User) User { + return &andChain{users: users} +} + +func (u *andChain) IsUser(r *http.Request) bool { + for _, user := range u.users { + if !user.IsUser(r) { + return false + } + } + return true +} + +func (u *andChain) String() string { + children := make([]string, 0, len(u.users)) + for _, user := range u.users { + children = append(children, addIndentToOutput(user.String())) + } + return "and = \n" + strings.Join(children, "\n") +} + +type not struct { + user User +} + +func Not(user User) User { + return ¬{user: user} +} + +func (u *not) IsUser(r *http.Request) bool { + return !u.user.IsUser(r) +} +func (u *not) String() string { + return "not = \n" + addIndentToOutput(u.user.String()) +} + +type always struct { +} + +func Always() User { + return &always{} +} + +func (u *always) IsUser(_ *http.Request) bool { + return true +} + +func (u *always) String() string { + return "always" +} + +type never struct { +} + +func Never() User { + return &never{} +} + +func (u *never) String() string { + return "never" +} + +func (u *never) IsUser(_ *http.Request) bool { + return false +} + +func addIndentToOutput(str string) string 
{ + lines := strings.Split(str, "\n") + for i, line := range lines { + lines[i] = " " + line + } + return strings.Join(lines, "\n") +} diff --git a/pkg/user/path.go b/pkg/user/path.go new file mode 100644 index 0000000..7d8c15d --- /dev/null +++ b/pkg/user/path.go @@ -0,0 +1,26 @@ +package user + +import ( + "fmt" + "net/http" + "regexp" +) + +type pathPattern struct { + re *regexp.Regexp +} + +func (u *pathPattern) IsUser(r *http.Request) bool { + return u.re.MatchString(r.URL.Path) +} + +func PathPattern(pattern string) (User, error) { + re, err := regexp.Compile(pattern) + if err != nil { + return nil, fmt.Errorf("invalid pattern: %w", err) + } + return &pathPattern{re: re}, nil +} +func (u *pathPattern) String() string { + return "path.pattern = " + u.re.String() +} diff --git a/pkg/user/query.go b/pkg/user/query.go new file mode 100644 index 0000000..a000368 --- /dev/null +++ b/pkg/user/query.go @@ -0,0 +1,26 @@ +package user + +import ( + "fmt" + "net/http" +) + +type queryCount struct { + gte int + lte int +} + +func QueryCount(gte int, lte int) (User, error) { + if lte < gte { + return nil, fmt.Errorf("should be lte >= gte") + } + return &queryCount{gte: gte, lte: lte}, nil +} + +func (u *queryCount) IsUser(r *http.Request) bool { + count := len(r.URL.Query()) + return count >= u.gte && count <= u.lte +} +func (u *queryCount) String() string { + return fmt.Sprintf("query.count = [%d, %d]", u.gte, u.lte) +} diff --git a/pkg/user/query_test.go b/pkg/user/query_test.go new file mode 100644 index 0000000..5f37a91 --- /dev/null +++ b/pkg/user/query_test.go @@ -0,0 +1,84 @@ +package user + +import ( + "net/http" + "net/url" + "testing" +) + +func Test_queryCount_IsUser(t *testing.T) { + type fields struct { + gte int + lte int + } + type args struct { + query url.Values + } + tests := []struct { + name string + fields fields + args args + want bool + }{ + { + name: "[1,10] and 0", + fields: fields{ + gte: 1, + lte: 10, + }, + args: args{ + query: 
map[string][]string{}, + }, + want: false, + }, + { + name: "[0,1] and 1", + fields: fields{ + gte: 0, + lte: 1, + }, + args: args{ + query: map[string][]string{"one": {"1", "2"}}, + }, + want: true, + }, + { + name: "[0,1] and 0", + fields: fields{ + gte: 0, + lte: 1, + }, + args: args{ + query: map[string][]string{}, + }, + want: true, + }, + { + name: "[0,1] and 2", + fields: fields{ + gte: 0, + lte: 1, + }, + args: args{ + query: map[string][]string{"one": {"1", "2"}, "two": {"3"}}, + }, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + u := &queryCount{ + gte: tt.fields.gte, + lte: tt.fields.lte, + } + rawQuery := tt.args.query.Encode() + r, err := http.NewRequest("GET", "http://127.0.0.1?"+rawQuery, nil) + if err != nil { + t.Fatal("cant create request", err.Error()) + } + if got := u.IsUser(r); got != tt.want { + t.Errorf("IsUser() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/user/user.go b/pkg/user/user.go new file mode 100644 index 0000000..72ed40a --- /dev/null +++ b/pkg/user/user.go @@ -0,0 +1,10 @@ +package user + +import ( + "net/http" +) + +type User interface { + IsUser(r *http.Request) bool + String() string +} diff --git a/pkg/user/user_agent.go b/pkg/user/user_agent.go new file mode 100644 index 0000000..ec5a410 --- /dev/null +++ b/pkg/user/user_agent.go @@ -0,0 +1,26 @@ +package user + +import ( + "fmt" + "net/http" + "regexp" +) + +type userAgent struct { + re *regexp.Regexp +} + +func (u *userAgent) IsUser(r *http.Request) bool { + return u.re.MatchString(r.Header.Get("User-Agent")) +} + +func UserAgentPattern(pattern string) (User, error) { + re, err := regexp.Compile(pattern) + if err != nil { + return nil, fmt.Errorf("invalid pattern: %w", err) + } + return &userAgent{re: re}, nil +} +func (u *userAgent) String() string { + return "user_agent.pattern = " + u.re.String() +} diff --git a/readme.md b/readme.md new file mode 100644 index 0000000..0361d94 --- /dev/null +++ b/readme.md 
@@ -0,0 +1,62 @@
+# Simple CDN
+
+`simple_cdn` is a lightweight Content Delivery Network (CDN) server written in Go.
+It provides caching, request proxying, and diagnostic features.
+This application is designed to handle HTTP requests, cache responses based on configurable rules,
+and serve cached content to reduce the load on the upstream servers.
+
+# Features
+* Caching: Supports response caching with configurable rules for persistence and retrieval.
+* Proxying: Forwards requests to an upstream server when caching is not applicable.
+* Diagnostics: Includes a diagnostic server for health checks, metrics, and profiling.
+* Logging and Metrics: Integrated logging and Prometheus metrics for observability.
+
+# Installation
+To build the simple_cdn server, you need to have Go installed. Clone the repository and run the following command:
+
+```bash
+go build -o simple_cdn main.go
+```
+
+Alternatively, you can use the docker image `paragor/simple_cdn`, which supports both arm64 and amd64 architectures.
+
+
+# Usage
+Run the server with the following command:
+
+```
+LOG_LEVEL=info simple_cdn -config /path/to/config.yaml
+```
+## Command-Line Options
+* `-config`: Path to the YAML configuration file (required).
+* `-check-config`: Validates the configuration file and exits without starting the server.
+
+# Configuration
+See examples in [./examples/*.yaml](./examples)
+
+## Configuration Parameters
+- `listen_addr`: Address for the main server to listen on.
+- `diagnostic_addr`: Address for the diagnostic server to listen on.
+- `can_persist_cache`: Conditions under which responses can be cached.
+- `can_load_cache`: Conditions under which cached responses can be served.
+- `can_force_emit_debug_logging`: Conditions under which debug logging is forced.
+- `cache`: Cache backend configuration (e.g., Redis).
+- `cache_key_config`: Configuration for cache key generation based on cookies, headers, and query parameters.
+- `upstream`: Configuration for the upstream server to which uncached requests are forwarded.
+
+# Diagnostic Server
+The diagnostic server provides the following endpoints:
+
+- `/readyz`: Readiness probe endpoint.
+- `/healthz`: Health check endpoint.
+- `/invalidate`: Endpoint to invalidate cached content based on a pattern (e.g. `/invalidate?pattern=/static/*` — a glob-style pattern, not a regexp).
+- `/metrics`: Prometheus metrics endpoint.
+- `/debug/pprof/`: pprof profiling endpoints for performance diagnostics.
+
+# Logging
+`simple_cdn` uses structured logging via `zap` for different log levels.
+The log level can be configured through the `LOG_LEVEL` environment variable (`info`, `debug`, `error`, `warn`).
+
+# Metrics
+`simple_cdn` integrates with Prometheus for metrics collection.
+Metrics are exposed at the `/metrics` endpoint of the diagnostic server.