diff --git a/.env.sample b/.env.sample
index 67fa7619..fc366004 100644
--- a/.env.sample
+++ b/.env.sample
@@ -1,55 +1,147 @@
-# Tenant Configuration
+#######################################
+# Server
+#######################################
+SERVER_HOST=0.0.0.0
+SERVER_PORT=5000
+SERVER_ADMIN_PORT=5001
+SERVER_KEEP_ALIVE_TIMEOUT=61
+SERVER_HEADERS_TIMEOUT=65
+SERVER_REGION=region-of-where-your-service-is-running
+
+
+#######################################
+# Auth
+#######################################
+AUTH_JWT_SECRET=f023d3db-39dc-4ac9-87b2-b2be72e9162b
+AUTH_JWT_ALGORITHM=HS256
+AUTH_ENCRYPTION_KEY=encryptionkey
+
+
+#######################################
+# Single Tenant
+#######################################
+TENANT_ID=bjhaohmqunupljrqypxz
 ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoiYW5vbiIsImlhdCI6MTYxMzUzMTk4NSwiZXhwIjoxOTI5MTA3OTg1fQ.mqfi__KnQB4v6PkIjkhzfwWrYyF94MEbSC6LnuvVniE
 SERVICE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoic2VydmljZV9yb2xlIiwiaWF0IjoxNjEzNTMxOTg1LCJleHAiOjE5MjkxMDc5ODV9.th84OKK0Iz8QchDyXZRrojmKSEZ-OuitQm_5DvLiSIc
-TENANT_ID=bjhaohmqunupljrqypxz
-PGRST_JWT_SECRET=f023d3db-39dc-4ac9-87b2-b2be72e9162b
 
-# Database configuration
+
+#######################################
+# Multi Tenancy
+# uncomment MULTI_TENANT=true to enable multi tenancy
+# the [Single Tenant] configurations will be ignored
+#######################################
+# MULTI_TENANT=true
+DATABASE_MULTITENANT_URL=postgresql://postgres:postgres@127.0.0.1:5433/postgres
+REQUEST_X_FORWARDED_HOST_REGEXP=
+ADMIN_API_KEYS=apikey
+
+
+#######################################
+# Database
+#######################################
 DATABASE_URL=postgresql://postgres:postgres@127.0.0.1/postgres
 DATABASE_POOL_URL=postgresql://postgres:postgres@127.0.0.1:6453/postgres
+DATABASE_CONNECTION_TIMEOUT=3000
+DATABASE_SEARCH_PATH=
+
+## When DATABASE_POOL_URL is SET the following params are ignored
+DATABASE_MAX_CONNECTIONS=20
+DATABASE_FREE_POOL_AFTER_INACTIVITY=60000
+
+#######################################
+# Database - Migrations
+#######################################
+DB_INSTALL_ROLES=true
+DB_ANON_ROLE=anon
+DB_SERVICE_ROLE=service_role
+DB_AUTHENTICATED_ROLE=authenticated
+DB_SUPER_USER=postgres
+DB_ALLOW_MIGRATION_REFRESH=true
 
-# Upload configuration
-FILE_SIZE_LIMIT=52428800
 
-# Storage
+#######################################
+# Uploads
+#######################################
+UPLOAD_FILE_SIZE_LIMIT=524288000
+UPLOAD_FILE_SIZE_LIMIT_STANDARD=52428800
+UPLOAD_SIGNED_URL_EXPIRATION_TIME=60
+
+TUS_URL_PATH=/upload/resumable
+TUS_URL_EXPIRY_MS=3600000
+
+
+#######################################
+# Storage Backend Driver
+# values: s3 | file
+#######################################
 STORAGE_BACKEND=s3
-GLOBAL_S3_BUCKET=name-of-your-s3-bucket
 
-# S3 Configuration
-REGION=region-of-your-s3-bucket
-GLOBAL_S3_ENDPOINT=http://127.0.0.1:9000
-GLOBAL_S3_PROTOCOL=http
+#######################################
+# S3 Backend
+#######################################
+STORAGE_S3_BUCKET=name-of-your-s3-bucket
+STORAGE_S3_MAX_SOCKETS=200
+STORAGE_S3_ENDPOINT=http://127.0.0.1:9000
+STORAGE_S3_FORCE_PATH_STYLE=true
+STORAGE_S3_REGION=us-east-1
 
-# Minio Configuration (if using Minio)
-GLOBAL_S3_FORCE_PATH_STYLE=true
 AWS_ACCESS_KEY_ID=supa-storage
 AWS_SECRET_ACCESS_KEY=secret1234
-AWS_DEFAULT_REGION=ap-southeast-1
-
-# Local File Storage Configuration
-FILE_STORAGE_BACKEND_PATH=./data
 
-# Multitenant
-IS_MULTITENANT=false
-MULTITENANT_DATABASE_URL=postgresql://postgres:postgres@127.0.0.1:5433/postgres
-X_FORWARDED_HOST_REGEXP=
-ADMIN_API_KEYS=apikey
-ENCRYPTION_KEY=encryptionkey
+#######################################
+# File Backend
+#######################################
+STORAGE_FILE_BACKEND_PATH=./data
 
-# Logs
-LOGFLARE_ENABLED=false
-LOGFLARE_API_KEY=api_key
-LOGFLARE_SOURCE_TOKEN=source_token
+#######################################
 # Image Transformation
-ENABLE_IMAGE_TRANSFORMATION=true
+#######################################
+IMAGE_TRANSFORMATION_ENABLED=true
+IMAGE_TRANSFORMATION_LIMIT_MIN_SIZE=0
+IMAGE_TRANSFORMATION_LIMIT_MAX_SIZE=2000
+
 IMGPROXY_URL=http://localhost:50020
+IMGPROXY_REQUEST_TIMEOUT=15
+IMGPROXY_HTTP_MAX_SOCKETS=500
+
+#######################################
+# Image Transformation - Rate Limiter
+# values: redis | memory
+#######################################
+RATE_LIMITER_ENABLED=false
+RATE_LIMITER_DRIVER=redis
+RATE_LIMITER_REDIS_URL=localhost:6379
+RATE_LIMITER_REDIS_CONNECT_TIMEOUT=5
+RATE_LIMITER_RENDER_PATH_MAX_REQ_SEC=5
+RATE_LIMITER_REDIS_COMMAND_TIMEOUT=2
+
+#######################################
 # Queue
+# When PG_QUEUE_CONNECTION_URL is not SET it will use the DATABASE_URL
+# (or DATABASE_MULTITENANT_URL when MULTI_TENANT=true)
+#######################################
+PG_QUEUE_ENABLE=false
+PG_QUEUE_DELETE_AFTER_DAYS=2
+PG_QUEUE_ARCHIVE_COMPLETED_AFTER_SECONDS=7200
+PG_QUEUE_RETENTION_DAYS=2
+PG_QUEUE_CONNECTION_URL=
+
+
+#######################################
+# Webhooks
+#######################################
 WEBHOOK_URL=
-ENABLE_QUEUE_EVENTS=false
+WEBHOOK_API_KEY=
 
-# Rate Limiter
-ENABLE_RATE_LIMITER=true
-RATE_LIMITER_DRIVER=redis
-RATE_LIMITER_REDIS_URL=localhost:6379
\ No newline at end of file
+
+#######################################
+# Monitoring
+#######################################
+DEFAULT_METRICS_ENABLED=true
+LOG_LEVEL=info
+
+LOGFLARE_ENABLED=false
+LOGFLARE_API_KEY=api_key
+LOGFLARE_SOURCE_TOKEN=source_token
diff --git a/docker-compose-infra.yml b/docker-compose-infra.yml
new file mode 100644
index 00000000..f3ffc3c1
--- /dev/null
+++ b/docker-compose-infra.yml
@@ -0,0 +1,165 @@
+# docker-compose-infra.yml
+
+version: '3'
+services:
+
+  tenant_db:
+    image: postgres:15
+    ports:
+      - '5432:5432'
+    healthcheck:
+      test: [ "CMD-SHELL", "pg_isready", "-d", "postgres" ]
+      interval: 50s
+      timeout: 60s
+      retries: 5
+    environment:
+      POSTGRES_DB: postgres
+      POSTGRES_USER: postgres
+      POSTGRES_PASSWORD: postgres
+      POSTGRES_PORT: 5432
+
+  multitenant_db:
+    image: postgres:15
+    ports:
+      - '5433:5432'
+    configs:
+      - source: init.sql
+        target: /docker-entrypoint-initdb.d/init.sql
+    healthcheck:
+      test: [ "CMD-SHELL", "pg_isready", "-d", "postgres" ]
+      interval: 50s
+      timeout: 60s
+      retries: 5
+    environment:
+      POSTGRES_DB: postgres
+      POSTGRES_USER: postgres
+      POSTGRES_PASSWORD: postgres
+
+  pg_bouncer:
+    image: bitnami/pgbouncer:latest
+    ports:
+      - 6453:6432
+    environment:
+      POSTGRESQL_USERNAME: postgres
+      POSTGRESQL_HOST: tenant_db
+      POSTGRESQL_PASSWORD: postgres
+      PGBOUNCER_POOL_MODE: transaction
+      PGBOUNCER_IGNORE_STARTUP_PARAMETERS: "extra_float_digits, options"
+      PGBOUNCER_STATS_USERS: postgres
+
+  supavisor:
+    image: supabase/supavisor:1.1.6
+    depends_on:
+      multitenant_db:
+        condition: service_healthy
+      tenant_db:
+        condition: service_healthy
+    ports:
+      - 4000:4000
+      - 5452:5452
+      - 6543:6543
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:4000/api/health"]
+      interval: 2s
+      timeout: 10s
+      retries: 5
+    environment:
+      PORT: 4000
+      PROXY_PORT_SESSION: 5452
+      PROXY_PORT_TRANSACTION: 6543
+      DATABASE_URL: "ecto://postgres:postgres@multitenant_db:5432/postgres"
+      CLUSTER_POSTGRES: "true"
+      SECRET_KEY_BASE: "12345678901234567890121234567890123456789012345678903212345678901234567890123456789032123456789012345678901234567890323456789032"
+      VAULT_ENC_KEY: "12345678901234567890123456789032"
+      API_JWT_SECRET: "dev"
+      METRICS_JWT_SECRET: "dev"
+      REGION: "local"
+      ERL_AFLAGS: -proto_dist inet_tcp
+    command: sh -c "/app/bin/migrate && /app/bin/server"
+
+  supavisor_setup:
+    image: supabase/supavisor:1.1.6
+    command: |
+      curl -X PUT \
+        "http://supavisor:4000/api/tenants/bjhaohmqunupljrqypxz" \
+        --header "Accept: application/json" \
+        --header "Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJvbGUiOiJhbm9uIiwiaWF0IjoxNjQ1MTkyODI0LCJleHAiOjE5NjA3Njg4MjR9.M9jrxyvPLkUxWgOYSf5dNdJ8v_eRrq810ShFRT8N-6M" \
+        --header "Content-Type: application/json" \
+        --data-raw "{
+          \"tenant\": {
+            \"db_host\": \"tenant_db\",
+            \"db_port\": 5432,
+            \"db_database\": \"postgres\",
+            \"ip_version\": \"auto\",
+            \"require_user\": true,
+            \"upstream_ssl\": false,
+            \"enforce_ssl\": false,
+            \"default_max_clients\": 200,
+            \"default_pool_size\": 15,
+            \"users\": [
+              {
+                \"db_user\": \"postgres\",
+                \"db_password\": \"postgres\",
+                \"mode_type\": \"transaction\",
+                \"pool_size\": 15,
+                \"pool_checkout_timeout\": 100
+              }
+            ]
+          }
+        }"
+    depends_on:
+      supavisor:
+        condition: service_healthy
+
+  minio:
+    image: minio/minio
+    ports:
+      - '9000:9000'
+      - '9001:9001'
+    healthcheck:
+      test: timeout 5s bash -c ':> /dev/tcp/127.0.0.1/9000' || exit 1
+      interval: 10s
+      timeout: 5s
+      retries: 2
+    environment:
+      MINIO_ROOT_USER: supa-storage
+      MINIO_ROOT_PASSWORD: secret1234
+    command: server --console-address ":9001" /data
+
+  minio_setup:
+    image: minio/mc
+    depends_on:
+      minio:
+        condition: service_healthy
+    entrypoint: >
+      /bin/sh -c "
+      /usr/bin/mc alias set supa-minio http://minio:9000 supa-storage secret1234;
+      /usr/bin/mc mb supa-minio/supa-storage-bucket;
+      exit 0;
+      "
+
+  imgproxy:
+    image: darthsim/imgproxy
+    ports:
+      - '50020:8080'
+    volumes:
+      - ./data:/images/data
+    environment:
+      - IMGPROXY_WRITE_TIMEOUT=20
+      - IMGPROXY_READ_TIMEOUT=20
+      - IMGPROXY_REQUESTS_QUEUE_SIZE=24
+      - IMGPROXY_LOCAL_FILESYSTEM_ROOT=/images
+      - IMGPROXY_USE_ETAG=true
+      - IMGPROXY_ENABLE_WEBP_DETECTION=true
+
+  # Optional for rate-limiting
+  redis:
+    image: redis:6.2-alpine
+    restart: always
+    ports:
+      - '6379:6379'
+
+configs:
+  init.sql:
+    content: |
+      CREATE SCHEMA IF NOT EXISTS _supavisor;
\ No newline at end of file
diff --git a/docker-compose-multi-tenant.yml b/docker-compose-multi-tenant.yml
new file mode 100644
index 00000000..38565b63
--- /dev/null
+++ b/docker-compose-multi-tenant.yml
@@ -0,0 +1,102 @@
+# docker-compose-multi-tenant.yml
+
+version: '3'
+services:
+  storage:
+    image: supabase/storage-api:latest
+    ports:
+      - '5000:5000'
+      - '5001:5001'
+    depends_on:
+      tenant_db:
+        condition: service_healthy
+      multitenant_db:
+        condition: service_healthy
+      supavisor:
+        condition: service_started
+      minio_setup:
+        condition: service_completed_successfully
+    environment:
+      # Server
+      SERVER_PORT: 5000
+      SERVER_REGION: local
+      # Auth
+      AUTH_JWT_SECRET: f023d3db-39dc-4ac9-87b2-b2be72e9162b
+      AUTH_JWT_ALGORITHM: HS256
+      AUTH_ENCRYPTION_KEY: encryptionkey
+      # Multi tenant Mode
+      MULTI_TENANT: true
+      DATABASE_MULTITENANT_URL: postgresql://postgres:postgres@multitenant_db:5432/postgres
+      SERVER_ADMIN_API_KEYS: apikey
+      SERVER_ADMIN_PORT: 5001
+      REQUEST_X_FORWARDED_HOST_REGEXP: "^([a-z]{20}).local.(?:com|dev)$"
+      # Migrations
+      DB_INSTALL_ROLES: true # set to false if you want to manage roles yourself
+      # Storage
+      STORAGE_BACKEND: s3
+      STORAGE_S3_BUCKET: supa-storage-bucket # name of s3 bucket where you want to store objects
+      STORAGE_S3_ENDPOINT: http://minio:9000
+      STORAGE_S3_FORCE_PATH_STYLE: "true"
+      STORAGE_S3_REGION: us-east-1
+      AWS_ACCESS_KEY_ID: supa-storage
+      AWS_SECRET_ACCESS_KEY: secret1234
+      # Upload
+      UPLOAD_FILE_SIZE_LIMIT: 524288000
+      UPLOAD_FILE_SIZE_LIMIT_STANDARD: 52428800
+      UPLOAD_SIGNED_URL_EXPIRATION_TIME: 120
+      TUS_URL_PATH: /upload/resumable
+      TUS_URL_EXPIRY_MS: 3600000
+      # Image Transformation
+      IMAGE_TRANSFORMATION_ENABLED: "true"
+      IMGPROXY_URL: http://imgproxy:8080
+      IMGPROXY_REQUEST_TIMEOUT: 15
+
+  tenant_db:
+    extends:
+      service: tenant_db
+      file: docker-compose-infra.yml
+
+  multitenant_db:
+    extends:
+      service: multitenant_db
+      file: docker-compose-infra.yml
+
+  supavisor:
+    extends:
+      service: supavisor
+      file: docker-compose-infra.yml
+
+  supavisor_setup:
+    depends_on:
+      supavisor:
+        condition: service_healthy
+    extends:
+      service: supavisor_setup
+      file: docker-compose-infra.yml
+
+  minio:
+    extends:
+      service: minio
+      file: docker-compose-infra.yml
+
+  minio_setup:
+    extends:
+      service: minio_setup
+      file: docker-compose-infra.yml
+    depends_on:
+      - minio
+
+  imgproxy:
+    extends:
+      service: imgproxy
+      file: docker-compose-infra.yml
+
+# Optional for rate-limiting
+# redis:
+#   extends:
+#     service: redis
+#     file: docker-compose-infra.yml
+
+configs:
+  init.sql:
+    content: "CREATE SCHEMA IF NOT EXISTS _supavisor;"
diff --git a/docker-compose.yml b/docker-compose.yml
index 36cb0948..62b368f3 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -2,81 +2,83 @@
 version: '3'
 services:
-  db:
-    image: postgres:13
+  storage:
+    image: supabase/storage-api:latest
     ports:
-      - '5432:5432'
-    healthcheck:
-      test: [ "CMD-SHELL", "pg_isready", "-d", "postgres" ]
-      interval: 50s
-      timeout: 60s
-      retries: 5
-    environment:
-      POSTGRES_DB: postgres
-      POSTGRES_USER: postgres
-      POSTGRES_PASSWORD: postgres
-      POSTGRES_PORT: 5432
-
-  pg_bouncer:
-    image: bitnami/pgbouncer:latest
-    ports:
-      - '6453:6432'
-    environment:
-      POSTGRESQL_USERNAME: postgres
-      POSTGRESQL_HOST: db
-      POSTGRESQL_PASSWORD: postgres
-      PGBOUNCER_POOL_MODE: transaction
-      PGBOUNCER_IGNORE_STARTUP_PARAMETERS: "extra_float_digits, options"
-      PGBOUNCER_STATS_USERS: postgres
+      - '5000:5000'
+      - '5001:5001'
     depends_on:
-      - db
-
-  multitenant_db:
-    image: postgres:13
-    ports:
-      - '5433:5432'
+      tenant_db:
+        condition: service_healthy
+      pg_bouncer:
+        condition: service_started
+      minio_setup:
+        condition: service_completed_successfully
     environment:
-      POSTGRES_DB: postgres
-      POSTGRES_USER: postgres
-      POSTGRES_PASSWORD: postgres
+      # Server
+      SERVER_PORT: 5000
+      SERVER_REGION: local
+      # Auth
+      AUTH_JWT_SECRET: f023d3db-39dc-4ac9-87b2-b2be72e9162b
+      AUTH_JWT_ALGORITHM: HS256
+      AUTH_ENCRYPTION_KEY: encryptionkey
+      # Single tenant Mode
+      TENANT_ID: bjwdssmqcnupljrqypxz
+      ANON_KEY: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoiYW5vbiIsImlhdCI6MTYxMzUzMTk4NSwiZXhwIjoxOTI5MTA3OTg1fQ.mqfi__KnQB4v6PkIjkhzfwWrYyF94MEbSC6LnuvVniE
+      SERVICE_KEY: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoic2VydmljZV9yb2xlIiwiaWF0IjoxNjEzNTMxOTg1LCJleHAiOjE5MjkxMDc5ODV9.th84OKK0Iz8QchDyXZRrojmKSEZ-OuitQm_5DvLiSIc
+      DATABASE_URL: postgres://postgres:postgres@tenant_db:5432/postgres
+      DATABASE_POOL_URL: postgresql://postgres:postgres@pg_bouncer:6432/postgres
+      # Migrations
+      DB_INSTALL_ROLES: true # set to false if you want to manage roles yourself
+      # Storage
+      STORAGE_BACKEND: s3
+      STORAGE_S3_BUCKET: supa-storage-bucket # name of s3 bucket where you want to store objects
+      STORAGE_S3_ENDPOINT: http://minio:9000
+      STORAGE_S3_FORCE_PATH_STYLE: "true"
+      STORAGE_S3_REGION: us-east-1
+      AWS_ACCESS_KEY_ID: supa-storage
+      AWS_SECRET_ACCESS_KEY: secret1234
+      # Upload
+      UPLOAD_FILE_SIZE_LIMIT: 524288000
+      UPLOAD_FILE_SIZE_LIMIT_STANDARD: 52428800
+      UPLOAD_SIGNED_URL_EXPIRATION_TIME: 120
+      TUS_URL_PATH: /upload/resumable
+      TUS_URL_EXPIRY_MS: 3600000
+      # Image Transformation
+      IMAGE_TRANSFORMATION_ENABLED: "true"
+      IMGPROXY_URL: http://imgproxy:8080
+      IMGPROXY_REQUEST_TIMEOUT: 15
 
-  redis:
-    image: redis:6.2-alpine
-    restart: always
-    ports:
-      - '6379:6379'
+  tenant_db:
+    extends:
+      service: tenant_db
+      file: docker-compose-infra.yml
+
+  pg_bouncer:
+    extends:
+      service: pg_bouncer
+      file: docker-compose-infra.yml
 
   minio:
-    image: minio/minio
-    ports:
-      - '9000:9000'
-      - '9001:9001'
-    environment:
-      MINIO_ROOT_USER: supa-storage
-      MINIO_ROOT_PASSWORD: secret1234
-    command: server --console-address ":9001" /data
+    extends:
+      service: minio
+      file: docker-compose-infra.yml
 
-  createbuckets:
-    image: minio/mc
+  minio_setup:
+    extends:
+      service: minio_setup
+      file: docker-compose-infra.yml
     depends_on:
-      - minio
-    entrypoint: >
-      /bin/sh -c "
-      /usr/bin/mc alias set supa-minio http://minio:9000 supa-storage secret1234;
-      /usr/bin/mc mb supa-minio/supa-storage-staging-ap-southeast-1;
-      exit 0;
-      "
+      minio:
+        condition: service_healthy
 
   imgproxy:
-    image: darthsim/imgproxy
-    ports:
-      - '50020:8080'
-    volumes:
-      - ./data:/images/data
-    environment:
-      - IMGPROXY_WRITE_TIMEOUT=20
-      - IMGPROXY_READ_TIMEOUT=20
-      - IMGPROXY_REQUESTS_QUEUE_SIZE=24
-      - IMGPROXY_LOCAL_FILESYSTEM_ROOT=/images
-      - IMGPROXY_USE_ETAG=true
-      - IMGPROXY_ENABLE_WEBP_DETECTION=true
+    extends:
+      service: imgproxy
+      file: docker-compose-infra.yml
+
+# Optional for rate-limiting
+# redis:
+#   extends:
+#     service: redis
+#     file: docker-compose-infra.yml
diff --git a/package.json b/package.json
index ef69f6f4..35e3e4a0 100644
--- a/package.json
+++ b/package.json
@@ -15,8 +15,8 @@
     "prettier:check": "prettier -v && prettier -c src/**",
     "format": "prettier -c --write src/**",
     "eslint:check": "eslint 'src/**'",
-    "infra:stop": "docker-compose --project-directory . -f docker-compose.yml down --remove-orphans",
-    "infra:start": "docker-compose --project-directory . -f docker-compose.yml up -d && sleep 5 && npm run migration:run",
+    "infra:stop": "docker compose --project-directory . -f docker-compose-infra.yml down --remove-orphans",
+    "infra:start": "docker compose --project-directory . -f docker-compose-infra.yml up -d && sleep 5 && npm run migration:run",
     "infra:restart": "npm run infra:stop && npm run infra:start"
   },
   "author": "Supabase",
diff --git a/src/config.ts b/src/config.ts
index 6e03015e..955f72c6 100644
--- a/src/config.ts
+++ b/src/config.ts
@@ -10,14 +10,14 @@ type StorageConfigType = {
   adminRequestIdHeader?: string
   anonKey: string
   encryptionKey: string
-  fileSizeLimit: number
-  fileSizeLimitStandardUpload?: number
-  fileStoragePath?: string
-  globalS3Protocol: 'http' | 'https'
-  globalS3MaxSockets?: number
-  globalS3Bucket: string
-  globalS3Endpoint?: string
-  globalS3ForcePathStyle?: boolean
+  uploadFileSizeLimit: number
+  uploadFileSizeLimitStandard?: number
+  storageFilePath?: string
+  storageS3MaxSockets?: number
+  storageS3Bucket: string
+  storageS3Endpoint?: string
+  storageS3ForcePathStyle?: boolean
+  storageS3Region: string
   isMultitenant: boolean
   jwtSecret: string
   jwtAlgorithm: string
@@ -36,17 +36,17 @@ type StorageConfigType = {
   databaseFreePoolAfterInactivity: number
   databaseConnectionTimeout: number
   region: string
-  requestIdHeader?: string
+  requestTraceHeader?: string
   serviceKey: string
   storageBackendType: StorageBackendType
   tenantId: string
-  urlLengthLimit: number
-  xForwardedHostRegExp?: string
+  requestUrlLengthLimit: number
+  requestXForwardedHostRegExp?: string
   logLevel?: string
   logflareEnabled?: boolean
   logflareApiKey?: string
   logflareSourceToken?: string
-  enableQueueEvents: boolean
+  pgQueueEnable: boolean
   pgQueueConnectionURL?: string
   pgQueueDeleteAfterDays?: number
   pgQueueArchiveCompletedAfterSeconds?: number
@@ -58,7 +58,7 @@ type StorageConfigType = {
   webhookQueueConcurrency?: number
   adminDeleteQueueTeamSize?: number
   adminDeleteConcurrency?: number
-  enableImageTransformation: boolean
+  imageTransformationEnabled: boolean
   imgProxyURL?: string
   imgProxyRequestTimeout: number
   imgProxyHttpMaxSockets: number
@@ -73,64 +73,144 @@ type StorageConfigType = {
   adminPort: number
   port: number
   host: string
-  enableRateLimiter: boolean
+  rateLimiterEnabled: boolean
   rateLimiterDriver: 'memory' | 'redis' | string
   rateLimiterRedisUrl?: string
   rateLimiterSkipOnError?: boolean
   rateLimiterRenderPathMaxReqSec: number
   rateLimiterRedisConnectTimeout: number
   rateLimiterRedisCommandTimeout: number
-  signedUploadUrlExpirationTime: number
+  uploadSignedUrlExpirationTime: number
   tusUrlExpiryMs: number
   tusPath: string
   tusUseFileVersionSeparator: boolean
-  enableDefaultMetrics: boolean
+  defaultMetricsEnabled: boolean
 }
 
-function getOptionalConfigFromEnv(key: string): string | undefined {
-  return process.env[key]
+function getOptionalConfigFromEnv(key: string, fallback?: string): string | undefined {
+  const envValue = process.env[key]
+
+  if (!envValue && fallback) {
+    return getOptionalConfigFromEnv(fallback)
+  }
+
+  return envValue
 }
 
-function getConfigFromEnv(key: string): string {
+function getConfigFromEnv(key: string, fallbackEnv?: string): string {
   const value = getOptionalConfigFromEnv(key)
   if (!value) {
+    if (fallbackEnv) {
+      return getConfigFromEnv(fallbackEnv)
+    }
+
     throw new Error(`${key} is undefined`)
   }
   return value
 }
 
-function getOptionalIfMultitenantConfigFromEnv(key: string): string | undefined {
-  return getOptionalConfigFromEnv('IS_MULTITENANT') === 'true'
-    ? getOptionalConfigFromEnv(key)
-    : getConfigFromEnv(key)
+function getOptionalIfMultitenantConfigFromEnv(key: string, fallback?: string): string | undefined {
+  return getOptionalConfigFromEnv('MULTI_TENANT', 'IS_MULTITENANT') === 'true'
+    ? getOptionalConfigFromEnv(key, fallback)
+    : getConfigFromEnv(key, fallback)
+}
+
+let config: StorageConfigType | undefined
+
+export function mergeConfig(newConfig: Partial<StorageConfigType>) {
+  config = { ...config, ...(newConfig as Required<StorageConfigType>) }
+}
 
-export function getConfig(): StorageConfigType {
+export function getConfig(options?: { reload?: boolean }): StorageConfigType {
+  if (config && !options?.reload) {
+    return config
+  }
+
   dotenv.config()
 
-  return {
+  config = {
+    // Tenant
+    tenantId:
+      getOptionalConfigFromEnv('PROJECT_REF') ||
+      getOptionalIfMultitenantConfigFromEnv('TENANT_ID') ||
+      '',
+    isMultitenant: getOptionalConfigFromEnv('MULTI_TENANT', 'IS_MULTITENANT') === 'true',
+
+    // Server
+    region: getConfigFromEnv('SERVER_REGION', 'REGION'),
     version: getOptionalConfigFromEnv('VERSION') || '0.0.0',
     keepAliveTimeout: parseInt(getOptionalConfigFromEnv('SERVER_KEEP_ALIVE_TIMEOUT') || '61', 10),
     headersTimeout: parseInt(getOptionalConfigFromEnv('SERVER_HEADERS_TIMEOUT') || '65', 10),
-    adminApiKeys: getOptionalConfigFromEnv('ADMIN_API_KEYS') || '',
-    adminRequestIdHeader: getOptionalConfigFromEnv('ADMIN_REQUEST_ID_HEADER'),
+    host: getOptionalConfigFromEnv('SERVER_HOST', 'HOST') || '0.0.0.0',
+    port: Number(getOptionalConfigFromEnv('SERVER_PORT', 'PORT')) || 5000,
+    adminPort: Number(getOptionalConfigFromEnv('SERVER_ADMIN_PORT', 'ADMIN_PORT')) || 5001,
+
+    // Request
+    requestXForwardedHostRegExp: getOptionalConfigFromEnv(
+      'REQUEST_X_FORWARDED_HOST_REGEXP',
+      'X_FORWARDED_HOST_REGEXP'
+    ),
+    requestUrlLengthLimit:
+      Number(getOptionalConfigFromEnv('REQUEST_URL_LENGTH_LIMIT', 'URL_LENGTH_LIMIT')) || 7_500,
+    requestTraceHeader: getOptionalConfigFromEnv('REQUEST_TRACE_HEADER', 'REQUEST_ID_HEADER'),
+
+    // Admin
+    adminApiKeys: getOptionalConfigFromEnv('SERVER_ADMIN_API_KEYS', 'ADMIN_API_KEYS') || '',
+    adminRequestIdHeader: getOptionalConfigFromEnv(
+      'REQUEST_TRACE_HEADER',
+      'REQUEST_ADMIN_TRACE_HEADER'
+    ),
+
+    // Auth
     anonKey: getOptionalIfMultitenantConfigFromEnv('ANON_KEY') || '',
-    encryptionKey: getOptionalConfigFromEnv('ENCRYPTION_KEY') || '',
-    fileSizeLimit: Number(getConfigFromEnv('FILE_SIZE_LIMIT')),
-    fileSizeLimitStandardUpload: parseInt(
-      getOptionalConfigFromEnv('FILE_SIZE_LIMIT_STANDARD_UPLOAD') || '0'
-    ),
-    fileStoragePath: getOptionalConfigFromEnv('FILE_STORAGE_BACKEND_PATH'),
-    globalS3MaxSockets: parseInt(getOptionalConfigFromEnv('GLOBAL_S3_MAX_SOCKETS') || '200', 10),
-    globalS3Protocol: (getOptionalConfigFromEnv('GLOBAL_S3_PROTOCOL') || 'https') as
-      | 'http'
-      | 'https',
-    globalS3Bucket: getConfigFromEnv('GLOBAL_S3_BUCKET'),
-    globalS3Endpoint: getOptionalConfigFromEnv('GLOBAL_S3_ENDPOINT'),
-    globalS3ForcePathStyle: getOptionalConfigFromEnv('GLOBAL_S3_FORCE_PATH_STYLE') === 'true',
-    isMultitenant: getOptionalConfigFromEnv('IS_MULTITENANT') === 'true',
-    jwtSecret: getOptionalIfMultitenantConfigFromEnv('PGRST_JWT_SECRET') || '',
-    jwtAlgorithm: getOptionalConfigFromEnv('PGRST_JWT_ALGORITHM') || 'HS256',
+    serviceKey: getOptionalIfMultitenantConfigFromEnv('SERVICE_KEY') || '',
+    encryptionKey: getOptionalConfigFromEnv('AUTH_ENCRYPTION_KEY', 'ENCRYPTION_KEY') || '',
+    jwtSecret: getOptionalIfMultitenantConfigFromEnv('AUTH_JWT_SECRET', 'PGRST_JWT_SECRET') || '',
+    jwtAlgorithm: getOptionalConfigFromEnv('AUTH_JWT_ALGORITHM', 'PGRST_JWT_ALGORITHM') || 'HS256',
+
+    // Upload
+    uploadFileSizeLimit: Number(getConfigFromEnv('UPLOAD_FILE_SIZE_LIMIT', 'FILE_SIZE_LIMIT')),
+    uploadFileSizeLimitStandard: parseInt(
+      getOptionalConfigFromEnv(
+        'UPLOAD_FILE_SIZE_LIMIT_STANDARD',
+        'FILE_SIZE_LIMIT_STANDARD_UPLOAD'
+      ) || '0'
+    ),
+    uploadSignedUrlExpirationTime: parseInt(
+      getOptionalConfigFromEnv(
+        'UPLOAD_SIGNED_URL_EXPIRATION_TIME',
+        'SIGNED_UPLOAD_URL_EXPIRATION_TIME'
+      ) || '60'
+    ),
+
+    // Upload - TUS
+    tusPath: getOptionalConfigFromEnv('TUS_URL_PATH') || '/upload/resumable',
+    tusUrlExpiryMs: parseInt(
+      getOptionalConfigFromEnv('TUS_URL_EXPIRY_MS') || (1000 * 60 * 60).toString(),
+      10
+    ),
+    tusUseFileVersionSeparator:
+      getOptionalConfigFromEnv('TUS_USE_FILE_VERSION_SEPARATOR') === 'true',
+
+    // Storage
+    storageBackendType: getConfigFromEnv('STORAGE_BACKEND') as StorageBackendType,
+
+    // Storage - File
+    storageFilePath: getOptionalConfigFromEnv('STORAGE_FILE_BACKEND_PATH', 'STORAGE_FILE_PATH'),
+
+    // Storage - S3
+    storageS3MaxSockets: parseInt(
+      getOptionalConfigFromEnv('STORAGE_S3_MAX_SOCKETS', 'GLOBAL_S3_MAX_SOCKETS') || '200',
+      10
+    ),
+    storageS3Bucket: getConfigFromEnv('STORAGE_S3_BUCKET', 'GLOBAL_S3_BUCKET'),
+    storageS3Endpoint: getOptionalConfigFromEnv('STORAGE_S3_ENDPOINT', 'GLOBAL_S3_ENDPOINT'),
+    storageS3ForcePathStyle:
+      getOptionalConfigFromEnv('STORAGE_S3_FORCE_PATH_STYLE', 'GLOBAL_S3_FORCE_PATH_STYLE') ===
+      'true',
+    storageS3Region: getOptionalConfigFromEnv('STORAGE_S3_REGION', 'REGION') as string,
+
+    // DB - Migrations
     dbAnonRole: getOptionalConfigFromEnv('DB_ANON_ROLE') || 'anon',
     dbServiceRole: getOptionalConfigFromEnv('DB_SERVICE_ROLE') || 'service_role',
     dbAuthenticatedRole: getOptionalConfigFromEnv('DB_AUTHENTICATED_ROLE') || 'authenticated',
@@ -139,8 +219,13 @@
       getOptionalConfigFromEnv('DB_ALLOW_MIGRATION_REFRESH') === 'false'
     ),
     dbSuperUser: getOptionalConfigFromEnv('DB_SUPER_USER') || 'postgres',
-    dbSearchPath: getOptionalConfigFromEnv('DB_SEARCH_PATH') || '',
-    multitenantDatabaseUrl: getOptionalConfigFromEnv('MULTITENANT_DATABASE_URL'),
+
+    // Database - Connection
+    dbSearchPath: getOptionalConfigFromEnv('DATABASE_SEARCH_PATH', 'DB_SEARCH_PATH') || '',
+    multitenantDatabaseUrl: getOptionalConfigFromEnv(
+      'DATABASE_MULTITENANT_URL',
+      'MULTITENANT_DATABASE_URL'
+    ),
     databaseSSLRootCert: getOptionalConfigFromEnv('DATABASE_SSL_ROOT_CERT'),
     databaseURL: getOptionalIfMultitenantConfigFromEnv('DATABASE_URL') || '',
     databasePoolURL: getOptionalConfigFromEnv('DATABASE_POOL_URL') || '',
@@ -156,21 +241,17 @@
       getOptionalConfigFromEnv('DATABASE_CONNECTION_TIMEOUT') || '3000',
      10
     ),
-    region: getConfigFromEnv('REGION'),
-    requestIdHeader: getOptionalConfigFromEnv('REQUEST_ID_HEADER'),
-    serviceKey: getOptionalIfMultitenantConfigFromEnv('SERVICE_KEY') || '',
-    storageBackendType: getConfigFromEnv('STORAGE_BACKEND') as StorageBackendType,
-    tenantId:
-      getOptionalConfigFromEnv('PROJECT_REF') ||
-      getOptionalIfMultitenantConfigFromEnv('TENANT_ID') ||
-      '',
-    urlLengthLimit: Number(getOptionalConfigFromEnv('URL_LENGTH_LIMIT')) || 7_500,
-    xForwardedHostRegExp: getOptionalConfigFromEnv('X_FORWARDED_HOST_REGEXP'),
+
+    // Monitoring
     logLevel: getOptionalConfigFromEnv('LOG_LEVEL') || 'info',
     logflareEnabled: getOptionalConfigFromEnv('LOGFLARE_ENABLED') === 'true',
     logflareApiKey: getOptionalConfigFromEnv('LOGFLARE_API_KEY'),
     logflareSourceToken: getOptionalConfigFromEnv('LOGFLARE_SOURCE_TOKEN'),
-    enableQueueEvents: getOptionalConfigFromEnv('ENABLE_QUEUE_EVENTS') === 'true',
+    defaultMetricsEnabled:
+      getOptionalConfigFromEnv('DEFAULT_METRICS_ENABLED', 'ENABLE_DEFAULT_METRICS') === 'true',
+
+    // Queue
+    pgQueueEnable:
+      getOptionalConfigFromEnv('PG_QUEUE_ENABLE', 'ENABLE_QUEUE_EVENTS') === 'true',
     pgQueueConnectionURL: getOptionalConfigFromEnv('PG_QUEUE_CONNECTION_URL'),
     pgQueueDeleteAfterDays: parseInt(
       getOptionalConfigFromEnv('PG_QUEUE_DELETE_AFTER_DAYS') || '2',
@@ -181,6 +262,8 @@
       10
     ),
     pgQueueRetentionDays: parseInt(getOptionalConfigFromEnv('PG_QUEUE_RETENTION_DAYS') || '2', 10),
+
+    // Webhooks
     webhookURL: getOptionalConfigFromEnv('WEBHOOK_URL'),
     webhookApiKey: getOptionalConfigFromEnv('WEBHOOK_API_KEY'),
     webhookQueuePullInterval: parseInt(
@@ -194,7 +277,11 @@
     adminDeleteConcurrency: parseInt(
       getOptionalConfigFromEnv('QUEUE_ADMIN_DELETE_CONCURRENCY') || '5'
     ),
-    enableImageTransformation: getOptionalConfigFromEnv('ENABLE_IMAGE_TRANSFORMATION') === 'true',
+
+    // Image Transformation
+    imageTransformationEnabled:
+      getOptionalConfigFromEnv('IMAGE_TRANSFORMATION_ENABLED', 'ENABLE_IMAGE_TRANSFORMATION') ===
+      'true',
     imgProxyRequestTimeout: parseInt(
       getOptionalConfigFromEnv('IMGPROXY_REQUEST_TIMEOUT') || '15',
       10
@@ -210,15 +297,22 @@
     imgProxyURL: getOptionalConfigFromEnv('IMGPROXY_URL'),
     imgLimits: {
       size: {
-        min: parseInt(getOptionalConfigFromEnv('IMG_LIMITS_MIN_SIZE') || '1', 10),
-        max: parseInt(getOptionalConfigFromEnv('IMG_LIMITS_MAX_SIZE') || '2000', 10),
+        min: parseInt(
+          getOptionalConfigFromEnv('IMAGE_TRANSFORMATION_LIMIT_MIN_SIZE', 'IMG_LIMITS_MIN_SIZE') ||
+            '1',
+          10
+        ),
+        max: parseInt(
+          getOptionalConfigFromEnv('IMAGE_TRANSFORMATION_LIMIT_MAX_SIZE', 'IMG_LIMITS_MAX_SIZE') ||
+            '2000',
+          10
+        ),
       },
     },
-    postgrestForwardHeaders: getOptionalConfigFromEnv('POSTGREST_FORWARD_HEADERS'),
-    host: getOptionalConfigFromEnv('HOST') || '0.0.0.0',
-    port: Number(getOptionalConfigFromEnv('PORT')) || 5000,
-    adminPort: Number(getOptionalConfigFromEnv('ADMIN_PORT')) || 5001,
-    enableRateLimiter: getOptionalConfigFromEnv('ENABLE_RATE_LIMITER') === 'true',
+
+    // Rate Limiting
+    rateLimiterEnabled:
+      getOptionalConfigFromEnv('RATE_LIMITER_ENABLED', 'ENABLE_RATE_LIMITER') === 'true',
     rateLimiterSkipOnError: getOptionalConfigFromEnv('RATE_LIMITER_SKIP_ON_ERROR') === 'true',
     rateLimiterDriver: getOptionalConfigFromEnv('RATE_LIMITER_DRIVER') || 'memory',
     rateLimiterRedisUrl: getOptionalConfigFromEnv('RATE_LIMITER_REDIS_URL'),
@@ -234,17 +328,7 @@
       getOptionalConfigFromEnv('RATE_LIMITER_REDIS_COMMAND_TIMEOUT') || '2',
       10
     ),
-    signedUploadUrlExpirationTime: parseInt(
-      getOptionalConfigFromEnv('SIGNED_UPLOAD_URL_EXPIRATION_TIME') || '60'
-    ),
-
-    tusPath: getOptionalConfigFromEnv('TUS_URL_PATH') || '/upload/resumable',
-    tusUrlExpiryMs: parseInt(
-      getOptionalConfigFromEnv('TUS_URL_EXPIRY_MS') || (1000 * 60 * 60).toString(),
-      10
-    ),
-    tusUseFileVersionSeparator:
-      getOptionalConfigFromEnv('TUS_USE_FILE_VERSION_SEPARATOR') === 'true',
-    enableDefaultMetrics: getOptionalConfigFromEnv('ENABLE_DEFAULT_METRICS') === 'true',
   }
+
+  return config
 }
diff --git a/src/database/client.ts b/src/database/client.ts
index 28c4a642..79c5ac01 100644
--- a/src/database/client.ts
+++ b/src/database/client.ts
@@ -39,7 +39,7 @@ async function getDbCredentials(
     databasePoolURL,
     databaseURL,
     databaseMaxConnections,
-    xForwardedHostRegExp,
+    requestXForwardedHostRegExp,
   } = getConfig()
 
   let dbUrl = databasePoolURL || databaseURL
 
@@ -51,7 +51,7 @@
     throw new StorageBackendError('Invalid Tenant Id', 400, 'Tenant id not provided')
   }
 
-  if (xForwardedHostRegExp && !options?.disableHostCheck) {
+  if (requestXForwardedHostRegExp && !options?.disableHostCheck) {
     const xForwardedHost = host
 
     if (typeof xForwardedHost !== 'string') {
@@ -61,7 +61,7 @@
         'X-Forwarded-Host header is not a string'
       )
     }
-    if (!new RegExp(xForwardedHostRegExp).test(xForwardedHost)) {
+    if (!new RegExp(requestXForwardedHostRegExp).test(xForwardedHost)) {
       throw new StorageBackendError(
         'Invalid Header',
         400,
diff --git a/src/http/plugins/metrics.ts b/src/http/plugins/metrics.ts
index 99d39f18..59f73db6 100644
--- a/src/http/plugins/metrics.ts
+++ b/src/http/plugins/metrics.ts
@@ -3,7 +3,7 @@ import { MetricsRegistrar } from '../../monitoring/metrics'
 import fastifyMetrics from 'fastify-metrics'
 import { getConfig } from '../../config'
 
-const { region, enableDefaultMetrics } = getConfig()
+const { region, defaultMetricsEnabled } = getConfig()
 
 interface MetricsOptions {
   enabledEndpoint?: boolean
@@ -14,7 +14,7 @@ export const metrics = ({ enabledEndpoint }: MetricsOptions) =>
     fastify.register(fastifyMetrics, {
       endpoint: enabledEndpoint ? '/metrics' : null,
       defaultMetrics: {
-        enabled: enableDefaultMetrics,
+        enabled: defaultMetricsEnabled,
         register: MetricsRegistrar,
         prefix: 'storage_api_',
         labels: {
@@ -22,7 +22,7 @@ export const metrics = ({ enabledEndpoint }: MetricsOptions) =>
         },
       },
       routeMetrics: {
-        enabled: enableDefaultMetrics,
+        enabled: defaultMetricsEnabled,
         overrides: {
           summary: {
             name: 'storage_api_http_request_summary_seconds',
diff --git a/src/http/plugins/tenant-id.ts b/src/http/plugins/tenant-id.ts
index b2e55f6b..889f147b 100644
--- a/src/http/plugins/tenant-id.ts
+++ b/src/http/plugins/tenant-id.ts
@@ -8,13 +8,13 @@ declare module 'fastify' {
 }
 
 export const tenantId = fastifyPlugin(async (fastify) => {
-  const { isMultitenant, tenantId, xForwardedHostRegExp } = getConfig()
+  const { isMultitenant, tenantId, requestXForwardedHostRegExp } = getConfig()
   fastify.decorateRequest('tenantId', tenantId)
   fastify.addHook('onRequest', async (request) => {
-    if (!isMultitenant || !xForwardedHostRegExp) return
+    if (!isMultitenant || !requestXForwardedHostRegExp) return
     const xForwardedHost = request.headers['x-forwarded-host']
     if (typeof xForwardedHost !== 'string') return
-    const result = xForwardedHost.match(xForwardedHostRegExp)
+    const result = xForwardedHost.match(requestXForwardedHostRegExp)
     if (!result) return
     request.tenantId = result[1]
   })
diff --git a/src/http/routes/object/getObject.ts b/src/http/routes/object/getObject.ts
index d323a385..77bb3018 100644
--- a/src/http/routes/object/getObject.ts
+++ b/src/http/routes/object/getObject.ts
@@ -4,7 +4,7 @@ import { IncomingMessage, Server, ServerResponse } from 'http'
 import { getConfig } from '../../../config'
 import { AuthenticatedRangeRequest } from '../../request'
 
-const { globalS3Bucket } = getConfig()
+const { storageS3Bucket } = getConfig()
 
 const getObjectParamsSchema = {
   type: 'object',
@@ -48,7 +48,7 @@ async function requestHandler(
   request.log.info(s3Key)
 
   return request.storage.renderer('asset').render(request, response, {
-    bucket: globalS3Bucket,
+    bucket: storageS3Bucket,
     key: s3Key,
     version: obj.version,
     download,
diff --git a/src/http/routes/object/getObjectInfo.ts b/src/http/routes/object/getObjectInfo.ts
index 51e55f0b..45c8bc75 100644
--- a/src/http/routes/object/getObjectInfo.ts
+++ b/src/http/routes/object/getObjectInfo.ts
@@ -5,7 +5,7 @@ import { getConfig } from '../../../config'
 import { AuthenticatedRangeRequest } from '../../request'
 import { Obj } from '../../../storage/schemas'
 
-const { globalS3Bucket } = getConfig()
+const { storageS3Bucket } = getConfig()
 
 const getObjectParamsSchema = {
   type: 'object',
@@ -44,7 +44,7 @@ async function requestHandler(
   }
 
   return request.storage.renderer('head').render(request, response, {
-    bucket: globalS3Bucket,
+    bucket: storageS3Bucket,
     key: s3Key,
     version: obj.version,
   })
diff --git a/src/http/routes/object/getPublicObject.ts b/src/http/routes/object/getPublicObject.ts
index ce76938f..c73b980e 100644
--- a/src/http/routes/object/getPublicObject.ts
+++ b/src/http/routes/object/getPublicObject.ts
@@ -2,7 +2,7 @@ import { FastifyInstance } from 'fastify'
 import { FromSchema } from 'json-schema-to-ts'
 import { getConfig } from '../../../config'
 
-const { globalS3Bucket } = getConfig()
+const { storageS3Bucket } = getConfig()
 
 const getPublicObjectParamsSchema = {
   type: 'object',
@@ -58,7 +58,7 @@ export default async function routes(fastify: FastifyInstance) {
       const s3Key = `${request.tenantId}/${bucketName}/${objectName}`
 
       return request.storage.renderer('asset').render(request, response, {
-        bucket: globalS3Bucket,
+        bucket: storageS3Bucket,
         key: s3Key,
         version: obj.version,
         download,
diff --git a/src/http/routes/object/getSignedObject.ts b/src/http/routes/object/getSignedObject.ts
index 8b6b40bb..f2db3105 100644
--- a/src/http/routes/object/getSignedObject.ts
+++ b/src/http/routes/object/getSignedObject.ts
@@ -4,7 +4,7 @@ import { getConfig } from '../../../config'
 import { getJwtSecret, SignedToken, verifyJWT } from '../../../auth'
 import { StorageBackendError } from '../../../storage'
 
-const { globalS3Bucket } = getConfig()
+const { storageS3Bucket } = getConfig()
 
 const getSignedObjectParamsSchema = {
   type: 'object',
@@ -82,7 +82,7 @@ export default async function routes(fastify: FastifyInstance) {
         .findObject(objParts.join('/'), 'id,version')
 
       return request.storage.renderer('asset').render(request, response, {
-        bucket: globalS3Bucket,
+        bucket: storageS3Bucket,
         key: s3Key,
         version: obj.version,
         download,
diff --git a/src/http/routes/object/getSignedUploadURL.ts b/src/http/routes/object/getSignedUploadURL.ts
index cf595dbf..f92fdcbf 100644
--- a/src/http/routes/object/getSignedUploadURL.ts
+++ b/src/http/routes/object/getSignedUploadURL.ts
@@ -4,7 +4,7 @@ import { createDefaultSchema } from '../../generic-routes'
 import { AuthenticatedRequest } from '../../request'
 import { getConfig } from '../../../config'
 
-const { signedUploadUrlExpirationTime } = getConfig()
+const { uploadSignedUrlExpirationTime } = getConfig()
 
 const getSignedUploadURLParamsSchema = {
   type: 'object',
@@ -54,7 +54,7 @@ export default async function routes(fastify: FastifyInstance) {
 
       const signedUploadURL = await request.storage
         .from(bucketName)
-        .signUploadObjectUrl(objectName, urlPath as string, signedUploadUrlExpirationTime, owner)
+        .signUploadObjectUrl(objectName, urlPath as string, uploadSignedUrlExpirationTime, owner)
 
       return response.status(200).send({ url: signedUploadURL })
     }
diff --git a/src/http/routes/render/index.ts b/src/http/routes/render/index.ts
index ec8e9e4d..690aab2e 100644
--- a/src/http/routes/render/index.ts
+++ b/src/http/routes/render/index.ts
@@ -6,17 +6,17 @@ import { jwt, storage, requireTenantFeature, db, dbSuperUser } from '../../plugi
 import { getConfig } from '../../../config'
 import { rateLimiter } from './rate-limiter'
 
-const { enableImageTransformation, enableRateLimiter } = getConfig()
+const { imageTransformationEnabled, rateLimiterEnabled } = getConfig()
 
 export default async function routes(fastify: FastifyInstance) {
-  if (!enableImageTransformation) {
+  if (!imageTransformationEnabled) {
     return
   }
 
   fastify.register(async function authorizationContext(fastify) {
     fastify.register(requireTenantFeature('imageTransformation'))
 
-    if (enableRateLimiter) {
+    if (rateLimiterEnabled) {
       fastify.register(rateLimiter)
     }
 
@@ -29,7 +29,7 @@ export default async function routes(fastify: FastifyInstance) {
   fastify.register(async (fastify) => {
     fastify.register(requireTenantFeature('imageTransformation'))
 
-    if (enableRateLimiter) {
+    if (rateLimiterEnabled) {
       fastify.register(rateLimiter)
     }
 
diff --git a/src/http/routes/render/renderAuthenticatedImage.ts b/src/http/routes/render/renderAuthenticatedImage.ts
index e5a54176..f4e9ee4d 100644
--- a/src/http/routes/render/renderAuthenticatedImage.ts
+++ b/src/http/routes/render/renderAuthenticatedImage.ts
@@ -4,7 +4,7 @@ import { FastifyInstance } from 'fastify'
 import { ImageRenderer } from '../../../storage/renderer'
 import { transformationOptionsSchema } from '../../schemas/transformations'
 
-const { globalS3Bucket } = getConfig()
+const { storageS3Bucket } = getConfig()
 
 const renderAuthenticatedImageParamsSchema = {
   type: 'object',
@@ -53,7 +53,7 @@ export default async function routes(fastify: FastifyInstance) {
       const renderer = request.storage.renderer('image') as ImageRenderer
 
       return renderer.setTransformations(request.query).render(request, response, {
-        bucket: globalS3Bucket,
+        bucket: storageS3Bucket,
         key: s3Key,
         version: obj.version,
         download,
diff --git a/src/http/routes/render/renderPublicImage.ts b/src/http/routes/render/renderPublicImage.ts
index 92e2fd2f..6ecd8416 100644
--- a/src/http/routes/render/renderPublicImage.ts
+++ b/src/http/routes/render/renderPublicImage.ts
@@ -4,7 +4,7 @@ import { FastifyInstance } from 'fastify'
 import { ImageRenderer } from '../../../storage/renderer'
 import { transformationOptionsSchema } from '../../schemas/transformations'
 
-const { globalS3Bucket } = getConfig()
+const { storageS3Bucket } = getConfig()
 
 const renderPublicImageParamsSchema = {
   type: 'object',
@@ -58,7 +58,7 @@ export default async function routes(fastify: FastifyInstance) {
       const renderer = request.storage.renderer('image') as ImageRenderer
 
      return renderer.setTransformations(request.query).render(request, response, {
-        bucket: globalS3Bucket,
+        bucket: storageS3Bucket,
         key: s3Key,
         version: obj.version,
         download,
diff --git a/src/http/routes/render/renderSignedImage.ts b/src/http/routes/render/renderSignedImage.ts
index 39120255..0c66cf50 100644
--- a/src/http/routes/render/renderSignedImage.ts
+++ b/src/http/routes/render/renderSignedImage.ts
@@ -5,7 +5,7 @@ import { ImageRenderer } from '../../../storage/renderer'
 import { getJwtSecret, SignedToken, verifyJWT } from '../../../auth'
 import { StorageBackendError } from '../../../storage'
 
-const { globalS3Bucket } = getConfig()
+const { storageS3Bucket } = getConfig()
 
 const renderAuthenticatedImageParamsSchema = {
   type: 'object',
@@ -82,7 +82,7 @@ export default async function routes(fastify: FastifyInstance) {
       return renderer
         .setTransformationsFromString(transformations || '')
         .render(request, response, {
-          bucket: globalS3Bucket,
+          bucket: storageS3Bucket,
           key: s3Key,
           version: obj.version,
           download,
diff --git a/src/http/routes/tus/index.ts b/src/http/routes/tus/index.ts
index 0cfe6702..1161c478 100644
--- a/src/http/routes/tus/index.ts
+++ b/src/http/routes/tus/index.ts
@@ -24,14 +24,14 @@ import { S3Store } from './s3-store'
 import { DeleteHandler } from './handlers'
 
 const {
-  globalS3Bucket,
-  globalS3Endpoint,
-  globalS3ForcePathStyle,
+  storageS3Bucket,
+  storageS3Endpoint,
+  storageS3ForcePathStyle,
   region,
   tusUrlExpiryMs,
   tusPath,
   storageBackendType,
-  fileStoragePath,
+  storageFilePath,
 } = getConfig()
 
 type MultiPartRequest = http.IncomingMessage & {
@@ -50,16 +50,16 @@ function createTusStore() {
       partSize: 6 * 1024 * 1024, // Each uploaded part will have ~6MB,
       expirationPeriodInMilliseconds: tusUrlExpiryMs,
       s3ClientConfig: {
-        bucket: globalS3Bucket,
+        bucket: storageS3Bucket,
         region: region,
-        endpoint: globalS3Endpoint,
-        forcePathStyle: globalS3ForcePathStyle,
+        endpoint: storageS3Endpoint,
+        forcePathStyle: storageS3ForcePathStyle,
       },
     })
   }
 
   return new FileStore({
-    directory: fileStoragePath + '/' + globalS3Bucket,
+    directory: storageFilePath + '/' + storageS3Bucket,
   })
 }
diff --git a/src/http/routes/tus/lifecycle.ts b/src/http/routes/tus/lifecycle.ts
index fe4d0a20..3527e95d 100644
--- a/src/http/routes/tus/lifecycle.ts
+++ b/src/http/routes/tus/lifecycle.ts
@@ -8,7 +8,7 @@ import { Uploader } from '../../../storage/uploader'
 import { TenantConnection } from '../../../database/connection'
 import { BaseLogger } from 'pino'
 
-const { globalS3Bucket, tusPath } = getConfig()
+const { storageS3Bucket, tusPath } = getConfig()
 const reExtractFileID = /([^/]+)\/?$/
 
 export type MultiPartRequest = http.IncomingMessage & {
@@ -149,7 +149,7 @@ export async function onUploadFinish(
   try {
     const s3Key = `${req.upload.tenantId}/${resourceId.bucket}/${resourceId.objectName}`
     const metadata = await req.upload.storage.backend.headObject(
-      globalS3Bucket,
+      storageS3Bucket,
       s3Key,
       resourceId.version
     )
diff --git a/src/queue/events/base-event.ts b/src/queue/events/base-event.ts
index 0643bc10..066223a3 100644
--- a/src/queue/events/base-event.ts
+++ b/src/queue/events/base-event.ts
@@ -20,8 +20,9 @@ export interface BasePayload {
 }
 
 export type StaticThis<T> = { new (...args: any): T }
 
-const { enableQueueEvents, storageBackendType, globalS3Protocol } = getConfig()
-const httpAgent = createAgent(globalS3Protocol)
+const { pgQueueEnable, storageBackendType, storageS3Endpoint } = getConfig()
+const storageS3Protocol = storageS3Endpoint?.includes('http://') ? 'http' : 'https'
+const httpAgent = createAgent(storageS3Protocol)
 
 export abstract class BaseEvent<T extends BasePayload> {
   public static readonly version: string = 'v1'
@@ -124,7 +125,7 @@ export abstract class BaseEvent<T extends BasePayload> {
   async send() {
     const constructor = this.constructor as typeof BaseEvent
 
-    if (!enableQueueEvents) {
+    if (!pgQueueEnable) {
       return constructor.handle({
         id: '',
         name: constructor.getQueueName(),
diff --git a/src/queue/events/object-admin-delete.ts b/src/queue/events/object-admin-delete.ts
index 8b67a060..c885cbc7 100644
--- a/src/queue/events/object-admin-delete.ts
+++ b/src/queue/events/object-admin-delete.ts
@@ -11,7 +11,7 @@ export interface ObjectDeleteEvent extends BasePayload {
   version?: string
 }
 
-const { globalS3Bucket, adminDeleteQueueTeamSize, adminDeleteConcurrency } = getConfig()
+const { storageS3Bucket, adminDeleteQueueTeamSize, adminDeleteConcurrency } = getConfig()
 
 export class ObjectAdminDelete extends BaseEvent<ObjectDeleteEvent> {
   static queueName = 'object:admin:delete'
@@ -43,7 +43,7 @@ export class ObjectAdminDelete extends BaseEvent<ObjectDeleteEvent> {
         reqId: job.data.reqId,
       })
 
-      await storage.backend.deleteObjects(globalS3Bucket, [
+      await storage.backend.deleteObjects(storageS3Bucket, [
         withOptionalVersion(s3Key, version),
         withOptionalVersion(s3Key, version) + '.info',
       ])
diff --git a/src/queue/queue.ts b/src/queue/queue.ts
index 0c0a7596..8f54e938 100644
--- a/src/queue/queue.ts
+++ b/src/queue/queue.ts
@@ -22,6 +22,7 @@ export abstract class Queue {
     const {
       isMultitenant,
+      databaseURL,
       multitenantDatabaseUrl,
       pgQueueConnectionURL,
       pgQueueDeleteAfterDays,
@@ -29,10 +30,15 @@
       pgQueueRetentionDays,
     } = getConfig()
 
-    let url = pgQueueConnectionURL ?? process.env.DATABASE_URL
+    let url = pgQueueConnectionURL ?? databaseURL
 
-    if (isMultitenant) {
-      url = pgQueueConnectionURL ?? multitenantDatabaseUrl
+    if (isMultitenant && !pgQueueConnectionURL) {
+      if (!multitenantDatabaseUrl) {
+        throw new Error(
+          'running storage in multi-tenant but DATABASE_MULTITENANT_URL is not set'
+        )
+      }
+      url = multitenantDatabaseUrl
     }
 
     Queue.pgBoss = new PgBoss({
diff --git a/src/server.ts b/src/server.ts
index 4fe51052..6f071f6b 100644
--- a/src/server.ts
+++ b/src/server.ts
@@ -17,12 +17,12 @@ const exposeDocs = true
   const {
     databaseURL,
     isMultitenant,
-    requestIdHeader,
+    requestTraceHeader,
     adminRequestIdHeader,
     adminPort,
     port,
     host,
-    enableQueueEvents,
+    pgQueueEnable,
   } = getConfig()
 
   if (isMultitenant) {
@@ -32,7 +32,7 @@ const exposeDocs = true
     await runMigrationsOnTenant(databaseURL)
   }
 
-  if (enableQueueEvents) {
+  if (pgQueueEnable) {
     await Queue.init()
   }
 
@@ -40,7 +40,7 @@ const exposeDocs = true
     logger,
     disableRequestLogging: true,
     exposeDocs,
-    requestIdHeader,
+    requestIdHeader: requestTraceHeader,
   })
 
   await PubSub.connect()
diff --git a/src/storage/backend/file.ts b/src/storage/backend/file.ts
index 1be3df87..e00443f9 100644
--- a/src/storage/backend/file.ts
+++ b/src/storage/backend/file.ts
@@ -40,11 +40,11 @@ export class FileBackend implements StorageBackendAdapter {
   filePath: string
 
   constructor() {
-    const { fileStoragePath } = getConfig()
-    if (!fileStoragePath) {
+    const { storageFilePath } = getConfig()
+    if (!storageFilePath) {
       throw new Error('FILE_STORAGE_BACKEND_PATH env variable not set')
     }
-    this.filePath = fileStoragePath
+    this.filePath = storageFilePath
   }
 
   /**
diff --git a/src/storage/backend/index.ts b/src/storage/backend/index.ts
index 44b398f9..071fda19 100644
--- a/src/storage/backend/index.ts
+++ b/src/storage/backend/index.ts
@@ -7,7 +7,7 @@ export * from './s3'
 export * from './file'
 export * from './generic'
 
-const { region, globalS3Endpoint, globalS3ForcePathStyle } = getConfig()
+const { storageS3Region, storageS3Endpoint, storageS3ForcePathStyle } = getConfig()
 
 type ConfigForStorage<Type extends StorageBackendType> = Type extends 's3'
   ? S3ClientOptions
@@ -23,9 +23,9 @@ export function createStorageBackend(
     storageBackend = new FileBackend()
   } else {
     const defaultOptions: S3ClientOptions = {
-      region: region,
-      endpoint: globalS3Endpoint,
-      forcePathStyle: globalS3ForcePathStyle,
+      region: storageS3Region,
+      endpoint: storageS3Endpoint,
+      forcePathStyle: storageS3ForcePathStyle,
       ...(config ? config : {}),
     }
     storageBackend = new S3Backend(defaultOptions)
diff --git a/src/storage/backend/s3.ts b/src/storage/backend/s3.ts
index 9f2ea01a..19b398a9 100644
--- a/src/storage/backend/s3.ts
+++ b/src/storage/backend/s3.ts
@@ -23,7 +23,7 @@ import { StorageBackendError } from '../errors'
 import { getConfig } from '../../config'
 import Agent, { HttpsAgent } from 'agentkeepalive'
 
-const { globalS3Protocol, globalS3MaxSockets } = getConfig()
+const { storageS3MaxSockets } = getConfig()
 
 /**
  * Creates an agent for the given protocol
 */
 export function createAgent(protocol: 'http' | 'https') {
   const agentOptions = {
-    maxSockets: globalS3MaxSockets,
+    maxSockets: storageS3MaxSockets,
     keepAlive: true,
   }
 
@@ -58,7 +58,8 @@ export class S3Backend implements StorageBackendAdapter {
   client: S3Client
 
   constructor(options: S3ClientOptions) {
-    const agent = options.httpAgent ? options.httpAgent : createAgent(globalS3Protocol)
+    const storageS3Protocol = options.endpoint?.includes('http://') ? 'http' : 'https'
+    const agent = options.httpAgent ? options.httpAgent : createAgent(storageS3Protocol)
 
     const params: S3ClientConfig = {
       region: options.region,
diff --git a/src/storage/limits.ts b/src/storage/limits.ts
index 7f841598..3b9f28e5 100644
--- a/src/storage/limits.ts
+++ b/src/storage/limits.ts
@@ -2,18 +2,18 @@ import { getConfig } from '../config'
 import { getFileSizeLimit as getFileSizeLimitForTenant, getFeatures } from '../database/tenant'
 import { StorageBackendError } from './errors'
 
-const { isMultitenant, enableImageTransformation } = getConfig()
+const { isMultitenant, imageTransformationEnabled } = getConfig()
 
 /**
  * Get the maximum file size for a specific project
  * @param tenantId
 */
 export async function getFileSizeLimit(tenantId: string): Promise<number> {
-  let { fileSizeLimit } = getConfig()
+  let { uploadFileSizeLimit } = getConfig()
 
   if (isMultitenant) {
-    fileSizeLimit = await getFileSizeLimitForTenant(tenantId)
+    uploadFileSizeLimit = await getFileSizeLimitForTenant(tenantId)
   }
 
-  return fileSizeLimit
+  return uploadFileSizeLimit
 }
 
 /**
@@ -22,7 +22,7 @@ export async function getFileSizeLimit(tenantId: string): Promise<number> {
 */
 export async function isImageTransformationEnabled(tenantId: string) {
   if (!isMultitenant) {
-    return enableImageTransformation
+    return imageTransformationEnabled
   }
 
   const { imageTransformation } = await getFeatures(tenantId)
diff --git a/src/storage/object.ts b/src/storage/object.ts
index f1a4d318..5dfb2ac8 100644
--- a/src/storage/object.ts
+++ b/src/storage/object.ts
@@ -23,7 +23,7 @@ export interface UploadObjectOptions {
   version?: string
 }
 
-const { urlLengthLimit, globalS3Bucket } = getConfig()
+const { requestUrlLengthLimit, storageS3Bucket } = getConfig()
 
 /**
  * ObjectStorage
 */
@@ -146,7 +146,7 @@
     const prefixesSubset: string[] = []
     let urlParamLength = 0
 
-    for (; i < prefixes.length && urlParamLength < urlLengthLimit; i++) {
+    for (; i < prefixes.length && urlParamLength < requestUrlLengthLimit; i++) {
       const prefix = prefixes[i]
       prefixesSubset.push(prefix)
       urlParamLength += encodeURIComponent(prefix).length + 9 // length of '%22%2C%22'
@@ -171,7 +171,7 @@
           return all
         }, [] as string[])
 
-        await this.backend.deleteObjects(globalS3Bucket, prefixesToDelete)
+        await this.backend.deleteObjects(storageS3Bucket, prefixesToDelete)
 
         await Promise.allSettled(
           data.map((object) =>
@@ -278,14 +278,14 @@
     })
 
     const copyResult = await this.backend.copyObject(
-      globalS3Bucket,
+      storageS3Bucket,
       s3SourceKey,
       originObject.version,
       s3DestinationKey,
       newVersion
     )
 
-    const metadata = await this.backend.headObject(globalS3Bucket, s3DestinationKey, newVersion)
+    const metadata = await this.backend.headObject(storageS3Bucket, s3DestinationKey, newVersion)
 
     const destObject = await this.db.createObject({
       ...originObject,
@@ -361,14 +361,14 @@
 
     try {
       await this.backend.copyObject(
-        globalS3Bucket,
+        storageS3Bucket,
         s3SourceKey,
         sourceObj.version,
         s3DestinationKey,
         newVersion
       )
 
-      const metadata = await this.backend.headObject(globalS3Bucket, s3DestinationKey, newVersion)
+      const metadata = await this.backend.headObject(storageS3Bucket, s3DestinationKey, newVersion)
 
       await this.db.asSuperUser().withTransaction(async (db) => {
         await db.createObject({
@@ -484,7 +484,7 @@
     const pathsSubset = []
     let urlParamLength = 0
 
-    for (; i < paths.length && urlParamLength < urlLengthLimit; i++) {
+    for (; i < paths.length && urlParamLength < requestUrlLengthLimit; i++) {
       const path = paths[i]
       pathsSubset.push(path)
       urlParamLength += encodeURIComponent(path).length + 9 // length of '%22%2C%22'
diff --git a/src/storage/storage.ts b/src/storage/storage.ts
index 5d6f236f..6e84c663 100644
--- a/src/storage/storage.ts
+++ b/src/storage/storage.ts
@@ -6,7 +6,7 @@ import { getFileSizeLimit, mustBeValidBucketName, parseFileSizeToBytes } from '.
 import { getConfig } from '../config'
 import { ObjectStorage } from './object'
 
-const { urlLengthLimit, globalS3Bucket } = getConfig()
+const { requestUrlLengthLimit, storageS3Bucket } = getConfig()
 
 /**
  * Storage
 */
@@ -186,7 +186,7 @@
     const objects = await this.db.listObjects(
       bucketId,
       'id, name',
-      Math.floor(urlLengthLimit / (36 + 3))
+      Math.floor(requestUrlLengthLimit / (36 + 3))
     )
 
     if (!(objects && objects.length > 0)) {
@@ -207,7 +207,7 @@
           return all
         }, [] as string[])
       // delete files from s3 asynchronously
-      this.backend.deleteObjects(globalS3Bucket, params)
+      this.backend.deleteObjects(storageS3Bucket, params)
     }
 
     if (deleted?.length !== objects.length) {
diff --git a/src/storage/uploader.ts b/src/storage/uploader.ts
index 7066efa4..773b4da1 100644
--- a/src/storage/uploader.ts
+++ b/src/storage/uploader.ts
@@ -13,7 +13,7 @@ interface UploaderOptions extends UploadObjectOptions {
   allowedMimeTypes?: string[] | null
 }
 
-const { globalS3Bucket, fileSizeLimitStandardUpload } = getConfig()
+const { storageS3Bucket, uploadFileSizeLimitStandard } = getConfig()
 
 export interface UploadObjectOptions {
   bucketId: string
@@ -90,7 +90,7 @@
     const s3Key = `${this.db.tenantId}/${path}`
 
     const objectMetadata = await this.backend.uploadObject(
-      globalS3Bucket,
+      storageS3Bucket,
       s3Key,
       version,
       file.body,
@@ -302,8 +302,8 @@
       globalFileSizeLimit = Math.min(bucketSizeLimit, globalFileSizeLimit)
     }
 
-    if (fileSizeLimitStandardUpload && fileSizeLimitStandardUpload > 0) {
-      globalFileSizeLimit = Math.min(fileSizeLimitStandardUpload, globalFileSizeLimit)
+    if (uploadFileSizeLimitStandard && uploadFileSizeLimitStandard > 0) {
+      globalFileSizeLimit = Math.min(uploadFileSizeLimitStandard, globalFileSizeLimit)
     }
 
     return globalFileSizeLimit
diff --git a/src/test/object.test.ts b/src/test/object.test.ts
index 14a6796e..50e69933 100644
--- a/src/test/object.test.ts
+++ b/src/test/object.test.ts
@@ -4,7 +4,7 @@ import dotenv from 'dotenv'
 import FormData from 'form-data'
 import fs from 'fs'
 import app from '../app'
-import { getConfig } from '../config'
+import { getConfig, mergeConfig } from '../config'
 import { S3Backend } from '../storage/backend'
 import { Obj } from '../storage/schemas'
 import { signJWT } from '../auth'
@@ -36,6 +36,10 @@ async function getSuperuserPostgrestClient()
 useMockObject()
 useMockQueue()
 
+beforeEach(() => {
+  getConfig({ reload: true })
+})
+
 afterEach(async () => {
   if (tnx) {
     await tnx.commit()
@@ -421,7 +425,10 @@ describe('testing POST object via multipart upload', () => {
   })
 
   test('return 400 when exceeding file size limit', async () => {
-    process.env.FILE_SIZE_LIMIT = '1'
+    mergeConfig({
+      uploadFileSizeLimit: 1,
+    })
+
     const form = new FormData()
     form.append('file', fs.createReadStream(`./src/test/assets/sadcat.jpg`))
     const headers = Object.assign({}, form.getHeaders(), {
@@ -653,7 +660,9 @@ describe('testing POST object via binary upload', () => {
   })
 
   test('return 400 when exceeding file size limit', async () => {
-    process.env.FILE_SIZE_LIMIT = '1'
+    mergeConfig({
+      uploadFileSizeLimit: 1,
+    })
     const path = './src/test/assets/sadcat.jpg'
     const { size } = fs.statSync(path)
diff --git a/src/test/rls.test.ts b/src/test/rls.test.ts
index 568d4386..d9018411 100644
--- a/src/test/rls.test.ts
+++ b/src/test/rls.test.ts
@@ -66,7 +66,7 @@ const testSpec = yaml.load(
   fs.readFileSync(path.resolve(__dirname, 'rls_tests.yaml'), 'utf8')
 ) as RlsTestSpec
 
-const { serviceKey, tenantId, jwtSecret, databaseURL, globalS3Bucket, storageBackendType } =
+const { serviceKey, tenantId, jwtSecret, databaseURL, storageS3Bucket, storageBackendType } =
   getConfig()
 const backend = createStorageBackend(storageBackendType)
 const client = backend.client
@@ -79,11 +79,11 @@ describe('RLS policies', () => {
   beforeAll(async () => {
     // parse yaml file
     if (client instanceof S3Client) {
-      const bucketExists = await checkBucketExists(client, globalS3Bucket)
+      const bucketExists = await checkBucketExists(client, storageS3Bucket)
 
       if (!bucketExists) {
         const createBucketCommand = new CreateBucketCommand({
-          Bucket: globalS3Bucket,
+          Bucket: storageS3Bucket,
         })
 
         await client.send(createBucketCommand)
       }
diff --git a/src/test/tus.test.ts b/src/test/tus.test.ts
index 0e88f6c1..487129c0 100644
--- a/src/test/tus.test.ts
+++ b/src/test/tus.test.ts
@@ -18,7 +18,7 @@ import { DetailedError } from 'tus-js-client'
 import { getServiceKeyUser } from '../database/tenant'
 import { checkBucketExists } from './common'
 
-const { serviceKey, tenantId, globalS3Bucket, storageBackendType } = getConfig()
+const { serviceKey, tenantId, storageS3Bucket, storageBackendType } = getConfig()
 const oneChunkFile = fs.createReadStream(path.resolve(__dirname, 'assets', 'sadcat.jpg'))
 const localServerAddress = 'http://127.0.0.1:8999'
@@ -41,11 +41,11 @@ describe('Tus multipart', () => {
   })
 
   if (client instanceof S3Client) {
-    const bucketExists = await checkBucketExists(client, globalS3Bucket)
+    const bucketExists = await checkBucketExists(client, storageS3Bucket)
 
     if (!bucketExists) {
       const createBucketCommand = new CreateBucketCommand({
-        Bucket: globalS3Bucket,
+        Bucket: storageS3Bucket,
       })
 
       await client.send(createBucketCommand)
     }
diff --git a/src/test/x-forwarded-host.test.ts b/src/test/x-forwarded-host.test.ts
index ade548cc..2d3508c8 100644
--- a/src/test/x-forwarded-host.test.ts
+++ b/src/test/x-forwarded-host.test.ts
@@ -5,6 +5,7 @@ import * as migrate from '../database/migrate'
 import { knex } from '../database/multitenant-db'
 import app from '../app'
 import * as tenant from '../database/tenant'
+import { getConfig, mergeConfig } from '../config'
 
 dotenv.config({ path: '.env.test' })
 
@@ -39,9 +40,14 @@
 })
 
 beforeEach(() => {
-  process.env = { ...ENV }
-  process.env.IS_MULTITENANT = 'true'
-  process.env.X_FORWARDED_HOST_REGEXP = '^([a-z]{20})\\.supabase\\.(?:co|in|net)$'
+  mergeConfig({
+    isMultitenant: true,
+    requestXForwardedHostRegExp: '^([a-z]{20})\\.supabase\\.(?:co|in|net)$',
+  })
+})
+
+afterEach(() => {
+  getConfig({ reload: true })
 })
 
 afterAll(async () => {
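
Reviewer note: the central mechanism in this diff is the env-var renaming in src/config.ts, where every option now reads its new name first and falls back to the deprecated one, so existing deployments keep working unchanged. Below is a minimal standalone sketch of that lookup pattern; the helper is copied from the diff, while the demo calls and sample bucket values are illustrative only.

// Fallback lookup as introduced in src/config.ts: prefer the new
// env var, fall back to the deprecated name when the new one is unset.
function getOptionalConfigFromEnv(key: string, fallback?: string): string | undefined {
  const envValue = process.env[key]

  if (!envValue && fallback) {
    return getOptionalConfigFromEnv(fallback)
  }

  return envValue
}

// Illustrative usage with the S3 bucket option renamed in this diff
// (sample values are hypothetical):
process.env.GLOBAL_S3_BUCKET = 'legacy-bucket' // deprecated name still honoured
console.log(getOptionalConfigFromEnv('STORAGE_S3_BUCKET', 'GLOBAL_S3_BUCKET')) // 'legacy-bucket'

process.env.STORAGE_S3_BUCKET = 'new-bucket' // new name wins once both are set
console.log(getOptionalConfigFromEnv('STORAGE_S3_BUCKET', 'GLOBAL_S3_BUCKET')) // 'new-bucket'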
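Related: getConfig() is now memoized, which is why the tests stop mutating process.env and instead use the new mergeConfig() / getConfig({ reload: true }) pair, as in src/test/object.test.ts above. A short sketch of the intended test-side usage, assuming a Jest-style test file with a hypothetical test body:

import { getConfig, mergeConfig } from '../config'

beforeEach(() => {
  // drop the memoized config so each test re-reads the real environment
  getConfig({ reload: true })
})

test('rejects uploads over the configured limit', async () => {
  // override a single option in-process instead of setting FILE_SIZE_LIMIT
  mergeConfig({ uploadFileSizeLimit: 1 })
  // ...exercise an upload route and assert on the 400 response
})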