From 920156a752befeeb60a0c8244abf252fefb23cf7 Mon Sep 17 00:00:00 2001 From: Fabrizio Date: Thu, 11 Apr 2024 08:27:54 +0100 Subject: [PATCH] Feat: S3 Compatible Protocol (#444) * feat: s3 protocol --- .env.sample | 8 + .env.test.sample | 7 +- .github/workflows/ci.yml | 6 +- README.md | 10 +- docker-compose.yml | 3 +- jest-setup.ts | 4 + jest.config.js | 2 +- jest.sequencer.js | 8 +- .../0008-tenants-s3-credentials.sql | 46 + ...9-add-scope-token-column-to-tenants-s3.sql | 3 + .../0020-list-objects-with-delimiter.sql | 43 + .../tenant/0021-s3-multipart-uploads.sql | 84 ++ .../0022-s3-multipart-uploads-big-ints.sql | 2 + .../tenant/0023-optimize-search-function.sql | 78 ++ package-lock.json | 646 ++++++--- package.json | 18 +- src/admin-app.ts | 1 + src/app.ts | 4 + src/auth/jwt.ts | 53 +- src/config.ts | 18 + src/database/client.ts | 14 +- src/database/connection.ts | 11 +- src/database/migrations/migrate.ts | 1 + src/database/migrations/progressive.ts | 12 +- src/database/tenant.ts | 161 ++- src/http/error-handler.ts | 28 +- src/http/plugins/db.ts | 3 +- src/http/plugins/index.ts | 2 + src/http/plugins/jwt.ts | 13 +- src/http/plugins/signature-v4.ts | 160 +++ src/http/plugins/xml.ts | 36 + src/http/routes/admin/index.ts | 1 + src/http/routes/admin/migrations.ts | 17 +- src/http/routes/admin/s3.ts | 124 ++ src/http/routes/index.ts | 1 + src/http/routes/object/copyObject.ts | 12 +- src/http/routes/object/deleteObject.ts | 2 +- src/http/routes/object/getObject.ts | 1 - src/http/routes/object/getSignedObject.ts | 6 +- src/http/routes/object/moveObject.ts | 15 +- src/http/routes/object/uploadSignedObject.ts | 8 +- .../routes/render/renderAuthenticatedImage.ts | 2 +- src/http/routes/render/renderPublicImage.ts | 2 +- src/http/routes/render/renderSignedImage.ts | 8 +- .../s3/commands/abort-multipart-upload.ts | 33 + .../s3/commands/complete-multipart-upload.ts | 65 + src/http/routes/s3/commands/copy-object.ts | 53 + src/http/routes/s3/commands/create-bucket.ts | 27 + .../s3/commands/create-multipart-upload.ts | 47 + src/http/routes/s3/commands/delete-bucket.ts | 21 + src/http/routes/s3/commands/delete-object.ts | 79 ++ src/http/routes/s3/commands/get-bucket.ts | 52 + src/http/routes/s3/commands/get-object.ts | 38 + src/http/routes/s3/commands/head-bucket.ts | 20 + src/http/routes/s3/commands/head-object.ts | 25 + src/http/routes/s3/commands/list-buckets.ts | 13 + .../s3/commands/list-multipart-uploads.ts | 42 + src/http/routes/s3/commands/list-objects.ts | 76 ++ src/http/routes/s3/commands/list-parts.ts | 37 + .../routes/s3/commands/upload-part-copy.ts | 62 + src/http/routes/s3/commands/upload-part.ts | 82 ++ src/http/routes/s3/error-handler.ts | 91 ++ src/http/routes/s3/index.ts | 100 ++ src/http/routes/s3/router.ts | 261 ++++ src/http/routes/tus/index.ts | 25 +- src/http/routes/tus/lifecycle.ts | 6 +- src/server.ts | 4 + .../backend/{generic.ts => adapter.ts} | 82 +- src/storage/backend/file.ts | 164 ++- src/storage/backend/index.ts | 4 +- src/storage/backend/s3.ts | 163 ++- src/storage/database/adapter.ts | 54 +- src/storage/database/knex.ts | 422 ++++-- src/storage/errors.ts | 419 +++++- src/storage/limits.ts | 39 +- src/storage/object.ts | 123 +- src/storage/protocols/s3/byte-limit-stream.ts | 20 + src/storage/protocols/s3/index.ts | 1 + src/storage/protocols/s3/s3-handler.ts | 1173 +++++++++++++++++ src/storage/protocols/s3/signature-v4.ts | 286 ++++ .../{ => protocols}/tus/als-memory-kv.ts | 0 src/storage/{ => protocols}/tus/file-store.ts | 2 +- src/storage/{ => protocols}/tus/index.ts | 0 
.../{ => protocols}/tus/postgres-locker.ts | 7 +- src/storage/{ => protocols}/tus/server.ts | 0 src/storage/{ => protocols}/tus/upload-id.ts | 26 +- src/storage/renderer/image.ts | 6 +- src/storage/schemas/index.ts | 1 + src/storage/schemas/multipart.ts | 48 + src/storage/storage.ts | 38 +- src/storage/uploader.ts | 52 +- src/test/bucket.test.ts | 2 +- src/test/common.ts | 2 + src/test/db/02-dummy-data.sql | 3 + src/test/object.test.ts | 86 +- src/test/rls_tests.yaml | 24 +- src/test/s3-protocol.test.ts | 1101 ++++++++++++++++ src/test/tenant.test.ts | 4 +- src/test/webhooks.test.ts | 19 +- 99 files changed, 6644 insertions(+), 670 deletions(-) create mode 100644 migrations/multitenant/0008-tenants-s3-credentials.sql create mode 100644 migrations/multitenant/0009-add-scope-token-column-to-tenants-s3.sql create mode 100644 migrations/tenant/0020-list-objects-with-delimiter.sql create mode 100644 migrations/tenant/0021-s3-multipart-uploads.sql create mode 100644 migrations/tenant/0022-s3-multipart-uploads-big-ints.sql create mode 100644 migrations/tenant/0023-optimize-search-function.sql create mode 100644 src/http/plugins/signature-v4.ts create mode 100644 src/http/plugins/xml.ts create mode 100644 src/http/routes/admin/s3.ts create mode 100644 src/http/routes/s3/commands/abort-multipart-upload.ts create mode 100644 src/http/routes/s3/commands/complete-multipart-upload.ts create mode 100644 src/http/routes/s3/commands/copy-object.ts create mode 100644 src/http/routes/s3/commands/create-bucket.ts create mode 100644 src/http/routes/s3/commands/create-multipart-upload.ts create mode 100644 src/http/routes/s3/commands/delete-bucket.ts create mode 100644 src/http/routes/s3/commands/delete-object.ts create mode 100644 src/http/routes/s3/commands/get-bucket.ts create mode 100644 src/http/routes/s3/commands/get-object.ts create mode 100644 src/http/routes/s3/commands/head-bucket.ts create mode 100644 src/http/routes/s3/commands/head-object.ts create mode 100644 src/http/routes/s3/commands/list-buckets.ts create mode 100644 src/http/routes/s3/commands/list-multipart-uploads.ts create mode 100644 src/http/routes/s3/commands/list-objects.ts create mode 100644 src/http/routes/s3/commands/list-parts.ts create mode 100644 src/http/routes/s3/commands/upload-part-copy.ts create mode 100644 src/http/routes/s3/commands/upload-part.ts create mode 100644 src/http/routes/s3/error-handler.ts create mode 100644 src/http/routes/s3/index.ts create mode 100644 src/http/routes/s3/router.ts rename src/storage/backend/{generic.ts => adapter.ts} (64%) create mode 100644 src/storage/protocols/s3/byte-limit-stream.ts create mode 100644 src/storage/protocols/s3/index.ts create mode 100644 src/storage/protocols/s3/s3-handler.ts create mode 100644 src/storage/protocols/s3/signature-v4.ts rename src/storage/{ => protocols}/tus/als-memory-kv.ts (100%) rename src/storage/{ => protocols}/tus/file-store.ts (95%) rename src/storage/{ => protocols}/tus/index.ts (100%) rename src/storage/{ => protocols}/tus/postgres-locker.ts (94%) rename src/storage/{ => protocols}/tus/server.ts (100%) rename src/storage/{ => protocols}/tus/upload-id.ts (68%) create mode 100644 src/storage/schemas/multipart.ts create mode 100644 src/test/s3-protocol.test.ts diff --git a/.env.sample b/.env.sample index 07f3a33b..287e42c5 100644 --- a/.env.sample +++ b/.env.sample @@ -65,10 +65,18 @@ UPLOAD_FILE_SIZE_LIMIT=524288000 UPLOAD_FILE_SIZE_LIMIT_STANDARD=52428800 UPLOAD_SIGNED_URL_EXPIRATION_TIME=60 +####################################### +# TUS Protocol 
+####################################### TUS_URL_PATH=/upload/resumable TUS_URL_EXPIRY_MS=3600000 TUS_PART_SIZE=50 +####################################### +# S3 Protocol +####################################### +S3_PROTOCOL_ACCESS_KEY_ID=b585f311d839730f8a980a3457be2787 +S3_PROTOCOL_ACCESS_KEY_SECRET=67d161a7a8a46a24a17a75b26e7724f11d56b8d49a119227c66b13b6595601fb ####################################### # Storage Backend Driver diff --git a/.env.test.sample b/.env.test.sample index f33a1059..fee2ccbb 100644 --- a/.env.test.sample +++ b/.env.test.sample @@ -2,8 +2,12 @@ AUTHENTICATED_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhd ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoiYW5vbiIsImlhdCI6MTYxMzUzMTk4NSwiZXhwIjoxOTI5MTA3OTg1fQ.mqfi__KnQB4v6PkIjkhzfwWrYyF94MEbSC6LnuvVniE SERVICE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJyb2xlIjoic2VydmljZV9yb2xlIiwiaWF0IjoxNjEzNTMxOTg1LCJleHAiOjE5MjkxMDc5ODV9.th84OKK0Iz8QchDyXZRrojmKSEZ-OuitQm_5DvLiSIc +S3_PROTOCOL_ACCESS_KEY_ID=b585f311d839730f8a980a3457be2787 +S3_PROTOCOL_ACCESS_KEY_SECRET=67d161a7a8a46a24a17a75b26e7724f11d56b8d49a119227c66b13b6595601fb +S3_PROTOCOL_ALLOWS_SERVICE_KEY_AS_SECRET=false + TENANT_ID=bjhaohmqunupljrqypxz -ENABLE_DEFAULT_METRICS=false +DEFAULT_METRICS_ENABLED=false PG_QUEUE_ENABLE=false MULTI_TENANT=false ADMIN_API_KEYS=apikey @@ -18,3 +22,4 @@ AWS_DEFAULT_REGION=ap-southeast-1 STORAGE_S3_ENDPOINT=http://127.0.0.1:9000 STORAGE_S3_PROTOCOL=http STORAGE_S3_FORCE_PATH_STYLE=true +REQUEST_X_FORWARDED_HOST_REGEXP= \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 307b3bf3..8fdf061f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -56,16 +56,13 @@ jobs: SERVICE_KEY: ${{ secrets.SERVICE_KEY }} TENANT_ID: ${{ secrets.TENANT_ID }} REGION: ${{ secrets.REGION }} - POSTGREST_URL: ${{ secrets.POSTGREST_URL }} GLOBAL_S3_BUCKET: ${{ secrets.GLOBAL_S3_BUCKET }} PGRST_JWT_SECRET: ${{ secrets.PGRST_JWT_SECRET }} AUTHENTICATED_KEY: ${{ secrets.AUTHENTICATED_KEY }} DATABASE_URL: postgresql://postgres:postgres@127.0.0.1/postgres - PGOPTIONS: -c search_path=storage,public FILE_SIZE_LIMIT: '52428800' STORAGE_BACKEND: s3 MULTITENANT_DATABASE_URL: postgresql://postgres:postgres@127.0.0.1:5433/postgres - POSTGREST_URL_SUFFIX: /rest/v1 ADMIN_API_KEYS: apikey ENABLE_IMAGE_TRANSFORMATION: true IMGPROXY_URL: http://127.0.0.1:50020 @@ -79,6 +76,9 @@ jobs: ENABLE_DEFAULT_METRICS: false PG_QUEUE_ENABLE: false MULTI_TENANT: false + S3_PROTOCOL_ACCESS_KEY_ID: ${{ secrets.TENANT_ID }} + S3_PROTOCOL_ACCESS_KEY_SECRET: ${{ secrets.SERVICE_KEY }} + - name: Upload coverage results to Coveralls uses: coverallsapp/github-action@master diff --git a/README.md b/README.md index 31b662be..f794d989 100644 --- a/README.md +++ b/README.md @@ -6,11 +6,19 @@ A scalable, light-weight object storage service. > Read [this post](https://supabase.io/blog/2021/03/30/supabase-storage) on why we decided to build a new object storage service. +- Multi-protocol support (HTTP, TUS, S3) - Uses Postgres as its datastore for storing metadata - Authorization rules are written as Postgres Row Level Security policies -- Integrates with S3 as the storage backend (with more in the pipeline!) 
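The S3_PROTOCOL_ACCESS_KEY_ID / S3_PROTOCOL_ACCESS_KEY_SECRET pair introduced above is what clients use to sign requests against the new S3-compatible API. Below is a minimal, hedged TypeScript sketch of pointing the AWS SDK v3 (already a project dependency) at a locally running storage server; the endpoint URL, region, prefix and bucket name are illustrative assumptions, not values defined by this patch:

```ts
// A hedged sketch, not part of the patch: endpoint, region and bucket are assumptions.
import { S3Client, ListObjectsV2Command } from '@aws-sdk/client-s3'

const client = new S3Client({
  // The S3 routes are assumed to be mounted under the /s3 prefix
  // (see the src/app.ts change later in this patch).
  endpoint: 'http://localhost:5000/s3',
  region: 'ap-southeast-1',
  forcePathStyle: true, // path-style bucket addressing, matching the local setup assumed here
  credentials: {
    accessKeyId: process.env.S3_PROTOCOL_ACCESS_KEY_ID ?? '',
    secretAccessKey: process.env.S3_PROTOCOL_ACCESS_KEY_SECRET ?? '',
  },
})

// 'my-bucket' is a placeholder; top-level await assumes an ES module context.
const { Contents } = await client.send(
  new ListObjectsV2Command({ Bucket: 'my-bucket', Prefix: 'photos/', Delimiter: '/' })
)
console.log(Contents?.map((o) => o.Key))
```

Any S3 client that supports a custom endpoint and path-style addressing should be able to connect the same way.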
+- Integrates with S3 Compatible Storages - Extremely lightweight and performant + +**Supported Protocols** + +- [x] HTTP/REST +- [x] TUS Resumable Upload +- [x] S3 Compatible API + ![Architecture](./static/architecture.png?raw=true 'Architecture') ## Documentation diff --git a/docker-compose.yml b/docker-compose.yml index 6d4b2882..94be3174 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -6,7 +6,6 @@ services: image: supabase/storage-api:latest ports: - '5000:5000' - - '5001:5001' depends_on: tenant_db: condition: service_healthy @@ -39,7 +38,7 @@ services: UPLOAD_SIGNED_URL_EXPIRATION_TIME: 120 TUS_URL_PATH: /upload/resumable TUS_URL_EXPIRY_MS: 3600000 - # Image Tranformation + # Image Transformation IMAGE_TRANSFORMATION_ENABLED: "true" IMGPROXY_URL: http://imgproxy:8080 IMGPROXY_REQUEST_TIMEOUT: 15 diff --git a/jest-setup.ts b/jest-setup.ts index 6e5f25b3..4988cc38 100644 --- a/jest-setup.ts +++ b/jest-setup.ts @@ -1,3 +1,7 @@ import { getConfig, setEnvPaths } from './src/config' setEnvPaths(['.env.test', '.env']) + +beforeEach(() => { + getConfig({ reload: true }) +}) diff --git a/jest.config.js b/jest.config.js index ef01b156..39a46f76 100644 --- a/jest.config.js +++ b/jest.config.js @@ -4,7 +4,7 @@ module.exports = { transform: { '^.+\\.(t|j)sx?$': 'ts-jest', }, - setupFiles: ['/jest-setup.ts'], + setupFilesAfterEnv: ['/jest-setup.ts'], testEnvironment: 'node', testPathIgnorePatterns: ['node_modules', 'dist'], coverageProvider: 'v8', diff --git a/jest.sequencer.js b/jest.sequencer.js index d27fde9c..d3b41dc2 100644 --- a/jest.sequencer.js +++ b/jest.sequencer.js @@ -11,15 +11,21 @@ const isTusTest = (test) => { return test.path.includes('tus') } +const isS3Test = (test) => { + return test.path.includes('s3') +} + class CustomSequencer extends Sequencer { sort(tests) { const copyTests = Array.from(tests) - const normalTests = copyTests.filter((t) => !isRLSTest(t) && !isTusTest(t)) + const normalTests = copyTests.filter((t) => !isRLSTest(t) && !isTusTest(t) && !isS3Test(t)) const tusTests = copyTests.filter((t) => isTusTest(t)) + const s3Tests = copyTests.filter((t) => isS3Test(t)) const rlsTests = copyTests.filter((t) => isRLSTest(t)) return super .sort(normalTests) .concat(tusTests) + .concat(s3Tests) .concat(rlsTests.sort((a, b) => (a.path > b.path ? 
1 : -1))) } } diff --git a/migrations/multitenant/0008-tenants-s3-credentials.sql b/migrations/multitenant/0008-tenants-s3-credentials.sql new file mode 100644 index 00000000..1376f839 --- /dev/null +++ b/migrations/multitenant/0008-tenants-s3-credentials.sql @@ -0,0 +1,46 @@ + + +CREATE TABLE IF NOT EXISTS tenants_s3_credentials ( + id UUID PRIMARY KEY default gen_random_uuid(), + description text NOT NULL, + tenant_id text REFERENCES tenants(id) ON DELETE CASCADE, + access_key text NOT NULL, + secret_key text NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS tenants_s3_credentials_tenant_id_idx ON tenants_s3_credentials(tenant_id); +CREATE UNIQUE INDEX IF NOT EXISTS tenants_s3_credentials_access_key_idx ON tenants_s3_credentials(tenant_id, access_key); + + +CREATE OR REPLACE FUNCTION tenants_s3_credentials_update_notify_trigger () + RETURNS TRIGGER +AS $$ +BEGIN + PERFORM + pg_notify('tenants_s3_credentials_update', '"' || NEW.id || ':' || NEW.access_key || '"'); + RETURN NULL; +END; +$$ + LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION tenants_s3_credentials_delete_notify_trigger () + RETURNS TRIGGER +AS $$ +BEGIN + PERFORM + pg_notify('tenants_s3_credentials_update', '"' || OLD.id || ':' || OLD.access_key || '"'); + RETURN NULL; +END; +$$ + LANGUAGE plpgsql; + +CREATE TRIGGER tenants_s3_credentials_update_notify_trigger + AFTER UPDATE ON tenants_s3_credentials + FOR EACH ROW +EXECUTE PROCEDURE tenants_s3_credentials_update_notify_trigger (); + +CREATE TRIGGER tenants_s3_credentials_delete_notify_trigger + AFTER DELETE ON tenants_s3_credentials + FOR EACH ROW +EXECUTE PROCEDURE tenants_s3_credentials_delete_notify_trigger (); diff --git a/migrations/multitenant/0009-add-scope-token-column-to-tenants-s3.sql b/migrations/multitenant/0009-add-scope-token-column-to-tenants-s3.sql new file mode 100644 index 00000000..a7ea520e --- /dev/null +++ b/migrations/multitenant/0009-add-scope-token-column-to-tenants-s3.sql @@ -0,0 +1,3 @@ + + +ALTER TABLE tenants_s3_credentials ADD COLUMN claims json NOT NULL DEFAULT '{}'; diff --git a/migrations/tenant/0020-list-objects-with-delimiter.sql b/migrations/tenant/0020-list-objects-with-delimiter.sql new file mode 100644 index 00000000..bd24c297 --- /dev/null +++ b/migrations/tenant/0020-list-objects-with-delimiter.sql @@ -0,0 +1,43 @@ + + +CREATE OR REPLACE FUNCTION storage.list_objects_with_delimiter(bucket_id text, prefix_param text, delimiter_param text, max_keys integer default 100, start_after text DEFAULT '', next_token text DEFAULT '') + RETURNS TABLE (name text, id uuid, metadata jsonb, updated_at timestamptz) AS +$$ +BEGIN + RETURN QUERY EXECUTE + 'SELECT DISTINCT ON(name COLLATE "C") * from ( + SELECT + CASE + WHEN position($2 IN substring(name from length($1) + 1)) > 0 THEN + substring(name from 1 for length($1) + position($2 IN substring(name from length($1) + 1))) + ELSE + name + END AS name, id, metadata, updated_at + FROM + storage.objects + WHERE + bucket_id = $5 AND + name ILIKE $1 || ''%'' AND + CASE + WHEN $6 != '''' THEN + name COLLATE "C" > $6 + ELSE true END + AND CASE + WHEN $4 != '''' THEN + CASE + WHEN position($2 IN substring(name from length($1) + 1)) > 0 THEN + substring(name from 1 for length($1) + position($2 IN substring(name from length($1) + 1))) COLLATE "C" > $4 + ELSE + name COLLATE "C" > $4 + END + ELSE + true + END + ORDER BY + name COLLATE "C" ASC) as e order by name COLLATE "C" LIMIT $3' + USING prefix_param, delimiter_param, max_keys, next_token, bucket_id, start_after; 
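    -- Annotation (not part of the original migration): the positional parameters in the
    -- dynamic query above bind, via the USING clause, as:
    --   $1 = prefix_param, $2 = delimiter_param, $3 = max_keys,
    --   $4 = next_token,   $5 = bucket_id,       $6 = start_after
    -- Illustrative call (bucket id and prefix are placeholders, not values from this patch):
    --   SELECT * FROM storage.list_objects_with_delimiter('my-bucket', 'photos/', '/', 50);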
+END; +$$ LANGUAGE plpgsql; + +CREATE INDEX IF NOT EXISTS idx_objects_bucket_id_name + ON storage.objects (bucket_id, (name COLLATE "C")); \ No newline at end of file diff --git a/migrations/tenant/0021-s3-multipart-uploads.sql b/migrations/tenant/0021-s3-multipart-uploads.sql new file mode 100644 index 00000000..8b6f614e --- /dev/null +++ b/migrations/tenant/0021-s3-multipart-uploads.sql @@ -0,0 +1,84 @@ +CREATE TABLE IF NOT EXISTS storage.s3_multipart_uploads ( + id text PRIMARY KEY, + in_progress_size int NOT NULL default 0, + upload_signature text NOT NULL, + bucket_id text NOT NULL references storage.buckets(id), + key text COLLATE "C" NOT NULL , + version text NOT NULL, + owner_id text NULL, + created_at timestamptz NOT NULL default now() +); + +CREATE TABLE IF NOT EXISTS storage.s3_multipart_uploads_parts ( + id uuid PRIMARY KEY default gen_random_uuid(), + upload_id text NOT NULL references storage.s3_multipart_uploads(id) ON DELETE CASCADE, + size int NOT NULL default 0, + part_number int NOT NULL, + bucket_id text NOT NULL references storage.buckets(id), + key text COLLATE "C" NOT NULL, + etag text NOT NULL, + owner_id text NULL, + version text NOT NULL, + created_at timestamptz NOT NULL default now() +); + +CREATE INDEX IF NOT EXISTS idx_multipart_uploads_list + ON storage.s3_multipart_uploads (bucket_id, (key COLLATE "C"), created_at ASC); + +CREATE OR REPLACE FUNCTION storage.list_multipart_uploads_with_delimiter(bucket_id text, prefix_param text, delimiter_param text, max_keys integer default 100, next_key_token text DEFAULT '', next_upload_token text default '') + RETURNS TABLE (key text, id text, created_at timestamptz) AS +$$ +BEGIN + RETURN QUERY EXECUTE + 'SELECT DISTINCT ON(key COLLATE "C") * from ( + SELECT + CASE + WHEN position($2 IN substring(key from length($1) + 1)) > 0 THEN + substring(key from 1 for length($1) + position($2 IN substring(key from length($1) + 1))) + ELSE + key + END AS key, id, created_at + FROM + storage.s3_multipart_uploads + WHERE + bucket_id = $5 AND + key ILIKE $1 || ''%'' AND + CASE + WHEN $4 != '''' AND $6 = '''' THEN + CASE + WHEN position($2 IN substring(key from length($1) + 1)) > 0 THEN + substring(key from 1 for length($1) + position($2 IN substring(key from length($1) + 1))) COLLATE "C" > $4 + ELSE + key COLLATE "C" > $4 + END + ELSE + true + END AND + CASE + WHEN $6 != '''' THEN + id COLLATE "C" > $6 + ELSE + true + END + ORDER BY + key COLLATE "C" ASC, created_at ASC) as e order by key COLLATE "C" LIMIT $3' + USING prefix_param, delimiter_param, max_keys, next_key_token, bucket_id, next_upload_token; +END; +$$ LANGUAGE plpgsql; + +ALTER TABLE storage.s3_multipart_uploads ENABLE ROW LEVEL SECURITY; +ALTER TABLE storage.s3_multipart_uploads_parts ENABLE ROW LEVEL SECURITY; + +DO $$ +DECLARE + anon_role text = COALESCE(current_setting('storage.anon_role', true), 'anon'); + authenticated_role text = COALESCE(current_setting('storage.authenticated_role', true), 'authenticated'); + service_role text = COALESCE(current_setting('storage.service_role', true), 'service_role'); +BEGIN + EXECUTE 'revoke all on storage.s3_multipart_uploads from ' || anon_role || ', ' || authenticated_role; + EXECUTE 'revoke all on storage.s3_multipart_uploads_parts from ' || anon_role || ', ' || authenticated_role; + EXECUTE 'GRANT ALL ON TABLE storage.s3_multipart_uploads TO ' || service_role; + EXECUTE 'GRANT ALL ON TABLE storage.s3_multipart_uploads_parts TO ' || service_role; + EXECUTE 'GRANT SELECT ON TABLE storage.s3_multipart_uploads TO ' || 
authenticated_role || ', ' || anon_role; + EXECUTE 'GRANT SELECT ON TABLE storage.s3_multipart_uploads_parts TO ' || authenticated_role || ', ' || anon_role; +END$$; \ No newline at end of file diff --git a/migrations/tenant/0022-s3-multipart-uploads-big-ints.sql b/migrations/tenant/0022-s3-multipart-uploads-big-ints.sql new file mode 100644 index 00000000..86a765f8 --- /dev/null +++ b/migrations/tenant/0022-s3-multipart-uploads-big-ints.sql @@ -0,0 +1,2 @@ +ALTER TABLE storage.s3_multipart_uploads ALTER COLUMN in_progress_size TYPE bigint; +ALTER TABLE storage.s3_multipart_uploads_parts ALTER COLUMN size TYPE bigint; \ No newline at end of file diff --git a/migrations/tenant/0023-optimize-search-function.sql b/migrations/tenant/0023-optimize-search-function.sql new file mode 100644 index 00000000..7721d46f --- /dev/null +++ b/migrations/tenant/0023-optimize-search-function.sql @@ -0,0 +1,78 @@ +create or replace function storage.search ( + prefix text, + bucketname text, + limits int default 100, + levels int default 1, + offsets int default 0, + search text default '', + sortcolumn text default 'name', + sortorder text default 'asc' +) returns table ( + name text, + id uuid, + updated_at timestamptz, + created_at timestamptz, + last_accessed_at timestamptz, + metadata jsonb + ) +as $$ +declare + v_order_by text; + v_sort_order text; +begin + case + when sortcolumn = 'name' then + v_order_by = 'name'; + when sortcolumn = 'updated_at' then + v_order_by = 'updated_at'; + when sortcolumn = 'created_at' then + v_order_by = 'created_at'; + when sortcolumn = 'last_accessed_at' then + v_order_by = 'last_accessed_at'; + else + v_order_by = 'name'; + end case; + + case + when sortorder = 'asc' then + v_sort_order = 'asc'; + when sortorder = 'desc' then + v_sort_order = 'desc'; + else + v_sort_order = 'asc'; + end case; + + v_order_by = v_order_by || ' ' || v_sort_order; + + return query execute + 'with folders as ( + select path_tokens[$1] as folder + from storage.objects + where objects.name ilike $2 || $3 || ''%'' + and bucket_id = $4 + and array_length(objects.path_tokens, 1) <> $1 + group by folder + order by folder ' || v_sort_order || ' + ) + (select folder as "name", + null as id, + null as updated_at, + null as created_at, + null as last_accessed_at, + null as metadata from folders) + union all + (select path_tokens[$1] as "name", + id, + updated_at, + created_at, + last_accessed_at, + metadata + from storage.objects + where objects.name ilike $2 || $3 || ''%'' + and bucket_id = $4 + and array_length(objects.path_tokens, 1) = $1 + order by ' || v_order_by || ') + limit $5 + offset $6' using levels, prefix, search, bucketname, limits, offsets; +end; +$$ language plpgsql stable; \ No newline at end of file diff --git a/package-lock.json b/package-lock.json index bd8418d1..4e376af3 100644 --- a/package-lock.json +++ b/package-lock.json @@ -12,6 +12,7 @@ "@aws-sdk/client-s3": "3.515.0", "@aws-sdk/lib-storage": "3.515.0", "@aws-sdk/s3-request-presigner": "3.421.0", + "@fastify/accepts": "^4.3.0", "@fastify/multipart": "^7.6.0", "@fastify/rate-limit": "^7.6.0", "@fastify/swagger": "^8.3.1", @@ -23,6 +24,7 @@ "@tus/s3-store": "1.4.1", "@tus/server": "1.4.1", "agentkeepalive": "^4.2.1", + "ajv": "^8.12.0", "async-retry": "^1.3.3", "axios": "^1.6.3", "axios-retry": "^3.9.1", @@ -33,24 +35,30 @@ "fastify": "^4.8.1", "fastify-metrics": "^10.2.0", "fastify-plugin": "^4.0.0", + "fastify-xml-body-parser": "^2.2.0", "fs-extra": "^10.0.1", - "fs-xattr": "^0.3.1", + "fs-xattr": "0.3.1", "ioredis": 
"^5.2.4", "jsonwebtoken": "^9.0.2", "knex": "^3.1.0", + "lru-cache": "^10.2.0", "md5-file": "^5.0.0", + "multistream": "^4.1.0", + "object-sizeof": "^2.6.4", "pg": "^8.11.3", "pg-boss": "^9.0.3", "pg-listen": "^1.7.0", "pino": "^8.15.4", "pino-logflare": "^0.4.2", "postgres-migrations": "^5.3.0", - "prom-client": "^14.0.1" + "prom-client": "^14.0.1", + "xml2js": "^0.6.2" }, "bin": { "supa-storage": "dist/server.js" }, "devDependencies": { + "@types/ajv": "^1.0.0", "@types/async-retry": "^1.4.5", "@types/busboy": "^1.3.0", "@types/crypto-js": "^4.1.1", @@ -58,9 +66,12 @@ "@types/jest": "^29.2.1", "@types/js-yaml": "^4.0.5", "@types/jsonwebtoken": "^9.0.5", + "@types/multistream": "^4.1.3", "@types/mustache": "^4.2.2", "@types/node": "^20.11.5", "@types/pg": "^8.6.4", + "@types/stream-buffers": "^3.0.7", + "@types/xml2js": "^0.4.14", "@typescript-eslint/eslint-plugin": "^5.12.1", "@typescript-eslint/parser": "^5.12.1", "babel-jest": "^29.2.2", @@ -70,10 +81,11 @@ "form-data": "^4.0.0", "jest": "^29.2.2", "js-yaml": "^4.1.0", - "json-schema-to-ts": "^2.5.4", + "json-schema-to-ts": "^3.0.0", "mustache": "^4.2.0", "pino-pretty": "^8.1.0", "prettier": "^2.8.8", + "stream-buffers": "^3.0.2", "ts-jest": "^29.0.3", "ts-node-dev": "^1.1.8", "tsx": "^3.13.0", @@ -2332,6 +2344,22 @@ "node": "^12.22.0 || ^14.17.0 || >=16.0.0" } }, + "node_modules/@eslint/eslintrc/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, "node_modules/@eslint/eslintrc/node_modules/ignore": { "version": "4.0.6", "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", @@ -2341,6 +2369,12 @@ "node": ">= 4" } }, + "node_modules/@eslint/eslintrc/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, "node_modules/@fastify/accept-negotiator": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@fastify/accept-negotiator/-/accept-negotiator-1.1.0.tgz", @@ -2349,6 +2383,15 @@ "node": ">=14" } }, + "node_modules/@fastify/accepts": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/@fastify/accepts/-/accepts-4.3.0.tgz", + "integrity": "sha512-QK4FoqXdwwPmaPOLL6NrxsyaXVvdviYVoS6ltHyOLdFlUyREIaMykHQIp+x0aJz9hB3B3n/Ht6QRdvBeGkptGQ==", + "dependencies": { + "accepts": "^1.3.5", + "fastify-plugin": "^4.0.0" + } + }, "node_modules/@fastify/ajv-compiler": { "version": "3.5.0", "resolved": "https://registry.npmjs.org/@fastify/ajv-compiler/-/ajv-compiler-3.5.0.tgz", @@ -2359,26 +2402,6 @@ "fast-uri": "^2.0.0" } }, - "node_modules/@fastify/ajv-compiler/node_modules/ajv": { - "version": "8.12.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", - "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", 
- "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/@fastify/ajv-compiler/node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" - }, "node_modules/@fastify/busboy": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@fastify/busboy/-/busboy-1.1.0.tgz", @@ -3787,6 +3810,16 @@ "node": ">=16" } }, + "node_modules/@types/ajv": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@types/ajv/-/ajv-1.0.0.tgz", + "integrity": "sha512-yGSqw9/QKd5FXbTNrSANcJ6IHWeNhA+gokXqmlPquJgLDC87d4g2FGPs+AlCeGG0GuZXmPq42hOFA2hnPymCLw==", + "deprecated": "This is a stub types definition for ajv (https://github.com/epoberezkin/ajv). ajv provides its own type definitions, so you don't need @types/ajv installed!", + "dev": true, + "dependencies": { + "ajv": "*" + } + }, "node_modules/@types/async-retry": { "version": "1.4.5", "resolved": "https://registry.npmjs.org/@types/async-retry/-/async-retry-1.4.5.tgz", @@ -3925,6 +3958,15 @@ "@types/node": "*" } }, + "node_modules/@types/multistream": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@types/multistream/-/multistream-4.1.3.tgz", + "integrity": "sha512-t57vmDEJOZuC0M3IrZYfCd9wolTcr3ZTCGk1iwHNosvgBX+7/SMvCGcR8wP9lidpelBZQ12crSuINOxkk0azPA==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, "node_modules/@types/mustache": { "version": "4.2.2", "resolved": "https://registry.npmjs.org/@types/mustache/-/mustache-4.2.2.tgz", @@ -3969,6 +4011,15 @@ "integrity": "sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==", "dev": true }, + "node_modules/@types/stream-buffers": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/stream-buffers/-/stream-buffers-3.0.7.tgz", + "integrity": "sha512-azOCy05sXVXrO+qklf0c/B07H/oHaIuDDAiHPVwlk3A9Ek+ksHyTeMajLZl3r76FxpPpxem//4Te61G1iW3Giw==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, "node_modules/@types/strip-bom": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/@types/strip-bom/-/strip-bom-3.0.0.tgz", @@ -3981,6 +4032,15 @@ "integrity": "sha512-7NQmHra/JILCd1QqpSzl8+mJRc8ZHz3uDm8YV1Ks9IhK0epEiTw8aIErbvH9PI+6XbqhyIQy3462nEsn7UVzjQ==", "dev": true }, + "node_modules/@types/xml2js": { + "version": "0.4.14", + "resolved": "https://registry.npmjs.org/@types/xml2js/-/xml2js-0.4.14.tgz", + "integrity": "sha512-4YnrRemBShWRO2QjvUin8ESA41rH+9nQGLUGZV/1IDhi3SL9OhdpNC/MrulTWuptXKwhx/aDxE7toV0f/ypIXQ==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, "node_modules/@types/yargs": { "version": "17.0.13", "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.13.tgz", @@ -4196,6 +4256,18 @@ "resolved": "https://registry.npmjs.org/abstract-logging/-/abstract-logging-2.0.1.tgz", "integrity": "sha512-2BjRTZxTPvheOvGbBslFSYOUkr+SjPtOnrLP33f+VIWLzezQpZcqVg7ja3L4dBXmzzgwT+a029jRx5PCi3JuiA==" }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, "node_modules/acorn": { "version": "8.7.0", "resolved": 
"https://registry.npmjs.org/acorn/-/acorn-8.7.0.tgz", @@ -4254,14 +4326,13 @@ } }, "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dev": true, + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", "dependencies": { "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", "uri-js": "^4.2.2" }, "funding": { @@ -4285,26 +4356,6 @@ } } }, - "node_modules/ajv-formats/node_modules/ajv": { - "version": "8.12.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", - "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/ajv-formats/node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" - }, "node_modules/ansi-escapes": { "version": "4.3.2", "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", @@ -5434,6 +5485,22 @@ "node": "^12.22.0 || ^14.17.0 || >=16.0.0" } }, + "node_modules/eslint/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, "node_modules/eslint/node_modules/escape-string-regexp": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", @@ -5480,6 +5547,12 @@ "node": ">=10.13.0" } }, + "node_modules/eslint/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, "node_modules/esm": { "version": "3.2.25", "resolved": "https://registry.npmjs.org/esm/-/esm-3.2.25.tgz", @@ -5706,26 +5779,6 @@ "rfdc": "^1.2.0" } }, - "node_modules/fast-json-stringify/node_modules/ajv": { - "version": "8.12.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", - "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - 
"node_modules/fast-json-stringify/node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" - }, "node_modules/fast-levenshtein": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", @@ -5814,9 +5867,23 @@ } }, "node_modules/fastify-plugin": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/fastify-plugin/-/fastify-plugin-4.5.0.tgz", - "integrity": "sha512-79ak0JxddO0utAXAQ5ccKhvs6vX2MGyHHMMsmZkBANrq3hXc1CHzvNPHOcvTsVMEPl5I+NT+RO4YKMGehOfSIg==" + "version": "4.5.1", + "resolved": "https://registry.npmjs.org/fastify-plugin/-/fastify-plugin-4.5.1.tgz", + "integrity": "sha512-stRHYGeuqpEZTL1Ef0Ovr2ltazUT9g844X5z/zEBFLG8RYlpDiOCIG+ATvYEp+/zmc7sN29mcIMp8gvYplYPIQ==" + }, + "node_modules/fastify-xml-body-parser": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/fastify-xml-body-parser/-/fastify-xml-body-parser-2.2.0.tgz", + "integrity": "sha512-Jxltec0Iin4QX+DEQoYCyGmU5cTRtI0x22mRT/3FBQMhTEn7KNTHnnEtbyN3+6SLgW8cSirnOe1t8vqn77vR+Q==", + "dependencies": { + "fast-xml-parser": "^4.1.2", + "fastify-plugin": "^3.0.0" + } + }, + "node_modules/fastify-xml-body-parser/node_modules/fastify-plugin": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/fastify-plugin/-/fastify-plugin-3.0.1.tgz", + "integrity": "sha512-qKcDXmuZadJqdTm6vlCqioEbyewF60b/0LOFCcYN1B6BIZGlYJumWWOYs70SFYLDAH4YqdE1cxH/RKMG7rFxgA==" }, "node_modules/fastify/node_modules/process-warning": { "version": "2.0.0", @@ -7186,21 +7253,23 @@ } }, "node_modules/json-schema-to-ts": { - "version": "2.5.4", - "resolved": "https://registry.npmjs.org/json-schema-to-ts/-/json-schema-to-ts-2.5.4.tgz", - "integrity": "sha512-wlaYrGg+aYq0aEjSDY3cAFNzJVD2GvdrVIlvMdrbOLwkaMarXBiX+k0qm5Myb2aI3xjvdqsZoGs63JPS/M8+dg==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/json-schema-to-ts/-/json-schema-to-ts-3.0.0.tgz", + "integrity": "sha512-2adDesYifYEXYxNySx3gG0RR69rDWIjqAFzK/JPXdOvjHLZ/UP6d2rkpy6a+AxyhtRp2SvFPZ4+EW36jBinUbA==", "dev": true, "dependencies": { + "@babel/runtime": "^7.18.3", "@types/json-schema": "^7.0.9", - "ts-algebra": "^1.1.1", - "ts-toolbelt": "^9.6.0" + "ts-algebra": "^1.2.2" + }, + "engines": { + "node": ">=16" } }, "node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" }, "node_modules/json-stable-stringify-without-jsonify": { "version": "1.0.1", @@ -7518,6 +7587,14 @@ "resolved": "https://registry.npmjs.org/logflare-transport-core/-/logflare-transport-core-0.3.3.tgz", "integrity": "sha512-n82NsRVWvlaa3jd9QQ8rDroCjCJcIamQOlarLDBou9RsF0QaRv39rduy0ToPmlGQn1OPZBwlsv+R36lXupSmVQ==" }, + "node_modules/lru-cache": { + "version": "10.2.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.2.0.tgz", + "integrity": "sha512-2bIM8x+VAf6JT4bKAljS1qUWgMsqZRPGJS6FSahIMPVvctcNhyVp7AJu7quxOW9jwkryBReKZY5tY5JYv2n/7Q==", + "engines": { + "node": "14 || 
>=16.14" + } + }, "node_modules/luxon": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.2.1.tgz", @@ -7616,19 +7693,19 @@ } }, "node_modules/mime-db": { - "version": "1.46.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.46.0.tgz", - "integrity": "sha512-svXaP8UQRZ5K7or+ZmfNhg2xX3yKDMUzqadsSqi4NCH/KomcH75MAMYAGVlvXn4+b/xOPhS3I2uHKRUzvjY7BQ==", + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", "engines": { "node": ">= 0.6" } }, "node_modules/mime-types": { - "version": "2.1.29", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.29.tgz", - "integrity": "sha512-Y/jMt/S5sR9OaqteJtslsFZKWOIIqMACsJSiHghlCAyhf7jfVYjKBmLiX8OgpWeW+fjJ2b+Az69aPFPkUOY6xQ==", + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", "dependencies": { - "mime-db": "1.46.0" + "mime-db": "1.52.0" }, "engines": { "node": ">= 0.6" @@ -7716,6 +7793,14 @@ "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=", "dev": true }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "engines": { + "node": ">= 0.6" + } + }, "node_modules/node-int64": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", @@ -7749,6 +7834,37 @@ "node": ">=8" } }, + "node_modules/object-sizeof": { + "version": "2.6.4", + "resolved": "https://registry.npmjs.org/object-sizeof/-/object-sizeof-2.6.4.tgz", + "integrity": "sha512-YuJAf7Bi61KROcYmXm8RCeBrBw8UOaJDzTm1gp0eU7RjYi1xEte3/Nmg/VyPaHcJZ3sNojs1Y0xvSrgwkLmcFw==", + "dependencies": { + "buffer": "^6.0.3" + } + }, + "node_modules/object-sizeof/node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, "node_modules/on-exit-leak-free": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/on-exit-leak-free/-/on-exit-leak-free-2.1.0.tgz", @@ -8858,10 +8974,15 @@ "node": ">=10" } }, + "node_modules/sax": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.3.0.tgz", + "integrity": "sha512-0s+oAmw9zLl1V1cS9BtZN7JAd0cW5e0QH4W3LWEK6a4LaLEA2OTpGYWDY+6XasBLtz6wkm3u1xRw95mRuJ59WA==" + }, "node_modules/secure-json-parse": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/secure-json-parse/-/secure-json-parse-2.5.0.tgz", - "integrity": "sha512-ZQruFgZnIWH+WyO9t5rWt4ZEGqCKPwhiw+YbzTwpmT9elgLrLcfuyUiSnwwjUiVy9r4VM3urtbNF1xmEh9IL2w==" + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/secure-json-parse/-/secure-json-parse-2.7.0.tgz", + "integrity": "sha512-6aU+Rwsezw7VR8/nyvKTx8QpWH9FrcYiXXlqC4z5d5XQBDRqtbfsRjnwGyqbi3gddNtWHuEk9OANUotL26qKUw==" }, "node_modules/semver": { 
"version": "7.5.4", @@ -9037,6 +9158,15 @@ "readable-stream": "^3.5.0" } }, + "node_modules/stream-buffers": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/stream-buffers/-/stream-buffers-3.0.2.tgz", + "integrity": "sha512-DQi1h8VEBA/lURbSwFtEHnSTb9s2/pwLEaFuNhXwy1Dx3Sa0lOuYT2yNUr4/j2fs8oCAMANtrZ5OrPZtyVs3MQ==", + "dev": true, + "engines": { + "node": ">= 0.10.0" + } + }, "node_modules/stream-shift": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", @@ -9274,13 +9404,10 @@ } }, "node_modules/ts-algebra": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ts-algebra/-/ts-algebra-1.1.1.tgz", - "integrity": "sha512-W43a3/BN0Tp4SgRNERQF/QPVuY1rnHkgCr/fISLY0Ycu05P0NWPYRuViU8JFn+pFZuY6/zp9TgET1fxMzppR/Q==", - "dev": true, - "dependencies": { - "ts-toolbelt": "^9.6.0" - } + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/ts-algebra/-/ts-algebra-1.2.2.tgz", + "integrity": "sha512-kloPhf1hq3JbCPOTYoOWDKxebWjNb2o/LKnNfkWhxVVisFFmMJPPdJeGoGmM+iRLyoXAR61e08Pb+vUXINg8aA==", + "dev": true }, "node_modules/ts-jest": { "version": "29.0.3", @@ -9442,12 +9569,6 @@ "typescript": ">=2.7" } }, - "node_modules/ts-toolbelt": { - "version": "9.6.0", - "resolved": "https://registry.npmjs.org/ts-toolbelt/-/ts-toolbelt-9.6.0.tgz", - "integrity": "sha512-nsZd8ZeNUzukXPlJmTBwUAuABDe/9qtVDelJeT/qW0ow3ZS3BsQJtNkan1802aM9Uf68/Y8ljw86Hu0h5IUW3w==", - "dev": true - }, "node_modules/tsconfig": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/tsconfig/-/tsconfig-7.0.0.tgz", @@ -9574,9 +9695,9 @@ "integrity": "sha512-Tfay0l6gJMP5rkil8CzGbLthukn+9BN/VXWcABVFPjOoelJ+koW8BuPZYk+h/L+lEeIp1fSzVRiWRPIjKVjPdg==" }, "node_modules/typescript": { - "version": "4.5.5", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.5.5.tgz", - "integrity": "sha512-TCTIul70LyWe6IJWT8QSYeA54WQe8EjQFU4wY52Fasj5UKx88LNYKCgBEHcOMOrFF1rKGbD8v/xcNWVUq9SymA==", + "version": "4.9.5", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz", + "integrity": "sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==", "dev": true, "bin": { "tsc": "bin/tsc", @@ -9786,6 +9907,26 @@ "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, + "node_modules/xml2js": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.6.2.tgz", + "integrity": "sha512-T4rieHaC1EXcES0Kxxj4JWgaUQHDk+qwHcYOCFHfiwKz7tOVPLq7Hjq9dM1WCMhylqMEfP7hMcOIChvotiZegA==", + "dependencies": { + "sax": ">=0.6.0", + "xmlbuilder": "~11.0.0" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/xmlbuilder": { + "version": "11.0.1", + "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-11.0.1.tgz", + "integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==", + "engines": { + "node": ">=4.0" + } + }, "node_modules/xtend": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", @@ -11612,11 +11753,29 @@ "strip-json-comments": "^3.1.1" }, "dependencies": { + "ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "requires": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + }, "ignore": { "version": "4.0.6", "resolved": 
"https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", "dev": true + }, + "json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true } } }, @@ -11625,6 +11784,15 @@ "resolved": "https://registry.npmjs.org/@fastify/accept-negotiator/-/accept-negotiator-1.1.0.tgz", "integrity": "sha512-OIHZrb2ImZ7XG85HXOONLcJWGosv7sIvM2ifAPQVhg9Lv7qdmMBNVaai4QTdyuaqbKM5eO6sLSQOYI7wEQeCJQ==" }, + "@fastify/accepts": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/@fastify/accepts/-/accepts-4.3.0.tgz", + "integrity": "sha512-QK4FoqXdwwPmaPOLL6NrxsyaXVvdviYVoS6ltHyOLdFlUyREIaMykHQIp+x0aJz9hB3B3n/Ht6QRdvBeGkptGQ==", + "requires": { + "accepts": "^1.3.5", + "fastify-plugin": "^4.0.0" + } + }, "@fastify/ajv-compiler": { "version": "3.5.0", "resolved": "https://registry.npmjs.org/@fastify/ajv-compiler/-/ajv-compiler-3.5.0.tgz", @@ -11633,24 +11801,6 @@ "ajv": "^8.11.0", "ajv-formats": "^2.1.1", "fast-uri": "^2.0.0" - }, - "dependencies": { - "ajv": { - "version": "8.12.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", - "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", - "requires": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - } - }, - "json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" - } } }, "@fastify/busboy": { @@ -12796,6 +12946,15 @@ "resolved": "https://registry.npmjs.org/@tus/utils/-/utils-0.1.0.tgz", "integrity": "sha512-RXSeAKPfBJk3G0yyyDAqKPJUb1JsHNvwxNWSjZmvxRlSwtPmOlSkSrXRRReAqHzSlxAlNOGzDWqYiCBkLjOu0g==" }, + "@types/ajv": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@types/ajv/-/ajv-1.0.0.tgz", + "integrity": "sha512-yGSqw9/QKd5FXbTNrSANcJ6IHWeNhA+gokXqmlPquJgLDC87d4g2FGPs+AlCeGG0GuZXmPq42hOFA2hnPymCLw==", + "dev": true, + "requires": { + "ajv": "*" + } + }, "@types/async-retry": { "version": "1.4.5", "resolved": "https://registry.npmjs.org/@types/async-retry/-/async-retry-1.4.5.tgz", @@ -12934,6 +13093,15 @@ "@types/node": "*" } }, + "@types/multistream": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/@types/multistream/-/multistream-4.1.3.tgz", + "integrity": "sha512-t57vmDEJOZuC0M3IrZYfCd9wolTcr3ZTCGk1iwHNosvgBX+7/SMvCGcR8wP9lidpelBZQ12crSuINOxkk0azPA==", + "dev": true, + "requires": { + "@types/node": "*" + } + }, "@types/mustache": { "version": "4.2.2", "resolved": "https://registry.npmjs.org/@types/mustache/-/mustache-4.2.2.tgz", @@ -12978,6 +13146,15 @@ "integrity": "sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==", "dev": true }, + "@types/stream-buffers": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/stream-buffers/-/stream-buffers-3.0.7.tgz", + "integrity": "sha512-azOCy05sXVXrO+qklf0c/B07H/oHaIuDDAiHPVwlk3A9Ek+ksHyTeMajLZl3r76FxpPpxem//4Te61G1iW3Giw==", + "dev": true, + "requires": { + "@types/node": "*" + } + }, "@types/strip-bom": { "version": "3.0.0", "resolved": 
"https://registry.npmjs.org/@types/strip-bom/-/strip-bom-3.0.0.tgz", @@ -12990,6 +13167,15 @@ "integrity": "sha512-7NQmHra/JILCd1QqpSzl8+mJRc8ZHz3uDm8YV1Ks9IhK0epEiTw8aIErbvH9PI+6XbqhyIQy3462nEsn7UVzjQ==", "dev": true }, + "@types/xml2js": { + "version": "0.4.14", + "resolved": "https://registry.npmjs.org/@types/xml2js/-/xml2js-0.4.14.tgz", + "integrity": "sha512-4YnrRemBShWRO2QjvUin8ESA41rH+9nQGLUGZV/1IDhi3SL9OhdpNC/MrulTWuptXKwhx/aDxE7toV0f/ypIXQ==", + "dev": true, + "requires": { + "@types/node": "*" + } + }, "@types/yargs": { "version": "17.0.13", "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.13.tgz", @@ -13113,6 +13299,15 @@ "resolved": "https://registry.npmjs.org/abstract-logging/-/abstract-logging-2.0.1.tgz", "integrity": "sha512-2BjRTZxTPvheOvGbBslFSYOUkr+SjPtOnrLP33f+VIWLzezQpZcqVg7ja3L4dBXmzzgwT+a029jRx5PCi3JuiA==" }, + "accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "requires": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + } + }, "acorn": { "version": "8.7.0", "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.7.0.tgz", @@ -13154,14 +13349,13 @@ } }, "ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dev": true, + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", "requires": { "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", "uri-js": "^4.2.2" } }, @@ -13171,24 +13365,6 @@ "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", "requires": { "ajv": "^8.0.0" - }, - "dependencies": { - "ajv": { - "version": "8.12.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", - "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", - "requires": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - } - }, - "json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" - } } }, "ansi-escapes": { @@ -13985,6 +14161,18 @@ "v8-compile-cache": "^2.0.3" }, "dependencies": { + "ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "requires": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + }, "escape-string-regexp": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", @@ -14015,6 +14203,12 @@ "requires": { "is-glob": "^4.0.3" } + }, + "json-schema-traverse": { + "version": "0.4.1", + "resolved": 
"https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true } } }, @@ -14243,24 +14437,6 @@ "fast-deep-equal": "^3.1.3", "fast-uri": "^2.1.0", "rfdc": "^1.2.0" - }, - "dependencies": { - "ajv": { - "version": "8.12.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", - "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", - "requires": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - } - }, - "json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" - } } }, "fast-levenshtein": { @@ -14339,9 +14515,25 @@ } }, "fastify-plugin": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/fastify-plugin/-/fastify-plugin-4.5.0.tgz", - "integrity": "sha512-79ak0JxddO0utAXAQ5ccKhvs6vX2MGyHHMMsmZkBANrq3hXc1CHzvNPHOcvTsVMEPl5I+NT+RO4YKMGehOfSIg==" + "version": "4.5.1", + "resolved": "https://registry.npmjs.org/fastify-plugin/-/fastify-plugin-4.5.1.tgz", + "integrity": "sha512-stRHYGeuqpEZTL1Ef0Ovr2ltazUT9g844X5z/zEBFLG8RYlpDiOCIG+ATvYEp+/zmc7sN29mcIMp8gvYplYPIQ==" + }, + "fastify-xml-body-parser": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/fastify-xml-body-parser/-/fastify-xml-body-parser-2.2.0.tgz", + "integrity": "sha512-Jxltec0Iin4QX+DEQoYCyGmU5cTRtI0x22mRT/3FBQMhTEn7KNTHnnEtbyN3+6SLgW8cSirnOe1t8vqn77vR+Q==", + "requires": { + "fast-xml-parser": "^4.1.2", + "fastify-plugin": "^3.0.0" + }, + "dependencies": { + "fastify-plugin": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/fastify-plugin/-/fastify-plugin-3.0.1.tgz", + "integrity": "sha512-qKcDXmuZadJqdTm6vlCqioEbyewF60b/0LOFCcYN1B6BIZGlYJumWWOYs70SFYLDAH4YqdE1cxH/RKMG7rFxgA==" + } + } }, "fastq": { "version": "1.13.0", @@ -15345,21 +15537,20 @@ } }, "json-schema-to-ts": { - "version": "2.5.4", - "resolved": "https://registry.npmjs.org/json-schema-to-ts/-/json-schema-to-ts-2.5.4.tgz", - "integrity": "sha512-wlaYrGg+aYq0aEjSDY3cAFNzJVD2GvdrVIlvMdrbOLwkaMarXBiX+k0qm5Myb2aI3xjvdqsZoGs63JPS/M8+dg==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/json-schema-to-ts/-/json-schema-to-ts-3.0.0.tgz", + "integrity": "sha512-2adDesYifYEXYxNySx3gG0RR69rDWIjqAFzK/JPXdOvjHLZ/UP6d2rkpy6a+AxyhtRp2SvFPZ4+EW36jBinUbA==", "dev": true, "requires": { + "@babel/runtime": "^7.18.3", "@types/json-schema": "^7.0.9", - "ts-algebra": "^1.1.1", - "ts-toolbelt": "^9.6.0" + "ts-algebra": "^1.2.2" } }, "json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" }, "json-stable-stringify-without-jsonify": { "version": "1.0.1", @@ -15628,6 +15819,11 @@ "resolved": "https://registry.npmjs.org/logflare-transport-core/-/logflare-transport-core-0.3.3.tgz", "integrity": 
"sha512-n82NsRVWvlaa3jd9QQ8rDroCjCJcIamQOlarLDBou9RsF0QaRv39rduy0ToPmlGQn1OPZBwlsv+R36lXupSmVQ==" }, + "lru-cache": { + "version": "10.2.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.2.0.tgz", + "integrity": "sha512-2bIM8x+VAf6JT4bKAljS1qUWgMsqZRPGJS6FSahIMPVvctcNhyVp7AJu7quxOW9jwkryBReKZY5tY5JYv2n/7Q==" + }, "luxon": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.2.1.tgz", @@ -15698,16 +15894,16 @@ "integrity": "sha512-jSCU7/VB1loIWBZe14aEYHU/+1UMEHoaO7qxCOVJOw9GgH72VAWppxNcjU+x9a2k3GSIBXNKxXQFqRvvZ7vr3A==" }, "mime-db": { - "version": "1.46.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.46.0.tgz", - "integrity": "sha512-svXaP8UQRZ5K7or+ZmfNhg2xX3yKDMUzqadsSqi4NCH/KomcH75MAMYAGVlvXn4+b/xOPhS3I2uHKRUzvjY7BQ==" + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==" }, "mime-types": { - "version": "2.1.29", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.29.tgz", - "integrity": "sha512-Y/jMt/S5sR9OaqteJtslsFZKWOIIqMACsJSiHghlCAyhf7jfVYjKBmLiX8OgpWeW+fjJ2b+Az69aPFPkUOY6xQ==", + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", "requires": { - "mime-db": "1.46.0" + "mime-db": "1.52.0" } }, "mimic-fn": { @@ -15763,6 +15959,11 @@ "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=", "dev": true }, + "negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==" + }, "node-int64": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", @@ -15790,6 +15991,25 @@ "path-key": "^3.0.0" } }, + "object-sizeof": { + "version": "2.6.4", + "resolved": "https://registry.npmjs.org/object-sizeof/-/object-sizeof-2.6.4.tgz", + "integrity": "sha512-YuJAf7Bi61KROcYmXm8RCeBrBw8UOaJDzTm1gp0eU7RjYi1xEte3/Nmg/VyPaHcJZ3sNojs1Y0xvSrgwkLmcFw==", + "requires": { + "buffer": "^6.0.3" + }, + "dependencies": { + "buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "requires": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + } + } + }, "on-exit-leak-free": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/on-exit-leak-free/-/on-exit-leak-free-2.1.0.tgz", @@ -16609,10 +16829,15 @@ "resolved": "https://registry.npmjs.org/safe-stable-stringify/-/safe-stable-stringify-2.3.1.tgz", "integrity": "sha512-kYBSfT+troD9cDA85VDnHZ1rpHC50O0g1e6WlGHVCz/g+JS+9WKLj+XwFYyR8UbrZN8ll9HUpDAAddY58MGisg==" }, + "sax": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.3.0.tgz", + "integrity": "sha512-0s+oAmw9zLl1V1cS9BtZN7JAd0cW5e0QH4W3LWEK6a4LaLEA2OTpGYWDY+6XasBLtz6wkm3u1xRw95mRuJ59WA==" + }, "secure-json-parse": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/secure-json-parse/-/secure-json-parse-2.5.0.tgz", - "integrity": "sha512-ZQruFgZnIWH+WyO9t5rWt4ZEGqCKPwhiw+YbzTwpmT9elgLrLcfuyUiSnwwjUiVy9r4VM3urtbNF1xmEh9IL2w==" + "version": "2.7.0", + "resolved": 
"https://registry.npmjs.org/secure-json-parse/-/secure-json-parse-2.7.0.tgz", + "integrity": "sha512-6aU+Rwsezw7VR8/nyvKTx8QpWH9FrcYiXXlqC4z5d5XQBDRqtbfsRjnwGyqbi3gddNtWHuEk9OANUotL26qKUw==" }, "semver": { "version": "7.5.4", @@ -16751,6 +16976,12 @@ "readable-stream": "^3.5.0" } }, + "stream-buffers": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/stream-buffers/-/stream-buffers-3.0.2.tgz", + "integrity": "sha512-DQi1h8VEBA/lURbSwFtEHnSTb9s2/pwLEaFuNhXwy1Dx3Sa0lOuYT2yNUr4/j2fs8oCAMANtrZ5OrPZtyVs3MQ==", + "dev": true + }, "stream-shift": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", @@ -16931,13 +17162,10 @@ "dev": true }, "ts-algebra": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ts-algebra/-/ts-algebra-1.1.1.tgz", - "integrity": "sha512-W43a3/BN0Tp4SgRNERQF/QPVuY1rnHkgCr/fISLY0Ycu05P0NWPYRuViU8JFn+pFZuY6/zp9TgET1fxMzppR/Q==", - "dev": true, - "requires": { - "ts-toolbelt": "^9.6.0" - } + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/ts-algebra/-/ts-algebra-1.2.2.tgz", + "integrity": "sha512-kloPhf1hq3JbCPOTYoOWDKxebWjNb2o/LKnNfkWhxVVisFFmMJPPdJeGoGmM+iRLyoXAR61e08Pb+vUXINg8aA==", + "dev": true }, "ts-jest": { "version": "29.0.3", @@ -17021,12 +17249,6 @@ } } }, - "ts-toolbelt": { - "version": "9.6.0", - "resolved": "https://registry.npmjs.org/ts-toolbelt/-/ts-toolbelt-9.6.0.tgz", - "integrity": "sha512-nsZd8ZeNUzukXPlJmTBwUAuABDe/9qtVDelJeT/qW0ow3ZS3BsQJtNkan1802aM9Uf68/Y8ljw86Hu0h5IUW3w==", - "dev": true - }, "tsconfig": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/tsconfig/-/tsconfig-7.0.0.tgz", @@ -17128,9 +17350,9 @@ "integrity": "sha512-Tfay0l6gJMP5rkil8CzGbLthukn+9BN/VXWcABVFPjOoelJ+koW8BuPZYk+h/L+lEeIp1fSzVRiWRPIjKVjPdg==" }, "typescript": { - "version": "4.5.5", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.5.5.tgz", - "integrity": "sha512-TCTIul70LyWe6IJWT8QSYeA54WQe8EjQFU4wY52Fasj5UKx88LNYKCgBEHcOMOrFF1rKGbD8v/xcNWVUq9SymA==", + "version": "4.9.5", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz", + "integrity": "sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==", "dev": true }, "undici-types": { @@ -17283,6 +17505,20 @@ "signal-exit": "^3.0.7" } }, + "xml2js": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.6.2.tgz", + "integrity": "sha512-T4rieHaC1EXcES0Kxxj4JWgaUQHDk+qwHcYOCFHfiwKz7tOVPLq7Hjq9dM1WCMhylqMEfP7hMcOIChvotiZegA==", + "requires": { + "sax": ">=0.6.0", + "xmlbuilder": "~11.0.0" + } + }, + "xmlbuilder": { + "version": "11.0.1", + "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-11.0.1.tgz", + "integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==" + }, "xtend": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", diff --git a/package.json b/package.json index 726a5765..60c8f0fa 100644 --- a/package.json +++ b/package.json @@ -28,6 +28,7 @@ "@aws-sdk/client-s3": "3.515.0", "@aws-sdk/lib-storage": "3.515.0", "@aws-sdk/s3-request-presigner": "3.421.0", + "@fastify/accepts": "^4.3.0", "@fastify/multipart": "^7.6.0", "@fastify/rate-limit": "^7.6.0", "@fastify/swagger": "^8.3.1", @@ -39,6 +40,7 @@ "@tus/s3-store": "1.4.1", "@tus/server": "1.4.1", "agentkeepalive": "^4.2.1", + "ajv": "^8.12.0", "async-retry": "^1.3.3", "axios": "^1.6.3", "axios-retry": "^3.9.1", @@ -49,21 +51,27 @@ "fastify": "^4.8.1", 
"fastify-metrics": "^10.2.0", "fastify-plugin": "^4.0.0", + "fastify-xml-body-parser": "^2.2.0", "fs-extra": "^10.0.1", - "fs-xattr": "^0.3.1", + "fs-xattr": "0.3.1", "ioredis": "^5.2.4", "jsonwebtoken": "^9.0.2", "knex": "^3.1.0", + "lru-cache": "^10.2.0", "md5-file": "^5.0.0", + "multistream": "^4.1.0", + "object-sizeof": "^2.6.4", "pg": "^8.11.3", "pg-boss": "^9.0.3", "pg-listen": "^1.7.0", "pino": "^8.15.4", "pino-logflare": "^0.4.2", "postgres-migrations": "^5.3.0", - "prom-client": "^14.0.1" + "prom-client": "^14.0.1", + "xml2js": "^0.6.2" }, "devDependencies": { + "@types/ajv": "^1.0.0", "@types/async-retry": "^1.4.5", "@types/busboy": "^1.3.0", "@types/crypto-js": "^4.1.1", @@ -71,9 +79,12 @@ "@types/jest": "^29.2.1", "@types/js-yaml": "^4.0.5", "@types/jsonwebtoken": "^9.0.5", + "@types/multistream": "^4.1.3", "@types/mustache": "^4.2.2", "@types/node": "^20.11.5", "@types/pg": "^8.6.4", + "@types/stream-buffers": "^3.0.7", + "@types/xml2js": "^0.4.14", "@typescript-eslint/eslint-plugin": "^5.12.1", "@typescript-eslint/parser": "^5.12.1", "babel-jest": "^29.2.2", @@ -83,10 +94,11 @@ "form-data": "^4.0.0", "jest": "^29.2.2", "js-yaml": "^4.1.0", - "json-schema-to-ts": "^2.5.4", + "json-schema-to-ts": "^3.0.0", "mustache": "^4.2.0", "pino-pretty": "^8.1.0", "prettier": "^2.8.8", + "stream-buffers": "^3.0.2", "ts-jest": "^29.0.3", "ts-node-dev": "^1.1.8", "tsx": "^3.13.0", diff --git a/src/admin-app.ts b/src/admin-app.ts index 40f3d0da..6d097dc9 100644 --- a/src/admin-app.ts +++ b/src/admin-app.ts @@ -9,6 +9,7 @@ const build = (opts: FastifyServerOptions = {}, appInstance?: FastifyInstance): app.register(plugins.logRequest({ excludeUrls: ['/status', '/metrics', '/health'] })) app.register(routes.tenants, { prefix: 'tenants' }) app.register(routes.migrations, { prefix: 'migrations' }) + app.register(routes.s3Credentials, { prefix: 's3' }) let registriesToMerge: Registry[] = [] diff --git a/src/app.ts b/src/app.ts index 9585b377..4efc844b 100644 --- a/src/app.ts +++ b/src/app.ts @@ -42,6 +42,9 @@ const build = (opts: buildOpts = {}): FastifyInstance => { tags: [ { name: 'object', description: 'Object end-points' }, { name: 'bucket', description: 'Bucket end-points' }, + { name: 's3', description: 'S3 end-points' }, + { name: 'transformation', description: 'Image transformation' }, + { name: 'resumable', description: 'Resumable Upload end-points' }, { name: 'deprecated', description: 'Deprecated end-points' }, ], }, @@ -63,6 +66,7 @@ const build = (opts: buildOpts = {}): FastifyInstance => { app.register(routes.bucket, { prefix: 'bucket' }) app.register(routes.object, { prefix: 'object' }) app.register(routes.render, { prefix: 'render/image' }) + app.register(routes.s3, { prefix: 's3' }) app.register(routes.healthcheck, { prefix: 'health' }) setErrorHandler(app) diff --git a/src/auth/jwt.ts b/src/auth/jwt.ts index 2bd5d724..59e95afd 100644 --- a/src/auth/jwt.ts +++ b/src/auth/jwt.ts @@ -1,10 +1,10 @@ import * as crypto from 'crypto' import jwt from 'jsonwebtoken' -import { getJwtSecret as getJwtSecretForTenant } from '../database/tenant' import { getConfig } from '../config' +import { ERRORS } from '../storage' -const { isMultitenant, jwtSecret, jwtAlgorithm, jwtJWKS } = getConfig() +const { jwtAlgorithm } = getConfig() const JWT_HMAC_ALGOS: jwt.Algorithm[] = ['HS256', 'HS384', 'HS512'] const JWT_RSA_ALGOS: jwt.Algorithm[] = ['RS256', 'RS384', 'RS512'] @@ -101,10 +101,7 @@ function getJWTVerificationKey( } } -export function getJWTAlgorithms( - secret: string, - jwks: { keys: { kid?: 
string; kty: string }[] } | null -) { +export function getJWTAlgorithms(jwks: { keys: { kid?: string; kty: string }[] } | null) { let algorithms: jwt.Algorithm[] if (jwks && jwks.keys && jwks.keys.length) { @@ -144,9 +141,9 @@ export function verifyJWT( jwt.verify( token, getJWTVerificationKey(secret, jwks || null), - { algorithms: getJWTAlgorithms(secret, jwks || null) }, + { algorithms: getJWTAlgorithms(jwks || null) }, (err, decoded) => { - if (err) return reject(err) + if (err) return reject(ERRORS.AccessDenied(err.message, err)) resolve(decoded as jwt.JwtPayload & T) } ) @@ -162,32 +159,18 @@ export function verifyJWT( export function signJWT( payload: string | object | Buffer, secret: string, - expiresIn: string | number -): Promise { - return new Promise((resolve, reject) => { - jwt.sign( - payload, - secret, - { expiresIn, algorithm: jwtAlgorithm as jwt.Algorithm }, - (err, token) => { - if (err) return reject(err) - resolve(token) - } - ) - }) -} + expiresIn: string | number | undefined +): Promise { + const options: jwt.SignOptions = { algorithm: jwtAlgorithm as jwt.Algorithm } -/** - * Extract the owner (user) from the provided JWT - * @param token - * @param secret - * @param jwks - */ -export async function getOwner( - token: string, - secret: string, - jwks: { keys: { kid?: string; kty: string }[] } | null -): Promise { - const decodedJWT = await verifyJWT(token, secret, jwks) - return (decodedJWT as jwtInterface)?.sub + if (expiresIn) { + options.expiresIn = expiresIn + } + + return new Promise((resolve, reject) => { + jwt.sign(payload, secret, options, (err, token) => { + if (err) return reject(err) + resolve(token as string) + }) + }) } diff --git a/src/config.ts b/src/config.ts index 11e701ee..9ad1a339 100644 --- a/src/config.ts +++ b/src/config.ts @@ -47,6 +47,7 @@ type StorageConfigType = { requestTraceHeader?: string requestEtagHeaders: string[] responseSMaxAge: number + anonKey: string serviceKey: string storageBackendType: StorageBackendType tenantId: string @@ -96,6 +97,10 @@ type StorageConfigType = { tusPartSize: number tusUseFileVersionSeparator: boolean defaultMetricsEnabled: boolean + s3ProtocolPrefix: string + s3ProtocolEnforceRegion: boolean + s3ProtocolAccessKeyId?: string + s3ProtocolAccessKeySecret?: string } function getOptionalConfigFromEnv(key: string, fallback?: string): string | undefined { @@ -183,6 +188,7 @@ export function getConfig(options?: { reload?: boolean }): StorageConfigType { // Auth serviceKey: getOptionalConfigFromEnv('SERVICE_KEY') || '', + anonKey: getOptionalConfigFromEnv('ANON_KEY') || '', encryptionKey: getOptionalConfigFromEnv('AUTH_ENCRYPTION_KEY', 'ENCRYPTION_KEY') || '', jwtSecret: getOptionalIfMultitenantConfigFromEnv('AUTH_JWT_SECRET', 'PGRST_JWT_SECRET') || '', @@ -215,6 +221,11 @@ export function getConfig(options?: { reload?: boolean }): StorageConfigType { tusUseFileVersionSeparator: getOptionalConfigFromEnv('TUS_USE_FILE_VERSION_SEPARATOR') === 'true', + // S3 Protocol + s3ProtocolPrefix: getOptionalConfigFromEnv('S3_PROTOCOL_PREFIX') || '', + s3ProtocolEnforceRegion: getOptionalConfigFromEnv('S3_PROTOCOL_ENFORCE_REGION') === 'true', + s3ProtocolAccessKeyId: getOptionalConfigFromEnv('S3_PROTOCOL_ACCESS_KEY_ID'), + s3ProtocolAccessKeySecret: getOptionalConfigFromEnv('S3_PROTOCOL_ACCESS_KEY_SECRET'), // Storage storageBackendType: getOptionalConfigFromEnv('STORAGE_BACKEND') as StorageBackendType, @@ -366,6 +377,13 @@ export function getConfig(options?: { reload?: boolean }): StorageConfigType { }) } + if 
(!config.isMultitenant && !config.anonKey) { + config.anonKey = jwt.sign({ role: config.dbAnonRole }, config.jwtSecret, { + expiresIn: '10y', + algorithm: config.jwtAlgorithm as jwt.Algorithm, + }) + } + const jwtJWKS = getOptionalConfigFromEnv('JWT_JWKS') || null if (jwtJWKS) { diff --git a/src/database/client.ts b/src/database/client.ts index 79c5ac01..71e2d1eb 100644 --- a/src/database/client.ts +++ b/src/database/client.ts @@ -1,6 +1,6 @@ import { getConfig } from '../config' import { getTenantConfig } from './tenant' -import { StorageBackendError } from '../storage' +import { ERRORS } from '../storage' import { User, TenantConnection } from './connection' interface ConnectionOptions { @@ -48,23 +48,17 @@ async function getDbCredentials( if (isMultitenant) { if (!tenantId) { - throw new StorageBackendError('Invalid Tenant Id', 400, 'Tenant id not provided') + throw ERRORS.InvalidTenantId() } if (requestXForwardedHostRegExp && !options?.disableHostCheck) { const xForwardedHost = host if (typeof xForwardedHost !== 'string') { - throw new StorageBackendError( - 'Invalid Header', - 400, - 'X-Forwarded-Host header is not a string' - ) + throw ERRORS.InvalidXForwardedHeader('X-Forwarded-Host header is not a string') } if (!new RegExp(requestXForwardedHostRegExp).test(xForwardedHost)) { - throw new StorageBackendError( - 'Invalid Header', - 400, + throw ERRORS.InvalidXForwardedHeader( 'X-Forwarded-Host header does not match regular expression' ) } diff --git a/src/database/connection.ts b/src/database/connection.ts index 0aa57a63..5ca5ea28 100644 --- a/src/database/connection.ts +++ b/src/database/connection.ts @@ -5,7 +5,7 @@ import retry from 'async-retry' import TTLCache from '@isaacs/ttlcache' import { getConfig } from '../config' import { DbActiveConnection, DbActivePool } from '../monitoring/metrics' -import { StorageBackendError } from '../storage' +import { ERRORS } from '../storage' import KnexTimeoutError = knex.KnexTimeoutError // https://github.com/knex/knex/issues/387#issuecomment-51554522 @@ -178,7 +178,7 @@ export class TenantConnection { ) if (!tnx) { - throw new StorageBackendError('Could not create transaction', 500, 'transaction_failed') + throw ERRORS.InternalError(undefined, 'Could not create transaction') } if (!instance && this.options.isExternalPool) { @@ -188,12 +188,7 @@ export class TenantConnection { return tnx } catch (e) { if (e instanceof KnexTimeoutError) { - throw StorageBackendError.withStatusCode( - 'database_timeout', - 544, - 'The connection to the database timed out', - e - ) + throw ERRORS.DatabaseTimeout(e) } throw e diff --git a/src/database/migrations/migrate.ts b/src/database/migrations/migrate.ts index e00bb49d..c6c52d9c 100644 --- a/src/database/migrations/migrate.ts +++ b/src/database/migrations/migrate.ts @@ -41,6 +41,7 @@ const backportMigrations = [ export const progressiveMigrations = new ProgressiveMigrations({ maxSize: 200, interval: 1000 * 60, // 1m + watch: pgQueueEnable, }) /** diff --git a/src/database/migrations/progressive.ts b/src/database/migrations/progressive.ts index be09e5ec..71517289 100644 --- a/src/database/migrations/progressive.ts +++ b/src/database/migrations/progressive.ts @@ -7,10 +7,14 @@ export class ProgressiveMigrations { protected emittingJobs = false protected watchInterval: NodeJS.Timeout | undefined - constructor(protected readonly options: { maxSize: number; interval: number }) {} + constructor(protected readonly options: { maxSize: number; interval: number; watch?: boolean }) { + if (typeof options.watch === 
'undefined') { + this.options.watch = true + } + } start(signal: AbortSignal) { - this.watch(signal) + this.watchTenants(signal) signal.addEventListener('abort', () => { if (this.watchInterval) { @@ -43,8 +47,8 @@ export class ProgressiveMigrations { }) } - protected watch(signal: AbortSignal) { - if (signal.aborted) { + protected watchTenants(signal: AbortSignal) { + if (signal.aborted || !this.options.watch) { return } this.watchInterval = setInterval(() => { diff --git a/src/database/tenant.ts b/src/database/tenant.ts index 7c9f3b56..42dcbf49 100644 --- a/src/database/tenant.ts +++ b/src/database/tenant.ts @@ -1,11 +1,14 @@ +import crypto from 'crypto' import { getConfig } from '../config' -import { decrypt, verifyJWT } from '../auth' +import { decrypt, encrypt, verifyJWT } from '../auth' import { multitenantKnex } from './multitenant-db' -import { StorageBackendError } from '../storage' +import { ERRORS } from '../storage' import { JwtPayload } from 'jsonwebtoken' import { PubSubAdapter } from '../pubsub' -import { lastMigrationName } from './migrations' import { createMutexByKey } from '../concurrency' +import { LRUCache } from 'lru-cache' +import objectSizeOf from 'object-sizeof' +import { lastMigrationName } from './migrations/migrate' interface TenantConfig { anonKey?: string @@ -43,10 +46,26 @@ export enum TenantMigrationStatus { FAILED_STALE = 'FAILED_STALE', } +interface S3Credentials { + accessKey: string + secretKey: string + claims: { role: string; sub?: string; [key: string]: any } +} + const { isMultitenant, dbServiceRole, serviceKey, jwtSecret, jwtJWKS } = getConfig() const tenantConfigCache = new Map() + +const tenantS3CredentialsCache = new LRUCache({ + maxSize: 1024 * 1024 * 50, // 50MB + ttl: 1000 * 60 * 60, // 1 hour + sizeCalculation: (value) => objectSizeOf(value), + updateAgeOnGet: true, + allowStale: false, +}) + const tenantMutex = createMutexByKey() +const s3CredentialsMutex = createMutexByKey() const singleTenantServiceKey: | { @@ -167,11 +186,7 @@ export async function getTenantConfig(tenantId: string): Promise { const tenant = await multitenantKnex('tenants').first().where('id', tenantId) if (!tenant) { - throw new StorageBackendError( - 'Missing Tenant config', - 400, - `Tenant config for ${tenantId} not found` - ) + throw ERRORS.MissingTenantConfig(tenantId) } const { anon_key, @@ -278,12 +293,138 @@ export async function getFeatures(tenantId: string): Promise { } const TENANTS_UPDATE_CHANNEL = 'tenants_update' +const TENANTS_S3_CREDENTIALS_UPDATE_CHANNEL = 'tenants_s3_credentials_update' /** * Keeps the in memory config cache up to date */ export async function listenForTenantUpdate(pubSub: PubSubAdapter): Promise { - await pubSub.subscribe(TENANTS_UPDATE_CHANNEL, (tenantId) => { - tenantConfigCache.delete(tenantId) + await pubSub.subscribe(TENANTS_UPDATE_CHANNEL, (cacheKey) => { + tenantConfigCache.delete(cacheKey) + }) + + await pubSub.subscribe(TENANTS_S3_CREDENTIALS_UPDATE_CHANNEL, (cacheKey) => { + tenantS3CredentialsCache.delete(cacheKey) + }) +} + +/** + * Create S3 Credential for a tenant + * @param tenantId + * @param data + */ +export async function createS3Credentials( + tenantId: string, + data: { description: string; claims?: S3Credentials['claims'] } +) { + const existingCount = await countS3Credentials(tenantId) + + if (existingCount >= 50) { + throw ERRORS.MaximumCredentialsLimit() + } + + const secretAccessKeyId = crypto.randomBytes(32).toString('hex').slice(0, 32) + const secretAccessKey = crypto.randomBytes(64).toString('hex').slice(0, 
64) + + if (data.claims) { + delete data.claims.iss + delete data.claims.issuer + delete data.claims.exp + delete data.claims.iat + } + + data.claims = { + ...(data.claims || {}), + role: data.claims?.role ?? dbServiceRole, + issuer: `supabase.storage.${tenantId}`, + sub: data.claims?.sub, + } + + const credentials = await multitenantKnex + .table('tenants_s3_credentials') + .insert({ + tenant_id: tenantId, + description: data.description, + access_key: secretAccessKeyId, + secret_key: encrypt(secretAccessKey), + claims: JSON.stringify(data.claims), + }) + .returning('id') + + return { + id: credentials[0].id, + access_key: secretAccessKeyId, + secret_key: secretAccessKey, + } +} + +export async function getS3CredentialsByAccessKey( + tenantId: string, + accessKey: string +): Promise { + const cacheKey = `${tenantId}:${accessKey}` + const cachedCredentials = tenantS3CredentialsCache.get(cacheKey) + + if (cachedCredentials) { + return cachedCredentials + } + + return s3CredentialsMutex(cacheKey, async () => { + const cachedCredentials = tenantS3CredentialsCache.get(cacheKey) + + if (cachedCredentials) { + return cachedCredentials + } + + const data = await multitenantKnex + .table('tenants_s3_credentials') + .select('access_key', 'secret_key', 'claims') + .where('tenant_id', tenantId) + .where('access_key', accessKey) + .first() + + if (!data) { + throw ERRORS.MissingS3Credentials() + } + + const secretKey = decrypt(data.secret_key) + + tenantS3CredentialsCache.set(cacheKey, { + accessKey: data.access_key, + secretKey: secretKey, + claims: data.claims, + }) + + return { + accessKey: data.access_key, + secretKey: secretKey, + claims: data.claims, + } }) } + +export function deleteS3Credential(tenantId: string, credentialId: string) { + return multitenantKnex + .table('tenants_s3_credentials') + .where('tenant_id', tenantId) + .where('id', credentialId) + .delete() + .returning('id') +} + +export function listS3Credentials(tenantId: string) { + return multitenantKnex + .table('tenants_s3_credentials') + .select('id', 'description', 'access_key', 'created_at') + .where('tenant_id', tenantId) + .orderBy('created_at', 'asc') +} + +export async function countS3Credentials(tenantId: string) { + const data = await multitenantKnex + .table('tenants_s3_credentials') + .count('id') + .where('tenant_id', tenantId) + + return Number((data as any)?.count || 0) +} diff --git a/src/http/error-handler.ts b/src/http/error-handler.ts index 6ceb04cb..e5a06227 100644 --- a/src/http/error-handler.ts +++ b/src/http/error-handler.ts @@ -1,5 +1,5 @@ import { FastifyInstance } from 'fastify' -import { isRenderableError } from '../storage' +import { ErrorCode, isRenderableError } from '../storage' import { FastifyError } from '@fastify/error' import { DatabaseError } from 'pg' @@ -15,17 +15,6 @@ export const setErrorHandler = (app: FastifyInstance) => { // it will be logged in the request log plugin request.executionError = error - if (isRenderableError(error)) { - const renderableError = error.render() - const statusCode = error.userStatusCode - ? error.userStatusCode - : renderableError.statusCode === '500' - ? 
500 - : 400 - - return reply.status(statusCode).send(renderableError) - } - // database error if ( error instanceof DatabaseError && @@ -40,10 +29,25 @@ export const setErrorHandler = (app: FastifyInstance) => { return reply.status(429).send({ statusCode: `429`, error: 'too_many_connections', + code: ErrorCode.SlowDown, message: 'Too many connections issued to the database', }) } + if (isRenderableError(error)) { + const renderableError = error.render() + const statusCode = error.userStatusCode + ? error.userStatusCode + : renderableError.statusCode === '500' + ? 500 + : 400 + + return reply.status(statusCode).send({ + ...renderableError, + error: error.error || renderableError.code, + }) + } + // Fastify errors if ('statusCode' in error) { const err = error as FastifyError diff --git a/src/http/plugins/db.ts b/src/http/plugins/db.ts index 7fbc49b1..bad637eb 100644 --- a/src/http/plugins/db.ts +++ b/src/http/plugins/db.ts @@ -29,7 +29,8 @@ export const db = fastifyPlugin(async (fastify) => { fastify.addHook('preHandler', async (request) => { const adminUser = await getServiceKeyUser(request.tenantId) - const userPayload = await verifyJWT<{ role?: string }>(request.jwt, adminUser.jwtSecret) + const userPayload = + request.jwtPayload ?? (await verifyJWT<{ role?: string }>(request.jwt, adminUser.jwtSecret)) request.db = await getPostgresConnection({ user: { diff --git a/src/http/plugins/index.ts b/src/http/plugins/index.ts index bf8bdf3d..5730e460 100644 --- a/src/http/plugins/index.ts +++ b/src/http/plugins/index.ts @@ -7,3 +7,5 @@ export * from './storage' export * from './tenant-id' export * from './tenant-feature' export * from './metrics' +export * from './xml' +export * from './signature-v4' diff --git a/src/http/plugins/jwt.ts b/src/http/plugins/jwt.ts index 40bb934f..631a4894 100644 --- a/src/http/plugins/jwt.ts +++ b/src/http/plugins/jwt.ts @@ -1,11 +1,13 @@ import fastifyPlugin from 'fastify-plugin' import { createResponse } from '../generic-routes' -import { getOwner } from '../../auth' -import { getJwtSecret } from '../../database/tenant' +import { verifyJWT } from '../../auth' +import { getJwtSecret } from '../../database' +import { JwtPayload } from 'jsonwebtoken' declare module 'fastify' { interface FastifyRequest { jwt: string + jwtPayload?: JwtPayload & { role?: string } owner?: string } } @@ -14,14 +16,17 @@ const BEARER = /^Bearer\s+/i export const jwt = fastifyPlugin(async (fastify) => { fastify.decorateRequest('jwt', '') + fastify.decorateRequest('jwtPayload', undefined) + fastify.addHook('preHandler', async (request, reply) => { request.jwt = (request.headers.authorization || '').replace(BEARER, '') const { secret, jwks } = await getJwtSecret(request.tenantId) try { - const owner = await getOwner(request.jwt, secret, jwks || null) - request.owner = owner + const payload = await verifyJWT(request.jwt, secret, jwks || null) + request.jwtPayload = payload + request.owner = payload.sub } catch (err: any) { request.log.error({ error: err }, 'unable to get owner') return reply.status(400).send(createResponse(err.message, '400', err.message)) diff --git a/src/http/plugins/signature-v4.ts b/src/http/plugins/signature-v4.ts new file mode 100644 index 00000000..2e453519 --- /dev/null +++ b/src/http/plugins/signature-v4.ts @@ -0,0 +1,160 @@ +import { FastifyInstance, FastifyRequest } from 'fastify' +import fastifyPlugin from 'fastify-plugin' +import { getS3CredentialsByAccessKey, getTenantConfig } from '../../database' +import { ClientSignature, SignatureV4 } from 
'../../storage/protocols/s3' +import { ERRORS } from '../../storage' +import { signJWT, verifyJWT } from '../../auth' +import { getConfig } from '../../config' + +const { + anonKey, + jwtSecret, + jwtJWKS, + serviceKey, + storageS3Region, + isMultitenant, + s3ProtocolPrefix, + s3ProtocolEnforceRegion, + s3ProtocolAccessKeyId, + s3ProtocolAccessKeySecret, +} = getConfig() + +export const signatureV4 = fastifyPlugin(async function (fastify: FastifyInstance) { + fastify.addHook('preHandler', async (request: FastifyRequest) => { + if (typeof request.headers.authorization !== 'string') { + throw ERRORS.AccessDenied('Missing authorization header') + } + + const clientCredentials = SignatureV4.parseAuthorizationHeader(request.headers.authorization) + + const sessionToken = request.headers['x-amz-security-token'] as string | undefined + + const { + signature: signatureV4, + claims, + token, + } = await createSignature(request.tenantId, clientCredentials, { + sessionToken: sessionToken, + }) + + const isVerified = signatureV4.verify({ + url: request.url, + body: request.body as string | ReadableStream | Buffer, + headers: request.headers as Record, + method: request.method, + query: request.query as Record, + prefix: s3ProtocolPrefix, + credentials: clientCredentials.credentials, + signature: clientCredentials.signature, + signedHeaders: clientCredentials.signedHeaders, + }) + + if (!isVerified && !sessionToken) { + throw ERRORS.SignatureDoesNotMatch( + 'The request signature we calculated does not match the signature you provided. Check your key and signing method.' + ) + } + + if (!isVerified && sessionToken) { + throw ERRORS.SignatureDoesNotMatch( + 'The request signature we calculated does not match the signature you provided, Check your credentials. ' + + 'The session token should be a valid JWT token' + ) + } + + const jwtSecrets = { + jwtSecret: jwtSecret, + jwks: jwtJWKS, + } + + if (isMultitenant) { + const tenant = await getTenantConfig(request.tenantId) + jwtSecrets.jwtSecret = tenant.jwtSecret + jwtSecrets.jwks = tenant.jwks || undefined + } + + if (token) { + const payload = await verifyJWT(token, jwtSecrets.jwtSecret, jwtSecrets.jwks) + request.jwt = token + request.jwtPayload = payload + request.owner = payload.sub + return + } + + if (!claims) { + throw ERRORS.AccessDenied('Missing claims') + } + + const jwt = await signJWT(claims, jwtSecrets.jwtSecret, '5m') + + request.jwt = jwt + request.jwtPayload = claims + request.owner = claims.sub + }) +}) + +async function createSignature( + tenantId: string, + clientSignature: ClientSignature, + session?: { sessionToken?: string } +) { + const awsRegion = storageS3Region + const awsService = 's3' + + if (session?.sessionToken) { + const tenantAnonKey = isMultitenant ? 
(await getTenantConfig(tenantId)).anonKey : anonKey + + if (!tenantAnonKey) { + throw ERRORS.AccessDenied('Missing tenant anon key') + } + + const signature = new SignatureV4({ + enforceRegion: s3ProtocolEnforceRegion, + credentials: { + accessKey: tenantId, + secretKey: tenantAnonKey, + region: awsRegion, + service: awsService, + }, + }) + + return { signature, claims: undefined, token: session.sessionToken } + } + + if (isMultitenant) { + const credential = await getS3CredentialsByAccessKey( + tenantId, + clientSignature.credentials.accessKey + ) + + const signature = new SignatureV4({ + enforceRegion: s3ProtocolEnforceRegion, + credentials: { + accessKey: credential.accessKey, + secretKey: credential.secretKey, + region: awsRegion, + service: awsService, + }, + }) + + return { signature, claims: credential.claims, token: undefined } + } + + if (!s3ProtocolAccessKeyId || !s3ProtocolAccessKeySecret) { + throw ERRORS.AccessDenied( + 'Missing S3 Protocol Access Key ID or Secret Key Environment variables' + ) + } + + const signature = new SignatureV4({ + enforceRegion: s3ProtocolEnforceRegion, + credentials: { + accessKey: s3ProtocolAccessKeyId, + secretKey: s3ProtocolAccessKeySecret, + region: awsRegion, + service: awsService, + }, + }) + + return { signature, claims: undefined, token: serviceKey } +} diff --git a/src/http/plugins/xml.ts b/src/http/plugins/xml.ts new file mode 100644 index 00000000..98712ceb --- /dev/null +++ b/src/http/plugins/xml.ts @@ -0,0 +1,36 @@ +import { FastifyInstance } from 'fastify' +import accepts from '@fastify/accepts' +import fastifyPlugin from 'fastify-plugin' +import xml from 'xml2js' + +// no types exists for this package +// eslint-disable-next-line @typescript-eslint/ban-ts-comment +// @ts-ignore +import xmlBodyParser from 'fastify-xml-body-parser' + +export const jsonToXml = fastifyPlugin(async function (fastify: FastifyInstance) { + fastify.register(accepts) + + fastify.register(xmlBodyParser) + fastify.addHook('preSerialization', async (req, res, payload) => { + const accept = req.accepts() + if ( + res.getHeader('content-type')?.toString()?.includes('application/json') && + accept.types(['application/xml', 'application/json']) === 'application/xml' + ) { + res.serializer((payload) => payload) + + const xmlBuilder = new xml.Builder({ + renderOpts: { + pretty: false, + }, + }) + const xmlPayload = xmlBuilder.buildObject(payload) + res.type('application/xml') + res.header('content-type', 'application/xml; charset=utf-8') + return xmlPayload + } + + return payload + }) +}) diff --git a/src/http/routes/admin/index.ts b/src/http/routes/admin/index.ts index 49971460..5a35c323 100644 --- a/src/http/routes/admin/index.ts +++ b/src/http/routes/admin/index.ts @@ -1,2 +1,3 @@ export { default as migrations } from './migrations' export { default as tenants } from './tenants' +export { default as s3Credentials } from './s3' diff --git a/src/http/routes/admin/migrations.ts b/src/http/routes/admin/migrations.ts index 35588120..7b5f1ac7 100644 --- a/src/http/routes/admin/migrations.ts +++ b/src/http/routes/admin/migrations.ts @@ -2,13 +2,28 @@ import { FastifyInstance } from 'fastify' import apiKey from '../../plugins/apikey' import { Queue, RunMigrationsOnTenants } from '../../../queue' import { getConfig } from '../../../config' -import { multitenantKnex } from '../../../database' +import { multitenantKnex, runMigrationsOnAllTenants } from '../../../database' const { pgQueueEnable } = getConfig() export default async function routes(fastify: FastifyInstance) { 
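  // The /migrate/fleet route below schedules a migration run across all tenants: the module sits
  // behind the admin apiKey plugin, returns 400 when pgQueueEnable is off, and ties the request's
  // error event to an AbortController so runMigrationsOnAllTenants stops if the caller disconnects.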
fastify.register(apiKey) + fastify.post('/migrate/fleet', async (req, reply) => { + if (!pgQueueEnable) { + return reply.status(400).send({ message: 'Queue is not enabled' }) + } + const abortController = new AbortController() + + req.raw.on('error', () => { + abortController.abort() + }) + + await runMigrationsOnAllTenants(abortController.signal) + + return reply.send({ message: 'Migrations scheduled' }) + }) + fastify.get('/progress', async (req, reply) => { if (!pgQueueEnable) { return reply.code(400).send({ message: 'Queue is not enabled' }) diff --git a/src/http/routes/admin/s3.ts b/src/http/routes/admin/s3.ts new file mode 100644 index 00000000..947d8580 --- /dev/null +++ b/src/http/routes/admin/s3.ts @@ -0,0 +1,124 @@ +import { FastifyInstance, RequestGenericInterface } from 'fastify' +import apiKey from '../../plugins/apikey' +import { createS3Credentials, deleteS3Credential, listS3Credentials } from '../../../database' +import { FromSchema } from 'json-schema-to-ts' + +const createCredentialsSchema = { + description: 'Create S3 Credentials', + params: { + type: 'object', + properties: { + tenantId: { type: 'string' }, + }, + required: ['tenantId'], + }, + body: { + type: 'object', + properties: { + description: { type: 'string', minLength: 3, maxLength: 2000 }, + claims: { + type: 'object', + properties: { + role: { type: 'string' }, + sub: { type: 'string' }, + }, + required: ['role'], + additionalProperties: true, + }, + }, + required: ['description'], + }, +} as const + +const deleteCredentialsSchema = { + description: 'Delete S3 Credentials', + params: { + type: 'object', + properties: { + tenantId: { type: 'string' }, + }, + required: ['tenantId'], + }, + body: { + type: 'object', + properties: { + id: { type: 'string' }, + }, + required: ['id'], + }, +} as const + +const listCredentialsSchema = { + description: 'List S3 Credentials', + params: { + type: 'object', + properties: { + tenantId: { type: 'string' }, + }, + required: ['tenantId'], + }, +} as const + +interface CreateCredentialsRequest extends RequestGenericInterface { + Body: FromSchema + Params: { + tenantId: string + } +} + +interface DeleteCredentialsRequest extends RequestGenericInterface { + Body: FromSchema + Params: { + tenantId: string + } +} + +interface ListCredentialsRequest extends RequestGenericInterface { + Params: { + tenantId: string + } +} + +export default async function routes(fastify: FastifyInstance) { + fastify.register(apiKey) + + fastify.post( + '/:tenantId/credentials', + { + schema: createCredentialsSchema, + }, + async (req, reply) => { + const credentials = await createS3Credentials(req.params.tenantId, { + description: req.body.description, + claims: req.body.claims, + }) + + reply.status(201).send({ + id: credentials.id, + access_key: credentials.access_key, + secret_key: credentials.secret_key, + description: req.body.description, + }) + } + ) + + fastify.get( + '/:tenantId/credentials', + { schema: listCredentialsSchema }, + async (req, reply) => { + const credentials = await listS3Credentials(req.params.tenantId) + + return reply.send(credentials) + } + ) + + fastify.delete( + '/:tenantId/credentials', + { schema: deleteCredentialsSchema }, + async (req, reply) => { + await deleteS3Credential(req.params.tenantId, req.body.id) + + return reply.code(204).send() + } + ) +} diff --git a/src/http/routes/index.ts b/src/http/routes/index.ts index e5b21b75..3d0e14eb 100644 --- a/src/http/routes/index.ts +++ b/src/http/routes/index.ts @@ -3,4 +3,5 @@ export { default as object } from 
'./object' export { default as render } from './render' export { default as multiPart } from './tus' export { default as healthcheck } from './health' +export { default as s3 } from './s3' export * from './admin' diff --git a/src/http/routes/object/copyObject.ts b/src/http/routes/object/copyObject.ts index 60eed6bb..b8c3533a 100644 --- a/src/http/routes/object/copyObject.ts +++ b/src/http/routes/object/copyObject.ts @@ -6,8 +6,9 @@ import { AuthenticatedRequest } from '../../request' const copyRequestBodySchema = { type: 'object', properties: { - sourceKey: { type: 'string', examples: ['folder/source.png'] }, bucketId: { type: 'string', examples: ['avatars'] }, + sourceKey: { type: 'string', examples: ['folder/source.png'] }, + destinationBucket: { type: 'string', examples: ['users'] }, destinationKey: { type: 'string', examples: ['folder/destination.png'] }, }, required: ['sourceKey', 'bucketId', 'destinationKey'], @@ -38,7 +39,7 @@ export default async function routes(fastify: FastifyInstance) { schema, }, async (request, response) => { - const { sourceKey, destinationKey, bucketId } = request.body + const { sourceKey, destinationKey, bucketId, destinationBucket } = request.body request.log.info( 'sourceKey is %s and bucketName is %s and destinationKey is %s', sourceKey, @@ -46,12 +47,15 @@ export default async function routes(fastify: FastifyInstance) { destinationKey ) + const destinationBucketId = destinationBucket || bucketId + const result = await request.storage .from(bucketId) - .copyObject(sourceKey, destinationKey, request.owner) + .copyObject(sourceKey, destinationBucketId, destinationKey, request.owner) return response.status(result.httpStatusCode ?? 200).send({ - Key: `${bucketId}/${destinationKey}`, + Id: result.destObject.id, + Key: `${destinationBucketId}/${destinationKey}`, }) } ) diff --git a/src/http/routes/object/deleteObject.ts b/src/http/routes/object/deleteObject.ts index a4c09a15..b17d1472 100644 --- a/src/http/routes/object/deleteObject.ts +++ b/src/http/routes/object/deleteObject.ts @@ -1,5 +1,5 @@ import { FastifyInstance } from 'fastify' -import { FromSchema } from 'json-schema-to-ts' +import { FromSchema, JSONSchema } from 'json-schema-to-ts' import { createDefaultSchema, createResponse } from '../../generic-routes' import { AuthenticatedRequest } from '../../request' diff --git a/src/http/routes/object/getObject.ts b/src/http/routes/object/getObject.ts index 77bb3018..511153a3 100644 --- a/src/http/routes/object/getObject.ts +++ b/src/http/routes/object/getObject.ts @@ -45,7 +45,6 @@ async function requestHandler( // send the object from s3 const s3Key = `${request.tenantId}/${bucketName}/${objectName}` - request.log.info(s3Key) return request.storage.renderer('asset').render(request, response, { bucket: storageS3Bucket, diff --git a/src/http/routes/object/getSignedObject.ts b/src/http/routes/object/getSignedObject.ts index 6a2bb22f..3e22e292 100644 --- a/src/http/routes/object/getSignedObject.ts +++ b/src/http/routes/object/getSignedObject.ts @@ -2,7 +2,7 @@ import { FastifyInstance } from 'fastify' import { FromSchema } from 'json-schema-to-ts' import { getConfig } from '../../../config' import { SignedToken, verifyJWT } from '../../../auth' -import { StorageBackendError } from '../../../storage' +import { ERRORS } from '../../../storage' import { getJwtSecret } from '../../../database/tenant' const { storageS3Bucket } = getConfig() @@ -63,14 +63,14 @@ export default async function routes(fastify: FastifyInstance) { payload = (await verifyJWT(token, 
jwtSecret)) as SignedToken } catch (e) { const err = e as Error - throw new StorageBackendError('Invalid JWT', 400, err.message, err) + throw ERRORS.InvalidJWT(err) } const { url, exp } = payload const path = `${request.params.bucketName}/${request.params['*']}` if (url !== path) { - throw new StorageBackendError('InvalidSignature', 400, 'The url do not match the signature') + throw ERRORS.InvalidSignature() } const s3Key = `${request.tenantId}/${url}` diff --git a/src/http/routes/object/moveObject.ts b/src/http/routes/object/moveObject.ts index 0b10ae96..fc45e28b 100644 --- a/src/http/routes/object/moveObject.ts +++ b/src/http/routes/object/moveObject.ts @@ -8,6 +8,7 @@ const moveObjectsBodySchema = { properties: { bucketId: { type: 'string', examples: ['avatars'] }, sourceKey: { type: 'string', examples: ['folder/cat.png'] }, + destinationBucket: { type: 'string', examples: ['users'] }, destinationKey: { type: 'string', examples: ['folder/newcat.png'] }, }, required: ['bucketId', 'sourceKey', 'destinationKey'], @@ -38,11 +39,19 @@ export default async function routes(fastify: FastifyInstance) { schema, }, async (request, response) => { - const { destinationKey, sourceKey, bucketId } = request.body + const { destinationKey, sourceKey, bucketId, destinationBucket } = request.body - await request.storage.from(bucketId).moveObject(sourceKey, destinationKey, request.owner) + const destinationBucketId = destinationBucket || bucketId - return response.status(200).send(createResponse('Successfully moved')) + const move = await request.storage + .from(bucketId) + .moveObject(sourceKey, destinationBucketId, destinationKey, request.owner) + + return response.status(200).send({ + message: 'Successfully moved', + Id: move.destObject.id, + Key: move.destObject.name, + }) } ) } diff --git a/src/http/routes/object/uploadSignedObject.ts b/src/http/routes/object/uploadSignedObject.ts index 871a347b..ac6edbd1 100644 --- a/src/http/routes/object/uploadSignedObject.ts +++ b/src/http/routes/object/uploadSignedObject.ts @@ -1,7 +1,7 @@ import { FastifyInstance } from 'fastify' import { FromSchema } from 'json-schema-to-ts' import { SignedUploadToken, verifyJWT } from '../../../auth' -import { StorageBackendError } from '../../../storage' +import { ERRORS } from '../../../storage' import { getJwtSecret } from '../../../database/tenant' const uploadSignedObjectParamsSchema = { @@ -78,7 +78,7 @@ export default async function routes(fastify: FastifyInstance) { payload = (await verifyJWT(token, jwtSecret)) as SignedUploadToken } catch (e) { const err = e as Error - throw new StorageBackendError('Invalid JWT', 400, err.message, err) + throw ERRORS.InvalidJWT(err) } const { url, exp, owner } = payload @@ -86,11 +86,11 @@ export default async function routes(fastify: FastifyInstance) { const objectName = request.params['*'] if (url !== `${bucketName}/${objectName}`) { - throw new StorageBackendError('InvalidSignature', 400, 'The url do not match the signature') + throw ERRORS.InvalidSignature() } if (exp * 1000 < Date.now()) { - throw new StorageBackendError('ExpiredSignature', 400, 'The signature has expired') + throw ERRORS.ExpiredSignature() } const { objectMetadata, path } = await request.storage diff --git a/src/http/routes/render/renderAuthenticatedImage.ts b/src/http/routes/render/renderAuthenticatedImage.ts index f4e9ee4d..5ac828bb 100644 --- a/src/http/routes/render/renderAuthenticatedImage.ts +++ b/src/http/routes/render/renderAuthenticatedImage.ts @@ -38,7 +38,7 @@ export default async function 
routes(fastify: FastifyInstance) { querystring: renderImageQuerySchema, summary, response: { '4xx': { $ref: 'errorSchema#', description: 'Error response' } }, - tags: ['object'], + tags: ['transformation'], }, }, async (request, response) => { diff --git a/src/http/routes/render/renderPublicImage.ts b/src/http/routes/render/renderPublicImage.ts index 6ecd8416..26706c5c 100644 --- a/src/http/routes/render/renderPublicImage.ts +++ b/src/http/routes/render/renderPublicImage.ts @@ -38,7 +38,7 @@ export default async function routes(fastify: FastifyInstance) { querystring: renderImageQuerySchema, summary, response: { '4xx': { $ref: 'errorSchema#', description: 'Error response' } }, - tags: ['object'], + tags: ['transformation'], }, }, async (request, response) => { diff --git a/src/http/routes/render/renderSignedImage.ts b/src/http/routes/render/renderSignedImage.ts index c801df1c..b18319ae 100644 --- a/src/http/routes/render/renderSignedImage.ts +++ b/src/http/routes/render/renderSignedImage.ts @@ -3,7 +3,7 @@ import { FastifyInstance } from 'fastify' import { getConfig } from '../../../config' import { ImageRenderer } from '../../../storage/renderer' import { SignedToken, verifyJWT } from '../../../auth' -import { StorageBackendError } from '../../../storage' +import { ERRORS } from '../../../storage' import { getJwtSecret } from '../../../database/tenant' const { storageS3Bucket } = getConfig() @@ -46,7 +46,7 @@ export default async function routes(fastify: FastifyInstance) { querystring: renderImageQuerySchema, summary, response: { '4xx': { $ref: 'errorSchema#', description: 'Error response' } }, - tags: ['object'], + tags: ['transformation'], }, }, async (request, response) => { @@ -60,7 +60,7 @@ export default async function routes(fastify: FastifyInstance) { payload = (await verifyJWT(token, jwtSecret)) as SignedToken } catch (e) { const err = e as Error - throw new StorageBackendError('Invalid JWT', 400, err.message, err) + throw ERRORS.InvalidJWT(err) } const { url, transformations, exp } = payload @@ -68,7 +68,7 @@ export default async function routes(fastify: FastifyInstance) { const path = `${request.params.bucketName}/${request.params['*']}` if (url !== path) { - throw new StorageBackendError('InvalidSignature', 400, 'The url do not match the signature') + throw ERRORS.InvalidSignature() } const s3Key = `${request.tenantId}/${url}` diff --git a/src/http/routes/s3/commands/abort-multipart-upload.ts b/src/http/routes/s3/commands/abort-multipart-upload.ts new file mode 100644 index 00000000..4eafbedc --- /dev/null +++ b/src/http/routes/s3/commands/abort-multipart-upload.ts @@ -0,0 +1,33 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const AbortMultiPartUploadInput = { + summary: 'Abort MultiPart Upload', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Querystring: { + type: 'object', + properties: { + uploadId: { type: 'string' }, + }, + required: ['uploadId'], + }, +} as const + +export default function AbortMultiPartUpload(s3Router: S3Router) { + s3Router.delete('/:Bucket/*?uploadId', AbortMultiPartUploadInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + + return s3Protocol.abortMultipartUpload({ + Bucket: req.Params.Bucket, + Key: req.Params['*'], + UploadId: req.Querystring.uploadId, + }) + }) +} diff --git 
a/src/http/routes/s3/commands/complete-multipart-upload.ts b/src/http/routes/s3/commands/complete-multipart-upload.ts new file mode 100644 index 00000000..55c938f3 --- /dev/null +++ b/src/http/routes/s3/commands/complete-multipart-upload.ts @@ -0,0 +1,65 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const CompletedMultipartUpload = { + summary: 'Complete multipart upload', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Querystring: { + type: 'object', + properties: { + uploadId: { type: 'string' }, + }, + required: ['uploadId'], + }, + Headers: { + type: 'object', + properties: { + authorization: { type: 'string' }, + }, + additionalProperties: true, + required: ['authorization'], + }, + Body: { + nullable: true, + type: 'object', + properties: { + CompleteMultipartUpload: { + type: 'object', + properties: { + Parts: { + type: 'array', + items: { + type: 'object', + properties: { + PartNumber: { type: 'integer' }, + ETag: { type: 'string' }, + }, + required: ['PartNumber', 'ETag'], + }, + }, + }, + }, + }, + }, +} as const + +export default function CompleteMultipartUpload(s3Router: S3Router) { + s3Router.post('/:Bucket/*?uploadId', CompletedMultipartUpload, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + return s3Protocol.completeMultiPartUpload({ + Bucket: req.Params.Bucket, + Key: req.Params['*'], + UploadId: req.Querystring.uploadId, + MultipartUpload: { + Parts: req.Body?.CompleteMultipartUpload?.Parts || [], + }, + }) + }) +} diff --git a/src/http/routes/s3/commands/copy-object.ts b/src/http/routes/s3/commands/copy-object.ts new file mode 100644 index 00000000..c3279e04 --- /dev/null +++ b/src/http/routes/s3/commands/copy-object.ts @@ -0,0 +1,53 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const CopyObjectInput = { + summary: 'Copy Object', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Headers: { + type: 'object', + properties: { + 'x-amz-copy-source': { type: 'string' }, + 'x-amz-copy-source-if-match': { type: 'string' }, + 'x-amz-copy-source-if-modified-since': { type: 'string' }, + 'x-amz-copy-source-if-none-match': { type: 'string' }, + 'x-amz-copy-source-if-unmodified-since': { type: 'string' }, + 'content-encoding': { type: 'string' }, + 'content-type': { type: 'string' }, + 'cache-control': { type: 'string' }, + expires: { type: 'string' }, + }, + required: ['x-amz-copy-source'], + }, +} as const + +export default function CopyObject(s3Router: S3Router) { + s3Router.put('/:Bucket/*|x-amz-copy-source', CopyObjectInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + + return s3Protocol.copyObject({ + Bucket: req.Params.Bucket, + Key: req.Params['*'], + CopySource: req.Headers['x-amz-copy-source'], + ContentType: req.Headers['content-type'], + CacheControl: req.Headers['cache-control'], + Expires: req.Headers.expires ? new Date(req.Headers.expires) : undefined, + ContentEncoding: req.Headers['content-encoding'], + CopySourceIfMatch: req.Headers['x-amz-copy-source-if-match'], + CopySourceIfModifiedSince: req.Headers['x-amz-copy-source-if-modified-since'] + ? 
new Date(req.Headers['x-amz-copy-source-if-modified-since']) + : undefined, + CopySourceIfNoneMatch: req.Headers['x-amz-copy-source-if-none-match'], + CopySourceIfUnmodifiedSince: req.Headers['x-amz-copy-source-if-unmodified-since'] + ? new Date(req.Headers['x-amz-copy-source-if-unmodified-since']) + : undefined, + }) + }) +} diff --git a/src/http/routes/s3/commands/create-bucket.ts b/src/http/routes/s3/commands/create-bucket.ts new file mode 100644 index 00000000..249b6d8e --- /dev/null +++ b/src/http/routes/s3/commands/create-bucket.ts @@ -0,0 +1,27 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const CreateBucketInput = { + summary: 'Create Bucket', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + }, + required: ['Bucket'], + }, + Headers: { + type: 'object', + properties: { + 'x-amz-acl': { type: 'string' }, + }, + }, +} as const + +export default function CreateBucket(s3Router: S3Router) { + s3Router.put('/:Bucket', CreateBucketInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + + return s3Protocol.createBucket(req.Params.Bucket, req.Headers?.['x-amz-acl'] === 'public-read') + }) +} diff --git a/src/http/routes/s3/commands/create-multipart-upload.ts b/src/http/routes/s3/commands/create-multipart-upload.ts new file mode 100644 index 00000000..fe1f8fac --- /dev/null +++ b/src/http/routes/s3/commands/create-multipart-upload.ts @@ -0,0 +1,47 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const CreateMultiPartUploadInput = { + summary: 'Create multipart upload', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Querystring: { + type: 'object', + properties: { + uploads: { type: 'string' }, + }, + required: ['uploads'], + }, + Headers: { + type: 'object', + properties: { + authorization: { type: 'string' }, + 'content-type': { type: 'string' }, + 'cache-control': { type: 'string' }, + 'content-disposition': { type: 'string' }, + 'content-encoding': { type: 'string' }, + }, + required: ['authorization'], + }, +} as const + +export default function CreateMultipartUpload(s3Router: S3Router) { + s3Router.post('/:Bucket/*?uploads', CreateMultiPartUploadInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + + return s3Protocol.createMultiPartUpload({ + Bucket: req.Params.Bucket, + Key: req.Params['*'], + ContentType: req.Headers?.['content-type'], + CacheControl: req.Headers?.['cache-control'], + ContentDisposition: req.Headers?.['content-disposition'], + ContentEncoding: req.Headers?.['content-encoding'], + }) + }) +} diff --git a/src/http/routes/s3/commands/delete-bucket.ts b/src/http/routes/s3/commands/delete-bucket.ts new file mode 100644 index 00000000..207f4b1d --- /dev/null +++ b/src/http/routes/s3/commands/delete-bucket.ts @@ -0,0 +1,21 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const DeleteBucketInput = { + summary: 'Delete Bucket', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + }, + required: ['Bucket'], + }, +} as const + +export default function DeleteBucket(s3Router: S3Router) { + s3Router.delete('/:Bucket', DeleteBucketInput, (req, ctx) => { + const s3Protocol = new 
S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + + return s3Protocol.deleteBucket(req.Params.Bucket) + }) +} diff --git a/src/http/routes/s3/commands/delete-object.ts b/src/http/routes/s3/commands/delete-object.ts new file mode 100644 index 00000000..8843355b --- /dev/null +++ b/src/http/routes/s3/commands/delete-object.ts @@ -0,0 +1,79 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const DeleteObjectInput = { + summary: 'Delete Object', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Querystring: {}, +} as const + +const DeleteObjectsInput = { + summary: 'Delete Objects', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + }, + required: ['Bucket'], + }, + Body: { + type: 'object', + properties: { + Delete: { + type: 'object', + properties: { + Object: { + type: 'array', + items: { + type: 'object', + properties: { + Key: { type: 'string' }, + }, + required: ['Key'], + }, + }, + }, + required: ['Object'], + }, + }, + required: ['Delete'], + }, + Querystring: { + type: 'object', + properties: { + delete: { type: 'string' }, + }, + required: ['delete'], + }, +} as const + +export default function DeleteObject(s3Router: S3Router) { + // Delete multiple objects + s3Router.post('/:Bucket?delete', DeleteObjectsInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + + return s3Protocol.deleteObjects({ + Bucket: req.Params.Bucket, + Delete: { + Objects: req.Body.Delete.Object, + }, + }) + }) + + // Delete single object + s3Router.delete('/:Bucket/*', DeleteObjectInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + + return s3Protocol.deleteObject({ + Bucket: req.Params.Bucket, + Key: req.Params['*'], + }) + }) +} diff --git a/src/http/routes/s3/commands/get-bucket.ts b/src/http/routes/s3/commands/get-bucket.ts new file mode 100644 index 00000000..ff39f41c --- /dev/null +++ b/src/http/routes/s3/commands/get-bucket.ts @@ -0,0 +1,52 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const GetBucketLocationInput = { + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + }, + required: ['Bucket'], + }, + Querystring: { + type: 'object', + properties: { + location: { type: 'string' }, + }, + required: ['location'], + }, +} as const + +const GetBucketVersioningInput = { + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + }, + required: ['Bucket'], + }, + Querystring: { + type: 'object', + properties: { + versioning: { type: 'string' }, + }, + required: ['versioning'], + }, +} as const + +export default function GetBucket(s3Router: S3Router) { + s3Router.get('/:Bucket?location', GetBucketLocationInput, async (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + await ctx.storage.findBucket(req.Params.Bucket) + + return s3Protocol.getBucketLocation() + }) + + s3Router.get('/:Bucket?versioning', GetBucketVersioningInput, async (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + await ctx.storage.findBucket(req.Params.Bucket) + + return s3Protocol.getBucketVersioning() + }) +} diff --git a/src/http/routes/s3/commands/get-object.ts b/src/http/routes/s3/commands/get-object.ts new file mode 
100644 index 00000000..bc4c0ae1 --- /dev/null +++ b/src/http/routes/s3/commands/get-object.ts @@ -0,0 +1,38 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const ListObjectsInput = { + summary: 'Get Object', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Headers: { + type: 'object', + properties: { + range: { type: 'string' }, + 'if-none-match': { type: 'string' }, + 'if-modified-since': { type: 'string' }, + }, + }, + Querystring: {}, +} as const + +export default function ListObjects(s3Router: S3Router) { + s3Router.get('/:Bucket/*', ListObjectsInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + const ifModifiedSince = req.Headers?.['if-modified-since'] + + return s3Protocol.getObject({ + Bucket: req.Params.Bucket, + Key: req.Params['*'], + Range: req.Headers?.['range'], + IfNoneMatch: req.Headers?.['if-none-match'], + IfModifiedSince: ifModifiedSince ? new Date(ifModifiedSince) : undefined, + }) + }) +} diff --git a/src/http/routes/s3/commands/head-bucket.ts b/src/http/routes/s3/commands/head-bucket.ts new file mode 100644 index 00000000..dab350cb --- /dev/null +++ b/src/http/routes/s3/commands/head-bucket.ts @@ -0,0 +1,20 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const HeadBucketInput = { + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + }, + required: ['Bucket'], + }, +} as const + +export default function HeadBucket(s3Router: S3Router) { + s3Router.head('/:Bucket', HeadBucketInput, async (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + + return s3Protocol.headBucket(req.Params.Bucket) + }) +} diff --git a/src/http/routes/s3/commands/head-object.ts b/src/http/routes/s3/commands/head-object.ts new file mode 100644 index 00000000..e10b7051 --- /dev/null +++ b/src/http/routes/s3/commands/head-object.ts @@ -0,0 +1,25 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const HeadObjectInput = { + summary: 'Head Object', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, +} as const + +export default function HeadObject(s3Router: S3Router) { + s3Router.head('/:Bucket/*', HeadObjectInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + + return s3Protocol.headObject({ + Bucket: req.Params.Bucket, + Key: req.Params['*'], + }) + }) +} diff --git a/src/http/routes/s3/commands/list-buckets.ts b/src/http/routes/s3/commands/list-buckets.ts new file mode 100644 index 00000000..5e4c7852 --- /dev/null +++ b/src/http/routes/s3/commands/list-buckets.ts @@ -0,0 +1,13 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const ListObjectsInput = { + summary: 'List buckets', +} as const + +export default function ListBuckets(s3Router: S3Router) { + s3Router.get('/', ListObjectsInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + return s3Protocol.listBuckets() + }) +} diff --git a/src/http/routes/s3/commands/list-multipart-uploads.ts b/src/http/routes/s3/commands/list-multipart-uploads.ts 
new file mode 100644 index 00000000..4b6a8a15 --- /dev/null +++ b/src/http/routes/s3/commands/list-multipart-uploads.ts @@ -0,0 +1,42 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const ListObjectsInput = { + summary: 'List Objects', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + }, + required: ['Bucket'], + }, + Querystring: { + type: 'object', + properties: { + uploads: { type: 'string' }, + delimiter: { type: 'string' }, + 'encoding-type': { type: 'string', enum: ['url'] }, + 'max-uploads': { type: 'number', minimum: 1 }, + 'key-marker': { type: 'string' }, + 'upload-id-marker': { type: 'string' }, + prefix: { type: 'string' }, + }, + required: ['uploads'], + }, +} as const + +export default function ListMultipartUploads(s3Router: S3Router) { + s3Router.get('/:Bucket?uploads', ListObjectsInput, async (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + + return s3Protocol.listMultipartUploads({ + Bucket: req.Params.Bucket, + Prefix: req.Querystring?.prefix || '', + KeyMarker: req.Querystring?.['key-marker'], + UploadIdMarker: req.Querystring?.['upload-id-marker'], + EncodingType: req.Querystring?.['encoding-type'], + MaxUploads: req.Querystring?.['max-uploads'], + Delimiter: req.Querystring?.delimiter, + }) + }) +} diff --git a/src/http/routes/s3/commands/list-objects.ts b/src/http/routes/s3/commands/list-objects.ts new file mode 100644 index 00000000..6d76fec6 --- /dev/null +++ b/src/http/routes/s3/commands/list-objects.ts @@ -0,0 +1,76 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const ListObjectsV2Input = { + summary: 'List Objects V2', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + }, + required: ['Bucket'], + }, + Querystring: { + type: 'object', + properties: { + 'list-type': { type: 'string', enum: ['2'] }, + delimiter: { type: 'string' }, + 'encoding-type': { type: 'string', enum: ['url'] }, + 'max-keys': { type: 'number' }, + prefix: { type: 'string' }, + 'continuation-token': { type: 'string' }, + 'start-after': { type: 'string' }, + }, + required: ['list-type'], + }, +} as const + +const ListObjectsInput = { + summary: 'List Objects', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + }, + required: ['Bucket'], + }, + Querystring: { + type: 'object', + properties: { + delimiter: { type: 'string' }, + 'encoding-type': { type: 'string', enum: ['url'] }, + 'max-keys': { type: 'number' }, + prefix: { type: 'string' }, + marker: { type: 'string' }, + }, + }, +} as const + +export default function ListObjects(s3Router: S3Router) { + s3Router.get('/:Bucket?list-type=2', ListObjectsV2Input, async (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + + return s3Protocol.listObjectsV2({ + Bucket: req.Params.Bucket, + Prefix: req.Querystring?.prefix || '', + ContinuationToken: req.Querystring?.['continuation-token'], + StartAfter: req.Querystring?.['start-after'], + EncodingType: req.Querystring?.['encoding-type'], + MaxKeys: req.Querystring?.['max-keys'], + Delimiter: req.Querystring?.delimiter, + }) + }) + + s3Router.get('/:Bucket', ListObjectsInput, async (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + + return s3Protocol.listObjects({ + Bucket: req.Params.Bucket, + Prefix: req.Querystring?.prefix || 
'', + Marker: req.Querystring?.['marker'], + EncodingType: req.Querystring?.['encoding-type'], + MaxKeys: req.Querystring?.['max-keys'], + Delimiter: req.Querystring?.delimiter, + }) + }) +} diff --git a/src/http/routes/s3/commands/list-parts.ts b/src/http/routes/s3/commands/list-parts.ts new file mode 100644 index 00000000..eb6835b3 --- /dev/null +++ b/src/http/routes/s3/commands/list-parts.ts @@ -0,0 +1,37 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const ListPartsInput = { + summary: 'List Parts', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Querystring: { + type: 'object', + properties: { + uploadId: { type: 'string' }, + 'max-parts': { type: 'number', minimum: 1, maximum: 1000 }, + 'part-number-marker': { type: 'string' }, + }, + required: ['uploadId'], + }, +} as const + +export default function ListParts(s3Router: S3Router) { + s3Router.get('/:Bucket/*?uploadId', ListPartsInput, async (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + + return s3Protocol.listParts({ + Bucket: req.Params.Bucket, + Key: req.Params['*'], + UploadId: req.Querystring.uploadId, + MaxParts: req.Querystring['max-parts'], + PartNumberMarker: req.Querystring['part-number-marker'], + }) + }) +} diff --git a/src/http/routes/s3/commands/upload-part-copy.ts b/src/http/routes/s3/commands/upload-part-copy.ts new file mode 100644 index 00000000..266d89c5 --- /dev/null +++ b/src/http/routes/s3/commands/upload-part-copy.ts @@ -0,0 +1,62 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const UploadPartCopyInput = { + summary: 'Upload Part Copy', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Querystring: { + type: 'object', + properties: { + uploadId: { type: 'string' }, + partNumber: { type: 'number', minimum: 1, maximum: 1000 }, + }, + required: ['uploadId', 'partNumber'], + }, + Headers: { + type: 'object', + properties: { + 'x-amz-copy-source': { type: 'string' }, + 'x-amz-copy-source-range': { type: 'string' }, + 'x-amz-copy-source-if-match': { type: 'string' }, + 'x-amz-copy-source-if-modified-since': { type: 'string' }, + 'x-amz-copy-source-if-none-match': { type: 'string' }, + 'x-amz-copy-source-if-unmodified-since': { type: 'string' }, + expires: { type: 'string' }, + }, + required: ['x-amz-copy-source'], + }, +} as const + +export default function UploadPartCopy(s3Router: S3Router) { + s3Router.put( + '/:Bucket/*?partNumber&uploadId|x-amz-copy-source', + UploadPartCopyInput, + (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + + return s3Protocol.uploadPartCopy({ + Bucket: req.Params.Bucket, + Key: req.Params['*'], + CopySource: req.Headers['x-amz-copy-source'], + PartNumber: req.Querystring.partNumber, + UploadId: req.Querystring.uploadId, + CopySourceRange: req.Headers['x-amz-copy-source-range'], + CopySourceIfMatch: req.Headers['x-amz-copy-source-if-match'], + CopySourceIfModifiedSince: req.Headers['x-amz-copy-source-if-modified-since'] + ? 
new Date(req.Headers['x-amz-copy-source-if-modified-since']) + : undefined, + CopySourceIfNoneMatch: req.Headers['x-amz-copy-source-if-none-match'], + CopySourceIfUnmodifiedSince: req.Headers['x-amz-copy-source-if-unmodified-since'] + ? new Date(req.Headers['x-amz-copy-source-if-unmodified-since']) + : undefined, + }) + } + ) +} diff --git a/src/http/routes/s3/commands/upload-part.ts b/src/http/routes/s3/commands/upload-part.ts new file mode 100644 index 00000000..455e23db --- /dev/null +++ b/src/http/routes/s3/commands/upload-part.ts @@ -0,0 +1,82 @@ +import { S3ProtocolHandler } from '../../../../storage/protocols/s3/s3-handler' +import { S3Router } from '../router' + +const PutObjectInput = { + summary: 'Put Object', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Querystring: { + type: 'object', + }, + Headers: { + type: 'object', + properties: { + authorization: { type: 'string' }, + host: { type: 'string' }, + 'x-amz-content-sha256': { type: 'string' }, + 'x-amz-date': { type: 'string' }, + 'content-type': { type: 'string' }, + }, + }, +} as const + +const UploadPartInput = { + summary: 'Upload Part', + Params: { + type: 'object', + properties: { + Bucket: { type: 'string' }, + '*': { type: 'string' }, + }, + required: ['Bucket', '*'], + }, + Querystring: { + type: 'object', + properties: { + uploadId: { type: 'string' }, + partNumber: { type: 'number', minimum: 1, maximum: 5000 }, + }, + required: ['uploadId', 'partNumber'], + }, + Headers: { + type: 'object', + properties: { + host: { type: 'string' }, + 'x-amz-content-sha256': { type: 'string' }, + 'x-amz-date': { type: 'string' }, + 'content-type': { type: 'string' }, + 'content-length': { type: 'integer' }, + }, + required: ['content-length'], + }, +} as const + +export default function UploadPart(s3Router: S3Router) { + s3Router.put('/:Bucket/*?uploadId&partNumber', UploadPartInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + + return s3Protocol.uploadPart({ + Body: ctx.req.raw, + UploadId: req.Querystring?.uploadId, + Bucket: req.Params.Bucket, + Key: req.Params['*'], + PartNumber: req.Querystring?.partNumber, + ContentLength: req.Headers?.['content-length'], + }) + }) + + s3Router.put('/:Bucket/*', PutObjectInput, (req, ctx) => { + const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) + return s3Protocol.putObject({ + Body: ctx.req as any, + Bucket: req.Params.Bucket, + Key: req.Params['*'], + }) + }) +} diff --git a/src/http/routes/s3/error-handler.ts b/src/http/routes/s3/error-handler.ts new file mode 100644 index 00000000..e83752f2 --- /dev/null +++ b/src/http/routes/s3/error-handler.ts @@ -0,0 +1,91 @@ +import { FastifyError } from '@fastify/error' +import { FastifyRequest } from 'fastify/types/request' +import { FastifyReply } from 'fastify/types/reply' +import { S3ServiceException } from '@aws-sdk/client-s3' +import { ErrorCode, StorageBackendError } from '../../../storage' +import { DatabaseError } from 'pg' + +export const s3ErrorHandler = ( + error: FastifyError | Error, + request: FastifyRequest, + reply: FastifyReply +) => { + request.executionError = error + + const resource = request.url + .split('?')[0] + .replace('/s3', '') + .split('/') + .filter((e) => e) + .join('/') + + if ('validation' in error) { + return reply.status(400).send({ + Error: { + Resource: resource, + Code: ErrorCode.InvalidRequest, + Message: 
formatValidationError(error.validation).message, + }, + }) + } + + if (error instanceof S3ServiceException) { + return reply.status(error.$metadata.httpStatusCode || 500).send({ + Error: { + Resource: resource, + Code: error.$response?.body.Code || ErrorCode.S3Error, + Message: error.message, + }, + }) + } + + // database error + if ( + error instanceof DatabaseError && + [ + 'Max client connections reached', + 'remaining connection slots are reserved for non-replication superuser connections', + 'no more connections allowed', + 'sorry, too many clients already', + 'server login has been failing, try again later', + ].some((msg) => (error as DatabaseError).message.includes(msg)) + ) { + return reply.status(429).send({ + Error: { + Resource: resource, + Code: ErrorCode.SlowDown, + Message: 'Too many connections issued to the database', + }, + }) + } + + if (error instanceof StorageBackendError) { + return reply.status(error.httpStatusCode || 500).send({ + Error: { + Resource: resource, + Code: error.code, + Message: error.message, + }, + }) + } + + return reply.status(500).send({ + Error: { + Resource: resource, + Code: ErrorCode.InternalError, + Message: 'Internal Server Error', + }, + }) +} + +function formatValidationError(errors: any) { + let text = '' + const separator = ', ' + + for (let i = 0; i !== errors.length; ++i) { + const e = errors[i] + const instancePath = (e.instancePath || '').replace(/^\//, '') + text += instancePath.split('/').join(separator) + ' ' + e.message + separator + } + return new Error(text.slice(0, -separator.length)) +} diff --git a/src/http/routes/s3/index.ts b/src/http/routes/s3/index.ts new file mode 100644 index 00000000..aeb9b3f9 --- /dev/null +++ b/src/http/routes/s3/index.ts @@ -0,0 +1,100 @@ +import { FastifyInstance, RouteHandlerMethod } from 'fastify' +import { db, jsonToXml, signatureV4, storage } from '../../plugins' +import { getRouter, RequestInput } from './router' +import { s3ErrorHandler } from './error-handler' + +export default async function routes(fastify: FastifyInstance) { + fastify.register(async (fastify) => { + fastify.register(jsonToXml) + fastify.register(signatureV4) + fastify.register(db) + fastify.register(storage) + + const s3Router = getRouter() + const s3Routes = s3Router.routes() + + Array.from(s3Routes.keys()).forEach((routePath) => { + const routes = s3Routes.get(routePath) + if (!routes || routes?.length === 0) { + return + } + + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore + const methods = new Set(routes.map((e) => e.method)) + + methods.forEach((method) => { + const routesByMethod = routes.filter((e) => e.method === method) + + const routeHandler: RouteHandlerMethod = async (req, reply) => { + for (const route of routesByMethod) { + if ( + s3Router.matchRoute(route, { + query: (req.query as Record) || {}, + headers: (req.headers as Record) || {}, + }) + ) { + if (!route.handler) { + throw new Error('no handler found') + } + + const data: RequestInput = { + Params: req.params, + Body: req.body, + Headers: req.headers, + Querystring: req.query, + } + const compiler = route.compiledSchema() + const isValid = compiler(data) + + if (!isValid) { + throw { validation: compiler.errors } + } + + const output = await route.handler(data, { + req: req, + storage: req.storage, + tenantId: req.tenantId, + owner: req.owner, + }) + + const headers = output.headers + + if (headers) { + Object.keys(headers).forEach((header) => { + reply.header(header, headers[header]) + }) + } + return 
reply.status(output.statusCode || 200).send(output.responseBody) + } + } + + return reply.status(404).send() + } + + fastify[method]( + routePath, + { + validatorCompiler: () => () => true, + exposeHeadRoute: false, + errorHandler: s3ErrorHandler, + }, + routeHandler + ) + + // handle optional trailing slash + if (!routePath.endsWith('*') && !routePath.endsWith('/')) { + fastify[method]( + routePath + '/', + { + validatorCompiler: () => () => true, + exposeHeadRoute: false, + errorHandler: s3ErrorHandler, + }, + routeHandler + ) + } + }) + }) + }) +} diff --git a/src/http/routes/s3/router.ts b/src/http/routes/s3/router.ts new file mode 100644 index 00000000..dd80d716 --- /dev/null +++ b/src/http/routes/s3/router.ts @@ -0,0 +1,261 @@ +import { FastifyRequest } from 'fastify' +import { FromSchema, JSONSchema } from 'json-schema-to-ts' +import type { ValidateFunction } from 'ajv' +import Ajv from 'ajv' +import { Storage } from '../../../storage' +import { default as CreateBucket } from './commands/create-bucket' +import { default as ListBucket } from './commands/list-buckets' +import { default as ListObjects } from './commands/list-objects' +import { default as GetObject } from './commands/get-object' +import { default as CompleteMultipartUpload } from './commands/complete-multipart-upload' +import { default as DeleteBucket } from './commands/delete-bucket' +import { default as CreateMultipartUpload } from './commands/create-multipart-upload' +import { default as UploadPart } from './commands/upload-part' +import { default as HeadObject } from './commands/head-object' +import { default as DeleteObject } from './commands/delete-object' +import { default as AbortMultiPartUpload } from './commands/abort-multipart-upload' +import { default as GetBucket } from './commands/get-bucket' +import { default as HeadBucket } from './commands/head-bucket' +import { default as CopyObject } from './commands/copy-object' +import { default as ListMultipartUploads } from './commands/list-multipart-uploads' +import { default as ListParts } from './commands/list-parts' +import { default as UploadPartCopy } from './commands/upload-part-copy' +import { JTDDataType } from 'ajv/dist/jtd' + +export type Context = { storage: Storage; tenantId: string; owner?: string; req: FastifyRequest } +export type S3Router = Router + +const s3Commands = [ + UploadPartCopy, + CopyObject, + DeleteBucket, + HeadObject, + CreateBucket, + CompleteMultipartUpload, + CreateMultipartUpload, + UploadPart, + AbortMultiPartUpload, + ListMultipartUploads, + DeleteObject, + GetBucket, + HeadBucket, + ListBucket, + ListParts, + GetObject, + ListObjects, +] + +export function getRouter() { + const router = new Router() + s3Commands.forEach((command) => command(router)) + return router +} + +export type HTTPMethod = 'get' | 'put' | 'post' | 'head' | 'delete' | 'patch' + +export type Schema< + Q extends JSONSchema = JSONSchema, + H extends JSONSchema = JSONSchema, + P extends JSONSchema = JSONSchema, + B extends JSONSchema = JSONSchema +> = { + summary?: string + Querystring?: Q + Headers?: H + Params?: P + Body?: B +} + +type ResponseType = { + statusCode?: number + headers?: Record + responseBody?: unknown +} + +export type RequestInput< + S extends Schema, + A extends { + [key in keyof S]: S[key] extends JSONSchema ? FromSchema : undefined + } = { + [key in keyof S]: S[key] extends JSONSchema ? 
FromSchema<S[key]> : undefined
+  }
+> = {
+  Querystring: A['Querystring']
+  Headers: A['Headers']
+  Params: A['Params']
+  Body: A['Body']
+}
+
+type Handler<S extends Schema = Schema> = (
+  req: RequestInput<S>,
+  ctx: Context
+) => Promise<ResponseType>
+
+type Route<S extends Schema = Schema> = {
+  method: HTTPMethod
+  path: string
+  querystringMatches: { key: string; value: string }[]
+  headersMatches: string[]
+  handler?: Handler<S>
+  schema: S
+  compiledSchema: () => ValidateFunction<RequestInput<S>>
+}
+
+export class Router {
+  protected _routes: Map<string, Route[]> = new Map<string, Route[]>()
+
+  protected ajv = new Ajv({
+    coerceTypes: 'array',
+    useDefaults: true,
+    removeAdditional: true,
+    uriResolver: require('fast-uri'),
+    addUsedSchema: false,
+    allErrors: false,
+  })
+
+  registerRoute<R extends Schema>(
+    method: HTTPMethod,
+    url: string,
+    schema: R,
+    handler: Handler<R>
+  ) {
+    const { query, headers } = this.parseQueryString(url)
+    const normalizedUrl = url.split('?')[0].split('|')[0]
+
+    const existingPath = this._routes.get(normalizedUrl)
+    const schemaToCompile: {
+      Params?: JSONSchema
+      Headers?: JSONSchema
+      Querystring?: JSONSchema
+      Body?: JSONSchema
+    } = {}
+
+    if (schema.Params) {
+      schemaToCompile.Params = schema.Params
+    }
+    if (schema.Body) {
+      schemaToCompile.Body = schema.Body
+    }
+    if (schema.Headers) {
+      schemaToCompile.Headers = schema.Headers
+    }
+
+    if (schema.Querystring) {
+      schemaToCompile.Querystring = schema.Querystring
+    }
+
+    this.ajv.addSchema(
+      {
+        type: 'object',
+        properties: schemaToCompile,
+      },
+      method + url
+    )
+
+    const newRoute: Route<R> = {
+      method: method as HTTPMethod,
+      path: normalizedUrl,
+      querystringMatches: query,
+      headersMatches: headers,
+      schema: schema,
+      compiledSchema: () => this.ajv.getSchema(method + url) as ValidateFunction<RequestInput<R>>,
+      handler: handler as Handler<R>,
+    } as const
+
+    if (!existingPath) {
+      this._routes.set(normalizedUrl, [newRoute as unknown as Route])
+      return
+    }
+
+    existingPath.push(newRoute as unknown as Route)
+    this._routes.set(normalizedUrl, existingPath)
+  }
+
+  get<R extends Schema>(url: string, schema: R, handler: Handler<R>) {
+    this.registerRoute('get', url, schema, handler as any)
+  }
+
+  post<R extends Schema>(url: string, schema: R, handler: Handler<R>) {
+    this.registerRoute('post', url, schema, handler as any)
+  }
+
+  put<R extends Schema>(url: string, schema: R, handler: Handler<R>) {
+    this.registerRoute('put', url, schema, handler as any)
+  }
+
+  delete<R extends Schema>(url: string, schema: R, handler: Handler<R>) {
+    this.registerRoute('delete', url, schema, handler as any)
+  }
+
+  head<R extends Schema>(url: string, schema: R, handler: Handler<R>) {
+    this.registerRoute('head', url, schema, handler as any)
+  }
+
+  parseQueryMatch(query: string) {
+    const [key, value] = query.split('=')
+    return { key, value }
+  }
+
+  parseQueryString(queryString: string) {
+    const queries = queryString.replace(/\|.*/, '').split('?')[1]?.split('&') || []
+    const headers = queryString.split('|').splice(1)
+
+    if (queries.length === 0) {
+      return { query: [{ key: '*', value: '*' }], headers: headers }
+    }
+    return { query: queries.map(this.parseQueryMatch), headers: headers }
+  }
+
+  routes() {
+    return this._routes
+  }
+
+  matchRoute(
+    route: Route,
+    match: { query: Record<string, string>; headers: Record<string, string> }
+  ) {
+    if ((route.headersMatches?.length || 0) > 0) {
+      return (
+        this.matchHeaders(route.headersMatches, match.headers) &&
+        this.matchQueryString(route.querystringMatches, match.query)
+      )
+    }
+
+    return this.matchQueryString(route.querystringMatches, match.query)
+  }
+
+  protected matchHeaders(headers: string[], received?: Record<string, string>) {
+    if (!received) {
+      return headers.length === 0
+    }
+
+    return headers.every((header) => received[header] !== undefined)
+  }
+
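+  // Decides whether a request's query parameters satisfy this route's registered pattern:
+  // every registered key must be present (and equal to the registered value when one was
+  // given, e.g. 'list-type=2'), while a route registered without a query pattern carries
+  // the wildcard '*' entry and matches any request.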
protected matchQueryString( + matches: { key: string; value: string }[], + received?: Record + ) { + const keys = Object.keys(received || {}) + if (keys.length === 0 || !received) { + return matches.find((m) => m.key === '*') + } + + const foundMatches = matches.every((m) => { + const key = Object.keys(received).find((k) => k === m.key) + return ( + (m.key === key && m.value !== undefined && m.value === received[m.key]) || + (m.key === key && m.value === undefined) + ) + }) + + if (foundMatches) { + return true + } + + if (!foundMatches && matches.find((m) => m.key === '*')) { + return true + } + return false + } +} diff --git a/src/http/routes/tus/index.ts b/src/http/routes/tus/index.ts index 4d3fdd3b..7e886a76 100644 --- a/src/http/routes/tus/index.ts +++ b/src/http/routes/tus/index.ts @@ -12,7 +12,7 @@ import { PgLocker, UploadId, AlsMemoryKV, -} from '../../../storage/tus' +} from '../../../storage/protocols/tus' import { namingFunction, onCreate, @@ -22,8 +22,7 @@ import { generateUrl, getFileIdFromRequest, } from './lifecycle' -import { TenantConnection } from '../../../database/connection' -import { PubSub } from '../../../database/pubsub' +import { TenantConnection, PubSub } from '../../../database' import { S3Store } from '@tus/s3-store' import { NodeHttpHandler } from '@smithy/node-http-handler' import { createAgent } from '../../../storage/backend' @@ -155,7 +154,7 @@ export default async function routes(fastify: FastifyInstance) { fastify.post( '/', - { schema: { summary: 'Handle POST request for TUS Resumable uploads', tags: ['object'] } }, + { schema: { summary: 'Handle POST request for TUS Resumable uploads', tags: ['resumable'] } }, (req, res) => { tusServer.handle(req.raw, res.raw) } @@ -163,7 +162,7 @@ export default async function routes(fastify: FastifyInstance) { fastify.post( '/*', - { schema: { summary: 'Handle POST request for TUS Resumable uploads', tags: ['object'] } }, + { schema: { summary: 'Handle POST request for TUS Resumable uploads', tags: ['resumable'] } }, (req, res) => { tusServer.handle(req.raw, res.raw) } @@ -171,28 +170,32 @@ export default async function routes(fastify: FastifyInstance) { fastify.put( '/*', - { schema: { summary: 'Handle PUT request for TUS Resumable uploads', tags: ['object'] } }, + { schema: { summary: 'Handle PUT request for TUS Resumable uploads', tags: ['resumable'] } }, (req, res) => { tusServer.handle(req.raw, res.raw) } ) fastify.patch( '/*', - { schema: { summary: 'Handle PATCH request for TUS Resumable uploads', tags: ['object'] } }, + { + schema: { summary: 'Handle PATCH request for TUS Resumable uploads', tags: ['resumable'] }, + }, (req, res) => { tusServer.handle(req.raw, res.raw) } ) fastify.head( '/*', - { schema: { summary: 'Handle HEAD request for TUS Resumable uploads', tags: ['object'] } }, + { schema: { summary: 'Handle HEAD request for TUS Resumable uploads', tags: ['resumable'] } }, (req, res) => { tusServer.handle(req.raw, res.raw) } ) fastify.delete( '/*', - { schema: { summary: 'Handle DELETE request for TUS Resumable uploads', tags: ['object'] } }, + { + schema: { summary: 'Handle DELETE request for TUS Resumable uploads', tags: ['resumable'] }, + }, (req, res) => { tusServer.handle(req.raw, res.raw) } @@ -221,7 +224,7 @@ export default async function routes(fastify: FastifyInstance) { '/', { schema: { - tags: ['object'], + tags: ['resumable'], summary: 'Handle OPTIONS request for TUS Resumable uploads', description: 'Handle OPTIONS request for TUS Resumable uploads', }, @@ -235,7 +238,7 @@ export default 
async function routes(fastify: FastifyInstance) { '/*', { schema: { - tags: ['object'], + tags: ['resumable'], summary: 'Handle OPTIONS request for TUS Resumable uploads', description: 'Handle OPTIONS request for TUS Resumable uploads', }, diff --git a/src/http/routes/tus/lifecycle.ts b/src/http/routes/tus/lifecycle.ts index ee72c4b3..26a89f43 100644 --- a/src/http/routes/tus/lifecycle.ts +++ b/src/http/routes/tus/lifecycle.ts @@ -2,11 +2,11 @@ import http from 'http' import { BaseLogger } from 'pino' import { Upload } from '@tus/server' import { randomUUID } from 'crypto' -import { isRenderableError, Storage, StorageBackendError } from '../../../storage' +import { ERRORS, isRenderableError, Storage } from '../../../storage' import { getConfig } from '../../../config' import { Uploader } from '../../../storage/uploader' import { TenantConnection } from '../../../database' -import { UploadId } from '../../../storage/tus' +import { UploadId } from '../../../storage/protocols/tus' const { storageS3Bucket, tusPath } = getConfig() const reExtractFileID = /([^/]+)\/?$/ @@ -102,7 +102,7 @@ export function namingFunction( } if (!metadata) { - throw new StorageBackendError('metadata_header_invalid', 400, 'metadata header invalid') + throw ERRORS.MetadataRequired() } try { diff --git a/src/server.ts b/src/server.ts index 47da27c7..18dce77f 100644 --- a/src/server.ts +++ b/src/server.ts @@ -104,6 +104,10 @@ const exposeDocs = true PubSub.close(), multitenantKnex.destroy(), ]) + + if (process.env.NODE_ENV !== 'production') { + process.exit(0) + } } catch (e) { logSchema.error(logger, 'shutdown error', { type: 'SIGTERM', diff --git a/src/storage/backend/generic.ts b/src/storage/backend/adapter.ts similarity index 64% rename from src/storage/backend/generic.ts rename to src/storage/backend/adapter.ts index b9e35710..6f8b09a7 100644 --- a/src/storage/backend/generic.ts +++ b/src/storage/backend/adapter.ts @@ -1,4 +1,4 @@ -import { Readable } from 'stream' +import stream, { Readable } from 'stream' import { getConfig } from '../../config' /** @@ -32,6 +32,16 @@ export type ObjectMetadata = { httpStatusCode: number } +export type UploadPart = { + Version?: string + ETag?: string + PartNumber?: number + ChecksumCRC32?: string + ChecksumCRC32C?: string + ChecksumSHA1?: string + ChecksumSHA256?: string +} + /** * A generic storage Adapter to interact with files */ @@ -92,14 +102,21 @@ export abstract class StorageBackendAdapter { * @param version * @param destination * @param destinationVersion + * @param conditions */ async copyObject( bucket: string, source: string, version: string | undefined, destination: string, - destinationVersion: string | undefined - ): Promise> { + destinationVersion: string | undefined, + conditions?: { + ifMatch?: string + ifNoneMatch?: string + ifModifiedSince?: Date + ifUnmodifiedSince?: Date + } + ): Promise> { throw new Error('copyObject not implemented') } @@ -135,6 +152,65 @@ export abstract class StorageBackendAdapter { async privateAssetUrl(bucket: string, key: string, version: string | undefined): Promise { throw new Error('privateAssetUrl not implemented') } + + async createMultiPartUpload( + bucketName: string, + key: string, + version: string | undefined, + contentType: string, + cacheControl: string + ): Promise { + throw new Error('not implemented') + } + + async uploadPart( + bucketName: string, + key: string, + version: string, + uploadId: string, + partNumber: number, + body?: string | Uint8Array | Buffer | Readable, + length?: number + ): Promise<{ ETag?: string 
}> { + throw new Error('not implemented') + } + + async completeMultipartUpload( + bucketName: string, + key: string, + uploadId: string, + version: string, + parts: UploadPart[] + ): Promise< + Omit & { + location?: string + bucket?: string + version: string + } + > { + throw new Error('not implemented') + } + + async abortMultipartUpload( + bucketName: string, + key: string, + uploadId: string, + version?: string + ): Promise { + throw new Error('not implemented') + } + + async uploadPartCopy( + storageS3Bucket: string, + key: string, + version: string, + UploadId: string, + PartNumber: number, + sourceKey: string, + sourceKeyVersion?: string + ): Promise<{ eTag?: string; lastModified?: Date }> { + throw new Error('not implemented') + } } const { tusUseFileVersionSeparator } = getConfig() diff --git a/src/storage/backend/file.ts b/src/storage/backend/file.ts index 196ceac5..102feebd 100644 --- a/src/storage/backend/file.ts +++ b/src/storage/backend/file.ts @@ -1,9 +1,10 @@ -import xattr from 'fs-xattr' +import * as xattr from 'fs-xattr' import fs from 'fs-extra' import path from 'path' import fileChecksum from 'md5-file' import { promisify } from 'util' import stream from 'stream' +import MultiStream from 'multistream' import { getConfig } from '../../config' import { StorageBackendAdapter, @@ -11,8 +12,11 @@ import { ObjectResponse, withOptionalVersion, BrowserCacheHeaders, -} from './generic' -import { StorageBackendError } from '../errors' + UploadPart, +} from './adapter' +import { ERRORS, StorageBackendError } from '../errors' +import { randomUUID } from 'crypto' +import fsExtra from 'fs-extra' const pipeline = promisify(stream.pipeline) interface FileMetadata { @@ -181,7 +185,7 @@ export class FileBackend implements StorageBackendAdapter { version: string | undefined, destination: string, destinationVersion: string - ): Promise> { + ): Promise> { const srcFile = path.resolve(this.filePath, withOptionalVersion(`${bucket}/${source}`, version)) const destFile = path.resolve( this.filePath, @@ -193,8 +197,13 @@ export class FileBackend implements StorageBackendAdapter { await this.setFileMetadata(destFile, await this.getFileMetadata(srcFile)) + const fileStat = await fs.lstat(destFile) + const checksum = await fileChecksum(destFile) + return { httpStatusCode: 200, + lastModified: fileStat.mtime, + eTag: checksum, } } @@ -250,6 +259,151 @@ export class FileBackend implements StorageBackendAdapter { } } + async createMultiPartUpload( + bucketName: string, + key: string, + version: string | undefined, + contentType: string, + cacheControl: string + ): Promise { + const uploadId = randomUUID() + const multiPartFolder = path.join( + this.filePath, + 'multiparts', + uploadId, + bucketName, + withOptionalVersion(key, version) + ) + + const multipartFile = path.join(multiPartFolder, 'metadata.json') + await fsExtra.ensureDir(multiPartFolder) + await fsExtra.writeFile(multipartFile, JSON.stringify({ contentType, cacheControl })) + + return uploadId + } + + async uploadPart( + bucketName: string, + key: string, + version: string, + uploadId: string, + partNumber: number, + body: stream.Readable + ): Promise<{ ETag?: string }> { + const multiPartFolder = path.join( + this.filePath, + 'multiparts', + uploadId, + bucketName, + withOptionalVersion(key, version) + ) + + const multipartFile = path.join(multiPartFolder, `part-${partNumber}`) + + const writeStream = fsExtra.createWriteStream(multipartFile) + await pipeline(body, writeStream) + + const etag = await fileChecksum(multipartFile) + + await 
this.setMetadataAttr(multipartFile, 'etag', etag) + + return { ETag: etag } + } + + async completeMultipartUpload( + bucketName: string, + key: string, + uploadId: string, + version: string, + parts: UploadPart[] + ): Promise< + Omit & { + location?: string + bucket?: string + version: string + } + > { + const multiPartFolder = path.join( + this.filePath, + 'multiparts', + uploadId, + bucketName, + withOptionalVersion(key, version) + ) + + const partsByEtags = parts.map(async (part) => { + const partFilePath = path.join(multiPartFolder, `part-${part.PartNumber}`) + const partExists = await fsExtra.pathExists(partFilePath) + + if (partExists) { + const etag = await this.getMetadataAttr(partFilePath, 'etag') + if (etag === part.ETag) { + return partFilePath + } + throw ERRORS.InvalidChecksum(`Invalid ETag for part ${part.PartNumber}`) + } + + throw ERRORS.MissingPart(part.PartNumber || 0, uploadId) + }) + + const finalParts = await Promise.all(partsByEtags) + finalParts.sort((a, b) => parseInt(a.split('-')[1]) - parseInt(b.split('-')[1])) + + const fileStreams = finalParts.map((partPath) => { + return fs.createReadStream(partPath) + }) + + const multistream = new MultiStream(fileStreams) + const metadataContent = await fsExtra.readFile( + path.join(multiPartFolder, 'metadata.json'), + 'utf-8' + ) + + const metadata = JSON.parse(metadataContent) + + const uploaded = await this.uploadObject( + bucketName, + key, + version, + multistream, + metadata.contentType, + metadata.cacheControl + ) + + fsExtra.remove(path.join(this.filePath, 'multiparts', uploadId)).catch(() => { + // no-op + }) + + return { + version: version, + ETag: uploaded.eTag, + bucket: bucketName, + location: `${bucketName}/${key}`, + } + } + + async abortMultipartUpload( + bucketName: string, + key: string, + uploadId: string, + version?: string + ): Promise { + const multiPartFolder = path.join(this.filePath, 'multiparts', uploadId) + + await fsExtra.remove(multiPartFolder) + } + + async uploadPartCopy( + storageS3Bucket: string, + key: string, + version: string, + UploadId: string, + PartNumber: number, + sourceKey: string + ): Promise<{ eTag?: string; lastModified?: Date }> { + throw new Error('Method not implemented.') + } + /** * Returns a private url that can only be accessed internally by the system * @param bucket @@ -282,7 +436,7 @@ export class FileBackend implements StorageBackendAdapter { } protected getMetadataAttr(file: string, attribute: string): Promise { - return xattr.get(file, attribute).then((value) => { + return xattr.get(file, attribute).then((value: any) => { return value?.toString() ?? 
undefined }) } diff --git a/src/storage/backend/index.ts b/src/storage/backend/index.ts index 071fda19..903a5cc1 100644 --- a/src/storage/backend/index.ts +++ b/src/storage/backend/index.ts @@ -1,11 +1,11 @@ -import { StorageBackendAdapter } from './generic' +import { StorageBackendAdapter } from './adapter' import { FileBackend } from './file' import { S3Backend, S3ClientOptions } from './s3' import { getConfig, StorageBackendType } from '../../config' export * from './s3' export * from './file' -export * from './generic' +export * from './adapter' const { storageS3Region, storageS3Endpoint, storageS3ForcePathStyle } = getConfig() diff --git a/src/storage/backend/s3.ts b/src/storage/backend/s3.ts index e3bcfa9c..2b554edc 100644 --- a/src/storage/backend/s3.ts +++ b/src/storage/backend/s3.ts @@ -1,13 +1,21 @@ import { + AbortMultipartUploadCommand, + CompleteMultipartUploadCommand, CompleteMultipartUploadCommandOutput, CopyObjectCommand, + CreateMultipartUploadCommand, DeleteObjectCommand, DeleteObjectsCommand, GetObjectCommand, GetObjectCommandInput, HeadObjectCommand, + ListMultipartUploadsCommand, + ListPartsCommand, + PutObjectCommand, S3Client, S3ClientConfig, + UploadPartCommand, + UploadPartCopyCommand, } from '@aws-sdk/client-s3' import { Upload } from '@aws-sdk/lib-storage' import { NodeHttpHandler } from '@smithy/node-http-handler' @@ -17,11 +25,13 @@ import { ObjectMetadata, ObjectResponse, withOptionalVersion, -} from './generic' + UploadPart, +} from './adapter' import { getSignedUrl } from '@aws-sdk/s3-request-presigner' -import { StorageBackendError } from '../errors' +import { ERRORS, StorageBackendError } from '../errors' import { getConfig } from '../../config' import Agent, { HttpsAgent } from 'agentkeepalive' +import { Readable } from 'stream' const { storageS3MaxSockets } = getConfig() @@ -187,23 +197,36 @@ export class S3Backend implements StorageBackendAdapter { * @param version * @param destination * @param destinationVersion + * @param conditions */ async copyObject( bucket: string, source: string, version: string | undefined, destination: string, - destinationVersion: string | undefined - ): Promise> { + destinationVersion: string | undefined, + conditions?: { + ifMatch?: string + ifNoneMatch?: string + ifModifiedSince?: Date + ifUnmodifiedSince?: Date + } + ): Promise> { try { const command = new CopyObjectCommand({ Bucket: bucket, CopySource: `${bucket}/${withOptionalVersion(source, version)}`, Key: withOptionalVersion(destination, destinationVersion), + CopySourceIfMatch: conditions?.ifMatch, + CopySourceIfNoneMatch: conditions?.ifNoneMatch, + CopySourceIfModifiedSince: conditions?.ifModifiedSince, + CopySourceIfUnmodifiedSince: conditions?.ifUnmodifiedSince, }) const data = await this.client.send(command) return { httpStatusCode: data.$metadata.httpStatusCode || 200, + eTag: data.CopyObjectResult?.ETag || '', + lastModified: data.CopyObjectResult?.LastModified, } } catch (e: any) { throw StorageBackendError.fromError(e) @@ -279,4 +302,136 @@ export class S3Backend implements StorageBackendAdapter { const command = new GetObjectCommand(input) return getSignedUrl(this.client, command, { expiresIn: 600 }) } + + async createMultiPartUpload( + bucketName: string, + key: string, + version: string | undefined, + contentType: string, + cacheControl: string + ) { + const createMultiPart = new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: withOptionalVersion(key, version), + CacheControl: cacheControl, + ContentType: contentType, + Metadata: { + Version: 
version || '', + }, + }) + + const resp = await this.client.send(createMultiPart) + + if (!resp.UploadId) { + throw ERRORS.InvalidUploadId() + } + + return resp.UploadId + } + + async uploadPart( + bucketName: string, + key: string, + version: string, + uploadId: string, + partNumber: number, + body?: string | Uint8Array | Buffer | Readable, + length?: number + ) { + const paralellUploadS3 = new UploadPartCommand({ + Bucket: bucketName, + Key: `${key}/${version}`, + UploadId: uploadId, + PartNumber: partNumber, + Body: body, + ContentLength: length, + }) + + const resp = await this.client.send(paralellUploadS3) + + return { + version, + ETag: resp.ETag, + } + } + + async completeMultipartUpload( + bucketName: string, + key: string, + uploadId: string, + version: string, + parts: UploadPart[] + ) { + const keyParts = key.split('/') + + if (parts.length === 0) { + const listPartsInput = new ListPartsCommand({ + Bucket: bucketName, + Key: key + '/' + version, + UploadId: uploadId, + }) + + const partsResponse = await this.client.send(listPartsInput) + parts = partsResponse.Parts || [] + } + + const completeUpload = new CompleteMultipartUploadCommand({ + Bucket: bucketName, + Key: key + '/' + version, + UploadId: uploadId, + MultipartUpload: + parts.length === 0 + ? undefined + : { + Parts: parts, + }, + }) + + const response = await this.client.send(completeUpload) + + const locationParts = key.split('/') + locationParts.shift() // tenant-id + const bucket = keyParts.shift() + + return { + version, + location: keyParts.join('/'), + bucket, + ...response, + } + } + + async abortMultipartUpload(bucketName: string, key: string, uploadId: string): Promise { + const abortUpload = new AbortMultipartUploadCommand({ + Bucket: bucketName, + Key: key, + UploadId: uploadId, + }) + await this.client.send(abortUpload) + } + + async uploadPartCopy( + storageS3Bucket: string, + key: string, + version: string, + UploadId: string, + PartNumber: number, + sourceKey: string, + sourceKeyVersion?: string + ) { + const uploadPartCopy = new UploadPartCopyCommand({ + Bucket: storageS3Bucket, + Key: withOptionalVersion(key, version), + UploadId, + PartNumber, + CopySource: `${storageS3Bucket}/${withOptionalVersion(sourceKey, sourceKeyVersion)}`, + }) + + const part = await this.client.send(uploadPartCopy) + + return { + eTag: part.CopyPartResult?.ETag, + lastModified: part.CopyPartResult?.LastModified, + } + } } diff --git a/src/storage/database/adapter.ts b/src/storage/database/adapter.ts index f34cb742..593cbf64 100644 --- a/src/storage/database/adapter.ts +++ b/src/storage/database/adapter.ts @@ -1,4 +1,4 @@ -import { Bucket, Obj } from '../schemas' +import { Bucket, S3MultipartUpload, Obj, S3PartUpload } from '../schemas' import { ObjectMetadata } from '../backend' import { TenantConnection } from '../../database/connection' @@ -77,6 +77,27 @@ export interface Database { deleteBucket(bucketId: string | string[]): Promise listObjects(bucketId: string, columns: string, limit: number): Promise + listObjectsV2( + bucketId: string, + options?: { + prefix?: string + delimiter?: string + nextToken?: string + maxKeys?: number + startAfter?: string + } + ): Promise + + listMultipartUploads( + bucketId: string, + options?: { + prefix?: string + deltimeter?: string + nextUploadToken?: string + nextUploadKeyToken?: string + maxKeys?: number + } + ): Promise listBuckets(columns: string): Promise mustLockObject(bucketId: string, objectName: string, version?: string): Promise @@ -93,7 +114,7 @@ export interface Database { 
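+  // Note: updateObject now also accepts bucket_id, so a single update can re-home an
+  // object to a different bucket.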
updateObject( bucketId: string, name: string, - data: Pick + data: Pick ): Promise createObject( @@ -120,4 +141,33 @@ export interface Database { healthcheck(): Promise destroyConnection(): Promise + + createMultipartUpload( + uploadId: string, + bucketId: string, + objectName: string, + version: string, + signature: string, + owner?: string + ): Promise + + findMultipartUpload( + uploadId: string, + columns: string, + options?: { forUpdate?: boolean } + ): Promise + + updateMultipartUploadProgress( + uploadId: string, + progress: number, + signature: string + ): Promise + + deleteMultipartUpload(uploadId: string): Promise + + insertUploadPart(part: S3PartUpload): Promise + listParts( + uploadId: string, + options: { afterPart?: string; maxParts: number } + ): Promise } diff --git a/src/storage/database/knex.ts b/src/storage/database/knex.ts index 67b6ae0e..7d345498 100644 --- a/src/storage/database/knex.ts +++ b/src/storage/database/knex.ts @@ -1,5 +1,12 @@ -import { Bucket, Obj } from '../schemas' -import { RenderableError, StorageBackendError, StorageError } from '../errors' +import { Bucket, S3MultipartUpload, Obj, S3PartUpload } from '../schemas' +import { + ErrorCode, + ERRORS, + isStorageError, + RenderableError, + StorageBackendError, + StorageErrorOptions, +} from '../errors' import { ObjectMetadata } from '../backend' import { Knex } from 'knex' import { @@ -8,10 +15,9 @@ import { FindBucketFilters, FindObjectFilters, SearchObjectOption, - TransactionOptions, } from './adapter' import { DatabaseError } from 'pg' -import { TenantConnection } from '../../database/connection' +import { TenantConnection } from '../../database' import { DbQueryPerformance } from '../../monitoring/metrics' import { isUuid } from '../limits' @@ -36,45 +42,27 @@ export class StorageKnexDB implements Database { } //eslint-disable-next-line @typescript-eslint/no-explicit-any - async withTransaction Promise>( - fn: T, - transactionOptions?: TransactionOptions - ) { - let retryLeft = transactionOptions?.retry || 1 - let error: Error | undefined | unknown - - while (retryLeft > 0) { - try { - const tnx = await this.connection.transactionProvider(this.options.tnx)() - - try { - await this.connection.setScope(tnx) - - tnx.once('query-error', (error) => { - throw DBError.fromDBError(error) - }) - - const opts = { ...this.options, tnx } - const storageWithTnx = new StorageKnexDB(this.connection, opts) - - const result: Awaited> = await fn(storageWithTnx) - await tnx.commit() - return result - } catch (e) { - await tnx.rollback() - throw e - } finally { - tnx.removeAllListeners() - } - } catch (e) { - error = e - } finally { - retryLeft-- - } - } + async withTransaction Promise>(fn: T) { + const tnx = await this.connection.transactionProvider(this.options.tnx)() + + try { + await this.connection.setScope(tnx) + + tnx.once('query-error', (error, q) => { + throw DBError.fromDBError(error, q.sql) + }) - if (error) { - throw error + const opts = { ...this.options, tnx } + const storageWithTnx = new StorageKnexDB(this.connection, opts) + + const result: Awaited> = await fn(storageWithTnx) + await tnx.commit() + return result + } catch (e) { + await tnx.rollback() + throw e + } finally { + tnx.removeAllListeners() } } @@ -98,10 +86,10 @@ export class StorageKnexDB implements Database { try { await this.withTransaction(async (db) => { result = await fn(db) - throw new StorageBackendError('permission_ok', 200, 'permission pass') + throw true }) } catch (e) { - if (e instanceof StorageBackendError && e.name === 
'permission_ok') { + if (e === true) { return result } throw e @@ -124,17 +112,22 @@ export class StorageKnexDB implements Database { file_size_limit: data.file_size_limit, } - const bucket = await this.runQuery('CreateBucket', async (knex) => { - return knex.from('buckets').insert(bucketData) as Promise<{ rowCount: number }> - }) - - if (bucket.rowCount === 0) { - throw new DBError('Bucket not found', 404, 'Bucket not found', undefined, { - bucketId: data.id, + try { + const bucket = await this.runQuery('CreateBucket', async (knex) => { + return knex.from('buckets').insert(bucketData) as Promise<{ rowCount: number }> }) - } - return bucketData + if (bucket.rowCount === 0) { + throw ERRORS.NoSuchBucket(data.id) + } + + return bucketData + } catch (e) { + if (isStorageError(ErrorCode.ResourceAlreadyExists, e)) { + throw ERRORS.BucketAlreadyExists(data.id, e) + } + throw e + } } async findBucketById(bucketId: string, columns = 'id', filters?: FindBucketFilters) { @@ -157,9 +150,7 @@ export class StorageKnexDB implements Database { }) if (!result && !filters?.dontErrorOnEmpty) { - throw new DBError('Bucket not found', 404, 'Bucket not found', undefined, { - bucketId, - }) + throw ERRORS.NoSuchBucket(bucketId) } return result @@ -198,6 +189,56 @@ export class StorageKnexDB implements Database { return data } + async listObjectsV2( + bucketId: string, + options?: { + prefix?: string + delimiter?: string + nextToken?: string + maxKeys?: number + startAfter?: string + } + ) { + return this.runQuery('ListObjectsV2', async (knex) => { + if (!options?.delimiter) { + const query = knex + .table('objects') + .where('bucket_id', bucketId) + .select(['id', 'name', 'metadata', 'updated_at']) + .limit(options?.maxKeys || 100) + + // knex typing is wrong, it doesn't accept a knex.raw on orderBy, even though is totally legit + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore + query.orderBy(knex.raw('name COLLATE "C"')) + + if (options?.prefix) { + query.where('name', 'ilike', `${options.prefix}%`) + } + + if (options?.nextToken) { + query.andWhere(knex.raw('name COLLATE "C" > ?', [options?.nextToken])) + } + + return query + } + + const query = await knex.raw( + 'select * from storage.list_objects_with_delimiter(?,?,?,?,?,?)', + [ + bucketId, + options?.prefix, + options?.delimiter, + options?.maxKeys, + options?.startAfter || '', + options?.nextToken || '', + ] + ) + + return query.rows + }) + } + async listBuckets(columns = 'id') { const data = await this.runQuery('ListBuckets', (knex) => { return knex.from('buckets').select(columns.split(',')) @@ -206,6 +247,60 @@ export class StorageKnexDB implements Database { return data as Bucket[] } + listMultipartUploads( + bucketId: string, + options?: { + prefix?: string + deltimeter?: string + nextUploadToken?: string + nextUploadKeyToken?: string + maxKeys?: number + } + ) { + return this.runQuery('ListMultipartsUploads', async (knex) => { + if (!options?.deltimeter) { + const query = knex + .table('s3_multipart_uploads') + .select(['id', 'key', 'created_at']) + .where('bucket_id', bucketId) + .limit(options?.maxKeys || 100) + + // knex typing is wrong, it doesn't accept a knex.raw on orderBy, even though is totally legit + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore + query.orderBy(knex.raw('key COLLATE "C", created_at')) + + if (options?.prefix) { + query.where('key', 'ilike', `${options.prefix}%`) + } + + if (options?.nextUploadKeyToken && !options.nextUploadToken) { + 
query.andWhere(knex.raw(`key COLLATE "C" > ?`, [options?.nextUploadKeyToken])) + } + + if (options?.nextUploadToken) { + query.andWhere(knex.raw('id COLLATE "C" > ?', [options?.nextUploadToken])) + } + + return query + } + + const query = await knex.raw( + 'select * from storage.list_multipart_uploads_with_delimiter(?,?,?,?,?,?)', + [ + bucketId, + options?.prefix, + options?.deltimeter, + options?.maxKeys, + options?.nextUploadKeyToken || '', + options.nextUploadToken || '', + ] + ) + + return query.rows + }) + } + async updateBucket( bucketId: string, fields: Pick @@ -219,9 +314,7 @@ export class StorageKnexDB implements Database { }) if (bucket === 0) { - throw new DBError('Bucket not found', 404, 'Bucket not found', undefined, { - bucketId, - }) + throw ERRORS.NoSuchBucket(bucketId) } return @@ -256,7 +349,7 @@ export class StorageKnexDB implements Database { async updateObject( bucketId: string, name: string, - data: Pick + data: Pick ) { const [object] = await this.runQuery('UpdateObject', (knex) => { return knex @@ -266,6 +359,7 @@ export class StorageKnexDB implements Database { .update( { name: data.name, + bucket_id: data.bucket_id, owner: isUuid(data.owner || '') ? data.owner : undefined, owner_id: data.owner, metadata: data.metadata, @@ -276,26 +370,33 @@ export class StorageKnexDB implements Database { }) if (!object) { - throw new DBError('Not Found', 404, 'object not found') + throw ERRORS.NoSuchKey(name) } return object } async createObject(data: Pick) { - const object = { - name: data.name, - owner: isUuid(data.owner || '') ? data.owner : undefined, - owner_id: data.owner, - bucket_id: data.bucket_id, - metadata: data.metadata, - version: data.version, - } - await this.runQuery('CreateObject', (knex) => { - return knex.from('objects').insert(object) - }) + try { + const object = { + name: data.name, + owner: isUuid(data.owner || '') ? data.owner : undefined, + owner_id: data.owner, + bucket_id: data.bucket_id, + metadata: data.metadata, + version: data.version, + } + await this.runQuery('CreateObject', (knex) => { + return knex.from('objects').insert(object) + }) - return object + return object + } catch (e) { + if (isStorageError(ErrorCode.ResourceAlreadyExists, e)) { + throw ERRORS.KeyAlreadyExists(data.name, e) + } + throw e + } } async deleteObject(bucketId: string, objectName: string, version?: string) { @@ -355,9 +456,7 @@ export class StorageKnexDB implements Database { }) if (!object) { - throw new DBError('Object not found', 404, 'not_found', undefined, { - bucketId, - }) + throw ERRORS.NoSuchKey(objectName) } return object @@ -395,13 +494,11 @@ export class StorageKnexDB implements Database { }) if (!object && !filters?.dontErrorOnEmpty) { - throw new DBError('Object not found', 404, 'not_found', undefined, { - bucketId, - }) + throw ERRORS.NoSuchKey(objectName) } return object as typeof filters extends FindObjectFilters - ? (typeof filters)['dontErrorOnEmpty'] extends true + ? FindObjectFilters['dontErrorOnEmpty'] extends true ? 
Obj | undefined : Obj : Obj @@ -426,7 +523,7 @@ export class StorageKnexDB implements Database { const lockAcquired = result.rows.shift()?.pg_try_advisory_xact_lock || false if (!lockAcquired) { - throw new DBError('resource_locked', 409, 'Resource is locked') + throw ERRORS.ResourceLocked() } return true @@ -458,6 +555,96 @@ export class StorageKnexDB implements Database { }) } + async createMultipartUpload( + uploadId: string, + bucketId: string, + objectName: string, + version: string, + signature: string, + owner?: string + ) { + return this.runQuery('CreateMultipartUpload', async (knex) => { + const multipart = await knex + .table('s3_multipart_uploads') + .insert({ + id: uploadId, + bucket_id: bucketId, + key: objectName, + version, + upload_signature: signature, + owner_id: owner, + }) + .returning('*') + + return multipart[0] as S3MultipartUpload + }) + } + + async findMultipartUpload(uploadId: string, columns = 'id', options?: { forUpdate?: boolean }) { + const multiPart = await this.runQuery('FindMultipartUpload', async (knex) => { + const query = knex + .from('s3_multipart_uploads') + .select(columns.split(',')) + .where('id', uploadId) + + if (options?.forUpdate) { + return query.forUpdate().first() + } + return query.first() + }) + + if (!multiPart) { + throw ERRORS.NoSuchUpload(uploadId) + } + return multiPart + } + + async updateMultipartUploadProgress(uploadId: string, progress: number, signature: string) { + return this.runQuery('UpdateMultipartUploadProgress', async (knex) => { + await knex + .from('s3_multipart_uploads') + .update({ in_progress_size: progress, upload_signature: signature }) + .where('id', uploadId) + }) + } + + async deleteMultipartUpload(uploadId: string) { + return this.runQuery('DeleteMultipartUpload', async (knex) => { + await knex.from('s3_multipart_uploads').delete().where('id', uploadId) + }) + } + + async insertUploadPart(part: S3PartUpload) { + return this.runQuery('InsertUploadPart', async (knex) => { + const storedPart = await knex + .table('s3_multipart_uploads_parts') + .insert(part) + .returning('*') + + return storedPart[0] + }) + } + + async listParts( + uploadId: string, + options: { afterPart?: string; maxParts: number } + ): Promise { + return this.runQuery('ListParts', async (knex) => { + const query = knex + .from('s3_multipart_uploads_parts') + .select('etag', 'part_number', 'size', 'upload_id', 'created_at') + .where('upload_id', uploadId) + .orderBy('part_number') + .limit(options.maxParts) + + if (options.afterPart) { + query.andWhere('part_number', '>', options.afterPart) + } + + return query + }) + } + healthcheck() { return this.runQuery('Healthcheck', (knex) => { return knex.raw('SELECT id from storage.buckets limit 1') @@ -525,64 +712,45 @@ export class StorageKnexDB implements Database { } } -export class DBError extends Error implements RenderableError { - constructor( - message: string, - public readonly statusCode: number, - public readonly error: string, - public readonly originalError?: Error, - public readonly metadata?: Record, - public readonly details?: string, - public readonly query?: string - ) { - super(message) - this.message = message +export class DBError extends StorageBackendError implements RenderableError { + constructor(options: StorageErrorOptions) { + super(options) Object.setPrototypeOf(this, DBError.prototype) } static fromDBError(pgError: DatabaseError, query?: string) { - let message = 'Internal Server Error' - let statusCode = 500 - let error = 'internal' - switch (pgError.code) { case '42501': - 
message = 'new row violates row-level security policy' - statusCode = 403 - error = 'Unauthorized' - break + return ERRORS.AccessDenied( + 'new row violates row-level security policy', + pgError + ).withMetadata({ + query, + code: pgError.code, + }) case '23505': - message = 'The resource already exists' - statusCode = 409 - error = 'Duplicate' - break + return ERRORS.ResourceAlreadyExists(pgError).withMetadata({ + query, + code: pgError.code, + }) case '23503': - message = 'The parent resource is not found' - statusCode = 404 - error = 'Not Found' - break + return ERRORS.RelatedResourceNotFound(pgError).withMetadata({ + query, + code: pgError.code, + }) case '55P03': case 'resource_locked': - message = 'Resource Locked, an upload might be in progress for this resource' - statusCode = 400 - error = 'resource_locked' - break - } - - return new DBError(message, statusCode, error, pgError, undefined, pgError.message, query) - } - - render(): StorageError { - return { - message: this.message, - statusCode: `${this.statusCode}`, - error: this.error, + return ERRORS.ResourceLocked(pgError).withMetadata({ + query, + code: pgError.code, + }) + default: + return ERRORS.DatabaseError(pgError.message, pgError).withMetadata({ + query, + code: pgError.code, + }) } } - - getOriginalError() { - return this.originalError - } } export default function hashStringToInt(str: string): number { diff --git a/src/storage/errors.ts b/src/storage/errors.ts index 90cd5aa1..68bfef50 100644 --- a/src/storage/errors.ts +++ b/src/storage/errors.ts @@ -2,16 +2,369 @@ import { S3ServiceException } from '@aws-sdk/client-s3' export type StorageError = { statusCode: string + code: ErrorCode error: string message: string query?: string } +export enum ErrorCode { + NoSuchBucket = 'NoSuchBucket', + NoSuchKey = 'NoSuchKey', + NoSuchUpload = 'NoSuchUpload', + InvalidJWT = 'InvalidJWT', + InvalidRequest = 'InvalidRequest', + TenantNotFound = 'TenantNotFound', + EntityTooLarge = 'EntityTooLarge', + InternalError = 'InternalError', + ResourceAlreadyExists = 'ResourceAlreadyExists', + InvalidBucketName = 'InvalidBucketName', + InvalidKey = 'InvalidKey', + InvalidRange = 'InvalidRange', + InvalidMimeType = 'InvalidMimeType', + InvalidUploadId = 'InvalidUploadId', + KeyAlreadyExists = 'KeyAlreadyExists', + BucketAlreadyExists = 'BucketAlreadyExists', + DatabaseTimeout = 'DatabaseTimeout', + InvalidSignature = 'InvalidSignature', + SignatureDoesNotMatch = 'SignatureDoesNotMatch', + AccessDenied = 'AccessDenied', + ResourceLocked = 'ResourceLocked', + DatabaseError = 'DatabaseError', + MissingContentLength = 'MissingContentLength', + MissingParameter = 'MissingParameter', + InvalidUploadSignature = 'InvalidUploadSignature', + LockTimeout = 'LockTimeout', + S3Error = 'S3Error', + S3InvalidAccessKeyId = 'InvalidAccessKeyId', + S3MaximumCredentialsLimit = 'MaximumCredentialsLimit', + InvalidChecksum = 'InvalidChecksum', + MissingPart = 'MissingPart', + SlowDown = 'SlowDown', +} + +export const ERRORS = { + BucketNotEmpty: (bucket: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidRequest, + resource: bucket, + httpStatusCode: 409, + message: `The bucket you tried to delete is not empty`, + originalError: e, + }), + NoSuchBucket: (bucket: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.NoSuchBucket, + resource: bucket, + error: 'Bucket not found', + httpStatusCode: 404, + message: `Bucket not found`, + originalError: e, + }), + NoSuchUpload: (uploadId: string, e?: Error) => + new 
StorageBackendError({ + code: ErrorCode.NoSuchUpload, + resource: uploadId, + httpStatusCode: 404, + message: `Upload not found`, + originalError: e, + }), + NoSuchKey: (resource: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.NoSuchKey, + resource, + error: 'not_found', + httpStatusCode: 404, + message: `Object not found`, + originalError: e, + }), + + MissingParameter: (parameter: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.MissingParameter, + httpStatusCode: 400, + message: `Missing Required Parameter ${parameter}`, + originalError: e, + }), + + InvalidJWT: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidJWT, + httpStatusCode: 400, + message: e?.message || 'Invalid JWT', + }), + + MissingContentLength: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.MissingContentLength, + httpStatusCode: 400, + message: e?.message || 'You must provide the Content-Length HTTP header.', + }), + + AccessDenied: (action: string, e?: Error) => + new StorageBackendError({ + error: 'Unauthorized', + code: ErrorCode.AccessDenied, + httpStatusCode: 403, + message: action || 'Access denied', + originalError: e, + }), + + ResourceAlreadyExists: (e?: Error) => + new StorageBackendError({ + error: 'Duplicate', + code: ErrorCode.ResourceAlreadyExists, + httpStatusCode: 409, + message: 'The resource already exists', + originalError: e, + }), + + MetadataRequired: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidRequest, + httpStatusCode: 400, + message: 'Metadata header is required', + originalError: e, + }), + + SignatureDoesNotMatch: (message?: string) => + new StorageBackendError({ + code: ErrorCode.SignatureDoesNotMatch, + httpStatusCode: 403, + message: message || 'Signature does not match', + }), + + InvalidSignature: (message?: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidSignature, + httpStatusCode: 400, + message: message || 'Invalid signature', + originalError: e, + }), + + ExpiredSignature: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidSignature, + httpStatusCode: 400, + message: 'Expired signature', + originalError: e, + }), + + InvalidXForwardedHeader: (message?: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidRequest, + httpStatusCode: 400, + message: message || 'Invalid X-Forwarded-Host header', + originalError: e, + }), + + InvalidTenantId: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.TenantNotFound, + httpStatusCode: 400, + message: e?.message || 'Invalid tenant id', + originalError: e, + }), + + InvalidUploadId: (message?: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidUploadId, + httpStatusCode: 400, + message: message || 'Invalid upload id', + originalError: e, + }), + + MissingTenantConfig: (tenantId: string) => + new StorageBackendError({ + code: ErrorCode.TenantNotFound, + httpStatusCode: 400, + message: `Missing tenant config for tenant ${tenantId}`, + }), + + InvalidMimeType: (mimeType: string) => + new StorageBackendError({ + error: 'invalid_mime_type', + code: ErrorCode.InvalidMimeType, + httpStatusCode: 415, + message: `mime type ${mimeType} is not supported`, + }), + + InvalidRange: () => + new StorageBackendError({ + error: 'invalid_range', + code: ErrorCode.InvalidRange, + httpStatusCode: 400, + message: `invalid range provided`, + }), + + EntityTooLarge: (e?: Error) => + new StorageBackendError({ + error: 'Payload too large', + code: ErrorCode.EntityTooLarge, + 
httpStatusCode: 413, + message: 'The object exceeded the maximum allowed size', + originalError: e, + }), + + InternalError: (e?: Error, message?: string) => + new StorageBackendError({ + code: ErrorCode.InternalError, + httpStatusCode: 500, + message: message || 'Internal server error', + originalError: e, + }), + + ImageProcessingError: (statusCode: number, message: string, e?: Error) => + new StorageBackendError({ + code: statusCode > 499 ? ErrorCode.InternalError : ErrorCode.InvalidRequest, + httpStatusCode: statusCode, + message: message, + originalError: e, + }), + + InvalidBucketName: (bucket: string, e?: Error) => + new StorageBackendError({ + error: 'Invalid Input', + code: ErrorCode.InvalidBucketName, + resource: bucket, + httpStatusCode: 400, + message: `Bucket name invalid`, + originalError: e, + }), + + InvalidFileSizeLimit: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidRequest, + httpStatusCode: 400, + message: e?.message || 'Invalid file size format, hint: use 20GB / 20MB / 30KB / 3B', + originalError: e, + }), + + InvalidUploadSignature: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidUploadSignature, + httpStatusCode: 400, + message: e?.message || 'Invalid upload Signature', + originalError: e, + }), + + InvalidKey: (key: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidKey, + resource: key, + httpStatusCode: 400, + message: `Invalid key: ${key}`, + originalError: e, + }), + + KeyAlreadyExists: (key: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.KeyAlreadyExists, + resource: key, + error: 'Duplicate', + httpStatusCode: 409, + message: `The resource already exists`, + originalError: e, + }), + + BucketAlreadyExists: (bucket: string, e?: Error) => + new StorageBackendError({ + code: ErrorCode.BucketAlreadyExists, + resource: bucket, + error: 'Duplicate', + httpStatusCode: 409, + message: `The resource already exists`, + originalError: e, + }), + + NoContentProvided: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidRequest, + httpStatusCode: 400, + message: e?.message || 'No content provided', + originalError: e, + }), + + DatabaseTimeout: (e?: Error) => + StorageBackendError.withStatusCode(544, { + code: ErrorCode.DatabaseTimeout, + httpStatusCode: 544, + message: 'The connection to the database timed out', + originalError: e, + }), + + ResourceLocked: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.ResourceLocked, + httpStatusCode: 423, + message: `The resource is locked`, + originalError: e, + }), + + RelatedResourceNotFound: (e?: Error) => + new StorageBackendError({ + code: ErrorCode.InvalidRequest, + httpStatusCode: 404, + message: `The related resource does not exist`, + originalError: e, + }), + + DatabaseError: (message: string, err?: Error) => + new StorageBackendError({ + code: ErrorCode.DatabaseError, + httpStatusCode: 500, + message: message, + originalError: err, + }), + + LockTimeout: (err?: Error) => + new StorageBackendError({ + error: 'acquiring_lock_timeout', + code: ErrorCode.LockTimeout, + httpStatusCode: 503, + message: 'acquiring lock timeout', + originalError: err, + }), + + MissingS3Credentials: () => + new StorageBackendError({ + code: ErrorCode.S3InvalidAccessKeyId, + httpStatusCode: 403, + message: 'The Access Key Id you provided does not exist in our records.', + }), + + MaximumCredentialsLimit: () => + new StorageBackendError({ + code: ErrorCode.S3MaximumCredentialsLimit, + httpStatusCode: 400, + message: 'You have reached the 
maximum number of credentials allowed', + }), + + InvalidChecksum: (message: string) => + new StorageBackendError({ + code: ErrorCode.InvalidChecksum, + httpStatusCode: 400, + message: message, + }), + + MissingPart: (partNumber: number, uploadId: string) => + new StorageBackendError({ + code: ErrorCode.MissingPart, + httpStatusCode: 400, + message: `Part ${partNumber} is missing for upload id ${uploadId}`, + }), +} + +export function isStorageError(errorType: ErrorCode, error: any): error is StorageBackendError { + return error instanceof StorageBackendError && error.code === errorType +} + /** * A renderable error is a handled error * that we want to display to our users */ export interface RenderableError { + error?: string userStatusCode?: number render(): StorageError getOriginalError(): unknown @@ -33,6 +386,15 @@ export function isS3Error(error: unknown): error is S3ServiceException { return !!error && typeof error === 'object' && '$metadata' in error } +export interface StorageErrorOptions { + code: ErrorCode + httpStatusCode: number + message: string + resource?: string + originalError?: unknown + error?: string +} + /** * A generic error that should be always thrown for generic exceptions */ @@ -40,54 +402,71 @@ export class StorageBackendError extends Error implements RenderableError { httpStatusCode: number originalError: unknown userStatusCode: number + resource?: string + code: ErrorCode + metadata?: Record = {} + error?: string // backwards compatible error - constructor(name: string, httpStatusCode: number, message: string, originalError?: unknown) { - super(message) - this.name = name - this.httpStatusCode = httpStatusCode - this.userStatusCode = httpStatusCode === 500 ? 500 : 400 - this.message = message - this.originalError = originalError + constructor(options: StorageErrorOptions) { + super(options.message) + this.code = options.code + this.httpStatusCode = options.httpStatusCode + this.userStatusCode = options.httpStatusCode === 500 ? 500 : 400 + this.message = options.message + this.originalError = options.originalError + this.resource = options.resource + this.error = options.error Object.setPrototypeOf(this, StorageBackendError.prototype) } - static withStatusCode( - name: string, - statusCode: number, - message: string, - originalError?: unknown - ) { - const error = new StorageBackendError(name, statusCode, message, originalError) + static withStatusCode(statusCode: number, options: StorageErrorOptions) { + const error = new StorageBackendError(options) error.userStatusCode = statusCode return error } static fromError(error?: unknown) { - let name: string + let oldErrorMessage: string let httpStatusCode: number let message: string + let code: ErrorCode if (isS3Error(error)) { - name = error.message + code = ErrorCode.S3Error + oldErrorMessage = error.message httpStatusCode = error.$metadata.httpStatusCode ?? 
500 message = error.name } else if (error instanceof Error) { - name = error.name + code = ErrorCode.InternalError + oldErrorMessage = error.name httpStatusCode = 500 message = error.message } else { - name = 'Internal server error' + code = ErrorCode.InternalError + oldErrorMessage = 'Internal server error' httpStatusCode = 500 message = 'Internal server error' } - return new StorageBackendError(name, httpStatusCode, message, error) + return new StorageBackendError({ + error: oldErrorMessage, + code: code, + httpStatusCode, + message, + originalError: error, + }) + } + + withMetadata(metadata: Record) { + this.metadata = metadata + return this } render() { return { statusCode: this.httpStatusCode.toString(), - error: this.name, + code: this.code, + error: this.code, message: this.message, } } diff --git a/src/storage/limits.ts b/src/storage/limits.ts index 3b9f28e5..692e61f1 100644 --- a/src/storage/limits.ts +++ b/src/storage/limits.ts @@ -1,18 +1,27 @@ import { getConfig } from '../config' import { getFileSizeLimit as getFileSizeLimitForTenant, getFeatures } from '../database/tenant' -import { StorageBackendError } from './errors' +import { ERRORS } from './errors' const { isMultitenant, imageTransformationEnabled } = getConfig() /** * Get the maximum file size for a specific project * @param tenantId + * @param maxUpperLimit */ -export async function getFileSizeLimit(tenantId: string): Promise { +export async function getFileSizeLimit( + tenantId: string, + maxUpperLimit?: number | null +): Promise { let { uploadFileSizeLimit } = getConfig() if (isMultitenant) { uploadFileSizeLimit = await getFileSizeLimitForTenant(tenantId) } + + if (maxUpperLimit) { + return Math.min(uploadFileSizeLimit, maxUpperLimit) + } + return uploadFileSizeLimit } @@ -57,11 +66,10 @@ export function isValidBucketName(bucketName: string): boolean { * Validates if a given object key is valid * throws if invalid * @param key - * @param message */ -export function mustBeValidKey(key: string, message: string) { - if (!isValidKey(key)) { - throw new StorageBackendError('Invalid Input', 400, message) +export function mustBeValidKey(key?: string): asserts key is string { + if (!key || !isValidKey(key)) { + throw ERRORS.InvalidKey(key || '') } } @@ -69,11 +77,10 @@ export function mustBeValidKey(key: string, message: string) { * Validates if a given bucket name is valid * throws if invalid * @param key - * @param message */ -export function mustBeValidBucketName(key: string, message: string) { - if (!isValidBucketName(key)) { - throw new StorageBackendError('Invalid Input', 400, message) +export function mustBeValidBucketName(key?: string): asserts key is string { + if (!key || !isValidBucketName(key)) { + throw ERRORS.InvalidBucketName(key || '') } } @@ -81,11 +88,7 @@ export function parseFileSizeToBytes(valueWithUnit: string) { const valuesRegex = /(^[0-9]+(?:\.[0-9]+)?)(gb|mb|kb|b)$/i if (!valuesRegex.test(valueWithUnit)) { - throw new StorageBackendError( - 'file_size_limit', - 422, - 'the requested file_size_limit uses an invalid format, use 20GB / 20MB / 30KB / 3B' - ) + throw ERRORS.InvalidFileSizeLimit() } // eslint-disable-next-line @typescript-eslint/no-non-null-assertion @@ -102,11 +105,7 @@ export function parseFileSizeToBytes(valueWithUnit: string) { case 'B': return value default: - throw new StorageBackendError( - 'file_size_limit', - 422, - 'the requested file_size_limit unit is not supported, use GB/MB/KB/B' - ) + throw ERRORS.InvalidFileSizeLimit() } } diff --git a/src/storage/object.ts 
b/src/storage/object.ts index 5a6c0f23..55dc1f56 100644 --- a/src/storage/object.ts +++ b/src/storage/object.ts @@ -14,7 +14,7 @@ import { ObjectUpdatedMetadata, } from '../queue' import { randomUUID } from 'crypto' -import { StorageBackendError } from './errors' +import { ERRORS } from './errors' import { getJwtSecret } from '../database/tenant' export interface UploadObjectOptions { @@ -55,7 +55,7 @@ export class ObjectStorage { * @param options */ async uploadNewObject(request: FastifyRequest, options: UploadObjectOptions) { - mustBeValidKey(options.objectName, 'The object name contains invalid characters') + mustBeValidKey(options.objectName) const path = `${this.bucketId}/${options.objectName}` @@ -74,7 +74,7 @@ export class ObjectStorage { } public async uploadOverridingObject(request: FastifyRequest, options: UploadObjectOptions) { - mustBeValidKey(options.objectName, 'The object name contains invalid characters') + mustBeValidKey(options.objectName) const path = `${this.bucketId}/${options.objectName}` @@ -115,16 +115,14 @@ export class ObjectStorage { const deleted = await db.deleteObject(this.bucketId, objectName) if (!deleted) { - throw new StorageBackendError('not_found', 404, 'Object Not Found') + throw ERRORS.NoSuchKey(objectName) } - await ObjectAdminDelete.send({ - tenant: this.db.tenant(), - name: objectName, - bucketId: this.bucketId, - version: obj.version, - reqId: this.db.reqId, - }) + await this.backend.deleteObject( + storageS3Bucket, + `${this.db.tenantId}/${this.bucketId}/${objectName}`, + obj.version + ) }) await ObjectRemoved.sendWebhook({ @@ -197,7 +195,7 @@ export class ObjectStorage { * @param metadata */ async updateObjectMetadata(objectName: string, metadata: ObjectMetadata) { - mustBeValidKey(objectName, 'The object name contains invalid characters') + mustBeValidKey(objectName) const result = await this.db.updateObjectMetadata(this.bucketId, objectName, metadata) @@ -228,7 +226,7 @@ export class ObjectStorage { * @param filters */ async findObject(objectName: string, columns = 'id', filters?: FindObjectFilters) { - mustBeValidKey(objectName, 'The object name contains invalid characters') + mustBeValidKey(objectName) return this.db.findObject(this.bucketId, objectName, columns, filters) } @@ -245,23 +243,29 @@ export class ObjectStorage { /** * Copies an existing remote object to a given location * @param sourceKey + * @param destinationBucket * @param destinationKey * @param owner + * @param conditions */ - async copyObject(sourceKey: string, destinationKey: string, owner?: string) { - mustBeValidKey(destinationKey, 'The destination object name contains invalid characters') - - if (sourceKey === destinationKey) { - return { - destObject: undefined, - httpStatusCode: 200, - } + async copyObject( + sourceKey: string, + destinationBucket: string, + destinationKey: string, + owner?: string, + conditions?: { + ifMatch?: string + ifNoneMatch?: string + ifModifiedSince?: Date + ifUnmodifiedSince?: Date } + ) { + mustBeValidKey(destinationKey) const newVersion = randomUUID() const bucketId = this.bucketId const s3SourceKey = `${this.db.tenantId}/${bucketId}/${sourceKey}` - const s3DestinationKey = `${this.db.tenantId}/${bucketId}/${destinationKey}` + const s3DestinationKey = `${this.db.tenantId}/${destinationBucket}/${destinationKey}` try { // We check if the user has permission to copy the object to the destination key @@ -271,8 +275,19 @@ export class ObjectStorage { 'bucket_id,metadata,version' ) + if (sourceKey === destinationKey) { + return { + destObject: 
originObject, + httpStatusCode: 200, + eTag: originObject.metadata?.eTag, + lastModified: originObject.metadata?.lastModified + ? new Date(originObject.metadata.lastModified as string) + : undefined, + } + } + await this.uploader.canUpload({ - bucketId: this.bucketId, + bucketId: destinationBucket, objectName: destinationKey, owner, isUpsert: false, @@ -283,13 +298,15 @@ export class ObjectStorage { s3SourceKey, originObject.version, s3DestinationKey, - newVersion + newVersion, + conditions ) const metadata = await this.backend.headObject(storageS3Bucket, s3DestinationKey, newVersion) const destObject = await this.db.createObject({ ...originObject, + bucket_id: destinationBucket, name: destinationKey, owner, metadata, @@ -307,6 +324,8 @@ export class ObjectStorage { return { destObject, httpStatusCode: copyResult.httpStatusCode, + eTag: copyResult.eTag, + lastModified: copyResult.lastModified, } } catch (e) { await ObjectAdminDelete.send({ @@ -323,19 +342,21 @@ export class ObjectStorage { /** * Moves an existing remote object to a given location * @param sourceObjectName + * @param destinationBucket * @param destinationObjectName * @param owner */ - async moveObject(sourceObjectName: string, destinationObjectName: string, owner?: string) { - mustBeValidKey(destinationObjectName, 'The destination object name contains invalid characters') - - if (sourceObjectName === destinationObjectName) { - return - } + async moveObject( + sourceObjectName: string, + destinationBucket: string, + destinationObjectName: string, + owner?: string + ) { + mustBeValidKey(destinationObjectName) const newVersion = randomUUID() const s3SourceKey = `${this.db.tenantId}/${this.bucketId}/${sourceObjectName}` - const s3DestinationKey = `${this.db.tenantId}/${this.bucketId}/${destinationObjectName}` + const s3DestinationKey = `${this.db.tenantId}/${destinationBucket}/${destinationObjectName}` await this.db.testPermission((db) => { return Promise.all([ @@ -343,6 +364,7 @@ export class ObjectStorage { db.updateObject(this.bucketId, sourceObjectName, { name: destinationObjectName, version: newVersion, + bucket_id: destinationBucket, owner, }), ]) @@ -352,6 +374,12 @@ export class ObjectStorage { .asSuperUser() .findObject(this.bucketId, sourceObjectName, 'id, version') + if (sourceObjectName === destinationObjectName) { + return { + destObject: sourceObj, + } + } + try { await this.backend.copyObject( storageS3Bucket, @@ -363,13 +391,17 @@ export class ObjectStorage { const metadata = await this.backend.headObject(storageS3Bucket, s3DestinationKey, newVersion) - await this.db.asSuperUser().withTransaction(async (db) => { - await db.findObject(this.bucketId, sourceObjectName, 'id', { forUpdate: true }) + return this.db.asSuperUser().withTransaction(async (db) => { + const sourceObject = await db.findObject(this.bucketId, sourceObjectName, 'id', { + forUpdate: true, + dontErrorOnEmpty: false, + }) await db.updateObject(this.bucketId, sourceObjectName, { name: destinationObjectName, + bucket_id: destinationBucket, version: newVersion, - owner: sourceObj.owner, + owner: owner, metadata, }) @@ -401,6 +433,17 @@ export class ObjectStorage { reqId: this.db.reqId, }), ]) + + return { + destObject: { + id: sourceObject.id, + name: destinationObjectName, + bucket_id: destinationBucket, + version: newVersion, + owner: owner, + metadata, + }, + } }) } catch (e) { await ObjectAdminDelete.send({ @@ -428,6 +471,16 @@ export class ObjectStorage { return this.db.searchObjects(this.bucketId, prefix, options) } + async 
listObjectsV2(options?: { + prefix?: string + delimiter?: string + nextToken?: string + startAfter?: string + maxKeys?: number + }) { + return this.db.listObjectsV2(this.bucketId, options) + } + /** * Generates a signed url for accessing an object securely * @param objectName @@ -525,7 +578,7 @@ export class ObjectStorage { }) if (found) { - throw new StorageBackendError('Duplicate', 409, 'The resource already exists') + throw ERRORS.KeyAlreadyExists(objectName) } // check if user has INSERT permissions diff --git a/src/storage/protocols/s3/byte-limit-stream.ts b/src/storage/protocols/s3/byte-limit-stream.ts new file mode 100644 index 00000000..c14008c3 --- /dev/null +++ b/src/storage/protocols/s3/byte-limit-stream.ts @@ -0,0 +1,20 @@ +import { Transform, TransformCallback } from 'stream' +import { ERRORS } from '../../errors' + +export class ByteLimitTransformStream extends Transform { + bytesProcessed = 0 + + constructor(private readonly limit: number) { + super() + } + + _transform(chunk: Buffer, encoding: BufferEncoding, callback: TransformCallback) { + this.bytesProcessed += chunk.length + + if (this.bytesProcessed > this.limit) { + callback(ERRORS.EntityTooLarge()) + } else { + callback(null, chunk) + } + } +} diff --git a/src/storage/protocols/s3/index.ts b/src/storage/protocols/s3/index.ts new file mode 100644 index 00000000..4f7a2380 --- /dev/null +++ b/src/storage/protocols/s3/index.ts @@ -0,0 +1 @@ +export * from './signature-v4' diff --git a/src/storage/protocols/s3/s3-handler.ts b/src/storage/protocols/s3/s3-handler.ts new file mode 100644 index 00000000..4e52f78d --- /dev/null +++ b/src/storage/protocols/s3/s3-handler.ts @@ -0,0 +1,1173 @@ +import { Storage } from '../../storage' +import { getConfig } from '../../../config' +import { Uploader } from '../../uploader' +import { + AbortMultipartUploadCommandInput, + CompleteMultipartUploadCommandInput, + CopyObjectCommandInput, + CreateMultipartUploadCommandInput, + DeleteObjectCommandInput, + DeleteObjectsCommandInput, + GetObjectCommandInput, + HeadObjectCommandInput, + ListMultipartUploadsCommandInput, + ListObjectsCommandInput, + ListObjectsV2CommandInput, + ListObjectsV2Output, + ListPartsCommandInput, + PutObjectCommandInput, + UploadPartCommandInput, + UploadPartCopyCommandInput, +} from '@aws-sdk/client-s3' +import { PassThrough, Readable } from 'stream' +import stream from 'stream/promises' +import { getFileSizeLimit, mustBeValidBucketName, mustBeValidKey } from '../../limits' +import { ERRORS } from '../../errors' +import { S3MultipartUpload, Obj } from '../../schemas' +import { decrypt, encrypt } from '../../../auth' +import { ByteLimitTransformStream } from './byte-limit-stream' + +const { storageS3Region, storageS3Bucket } = getConfig() + +export class S3ProtocolHandler { + constructor( + protected readonly storage: Storage, + protected readonly tenantId: string, + protected readonly owner?: string + ) {} + + /** + * Returns the versioning state of a bucket. 
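+ * Object versioning is not implemented by this server, so the handler replies with a
+ * static configuration that marks versioning as suspended. A minimal client-side sketch,
+ * assuming `client` is an S3Client from '@aws-sdk/client-s3' configured with this
+ * server's S3 endpoint and credentials (the bucket name is a placeholder):
+ * @example
+ * const { Status } = await client.send(new GetBucketVersioningCommand({ Bucket: 'avatars' }))
+ * // Status === 'Suspended'
+ *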
+ * default: versioning is suspended + * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html + */ + async getBucketVersioning() { + return { + responseBody: { + VersioningConfiguration: { + Status: 'Suspended', + MfaDelete: 'Disabled', + }, + }, + } + } + + /** + * Returns the Region the bucket resides in + * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLocation.html + */ + async getBucketLocation() { + return { + responseBody: { + LocationConstraint: { + LocationConstraint: storageS3Region, + }, + }, + } + } + + /** + * Returns a list of all buckets owned by the authenticated sender of the request + * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html + */ + async listBuckets() { + const buckets = await this.storage.listBuckets('name,created_at') + + return { + responseBody: { + ListAllMyBucketsResult: { + Buckets: { + Bucket: buckets.map((bucket) => ({ + Name: bucket.name, + CreationDate: bucket.created_at + ? new Date(bucket.created_at || '').toISOString() + : undefined, + })), + }, + }, + }, + } + } + + /** + * Creates a new S3 bucket. + * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html + * + * @param Bucket + * @param isPublic + */ + async createBucket(Bucket: string, isPublic: boolean) { + mustBeValidBucketName(Bucket || '') + + await this.storage.createBucket({ + name: Bucket, + id: Bucket, + public: isPublic, + owner: this.owner, + }) + + return { + headers: { + Location: `/${Bucket}`, + }, + } + } + + /** + * Deletes the S3 bucket. All objects in the bucket must be deleted before the bucket itself can be deleted. + * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html + * + * @param name + */ + async deleteBucket(name: string) { + await this.storage.deleteBucket(name) + + return { + statusCode: 204, + } + } + + /** + * You can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK if the bucket exists and you have permission to access it. + * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html + * + * @param name + */ + async headBucket(name: string) { + await this.storage.findBucket(name) + return { + statusCode: 200, + headers: { + 'x-amz-bucket-region': storageS3Region, + }, + } + } + + /** + * Returns some or all (up to 1,000) of the objects in a bucket. 
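+ * Internally this delegates to {@link listObjectsV2} and maps the V1 Marker onto a
+ * ContinuationToken. A minimal client-side sketch, assuming `client` is an S3Client
+ * from '@aws-sdk/client-s3' configured against this server (bucket and prefix names
+ * are placeholders):
+ * @example
+ * const page = await client.send(
+ *   new ListObjectsCommand({ Bucket: 'avatars', Prefix: 'public/', Delimiter: '/', MaxKeys: 100 })
+ * )
+ * // page.Contents lists the matching objects, page.CommonPrefixes the "folders"
+ * // collapsed by the delimiter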
+ * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html + * @param command + */ + async listObjects(command: ListObjectsCommandInput) { + const list = await this.listObjectsV2({ + Bucket: command.Bucket, + Delimiter: command.Delimiter, + EncodingType: command.EncodingType, + MaxKeys: command.MaxKeys, + Prefix: command.Prefix, + ContinuationToken: command.Marker, + }) + + return { + responseBody: { + ListBucketResult: { + Name: list.responseBody.ListBucketResult.Name, + Prefix: list.responseBody.ListBucketResult.Prefix, + Marker: list.responseBody.ListBucketResult.ContinuationToken, + MaxKeys: list.responseBody.ListBucketResult.MaxKeys, + IsTruncated: list.responseBody.ListBucketResult.IsTruncated, + Contents: list.responseBody.ListBucketResult.Contents, + CommonPrefixes: list.responseBody.ListBucketResult.CommonPrefixes, + EncodingType: list.responseBody.ListBucketResult.EncodingType, + }, + }, + } + } + + /** + * List objects in a bucket, implements the ListObjectsV2Command + * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html + * + * @param command + */ + async listObjectsV2(command: ListObjectsV2CommandInput) { + if (!command.Bucket) { + throw ERRORS.MissingParameter('Bucket') + } + + await this.storage.asSuperUser().findBucket(command.Bucket) + + const continuationToken = command.ContinuationToken + const startAfter = command.StartAfter + const encodingType = command.EncodingType + const delimiter = command.Delimiter + const prefix = command.Prefix || '' + const maxKeys = command.MaxKeys + const bucket = command.Bucket + + const limit = Math.min(maxKeys || 1000, 1000) + + const objects = await this.storage.from(bucket).listObjectsV2({ + prefix, + delimiter: delimiter, + maxKeys: limit + 1, + nextToken: continuationToken ? decodeContinuationToken(continuationToken) : undefined, + startAfter, + }) + + let results = objects + let prevPrefix = '' + + if (delimiter) { + const delimitedResults: Obj[] = [] + for (const object of objects) { + let idx = object.name.replace(prefix, '').indexOf(delimiter) + + if (idx >= 0) { + idx = prefix.length + idx + delimiter.length + const currPrefix = object.name.substring(0, idx) + if (currPrefix === prevPrefix) { + continue + } + prevPrefix = currPrefix + delimitedResults.push({ + id: null, + name: command.EncodingType === 'url' ? encodeURIComponent(currPrefix) : currPrefix, + bucket_id: bucket, + owner: '', + metadata: null, + created_at: '', + updated_at: '', + version: '', + }) + continue + } + + delimitedResults.push(object) + } + results = delimitedResults + } + + let isTruncated = false + + if (results.length > limit) { + results = results.slice(0, limit) + isTruncated = true + } + + const commonPrefixes = results + .filter((e) => e.id === null) + .map((object) => { + return { + Prefix: object.name, + } + }) + + const contents = + results + .filter((o) => o.id) + .map((o) => ({ + Key: command.EncodingType === 'url' ? encodeURIComponent(o.name) : o.name, + LastModified: (o.updated_at ? new Date(o.updated_at).toISOString() : undefined) as + | Date + | undefined, + ETag: o.metadata?.eTag as string, + Size: o.metadata?.size as number, + StorageClass: 'STANDARD' as const, + })) || [] + + const nextContinuationToken = isTruncated + ? 
encodeContinuationToken(results[results.length - 1].name) + : undefined + + const response: { ListBucketResult: ListObjectsV2Output } = { + ListBucketResult: { + Name: bucket, + Prefix: prefix, + ContinuationToken: continuationToken, + Contents: contents, + IsTruncated: isTruncated, + MaxKeys: limit, + Delimiter: delimiter, + EncodingType: encodingType, + KeyCount: results.length, + CommonPrefixes: commonPrefixes, + NextContinuationToken: nextContinuationToken, + }, + } + + return { + responseBody: response, + } + } + + /** + * This operation lists in-progress multipart uploads in a bucket. + * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html + * + * @param command + */ + async listMultipartUploads(command: ListMultipartUploadsCommandInput) { + if (!command.Bucket) { + throw ERRORS.MissingParameter('Bucket') + } + + await this.storage.asSuperUser().findBucket(command.Bucket) + + const keyContinuationToken = command.KeyMarker + const uploadContinuationToken = command.UploadIdMarker + + const encodingType = command.EncodingType + const delimiter = command.Delimiter + const prefix = command.Prefix || '' + const maxKeys = command.MaxUploads + const bucket = command.Bucket + + const limit = maxKeys || 200 + + const multipartUploads = await this.storage.db.listMultipartUploads(bucket, { + prefix, + deltimeter: delimiter, + maxKeys: limit + 1, + nextUploadKeyToken: keyContinuationToken + ? decodeContinuationToken(keyContinuationToken) + : undefined, + nextUploadToken: uploadContinuationToken + ? decodeContinuationToken(uploadContinuationToken) + : undefined, + }) + + let results: Partial[] = multipartUploads + let prevPrefix = '' + + if (delimiter) { + const delimitedResults: Partial[] = [] + for (const object of multipartUploads) { + let idx = object.key.replace(prefix, '').indexOf(delimiter) + + if (idx >= 0) { + idx = prefix.length + idx + delimiter.length + const currPrefix = object.key.substring(0, idx) + if (currPrefix === prevPrefix) { + continue + } + prevPrefix = currPrefix + delimitedResults.push({ + isFolder: true, + id: object.id, + key: command.EncodingType === 'url' ? encodeURIComponent(currPrefix) : currPrefix, + bucket_id: bucket, + }) + continue + } + + delimitedResults.push(object) + } + results = delimitedResults + } + + let isTruncated = false + + if (results.length > limit) { + results = results.slice(0, limit) + isTruncated = true + } + + const commonPrefixes = results + .filter((e) => e.isFolder) + .map((object) => { + return { + Prefix: object.key, + } + }) + + const uploads = + results + .filter((o) => !o.isFolder) + .map((o) => ({ + Key: command.EncodingType === 'url' && o.key ? encodeURIComponent(o.key) : o.key, + Initiated: o.created_at ? new Date(o.created_at).toISOString() : undefined, + UploadId: o.id, + StorageClass: 'STANDARD', + })) || [] + + let keyNextContinuationToken: string | undefined + let uploadNextContinuationToken: string | undefined + + if (isTruncated) { + const lastItem = results[results.length - 1] + keyNextContinuationToken = encodeContinuationToken(lastItem.key!) + uploadNextContinuationToken = encodeContinuationToken(lastItem.id!) 
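+ // Both markers are opaque cursors: encodeContinuationToken base64-encodes the last
+ // key / upload id seen on this page, and the client echoes them back as KeyMarker /
+ // UploadIdMarker on its next request, where decodeContinuationToken unwraps them.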
+ } + + const response = { + ListMultipartUploadsResult: { + Name: bucket, + Prefix: prefix, + KeyMarker: keyContinuationToken, + UploadIdMarker: uploadContinuationToken, + NextKeyMarker: keyNextContinuationToken, + NextUploadIdMarker: uploadNextContinuationToken, + Upload: uploads, + IsTruncated: isTruncated, + MaxUploads: limit, + Delimiter: delimiter, + EncodingType: encodingType, + KeyCount: results.length, + CommonPrefixes: commonPrefixes, + }, + } + + return { + responseBody: response, + } + } + + /** + * This action initiates a multipart upload and returns an upload ID + * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html + * + * @param command + */ + async createMultiPartUpload(command: CreateMultipartUploadCommandInput) { + const uploader = new Uploader(this.storage.backend, this.storage.db) + const { Bucket, Key } = command + + mustBeValidBucketName(Bucket) + mustBeValidKey(Key) + + const bucket = await this.storage.asSuperUser().findBucket(Bucket, 'id,allowed_mime_types') + + if (command.ContentType && bucket.allowed_mime_types && bucket.allowed_mime_types.length > 0) { + uploader.validateMimeType(command.ContentType, bucket.allowed_mime_types || []) + } + + // Create Multi Part Upload + const version = await uploader.prepareUpload({ + bucketId: command.Bucket as string, + objectName: command.Key as string, + isUpsert: true, + owner: this.owner, + }) + + const uploadId = await this.storage.backend.createMultiPartUpload( + storageS3Bucket, + `${this.tenantId}/${command.Bucket}/${command.Key}`, + version, + command.ContentType || '', + command.CacheControl || '' + ) + + if (!uploadId) { + throw ERRORS.InvalidUploadId(uploadId) + } + + const signature = this.uploadSignature({ in_progress_size: 0 }) + await this.storage.db + .asSuperUser() + .createMultipartUpload(uploadId, Bucket, Key, version, signature, this.owner) + + return { + responseBody: { + InitiateMultipartUploadResult: { + Bucket: command.Bucket, + Key: `${command.Key}`, + UploadId: uploadId, + }, + }, + } + } + + /** + * Completes a multipart upload by assembling previously uploaded parts. 
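+ * If the request omits the part list, the handler falls back to the parts it has
+ * recorded for this upload id. A minimal client-side sketch, assuming `client` is an
+ * S3Client from '@aws-sdk/client-s3' configured against this server, `uploadId` came
+ * from CreateMultipartUpload, and `parts` holds the { ETag, PartNumber } pairs returned
+ * by the UploadPart calls:
+ * @example
+ * await client.send(
+ *   new CompleteMultipartUploadCommand({
+ *     Bucket: 'avatars',
+ *     Key: 'public/photo.png',
+ *     UploadId: uploadId,
+ *     MultipartUpload: { Parts: parts },
+ *   })
+ * )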
+ * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html + * + * @param command + */ + async completeMultiPartUpload(command: CompleteMultipartUploadCommandInput) { + const uploader = new Uploader(this.storage.backend, this.storage.db) + const { Bucket, Key, UploadId } = command + + if (!UploadId) { + throw ERRORS.InvalidUploadId() + } + + await uploader.canUpload({ + bucketId: Bucket as string, + objectName: Key as string, + isUpsert: true, + owner: this.owner, + }) + + const multiPartUpload = await this.storage.db + .asSuperUser() + .findMultipartUpload(UploadId, 'id,version') + + const parts = command.MultipartUpload?.Parts || [] + + if (parts.length === 0) { + const allParts = await this.storage.db.asSuperUser().listParts(UploadId, { + maxParts: 1000, + }) + + parts.push( + ...allParts.map((part) => ({ + PartNumber: part.part_number, + ETag: part.etag, + })) + ) + } + + const resp = await this.storage.backend.completeMultipartUpload( + storageS3Bucket, + `${this.tenantId}/${Bucket}/${Key}`, + UploadId as string, + multiPartUpload.version, + parts + ) + + const metadata = await this.storage.backend.headObject( + storageS3Bucket, + `${this.tenantId}/${Bucket}/${Key}`, + resp.version + ) + + await uploader.completeUpload({ + bucketId: Bucket as string, + objectName: Key as string, + version: resp.version, + isUpsert: true, + isMultipart: false, + objectMetadata: metadata, + owner: this.owner, + }) + + await this.storage.db.asSuperUser().deleteMultipartUpload(UploadId) + + return { + responseBody: { + CompleteMultipartUploadResult: { + Location: `${Bucket}/${Key}`, + Bucket: Bucket, + Key: Key, + ChecksumCRC32: resp.ChecksumCRC32, + ChecksumCRC32C: resp.ChecksumCRC32, + ChecksumSHA1: resp.ChecksumSHA1, + ChecksumSHA256: resp.ChecksumSHA256, + ETag: resp.ETag, + }, + }, + } + } + + /** + * Uploads a part in a multipart upload. + * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html + * @param command + */ + async uploadPart(command: UploadPartCommandInput) { + const { Bucket, PartNumber, UploadId, Key, Body, ContentLength } = command + + if (!UploadId) { + throw ERRORS.InvalidUploadId() + } + + if (!Bucket) { + throw ERRORS.MissingParameter('Bucket') + } + + if (typeof ContentLength === 'undefined') { + throw ERRORS.MissingContentLength() + } + + const bucket = await this.storage.asSuperUser().findBucket(Bucket, 'file_size_limit') + const maxFileSize = await getFileSizeLimit(this.storage.db.tenantId, bucket?.file_size_limit) + + const uploader = new Uploader(this.storage.backend, this.storage.db) + await uploader.canUpload({ + bucketId: Bucket as string, + objectName: Key as string, + owner: this.owner, + isUpsert: true, + }) + + const multipart = await this.shouldAllowPartUpload(UploadId, ContentLength, maxFileSize) + + const proxy = new PassThrough() + + if (Body instanceof Readable) { + proxy.on('error', () => { + Body.unpipe(proxy) + }) + + Body.on('error', () => { + if (!proxy.closed) { + proxy.destroy() + } + }) + } + + const body = Body instanceof Readable ? 
Body.pipe(proxy) : Readable.from(Body as Buffer) + + try { + const uploadPart = await stream.pipeline( + body, + new ByteLimitTransformStream(ContentLength), + async (stream) => { + return this.storage.backend.uploadPart( + storageS3Bucket, + `${this.tenantId}/${Bucket}/${Key}`, + multipart.version, + UploadId, + PartNumber || 0, + stream as Readable, + ContentLength + ) + } + ) + + await this.storage.db.insertUploadPart({ + upload_id: UploadId, + version: multipart.version, + part_number: PartNumber || 0, + etag: uploadPart.ETag || '', + key: Key as string, + bucket_id: Bucket, + owner_id: this.owner, + }) + + return { + headers: { + etag: uploadPart.ETag || '', + }, + } + } catch (e) { + await this.storage.db.asSuperUser().withTransaction(async (db) => { + const multipart = await db.findMultipartUpload(UploadId, 'in_progress_size', { + forUpdate: true, + }) + + const diff = multipart.in_progress_size - ContentLength + const signature = this.uploadSignature({ in_progress_size: diff }) + await db.updateMultipartUploadProgress(UploadId, diff, signature) + }) + + throw e + } + } + + /** + * Adds an object to a bucket. + * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + * + * @param command + */ + async putObject(command: PutObjectCommandInput) { + const uploader = new Uploader(this.storage.backend, this.storage.db) + + mustBeValidBucketName(command.Bucket) + mustBeValidKey(command.Key) + + if ( + command.Key.endsWith('/') && + (command.ContentLength === undefined || command.ContentLength === 0) + ) { + // Consistent with how supabase Storage handles empty folders + command.Key += '.emptyFolderPlaceholder' + } + + const bucket = await this.storage + .asSuperUser() + .findBucket(command.Bucket, 'id,file_size_limit,allowed_mime_types') + + const upload = await uploader.upload(command.Body as any, { + bucketId: command.Bucket as string, + objectName: command.Key as string, + owner: this.owner, + isUpsert: true, + isMultipart: false, + fileSizeLimit: bucket.file_size_limit, + allowedMimeTypes: bucket.allowed_mime_types, + }) + + return { + headers: { + etag: upload.metadata.eTag, + }, + } + } + + /** + * This operation aborts a multipart upload + * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html + * + * @param command + */ + async abortMultipartUpload(command: AbortMultipartUploadCommandInput) { + const { Bucket, Key, UploadId } = command + + if (!UploadId) { + throw ERRORS.InvalidUploadId() + } + + const multipart = await this.storage.db.findMultipartUpload(UploadId, 'id,version') + + await this.storage.backend.abortMultipartUpload( + storageS3Bucket, + `${this.tenantId}/${Bucket}/${Key}`, + UploadId, + multipart.version + ) + + await this.storage.db.asSuperUser().deleteMultipartUpload(UploadId) + + return {} + } + + /** + * The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata. 
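+ * A minimal client-side sketch, assuming `client` is an S3Client from
+ * '@aws-sdk/client-s3' configured against this server (names are placeholders):
+ * @example
+ * const head = await client.send(
+ *   new HeadObjectCommand({ Bucket: 'avatars', Key: 'public/photo.png' })
+ * )
+ * // head.ContentLength, head.ContentType, head.ETag and head.LastModified are
+ * // populated from the headers this handler returns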
+ * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html + * + * @param command + */ + async headObject(command: HeadObjectCommandInput) { + const { Bucket, Key } = command + + if (!Bucket) { + throw ERRORS.MissingParameter('Bucket') + } + + if (!Key) { + throw ERRORS.MissingParameter('Bucket') + } + + const object = await this.storage.from(Bucket).findObject(Key, '*') + + if (!object) { + throw ERRORS.NoSuchKey(Key) + } + + return { + headers: { + 'created-at': (object.created_at as string) || '', + 'cache-control': (object.metadata?.cacheControl as string) || '', + expires: (object.metadata?.expires as string) || '', + 'content-length': (object.metadata?.size as string) || '', + 'content-type': (object.metadata?.contentType as string) || '', + etag: (object.metadata?.eTag as string) || '', + 'last-modified': object.updated_at ? new Date(object.updated_at).toUTCString() || '' : '', + }, + } + } + + /** + * Retrieves an object from Amazon S3. + * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + * + * @param command + */ + async getObject(command: GetObjectCommandInput) { + const bucket = command.Bucket as string + const key = command.Key as string + + const object = await this.storage.from(bucket).findObject(key, '*') + const response = await this.storage.backend.getObject( + storageS3Bucket, + `${this.tenantId}/${bucket}/${key}`, + object.version, + { + ifModifiedSince: command.IfModifiedSince?.toISOString(), + ifNoneMatch: command.IfNoneMatch, + range: command.Range, + } + ) + return { + headers: { + 'cache-control': response.metadata.cacheControl, + 'content-length': response.metadata.contentLength.toString(), + 'content-type': response.metadata.mimetype, + etag: response.metadata.eTag, + 'last-modified': response.metadata.lastModified?.toUTCString() || '', + }, + responseBody: response.body, + statusCode: command.Range ? 206 : 200, + } + } + + /** + * Removes an object from a bucket. + * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html + * + * @param command + */ + async deleteObject(command: DeleteObjectCommandInput) { + const { Bucket, Key } = command + + if (!Bucket) { + throw ERRORS.MissingParameter('Bucket') + } + + if (!Key) { + throw ERRORS.MissingParameter('Key') + } + + await this.storage.from(Bucket).deleteObject(Key) + + return {} + } + + /** + * This operation enables you to delete multiple objects from a bucket using a single HTTP request. 
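+ * Each key is reported back individually, either as deleted or with a per-key error
+ * (for example AccessDenied), rather than failing the whole request. A minimal
+ * client-side sketch, assuming `client` is an S3Client from '@aws-sdk/client-s3'
+ * configured against this server (names are placeholders):
+ * @example
+ * const result = await client.send(
+ *   new DeleteObjectsCommand({
+ *     Bucket: 'avatars',
+ *     Delete: { Objects: [{ Key: 'public/a.png' }, { Key: 'public/b.png' }] },
+ *   })
+ * )
+ * // result.Deleted / result.Errors separate the removed keys from the rejected ones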
+ * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html + * + * @param command + */ + async deleteObjects(command: DeleteObjectsCommandInput) { + const { Bucket, Delete } = command + + if (!Bucket) { + throw ERRORS.MissingParameter('Bucket') + } + + if (!Delete) { + throw ERRORS.MissingParameter('Delete') + } + + if (!Delete.Objects) { + throw ERRORS.MissingParameter('Objects') + } + + if (Delete.Objects.length === 0) { + return {} + } + + const deletedResult = await this.storage + .from(Bucket) + .deleteObjects(Delete.Objects.map((o) => o.Key || '')) + + return { + responseBody: { + DeletedResult: { + Deleted: Delete.Objects.map((o) => { + const isDeleted = deletedResult.find((d) => d.name === o.Key) + if (isDeleted) { + return { + Deleted: { + Key: o.Key, + }, + } + } + + return { + Error: { + Key: o.Key, + Code: 'AccessDenied', + Message: + "You do not have permission to delete this object or the object doesn't exists", + }, + } + }), + }, + }, + } + } + + /** + * Creates a copy of an object that is already stored in Amazon S3. + * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + * + * @param command + */ + async copyObject(command: CopyObjectCommandInput) { + const { Bucket, Key, CopySource } = command + + if (!Bucket) { + throw ERRORS.MissingParameter('Bucket') + } + + if (!Key) { + throw ERRORS.MissingParameter('Key') + } + + if (!CopySource) { + throw ERRORS.MissingParameter('CopySource') + } + + const sourceBucket = ( + CopySource.startsWith('/') ? CopySource.replace('/', '').split('/') : CopySource.split('/') + ).shift() + + const sourceKey = (CopySource.startsWith('/') ? CopySource.replace('/', '') : CopySource) + .split('/') + .slice(1) + .join('/') + + if (!sourceBucket) { + throw ERRORS.InvalidBucketName('') + } + + if (!sourceKey) { + throw ERRORS.MissingParameter('CopySource') + } + + const object = await this.storage.from(sourceBucket).findObject(sourceKey, '*') + + if (!object) { + throw ERRORS.NoSuchKey(sourceKey) + } + + const copyResult = await this.storage + .from(sourceBucket) + .copyObject(sourceKey, Bucket, Key, object.owner, { + ifMatch: command.CopySourceIfMatch, + ifNoneMatch: command.CopySourceIfNoneMatch, + ifModifiedSince: command.CopySourceIfModifiedSince, + ifUnmodifiedSince: command.CopySourceIfUnmodifiedSince, + }) + + return { + responseBody: { + CopyObjectResult: { + ETag: copyResult.eTag, + LastModified: copyResult.lastModified?.toISOString(), + }, + }, + } + } + + /** + * Lists the parts that have been uploaded for a specific multipart upload. + * + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html + * + * @param command + */ + async listParts(command: ListPartsCommandInput) { + if (!command.UploadId) { + throw ERRORS.MissingParameter('UploadId') + } + + // check if multipart exists + await this.storage.db.asSuperUser().findMultipartUpload(command.UploadId, 'id') + + const maxParts = Math.min(command.MaxParts || 1000, 1000) + + let result = await this.storage.db.asSuperUser().listParts(command.UploadId, { + afterPart: command.PartNumberMarker, + maxParts: maxParts + 1, + }) + + const isTruncated = result.length > maxParts + if (isTruncated) { + result = result.slice(0, maxParts) + } + const nextPartNumberMarker = isTruncated ? result[result.length - 1].part_number : undefined + + const parts = result.map((part) => ({ + PartNumber: part.part_number, + LastModified: part.created_at ? 
new Date(part.created_at).toISOString() : undefined, + ETag: part.etag, + })) + + return { + responseBody: { + ListPartsResult: { + Bucket: command.Bucket, + Key: command.Key, + UploadId: command.UploadId, + PartNumberMarker: command.PartNumberMarker, + NextPartNumberMarker: nextPartNumberMarker, + MaxParts: maxParts, + IsTruncated: isTruncated, + Part: parts, + }, + }, + } + } + + /** + * Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header x-amz-copy-source in your request. To specify a byte range, you add the request header x-amz-copy-source-range in your request. + * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + * + * @param command UploadPartCopyCommandInput + */ + async uploadPartCopy(command: UploadPartCopyCommandInput) { + const { Bucket, Key, UploadId, PartNumber, CopySource, CopySourceRange } = command + + if (!UploadId) { + throw ERRORS.MissingParameter('UploadId') + } + + if (!Bucket) { + throw ERRORS.MissingParameter('Bucket') + } + + if (!Key) { + throw ERRORS.MissingParameter('Key') + } + + if (!PartNumber) { + throw ERRORS.MissingParameter('PartNumber') + } + + if (!CopySource) { + throw ERRORS.MissingParameter('CopySource') + } + + if (!CopySourceRange) { + throw ERRORS.MissingParameter('CopySourceRange') + } + + const sourceBucketName = ( + CopySource.startsWith('/') ? CopySource.replace('/', '').split('/') : CopySource.split('/') + ).shift() + + const sourceKey = (CopySource.startsWith('/') ? CopySource.replace('/', '') : CopySource) + .split('/') + .slice(1) + .join('/') + + if (!sourceBucketName) { + throw ERRORS.NoSuchBucket('') + } + + if (!sourceKey) { + throw ERRORS.NoSuchKey('') + } + + const bytes = CopySourceRange.split('=')[1].split('-') + + if (bytes.length !== 2) { + throw ERRORS.InvalidRange() + } + + const fromByte = BigInt(bytes[0]) + const toByte = BigInt(bytes[1]) + + const size = toByte - fromByte + + const uploader = new Uploader(this.storage.backend, this.storage.db) + + await uploader.canUpload({ + bucketId: Bucket as string, + objectName: Key as string, + owner: this.owner, + isUpsert: true, + }) + + // Check if copy source exists + const copySource = await this.storage.db.findObject( + sourceBucketName, + sourceKey, + 'id,name,version' + ) + + const [destinationBucket] = await this.storage.db.asSuperUser().withTransaction(async (db) => { + return Promise.all([ + db.findBucketById(Bucket, 'file_size_limit'), + db.findBucketById(sourceBucketName, 'id'), + ]) + }) + const maxFileSize = await getFileSizeLimit( + this.storage.db.tenantId, + destinationBucket?.file_size_limit + ) + + const multipart = await this.shouldAllowPartUpload(UploadId, Number(size), maxFileSize) + + const uploadPart = await this.storage.backend.uploadPartCopy( + storageS3Bucket, + `${this.tenantId}/${Bucket}/${Key}`, + multipart.version, + UploadId, + PartNumber, + `${this.tenantId}/${sourceBucketName}/${copySource.name}`, + copySource.version + ) + + await this.storage.db.insertUploadPart({ + upload_id: UploadId, + version: multipart.version, + part_number: PartNumber, + etag: uploadPart.eTag || '', + key: Key as string, + bucket_id: Bucket, + owner_id: this.owner, + }) + + return { + responseBody: { + ETag: uploadPart.eTag || '', + LastModified: uploadPart.lastModified ? 
uploadPart.lastModified.toISOString() : undefined, + }, + } + } + + protected uploadSignature({ in_progress_size }: { in_progress_size: number }) { + return `${encrypt('progress:' + in_progress_size.toString())}` + } + + protected decryptUploadSignature(signature: string) { + const originalSignature = decrypt(signature) + const [, value] = originalSignature.split(':') + + return { + progress: parseInt(value, 10), + } + } + + protected async shouldAllowPartUpload( + uploadId: string, + contentLength: number, + maxFileSize: number + ) { + return this.storage.db.asSuperUser().withTransaction(async (db) => { + const multipart = await db.findMultipartUpload( + uploadId, + 'in_progress_size,version,upload_signature', + { + forUpdate: true, + } + ) + + const { progress } = this.decryptUploadSignature(multipart.upload_signature) + + if (progress !== multipart.in_progress_size) { + throw ERRORS.InvalidUploadSignature() + } + + const currentProgress = multipart.in_progress_size + contentLength + + if (currentProgress > maxFileSize) { + throw ERRORS.EntityTooLarge() + } + + const signature = this.uploadSignature({ in_progress_size: currentProgress }) + await db.updateMultipartUploadProgress(uploadId, currentProgress, signature) + return multipart + }) + } +} + +function encodeContinuationToken(name: string) { + return Buffer.from(`l:${name}`).toString('base64') +} + +function decodeContinuationToken(token: string) { + const decoded = Buffer.from(token, 'base64').toString().split(':') + + if (decoded.length === 0) { + throw new Error('Invalid continuation token') + } + + return decoded[1] +} diff --git a/src/storage/protocols/s3/signature-v4.ts b/src/storage/protocols/s3/signature-v4.ts new file mode 100644 index 00000000..771e752d --- /dev/null +++ b/src/storage/protocols/s3/signature-v4.ts @@ -0,0 +1,286 @@ +import crypto from 'crypto' +import { ERRORS } from '../../errors' + +interface SignatureV4Options { + enforceRegion: boolean + credentials: Omit & { secretKey: string } +} + +export interface ClientSignature { + credentials: Credentials + signature: string + signedHeaders: string[] +} + +interface SignatureRequest { + url: string + body?: string | ReadableStream | Buffer + headers: Record + method: string + query?: Record + prefix?: string + credentials: Credentials + signature: string + signedHeaders: string[] +} + +interface Credentials { + accessKey: string + shortDate: string + region: string + service: string +} + +/** + * Lists the headers that should never be included in the + * request signature signature process. 
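+ * When the canonical request is built further down, any header whose lowercase name
+ * appears in this map is dropped from the canonical header list even if the client
+ * listed it in SignedHeaders. A small sketch of that check, with `headers` standing in
+ * for an arbitrary Record<string, string> of request headers:
+ * @example
+ * const signable = Object.keys(headers).filter(
+ *   (name) => !(name.toLowerCase() in ALWAYS_UNSIGNABLE_HEADERS)
+ * )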
+ */ +export const ALWAYS_UNSIGNABLE_HEADERS = { + authorization: true, + 'cache-control': true, + connection: true, + expect: true, + from: true, + 'keep-alive': true, + 'max-forwards': true, + pragma: true, + referer: true, + te: true, + trailer: true, + 'transfer-encoding': true, + upgrade: true, + 'user-agent': true, + 'x-amzn-trace-id': true, +} + +export class SignatureV4 { + public readonly serverCredentials: SignatureV4Options['credentials'] + enforceRegion: boolean + + constructor(options: SignatureV4Options) { + this.serverCredentials = options.credentials + this.enforceRegion = options.enforceRegion + } + + static parseAuthorizationHeader(header: string) { + const parts = header.split(' ') + if (parts[0] !== 'AWS4-HMAC-SHA256') { + throw ERRORS.InvalidSignature('Unsupported authorization type') + } + + const params = header + .replace('AWS4-HMAC-SHA256 ', '') + .split(',') + .reduce((values, value) => { + const [k, v] = value.split('=') + values.set(k.trim(), v) + return values + }, new Map()) + + const credentialPart = params.get('Credential') + const signedHeadersPart = params.get('SignedHeaders') + const signaturePart = params.get('Signature') + + if (!credentialPart || !signedHeadersPart || !signaturePart) { + throw ERRORS.InvalidSignature('Invalid signature format') + } + const signedHeaders = signedHeadersPart.split(';') || [] + + const credentialsPart = credentialPart.split('/') + + if (credentialsPart.length !== 5) { + throw ERRORS.InvalidSignature('Invalid credentials') + } + + const [accessKey, shortDate, region, service] = credentialsPart + + return { + credentials: { + accessKey, + shortDate, + region, + service, + }, + signedHeaders, + signature: signaturePart, + } + } + + verify(request: SignatureRequest) { + const { clientSignature, serverSignature } = this.sign(request) + // Compare the computed signature with the provided signature + return crypto.timingSafeEqual(Buffer.from(clientSignature), Buffer.from(serverSignature)) + } + + sign(request: SignatureRequest) { + const authorizationHeader = this.getHeader(request, 'authorization') + if (!authorizationHeader) { + throw ERRORS.AccessDenied('Missing authorization header') + } + + if (request.credentials.accessKey !== this.serverCredentials.accessKey) { + throw ERRORS.AccessDenied('Invalid Access Key') + } + + // Ensure the region and service match the expected values + if (this.enforceRegion && request.credentials.region !== this.serverCredentials.region) { + throw ERRORS.AccessDenied('Invalid Region') + } + + if (request.credentials.service !== this.serverCredentials.service) { + throw ERRORS.AccessDenied('Invalid Service') + } + + const longDate = request.headers['x-amz-date'] as string + if (!longDate) { + throw ERRORS.AccessDenied('No date header provided') + } + + // When enforcing region is false, we allow the region to be: + // - auto + // - us-east-1 + // - the region set in the env + if ( + !this.enforceRegion && + !['auto', 'us-east-1', this.serverCredentials.region, ''].includes(request.credentials.region) + ) { + throw ERRORS.AccessDenied('Invalid Region') + } + + const selectedRegion = this.enforceRegion + ? 
this.serverCredentials.region + : request.credentials.region + + // Construct the Canonical Request and String to Sign + const canonicalRequest = this.constructCanonicalRequest(request, request.signedHeaders) + const stringToSign = this.constructStringToSign( + longDate, + request.credentials.shortDate, + selectedRegion, + this.serverCredentials.service, + canonicalRequest + ) + + const signingKey = this.signingKey( + this.serverCredentials.secretKey, + request.credentials.shortDate, + selectedRegion, + this.serverCredentials.service + ) + + return { + clientSignature: request.signature, + serverSignature: this.hmac(signingKey, stringToSign).toString('hex'), + } + } + + getPayloadHash(request: SignatureRequest) { + const headers = request.headers + const body = request.body + + for (const headerName of Object.keys(headers)) { + if (headerName.toLowerCase() === 'x-amz-content-sha256') { + return headers[headerName] + } + } + + const contentLenght = parseInt(this.getHeader(request, 'content-length') || '0', 10) + let payloadHash = '' + + if (body === undefined && contentLenght === 0) { + payloadHash = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + } else if (typeof body === 'string' || ArrayBuffer.isView(body)) { + payloadHash = crypto + .createHash('sha256') + .update(typeof body === 'string' ? JSON.stringify(body) : Buffer.from(body.buffer)) + .digest('hex') + } else { + payloadHash = 'UNSIGNED-PAYLOAD' + } + + return payloadHash + } + + constructStringToSign( + date: string, + dateStamp: string, + region: string, + service: string, + canonicalRequest: string + ) { + const algorithm = 'AWS4-HMAC-SHA256' + const credentialScope = `${dateStamp}/${region}/${service}/aws4_request` + const hashedCanonicalRequest = crypto + .createHash('sha256') + .update(canonicalRequest) + .digest('hex') + + return `${algorithm}\n${date}\n${credentialScope}\n${hashedCanonicalRequest}` + } + + hmac(key: string | Buffer, data: string): Buffer { + return crypto.createHmac('sha256', key).update(data).digest() + } + + protected signingKey( + key: string, + dateStamp: string, + regionName: string, + serviceName: string + ): Buffer { + const kDate = this.hmac('AWS4' + key, dateStamp) + const kRegion = this.hmac(kDate, regionName) + const kService = this.hmac(kRegion, serviceName) + return this.hmac(kService, 'aws4_request') + } + + protected constructCanonicalRequest(request: SignatureRequest, signedHeaders: string[]) { + const method = request.method + const canonicalUri = new URL(`http://localhost:8080${request.prefix || ''}${request.url}`) + .pathname + + const canonicalQueryString = Object.keys((request.query as object) || {}) + .sort() + .map( + (key) => + `${encodeURIComponent(key)}=${encodeURIComponent((request.query as any)[key] as string)}` + ) + .join('&') + + const canonicalHeaders = + signedHeaders + .filter( + (header) => + request.headers[header] !== undefined && + !(header.toLowerCase() in ALWAYS_UNSIGNABLE_HEADERS) + ) + .sort() + .map((header) => { + if (header === 'host') { + const xForwardedHost = this.getHeader(request, 'x-forwarded-host') + if (xForwardedHost) { + return `host:${xForwardedHost.toLowerCase()}` + } + } + + return `${header.toLowerCase()}:${ + (request.headers[header.toLowerCase()] || '') as string + }` + }) + .join('\n') + '\n' + + const signedHeadersString = signedHeaders.sort().join(';') + + const payloadHash = this.getPayloadHash(request) + + return 
`${method}\n${canonicalUri}\n${canonicalQueryString}\n${canonicalHeaders}\n${signedHeadersString}\n${payloadHash}` + } + + protected getHeader(request: SignatureRequest, name: string) { + const item = request.headers[name] + if (Array.isArray(item)) { + return item.join(',') + } + return item + } +} diff --git a/src/storage/tus/als-memory-kv.ts b/src/storage/protocols/tus/als-memory-kv.ts similarity index 100% rename from src/storage/tus/als-memory-kv.ts rename to src/storage/protocols/tus/als-memory-kv.ts diff --git a/src/storage/tus/file-store.ts b/src/storage/protocols/tus/file-store.ts similarity index 95% rename from src/storage/tus/file-store.ts rename to src/storage/protocols/tus/file-store.ts index 57462eb0..bd84fb1f 100644 --- a/src/storage/tus/file-store.ts +++ b/src/storage/protocols/tus/file-store.ts @@ -3,7 +3,7 @@ import { Upload } from '@tus/server' import fsExtra from 'fs-extra' import path from 'path' import { Configstore } from '@tus/file-store' -import { FileBackend } from '../backend' +import { FileBackend } from '../../backend' type FileStoreOptions = { directory: string diff --git a/src/storage/tus/index.ts b/src/storage/protocols/tus/index.ts similarity index 100% rename from src/storage/tus/index.ts rename to src/storage/protocols/tus/index.ts diff --git a/src/storage/tus/postgres-locker.ts b/src/storage/protocols/tus/postgres-locker.ts similarity index 94% rename from src/storage/tus/postgres-locker.ts rename to src/storage/protocols/tus/postgres-locker.ts index 47294498..61e516c0 100644 --- a/src/storage/tus/postgres-locker.ts +++ b/src/storage/protocols/tus/postgres-locker.ts @@ -1,9 +1,10 @@ import { Lock, Locker, RequestRelease } from '@tus/server' import { clearTimeout } from 'timers' import EventEmitter from 'events' -import { Database, DBError } from '../database' -import { PubSubAdapter } from '../../pubsub' +import { Database, DBError } from '../../database' +import { PubSubAdapter } from '../../../pubsub' import { UploadId } from './upload-id' +import { ERRORS } from '../../errors' const REQUEST_LOCK_RELEASE_MESSAGE = 'REQUEST_LOCK_RELEASE' @@ -60,7 +61,7 @@ export class PgLock implements Lock { abortController.abort() if (!acquired) { - throw new DBError('acquiring lock timeout', 503, 'acquiring_lock_timeout') + throw ERRORS.LockTimeout() } await new Promise((innerResolve) => { diff --git a/src/storage/tus/server.ts b/src/storage/protocols/tus/server.ts similarity index 100% rename from src/storage/tus/server.ts rename to src/storage/protocols/tus/server.ts diff --git a/src/storage/tus/upload-id.ts b/src/storage/protocols/tus/upload-id.ts similarity index 68% rename from src/storage/tus/upload-id.ts rename to src/storage/protocols/tus/upload-id.ts index 00df6c40..645d8ea7 100644 --- a/src/storage/tus/upload-id.ts +++ b/src/storage/protocols/tus/upload-id.ts @@ -1,7 +1,7 @@ -import { getConfig } from '../../config' -import { StorageBackendError } from '../errors' -import { mustBeValidBucketName, mustBeValidKey } from '../limits' -import { FILE_VERSION_SEPARATOR, PATH_SEPARATOR, SEPARATOR } from '../backend' +import { getConfig } from '../../../config' +import { ERRORS } from '../../errors' +import { mustBeValidBucketName, mustBeValidKey } from '../../limits' +import { FILE_VERSION_SEPARATOR, PATH_SEPARATOR, SEPARATOR } from '../../backend' interface ResourceIDOptions { tenant: string @@ -24,15 +24,15 @@ export class UploadId { this.objectName = options.objectName this.version = options.version - mustBeValidBucketName(options.bucket, 'invalid bucket name') 
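// A minimal sketch (assumed example values, Node's built-in crypto only) of the SigV4
// material that the SignatureV4 class above parses and recomputes. A client's
// Authorization header looks like:
//   AWS4-HMAC-SHA256 Credential=<accessKey>/20240411/us-east-1/s3/aws4_request,
//     SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=<hex>
// parseAuthorizationHeader() splits the Credential scope on '/' into exactly five parts:
// access key, short date (YYYYMMDD), region, service and the literal 'aws4_request'.
// signingKey() then derives the signing key through the standard SigV4 HMAC chain:

import { createHash, createHmac } from 'crypto'

const hmacSha256 = (key: string | Buffer, data: string) =>
  createHmac('sha256', key).update(data).digest()

// hypothetical inputs, for illustration only
const secretKey = 'my-secret'
const shortDate = '20240411'

const kDate = hmacSha256('AWS4' + secretKey, shortDate)
const kRegion = hmacSha256(kDate, 'us-east-1')
const kService = hmacSha256(kRegion, 's3')
const kSigning = hmacSha256(kService, 'aws4_request')
// serverSignature = hex(HMAC(kSigning, stringToSign)), where stringToSign is
// 'AWS4-HMAC-SHA256\n<x-amz-date>\n<shortDate>/<region>/<service>/aws4_request\n<sha256hex(canonicalRequest)>'

// The constant returned by getPayloadHash() for an empty body is simply SHA-256 of the
// empty string:
createHash('sha256').update('').digest('hex')
// => 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'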
- mustBeValidKey(options.objectName, 'invalid object name') + mustBeValidBucketName(options.bucket) + mustBeValidKey(options.objectName) if (!options.tenant) { - throw new StorageBackendError('tenant_not_found', 422, 'tenant not provided') + throw ERRORS.InvalidTenantId() } if (!options.version) { - throw new StorageBackendError('version_not_found', 422, 'version not provided') + throw ERRORS.InvalidUploadId('Version not provided') } } @@ -52,14 +52,14 @@ function fromPathSeparator(id: string) { const idParts = id.split(PATH_SEPARATOR) if (idParts.length < 3) { - throw new StorageBackendError('id_missmatch', 422, 'id format invalid') + throw ERRORS.InvalidUploadId() } const [tenant, bucket, ...objParts] = idParts const version = objParts.pop() if (!version) { - throw new StorageBackendError('version_not_found', 422, 'version not provided') + throw ERRORS.InvalidUploadId('Version not provided') } return { @@ -74,7 +74,7 @@ function fromFileSeparator(id: string) { const idParts = id.split(PATH_SEPARATOR) if (idParts.length < 3) { - throw new StorageBackendError('id_missmatch', 422, 'id format invalid') + throw ERRORS.InvalidUploadId() } const [tenant, bucket, ...objParts] = idParts @@ -84,14 +84,14 @@ function fromFileSeparator(id: string) { const objectNameParts = objectWithVersion?.split(separator) || [] if (objectNameParts.length < 2) { - throw new StorageBackendError('object_name_invalid', 422, 'object name invalid') + throw ERRORS.InvalidUploadId('Object name is invalid') } const version = objectNameParts[1] const objectName = objectNameParts[0] if (!version) { - throw new StorageBackendError('version_not_found', 422, 'version not provided') + throw ERRORS.InvalidUploadId('Version not provided') } objParts.push(objectName) diff --git a/src/storage/renderer/image.ts b/src/storage/renderer/image.ts index d09bdcdf..618a1718 100644 --- a/src/storage/renderer/image.ts +++ b/src/storage/renderer/image.ts @@ -4,7 +4,7 @@ import { getConfig } from '../../config' import { FastifyRequest } from 'fastify' import { Renderer, RenderOptions } from './renderer' import axiosRetry from 'axios-retry' -import { StorageBackendError } from '../errors' +import { ERRORS } from '../errors' import { Stream } from 'stream' import Agent from 'agentkeepalive' @@ -241,7 +241,7 @@ export class ImageRenderer extends Renderer { protected async handleRequestError(error: AxiosError) { const stream = error.response?.data as Stream if (!stream) { - throw new StorageBackendError('Internal Server Error', 500, 'Internal Server Error', error) + throw ERRORS.InternalError(error) } const errorResponse = await new Promise((resolve) => { @@ -257,7 +257,7 @@ export class ImageRenderer extends Renderer { }) const statusCode = error.response?.status || 500 - throw new StorageBackendError('ImageProcessingError', statusCode, errorResponse, error) + throw ERRORS.ImageProcessingError(statusCode, errorResponse, error) } } diff --git a/src/storage/schemas/index.ts b/src/storage/schemas/index.ts index 90c69575..a9c814cf 100644 --- a/src/storage/schemas/index.ts +++ b/src/storage/schemas/index.ts @@ -1,2 +1,3 @@ export * from './object' export * from './bucket' +export * from './multipart' diff --git a/src/storage/schemas/multipart.ts b/src/storage/schemas/multipart.ts new file mode 100644 index 00000000..df0796aa --- /dev/null +++ b/src/storage/schemas/multipart.ts @@ -0,0 +1,48 @@ +import { FromSchema } from 'json-schema-to-ts' + +export const multipartUploadSchema = { + $id: 'multipartUploadSchema', + type: 'object', + properties: { + 
id: { type: 'string' }, + bucket_id: { type: 'string' }, + key: { type: 'string' }, + in_progress_size: { type: 'number' }, + upload_signature: { type: 'string' }, + version: { type: 'string' }, + owner_id: { type: 'string' }, + created_at: { type: 'string' }, + }, + required: [ + 'id', + 'bucket_id', + 'key', + 'version', + 'created_at', + 'in_progress_size', + 'upload_signature', + ], + additionalProperties: false, +} as const + +export type S3MultipartUpload = FromSchema + +export const uploadPartSchema = { + $id: 'uploadPartSchema', + type: 'object', + properties: { + id: { type: 'string' }, + upload_id: { type: 'string' }, + bucket_id: { type: 'string' }, + key: { type: 'string' }, + part_number: { type: 'number' }, + version: { type: 'string' }, + created_at: { type: 'string' }, + etag: { type: 'string' }, + owner_id: { type: 'string' }, + }, + required: ['upload_id', 'bucket_id', 'key', 'version', 'part_number'], + additionalProperties: false, +} as const + +export type S3PartUpload = FromSchema diff --git a/src/storage/storage.ts b/src/storage/storage.ts index 6e84c663..d07178ce 100644 --- a/src/storage/storage.ts +++ b/src/storage/storage.ts @@ -1,6 +1,6 @@ import { StorageBackendAdapter, withOptionalVersion } from './backend' import { Database, FindBucketFilters } from './database' -import { StorageBackendError } from './errors' +import { ERRORS } from './errors' import { AssetRenderer, HeadRenderer, ImageRenderer } from './renderer' import { getFileSizeLimit, mustBeValidBucketName, parseFileSizeToBytes } from './limits' import { getConfig } from '../config' @@ -21,7 +21,7 @@ export class Storage { * @param bucketId */ from(bucketId: string) { - mustBeValidBucketName(bucketId, 'The bucketId name contains invalid characters') + mustBeValidBucketName(bucketId) return new ObjectStorage(this.backend, this.db, bucketId) } @@ -82,7 +82,7 @@ export class Storage { allowedMimeTypes?: null | string[] } ) { - mustBeValidBucketName(data.name, 'Bucket name invalid') + mustBeValidBucketName(data.name) const bucketData: Parameters[0] = data @@ -117,7 +117,7 @@ export class Storage { allowedMimeTypes?: null | string[] } ) { - mustBeValidBucketName(id, 'Bucket name invalid') + mustBeValidBucketName(id) const bucketData: Parameters[1] = data @@ -158,17 +158,13 @@ export class Storage { const countObjects = await db.asSuperUser().countObjectsInBucket(id) if (countObjects && countObjects > 0) { - throw new StorageBackendError( - 'Storage not empty', - 400, - 'Storage must be empty before you can delete it' - ) + throw ERRORS.BucketNotEmpty(id) } const deleted = await db.deleteBucket(id) if (!deleted) { - throw new StorageBackendError('not_found', 404, 'Bucket Not Found') + throw ERRORS.NoSuchBucket(id) } return deleted @@ -216,9 +212,7 @@ export class Storage { .filter(({ name }) => !deletedNames.has(name)) .map(({ name }) => name) - throw new StorageBackendError( - 'Cannot delete', - 400, + throw ERRORS.AccessDenied( `Cannot delete: ${remainingNames.join( ' ,' )}, you may have SELECT but not DELETE permissions` @@ -230,21 +224,13 @@ export class Storage { validateMimeType(mimeType: string[]) { for (const type of mimeType) { if (type.length > 1000) { - throw new StorageBackendError( - 'invalid_mime_type', - 422, - `the requested mime type "${type}" is invalid` - ) + throw ERRORS.InvalidMimeType(type) } if ( !type.match(/^([a-zA-Z0-9\-+.]+)\/([a-zA-Z0-9\-+.]+)(;\s*charset=[a-zA-Z0-9\-]+)?$|\*$/) ) { - throw new StorageBackendError( - 'invalid_mime_type', - 422, - `the requested mime type "${type} is 
invalid` - ) + throw ERRORS.InvalidMimeType(type) } } return true @@ -262,11 +248,7 @@ export class Storage { const globalMaxLimit = await getFileSizeLimit(this.db.tenantId) if (maxFileLimit > globalMaxLimit) { - throw new StorageBackendError( - 'max_file_size', - 422, - 'the requested max_file_size exceed the global limit' - ) + throw ERRORS.EntityTooLarge() } return maxFileLimit diff --git a/src/storage/uploader.ts b/src/storage/uploader.ts index 773b4da1..9979598e 100644 --- a/src/storage/uploader.ts +++ b/src/storage/uploader.ts @@ -2,7 +2,7 @@ import { FastifyRequest } from 'fastify' import { getFileSizeLimit } from './limits' import { ObjectMetadata, StorageBackendAdapter } from './backend' import { getConfig } from '../config' -import { StorageBackendError } from './errors' +import { ERRORS } from './errors' import { Database } from './database' import { ObjectAdminDelete, ObjectCreatedPostEvent, ObjectCreatedPutEvent } from '../queue' import { randomUUID } from 'crypto' @@ -99,11 +99,7 @@ export class Uploader { ) if (file.isTruncated()) { - throw new StorageBackendError( - 'Payload too large', - 413, - 'The object exceeded the maximum allowed size' - ) + throw ERRORS.EntityTooLarge() } return this.completeUpload({ @@ -210,11 +206,7 @@ export class Uploader { const requestedMime = mimeType.split('/') if (requestedMime.length < 2) { - throw new StorageBackendError( - 'invalid_mime_type', - 422, - `mime type ${mimeType} is not formatted properly` - ) + throw ERRORS.InvalidMimeType(mimeType) } const [type, ext] = requestedMime @@ -237,11 +229,7 @@ export class Uploader { } } - throw new StorageBackendError( - 'invalid_mime_type', - 422, - `mime type ${mimeType} is not supported` - ) + throw ERRORS.InvalidMimeType(mimeType) } protected async incomingFileInfo( @@ -249,7 +237,10 @@ export class Uploader { options?: Pick ) { const contentType = request.headers['content-type'] - const fileSizeLimit = await this.getFileSizeLimit(request.tenantId, options?.fileSizeLimit) + const fileSizeLimit = await getStandardMaxFileSizeLimit( + this.db.tenantId, + options?.fileSizeLimit + ) let body: NodeJS.ReadableStream let mimeType: string @@ -261,7 +252,7 @@ export class Uploader { const formData = await request.file({ limits: { fileSize: fileSizeLimit } }) if (!formData) { - throw new StorageBackendError(`no_file_provided`, 400, 'No file provided') + throw ERRORS.NoContentProvided() } // https://github.com/fastify/fastify-multipart/issues/162 @@ -274,7 +265,7 @@ export class Uploader { cacheControl = cacheTime ? 
`max-age=${cacheTime}` : 'no-cache' isTruncated = () => formData.file.truncated } catch (e) { - throw new StorageBackendError('empty_file', 400, 'Unexpected empty file received', e) + throw ERRORS.NoContentProvided(e as Error) } } else { // just assume it's a binary file @@ -294,18 +285,21 @@ export class Uploader { isTruncated, } } +} - protected async getFileSizeLimit(tenantId: string, bucketSizeLimit?: number | null) { - let globalFileSizeLimit = await getFileSizeLimit(tenantId) - - if (typeof bucketSizeLimit === 'number') { - globalFileSizeLimit = Math.min(bucketSizeLimit, globalFileSizeLimit) - } +export async function getStandardMaxFileSizeLimit( + tenantId: string, + bucketSizeLimit?: number | null +) { + let globalFileSizeLimit = await getFileSizeLimit(tenantId) - if (uploadFileSizeLimitStandard && uploadFileSizeLimitStandard > 0) { - globalFileSizeLimit = Math.min(uploadFileSizeLimitStandard, globalFileSizeLimit) - } + if (typeof bucketSizeLimit === 'number') { + globalFileSizeLimit = Math.min(bucketSizeLimit, globalFileSizeLimit) + } - return globalFileSizeLimit + if (uploadFileSizeLimitStandard && uploadFileSizeLimitStandard > 0) { + globalFileSizeLimit = Math.min(uploadFileSizeLimitStandard, globalFileSizeLimit) } + + return globalFileSizeLimit } diff --git a/src/test/bucket.test.ts b/src/test/bucket.test.ts index ee8898ce..bde3de86 100644 --- a/src/test/bucket.test.ts +++ b/src/test/bucket.test.ts @@ -102,7 +102,7 @@ describe('testing GET all buckets', () => { }) expect(response.statusCode).toBe(200) const responseJSON = JSON.parse(response.body) - expect(responseJSON.length).toBe(10) + expect(responseJSON.length).toBeGreaterThanOrEqual(10) expect(responseJSON[0]).toMatchObject({ id: expect.any(String), name: expect.any(String), diff --git a/src/test/common.ts b/src/test/common.ts index f2938ac0..87749d1a 100644 --- a/src/test/common.ts +++ b/src/test/common.ts @@ -57,6 +57,8 @@ export function useMockObject() { jest.spyOn(S3Backend.prototype, 'copyObject').mockResolvedValue({ httpStatusCode: 200, + lastModified: new Date('Thu, 12 Aug 2021 16:00:00 GMT'), + eTag: 'abc', }) jest.spyOn(S3Backend.prototype, 'deleteObject').mockResolvedValue() diff --git a/src/test/db/02-dummy-data.sql b/src/test/db/02-dummy-data.sql index af1544a0..e9b10b79 100644 --- a/src/test/db/02-dummy-data.sql +++ b/src/test/db/02-dummy-data.sql @@ -40,6 +40,8 @@ INSERT INTO "storage"."objects" ("id", "bucket_id", "name", "owner", "created_at ('D1CE4E4F-03E2-473D-858B-301D7989B581', 'bucket2', 'authenticated/move-orig.png', '317eadce-631a-4429-a0bb-f19a7a517b4a', '2021-02-22 22:29:15.14732+00', '2021-02-22 22:29:15.14732+00', '2021-03-02 09:32:17.116+00', '{"mimetype": "image/png", "size": 1234}'), ('222b3d1e-bc17-414c-b336-47894aa4d697', 'bucket2', 'authenticated/move-orig-2.png', '317eadce-631a-4429-a0bb-f19a7a517b4a', '2021-02-22 22:29:15.14732+00', '2021-02-22 22:29:15.14732+00', '2021-03-02 09:32:17.116+00', '{"mimetype": "image/png", "size": 1234}'), ('8f7d643d-1e82-4d39-ae39-d9bd6b0cfe9c', 'bucket2', 'authenticated/move-orig-3.png', '317eadce-631a-4429-a0bb-f19a7a517b4a', '2021-02-22 22:29:15.14732+00', '2021-02-22 22:29:15.14732+00', '2021-03-02 09:32:17.116+00', '{"mimetype": "image/png", "size": 1234}'), +('24f70210-62aa-4daa-9909-693b3febd8fd', 'bucket2', 'authenticated/move-orig-4.png', '317eadce-631a-4429-a0bb-f19a7a517b4a', '2021-02-22 22:29:15.14732+00', '2021-02-22 22:29:15.14732+00', '2021-03-02 09:32:17.116+00', '{"mimetype": "image/png", "size": 1234}'), 
+('18dc5e3b-4fb1-45a7-bfa4-d99b0784be31', 'bucket2', 'authenticated/move-orig-5.png', '317eadce-631a-4429-a0bb-f19a7a517b4a', '2021-02-22 22:29:15.14732+00', '2021-02-22 22:29:15.14732+00', '2021-03-02 09:32:17.116+00', '{"mimetype": "image/png", "size": 1234}'), ('8377527d-3518-4dc8-8290-c6926470e795', 'bucket2', 'folder/subfolder/public-all-permissions.png', 'd8c7bce9-cfeb-497b-bd61-e66ce2cbdaa2', '2021-02-17 10:26:42.791214+00', '2021-02-17 11:03:30.025116+00', '2021-02-17 10:26:42.791214+00', '{"size": 1234}'), ('b39ae4ab-802b-4c42-9271-3f908c34363c', 'bucket2', 'private/sadcat-upload3.png', '317eadce-631a-4429-a0bb-f19a7a517b4a', '2021-03-01 08:53:29.567975+00', '2021-03-01 08:53:29.567975+00', '2021-03-01 08:53:29.567975+00', '{"mimetype": "image/svg+xml", "size": 1234}'), ('8098E1AC-C744-4368-86DF-71B60CCDE221', 'bucket3', 'sadcat-upload3.png', '317eadce-631a-4429-a0bb-f19a7a517b4a', '2021-03-01 08:53:29.567975+00', '2021-03-01 08:53:29.567975+00', '2021-03-01 08:53:29.567975+00', '{"mimetype": "image/svg+xml", "size": 1234}'), @@ -64,6 +66,7 @@ CREATE POLICY crud_uid_folder ON storage.objects for all USING (bucket_id='bucke CREATE POLICY crud_uid_file ON storage.objects for all USING (bucket_id='bucket2' and name = 'folder/only_uid.jpg' and auth.uid() = 'd8c7bce9-cfeb-497b-bd61-e66ce2cbdaa2'); -- allow CRUD acccess to a folder in bucket2 to all authenticated users CREATE POLICY authenticated_folder ON storage.objects for all USING (bucket_id='bucket2' and (storage.foldername(name))[1] = 'authenticated' and auth.role() = 'authenticated'); +CREATE POLICY authenticated_folder_bucket_3 ON storage.objects for all USING (bucket_id='bucket3' and (storage.foldername(name))[1] = 'authenticated' and auth.role() = 'authenticated'); -- allow CRUD access to a folder in bucket2 to its owners CREATE POLICY crud_owner_only ON storage.objects for all USING (bucket_id='bucket2' and (storage.foldername(name))[1] = 'only_owner' and owner = auth.uid()); -- allow CRUD access to bucket4 diff --git a/src/test/object.test.ts b/src/test/object.test.ts index fcbe509c..94696f55 100644 --- a/src/test/object.test.ts +++ b/src/test/object.test.ts @@ -7,7 +7,7 @@ import { getConfig, mergeConfig } from '../config' import { S3Backend } from '../storage/backend' import { Obj } from '../storage/schemas' import { signJWT } from '../auth' -import { StorageBackendError } from '../storage' +import { ErrorCode, StorageBackendError } from '../storage' import { useMockObject, useMockQueue } from './common' import { getPostgresConnection } from '../database' import { getServiceKeyUser } from '../database/tenant' @@ -376,7 +376,7 @@ describe('testing POST object via multipart upload', () => { expect(await response.json()).toEqual({ error: 'invalid_mime_type', message: `mime type image/png is not supported`, - statusCode: '422', + statusCode: '415', }) expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() }) @@ -399,8 +399,8 @@ describe('testing POST object via multipart upload', () => { expect(response.statusCode).toBe(400) expect(await response.json()).toEqual({ error: 'invalid_mime_type', - message: `mime type thisisnotarealmimetype is not formatted properly`, - statusCode: '422', + message: `mime type thisisnotarealmimetype is not supported`, + statusCode: '415', }) expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() }) @@ -498,6 +498,7 @@ describe('testing POST object via multipart upload', () => { }) expect(createObjectResponse.statusCode).toBe(500) 
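// The body asserted just below reflects the switch from ad-hoc StorageBackendError throws
// to the ERRORS.* factories: each error now carries a stable machine-readable code alongside
// the HTTP status it surfaces with. The mapping here is read off the assertions in this file
// and in the S3 protocol tests, not from errors.ts itself:
//   ERRORS.InvalidMimeType(type) -> error 'invalid_mime_type', statusCode '415'
//   ERRORS.EntityTooLarge()      -> 'EntityTooLarge', 413, 'The object exceeded the maximum allowed size'
//   ERRORS.NoSuchBucket(id)      -> 'Bucket not found'
//   ERRORS.BucketNotEmpty(id)    -> 409, 'The bucket you tried to delete is not empty'
//   wrapped S3 backend failures  -> 500 with code: ErrorCode.S3Error, as asserted next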
expect(JSON.parse(createObjectResponse.body)).toStrictEqual({ + code: ErrorCode.S3Error, statusCode: '500', error: 'Unknown error', message: 'S3ServiceException', @@ -742,6 +743,7 @@ describe('testing POST object via binary upload', () => { expect(createObjectResponse.statusCode).toBe(500) expect(JSON.parse(createObjectResponse.body)).toStrictEqual({ statusCode: '500', + code: ErrorCode.S3Error, error: 'Unknown error', message: 'S3ServiceException', }) @@ -990,6 +992,42 @@ describe('testing copy object', () => { expect(response.body).toBe(`{"Key":"bucket2/authenticated/casestudy11.png"}`) }) + test('can copy objects across buckets', async () => { + const response = await app().inject({ + method: 'POST', + url: '/object/copy', + headers: { + authorization: `Bearer ${process.env.AUTHENTICATED_KEY}`, + }, + payload: { + bucketId: 'bucket2', + sourceKey: 'authenticated/casestudy.png', + destinationBucket: 'bucket3', + destinationKey: 'authenticated/casestudy11.png', + }, + }) + expect(response.statusCode).toBe(200) + expect(S3Backend.prototype.copyObject).toBeCalled() + expect(response.body).toBe(`{"Key":"bucket3/authenticated/casestudy11.png"}`) + }) + + test('cannot copy objects across buckets when RLS dont allow it', async () => { + const response = await app().inject({ + method: 'POST', + url: '/object/copy', + headers: { + authorization: `Bearer ${process.env.AUTHENTICATED_KEY}`, + }, + payload: { + bucketId: 'bucket2', + sourceKey: 'authenticated/casestudy.png', + destinationBucket: 'bucket3', + destinationKey: 'somekey/casestudy11.png', + }, + }) + expect(response.statusCode).toBe(400) + }) + test('check if RLS policies are respected: anon user is not able to update authenticated resource', async () => { const response = await app().inject({ method: 'POST', @@ -1069,7 +1107,7 @@ describe('testing delete object', () => { }, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.deleteObjects).toBeCalled() + expect(S3Backend.prototype.deleteObject).toBeCalled() }) test('check if RLS policies are respected: anon user is not able to delete authenticated resource', async () => { @@ -1684,6 +1722,44 @@ describe('testing move object', () => { expect(S3Backend.prototype.deleteObjects).toHaveBeenCalled() }) + test('can move objects across buckets respecting RLS', async () => { + const response = await app().inject({ + method: 'POST', + url: `/object/move`, + payload: { + bucketId: 'bucket2', + sourceKey: 'authenticated/move-orig-4.png', + destinationBucket: 'bucket3', + destinationKey: 'authenticated/move-new.png', + }, + headers: { + authorization: `Bearer ${process.env.AUTHENTICATED_KEY}`, + }, + }) + expect(response.statusCode).toBe(200) + expect(S3Backend.prototype.copyObject).toHaveBeenCalled() + expect(S3Backend.prototype.deleteObjects).toHaveBeenCalled() + }) + + test('cannot move objects across buckets because RLS checks', async () => { + const response = await app().inject({ + method: 'POST', + url: `/object/move`, + payload: { + bucketId: 'bucket2', + sourceKey: 'authenticated/move-orig-5.png', + destinationBucket: 'bucket3', + destinationKey: 'somekey/move-new.png', + }, + headers: { + authorization: `Bearer ${process.env.AUTHENTICATED_KEY}`, + }, + }) + expect(response.statusCode).toBe(400) + expect(S3Backend.prototype.copyObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.deleteObjects).not.toHaveBeenCalled() + }) + test('check if RLS policies are respected: anon user is not able to move an authenticated object', async () => { const response = await 
app().inject({ method: 'POST', diff --git a/src/test/rls_tests.yaml b/src/test/rls_tests.yaml index d3fa72dc..150c43a7 100644 --- a/src/test/rls_tests.yaml +++ b/src/test/rls_tests.yaml @@ -66,7 +66,7 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' - operation: bucket.update status: 400 @@ -125,7 +125,7 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' - operation: bucket.update status: 400 @@ -161,7 +161,7 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' - operation: bucket.update status: 400 @@ -191,7 +191,7 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' - operation: bucket.update status: 400 @@ -244,7 +244,7 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' - operation: upload status: 400 @@ -265,7 +265,7 @@ tests: - operation: object.delete status: 400 - error: 'Object Not Found' + error: 'Object not found' - operation: bucket.update status: 200 @@ -285,7 +285,7 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' - operation: upload bucketName: 'bucket_{{runId}}' @@ -302,7 +302,7 @@ tests: - operation: object.delete status: 400 - error: 'Object Not Found' + error: 'Object not found' - operation: bucket.update status: 400 @@ -343,7 +343,7 @@ tests: - operation: object.delete status: 400 - error: 'Object Not Found' + error: 'Object not found' - operation: object.delete role: service @@ -393,7 +393,7 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' - description: 'Will only able to move objects when authenticated' policies: @@ -433,7 +433,7 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' - description: 'Will only able to copy owned objects when authenticated' policies: @@ -474,4 +474,4 @@ tests: - operation: bucket.delete status: 400 - error: 'Bucket Not Found' + error: 'Bucket not found' diff --git a/src/test/s3-protocol.test.ts b/src/test/s3-protocol.test.ts new file mode 100644 index 00000000..5b9fa313 --- /dev/null +++ b/src/test/s3-protocol.test.ts @@ -0,0 +1,1101 @@ +import { + AbortMultipartUploadCommand, + CompleteMultipartUploadCommand, + CopyObjectCommand, + CreateBucketCommand, + CreateMultipartUploadCommand, + DeleteBucketCommand, + DeleteObjectCommand, + DeleteObjectsCommand, + GetBucketLocationCommand, + GetBucketVersioningCommand, + GetObjectCommand, + HeadBucketCommand, + ListBucketsCommand, + ListMultipartUploadsCommand, + ListObjectsCommand, + ListObjectsV2Command, + ListPartsCommand, + PutObjectCommand, + S3Client, + S3ServiceException, + UploadPartCommand, + UploadPartCopyCommand, +} from '@aws-sdk/client-s3' +import { getConfig, mergeConfig } from '../config' +import app from '../app' +import { FastifyInstance } from 'fastify' +import { Upload } from '@aws-sdk/lib-storage' +import { ReadableStreamBuffer } from 'stream-buffers' +import { randomUUID } from 'crypto' + +const { + s3ProtocolAccessKeySecret, + s3ProtocolAccessKeyId, + storageS3Region, + tenantId, + anonKey, + serviceKey, +} = getConfig() + +async function createBucket(client: S3Client, name?: string, publicRead = true) { + let bucketName: string + if (!name) { + bucketName = `TestBucket-${randomUUID()}` + } else { + bucketName = `${name}-${randomUUID()}` + } + + const 
createBucketRequest = new CreateBucketCommand({ + Bucket: bucketName, + ACL: publicRead ? 'public-read' : undefined, + }) + + await client.send(createBucketRequest) + + return bucketName +} + +async function uploadFile(client: S3Client, bucketName: string, key: string, mb: number) { + const uploader = new Upload({ + client: client, + params: { + Bucket: bucketName, + Key: key, + ContentType: 'image/jpg', + Body: Buffer.alloc(1024 * 1024 * mb), + }, + }) + + return await uploader.done() +} + +jest.setTimeout(10 * 1000) + +describe('S3 Protocol', () => { + describe('Bucket', () => { + let testApp: FastifyInstance + let client: S3Client + let clientMinio: S3Client + + beforeAll(async () => { + testApp = app() + const listener = await testApp.listen() + client = new S3Client({ + endpoint: `${listener.replace('[::1]', 'localhost')}/s3`, + forcePathStyle: true, + region: storageS3Region, + credentials: { + accessKeyId: s3ProtocolAccessKeyId!, + secretAccessKey: s3ProtocolAccessKeySecret!, + }, + }) + + clientMinio = new S3Client({ + forcePathStyle: true, + region: storageS3Region, + logger: console, + endpoint: 'http://localhost:9000', + credentials: { + accessKeyId: 'supa-storage', + secretAccessKey: 'secret1234', + }, + }) + }) + + afterAll(async () => { + await Promise.race([testApp.close(), new Promise((resolve) => setTimeout(resolve, 1000))]) + }) + + describe('CreateBucketCommand', () => { + it('creates a bucket', async () => { + const createBucketRequest = new CreateBucketCommand({ + Bucket: `SomeBucket-${randomUUID()}`, + ACL: 'public-read', + }) + + const { Location } = await client.send(createBucketRequest) + + expect(Location).toBeTruthy() + }) + + it('can get bucket versioning', async () => { + const bucket = await createBucket(client) + const bucketVersioningCommand = new GetBucketVersioningCommand({ + Bucket: bucket, + }) + + const resp = await client.send(bucketVersioningCommand) + expect(resp.Status).toEqual('Suspended') + expect(resp.MFADelete).toEqual('Disabled') + }) + + it('can get bucket location', async () => { + const bucket = await createBucket(client) + const bucketVersioningCommand = new GetBucketLocationCommand({ + Bucket: bucket, + }) + + const resp = await client.send(bucketVersioningCommand) + expect(resp.LocationConstraint).toEqual(storageS3Region) + }) + }) + + describe('DeleteBucketCommand', () => { + it('can delete an empty bucket', async () => { + const bucketName = await createBucket(client) + const deleteBucketRequest = new DeleteBucketCommand({ + Bucket: bucketName, + }) + + const resp = await client.send(deleteBucketRequest) + expect(resp.$metadata.httpStatusCode).toBe(204) + }) + + it('cannot delete a non empty bucket', async () => { + const bucketName = await createBucket(client) + await uploadFile(client, bucketName, 'test-1.jpg', 1) + const deleteBucketRequest = new DeleteBucketCommand({ + Bucket: bucketName, + }) + + try { + await client.send(deleteBucketRequest) + throw new Error('Should not reach here') + } catch (e) { + expect((e as Error).message).not.toBe('Should not reach here') + expect((e as S3ServiceException).$metadata.httpStatusCode).toBe(409) + expect((e as S3ServiceException).message).toBe( + 'The bucket you tried to delete is not empty' + ) + } + }) + }) + + describe('HeadBucketCommand', () => { + it('return bucket information when exists', async () => { + const bucketName = await createBucket(client) + const headBucketRequest = new HeadBucketCommand({ + Bucket: bucketName, + }) + + const resp = await client.send(headBucketRequest) + 
expect(resp.$metadata.httpStatusCode).toBe(200) + expect(resp.BucketRegion).toBe(storageS3Region) + }) + it('will return bucket not found error', async () => { + const headBucketRequest = new HeadBucketCommand({ + Bucket: 'dont-exist-bucket', + }) + + try { + await client.send(headBucketRequest) + throw new Error('Should not reach here') + } catch (e) { + expect((e as S3ServiceException).$metadata.httpStatusCode).toBe(404) + } + }) + }) + + describe('ListBucketsCommand', () => { + it('can list buckets', async () => { + await createBucket(client) + const listBuckets = new ListBucketsCommand({}) + + const resp = await client.send(listBuckets) + expect(resp.Buckets?.length || 0).toBeGreaterThan(0) + }) + }) + + describe('ListObjectCommand', () => { + it('list empty bucket', async () => { + const bucket = await createBucket(client) + const listBuckets = new ListObjectsCommand({ + Bucket: bucket, + }) + + const resp = await client.send(listBuckets) + expect(resp.Contents?.length).toBe(undefined) + }) + + it('list all keys', async () => { + const bucket = await createBucket(client) + const listBuckets = new ListObjectsCommand({ + Bucket: bucket, + }) + + await Promise.all([ + uploadFile(client, bucket, 'test-1.jpg', 1), + uploadFile(client, bucket, 'prefix-1/test-1.jpg', 1), + uploadFile(client, bucket, 'prefix-3/test-1.jpg', 1), + ]) + + const resp = await client.send(listBuckets) + expect(resp.Contents?.length).toBe(3) + }) + }) + + describe('ListObjectsV2Command', () => { + it('list empty bucket', async () => { + const bucket = await createBucket(client) + const listBuckets = new ListObjectsV2Command({ + Bucket: bucket, + }) + + const resp = await client.send(listBuckets) + expect(resp.Contents?.length).toBe(undefined) + }) + + it('list all keys', async () => { + const bucket = await createBucket(client) + const listBuckets = new ListObjectsV2Command({ + Bucket: bucket, + }) + + await Promise.all([ + uploadFile(client, bucket, 'test-1.jpg', 1), + uploadFile(client, bucket, 'prefix-1/test-1.jpg', 1), + uploadFile(client, bucket, 'prefix-3/test-1.jpg', 1), + ]) + + const resp = await client.send(listBuckets) + expect(resp.Contents?.length).toBe(3) + }) + + it('list keys and common prefixes', async () => { + const bucket = await createBucket(client) + const listBuckets = new ListObjectsV2Command({ + Bucket: bucket, + Delimiter: '/', + }) + + await Promise.all([ + uploadFile(client, bucket, 'test-1.jpg', 1), + uploadFile(client, bucket, 'prefix-1/test-1.jpg', 1), + uploadFile(client, bucket, 'prefix-3/test-1.jpg', 1), + ]) + + const resp = await client.send(listBuckets) + expect(resp.Contents?.length).toBe(1) + expect(resp.CommonPrefixes?.length).toBe(2) + }) + + it('paginate keys and common prefixes', async () => { + const bucket = await createBucket(client) + const listBucketsPage1 = new ListObjectsV2Command({ + Bucket: bucket, + Delimiter: '/', + MaxKeys: 1, + }) + + await Promise.all([ + uploadFile(client, bucket, 'test-1.jpg', 1), + uploadFile(client, bucket, 'prefix-1/test-1.jpg', 1), + uploadFile(client, bucket, 'prefix-3/test-1.jpg', 1), + ]) + + const objectsPage1 = await client.send(listBucketsPage1) + expect(objectsPage1.Contents?.length).toBe(undefined) + expect(objectsPage1.CommonPrefixes?.length).toBe(1) + expect(objectsPage1.CommonPrefixes?.[0].Prefix).toBe('prefix-1/') + + const listBucketsPage2 = new ListObjectsV2Command({ + Bucket: bucket, + Delimiter: '/', + MaxKeys: 1, + ContinuationToken: objectsPage1.NextContinuationToken, + }) + + const objectsPage2 = await 
client.send(listBucketsPage2) + + expect(objectsPage2.Contents?.length).toBe(undefined) + expect(objectsPage2.CommonPrefixes?.length).toBe(1) + expect(objectsPage2.CommonPrefixes?.[0].Prefix).toBe('prefix-3/') + + const listBucketsPage3 = new ListObjectsV2Command({ + Bucket: bucket, + Delimiter: '/', + MaxKeys: 1, + ContinuationToken: objectsPage2.NextContinuationToken, + }) + + const objectsPage3 = await client.send(listBucketsPage3) + + expect(objectsPage3.Contents?.length).toBe(1) + expect(objectsPage3.CommonPrefixes?.length).toBe(undefined) + expect(objectsPage3.Contents?.[0].Key).toBe('test-1.jpg') + }) + + it('paginate keys and common prefixes using StartAfter', async () => { + const bucket = await createBucket(client) + const listBucketsPage1 = new ListObjectsV2Command({ + Bucket: bucket, + Delimiter: '/', + MaxKeys: 1, + StartAfter: 'prefix-1/test-1.jpg', + }) + + await Promise.all([ + uploadFile(client, bucket, 'test-1.jpg', 1), + uploadFile(client, bucket, 'prefix-1/test-1.jpg', 1), + uploadFile(client, bucket, 'prefix-3/test-1.jpg', 1), + ]) + + const objectsPage1 = await client.send(listBucketsPage1) + expect(objectsPage1.Contents?.length).toBe(undefined) + expect(objectsPage1.CommonPrefixes?.length).toBe(1) + expect(objectsPage1.CommonPrefixes?.[0].Prefix).toBe('prefix-3/') + expect(objectsPage1.IsTruncated).toBe(true) + + const listBucketsPage2 = new ListObjectsV2Command({ + Bucket: bucket, + Delimiter: '/', + MaxKeys: 1, + ContinuationToken: objectsPage1.NextContinuationToken, + }) + + const objectsPage2 = await client.send(listBucketsPage2) + + expect(objectsPage2.Contents?.length).toBe(1) + expect(objectsPage2.CommonPrefixes?.length).toBe(undefined) + + const listBucketsPage3 = new ListObjectsV2Command({ + Bucket: bucket, + Delimiter: '/', + MaxKeys: 1, + ContinuationToken: objectsPage2.NextContinuationToken, + StartAfter: 'prefix-3/test-1.jpg', + }) + + const objectsPage3 = await client.send(listBucketsPage3) + + expect(objectsPage3.Contents?.length).toBe(1) + expect(objectsPage3.CommonPrefixes?.length).toBe(undefined) + expect(objectsPage3.Contents?.[0].Key).toBe('test-1.jpg') + expect(objectsPage3.IsTruncated).toBe(false) + }) + }) + + describe('MultiPartUpload', () => { + it('creates a multi part upload', async () => { + const bucketName = await createBucket(client) + const createMultiPartUpload = new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: 'test-1.jpg', + ContentType: 'image/jpg', + CacheControl: 'max-age=2000', + }) + const resp = await client.send(createMultiPartUpload) + expect(resp.UploadId).toBeTruthy() + }) + + it('upload a part', async () => { + const bucketName = await createBucket(client) + const createMultiPartUpload = new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: 'test-1.jpg', + ContentType: 'image/jpg', + CacheControl: 'max-age=2000', + }) + const resp = await client.send(createMultiPartUpload) + expect(resp.UploadId).toBeTruthy() + + const data = Buffer.alloc(1024 * 1024 * 5) + + const uploadPart = new UploadPartCommand({ + Bucket: bucketName, + Key: 'test-1.jpg', + ContentLength: data.length, + UploadId: resp.UploadId, + Body: data, + PartNumber: 1, + }) + + const partResp = await client.send(uploadPart) + expect(partResp.ETag).toBeTruthy() + }) + + it('completes a multipart upload', async () => { + const bucketName = await createBucket(client) + const createMultiPartUpload = new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: 'test-1.jpg', + ContentType: 'image/jpg', + CacheControl: 'max-age=2000', + }) 
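// The fixture parts below are allocated as Buffer.alloc(1024 * 1024 * 5): S3's multipart
// rules require every part except the last one to be at least 5 MiB, so smaller buffers
// would not exercise a realistic part size.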
+ const resp = await client.send(createMultiPartUpload) + expect(resp.UploadId).toBeTruthy() + + const data = Buffer.alloc(1024 * 1024 * 5) + const uploadPart = new UploadPartCommand({ + Bucket: bucketName, + Key: 'test-1.jpg', + ContentLength: data.length, + UploadId: resp.UploadId, + Body: data, + PartNumber: 1, + }) + + const part1 = await client.send(uploadPart) + + const completeMultiPartUpload = new CompleteMultipartUploadCommand({ + Bucket: bucketName, + Key: 'test-1.jpg', + UploadId: resp.UploadId, + MultipartUpload: { + Parts: [ + { + PartNumber: 1, + ETag: part1.ETag, + }, + ], + }, + }) + + const completeResp = await client.send(completeMultiPartUpload) + expect(completeResp.$metadata.httpStatusCode).toBe(200) + expect(completeResp.Key).toEqual('test-1.jpg') + }) + + it('aborts a multipart upload', async () => { + const bucketName = await createBucket(client) + const createMultiPartUpload = new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: 'test-1.jpg', + ContentType: 'image/jpg', + CacheControl: 'max-age=2000', + }) + const resp = await client.send(createMultiPartUpload) + expect(resp.UploadId).toBeTruthy() + + const data = Buffer.alloc(1024 * 1024 * 5) + const uploadPart = new UploadPartCommand({ + Bucket: bucketName, + Key: 'test-1.jpg', + ContentLength: data.length, + UploadId: resp.UploadId, + Body: data, + PartNumber: 1, + }) + + await client.send(uploadPart) + + const completeMultiPartUpload = new AbortMultipartUploadCommand({ + Bucket: bucketName, + Key: 'test-1.jpg', + UploadId: resp.UploadId, + }) + + const completeResp = await client.send(completeMultiPartUpload) + expect(completeResp.$metadata.httpStatusCode).toBe(200) + }) + + it('upload a file using putObject', async () => { + const bucketName = await createBucket(client) + + const putObject = new PutObjectCommand({ + Bucket: bucketName, + Key: 'test-1-put-object.jpg', + Body: Buffer.alloc(1024 * 1024 * 12), + }) + + const resp = await client.send(putObject) + expect(resp.$metadata.httpStatusCode).toEqual(200) + }) + + it('it will not allow to upload a file using putObject when exceeding maxFileSize', async () => { + const bucketName = await createBucket(client) + + mergeConfig({ + uploadFileSizeLimit: 1024 * 1024 * 10, + }) + + const putObject = new PutObjectCommand({ + Bucket: bucketName, + Key: 'test-1-put-object.jpg', + Body: Buffer.alloc(1024 * 1024 * 12), + }) + + try { + await client.send(putObject) + throw new Error('Should not reach here') + } catch (e) { + expect((e as Error).message).not.toEqual('Should not reach here') + expect((e as S3ServiceException).$metadata.httpStatusCode).toEqual(413) + expect((e as S3ServiceException).message).toEqual( + 'The object exceeded the maximum allowed size' + ) + expect((e as S3ServiceException).name).toEqual('EntityTooLarge') + } + }) + + it('will not allow uploading a file that exceeded the maxFileSize', async () => { + const bucketName = await createBucket(client) + + mergeConfig({ + uploadFileSizeLimit: 1024 * 1024 * 10, + }) + + const uploader = new Upload({ + client: client, + leavePartsOnError: true, + + params: { + Bucket: bucketName, + Key: 'test-1.jpg', + ContentType: 'image/jpg', + Body: Buffer.alloc(1024 * 1024 * 12), + }, + }) + + try { + await uploader.done() + throw new Error('Should not reach here') + } catch (e) { + expect((e as Error).message).not.toEqual('Should not reach here') + expect((e as S3ServiceException).$metadata.httpStatusCode).toEqual(413) + expect((e as S3ServiceException).message).toEqual( + 'The object exceeded the 
maximum allowed size' + ) + expect((e as S3ServiceException).name).toEqual('EntityTooLarge') + } + }) + + it('will not allow uploading a part that exceeded the maxFileSize', async () => { + const bucketName = await createBucket(client, 'try-test-1') + + mergeConfig({ + uploadFileSizeLimit: 1024 * 1024 * 10, + }) + + const createMultiPartUpload = new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: 'test-1.jpg', + ContentType: 'image/jpg', + CacheControl: 'max-age=2000', + }) + const resp = await client.send(createMultiPartUpload) + expect(resp.UploadId).toBeTruthy() + + const readable = new ReadableStreamBuffer({ + frequency: 500, + chunkSize: 1024 * 1024 * 3, + }) + + readable.put(Buffer.alloc(1024 * 1024 * 12)) + readable.stop() + + const uploadPart = new UploadPartCommand({ + Bucket: bucketName, + Key: 'test-1.jpg', + UploadId: resp.UploadId, + Body: readable, + PartNumber: 1, + ContentLength: 1024 * 1024 * 12, + }) + + try { + await client.send(uploadPart) + throw new Error('Should not reach here') + } catch (e) { + expect((e as Error).message).not.toEqual('Should not reach here') + expect((e as S3ServiceException).$metadata.httpStatusCode).toEqual(413) + expect((e as S3ServiceException).message).toEqual( + 'The object exceeded the maximum allowed size' + ) + expect((e as S3ServiceException).name).toEqual('EntityTooLarge') + } + }) + + it('upload a file using multipart upload', async () => { + const bucketName = await createBucket(client) + + const uploader = new Upload({ + client: client, + params: { + Bucket: bucketName, + Key: 'test-1.jpg', + ContentType: 'image/jpg', + Body: Buffer.alloc(1024 * 1024 * 12), + }, + }) + + const resp = await uploader.done() + + expect(resp.$metadata).toBeTruthy() + }) + }) + + describe('GetObject', () => { + it('can get an existing object', async () => { + const bucketName = await createBucket(client) + const key = 'test-1.jpg' + await uploadFile(client, bucketName, key, 1) + + const getObject = new GetObjectCommand({ + Bucket: bucketName, + Key: key, + }) + + const resp = await client.send(getObject) + const data = await resp.Body?.transformToByteArray() + expect(data).toBeTruthy() + expect(resp.ETag).toBeTruthy() + }) + + it('will return an error when object does not exist', async () => { + const bucketName = await createBucket(client) + const key = 'test-1.jpg' + + const getObject = new GetObjectCommand({ + Bucket: bucketName, + Key: key, + }) + + try { + await client.send(getObject) + } catch (e) { + expect((e as S3ServiceException).$metadata.httpStatusCode).toEqual(404) + expect((e as S3ServiceException).message).toEqual('Object not found') + expect((e as S3ServiceException).name).toEqual('NoSuchKey') + } + }) + + it('can get an object using range requests', async () => { + const bucketName = await createBucket(client) + const key = 'test-1.jpg' + await uploadFile(client, bucketName, key, 1) + + const getObject = new GetObjectCommand({ + Bucket: bucketName, + Key: key, + Range: 'bytes=0-100', + }) + + const resp = await client.send(getObject) + const data = await resp.Body?.transformToByteArray() + expect(resp.$metadata.httpStatusCode).toEqual(206) + expect(data).toBeTruthy() + expect(resp.ETag).toBeTruthy() + }) + }) + + describe('DeleteObjectCommand', () => { + it('can delete an existing object', async () => { + const bucketName = await createBucket(client) + const key = 'test-1.jpg' + await uploadFile(client, bucketName, key, 1) + + const deleteObject = new DeleteObjectCommand({ + Bucket: bucketName, + Key: key, + }) + + await 
client.send(deleteObject) + + const getObject = new GetObjectCommand({ + Bucket: bucketName, + Key: key, + }) + + try { + await client.send(getObject) + } catch (e) { + expect((e as S3ServiceException).$metadata.httpStatusCode).toEqual(404) + } + }) + }) + + describe('DeleteObjectsCommand', () => { + it('can delete multiple objects', async () => { + const bucketName = await createBucket(client) + await Promise.all([ + uploadFile(client, bucketName, 'test-1.jpg', 1), + uploadFile(client, bucketName, 'test-2.jpg', 1), + uploadFile(client, bucketName, 'test-3.jpg', 1), + ]) + + const deleteObjectsCommand = new DeleteObjectsCommand({ + Bucket: bucketName, + Delete: { + Objects: [ + { + Key: 'test-1.jpg', + }, + { + Key: 'test-2.jpg', + }, + { + Key: 'test-3.jpg', + }, + ], + }, + }) + + await client.send(deleteObjectsCommand) + + const listObjectsCommand = new ListObjectsV2Command({ + Bucket: bucketName, + }) + + const resp = await client.send(listObjectsCommand) + expect(resp.Contents).toBe(undefined) + }) + }) + + describe('CopyObjectCommand', () => { + it('will copy an object in the same bucket', async () => { + const bucketName = await createBucket(client) + await uploadFile(client, bucketName, 'test-copy-1.jpg', 1) + + const copyObjectCommand = new CopyObjectCommand({ + Bucket: bucketName, + Key: 'test-copied-2.jpg', + CopySource: `${bucketName}/test-copy-1.jpg`, + }) + + const resp = await client.send(copyObjectCommand) + expect(resp.CopyObjectResult?.ETag).toBeTruthy() + }) + + it('will copy an object in a different bucket', async () => { + const bucketName1 = await createBucket(client) + const bucketName2 = await createBucket(client) + await uploadFile(client, bucketName1, 'test-copy-1.jpg', 1) + + const copyObjectCommand = new CopyObjectCommand({ + Bucket: bucketName2, + Key: 'test-copied-2.jpg', + CopySource: `${bucketName1}/test-copy-1.jpg`, + }) + + const resp = await client.send(copyObjectCommand) + expect(resp.CopyObjectResult?.ETag).toBeTruthy() + }) + + it('will not be able to copy an object that doesnt exists', async () => { + const bucketName1 = await createBucket(client) + await uploadFile(client, bucketName1, 'test-copy-1.jpg', 1) + + const copyObjectCommand = new CopyObjectCommand({ + Bucket: bucketName1, + Key: 'test-copied-2.jpg', + CopySource: `${bucketName1}/test-dont-exists.jpg`, + }) + + try { + await client.send(copyObjectCommand) + throw new Error('Should not reach here') + } catch (e) { + expect((e as Error).message).not.toEqual('Should not reach here') + expect((e as S3ServiceException).$metadata.httpStatusCode).toEqual(404) + expect((e as S3ServiceException).message).toEqual('Object not found') + } + }) + }) + + describe('ListMultipartUploads', () => { + it('will list multipart uploads', async () => { + const bucketName = await createBucket(client) + const createMultiPartUpload = (key: string) => + new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: key, + ContentType: 'image/jpg', + CacheControl: 'max-age=2000', + }) + + await Promise.all([ + client.send(createMultiPartUpload('test-1.jpg')), + client.send(createMultiPartUpload('test-2.jpg')), + client.send(createMultiPartUpload('test-3.jpg')), + client.send(createMultiPartUpload('nested/test-4.jpg')), + ]) + + const listMultipartUploads = new ListMultipartUploadsCommand({ + Bucket: bucketName, + }) + + const resp = await client.send(listMultipartUploads) + expect(resp.Uploads?.length).toBe(4) + expect(resp.Uploads?.[0].Key).toBe('nested/test-4.jpg') + expect(resp.Uploads?.[1].Key).toBe('test-1.jpg') 
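// Uploads are expected back sorted by key in ascending lexicographic order (standard S3
// ListMultipartUploads behaviour), which is why 'nested/test-4.jpg' precedes the three
// top-level keys in these assertions.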
+ expect(resp.Uploads?.[2].Key).toBe('test-2.jpg') + expect(resp.Uploads?.[3].Key).toBe('test-3.jpg') + }) + + it('will list multipart uploads with delimiter', async () => { + const bucketName = await createBucket(client) + const createMultiPartUpload = (key: string) => + new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: key, + ContentType: 'image/jpg', + CacheControl: 'max-age=2000', + }) + + await Promise.all([ + client.send(createMultiPartUpload('test-1.jpg')), + client.send(createMultiPartUpload('test-2.jpg')), + client.send(createMultiPartUpload('test-3.jpg')), + client.send(createMultiPartUpload('nested/test-4.jpg')), + ]) + + const listMultipartUploads = new ListMultipartUploadsCommand({ + Bucket: bucketName, + Delimiter: '/', + }) + + const resp = await client.send(listMultipartUploads) + expect(resp.Uploads?.length).toBe(3) + expect(resp.CommonPrefixes?.length).toBe(1) + expect(resp.Uploads?.[0].Key).toBe('test-1.jpg') + expect(resp.Uploads?.[1].Key).toBe('test-2.jpg') + expect(resp.Uploads?.[2].Key).toBe('test-3.jpg') + expect(resp.CommonPrefixes?.[0].Prefix).toBe('nested/') + }) + }) + + it('will list multipart uploads with delimiter and pagination', async () => { + const bucketName = await createBucket(client) + const createMultiPartUpload = (key: string) => + new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: key, + ContentType: 'image/jpg', + CacheControl: 'max-age=2000', + }) + + await Promise.all([ + client.send(createMultiPartUpload('test-1.jpg')), + client.send(createMultiPartUpload('test-2.jpg')), + client.send(createMultiPartUpload('test-3.jpg')), + client.send(createMultiPartUpload('nested/test-4.jpg')), + ]) + + const listMultipartUploads1 = new ListMultipartUploadsCommand({ + Bucket: bucketName, + Delimiter: '/', + MaxUploads: 1, + }) + + const page1 = await client.send(listMultipartUploads1) + expect(page1.Uploads?.length).toBe(undefined) + expect(page1.CommonPrefixes?.length).toBe(1) + expect(page1.CommonPrefixes?.[0].Prefix).toBe('nested/') + + const listMultipartUploads2 = new ListMultipartUploadsCommand({ + Bucket: bucketName, + Delimiter: '/', + MaxUploads: 1, + KeyMarker: page1.NextKeyMarker, + }) + + const page2 = await client.send(listMultipartUploads2) + expect(page2.CommonPrefixes?.length).toBe(undefined) + expect(page2.Uploads?.length).toBe(1) + expect(page2.Uploads?.[0].Key).toBe('test-1.jpg') + + const listMultipartUploads3 = new ListMultipartUploadsCommand({ + Bucket: bucketName, + Delimiter: '/', + MaxUploads: 1, + KeyMarker: page2.NextKeyMarker, + }) + + const page3 = await client.send(listMultipartUploads3) + expect(page3.CommonPrefixes?.length).toBe(undefined) + expect(page3.Uploads?.length).toBe(1) + expect(page3.Uploads?.[0].Key).toBe('test-2.jpg') + + const listMultipartUploads4 = new ListMultipartUploadsCommand({ + Bucket: bucketName, + Delimiter: '/', + MaxUploads: 1, + KeyMarker: page3.NextKeyMarker, + }) + + const page4 = await client.send(listMultipartUploads4) + expect(page4.CommonPrefixes?.length).toBe(undefined) + expect(page4.Uploads?.length).toBe(1) + expect(page4.Uploads?.[0].Key).toBe('test-3.jpg') + }) + + describe('ListParts', () => { + it('cannot list parts for an upload that doesnt exists', async () => { + const listParts = new ListPartsCommand({ + Bucket: 'no-bucket', + Key: 'test-1.jpg', + UploadId: 'test-upload-id', + }) + + try { + await client.send(listParts) + throw new Error('Should not reach here') + } catch (e) { + expect((e as Error).message).not.toBe('Should not reach here') + expect((e as 
S3ServiceException).$metadata.httpStatusCode).toBe(404) + expect((e as S3ServiceException).message).toBe('Upload not found') + } + }) + + it('will list parts of a multipart upload', async () => { + const bucket = await createBucket(client) + const createMultiPartUpload = new CreateMultipartUploadCommand({ + Bucket: bucket, + Key: 'test-1.jpg', + ContentType: 'image/jpg', + CacheControl: 'max-age=2000', + }) + const resp = await client.send(createMultiPartUpload) + expect(resp.UploadId).toBeTruthy() + + const data = Buffer.alloc(1024 * 1024 * 5) + const uploadPart = (partNumber: number) => + new UploadPartCommand({ + Bucket: bucket, + Key: 'test-1.jpg', + ContentLength: data.length, + UploadId: resp.UploadId, + Body: data, + PartNumber: partNumber, + }) + + await Promise.all([ + client.send(uploadPart(1)), + client.send(uploadPart(2)), + client.send(uploadPart(3)), + ]) + + const listParts = new ListPartsCommand({ + Bucket: bucket, + Key: 'test-1.jpg', + UploadId: resp.UploadId, + }) + + const parts = await client.send(listParts) + expect(parts.Parts?.length).toBe(3) + }) + + it('will list parts of a multipart upload with pagination', async () => { + const bucket = await createBucket(client) + const createMultiPartUpload = new CreateMultipartUploadCommand({ + Bucket: bucket, + Key: 'test-1.jpg', + ContentType: 'image/jpg', + CacheControl: 'max-age=2000', + }) + const resp = await client.send(createMultiPartUpload) + expect(resp.UploadId).toBeTruthy() + + const data = Buffer.alloc(1024 * 1024 * 5) + const uploadPart = (partNumber: number) => + new UploadPartCommand({ + Bucket: bucket, + Key: 'test-1.jpg', + ContentLength: data.length, + UploadId: resp.UploadId, + Body: data, + PartNumber: partNumber, + }) + + await Promise.all([ + client.send(uploadPart(1)), + client.send(uploadPart(2)), + client.send(uploadPart(3)), + ]) + + const listParts1 = new ListPartsCommand({ + Bucket: bucket, + Key: 'test-1.jpg', + UploadId: resp.UploadId, + MaxParts: 1, + }) + + const parts1 = await client.send(listParts1) + expect(parts1.Parts?.length).toBe(1) + expect(parts1.Parts?.[0].PartNumber).toBe(1) + + const listParts2 = new ListPartsCommand({ + Bucket: bucket, + Key: 'test-1.jpg', + UploadId: resp.UploadId, + MaxParts: 1, + PartNumberMarker: parts1.NextPartNumberMarker, + }) + + const parts2 = await client.send(listParts2) + expect(parts2.Parts?.length).toBe(1) + expect(parts2.Parts?.[0].PartNumber).toBe(2) + + const listParts3 = new ListPartsCommand({ + Bucket: bucket, + Key: 'test-1.jpg', + UploadId: resp.UploadId, + MaxParts: 1, + PartNumberMarker: parts2.NextPartNumberMarker, + }) + + const parts3 = await client.send(listParts3) + expect(parts3.Parts?.length).toBe(1) + expect(parts3.Parts?.[0].PartNumber).toBe(3) + }) + }) + + describe('UploadPartCopyCommand', () => { + it('will copy a part from an existing object and upload it as a part', async () => { + const bucket = await createBucket(client) + + const sourceKey = `${randomUUID()}.jpg` + const newKey = `new-${randomUUID()}.jpg` + + await uploadFile(client, bucket, sourceKey, 12) + + const createMultiPartUpload = new CreateMultipartUploadCommand({ + Bucket: bucket, + Key: newKey, + ContentType: 'image/jpg', + CacheControl: 'max-age=2000', + }) + const resp = await client.send(createMultiPartUpload) + expect(resp.UploadId).toBeTruthy() + + const copyPart = new UploadPartCopyCommand({ + Bucket: bucket, + Key: newKey, + UploadId: resp.UploadId, + PartNumber: 1, + CopySource: `${bucket}/${sourceKey}`, + CopySourceRange: `bytes=0-${1024 * 1024 * 4}`, + 
}) + + const copyResp = await client.send(copyPart) + expect(copyResp.CopyPartResult?.ETag).toBeTruthy() + expect(copyResp.CopyPartResult?.LastModified).toBeTruthy() + + const listPartsCmd = new ListPartsCommand({ + Bucket: bucket, + Key: newKey, + UploadId: resp.UploadId, + }) + + const parts = await client.send(listPartsCmd) + expect(parts.Parts?.length).toBe(1) + + const completeMultiPartUpload = new CompleteMultipartUploadCommand({ + Bucket: bucket, + Key: newKey, + UploadId: resp.UploadId, + MultipartUpload: { + Parts: [ + { + PartNumber: 1, + ETag: copyResp.CopyPartResult?.ETag, + }, + ], + }, + }) + }) + }) + }) +}) diff --git a/src/test/tenant.test.ts b/src/test/tenant.test.ts index 8b3913c9..5a6db7d6 100644 --- a/src/test/tenant.test.ts +++ b/src/test/tenant.test.ts @@ -16,7 +16,7 @@ const payload = { serviceKey: 'd', jwks: { keys: [] }, migrationStatus: 'COMPLETED', - migrationVersion: 'alter-default-value-objects-id', + migrationVersion: 'optimize-search-function', features: { imageTransformation: { enabled: true, @@ -34,7 +34,7 @@ const payload2 = { serviceKey: 'h', jwks: null, migrationStatus: 'COMPLETED', - migrationVersion: 'alter-default-value-objects-id', + migrationVersion: 'optimize-search-function', features: { imageTransformation: { enabled: false, diff --git a/src/test/webhooks.test.ts b/src/test/webhooks.test.ts index 39d01742..62f9ad10 100644 --- a/src/test/webhooks.test.ts +++ b/src/test/webhooks.test.ts @@ -105,25 +105,10 @@ describe('Webhooks', () => { }, }) expect(response.statusCode).toBe(200) - expect(sendSpy).toBeCalledTimes(2) - expect(sendSpy).toHaveBeenNthCalledWith(1, { - data: { - $version: 'v1', - bucketId: 'bucket6', - name: obj.name, - tenant: { - host: undefined, - ref: 'bjhaohmqunupljrqypxz', - }, - reqId: expect.any(String), - version: expect.any(String), - }, - name: 'object:admin:delete', - options: undefined, - }) + expect(sendSpy).toBeCalledTimes(1) expect(sendSpy).toHaveBeenNthCalledWith( - 2, + 1, expect.objectContaining({ name: 'webhooks', options: undefined,