diff --git a/features/features.go b/features/features.go
index 3a7f1369096..c8df4043781 100644
--- a/features/features.go
+++ b/features/features.go
@@ -126,6 +126,22 @@ type Config struct {
 	// This feature flag also causes CAA checks to happen after all remote VAs
 	// have passed DCV.
 	EnforceMPIC bool
+
+	// ReadNewOrderSchema causes the SA to attempt to read from the new orders,
+	// authorizations, and validations tables. This allows us to continue reading
+	// from these tables even if we have to roll back the flag which causes us
+	// to write to them.
+	// - Simple select-by-id queries go to whichever schema hosts the row being selected
+	// - Complex queries go solely to the new schema (this means that authz and
+	// order reuse work only in the new schema).
+	ReadNewOrderSchema bool
+
+	// WriteNewOrderSchema causes the SA to write to the new orders,
+	// authorizations, and validations tables. Do not enable this flag unless
+	// ReadNewOrderSchema is also enabled.
+	// - Inserts go solely to the new schema
+	// - Updates go to whichever schema hosts the row being updated
+	WriteNewOrderSchema bool
 }
 
 var fMu = new(sync.RWMutex)
diff --git a/sa/database.go b/sa/database.go
index 5b516d1ebb5..a2666f97ecf 100644
--- a/sa/database.go
+++ b/sa/database.go
@@ -287,6 +287,10 @@ func initTables(dbMap *borp.DbMap) {
 	dbMap.AddTableWithName(revokedCertModel{}, "revokedCertificates").SetKeys(true, "ID")
 	dbMap.AddTableWithName(replacementOrderModel{}, "replacementOrders").SetKeys(true, "ID")
 	dbMap.AddTableWithName(pausedModel{}, "paused")
+	dbMap.AddTableWithName(orders2Model{}, "orders2")
+	dbMap.AddTableWithName(authorizationsModel{}, "authorizations")
+	dbMap.AddTableWithName(validationsModel{}, "validations")
+	dbMap.AddTableWithName(authzReuseModel{}, "authzReuse")
 
 	// Read-only maps used for selecting subsets of columns.
 	dbMap.AddTableWithName(CertStatusMetadata{}, "certificateStatus")
diff --git a/sa/db-next/boulder_sa/20240801000000_OrderSchema.sql b/sa/db-next/boulder_sa/20240801000000_OrderSchema.sql
new file mode 100644
index 00000000000..eb467ce80c6
--- /dev/null
+++ b/sa/db-next/boulder_sa/20240801000000_OrderSchema.sql
@@ -0,0 +1,74 @@
+-- +migrate Up
+-- SQL in section 'Up' is executed when this migration is applied
+
+-- The orders2 table holds one row per ACME Order object. The authorizationIDs
+-- column contains an opaque JSON blob which the SA can use to find the
+-- associated authorizations without requiring db-level foreign keys. Most
+-- orders are created with status "pending", but may be created with status
+-- "ready" if all of their authorizations are reused and already valid. Orders
+-- transition to status "processing" when finalization begins. The error field
+-- is populated only if an error occurs during finalization and the order moves
+-- to the "invalid" state; errors during validation are reflected elsewhere.
+CREATE TABLE `orders2` (
+  `id` bigint(20) UNSIGNED NOT NULL,
+  `registrationID` bigint(20) UNSIGNED NOT NULL,
+  `created` datetime NOT NULL,
+  `expires` datetime NOT NULL,
+  `authorizationIDs` json NOT NULL,
+  `profile` varchar(255) NOT NULL,
+  `beganProcessing` boolean NOT NULL,
+  `error` mediumblob DEFAULT NULL,
+  `certificateSerial` varchar(255) DEFAULT NULL,
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
+
+-- The authorizations table holds one row per ACME Authorization object and
+-- associated challenges. It is always created with status "pending". After
+-- one of its challenges is attempted, it will transition into either status
+-- "valid" or "invalid", and the validationIDs column will be updated to point
+-- to a new row in the validations table containing the record of that attempt.
+CREATE TABLE `authorizations` (
+  `id` bigint(20) UNSIGNED NOT NULL,
+  `registrationID` bigint(20) UNSIGNED NOT NULL,
+  `identifierType` tinyint(4) NOT NULL,
+  `identifierValue` varchar(255) NOT NULL,
+  `created` datetime NOT NULL,
+  `expires` datetime NOT NULL,
+  `profile` varchar(255) NOT NULL,
+  `challenges` tinyint(4) NOT NULL,
+  `token` binary(32) NOT NULL,
+  `status` tinyint(4) NOT NULL,
+  `validationIDs` json DEFAULT NULL,
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
+
+
+-- The validations table holds records of completed validation attempts,
+-- including the validation method used, the resulting status (valid or
+-- invalid), and an opaque blob of our audit record.
+CREATE TABLE `validations` (
+  `id` bigint(20) UNSIGNED NOT NULL,
+  `registrationID` bigint(20) UNSIGNED NOT NULL,
+  `challenge` tinyint(4) NOT NULL,
+  `attemptedAt` datetime NOT NULL,
+  `status` tinyint(4) NOT NULL,
+  `record` json NOT NULL,
+  PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
+
+-- The authzReuse table exists solely to allow cheap lookups of reusable authz
+-- IDs. This allows us to not have expensive indices on the authorizations table.
+CREATE TABLE `authzReuse` (
+  `accountID_identifier` VARCHAR(300) NOT NULL,
+  `authzID` bigint(20) UNSIGNED NOT NULL,
+  `expires` DATETIME NOT NULL,
+  PRIMARY KEY (`accountID_identifier`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
+
+-- +migrate Down
+-- SQL section 'Down' is executed when this migration is rolled back
+
+DROP TABLE `authzReuse`;
+DROP TABLE `validations`;
+DROP TABLE `authorizations`;
+DROP TABLE `orders2`;
diff --git a/sa/model.go b/sa/model.go
index 79f4167980c..918144c6852 100644
--- a/sa/model.go
+++ b/sa/model.go
@@ -2,10 +2,12 @@ package sa
 
 import (
 	"context"
+	"crypto/rand"
 	"crypto/sha256"
 	"crypto/x509"
 	"database/sql"
 	"encoding/base64"
+	"encoding/binary"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -23,6 +25,7 @@ import (
 	corepb "github.com/letsencrypt/boulder/core/proto"
 	"github.com/letsencrypt/boulder/db"
 	berrors "github.com/letsencrypt/boulder/errors"
+	"github.com/letsencrypt/boulder/features"
 	"github.com/letsencrypt/boulder/grpc"
 	"github.com/letsencrypt/boulder/identifier"
 	"github.com/letsencrypt/boulder/probs"
@@ -59,6 +62,54 @@ func badJSONError(msg string, jsonData []byte, err error) error {
 	}
 }
 
+// newRandomID creates a 64-bit mostly-random number to be used as the unique ID
+// column in a table which no longer uses auto_increment IDs. It takes the
+// current time as an argument so that it can include the current "epoch" as the
+// first byte of the ID, for the sake of easily dropping old data.
+func newRandomID(now time.Time) (int64, error) {
+	idBytes := make([]byte, 8) // 8 bytes is 64 bits
+
+	// Read random bits into the lower 7 bytes of the id.
+	_, err := rand.Read(idBytes[1:])
+	if err != nil {
+		return 0, fmt.Errorf("while generating unique database id: %w", err)
+	}
+
+	// Epochs are arbitrarily chosen to be 90 day chunks counting from the start
+	// of 2024. This gives us 127 * 90 = ~31 years worth of epochs before we have
+	// to worry about a rollover.
+	epoch := uint8(now.Sub(time.Date(2024, 01, 01, 00, 00, 00, 00, time.UTC)) / (90 * 24 * time.Hour))
+	if epoch&0x80 != 0 {
+		// If the first bit is a 1, either the current date is before the epoch
+		// start date, or we've gone too far into the future. Error out before we
+		// accidentally generate a negative ID.
+		return 0, fmt.Errorf("invalid epoch: %d", epoch)
+	}
+	idBytes[0] = epoch
+
+	id := binary.BigEndian.Uint64(idBytes)
+	return int64(id), nil
+}
+
+// looksLikeRandomID returns true if the input ID looks like it might belong to
+// the new schema which uses epoch-prefixed random IDs instead of auto-increment
+// columns. This is only necessary during the migration period when we are
+// reading from both the old and new schemas simultaneously.
+func looksLikeRandomID(id int64, now time.Time) bool {
+	// Compute the current and previous epochs. If the input ID starts with one of
+	// those two epochs, it's one of ours. Otherwise, it came from somewhere
+	// unknown and we should ask the old schema about it just in case.
+	currEpoch := uint8(now.Sub(time.Date(2024, 01, 01, 00, 00, 00, 00, time.UTC)) / (90 * 24 * time.Hour))
+	prevEpoch := uint8(now.Add(-90*24*time.Hour).Sub(time.Date(2024, 01, 01, 00, 00, 00, 00, time.UTC)) / (90 * 24 * time.Hour))
+
+	buf := make([]byte, 8)
+	binary.BigEndian.PutUint64(buf, uint64(id))
+	if buf[0] == currEpoch || buf[0] == prevEpoch {
+		return true
+	}
+	return false
+}
+
 const regFields = "id, jwk, jwk_sha256, contact, agreement, createdAt, LockCol, status"
 
 // ClearEmail removes the provided email address from one specified registration. If
@@ -1208,24 +1259,47 @@ type authzValidity struct {
 
 // getAuthorizationStatuses takes a sequence of authz IDs, and returns the
 // status and expiration date of each of them.
-func getAuthorizationStatuses(ctx context.Context, s db.Selector, ids []int64) ([]authzValidity, error) {
-	var params []interface{}
+func getAuthorizationStatuses(ctx context.Context, s db.Selector, now time.Time, ids []int64) ([]authzValidity, error) {
+	var oldIDs, newIDs []interface{}
 	for _, id := range ids {
-		params = append(params, id)
+		if features.Get().ReadNewOrderSchema && looksLikeRandomID(id, now) {
+			newIDs = append(newIDs, id)
+		} else {
+			oldIDs = append(oldIDs, id)
+		}
 	}
-	var validities []authzValidity
-	_, err := s.Select(
-		ctx,
-		&validities,
-		fmt.Sprintf("SELECT identifierType, identifierValue, status, expires FROM authz2 WHERE id IN (%s)",
-			db.QuestionMarks(len(ids))),
-		params...,
-	)
-	if err != nil {
-		return nil, err
+
+	var oldValidities []authzValidity
+	if len(oldIDs) > 0 {
+		_, err := s.Select(
+			ctx,
+			&oldValidities,
+			fmt.Sprintf(
+				"SELECT identifierType, identifierValue, status, expires FROM authz2 WHERE id IN (%s)",
+				db.QuestionMarks(len(oldIDs))),
+			oldIDs...,
+		)
+		if err != nil {
+			return nil, err
+		}
 	}
-	return validities, nil
+
+	var newValidities []authzValidity
+	if len(newIDs) > 0 {
+		_, err := s.Select(
+			ctx,
+			&newValidities,
+			fmt.Sprintf(
+				"SELECT identifierType, identifierValue, status, expires FROM authorizations WHERE id IN (%s)",
+				db.QuestionMarks(len(newIDs))),
+			newIDs...,
+		)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return append(oldValidities, newValidities...), nil
 }
 
 // authzForOrder retrieves the authorization IDs for an order.
@@ -1409,3 +1483,47 @@ type pausedModel struct {
 	PausedAt   time.Time  `db:"pausedAt"`
 	UnpausedAt *time.Time `db:"unpausedAt"`
 }
+
+// orders2Model represents a row in the "orders2" table.
+type orders2Model struct {
+	ID                int64
+	RegistrationID    int64
+	Created           time.Time
+	Expires           time.Time
+	AuthorizationIDs  []int64 // Actually a JSON list of ints
+	Profile           string
+	BeganProcessing   bool
+	Error             []byte
+	CertificateSerial string
+}
+
+// authorizationsModel represents a row in the "authorizations" table.
+type authorizationsModel struct {
+	ID              int64
+	RegistrationID  int64
+	IdentifierType  uint8
+	IdentifierValue string
+	Created         time.Time
+	Expires         time.Time
+	Profile         string
+	Challenges      uint8
+	Token           []byte
+	Status          uint8
+	ValidationIDs   []int64 // Actually a JSON list of ints
+}
+
+// validationsModel represents a row in the "validations" table.
+type validationsModel struct {
+	ID          int64
+	Challenge   uint8
+	AttemptedAt time.Time
+	Status      uint8
+	Record      []byte
+}
+
+// authzReuseModel represents a row in the "authzReuse" table.
+type authzReuseModel struct {
+	ID      string `db:"accountID_identifier"`
+	AuthzID int64
+	Expires time.Time
+}
diff --git a/sa/model_test.go b/sa/model_test.go
index b2ba57a3766..7be5c6b10cc 100644
--- a/sa/model_test.go
+++ b/sa/model_test.go
@@ -8,6 +8,7 @@ import (
 	"crypto/x509/pkix"
 	"database/sql"
 	"encoding/base64"
+	"encoding/binary"
 	"fmt"
 	"math/big"
 	"os"
@@ -27,6 +28,59 @@ import (
 	"github.com/letsencrypt/boulder/test"
 )
 
+func TestNewRandomID(t *testing.T) {
+	t.Parallel()
+
+	testCases := []struct {
+		name         string
+		date         time.Time
+		expectPrefix uint8
+		expectError  string
+	}{
+		{
+			name:        "in the past",
+			date:        time.Date(2023, 01, 01, 00, 00, 00, 00, time.UTC),
+			expectError: "invalid epoch",
+		},
+		{
+			name:         "first epoch",
+			date:         time.Date(2024, 05, 01, 00, 00, 00, 00, time.UTC),
+			expectPrefix: 1,
+		},
+		{
+			name:         "last epoch",
+			date:         time.Date(2055, 07, 01, 00, 00, 00, 00, time.UTC),
+			expectPrefix: 127,
+		},
+		{
+			name:        "far future",
+			date:        time.Date(2056, 01, 01, 00, 00, 00, 00, time.UTC),
+			expectError: "invalid epoch",
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+			fc := clock.NewFake()
+			fc.Set(tc.date)
+			id, err := newRandomID(fc.Now())
+
+			if tc.expectPrefix != 0 {
+				test.AssertNotError(t, err, "expected success")
+				buf := make([]byte, 8)
+				binary.BigEndian.PutUint64(buf, uint64(id))
+				test.AssertEquals(t, buf[0], tc.expectPrefix)
+			}
+
+			if tc.expectError != "" {
+				test.AssertError(t, err, "expected error")
+				test.AssertContains(t, err.Error(), tc.expectError)
+			}
+		})
+	}
+}
+
 func TestRegistrationModelToPb(t *testing.T) {
 	badCases := []struct {
 		name string
diff --git a/sa/sa.go b/sa/sa.go
index e3c7137d46c..26492e410eb 100644
--- a/sa/sa.go
+++ b/sa/sa.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"crypto/x509"
 	"database/sql"
+	"encoding/base64"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -24,6 +25,7 @@ import (
 	"github.com/letsencrypt/boulder/features"
 	bgrpc "github.com/letsencrypt/boulder/grpc"
 	blog "github.com/letsencrypt/boulder/log"
+	"github.com/letsencrypt/boulder/probs"
 	"github.com/letsencrypt/boulder/revocation"
 	sapb "github.com/letsencrypt/boulder/sa/proto"
 	"github.com/letsencrypt/boulder/unpause"
@@ -560,23 +562,27 @@ func (ssa *SQLStorageAuthority) DeactivateAuthorization2(ctx context.Context, re
 		return nil, errIncompleteRequest
 	}
 
-	_, err := ssa.dbMap.ExecContext(ctx,
-		`UPDATE authz2 SET status = :deactivated WHERE id = :id and status IN (:valid,:pending)`,
-		map[string]interface{}{
-			"deactivated": statusUint(core.StatusDeactivated),
-			"id":          req.Id,
-			"valid":       statusUint(core.StatusValid),
-			"pending":     statusUint(core.StatusPending),
-		},
+	query := `UPDATE authz2 SET status = :deactivated WHERE id = :id and status IN (:valid,:pending)`
+	if features.Get().WriteNewOrderSchema && looksLikeRandomID(req.Id, ssa.clk.Now()) {
+		query = `UPDATE authorizations SET status = :deactivated WHERE id = :id and status IN (:valid,:pending)`
+	}
+
+	_, err := ssa.dbMap.ExecContext(ctx, query, map[string]interface{}{
+		"deactivated": statusUint(core.StatusDeactivated),
+		"id":          req.Id,
+		"valid":       statusUint(core.StatusValid),
+		"pending":     statusUint(core.StatusPending),
+	},
 	)
 	if err != nil {
 		return nil, err
 	}
+
 	return &emptypb.Empty{}, nil
 }
 
 // NewOrderAndAuthzs adds the given authorizations to the database, adds their
-// autogenerated IDs to the given order, and then adds the order to the db.
+// IDs to the given order, and then adds the order to the db.
 // This is done inside a single transaction to prevent situations where new
 // authorizations are created, but then their corresponding order is never
 // created, leading to "invisible" pending authorizations.
@@ -597,7 +603,45 @@ func (ssa *SQLStorageAuthority) NewOrderAndAuthzs(ctx context.Context, req *sapb
 	output, err := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (interface{}, error) {
 		// First, insert all of the new authorizations and record their IDs.
 		newAuthzIDs := make([]int64, 0)
-		if features.Get().InsertAuthzsIndividually {
+		if features.Get().WriteNewOrderSchema {
+			for _, authz := range req.NewAuthzs {
+				id, err := newRandomID(ssa.clk.Now())
+				if err != nil {
+					return nil, fmt.Errorf("generating authz ID: %w", err)
+				}
+
+				token, err := base64.RawURLEncoding.DecodeString(authz.Token)
+				if err != nil {
+					return nil, fmt.Errorf("decoding challenge token: %w", err)
+				}
+
+				var challenges uint8
+				for _, challType := range authz.ChallengeTypes {
+					challenges |= 1 << challTypeToUint[challType]
+				}
+
+				am := authorizationsModel{
+					ID:              id,
+					RegistrationID:  authz.RegistrationID,
+					IdentifierType:  identifierTypeToUint[authz.Identifier.Type],
+					IdentifierValue: authz.Identifier.Value,
+					Created:         ssa.clk.Now(),
+					Expires:         authz.Expires.AsTime(),
+					Profile:         req.NewOrder.CertificateProfileName,
+					Challenges:      challenges,
+					Token:           token,
+					Status:          statusToUint[core.StatusPending],
+					ValidationIDs:   nil, // Only set when validation is attempted
+				}
+
+				err = tx.Insert(ctx, &am)
+				if err != nil {
+					return nil, fmt.Errorf("inserting authorizations row: %w", err)
+				}
+
+				newAuthzIDs = append(newAuthzIDs, id)
+			}
+		} else if features.Get().InsertAuthzsIndividually {
 			for _, authz := range req.NewAuthzs {
 				am, err := newAuthzReqToModel(authz)
 				if err != nil {
@@ -645,11 +689,35 @@ func (ssa *SQLStorageAuthority) NewOrderAndAuthzs(ctx context.Context, req *sapb
 			}
 		}
 
+		allAuthzIds := append(req.NewOrder.V2Authorizations, newAuthzIDs...)
+
 		// Second, insert the new order.
 		var orderID int64
 		var err error
 		created := ssa.clk.Now()
-		if features.Get().MultipleCertificateProfiles {
+		if features.Get().WriteNewOrderSchema {
+			id, err := newRandomID(ssa.clk.Now())
+			if err != nil {
+				return nil, fmt.Errorf("generating order ID: %w", err)
+			}
+
+			om := orders2Model{
+				ID:                id,
+				RegistrationID:    req.NewOrder.RegistrationID,
+				Created:           ssa.clk.Now(),
+				Expires:           req.NewOrder.Expires.AsTime(),
+				AuthorizationIDs:  allAuthzIds,
+				Profile:           req.NewOrder.CertificateProfileName,
+				BeganProcessing:   false, // Only set when finalization has begun
+				Error:             nil,   // Only set if finalization fails
+				CertificateSerial: "",    // Only set if finalization succeeds
+			}
+
+			err = tx.Insert(ctx, &om)
+			if err != nil {
+				return nil, err
+			}
+			orderID = id
+		} else if features.Get().MultipleCertificateProfiles {
 			omv2 := orderModelv2{
 				RegistrationID:         req.NewOrder.RegistrationID,
 				Expires:                req.NewOrder.Expires.AsTime(),
@@ -657,6 +725,9 @@ func (ssa *SQLStorageAuthority) NewOrderAndAuthzs(ctx context.Context, req *sapb
 				CertificateProfileName: &req.NewOrder.CertificateProfileName,
 			}
 			err = tx.Insert(ctx, &omv2)
+			if err != nil {
+				return nil, err
+			}
 			orderID = omv2.ID
 		} else {
 			omv1 := orderModelv1{
@@ -665,28 +736,29 @@ func (ssa *SQLStorageAuthority) NewOrderAndAuthzs(ctx context.Context, req *sapb
 				Created:        created,
 			}
 			err = tx.Insert(ctx, &omv1)
+			if err != nil {
+				return nil, err
+			}
 			orderID = omv1.ID
 		}
-		if err != nil {
-			return nil, err
-		}
 
-		// Third, insert all of the orderToAuthz relations.
-		// Have to combine the already-associated and newly-created authzs.
-		allAuthzIds := append(req.NewOrder.V2Authorizations, newAuthzIDs...)
-		inserter, err := db.NewMultiInserter("orderToAuthz2", []string{"orderID", "authzID"}, "")
-		if err != nil {
-			return nil, err
-		}
-		for _, id := range allAuthzIds {
-			err := inserter.Add([]interface{}{orderID, id})
+		if !features.Get().WriteNewOrderSchema {
+			// Third, insert all of the orderToAuthz relations.
+			// Have to combine the already-associated and newly-created authzs.
+			inserter, err := db.NewMultiInserter("orderToAuthz2", []string{"orderID", "authzID"}, "")
+			if err != nil {
+				return nil, err
+			}
+			for _, id := range allAuthzIds {
+				err := inserter.Add([]interface{}{orderID, id})
+				if err != nil {
+					return nil, err
+				}
+			}
+			_, err = inserter.Insert(ctx, tx)
 			if err != nil {
 				return nil, err
-		}
-		_, err = inserter.Insert(ctx, tx)
-		if err != nil {
-			return nil, err
 			}
 		}
 
 		// Fourth, insert the FQDNSet entry for the order.
@@ -705,7 +777,7 @@
 		}
 
 		// Get the partial Authorization objects for the order
-		authzValidityInfo, err := getAuthorizationStatuses(ctx, tx, allAuthzIds)
+		authzValidityInfo, err := getAuthorizationStatuses(ctx, tx, ssa.clk.Now(), allAuthzIds)
 		// If there was an error getting the authorizations, return it immediately
 		if err != nil {
 			return nil, err
@@ -767,29 +839,22 @@ func (ssa *SQLStorageAuthority) SetOrderProcessing(ctx context.Context, req *sap
 	if req.Id == 0 {
 		return nil, errIncompleteRequest
 	}
-	_, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (interface{}, error) {
-		result, err := tx.ExecContext(ctx, `
-		UPDATE orders
-		SET beganProcessing = ?
-		WHERE id = ?
-		AND beganProcessing = ?`,
-			true,
-			req.Id,
-			false)
-		if err != nil {
-			return nil, berrors.InternalServerError("error updating order to beganProcessing status")
-		}
-		n, err := result.RowsAffected()
-		if err != nil || n == 0 {
-			return nil, berrors.OrderNotReadyError("Order was already processing. This may indicate your client finalized the same order multiple times, possibly due to a client bug.")
-		}
+	query := `UPDATE orders SET beganProcessing = ? WHERE id = ? AND beganProcessing = ?`
+	if features.Get().WriteNewOrderSchema && looksLikeRandomID(req.Id, ssa.clk.Now()) {
+		query = `UPDATE orders2 SET beganProcessing = ? WHERE id = ? AND beganProcessing = ?`
+	}
 
-		return nil, nil
-	})
-	if overallError != nil {
-		return nil, overallError
+	result, err := ssa.dbMap.ExecContext(ctx, query, true, req.Id, false)
+	if err != nil {
+		return nil, berrors.InternalServerError("error updating order to beganProcessing status")
+	}
+
+	n, err := result.RowsAffected()
+	if err != nil || n == 0 {
+		return nil, berrors.OrderNotReadyError("Order was already processing. This may indicate your client finalized the same order multiple times, possibly due to a client bug.")
 	}
+
 	return &emptypb.Empty{}, nil
 }
 
@@ -798,35 +863,30 @@ func (ssa *SQLStorageAuthority) SetOrderError(ctx context.Context, req *sapb.Set
 	if req.Id == 0 || req.Error == nil {
 		return nil, errIncompleteRequest
 	}
-	_, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (interface{}, error) {
-		om, err := orderToModelv2(&corepb.Order{
-			Id:    req.Id,
-			Error: req.Error,
-		})
-		if err != nil {
-			return nil, err
-		}
-		result, err := tx.ExecContext(ctx, `
-		UPDATE orders
-		SET error = ?
-		WHERE id = ?`,
-			om.Error,
-			om.ID)
-		if err != nil {
-			return nil, berrors.InternalServerError("error updating order error field")
-		}
+	om, err := orderToModelv2(&corepb.Order{
+		Id:    req.Id,
+		Error: req.Error,
+	})
+	if err != nil {
+		return nil, err
+	}
 
-		n, err := result.RowsAffected()
-		if err != nil || n == 0 {
-			return nil, berrors.InternalServerError("no order updated with new error field")
-		}
+	query := `UPDATE orders SET error = ? WHERE id = ?`
+	if features.Get().WriteNewOrderSchema && looksLikeRandomID(req.Id, ssa.clk.Now()) {
+		query = `UPDATE orders2 SET error = ? WHERE id = ?`
+	}
 
-		return nil, nil
-	})
-	if overallError != nil {
-		return nil, overallError
+	result, err := ssa.dbMap.ExecContext(ctx, query, om.Error, om.ID)
+	if err != nil {
+		return nil, berrors.InternalServerError("error updating order error field")
+	}
+
+	n, err := result.RowsAffected()
+	if err != nil || n == 0 {
+		return nil, berrors.InternalServerError("no order updated with new error field")
 	}
+
 	return &emptypb.Empty{}, nil
 }
 
@@ -838,14 +898,14 @@ func (ssa *SQLStorageAuthority) FinalizeOrder(ctx context.Context, req *sapb.Fin
 	if req.Id == 0 || req.CertificateSerial == "" {
 		return nil, errIncompleteRequest
 	}
+
+	query := `UPDATE orders SET certificateSerial = ? WHERE id = ? AND beganProcessing = true`
+	if features.Get().WriteNewOrderSchema && looksLikeRandomID(req.Id, ssa.clk.Now()) {
+		query = `UPDATE orders2 SET certificateSerial = ? WHERE id = ? AND beganProcessing = true`
+	}
+
 	_, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (interface{}, error) {
-		result, err := tx.ExecContext(ctx, `
-		UPDATE orders
-		SET certificateSerial = ?
-		WHERE id = ? AND
-		beganProcessing = true`,
-			req.CertificateSerial,
-			req.Id)
+		result, err := tx.ExecContext(ctx, query, req.CertificateSerial, req.Id)
 		if err != nil {
 			return nil, berrors.InternalServerError("error updating order for finalization")
 		}
@@ -872,6 +932,7 @@ func (ssa *SQLStorageAuthority) FinalizeOrder(ctx context.Context, req *sapb.Fin
 	})
 	if overallError != nil {
 		return nil, overallError
 	}
+
 	return &emptypb.Empty{}, nil
 }
 
@@ -887,6 +948,11 @@ func (ssa *SQLStorageAuthority) FinalizeAuthorization2(ctx context.Context, req
 	if req.Status != string(core.StatusValid) && req.Status != string(core.StatusInvalid) {
 		return nil, berrors.InternalServerError("authorization must have status valid or invalid")
 	}
+
+	if features.Get().WriteNewOrderSchema && looksLikeRandomID(req.Id, ssa.clk.Now()) {
+		return ssa.finalizeAuthorization(ctx, req)
+	}
+
 	query := `UPDATE authz2 SET
 		status = :status,
 		attempted = :attempted,
@@ -962,6 +1028,107 @@ func (ssa *SQLStorageAuthority) FinalizeAuthorization2(ctx context.Context, req
 	return &emptypb.Empty{}, nil
 }
 
+// finalizeAuthorization inserts a new validation record into the validations
+// table, and then updates the identified authorizations row to point to the
+// newly-inserted validation.
+func (ssa *SQLStorageAuthority) finalizeAuthorization(ctx context.Context, req *sapb.FinalizeAuthorizationRequest) (*emptypb.Empty, error) {
+	// Convert the validation records and error to a json blob for storage in
+	// the validations table.
+	type recordJSON struct {
+		Records []core.ValidationRecord
+		Err     *probs.ProblemDetails
+	}
+
+	var records []core.ValidationRecord
+	for _, recPB := range req.ValidationRecords {
+		rec, err := bgrpc.PBToValidationRecord(recPB)
+		if err != nil {
+			return nil, err
+		}
+		records = append(records, rec)
+	}
+
+	var verr *probs.ProblemDetails
+	if req.ValidationError != nil {
+		verrShadow, err := bgrpc.PBToProblemDetails(req.ValidationError)
+		if err != nil {
+			return nil, err
+		}
+		verr = verrShadow
+	}
+
+	record, err := json.Marshal(recordJSON{Records: records, Err: verr})
+	if err != nil {
+		return nil, fmt.Errorf("failed to convert validation record to json: %w", err)
+	}
+
+	id, err := newRandomID(ssa.clk.Now())
+	if err != nil {
+		return nil, err
+	}
+
+	vm := validationsModel{
+		ID:          id,
+		Challenge:   challTypeToUint[req.Attempted],
+		AttemptedAt: req.AttemptedAt.AsTime(),
+		Status:      statusUint(core.AcmeStatus(req.Status)),
+		Record:      record,
+	}
+
+	_, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (interface{}, error) {
+		// Read the authz row to get its current status and list of validation IDs.
+		var authz authorizationsModel
+		err := tx.SelectOne(ctx, &authz, "SELECT status, validationIDs FROM authorizations WHERE id = ?", req.Id)
+		if err != nil {
+			return nil, fmt.Errorf("retrieving authz: %w", err)
+		}
+
+		if authz.Status != statusUint(core.StatusPending) {
+			return nil, fmt.Errorf("cannot finalize authz with status %q", uintToStatus[authz.Status])
+		}
+
+		// Insert the validation record.
+		err = tx.Insert(ctx, &vm)
+		if err != nil {
+			return nil, fmt.Errorf("inserting new validation: %w", err)
+		}
+
+		// Update the authz row.
+		res, err := tx.ExecContext(
+			ctx, "UPDATE authorizations SET validationIDs = ? WHERE id = ? AND validationIDs = ?",
+			append(authz.ValidationIDs[:], vm.ID), req.Id, authz.ValidationIDs)
+		if err != nil {
+			return nil, fmt.Errorf("updating authz: %w", err)
+		}
+		rows, err := res.RowsAffected()
+		if err != nil {
+			return nil, err
+		}
+		if rows != 1 {
+			return nil, fmt.Errorf("unexpected number of rows affected (%d)", rows)
+		}
+
+		// Delete the orderFQDNSet row for the order now that it has been finalized.
+		// We use this table for order reuse and should not reuse a finalized order.
+		err = deleteOrderFQDNSet(ctx, tx, req.Id)
+		if err != nil {
+			return nil, err
+		}
+
+		err = setReplacementOrderFinalized(ctx, tx, req.Id)
+		if err != nil {
+			return nil, err
+		}
+
+		return nil, nil
+	})
+	if overallError != nil {
+		return nil, overallError
+	}
+
+	return &emptypb.Empty{}, nil
+}
+
 // addRevokedCertificate is a helper used by both RevokeCertificate and
 // UpdateRevokedCertificate. It inserts a new row into the revokedCertificates
 // table based on the contents of the input request. The second argument must be
diff --git a/sa/saro.go b/sa/saro.go
index 129b07daacc..e73c82703c8 100644
--- a/sa/saro.go
+++ b/sa/saro.go
@@ -549,7 +549,7 @@ func (ssa *SQLStorageAuthorityRO) GetOrder(ctx context.Context, req *sapb.OrderR
 		order.V2Authorizations = v2AuthzIDs
 
 		// Get the partial Authorization objects for the order
-		authzValidityInfo, err := getAuthorizationStatuses(ctx, tx, order.V2Authorizations)
+		authzValidityInfo, err := getAuthorizationStatuses(ctx, tx, ssa.clk.Now(), order.V2Authorizations)
 		// If there was an error getting the authorizations, return it immediately
 		if err != nil {
 			return nil, err
diff --git a/sa/type-converter.go b/sa/type-converter.go
index d7d92eb7942..5b9b5f7f17d 100644
--- a/sa/type-converter.go
+++ b/sa/type-converter.go
@@ -20,7 +20,7 @@ type BoulderTypeConverter struct{}
 // ToDb converts a Boulder object to one suitable for the DB representation.
 func (tc BoulderTypeConverter) ToDb(val interface{}) (interface{}, error) {
 	switch t := val.(type) {
-	case identifier.ACMEIdentifier, []core.Challenge, []string, [][]int:
+	case identifier.ACMEIdentifier, []core.Challenge, []string, [][]int, []int64:
 		jsonBytes, err := json.Marshal(t)
 		if err != nil {
 			return nil, err
@@ -56,7 +56,7 @@ func (tc BoulderTypeConverter) ToDb(val interface{}) (interface{}, error) {
 // FromDb converts a DB representation back into a Boulder object.
 func (tc BoulderTypeConverter) FromDb(target interface{}) (borp.CustomScanner, bool) {
 	switch target.(type) {
-	case *identifier.ACMEIdentifier, *[]core.Challenge, *[]string, *[][]int:
+	case *identifier.ACMEIdentifier, *[]core.Challenge, *[]string, *[][]int, *[]int64:
 		binder := func(holder, target interface{}) error {
 			s, ok := holder.(*string)
 			if !ok {
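
For reference, a minimal standalone sketch of the epoch-prefixed ID scheme introduced in sa/model.go (newRandomID and looksLikeRandomID). It assumes the same 90-day epochs counted from 2024-01-01 described in the diff; the helper names here (epochStart, epochOf, newID, isNewSchemaID) are illustrative only and are not part of the change.

// Standalone sketch of the epoch-prefixed random ID scheme. Not Boulder code.
package main

import (
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"time"
)

// epochStart matches the arbitrary epoch origin used in the migration: 2024-01-01 UTC.
var epochStart = time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)

// epochOf returns the 90-day epoch number for a given time.
func epochOf(t time.Time) uint8 {
	return uint8(t.Sub(epochStart) / (90 * 24 * time.Hour))
}

// newID builds a 64-bit ID whose first byte is the current epoch and whose
// remaining 7 bytes are random, mirroring newRandomID.
func newID(now time.Time) (int64, error) {
	buf := make([]byte, 8)
	if _, err := rand.Read(buf[1:]); err != nil {
		return 0, err
	}
	epoch := epochOf(now)
	if epoch&0x80 != 0 {
		// Before the epoch start, or more than ~31 years after it: refuse
		// rather than produce a negative ID.
		return 0, fmt.Errorf("invalid epoch: %d", epoch)
	}
	buf[0] = epoch
	return int64(binary.BigEndian.Uint64(buf)), nil
}

// isNewSchemaID reports whether an ID carries the current or previous epoch in
// its first byte, mirroring looksLikeRandomID's routing test.
func isNewSchemaID(id int64, now time.Time) bool {
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], uint64(id))
	return buf[0] == epochOf(now) || buf[0] == epochOf(now.Add(-90*24*time.Hour))
}

func main() {
	now := time.Date(2025, 3, 1, 0, 0, 0, 0, time.UTC)
	id, _ := newID(now)
	fmt.Printf("id=%d epoch=%d looksLikeNewSchema=%v\n", id, uint64(id)>>56, isNewSchemaID(id, now))
}

Because the epoch lives in the top byte, dropping an old epoch's data is a cheap range condition on the primary key, and the read path can route an incoming ID to the old or new schema without a lookup.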
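
The authorizations.challenges tinyint stores the offered challenge types as a bitmask, built in NewOrderAndAuthzs with `challenges |= 1 << challTypeToUint[challType]`. The sketch below shows the packing and unpacking idea in isolation; the numeric mapping in challBit is an assumption for illustration, not Boulder's actual challTypeToUint values.

// Sketch of the challenge-type bitmask stored in authorizations.challenges.
package main

import "fmt"

// challBit is an illustrative stand-in for challTypeToUint.
var challBit = map[string]uint8{
	"http-01":     0,
	"dns-01":      1,
	"tls-alpn-01": 2,
}

// packChallenges folds a set of challenge types into a single uint8.
func packChallenges(types []string) uint8 {
	var challenges uint8
	for _, t := range types {
		challenges |= 1 << challBit[t]
	}
	return challenges
}

// unpackChallenges recovers the challenge types from the stored bitmask.
func unpackChallenges(challenges uint8) []string {
	var out []string
	for t, bit := range challBit {
		if challenges&(1<<bit) != 0 {
			out = append(out, t)
		}
	}
	return out
}

func main() {
	mask := packChallenges([]string{"http-01", "dns-01"})
	fmt.Printf("mask=%08b types=%v\n", mask, unpackChallenges(mask))
}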
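
The type-converter change is what lets orders2Model.AuthorizationIDs and authorizationsModel.ValidationIDs ([]int64 fields) map onto the new json columns: values are marshaled to a JSON string on the way in and unmarshaled on the way out. The following is a simplified stand-in for that round trip, not borp's actual plumbing; toDB and fromDB are hypothetical names.

// Sketch of the []int64 <-> JSON column round trip.
package main

import (
	"encoding/json"
	"fmt"
)

// toDB mimics ToDb: a []int64 becomes a JSON string for the json column.
func toDB(ids []int64) (string, error) {
	b, err := json.Marshal(ids)
	return string(b), err
}

// fromDB mimics the FromDb binder: the string read from the column is
// unmarshaled back into *[]int64.
func fromDB(column string, target *[]int64) error {
	return json.Unmarshal([]byte(column), target)
}

func main() {
	stored, _ := toDB([]int64{86825493923115523, 86825493923115524})
	fmt.Println("column value:", stored)

	var ids []int64
	_ = fromDB(stored, &ids)
	fmt.Println("scanned back:", ids)
}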