Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

WIP: New schema for orders, authorizations, and validations #7773

Draft
wants to merge 9 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 16 additions & 0 deletions features/features.go
Original file line number Diff line number Diff line change
Expand Up @@ -126,6 +126,22 @@ type Config struct {
// This feature flag also causes CAA checks to happen after all remote VAs
// have passed DCV.
EnforceMPIC bool

// ReadNewOrderSchema causes the SA to attempt to read from the new orders,
// authorizations, and validations tables. This allows us to continue reading
// from these tables even if we have to roll back the flag which causes us
// to write to them.
// - Simple select-by-id queries go to whichever schema hosts the row being selected
// - Complex queries go solely to the new schema (this means that authz and
// order reuse work only in the new schema).
ReadNewOrderSchema bool

// WriteNewOrderSchema causes the SA to write to the new orders,
// authorizations, and validations tables. Do not enable this flag unless
// ReadNewOrderSchema is also enabled.
// - Inserts go solely to the new schema
// - Updates go to whichever schema hosts the row being updated
WriteNewOrderSchema bool
}

var fMu = new(sync.RWMutex)
Expand Down
4 changes: 4 additions & 0 deletions sa/database.go
Original file line number Diff line number Diff line change
Expand Up @@ -287,6 +287,10 @@ func initTables(dbMap *borp.DbMap) {
dbMap.AddTableWithName(revokedCertModel{}, "revokedCertificates").SetKeys(true, "ID")
dbMap.AddTableWithName(replacementOrderModel{}, "replacementOrders").SetKeys(true, "ID")
dbMap.AddTableWithName(pausedModel{}, "paused")
dbMap.AddTableWithName(orders2Model{}, "orders2")
dbMap.AddTableWithName(authorizationsModel{}, "authorizations")
dbMap.AddTableWithName(validationsModel{}, "validations")
dbMap.AddTableWithName(authzReuseModel{}, "authzReuse")

// Read-only maps used for selecting subsets of columns.
dbMap.AddTableWithName(CertStatusMetadata{}, "certificateStatus")
Expand Down
74 changes: 74 additions & 0 deletions sa/db-next/boulder_sa/20240801000000_OrderSchema.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
-- +migrate Up
-- SQL in section 'Up' is executed when this migration is applied

-- NOTE: none of the `id` columns below are AUTO_INCREMENT. Rows are inserted
-- with application-generated IDs, so each table declares a plain bigint
-- primary key.

-- The orders2 table holds one row per ACME Order object. The authorizationIDs
-- column contains an opaque JSON blob which the SA can use to find the
-- associated authorizations without requiring db-level foreign keys. Most
-- orders are created with status "pending", but may be created with status
-- "ready" if all of their authorizations are reused and already valid. Orders
-- transition to status "processing" when finalization begins. The error field
-- is populated only if an error occurs during finalization and the order moves
-- to the "invalid" state; errors during validation are reflected elsewhere.
CREATE TABLE `orders2` (
`id` bigint(20) UNSIGNED NOT NULL,
`registrationID` bigint(20) UNSIGNED NOT NULL,
`created` datetime NOT NULL,
`expires` datetime NOT NULL,
`authorizationIDs` json NOT NULL,
`profile` varchar(255) NOT NULL,
`beganProcessing` boolean NOT NULL,
`error` mediumblob DEFAULT NULL,
`certificateSerial` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

-- The authorizations table holds one row per ACME Authorization object and
-- associated challenges. It is always created with status "pending". After
-- one of its challenges is attempted, it will transition into either status
-- "valid" or "invalid", and the validationIDs column will be updated to point
-- to a new row in the validations table containing the record of that attempt.
CREATE TABLE `authorizations` (
`id` bigint(20) UNSIGNED NOT NULL,
`registrationID` bigint(20) UNSIGNED NOT NULL,
`identifierType` tinyint(4) NOT NULL,
`identifierValue` varchar(255) NOT NULL,
`created` datetime NOT NULL,
`expires` datetime NOT NULL,
`profile` varchar(255) NOT NULL,
`challenges` tinyint(4) NOT NULL,
`token` binary(32) NOT NULL,
`status` tinyint(4) NOT NULL,
`validationIDs` json DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;


-- The validations table holds records of completed validation attempts,
-- including the validation method used, the resulting status (valid or
-- invalid), and an opaque blob of our audit record.
CREATE TABLE `validations` (
`id` bigint(20) UNSIGNED NOT NULL,
`registrationID` bigint(20) UNSIGNED NOT NULL,
`challenge` tinyint(4) NOT NULL,
`attemptedAt` datetime NOT NULL,
`status` tinyint(4) NOT NULL,
`record` json NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

-- The authzReuse table exists solely to allow cheap lookups of reusable authz
-- IDs. This allows us to not have expensive indices on the authorizations table.
-- NOTE(review): the expires column presumably supports purging stale rows --
-- confirm against the cleanup job before relying on it.
CREATE TABLE `authzReuse` (
`accountID_identifier` VARCHAR(300) NOT NULL,
`authzID` bigint(20) UNSIGNED NOT NULL,
`expires` DATETIME NOT NULL,
PRIMARY KEY (`accountID_identifier`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

-- +migrate Down
-- SQL section 'Down' is executed when this migration is rolled back

DROP TABLE `authzReuse`;
DROP TABLE `validations`;
DROP TABLE `authorizations`;
DROP TABLE `orders2`;
146 changes: 132 additions & 14 deletions sa/model.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,12 @@ package sa

import (
"context"
"crypto/rand"
"crypto/sha256"
"crypto/x509"
"database/sql"
"encoding/base64"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
Expand All @@ -23,6 +25,7 @@ import (
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/db"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/features"
"github.com/letsencrypt/boulder/grpc"
"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/probs"
Expand Down Expand Up @@ -59,6 +62,54 @@ func badJSONError(msg string, jsonData []byte, err error) error {
}
}

// newRandomID creates a 64-bit mostly-random number to be used as the unique ID
// column in a table which no longer uses auto_increment IDs. It takes the
// current time as an argument so that it can include the current "epoch" as the
// first byte of the ID, for the sake of easily dropping old data.
func newRandomID(now time.Time) (int64, error) {
idBytes := make([]byte, 8) // 8 bytes is 64 bits

// Read random bits into the lower 7 bytes of the id.
_, err := rand.Read(idBytes[1:])
if err != nil {
return 0, fmt.Errorf("while generating unique database id: %w", err)
}

// Epochs are arbitrarily chosen to be 90 day chunks counting from the start
// of 2024. This gives us 127 * 90 = ~31 years worth of epochs before we have
// to worry about a rollover.
epoch := uint8(now.Sub(time.Date(2024, 01, 01, 00, 00, 00, 00, time.UTC)) / (90 * 24 * time.Hour))
if epoch&0x80 != 0 {
// If the first bit is a 1, either the current date is before the epoch
// start date, or we've gone too far into the future. Error out before we
// accidentally generate a negative ID.
return 0, fmt.Errorf("invalid epoch: %d", epoch)
}
idBytes[0] = epoch

id := binary.BigEndian.Uint64(idBytes)
return int64(id), nil
}

// looksLikeRandomID returns true if the input ID looks like it might belong to
// the new schema which uses epoch-prefixed random IDs instead of auto-increment
// columns. This is only necessary during the migration period when we are
// reading from both the old and new schemas simultaneously.
func looksLikeRandomID(id int64, now time.Time) bool {
// Compute the current and previous epochs. If the input ID starts with one of
// those two epochs, it's one of ours. Otherwise, it came from somewhere
// unknown and we should ask the old schema about it just in case.
currEpoch := uint8(now.Sub(time.Date(2024, 01, 01, 00, 00, 00, 00, time.UTC)) / (90 * 24 * time.Hour))
prevEpoch := uint8(now.Add(-90*24*time.Hour).Sub(time.Date(2024, 01, 01, 00, 00, 00, 00, time.UTC)) / (90 * 24 * time.Hour))

buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, uint64(id))
if buf[0] == currEpoch || buf[0] == prevEpoch {
return true
}
return false
}

const regFields = "id, jwk, jwk_sha256, contact, agreement, createdAt, LockCol, status"

// ClearEmail removes the provided email address from one specified registration. If
Expand Down Expand Up @@ -1208,24 +1259,47 @@ type authzValidity struct {

// getAuthorizationStatuses takes a sequence of authz IDs, and returns the
// status and expiration date of each of them.
func getAuthorizationStatuses(ctx context.Context, s db.Selector, ids []int64) ([]authzValidity, error) {
var params []interface{}
func getAuthorizationStatuses(ctx context.Context, s db.Selector, now time.Time, ids []int64) ([]authzValidity, error) {
var oldIDs, newIDs []interface{}
for _, id := range ids {
params = append(params, id)
if features.Get().ReadNewOrderSchema && looksLikeRandomID(id, now) {
newIDs = append(newIDs, id)
} else {
oldIDs = append(oldIDs, id)
}
}
var validities []authzValidity
_, err := s.Select(
ctx,
&validities,
fmt.Sprintf("SELECT identifierType, identifierValue, status, expires FROM authz2 WHERE id IN (%s)",
db.QuestionMarks(len(ids))),
params...,
)
if err != nil {
return nil, err

var oldValidities []authzValidity
if len(oldIDs) > 0 {
_, err := s.Select(
ctx,
&oldValidities,
fmt.Sprintf(
"SELECT identifierType, identifierValue, status, expires FROM authz2 WHERE id IN (%s)",
db.QuestionMarks(len(ids))),
oldIDs...,
)
if err != nil {
return nil, err
}
}

return validities, nil
var newValidities []authzValidity
if len(newIDs) > 0 {
_, err := s.Select(
ctx,
&newValidities,
fmt.Sprintf(
"SELECT identifierType, identifierValue, status, expires FROM authorizations WHERE id IN (%s)",
db.QuestionMarks(len(ids))),
newIDs...,
)
if err != nil {
return nil, err
}
}

return append(oldValidities, newValidities...), nil
}

// authzForOrder retrieves the authorization IDs for an order.
Expand Down Expand Up @@ -1409,3 +1483,47 @@ type pausedModel struct {
PausedAt time.Time `db:"pausedAt"`
UnpausedAt *time.Time `db:"unpausedAt"`
}

// orders2Model represents a row in the "orders2" table, which holds one row
// per ACME Order object.
type orders2Model struct {
	ID             int64
	RegistrationID int64
	Created        time.Time
	Expires        time.Time
	// AuthorizationIDs is stored in the json "authorizationIDs" column.
	// NOTE(review): confirm the ORM layer actually (de)serializes []int64
	// to/from JSON for this column.
	AuthorizationIDs []int64 // Actually a JSON list of ints
	Profile          string
	BeganProcessing  bool
	// Error is populated only if an error occurs during finalization (see the
	// orders2 migration comments).
	Error             []byte
	CertificateSerial string
}

// authorizationsModel represents a row in the "authorizations" table, which
// holds one row per ACME Authorization object and its associated challenges.
type authorizationsModel struct {
	ID              int64
	RegistrationID  int64
	IdentifierType  uint8
	IdentifierValue string
	Created         time.Time
	Expires         time.Time
	Profile         string
	// Challenges maps to a tinyint column. NOTE(review): the encoding (bitmap
	// of challenge types vs. a count) is not evident here -- confirm.
	Challenges uint8
	// Token maps to a binary(32) column.
	Token  []byte
	Status uint8
	// ValidationIDs is stored in the json "validationIDs" column, populated
	// once a challenge has been attempted.
	ValidationIDs []int64 // Actually a JSON list of ints
}

// validationsModel represents a row in the "validations" table.
type validationsModel struct {
ID int64
Challenge uint8
AttemptedAt time.Time
Status uint8
Record []byte
}

// authzReuseModel represents a row in the "authzReuse" table.
type authzReuseModel struct {
ID int64 `db:"accountID_identifier"`
AuthzID int64
Expires time.Time
}
54 changes: 54 additions & 0 deletions sa/model_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import (
"crypto/x509/pkix"
"database/sql"
"encoding/base64"
"encoding/binary"
"fmt"
"math/big"
"os"
Expand All @@ -27,6 +28,59 @@ import (
"github.com/letsencrypt/boulder/test"
)

// TestNewRandomID verifies that IDs carry the expected epoch prefix byte, and
// that dates before the epoch start or past the last representable epoch
// produce errors.
func TestNewRandomID(t *testing.T) {
	t.Parallel()

	for _, tc := range []struct {
		name         string
		date         time.Time
		expectPrefix uint8
		expectError  string
	}{
		{
			name:        "in the past",
			date:        time.Date(2023, 01, 01, 00, 00, 00, 00, time.UTC),
			expectError: "invalid epoch",
		},
		{
			name:         "first epoch",
			date:         time.Date(2024, 05, 01, 00, 00, 00, 00, time.UTC),
			expectPrefix: 1,
		},
		{
			name:         "last epoch",
			date:         time.Date(2055, 07, 01, 00, 00, 00, 00, time.UTC),
			expectPrefix: 127,
		},
		{
			name:        "far future",
			date:        time.Date(2056, 01, 01, 00, 00, 00, 00, time.UTC),
			expectError: "invalid epoch",
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			fc := clock.NewFake()
			fc.Set(tc.date)
			id, err := newRandomID(fc.Now())

			if tc.expectError != "" {
				test.AssertError(t, err, "expected error")
				test.AssertContains(t, err.Error(), tc.expectError)
				return
			}

			test.AssertNotError(t, err, "expected success")
			var buf [8]byte
			binary.BigEndian.PutUint64(buf[:], uint64(id))
			test.AssertEquals(t, buf[0], tc.expectPrefix)
		})
	}
}

func TestRegistrationModelToPb(t *testing.T) {
badCases := []struct {
name string
Expand Down
Loading
Loading