Skip to content

Commit

Permalink
Merge branch 'celestia-v3.1.2' into celestia-v3.2.1
Browse files Browse the repository at this point in the history
  • Loading branch information
Ferret-san committed Sep 24, 2024
2 parents d81324d + ad4ae15 commit a435a86
Show file tree
Hide file tree
Showing 36 changed files with 1,703 additions and 103 deletions.
4 changes: 2 additions & 2 deletions .gitmodules
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,8 @@
url = https://github.com/google/brotli.git
[submodule "contracts"]
path = contracts
url = https://github.com/OffchainLabs/nitro-contracts.git
branch = develop
url = https://github.com/celestiaorg/nitro-contracts.git
branch = contracts-v1.2.1
[submodule "arbitrator/wasm-testsuite/testsuite"]
path = arbitrator/wasm-testsuite/testsuite
url = https://github.com/WebAssembly/testsuite.git
Expand Down
6 changes: 4 additions & 2 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -42,10 +42,10 @@ WORKDIR /workspace
RUN apt-get update && apt-get install -y curl build-essential=12.9

FROM wasm-base AS wasm-libs-builder
# clang / lld used by soft-float wasm
# clang / lld used by soft-float wasm
RUN apt-get update && \
apt-get install -y clang=1:14.0-55.7~deb12u1 lld=1:14.0-55.7~deb12u1 wabt
# pinned rust 1.80.1
# pinned rust 1.80.1
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain 1.80.1 --target x86_64-unknown-linux-gnu wasm32-unknown-unknown wasm32-wasi
COPY ./Makefile ./
COPY arbitrator/Cargo.* arbitrator/
Expand Down Expand Up @@ -77,6 +77,7 @@ COPY ./blsSignatures ./blsSignatures
COPY ./cmd/chaininfo ./cmd/chaininfo
COPY ./cmd/replay ./cmd/replay
COPY ./das/dastree ./das/dastree
COPY ./das/celestia ./das/celestia
COPY ./precompiles ./precompiles
COPY ./statetransfer ./statetransfer
COPY ./util ./util
Expand All @@ -90,6 +91,7 @@ COPY ./fastcache ./fastcache
COPY ./go-ethereum ./go-ethereum
COPY --from=brotli-wasm-export / target/
COPY --from=contracts-builder workspace/contracts/build/contracts/src/precompiles/ contracts/build/contracts/src/precompiles/
COPY --from=contracts-builder workspace/contracts/build/contracts/src/celestia/ contracts/build/contracts/src/celestia/
COPY --from=contracts-builder workspace/contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/UpgradeExecutor.json contracts/
COPY --from=contracts-builder workspace/.make/ .make/
RUN PATH="$PATH:/usr/local/go/bin" NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-wasm-bin
Expand Down
2 changes: 1 addition & 1 deletion arbitrator/langs/bf
Submodule bf updated 1 files
+282 −846 Cargo.lock
8 changes: 8 additions & 0 deletions arbitrator/prover/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -417,3 +417,11 @@ pub unsafe extern "C" fn arbitrator_gen_proof(mach: *mut Machine) -> RustByteArr
// Releases a proof buffer previously handed across the FFI boundary by
// rebuilding the owning Vec from its raw parts and dropping it.
//
// Safety: `proof` must contain the exact ptr/len/capacity triple originally
// produced by the Rust side (presumably by arbitrator_gen_proof — confirm),
// and must not be freed more than once; violating either is undefined
// behavior (double free / invalid dealloc).
pub unsafe extern "C" fn arbitrator_free_proof(proof: RustByteArray) {
drop(Vec::from_raw_parts(proof.ptr, proof.len, proof.capacity))
}

#[no_mangle]
pub unsafe extern "C" fn arbitrator_get_opcode(mach: *mut Machine) -> u16 {
    // Report the opcode of the instruction the machine would execute next.
    //
    // Safety: `mach` must be a valid, non-null pointer to a live Machine.
    // Panics (with the message below) when the machine has no next
    // instruction to execute.
    (*mach)
        .get_next_instruction()
        .expect("Failed to get next opcode for Machine")
        .opcode
        .repr()
}
7 changes: 7 additions & 0 deletions arbitrator/prover/src/machine.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3051,6 +3051,13 @@ impl Machine {
{
data.push(0); // inbox proof type
out!(msg_data);
match inbox_identifier {
InboxIdentifier::Sequencer => {
out!(msg_idx.to_be_bytes());
data.push(0x0);
}
InboxIdentifier::Delayed => data.push(0x1),
}
}
} else {
unreachable!()
Expand Down
32 changes: 20 additions & 12 deletions arbnode/batch_poster.go
Original file line number Diff line number Diff line change
Expand Up @@ -105,8 +105,8 @@ type BatchPoster struct {
bridgeAddr common.Address
gasRefunderAddr common.Address
building *buildingBatch
dapWriter daprovider.Writer
dapReaders []daprovider.Reader
dapWriters []daprovider.Writer
dataPoster *dataposter.DataPoster
redisLock *redislock.Simple
messagesPerBatch *arbmath.MovingAverage[uint64]
Expand Down Expand Up @@ -299,7 +299,7 @@ type BatchPosterOpts struct {
Config BatchPosterConfigFetcher
DeployInfo *chaininfo.RollupAddresses
TransactOpts *bind.TransactOpts
DAPWriter daprovider.Writer
DAPWriters []daprovider.Writer
ParentChainID *big.Int
DAPReaders []daprovider.Reader
}
Expand Down Expand Up @@ -346,7 +346,7 @@ func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, e
seqInboxAddr: opts.DeployInfo.SequencerInbox,
gasRefunderAddr: opts.Config().gasRefunder,
bridgeAddr: opts.DeployInfo.Bridge,
dapWriter: opts.DAPWriter,
dapWriters: opts.DAPWriters,
redisLock: redisLock,
dapReaders: opts.DAPReaders,
}
Expand Down Expand Up @@ -1126,7 +1126,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
}
var use4844 bool
config := b.config()
if config.Post4844Blobs && b.dapWriter == nil && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil {
if config.Post4844Blobs && len(b.dapWriters) == 0 && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil {
arbOSVersion, err := b.arbOSVersionGetter.ArbOSVersionForMessageNumber(arbutil.MessageIndex(arbmath.SaturatingUSub(uint64(batchPosition.MessageCount), 1)))
if err != nil {
return false, err
Expand Down Expand Up @@ -1351,7 +1351,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
return false, nil
}

if b.dapWriter != nil {
if len(b.dapWriters) > 0 {
if !b.redisLock.AttemptLock(ctx) {
return false, errAttemptLockFailed
}
Expand All @@ -1365,15 +1365,23 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
batchPosterDAFailureCounter.Inc(1)
return false, fmt.Errorf("%w: nonce changed from %d to %d while creating batch", storage.ErrStorageRace, nonce, gotNonce)
}
// #nosec G115
sequencerMsg, err = b.dapWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), config.DisableDapFallbackStoreDataOnChain)
if err != nil {
batchPosterDAFailureCounter.Inc(1)
return false, err

// attempt to store data using one of the dapWriters, if it fails and fallbacks are disabled, return a hard error
for _, writer := range b.dapWriters {
// #nosec G115
sequencerMsg, err = writer.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), config.DisableDapFallbackStoreDataOnChain)
if err != nil {
if config.DisableDapFallbackStoreDataOnChain {
log.Error("Error while attempting to post batch and on chain fallback is disabled", "error", err)
return false, err
}
log.Error("Error when trying to store data with dapWriter", "error", err)
continue
}
// if we successfully posted a batch with a dapWriter, we move on and ignore the rest
break
}

batchPosterDASuccessCounter.Inc(1)
batchPosterDALastSuccessfulActionGauge.Update(time.Now().Unix())
}

prevMessageCount := batchPosition.MessageCount
Expand Down
46 changes: 42 additions & 4 deletions arbnode/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,8 @@ import (
"github.com/offchainlabs/nitro/broadcaster"
"github.com/offchainlabs/nitro/cmd/chaininfo"
"github.com/offchainlabs/nitro/das"
"github.com/offchainlabs/nitro/das/celestia"
celestiaTypes "github.com/offchainlabs/nitro/das/celestia/types"
"github.com/offchainlabs/nitro/execution"
"github.com/offchainlabs/nitro/execution/gethexec"
"github.com/offchainlabs/nitro/solgen/go/bridgegen"
Expand Down Expand Up @@ -93,6 +95,8 @@ type Config struct {
TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"`
Maintenance MaintenanceConfig `koanf:"maintenance" reload:"hot"`
ResourceMgmt resourcemanager.Config `koanf:"resource-mgmt" reload:"hot"`
Celestia celestia.CelestiaConfig `koanf:"celestia-cfg"`
DAPreference []string `koanf:"da-preference"`
// SnapSyncConfig is only used for testing purposes, these should not be configured in production.
SnapSyncTest SnapSyncConfig
}
Expand Down Expand Up @@ -158,6 +162,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed
DangerousConfigAddOptions(prefix+".dangerous", f)
TransactionStreamerConfigAddOptions(prefix+".transaction-streamer", f)
MaintenanceConfigAddOptions(prefix+".maintenance", f)
celestia.CelestiaDAConfigAddOptions(prefix+".celestia-cfg", f)
}

var ConfigDefault = Config{
Expand Down Expand Up @@ -548,6 +553,8 @@ func createNodeImpl(
var daReader das.DataAvailabilityServiceReader
var dasLifecycleManager *das.LifecycleManager
var dasKeysetFetcher *das.KeysetFetcher
var celestiaReader celestiaTypes.CelestiaReader
var celestiaWriter celestiaTypes.CelestiaWriter
if config.DataAvailability.Enable {
if config.BatchPoster.Enable {
daWriter, daReader, dasKeysetFetcher, dasLifecycleManager, err = das.CreateBatchPosterDAS(ctx, &config.DataAvailability, dataSigner, l1client, deployInfo.SequencerInbox)
Expand All @@ -573,6 +580,16 @@ func createNodeImpl(
return nil, errors.New("a data availability service is required for this chain, but it was not configured")
}

if config.Celestia.Enable {
celestiaService, err := celestia.NewCelestiaDASRPCClient(config.Celestia.URL)
if err != nil {
return nil, err
}

celestiaReader = celestiaService
celestiaWriter = celestiaService
}

// We support a nil txStreamer for the pruning code
if txStreamer != nil && txStreamer.chainConfig.ArbitrumChainParams.DataAvailabilityCommittee && daReader == nil {
return nil, errors.New("data availability service required but unconfigured")
Expand All @@ -584,6 +601,9 @@ func createNodeImpl(
if blobReader != nil {
dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(blobReader))
}
if celestiaReader != nil {
dapReaders = append(dapReaders, celestiaTypes.NewReaderForCelestia(celestiaReader))
}
inboxTracker, err := NewInboxTracker(arbDb, txStreamer, dapReaders, config.SnapSyncTest)
if err != nil {
return nil, err
Expand Down Expand Up @@ -708,9 +728,27 @@ func createNodeImpl(
if txOptsBatchPoster == nil && config.BatchPoster.DataPoster.ExternalSigner.URL == "" {
return nil, errors.New("batchposter, but no TxOpts")
}
var dapWriter daprovider.Writer
if daWriter != nil {
dapWriter = daprovider.NewWriterForDAS(daWriter)
dapWriters := []daprovider.Writer{}
for _, providerName := range config.DAPreference {
nilWriter := false
switch strings.ToLower(providerName) {
case "anytrust":
if daWriter != nil {
dapWriters = append(dapWriters, daprovider.NewWriterForDAS(daWriter))
} else {
nilWriter = true
}
case "celestia":
if celestiaWriter != nil {
dapWriters = append(dapWriters, celestiaTypes.NewWriterForCelestia(celestiaWriter))
} else {
nilWriter = true
}
}

if nilWriter {
log.Error("encountered nil daWriter", "daWriter", providerName)
}
}
batchPoster, err = NewBatchPoster(ctx, &BatchPosterOpts{
DataPosterDB: rawdb.NewTable(arbDb, storage.BatchPosterPrefix),
Expand All @@ -722,7 +760,7 @@ func createNodeImpl(
Config: func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster },
DeployInfo: deployInfo,
TransactOpts: txOptsBatchPoster,
DAPWriter: dapWriter,
DAPWriters: dapWriters,
ParentChainID: parentChainID,
DAPReaders: dapReaders,
})
Expand Down
10 changes: 9 additions & 1 deletion arbstate/daprovider/util.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,10 @@ func RecordPreimagesTo(preimages map[arbutil.PreimageType]map[common.Hash][]byte
// which will retrieve the full batch data.
const DASMessageHeaderFlag byte = 0x80

// CelestiaMessageHeaderFlag indicates that this data is a Blob Pointer
// which will be used to retrieve data from Celestia.
// NOTE(review): 0x63 sets several bits at once, unlike the other single-bit
// header flags in this file — verify its bits cannot collide with the other
// header flags (checked via hasBits), which would make unrelated headers
// look like Celestia pointers.
const CelestiaMessageHeaderFlag byte = 0x63

// TreeDASMessageHeaderFlag indicates that this DAS certificate data employs the new merkelization strategy.
// Ignored when DASMessageHeaderFlag is not set.
const TreeDASMessageHeaderFlag byte = 0x08
Expand All @@ -89,7 +93,7 @@ const BlobHashesHeaderFlag byte = L1AuthenticatedMessageHeaderFlag | 0x10 // 0x5
const BrotliMessageHeaderByte byte = 0

// KnownHeaderBits is all header bits with known meaning to this nitro version
const KnownHeaderBits byte = DASMessageHeaderFlag | TreeDASMessageHeaderFlag | L1AuthenticatedMessageHeaderFlag | ZeroheavyMessageHeaderFlag | BlobHashesHeaderFlag | BrotliMessageHeaderByte
const KnownHeaderBits byte = DASMessageHeaderFlag | CelestiaMessageHeaderFlag | TreeDASMessageHeaderFlag | L1AuthenticatedMessageHeaderFlag | ZeroheavyMessageHeaderFlag | BlobHashesHeaderFlag | BrotliMessageHeaderByte

// hasBits returns true if `checking` has all `bits`
func hasBits(checking byte, bits byte) bool {
Expand All @@ -116,6 +120,10 @@ func IsBlobHashesHeaderByte(header byte) bool {
return hasBits(header, BlobHashesHeaderFlag)
}

// IsCelestiaMessageHeaderByte reports whether header carries all bits of
// CelestiaMessageHeaderFlag, i.e. the batch payload is a Celestia blob
// pointer rather than inline data.
func IsCelestiaMessageHeaderByte(header byte) bool {
return hasBits(header, CelestiaMessageHeaderFlag)
}

// IsBrotliMessageHeaderByte reports whether b is the Brotli header byte.
// Unlike the flag predicates above, this is an exact equality check:
// BrotliMessageHeaderByte is 0, so a bit test would match any header.
func IsBrotliMessageHeaderByte(b uint8) bool {
return b == BrotliMessageHeaderByte
}
Expand Down
2 changes: 2 additions & 0 deletions arbstate/inbox.go
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,8 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash
if !foundDA {
if daprovider.IsDASMessageHeaderByte(payload[0]) {
log.Error("No DAS Reader configured, but sequencer message found with DAS header")
} else if daprovider.IsCelestiaMessageHeaderByte(payload[0]) {
log.Error("No Celestia Reader configured, but sequencer message found with Celestia header")
} else if daprovider.IsBlobHashesHeaderByte(payload[0]) {
return nil, daprovider.ErrNoBlobReader
}
Expand Down
Binary file not shown.
1 change: 1 addition & 0 deletions cmd/deploy/deploy.go
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,7 @@ func main() {
}

loserEscrowAddress := common.HexToAddress(*loserEscrowAddressString)

if sequencerAddress != (common.Address{}) && ownerAddress != l1TransactionOpts.From {
panic("cannot specify sequencer address if owner is not deployer")
}
Expand Down
8 changes: 4 additions & 4 deletions cmd/ipfshelper/ipfshelper.bkup_go
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,9 @@ import (
"sync"

"github.com/ethereum/go-ethereum/log"
iface "github.com/ipfs/boxo/coreiface"
"github.com/ipfs/boxo/coreiface/options"
"github.com/ipfs/go-libipfs/files"
coreiface "github.com/ipfs/interface-go-ipfs-core"
"github.com/ipfs/interface-go-ipfs-core/options"
"github.com/ipfs/interface-go-ipfs-core/path"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/core"
Expand All @@ -33,7 +33,7 @@ import (
const DefaultIpfsProfiles = ""

type IpfsHelper struct {
api coreiface.CoreAPI
api iface.CoreAPI
node *core.IpfsNode
cfg *config.Config
repoPath string
Expand Down Expand Up @@ -276,6 +276,6 @@ func CanBeIpfsPath(pathString string) bool {
}

// GetAPI exposes the helper's underlying CoreAPI directly.
// TODO: break the abstraction for now until we figure out which functions
// callers actually need, then wrap those instead of leaking the whole API.
func (h *IpfsHelper) GetAPI() iface.CoreAPI {
return h.api
}
4 changes: 2 additions & 2 deletions cmd/nitro/nitro.go
Original file line number Diff line number Diff line change
Expand Up @@ -573,9 +573,9 @@ func mainImpl() int {
return 1
}
}
// If batchPoster is enabled, validate MaxSize to be at least 10kB below the sequencer inbox’s maxDataSize if the data availability service is not enabled.
// If batchPoster is enabled, validate MaxSize to be at least 10kB below the sequencer inbox’s maxDataSize if the data availability service and celestia DA are not enabled.
// The 10kB gap is because its possible for the batch poster to exceed its MaxSize limit and produce batches of slightly larger size.
if nodeConfig.Node.BatchPoster.Enable && !nodeConfig.Node.DataAvailability.Enable {
if nodeConfig.Node.BatchPoster.Enable && (!nodeConfig.Node.DataAvailability.Enable && !nodeConfig.Node.Celestia.Enable) {
if nodeConfig.Node.BatchPoster.MaxSize > seqInboxMaxDataSize-10000 {
log.Error("batchPoster's MaxSize is too large")
return 1
Expand Down
Loading

0 comments on commit a435a86

Please sign in to comment.