diff --git a/packages/ciphernode/Cargo.lock b/packages/ciphernode/Cargo.lock index 57495901..f0ff4170 100644 --- a/packages/ciphernode/Cargo.lock +++ b/packages/ciphernode/Cargo.lock @@ -2,6 +2,63 @@ # It is not intended for manual editing. version = 3 +[[package]] +name = "actix" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de7fa236829ba0841304542f7614c42b80fca007455315c45c785ccfa873a85b" +dependencies = [ + "actix-macros", + "actix-rt", + "actix_derive", + "bitflags 2.6.0", + "bytes", + "crossbeam-channel", + "futures-core", + "futures-sink", + "futures-task", + "futures-util", + "log", + "once_cell", + "parking_lot", + "pin-project-lite", + "smallvec", + "tokio", + "tokio-util", +] + +[[package]] +name = "actix-macros" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" +dependencies = [ + "quote", + "syn 2.0.72", +] + +[[package]] +name = "actix-rt" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24eda4e2a6e042aa4e55ac438a2ae052d3b5da0ecf83d7411e1a368946925208" +dependencies = [ + "actix-macros", + "futures-core", + "tokio", +] + +[[package]] +name = "actix_derive" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c7db3d5a9718568e4cf4a537cfd7070e6e6ff7481510d0237fb529ac850f6d3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.72", +] + [[package]] name = "addr2line" version = "0.22.0" @@ -605,17 +662,6 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" -[[package]] -name = "core" -version = "0.1.0" -dependencies = [ - "async-std", - "fhe", - "fhe-traits", - "fhe-util", - "libp2p", -] - [[package]] name = "core-foundation" version = "0.9.4" @@ -650,6 +696,15 @@ dependencies = [ "libc", ] +[[package]] +name = "crossbeam-channel" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.20" @@ -838,6 +893,26 @@ dependencies = [ "fhe-util", ] +[[package]] +name = "enclave-core" +version = "0.1.0" +dependencies = [ + "actix", + "actix-rt", + "anyhow", + "async-std", + "bs58", + "fhe", + "fhe-traits", + "fhe-util", + "libp2p", + "rand", + "rand_chacha", + "secp256k1", + "sha2", + "tokio", +] + [[package]] name = "enclave_node" version = "0.1.0" @@ -3248,6 +3323,24 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "secp256k1" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e0cc0f1cf93f4969faf3ea1c7d8a9faed25918d96affa959720823dfe86d4f3" +dependencies = [ + "secp256k1-sys", +] + +[[package]] +name = "secp256k1-sys" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1433bd67156263443f14d603720b082dd3121779323fce20cba2aa07b874bc1b" +dependencies = [ + "cc", +] + [[package]] name = "semver" version = "1.0.23" @@ -3560,9 +3653,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.2" +version = "1.39.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" +checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" dependencies = [ "backtrace", "bytes", diff --git a/packages/ciphernode/core/Cargo.toml b/packages/ciphernode/core/Cargo.toml index 44c46514..e1f4ddfe 100644 --- a/packages/ciphernode/core/Cargo.toml +++ b/packages/ciphernode/core/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "core" +name = "enclave-core" version = "0.1.0" edition = "2021" description = ": coordinates the encryption and decryption of enclave computations" @@ -13,4 +13,13 @@ libp2p = "0.53.2" fhe = { git = "https://github.com/gnosisguild/fhe.rs", version = "0.1.0-beta.7" } fhe-traits = { git = "https://github.com/gnosisguild/fhe.rs", version = "0.1.0-beta.7" } fhe-util = { git = "https://github.com/gnosisguild/fhe.rs", version = "0.1.0-beta.7" } +actix = "0.13.5" +actix-rt = "2.10.0" +anyhow = "1.0.86" +rand = "0.8.5" +rand_chacha = "0.3.1" +secp256k1 = "0.29.0" +tokio = { version = "1.39.3", features = ["full"] } +sha2 = "0.10.8" +bs58 = "0.5.1" diff --git a/packages/ciphernode/core/src/ciphernode.rs b/packages/ciphernode/core/src/ciphernode.rs new file mode 100644 index 00000000..1b1a19dc --- /dev/null +++ b/packages/ciphernode/core/src/ciphernode.rs @@ -0,0 +1,81 @@ +use crate::{ + data::{Data, Insert}, + eventbus::EventBus, + events::{ComputationRequested, EnclaveEvent, KeyshareCreated}, + fhe::{Fhe, GenerateKeyshare}, +}; +use actix::prelude::*; +use anyhow::Result; + +pub struct Ciphernode { + fhe: Addr, + data: Addr, + bus: Addr, +} + +impl Actor for Ciphernode { + type Context = Context; +} + +impl Ciphernode { + pub fn new(bus: Addr, fhe: Addr, data: Addr) -> Self { + Self { bus, fhe, data } + } +} + +impl Handler for Ciphernode { + type Result = (); + + fn handle(&mut self, event: EnclaveEvent, ctx: &mut Context) -> Self::Result { + match event { + EnclaveEvent::ComputationRequested { data, .. } => ctx.address().do_send(data), + _ => (), + } + } +} + +impl Handler for Ciphernode { + type Result = ResponseFuture<()>; + + fn handle(&mut self, event: ComputationRequested, _: &mut Context) -> Self::Result { + let fhe = self.fhe.clone(); + let data = self.data.clone(); + let bus = self.bus.clone(); + Box::pin(async { + on_computation_requested(fhe, data, bus, event) + .await + .unwrap() + }) + } +} + +async fn on_computation_requested( + fhe: Addr, + data: Addr, + bus: Addr, + event: ComputationRequested, +) -> Result<()> { + let ComputationRequested { e3_id, .. 
diff --git a/packages/ciphernode/core/src/committee.rs b/packages/ciphernode/core/src/committee.rs
new file mode 100644
index 00000000..16a88a72
--- /dev/null
+++ b/packages/ciphernode/core/src/committee.rs
@@ -0,0 +1,65 @@
+use std::collections::HashMap;
+
+use actix::{Actor, Addr, Context, Handler};
+
+use crate::{
+    committee_key::{CommitteeKey, Die},
+    eventbus::EventBus,
+    events::{E3id, EnclaveEvent},
+    fhe::Fhe,
+};
+
+pub struct Committee {
+    bus: Addr<EventBus>,
+    fhe: Addr<Fhe>,
+    aggregators: HashMap<E3id, Addr<CommitteeKey>>,
+}
+
+impl Actor for Committee {
+    type Context = Context<Self>;
+}
+
+impl Committee {
+    pub fn new(bus: Addr<EventBus>, fhe: Addr<Fhe>) -> Self {
+        Self {
+            bus,
+            fhe,
+            aggregators: HashMap::new(),
+        }
+    }
+}
+
+impl Handler<EnclaveEvent> for Committee {
+    type Result = ();
+
+    fn handle(&mut self, event: EnclaveEvent, _ctx: &mut Self::Context) -> Self::Result {
+        match event {
+            EnclaveEvent::ComputationRequested { data, .. } => {
+                // start up a new aggregator
+                let aggregator = CommitteeKey::new(
+                    self.fhe.clone(),
+                    self.bus.clone(),
+                    data.e3_id.clone(),
+                    data.nodecount,
+                )
+                .start();
+
+                self.aggregators.insert(data.e3_id, aggregator);
+            }
+            EnclaveEvent::KeyshareCreated { data, .. } => {
+                if let Some(aggregator) = self.aggregators.get(&data.e3_id) {
+                    aggregator.do_send(data);
+                }
+            },
+            EnclaveEvent::PublicKeyAggregated { data, .. } => {
+                let Some(aggregator) = self.aggregators.get(&data.e3_id) else {
+                    return;
+                };
+
+                aggregator.do_send(Die);
+                self.aggregators.remove(&data.e3_id);
+            }
+            // _ => (),
+        }
+    }
+}
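`Committee` is essentially a router: one aggregator per `e3_id`, torn down once its public key has been aggregated. A small actix-free sketch of that lifecycle (the names here are stand-ins, not crate types):

```rust
use std::collections::HashMap;

// Stand-in for the per-e3_id aggregator actor.
#[derive(Default)]
struct Aggregator {
    shares: usize,
}

fn main() {
    let mut aggregators: HashMap<String, Aggregator> = HashMap::new();

    // ComputationRequested: start an aggregator for the new e3_id.
    aggregators.entry("e3-1".to_string()).or_default();

    // KeyshareCreated: forward to the matching aggregator if one exists.
    if let Some(agg) = aggregators.get_mut("e3-1") {
        agg.shares += 1;
    }

    // PublicKeyAggregated: the aggregator is finished, drop it.
    aggregators.remove("e3-1");
    assert!(aggregators.is_empty());
}
```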
diff --git a/packages/ciphernode/core/src/committee_key.rs b/packages/ciphernode/core/src/committee_key.rs
new file mode 100644
index 00000000..9d6e7aeb
--- /dev/null
+++ b/packages/ciphernode/core/src/committee_key.rs
@@ -0,0 +1,166 @@
+use crate::{
+    eventbus::EventBus,
+    events::{E3id, EnclaveEvent, KeyshareCreated, PublicKeyAggregated},
+    fhe::{Fhe, GetAggregatePublicKey, WrappedPublicKey, WrappedPublicKeyShare}, ordered_set::OrderedSet,
+};
+use actix::prelude::*;
+use anyhow::{anyhow, Result};
+
+#[derive(Debug, Clone)]
+pub enum CommitteeKeyState {
+    Collecting {
+        nodecount: usize,
+        keyshares: OrderedSet<WrappedPublicKeyShare>,
+    },
+    Computing {
+        keyshares: OrderedSet<WrappedPublicKeyShare>,
+    },
+    Complete {
+        public_key: WrappedPublicKey,
+        keyshares: OrderedSet<WrappedPublicKeyShare>,
+    },
+}
+
+#[derive(Message)]
+#[rtype(result = "anyhow::Result<()>")]
+struct ComputeAggregate {
+    pub keyshares: OrderedSet<WrappedPublicKeyShare>,
+}
+
+#[derive(Message)]
+#[rtype(result = "()")]
+pub struct Die;
+
+pub struct CommitteeKey {
+    fhe: Addr<Fhe>,
+    bus: Addr<EventBus>,
+    e3_id: E3id,
+    state: CommitteeKeyState,
+}
+
+impl CommitteeKey {
+    pub fn new(fhe: Addr<Fhe>, bus: Addr<EventBus>, e3_id: E3id, nodecount: usize) -> Self {
+        CommitteeKey {
+            fhe,
+            bus,
+            e3_id,
+            state: CommitteeKeyState::Collecting {
+                nodecount,
+                keyshares: OrderedSet::new(),
+            },
+        }
+    }
+
+    pub fn add_keyshare(&mut self, keyshare: WrappedPublicKeyShare) -> Result<CommitteeKeyState> {
+        let CommitteeKeyState::Collecting {
+            nodecount,
+            keyshares,
+        } = &mut self.state
+        else {
+            return Err(anyhow::anyhow!("Can only add keyshare in Collecting state"));
+        };
+
+        keyshares.insert(keyshare);
+        if keyshares.len() == *nodecount {
+            return Ok(CommitteeKeyState::Computing {
+                keyshares: keyshares.clone(),
+            });
+        }
+
+        Ok(self.state.clone())
+    }
+
+    pub fn set_pubkey(&mut self, pubkey: WrappedPublicKey) -> Result<CommitteeKeyState> {
+        let CommitteeKeyState::Computing { keyshares } = &mut self.state else {
+            return Ok(self.state.clone());
+        };
+
+        let keyshares = keyshares.to_owned();
+
+        Ok(CommitteeKeyState::Complete {
+            public_key: pubkey,
+            keyshares,
+        })
+    }
+}
+
+impl Actor for CommitteeKey {
+    type Context = Context<Self>;
+}
+
+impl Handler<KeyshareCreated> for CommitteeKey {
+    type Result = Result<()>;
+
+    fn handle(&mut self, event: KeyshareCreated, ctx: &mut Self::Context) -> Self::Result {
+        if event.e3_id != self.e3_id {
+            return Err(anyhow!(
+                "Wrong e3_id sent to aggregator. This should not happen."
+            ));
+        }
+
+        let CommitteeKeyState::Collecting { .. } = self.state else {
+            return Err(anyhow!(
+                "Aggregator has been closed for collecting keyshares."
+            ));
+        };
+
+        // add the keyshare and update the local state
+        self.state = self.add_keyshare(event.pubkey)?;
+
+        // Check the state and, if it has moved to Computing, kick off the aggregation
+        if let CommitteeKeyState::Computing { keyshares } = &self.state {
+            ctx.address().do_send(ComputeAggregate {
+                keyshares: keyshares.clone(),
+            })
+        }
+
+        Ok(())
+    }
+}
+
+impl Handler<ComputeAggregate> for CommitteeKey {
+    type Result = ResponseActFuture<Self, Result<()>>;
+
+    fn handle(&mut self, msg: ComputeAggregate, _: &mut Self::Context) -> Self::Result {
+        // Futures are awkward in Actix. From what I can tell we should try to structure events so
+        // that futures that don't require access to self fire like the following...
        Box::pin(
+            // Run the async future.
+            self.fhe
+                .send(GetAggregatePublicKey {
+                    keyshares: msg.keyshares.clone(),
+                })
+                // allow access to the actor
+                .into_actor(self)
+                // map into some sync stuff
+                .map(|res, act, _| {
+                    // We have to double unwrap here. Suggestions?
+                    // 1st - Mailbox error.
+                    // 2nd - GetAggregatePublicKey Response.
+                    let pubkey = res??;
+
+                    // Update the local state
+                    act.state = act.set_pubkey(pubkey.clone())?;
+
+                    // Dispatch the PublicKeyAggregated event
+                    let event = EnclaveEvent::from(PublicKeyAggregated {
+                        pubkey,
+                        e3_id: act.e3_id.clone(),
+                    });
+
+                    act.bus.do_send(event);
+
+                    // Return
+                    Ok(())
+                }),
+        )
+    }
+}
+
+impl Handler<Die> for CommitteeKey {
+    type Result = ();
+
+    fn handle(&mut self, _msg: Die, ctx: &mut Context<Self>) {
+        ctx.stop();
+    }
+}
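`CommitteeKeyState` is a three-phase state machine: Collecting until `nodecount` shares arrive, Computing while the aggregate public key is produced, Complete once it exists. A plain-Rust sketch of the Collecting → Computing transition (shares are stand-in strings here, not real keyshares):

```rust
// Standalone sketch of the transition that CommitteeKeyState models.
#[derive(Debug, Clone, PartialEq)]
enum State {
    Collecting { nodecount: usize, shares: Vec<String> },
    Computing { shares: Vec<String> },
    Complete { public_key: String },
}

fn add_share(state: State, share: String) -> State {
    match state {
        State::Collecting { nodecount, mut shares } => {
            shares.push(share);
            if shares.len() == nodecount {
                State::Computing { shares }
            } else {
                State::Collecting { nodecount, shares }
            }
        }
        // Shares that arrive outside the Collecting phase are ignored.
        other => other,
    }
}

fn main() {
    let mut state = State::Collecting { nodecount: 2, shares: vec![] };
    state = add_share(state, "share-a".into());
    state = add_share(state, "share-b".into());
    assert!(matches!(state, State::Computing { .. }));
}
```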
diff --git a/packages/ciphernode/core/src/data.rs b/packages/ciphernode/core/src/data.rs
new file mode 100644
index 00000000..40ba1a3e
--- /dev/null
+++ b/packages/ciphernode/core/src/data.rs
@@ -0,0 +1,85 @@
+use std::collections::BTreeMap;
+
+use actix::{Actor, Context, Handler, Message};
+
+// TODO: replace with sled version
+
+#[derive(Message, Clone, Debug, PartialEq, Eq, Hash)]
+#[rtype(result = "()")]
+pub struct Insert(pub Vec<u8>, pub Vec<u8>);
+impl Insert {
+    fn key(&self) -> Vec<u8> {
+        self.0.clone()
+    }
+
+    fn value(&self) -> Vec<u8> {
+        self.1.clone()
+    }
+}
+
+
+#[derive(Message, Clone, Debug, PartialEq, Eq, Hash)]
+#[rtype(result = "Option<Vec<u8>>")]
+pub struct Get(Vec<u8>);
+impl Get {
+    fn key(&self) -> Vec<u8> {
+        self.0.clone()
+    }
+}
+
+#[derive(Message, Clone, Debug, PartialEq, Eq, Hash)]
+#[rtype(result = "Vec<DataOp>")]
+pub struct GetLog;
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub enum DataOp {
+    Insert(Insert),
+}
+
+pub struct Data {
+    db: BTreeMap<Vec<u8>, Vec<u8>>,
+    log: Vec<DataOp>,
+    capture: bool,
+}
+
+impl Actor for Data {
+    type Context = Context<Self>;
+}
+
+impl Data {
+    pub fn new(capture: bool) -> Self {
+        Self {
+            db: BTreeMap::new(),
+            capture,
+            log: vec![],
+        }
+    }
+}
+
+impl Handler<Insert> for Data {
+    type Result = ();
+    fn handle(&mut self, event: Insert, _: &mut Self::Context) {
+
+        // insert data into the in-memory store (sled eventually; see TODO above)
+        self.db.insert(event.key(), event.value());
+
+        if self.capture {
+            self.log.push(DataOp::Insert(event));
+        }
+    }
+}
+
+impl Handler<Get> for Data {
+    type Result = Option<Vec<u8>>;
+    fn handle(&mut self, event: Get, _: &mut Self::Context) -> Option<Vec<u8>> {
+        let key = event.key();
+        self.db.get(&key).map(|r| r.clone())
+    }
+}
+
+impl Handler<GetLog> for Data {
+    type Result = Vec<DataOp>;
+    fn handle(&mut self, _: GetLog, _: &mut Self::Context) -> Vec<DataOp> {
+        self.log.clone()
+    }
+}
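`Data` is currently just an in-memory ordered map plus an optional operation log (the log is what tests can read back). A sled-free sketch of that behaviour:

```rust
use std::collections::BTreeMap;

// Plain sketch of what the Data actor does per Insert message: write into an
// ordered map and, when capture is on, append the operation to a log.
fn main() {
    let mut db: BTreeMap<Vec<u8>, Vec<u8>> = BTreeMap::new();
    let mut log: Vec<(Vec<u8>, Vec<u8>)> = Vec::new();
    let capture = true;

    let (key, value) = (b"1234/pk".to_vec(), vec![1, 2, 3]);
    db.insert(key.clone(), value.clone());
    if capture {
        log.push((key.clone(), value));
    }

    assert_eq!(db.get(&key), Some(&vec![1, 2, 3]));
    assert_eq!(log.len(), 1);
}
```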
diff --git a/packages/ciphernode/core/src/eventbus.rs b/packages/ciphernode/core/src/eventbus.rs
new file mode 100644
index 00000000..d6b40cc9
--- /dev/null
+++ b/packages/ciphernode/core/src/eventbus.rs
@@ -0,0 +1,99 @@
+use crate::events::{EnclaveEvent, EventId};
+use actix::prelude::*;
+use std::collections::{HashMap, HashSet};
+
+#[derive(Message, Debug)]
+#[rtype(result = "()")]
+pub struct Subscribe {
+    pub event_type: String,
+    pub listener: Recipient<EnclaveEvent>,
+}
+
+impl Subscribe {
+    pub fn new(event_type: impl Into<String>, listener: Recipient<EnclaveEvent>) -> Self {
+        Self {
+            event_type: event_type.into(),
+            listener,
+        }
+    }
+}
+
+#[derive(Message)]
+#[rtype(result = "Vec<EnclaveEvent>")]
+pub struct GetHistory;
+
+pub struct EventBus {
+    capture: bool,
+    history: Vec<EnclaveEvent>,
+    ids: HashSet<EventId>,
+    listeners: HashMap<String, Vec<Recipient<EnclaveEvent>>>,
+}
+
+impl Actor for EventBus {
+    type Context = Context<Self>;
+}
+
+impl EventBus {
+    pub fn new(capture: bool) -> Self {
+        EventBus {
+            capture,
+            listeners: HashMap::new(),
+            ids: HashSet::new(),
+            history: vec![],
+        }
+    }
+
+    fn add_to_history(&mut self, event: EnclaveEvent) {
+        self.history.push(event.clone());
+        self.ids.insert(event.into());
+    }
+}
+
+impl Handler<Subscribe> for EventBus {
+    type Result = ();
+
+    fn handle(&mut self, event: Subscribe, _: &mut Context<Self>) {
+        self.listeners
+            .entry(event.event_type)
+            .or_insert_with(Vec::new)
+            .push(event.listener);
+    }
+}
+
+impl Handler<GetHistory> for EventBus {
+    type Result = Vec<EnclaveEvent>;
+
+    fn handle(&mut self, _: GetHistory, _: &mut Context<Self>) -> Vec<EnclaveEvent> {
+        self.history.clone()
+    }
+}
+
+impl Handler<EnclaveEvent> for EventBus {
+    type Result = ();
+
+    fn handle(&mut self, event: EnclaveEvent, _: &mut Context<Self>) {
+        // Deduplicate by id
+        if self.ids.contains(&event.clone().into()) {
+            // We have seen this before
+            println!("Duplicate {}", EventId::from(event));
+            return;
+        }
+
+        // TODO: How can we ensure the events we see are coming in in the correct order?
+        if let Some(listeners) = self.listeners.get("*") {
+            for listener in listeners {
+                listener.do_send(event.clone())
+            }
+        }
+
+        if let Some(listeners) = self.listeners.get(&event.event_type()) {
+            for listener in listeners {
+                listener.do_send(event.clone())
+            }
+        }
+
+        if self.capture {
+            self.add_to_history(event);
+        }
+    }
+}
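The dispatch rule above is: listeners registered under `"*"` see every event, listeners registered under a concrete event type only see that type, and duplicates are dropped by id. An actix-free sketch of the routing part (listener names are stand-ins):

```rust
use std::collections::HashMap;

fn main() {
    // event_type -> listener names (stand-ins for Recipient<EnclaveEvent>).
    let mut listeners: HashMap<String, Vec<String>> = HashMap::new();
    listeners.entry("*".into()).or_default().push("history".into());
    listeners.entry("KeyshareCreated".into()).or_default().push("committee".into());

    let event_type = "KeyshareCreated";
    let mut notified = Vec::new();
    // Wildcard subscribers first, then subscribers for the concrete type.
    for key in ["*", event_type] {
        if let Some(subs) = listeners.get(key) {
            notified.extend(subs.iter().cloned());
        }
    }
    assert_eq!(notified, vec!["history".to_string(), "committee".to_string()]);
}
```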
diff --git a/packages/ciphernode/core/src/events.rs b/packages/ciphernode/core/src/events.rs
new file mode 100644
index 00000000..2a1ebcde
--- /dev/null
+++ b/packages/ciphernode/core/src/events.rs
@@ -0,0 +1,164 @@
+use actix::Message;
+use sha2::{Digest, Sha256};
+use std::{
+    fmt,
+    hash::{DefaultHasher, Hash, Hasher},
+};
+
+use crate::fhe::{WrappedPublicKey, WrappedPublicKeyShare};
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub struct E3id(pub String);
+impl fmt::Display for E3id {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(&self.0)
+    }
+}
+
+impl E3id {
+    pub fn new(id: impl Into<String>) -> Self {
+        Self(id.into())
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub struct EventId(pub [u8; 32]);
+
+impl EventId {
+    fn from<T: Hash>(value: T) -> Self {
+        let mut hasher = Sha256::new();
+        let mut std_hasher = DefaultHasher::new();
+        value.hash(&mut std_hasher);
+        hasher.update(std_hasher.finish().to_le_bytes());
+        let result = hasher.finalize();
+        EventId(result.into())
+    }
+}
+
+impl fmt::Display for EventId {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let base58_string = bs58::encode(&self.0).into_string();
+        write!(f, "eid_{}", base58_string)
+    }
+}
+
+
+#[derive(Message, Clone, Debug, PartialEq, Eq, Hash)]
+#[rtype(result = "()")]
+pub enum EnclaveEvent {
+    KeyshareCreated {
+        id: EventId,
+        data: KeyshareCreated,
+    },
+    ComputationRequested {
+        id: EventId,
+        data: ComputationRequested,
+    },
+    PublicKeyAggregated {
+        id: EventId,
+        data: PublicKeyAggregated,
+    },
+    // CommitteeSelected,
+    // OutputDecrypted,
+    // CiphernodeRegistered,
+    // CiphernodeDeregistered,
+}
+
+impl From<EnclaveEvent> for EventId {
+    fn from(value: EnclaveEvent) -> Self {
+        match value {
+            EnclaveEvent::KeyshareCreated { id, .. } => id,
+            EnclaveEvent::ComputationRequested { id, .. } => id,
+            EnclaveEvent::PublicKeyAggregated { id, .. } => id,
+        }
+    }
+}
+
+impl From<KeyshareCreated> for EnclaveEvent {
+    fn from(data: KeyshareCreated) -> Self {
+        EnclaveEvent::KeyshareCreated {
+            id: EventId::from(data.clone()),
+            data: data.clone(),
+        }
+    }
+}
+
+impl From<ComputationRequested> for EnclaveEvent {
+    fn from(data: ComputationRequested) -> Self {
+        EnclaveEvent::ComputationRequested {
+            id: EventId::from(data.clone()),
+            data: data.clone(),
+        }
+    }
+}
+
+impl From<PublicKeyAggregated> for EnclaveEvent {
+    fn from(data: PublicKeyAggregated) -> Self {
+        EnclaveEvent::PublicKeyAggregated {
+            id: EventId::from(data.clone()),
+            data: data.clone(),
+        }
+    }
+}
+
+#[derive(Message, Clone, Debug, PartialEq, Eq, Hash)]
+#[rtype(result = "anyhow::Result<()>")]
+pub struct KeyshareCreated {
+    pub pubkey: WrappedPublicKeyShare,
+    pub e3_id: E3id,
+}
+
+#[derive(Message, Clone, Debug, PartialEq, Eq, Hash)]
+#[rtype(result = "()")]
+pub struct PublicKeyAggregated {
+    pub pubkey: WrappedPublicKey,
+    pub e3_id: E3id,
+}
+
+#[derive(Message, Clone, Debug, PartialEq, Eq, Hash)]
+#[rtype(result = "()")]
+pub struct ComputationRequested {
+    pub e3_id: E3id,
+    pub nodecount: usize,
+    pub threshold: usize,
+    pub sortition_seed: u32,
+    // computation_type: ??, // TODO:
+    // execution_model_type: ??, // TODO:
+    // input_deadline: ??, // TODO:
+    // availability_duration: ??, // TODO:
+}
+
+fn extract_enclave_event_name(s: &str) -> &str {
+    let bytes = s.as_bytes();
+    for (i, &item) in bytes.iter().enumerate() {
+        if item == b' ' || item == b'(' {
+            return &s[..i];
+        }
+    }
+    s
+}
+
+impl EnclaveEvent {
+    pub fn event_type(&self) -> String {
+        let s = format!("{:?}", self);
+        extract_enclave_event_name(&s).to_string()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+
+    use crate::events::extract_enclave_event_name;
+
+    #[test]
+    fn test_extract_enum_name() {
+        assert_eq!(
+            extract_enclave_event_name("KeyshareCreated(KeyshareCreated { pubkey: [] })"),
+            "KeyshareCreated"
+        );
+        assert_eq!(
+            extract_enclave_event_name("CommitteeSelected(SomeStruct { t: 8 })"),
+            "CommitteeSelected"
+        );
+    }
+}
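`EventId::from` hashes the payload with the std `DefaultHasher` and then runs the resulting `u64` through SHA-256, which is what the EventBus deduplication relies on. A standalone version of that scheme (the free `event_id` function is illustrative, not the crate API):

```rust
use sha2::{Digest, Sha256};
use std::hash::{DefaultHasher, Hash, Hasher};

// The payload's std Hash output (a u64) is fed through SHA-256 to get a 32-byte id.
fn event_id<T: Hash>(value: &T) -> [u8; 32] {
    let mut std_hasher = DefaultHasher::new();
    value.hash(&mut std_hasher);
    Sha256::digest(std_hasher.finish().to_le_bytes()).into()
}

fn main() {
    // Identical payloads produce identical ids; different payloads do not.
    assert_eq!(event_id(&("e3-1", 3usize)), event_id(&("e3-1", 3usize)));
    assert_ne!(event_id(&("e3-1", 3usize)), event_id(&("e3-2", 3usize)));
}
```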
diff --git a/packages/ciphernode/core/src/fhe.rs b/packages/ciphernode/core/src/fhe.rs
new file mode 100644
index 00000000..c68edb24
--- /dev/null
+++ b/packages/ciphernode/core/src/fhe.rs
@@ -0,0 +1,138 @@
+use std::{cmp::Ordering, hash::Hash, mem, sync::Arc};
+
+use actix::{Actor, Context, Handler, Message};
+use anyhow::*;
+use fhe::{
+    bfv::{BfvParameters, PublicKey, SecretKey},
+    mbfv::{AggregateIter, CommonRandomPoly, PublicKeyShare},
+};
+use fhe_traits::Serialize;
+use rand_chacha::ChaCha20Rng;
+
+use crate::ordered_set::OrderedSet;
+
+#[derive(Message, Clone, Debug, PartialEq, Eq, Hash)]
+#[rtype(result = "Result<(WrappedSecretKey, WrappedPublicKeyShare)>")]
+pub struct GenerateKeyshare {
+    // responder_pk: Vec<u8>, // TODO: use this to encrypt the secret data
+}
+
+#[derive(Message, Clone, Debug, PartialEq, Eq)]
+#[rtype(result = "Result<(WrappedPublicKey)>")]
+pub struct GetAggregatePublicKey {
+    pub keyshares: OrderedSet<WrappedPublicKeyShare>,
+}
+
+/// Wrapped PublicKeyShare. This wrapper provides an inflection point:
+/// as we use this library elsewhere, we only implement the traits we need
+/// and avoid exposing the underlying structures from fhe.rs.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct WrappedPublicKeyShare(pub PublicKeyShare);
+
+impl Ord for WrappedPublicKeyShare {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.0.to_bytes().cmp(&other.0.to_bytes())
+    }
+}
+
+impl PartialOrd for WrappedPublicKeyShare {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl From<WrappedPublicKeyShare> for Vec<u8> {
+    fn from(share: WrappedPublicKeyShare) -> Self {
+        share.0.to_bytes()
+    }
+}
+
+impl Hash for WrappedPublicKeyShare {
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        self.0.to_bytes().hash(state)
+    }
+}
+
+impl WrappedPublicKeyShare {
+    fn clone_inner(&self) -> PublicKeyShare {
+        self.0.clone()
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct WrappedPublicKey(pub PublicKey);
+
+impl Hash for WrappedPublicKey {
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        self.0.to_bytes().hash(state)
+    }
+}
+
+impl Ord for WrappedPublicKey {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.0.to_bytes().cmp(&other.0.to_bytes())
+    }
+}
+
+impl PartialOrd for WrappedPublicKey {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+
+#[derive(PartialEq)]
+pub struct WrappedSecretKey(pub SecretKey);
+impl WrappedSecretKey {
+    pub fn unsafe_to_vec(&self) -> Vec<u8> {
+        serialize_box_i64(self.0.coeffs.clone())
+    }
+}
+
+pub struct Fhe {
+    params: Arc<BfvParameters>,
+    crp: CommonRandomPoly,
+    rng: ChaCha20Rng,
+}
+
+impl Actor for Fhe {
+    type Context = Context<Self>;
+}
+
+impl Fhe {
+    pub fn new(
+        params: Arc<BfvParameters>,
+        crp: CommonRandomPoly,
+        rng: ChaCha20Rng,
+    ) -> Result<Self> {
+        Ok(Self { params, crp, rng })
+    }
+}
+
+impl Handler<GenerateKeyshare> for Fhe {
+    type Result = Result<(WrappedSecretKey, WrappedPublicKeyShare)>;
+    fn handle(&mut self, _event: GenerateKeyshare, _: &mut Self::Context) -> Self::Result {
+        let sk_share = { SecretKey::random(&self.params, &mut self.rng) };
+        let pk_share = { PublicKeyShare::new(&sk_share, self.crp.clone(), &mut self.rng)? };
+        Ok((WrappedSecretKey(sk_share), WrappedPublicKeyShare(pk_share)))
+    }
+}
+
+impl Handler<GetAggregatePublicKey> for Fhe {
+    type Result = Result<WrappedPublicKey>;
+
+    fn handle(&mut self, msg: GetAggregatePublicKey, _: &mut Self::Context) -> Self::Result {
+        // Could implement Aggregate for Wrapped keys but that leaks traits
+        let public_key: PublicKey = msg.keyshares.iter().map(|k| k.clone_inner()).aggregate()?;
+        Ok(WrappedPublicKey(public_key))
+    }
+}
+
+fn serialize_box_i64(boxed: Box<[i64]>) -> Vec<u8> {
+    let vec = boxed.into_vec();
+    let mut bytes = Vec::with_capacity(vec.len() * mem::size_of::<i64>());
+    for &num in &vec {
+        bytes.extend_from_slice(&num.to_le_bytes());
+    }
+    bytes
+}
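`unsafe_to_vec` serializes the secret-key coefficients as little-endian `i64`s. A round-trip sketch of that encoding; the deserializer here is hypothetical and not part of this change:

```rust
use std::mem;

fn serialize_box_i64(boxed: Box<[i64]>) -> Vec<u8> {
    let vec = boxed.into_vec();
    let mut bytes = Vec::with_capacity(vec.len() * mem::size_of::<i64>());
    for &num in &vec {
        bytes.extend_from_slice(&num.to_le_bytes());
    }
    bytes
}

// Hypothetical inverse, to show the encoding is lossless.
fn deserialize_box_i64(bytes: &[u8]) -> Box<[i64]> {
    bytes
        .chunks_exact(mem::size_of::<i64>())
        .map(|c| i64::from_le_bytes(c.try_into().unwrap()))
        .collect()
}

fn main() {
    let coeffs: Box<[i64]> = vec![-1, 0, 42].into_boxed_slice();
    let bytes = serialize_box_i64(coeffs.clone());
    assert_eq!(deserialize_box_i64(&bytes), coeffs);
}
```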
diff --git a/packages/ciphernode/core/src/lib.rs b/packages/ciphernode/core/src/lib.rs
index eecfcfeb..ca1f5c3e 100644
--- a/packages/ciphernode/core/src/lib.rs
+++ b/packages/ciphernode/core/src/lib.rs
@@ -1,13 +1,203 @@
-#![crate_name = "core"]
+#![crate_name = "enclave_core"]
 #![crate_type = "lib"]
-#![warn(missing_docs, unused_imports)]
+// #![warn(missing_docs, unused_imports)]
 
-pub struct Core {
-    pub name: String,
-}
+mod ciphernode;
+mod committee;
+mod committee_key;
+mod data;
+mod eventbus;
+mod events;
+mod fhe;
+mod ordered_set;
+
+// pub struct Core {
+//     pub name: String,
+// }
+//
+// impl Core {
+//     fn new(name: String) -> Self {
+//         Self { name }
+//     }
+//
+//     fn run() {
+//         actix::run(async move {
+//             sleep(Duration::from_millis(100)).await;
+//             actix::System::current().stop();
+//         });
+//     }
+// }
+
+#[cfg(test)]
+mod tests {
+    use std::sync::Arc;
+
+    use crate::{
+        ciphernode::Ciphernode,
+        committee::Committee,
+        data::Data,
+        eventbus::{EventBus, GetHistory, Subscribe},
+        events::{ComputationRequested, E3id, EnclaveEvent, KeyshareCreated, PublicKeyAggregated},
+        fhe::{Fhe, WrappedPublicKey, WrappedPublicKeyShare},
+    };
+    use actix::prelude::*;
+    use anyhow::*;
+    use fhe::{
+        bfv::{BfvParameters, BfvParametersBuilder, PublicKey, SecretKey},
+        mbfv::{AggregateIter, CommonRandomPoly, PublicKeyShare},
+    };
+    use rand::SeedableRng;
+    use rand_chacha::ChaCha20Rng;
+
+    // Simulating a local node
+    fn setup_local_ciphernode(
+        bus: Addr<EventBus>,
+        fhe: Addr<Fhe>,
+        logging: bool,
+    ) -> (Addr<Ciphernode>, Addr<Data>) {
+        // create data actor for saving data
+        let data = Data::new(logging).start(); // TODO: Use a sled backed Data Actor
+
+        // create ciphernode actor for managing ciphernode flow
+        let node = Ciphernode::new(bus.clone(), fhe.clone(), data.clone()).start();
+
+        // subscribe for computation requested events from the event bus
+        bus.do_send(Subscribe::new("ComputationRequested", node.clone().into()));
+
+        // set up the committee manager to generate the committee public keys
+        setup_committee_manager(bus.clone(), fhe);
+        (node, data)
+    }
+
+    fn setup_bfv_params(
+        moduli: &[u64],
+        degree: usize,
+        plaintext_modulus: u64,
+        mut rng: ChaCha20Rng,
+    ) -> Result<(Arc<BfvParameters>, CommonRandomPoly)> {
+        let params = BfvParametersBuilder::new()
+            .set_degree(degree)
+            .set_plaintext_modulus(plaintext_modulus)
+            .set_moduli(&moduli)
+            .build_arc()?;
+        let crp = CommonRandomPoly::new(&params, &mut rng)?;
+        Ok((params, crp))
+    }
+
+    fn generate_pk_share(
+        params: Arc<BfvParameters>,
+        crp: CommonRandomPoly,
+        mut rng: ChaCha20Rng,
+    ) -> Result<(WrappedPublicKeyShare, ChaCha20Rng)> {
+        let sk = SecretKey::random(&params, &mut rng);
+        let pk = WrappedPublicKeyShare(PublicKeyShare::new(&sk, crp.clone(), &mut rng)?);
+        Ok((pk, rng))
+    }
+
+    fn setup_committee_manager(bus: Addr<EventBus>, fhe: Addr<Fhe>) -> Addr<Committee> {
+        let committee = Committee::new(bus.clone(), fhe.clone()).start();
 
-impl Core {
-    fn new(name: String) -> Self {
-        Self { name }
+        bus.do_send(Subscribe::new(
+            "ComputationRequested",
+            committee.clone().into(),
+        ));
+        bus.do_send(Subscribe::new("KeyshareCreated", committee.clone().into()));
+
+        committee
     }
-}
\ No newline at end of file
+
+    fn setup_global_fhe_actor(
+        moduli: &[u64],
+        degree: usize,
+        plaintext_modulus: u64,
+        rng1: ChaCha20Rng,
+        rng2: ChaCha20Rng,
+    ) -> Result<Addr<Fhe>> {
+        let (params, crp) = setup_bfv_params(&moduli, degree, plaintext_modulus, rng1)?;
+        Ok(Fhe::new(params, crp, rng2)?.start())
+    }
+
+    #[actix::test]
+    async fn test_ciphernode() -> Result<()> {
+        // Setup EventBus
+        let bus = EventBus::new(true).start();
+
+        // Setup global FHE actor
+        let fhe = setup_global_fhe_actor(
+            &vec![0x3FFFFFFF000001],
+            2048,
+            1032193,
+            ChaCha20Rng::seed_from_u64(42),
+            ChaCha20Rng::seed_from_u64(42),
+        )?;
+        setup_local_ciphernode(bus.clone(), fhe.clone(), true);
+        setup_local_ciphernode(bus.clone(), fhe.clone(), true);
+        setup_local_ciphernode(bus.clone(), fhe.clone(), true);
+
+        let e3_id = E3id::new("1234");
+
+        let event = EnclaveEvent::from(ComputationRequested {
+            e3_id: e3_id.clone(),
+            nodecount: 3,
+            threshold: 123,
+            sortition_seed: 123,
+        });
+
+        // Send the computation requested event
+        bus.send(event.clone()).await?;
+
+        // Test that we cannot send the same event twice
+        bus.send(event).await?;
+
+        let history = bus.send(GetHistory).await?;
+
+        let (params, crp) = setup_bfv_params(
+            &vec![0x3FFFFFFF000001],
+            2048,
+            1032193,
+            ChaCha20Rng::seed_from_u64(42),
+        )?;
+
+        // Passing rng through function chain to ensure it matches usage in system above
+        let rng = ChaCha20Rng::seed_from_u64(42);
+        let (p1, rng) = generate_pk_share(params.clone(), crp.clone(), rng)?;
+        let (p2, rng) = generate_pk_share(params.clone(), crp.clone(), rng)?;
+        let (p3, _) = generate_pk_share(params.clone(), crp.clone(), rng)?;
+
+        let aggregated: PublicKey = vec![p1.clone(), p2.clone(), p3.clone()]
+            .iter()
+            .map(|k| k.0.clone())
+            .aggregate()?;
+
+        assert_eq!(history.len(), 5);
+        assert_eq!(
+            history,
+            vec![
+                EnclaveEvent::from(ComputationRequested {
+                    e3_id: e3_id.clone(),
+                    nodecount: 3,
+                    threshold: 123,
+                    sortition_seed: 123,
+                }),
+                EnclaveEvent::from(KeyshareCreated {
+                    pubkey: p1.clone(),
+                    e3_id: e3_id.clone(),
+                }),
+                EnclaveEvent::from(KeyshareCreated {
+                    pubkey: p2.clone(),
+                    e3_id: e3_id.clone(),
+                }),
+                EnclaveEvent::from(KeyshareCreated {
+                    pubkey: p3.clone(),
+                    e3_id: e3_id.clone()
+                }),
+                EnclaveEvent::from(PublicKeyAggregated {
+                    pubkey: WrappedPublicKey(aggregated),
+                    e3_id: e3_id.clone()
+                })
+            ]
+        );
+
+        Ok(())
+    }
+}
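The test threads the same seed through both the actor system and the expected-value computation because a seeded `ChaCha20Rng` is fully deterministic; that is what makes the expected keyshares reproducible. A quick demonstration of that property:

```rust
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha20Rng;

fn main() {
    // Two ChaCha20 RNGs with the same seed produce identical streams,
    // so key generation driven by them is reproducible.
    let mut a = ChaCha20Rng::seed_from_u64(42);
    let mut b = ChaCha20Rng::seed_from_u64(42);
    let xs: Vec<u64> = (0..4).map(|_| a.gen::<u64>()).collect();
    let ys: Vec<u64> = (0..4).map(|_| b.gen::<u64>()).collect();
    assert_eq!(xs, ys);
}
```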
diff --git a/packages/ciphernode/core/src/ordered_set.rs b/packages/ciphernode/core/src/ordered_set.rs
new file mode 100644
index 00000000..c263e30a
--- /dev/null
+++ b/packages/ciphernode/core/src/ordered_set.rs
@@ -0,0 +1,194 @@
+use std::collections::BTreeSet;
+use std::hash::{Hash, Hasher};
+use std::fmt;
+
+#[derive(Clone)]
+pub struct OrderedSet<T: Ord>(BTreeSet<T>);
+
+impl<T: Ord> OrderedSet<T> {
+    pub fn new() -> Self {
+        OrderedSet(BTreeSet::new())
+    }
+
+    pub fn insert(&mut self, value: T) -> bool {
+        self.0.insert(value)
+    }
+
+    pub fn contains(&self, value: &T) -> bool {
+        self.0.contains(value)
+    }
+
+    pub fn len(&self) -> usize {
+        self.0.len()
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.0.is_empty()
+    }
+
+    pub fn iter(&self) -> impl Iterator<Item = &T> {
+        self.0.iter()
+    }
+}
+
+impl<T: Ord + Hash> Hash for OrderedSet<T> {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.0.len().hash(state);
+        for item in &self.0 {
+            item.hash(state);
+        }
+    }
+}
+
+impl<T: Ord> PartialEq for OrderedSet<T> {
+    fn eq(&self, other: &Self) -> bool {
+        self.0 == other.0
+    }
+}
+
+impl<T: Ord> Eq for OrderedSet<T> {}
+
+impl<T: Ord + fmt::Debug> fmt::Debug for OrderedSet<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_set().entries(self.0.iter()).finish()
+    }
+}
+
+impl<T: Ord> IntoIterator for OrderedSet<T> {
+    type Item = T;
+    type IntoIter = std::collections::btree_set::IntoIter<T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.0.into_iter()
+    }
+}
+
+impl<'a, T: Ord> IntoIterator for &'a OrderedSet<T> {
+    type Item = &'a T;
+    type IntoIter = std::collections::btree_set::Iter<'a, T>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.0.iter()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::collections::hash_map::DefaultHasher;
+    use std::hash::{Hash, Hasher};
+
+    #[test]
+    fn test_new() {
+        let set: OrderedSet<i32> = OrderedSet::new();
+        assert!(set.is_empty());
+        assert_eq!(set.len(), 0);
+    }
+
+    #[test]
+    fn test_insert() {
+        let mut set = OrderedSet::new();
+        assert!(set.insert(1));
+        assert!(set.insert(2));
+        assert!(!set.insert(1)); // Duplicate insertion
+        assert_eq!(set.len(), 2);
+    }
+
+    #[test]
+    fn test_contains() {
+        let mut set = OrderedSet::new();
+        set.insert(1);
+        set.insert(2);
+        assert!(set.contains(&1));
+        assert!(set.contains(&2));
+        assert!(!set.contains(&3));
+    }
+
+    #[test]
+    fn test_len_and_is_empty() {
+        let mut set = OrderedSet::new();
+        assert!(set.is_empty());
+        assert_eq!(set.len(), 0);
+        set.insert(1);
+        assert!(!set.is_empty());
+        assert_eq!(set.len(), 1);
+    }
+
+    #[test]
+    fn test_iter() {
+        let mut set = OrderedSet::new();
+        set.insert(3);
+        set.insert(1);
+        set.insert(2);
+        let mut iter = set.iter();
+        assert_eq!(iter.next(), Some(&1));
+        assert_eq!(iter.next(), Some(&2));
+        assert_eq!(iter.next(), Some(&3));
+        assert_eq!(iter.next(), None);
+    }
+
+    #[test]
+    fn test_hash() {
+        let mut set1 = OrderedSet::new();
+        set1.insert(1);
+        set1.insert(2);
+
+        let mut set2 = OrderedSet::new();
+        set2.insert(2);
+        set2.insert(1);
+
+        let mut hasher1 = DefaultHasher::new();
+        let mut hasher2 = DefaultHasher::new();
+
+        set1.hash(&mut hasher1);
+        set2.hash(&mut hasher2);
+
+        assert_eq!(hasher1.finish(), hasher2.finish());
+    }
+
+    #[test]
+    fn test_eq() {
+        let mut set1 = OrderedSet::new();
+        set1.insert(1);
+        set1.insert(2);
+
+        let mut set2 = OrderedSet::new();
+        set2.insert(2);
+        set2.insert(1);
+
+        let mut set3 = OrderedSet::new();
+        set3.insert(1);
+        set3.insert(3);
+
+        assert_eq!(set1, set2);
+        assert_ne!(set1, set3);
+    }
+
+    #[test]
+    fn test_debug() {
+        let mut set = OrderedSet::new();
+        set.insert(1);
+        set.insert(2);
+        assert_eq!(format!("{:?}", set), "{1, 2}");
+    }
+
+    #[test]
+    fn test_into_iter() {
+        let mut set = OrderedSet::new();
+        set.insert(3);
+        set.insert(1);
+        set.insert(2);
+        let vec: Vec<i32> = set.into_iter().collect();
+        assert_eq!(vec, vec![1, 2, 3]);
+    }
+
+    #[test]
+    fn test_iter_ref() {
+        let mut set = OrderedSet::new();
+        set.insert(3);
+        set.insert(1);
+        set.insert(2);
+        let vec: Vec<&i32> = (&set).into_iter().collect();
+        assert_eq!(vec, vec![&1, &2, &3]);
+    }
+}
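The point of `OrderedSet` (a thin wrapper over `BTreeSet`) is that iteration order depends only on the values, never on arrival order, so every node aggregates keyshares in the same order. The underlying property, shown directly on `BTreeSet`:

```rust
use std::collections::BTreeSet;

fn main() {
    // Same elements inserted in different orders still iterate identically.
    let mut a = BTreeSet::new();
    for x in [3, 1, 2] {
        a.insert(x);
    }

    let mut b = BTreeSet::new();
    for x in [2, 3, 1] {
        b.insert(x);
    }

    let order_a: Vec<_> = a.iter().copied().collect();
    let order_b: Vec<_> = b.iter().copied().collect();
    assert_eq!(order_a, order_b);
    assert_eq!(order_a, vec![1, 2, 3]);
}
```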
diff --git a/packages/ciphernode/enclave_node/src/main.rs b/packages/ciphernode/enclave_node/src/main.rs
index d3b72aba..466d6e63 100644
--- a/packages/ciphernode/enclave_node/src/main.rs
+++ b/packages/ciphernode/enclave_node/src/main.rs
@@ -1,5 +1,4 @@
-use eth::EtherClient;
-use p2p::EnclaveRouter;
+// use p2p::EnclaveRouter;
 use std::error::Error;
 use tokio;
 
@@ -30,10 +29,10 @@ async fn main() -> Result<(), Box<dyn Error>> {
     println!("\n\n\n\n\n{}", OWO);
     println!("\n\n\n\n");
 
-    let mut p2p = EnclaveRouter::new()?;
-    p2p.connect_swarm("mdns".to_string())?;
-    p2p.join_topic("enclave-keygen-01")?;
-    p2p.start().await?;
+    // let mut p2p = EnclaveRouter::new()?;
+    // p2p.connect_swarm("mdns".to_string())?;
+    // p2p.join_topic("enclave-keygen-01")?;
+    // p2p.start().await?;
     println!("Hello, cipher world!");
     Ok(())
-}
\ No newline at end of file
+}
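For orientation, here is a self-contained miniature of the `Recipient`-based fan-out pattern the new `EventBus` uses. The actor and message names are invented for the example; nothing here is part of the change itself.

```rust
use actix::prelude::*;
use std::time::Duration;

#[derive(Message, Clone)]
#[rtype(result = "()")]
struct Ping(String);

#[derive(Message)]
#[rtype(result = "()")]
struct Subscribe(Recipient<Ping>);

#[derive(Default)]
struct Bus {
    listeners: Vec<Recipient<Ping>>,
}

impl Actor for Bus {
    type Context = Context<Self>;
}

impl Handler<Subscribe> for Bus {
    type Result = ();
    fn handle(&mut self, msg: Subscribe, _: &mut Context<Self>) {
        self.listeners.push(msg.0);
    }
}

impl Handler<Ping> for Bus {
    type Result = ();
    fn handle(&mut self, msg: Ping, _: &mut Context<Self>) {
        // Fan the message out to every registered listener.
        for listener in &self.listeners {
            listener.do_send(msg.clone());
        }
    }
}

struct Printer;

impl Actor for Printer {
    type Context = Context<Self>;
}

impl Handler<Ping> for Printer {
    type Result = ();
    fn handle(&mut self, msg: Ping, _: &mut Context<Self>) {
        println!("got: {}", msg.0);
    }
}

#[actix::main]
async fn main() {
    let bus = Bus::default().start();
    let printer = Printer.start();

    bus.send(Subscribe(printer.recipient())).await.unwrap();
    bus.send(Ping("hello".into())).await.unwrap();

    // Give the forwarded message a moment to be handled before shutdown.
    tokio::time::sleep(Duration::from_millis(50)).await;
}
```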