From c4b66aca4db12c9d8c58bc978b4b2a399205494b Mon Sep 17 00:00:00 2001 From: Bogdan Cebere Date: Wed, 27 Jan 2021 06:11:17 +0200 Subject: [PATCH] Usability improvements: Arbitrary sized vectors support (#213) * encrypted vector storage update * bump version --- setup.cfg | 2 +- tenseal/binding.cpp | 39 ++- tenseal/cpp/tensors/bfvvector.cpp | 204 ++++++++----- tenseal/cpp/tensors/ckksvector.cpp | 268 ++++++++++++------ tenseal/cpp/tensors/ckksvector.h | 6 +- tenseal/cpp/tensors/encrypted_vector.h | 48 ++-- tenseal/cpp/tensors/plain_tensor.h | 13 + tenseal/cpp/tensors/tensor_storage.h | 15 + tenseal/cpp/tensors/utils/utils.h | 7 +- tenseal/cpp/utils/helpers.h | 25 ++ tenseal/proto/tensors.proto | 16 +- tenseal/version.py | 2 +- tests/cpp/tensors/bfvvector_test.cpp | 43 ++- tests/cpp/tensors/ckksvector_test.cpp | 58 +++- .../python/tenseal/tensors/test_bfv_vector.py | 17 ++ .../tenseal/tensors/test_ckks_vector.py | 34 ++- 16 files changed, 588 insertions(+), 209 deletions(-) diff --git a/setup.cfg b/setup.cfg index 43e22949..e5a16868 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.3.0a4 +current_version = 0.3.0a5 commit = True tag = True files = tenseal/version.py diff --git a/tenseal/binding.cpp b/tenseal/binding.cpp index 0465d902..66e66161 100644 --- a/tenseal/binding.cpp +++ b/tenseal/binding.cpp @@ -1,3 +1,4 @@ +#include #include #include @@ -80,12 +81,16 @@ PYBIND11_MODULE(_tenseal_cpp, m) { py::module_local()) .def(py::init([](const shared_ptr &ctx, const vector &data) { - return BFVVector::Create(ctx, data); - })) - .def(py::init( - [](const shared_ptr &ctx, const std::string &data) { - return BFVVector::Create(ctx, data); - })) + return BFVVector::Create(ctx, data); + }), + py::call_guard()) + .def(py::init([](const shared_ptr &ctx, + const std::string &data) { + return BFVVector::Create(ctx, data); + }), + py::call_guard()) .def(py::init( [](const std::string &data) { return BFVVector::Create(data); })) .def("size", py::overload_cast<>(&BFVVector::size, py::const_)) @@ -215,17 +220,23 @@ PYBIND11_MODULE(_tenseal_cpp, m) { // specifying scale .def(py::init([](const shared_ptr &ctx, const vector &data, double scale) { - return CKKSVector::Create(ctx, data, scale); - })) + return CKKSVector::Create(ctx, data, scale); + }), + py::call_guard()) // using global_scale if set .def(py::init([](const shared_ptr &ctx, const vector &data) { - return CKKSVector::Create(ctx, data); - })) - .def(py::init( - [](const shared_ptr &ctx, const std::string &data) { - return CKKSVector::Create(ctx, data); - })) + return CKKSVector::Create(ctx, data); + }), + py::call_guard()) + .def(py::init([](const shared_ptr &ctx, + const std::string &data) { + return CKKSVector::Create(ctx, data); + }), + py::call_guard()) .def(py::init( [](const std::string &data) { return CKKSVector::Create(data); })) .def("size", py::overload_cast<>(&CKKSVector::size, py::const_)) diff --git a/tenseal/cpp/tensors/bfvvector.cpp b/tenseal/cpp/tensors/bfvvector.cpp index d6fe44c7..2c79552e 100644 --- a/tenseal/cpp/tensors/bfvvector.cpp +++ b/tenseal/cpp/tensors/bfvvector.cpp @@ -14,9 +14,30 @@ void BFVVector::prepare_context(const shared_ptr& ctx) { BFVVector::BFVVector(const shared_ptr& ctx, const plain_t& vec) { this->prepare_context(ctx); - // Encrypts the whole vector into a single ciphertext using BFV batching - this->_ciphertext = BFVVector::encrypt(ctx, vec); - this->_size = vec.size(); + + if (vec.empty()) { + throw invalid_argument("Attempting to encrypt an empty vector"); + } + + 
auto slot_count = ctx->slot_count(); + auto vec_chunks = vec.chunks(slot_count); + + if (vec_chunks.size() > 1) { + std::cout + << "WARNING: The input does not fit in a single ciphertext, and " + "some operations will be disabled.\n" + "The following operations are disabled in this setup: matmul, " + "matmul_plain, conv2d_im2col, replicate_first_slot.\n" + "If you need to use those operations, try increasing the " + "poly_modulus parameter, to fit your input.\n"; + } + this->_ciphertexts = vector(); + this->_sizes = vector(); + + for (auto& chunk : vec_chunks) { + this->_ciphertexts.push_back(BFVVector::encrypt(ctx, chunk)); + this->_sizes.push_back(chunk.size()); + } } BFVVector::BFVVector(const shared_ptr& ctx, const string& vec) { @@ -39,8 +60,8 @@ BFVVector::BFVVector(const TenSEALContextProto& ctx, } BFVVector::BFVVector(const shared_ptr& vec) { this->prepare_context(vec->tenseal_context()); - this->_size = vec->size(); - this->_ciphertext = vec->ciphertext(); + this->_sizes = vec->chunked_size(); + this->_ciphertexts = vec->ciphertext(); } Ciphertext BFVVector::encrypt(shared_ptr context, @@ -65,15 +86,27 @@ Ciphertext BFVVector::encrypt(shared_ptr context, } BFVVector::plain_t BFVVector::decrypt(const shared_ptr& sk) const { - Plaintext plaintext; vector result; + result.reserve(this->size()); + + for (size_t idx = 0; idx < this->_ciphertexts.size(); ++idx) { + vector partial_result; + + Plaintext plaintext; + this->tenseal_context()->decrypt(*sk, this->_ciphertexts[idx], + plaintext); + this->tenseal_context()->decode(plaintext, + partial_result); + + // result contains all slots of ciphertext (poly_modulus_degree) + // we use the real vector size to delimit the resulting plaintext vector + auto partial_decr = + vector(partial_result.cbegin(), + partial_result.cbegin() + this->_sizes[idx]); + result.insert(result.end(), partial_decr.begin(), partial_decr.end()); + } - this->tenseal_context()->decrypt(*sk, this->_ciphertext, plaintext); - this->tenseal_context()->decode(plaintext, result); - - // result contains all slots of ciphertext (poly_modulus_degree) - // we use the real vector size to delimit the resulting plaintext vector - return vector(result.cbegin(), result.cbegin() + this->size()); + return result; } shared_ptr BFVVector::power_inplace(unsigned int power) { @@ -106,14 +139,17 @@ shared_ptr BFVVector::power_inplace(unsigned int power) { } shared_ptr BFVVector::negate_inplace() { - this->tenseal_context()->evaluator->negate_inplace(this->_ciphertext); + for (auto& ct : this->_ciphertexts) + this->tenseal_context()->evaluator->negate_inplace(ct); return shared_from_this(); } shared_ptr BFVVector::square_inplace() { - this->tenseal_context()->evaluator->square_inplace(this->_ciphertext); - this->auto_relin(_ciphertext); + for (auto& ct : this->_ciphertexts) { + this->tenseal_context()->evaluator->square_inplace(ct); + this->auto_relin(ct); + } return shared_from_this(); } @@ -129,8 +165,9 @@ shared_ptr BFVVector::add_inplace( this->broadcast_or_throw(to_add); - this->tenseal_context()->evaluator->add_inplace(this->_ciphertext, - to_add->_ciphertext); + for (size_t idx = 0; idx < this->_ciphertexts.size(); ++idx) + this->tenseal_context()->evaluator->add_inplace( + this->_ciphertexts[idx], to_add->_ciphertexts[idx]); return shared_from_this(); } @@ -146,8 +183,9 @@ shared_ptr BFVVector::sub_inplace( this->broadcast_or_throw(to_sub); - this->tenseal_context()->evaluator->sub_inplace(this->_ciphertext, - to_sub->_ciphertext); + for (size_t idx = 0; idx < 
this->_ciphertexts.size(); ++idx) + this->tenseal_context()->evaluator->sub_inplace( + this->_ciphertexts[idx], to_sub->_ciphertexts[idx]); return shared_from_this(); } @@ -163,9 +201,11 @@ shared_ptr BFVVector::mul_inplace( this->broadcast_or_throw(to_mul); - this->tenseal_context()->evaluator->multiply_inplace(this->_ciphertext, - to_mul->_ciphertext); - this->auto_relin(_ciphertext); + for (size_t idx = 0; idx < this->_ciphertexts.size(); ++idx) { + this->tenseal_context()->evaluator->multiply_inplace( + this->_ciphertexts[idx], to_mul->_ciphertexts[idx]); + this->auto_relin(_ciphertexts[idx]); + } return shared_from_this(); } @@ -187,8 +227,18 @@ shared_ptr BFVVector::dot_plain_inplace( } shared_ptr BFVVector::sum_inplace(size_t /*axis=0*/) { - sum_vector(this->tenseal_context(), this->_ciphertext, this->size()); - this->_size = 1; + vector interm_sum; + // TODO use multithreading for sum + for (size_t idx = 0; idx < this->_ciphertexts.size(); ++idx) { + Ciphertext out = this->_ciphertexts[idx]; + sum_vector(this->tenseal_context(), out, this->_sizes[idx]); + interm_sum.push_back(out); + } + Ciphertext result; + tenseal_context()->evaluator->add_many(interm_sum, result); + + this->_ciphertexts = {result}; + this->_sizes = {1}; return shared_from_this(); } @@ -198,17 +248,20 @@ shared_ptr BFVVector::add_plain_inplace( } shared_ptr BFVVector::add_plain_inplace( - const BFVVector::plain_t& to_add) { - if (this->size() != to_add.size()) { + const BFVVector::plain_t& vector_to_add) { + if (this->size() != vector_to_add.size()) { throw invalid_argument("can't add vectors of different sizes"); } - - Plaintext plaintext; - - this->tenseal_context()->encode(to_add.data_ref(), plaintext); - this->tenseal_context()->evaluator->add_plain_inplace(this->_ciphertext, - plaintext); - + auto slot_count = tenseal_context()->slot_count(); + auto to_add = vector_to_add.chunks(slot_count); + + for (size_t idx = 0; idx < this->_ciphertexts.size(); ++idx) { + Plaintext plaintext; + this->tenseal_context()->encode(to_add[idx].data_ref(), + plaintext); + this->tenseal_context()->evaluator->add_plain_inplace( + this->_ciphertexts[idx], plaintext); + } return shared_from_this(); } @@ -218,17 +271,22 @@ shared_ptr BFVVector::sub_plain_inplace( } shared_ptr BFVVector::sub_plain_inplace( - const BFVVector::plain_t& to_sub) { - if (this->size() != to_sub.size()) { + const BFVVector::plain_t& vector_to_sub) { + if (this->size() != vector_to_sub.size()) { throw invalid_argument("can't sub vectors of different sizes"); } - Plaintext plaintext; + auto slot_count = tenseal_context()->slot_count(); + auto to_sub = vector_to_sub.chunks(slot_count); - this->tenseal_context()->encode(to_sub.data_ref(), plaintext); - this->tenseal_context()->evaluator->sub_plain_inplace(this->_ciphertext, - plaintext); + for (size_t idx = 0; idx < this->_ciphertexts.size(); ++idx) { + Plaintext plaintext; + this->tenseal_context()->encode(to_sub[idx].data_ref(), + plaintext); + this->tenseal_context()->evaluator->sub_plain_inplace( + this->_ciphertexts[idx], plaintext); + } return shared_from_this(); } @@ -238,23 +296,29 @@ shared_ptr BFVVector::mul_plain_inplace( } shared_ptr BFVVector::mul_plain_inplace( - const BFVVector::plain_t& to_mul) { - if (this->size() != to_mul.size()) { + const BFVVector::plain_t& vector_to_mul) { + if (this->size() != vector_to_mul.size()) { throw invalid_argument("can't multiply vectors of different sizes"); } - Plaintext plaintext; - this->tenseal_context()->encode(to_mul.data_ref(), plaintext); - - try { - 
this->tenseal_context()->evaluator->multiply_plain_inplace( - this->_ciphertext, plaintext); - } catch (const std::logic_error& e) { - if (strcmp(e.what(), "result ciphertext is transparent") == 0) { - // replace by encryption of zero - this->tenseal_context()->encrypt_zero(this->_ciphertext); - } else { // Something else, need to be forwarded - throw; + auto slot_count = tenseal_context()->slot_count(); + auto to_mul = vector_to_mul.chunks(slot_count); + + for (size_t idx = 0; idx < this->_ciphertexts.size(); ++idx) { + Plaintext plaintext; + this->tenseal_context()->encode(to_mul[idx].data_ref(), + plaintext); + + try { + this->tenseal_context()->evaluator->multiply_plain_inplace( + this->_ciphertexts[idx], plaintext); + } catch (const std::logic_error& e) { + if (strcmp(e.what(), "result ciphertext is transparent") == 0) { + // replace by encryption of zero + this->tenseal_context()->encrypt_zero(this->_ciphertexts[idx]); + } else { // Something else, need to be forwarded + throw; + } } } @@ -282,22 +346,27 @@ shared_ptr BFVVector::enc_matmul_plain_inplace( } shared_ptr BFVVector::replicate_first_slot_inplace(size_t n) { + if (this->_ciphertexts.size() != 1) + throw invalid_argument( + "can't execute replicate_first_slot on chunked vectors"); + // mask - vector mask(this->_size, 0); + vector mask(this->size(), 0); mask[0] = 1; this->mul_plain_inplace(mask); // replicate - Ciphertext tmp = this->_ciphertext; + Ciphertext tmp = this->_ciphertexts[0]; auto galois_keys = this->tenseal_context()->galois_keys(); for (size_t i = 0; i < (size_t)ceil(log2(n)); i++) { this->tenseal_context()->evaluator->rotate_vector_inplace( tmp, static_cast(-pow(2, i)), *galois_keys); - this->tenseal_context()->evaluator->add_inplace(this->_ciphertext, tmp); - tmp = this->_ciphertext; + this->tenseal_context()->evaluator->add_inplace(this->_ciphertexts[0], + tmp); + tmp = this->_ciphertexts[0]; } - this->_size = n; + this->_sizes = {n}; return shared_from_this(); } @@ -305,17 +374,24 @@ void BFVVector::load_proto(const BFVVectorProto& vec) { if (this->tenseal_context() == nullptr) { throw invalid_argument("context missing for deserialization"); } - this->_size = vec.size(); - this->_ciphertext = SEALDeserialize( - *this->tenseal_context()->seal_context(), vec.ciphertext()); + this->_sizes = vector(); + this->_ciphertexts = vector(); + + for (auto& sz : vec.sizes()) this->_sizes.push_back(sz); + for (auto& ct : vec.ciphertexts()) + this->_ciphertexts.push_back(SEALDeserialize( + *this->tenseal_context()->seal_context(), ct)); } BFVVectorProto BFVVector::save_proto() const { BFVVectorProto buffer; - *buffer.mutable_ciphertext() = SEALSerialize(this->_ciphertext); - buffer.set_size(static_cast(this->_size)); - + for (auto& ct : this->_ciphertexts) { + buffer.add_ciphertexts(SEALSerialize(ct)); + } + for (auto& sz : this->_sizes) { + buffer.add_sizes(sz); + } return buffer; } diff --git a/tenseal/cpp/tensors/ckksvector.cpp b/tenseal/cpp/tensors/ckksvector.cpp index 8c72c331..9355b758 100644 --- a/tenseal/cpp/tensors/ckksvector.cpp +++ b/tenseal/cpp/tensors/ckksvector.cpp @@ -15,9 +15,34 @@ CKKSVector::CKKSVector(const shared_ptr& ctx, this->_init_scale = ctx->global_scale(); } - // Encrypts the whole vector into a single ciphertext using CKKS batching - this->_ciphertext = CKKSVector::encrypt(ctx, this->_init_scale, vec); - this->_size = vec.size(); + if (vec.empty()) { + throw invalid_argument("Attempting to encrypt an empty vector"); + } + + auto slot_count = ctx->slot_count(); + auto vec_chunks = 
vec.chunks(slot_count); + + if (vec_chunks.size() > 1) { + std::cout + << "WARNING: The input does not fit in a single ciphertext, and " + "some operations will be disabled.\n" + "The following operations are disabled in this setup: matmul, " + "matmul_plain, enc_matmul_plain, conv2d_im2col, " + "replicate_first_slot.\n" + "If you need to use those operations, try increasing the " + "poly_modulus parameter, to fit your input.\n"; + } + + this->_ciphertexts = vector(); + this->_sizes = vector(); + + for (auto& chunk : vec_chunks) { + // Encrypts the whole vector into a single ciphertext using CKKS + // batching + this->_ciphertexts.push_back( + CKKSVector::encrypt(ctx, this->_init_scale, chunk)); + this->_sizes.push_back(chunk.size()); + } } CKKSVector::CKKSVector(const shared_ptr& ctx, @@ -43,8 +68,8 @@ CKKSVector::CKKSVector(const shared_ptr& ctx, CKKSVector::CKKSVector(const shared_ptr& vec) { this->link_tenseal_context(vec->tenseal_context()); this->_init_scale = vec->scale(); - this->_size = vec->size(); - this->_ciphertext = vec->ciphertext(); + this->_sizes = vec->chunked_size(); + this->_ciphertexts = vec->ciphertext(); } Ciphertext CKKSVector::encrypt(shared_ptr context, double scale, @@ -69,16 +94,27 @@ Ciphertext CKKSVector::encrypt(shared_ptr context, double scale, } CKKSVector::plain_t CKKSVector::decrypt(const shared_ptr& sk) const { - Plaintext plaintext; vector result; result.reserve(this->size()); - this->tenseal_context()->decrypt(*sk, this->_ciphertext, plaintext); - this->tenseal_context()->decode(plaintext, result); + for (size_t idx = 0; idx < this->_ciphertexts.size(); ++idx) { + vector partial_result; + partial_result.reserve(this->_sizes[idx]); + Plaintext plaintext; + + this->tenseal_context()->decrypt(*sk, this->_ciphertexts[idx], + plaintext); + this->tenseal_context()->decode(plaintext, partial_result); + + // result contains all slots of ciphertext (n/2), but we may be using + // less we use the size to delimit the resulting plaintext vector + auto partial_decr = + vector(partial_result.cbegin(), + partial_result.cbegin() + this->_sizes[idx]); + result.insert(result.end(), partial_decr.begin(), partial_decr.end()); + } - // result contains all slots of ciphertext (n/2), but we may be using less - // we use the size to delimit the resulting plaintext vector - return vector(result.cbegin(), result.cbegin() + this->size()); + return result; } shared_ptr CKKSVector::power_inplace(unsigned int power) { @@ -111,15 +147,18 @@ shared_ptr CKKSVector::power_inplace(unsigned int power) { } shared_ptr CKKSVector::negate_inplace() { - this->tenseal_context()->evaluator->negate_inplace(this->_ciphertext); + for (auto& ct : this->_ciphertexts) + this->tenseal_context()->evaluator->negate_inplace(ct); return shared_from_this(); } shared_ptr CKKSVector::square_inplace() { - this->tenseal_context()->evaluator->square_inplace(_ciphertext); - this->auto_relin(_ciphertext); - this->auto_rescale(_ciphertext); + for (auto& ct : this->_ciphertexts) { + this->tenseal_context()->evaluator->square_inplace(ct); + this->auto_relin(ct); + this->auto_rescale(ct); + } return shared_from_this(); } @@ -134,11 +173,13 @@ shared_ptr CKKSVector::add_inplace( } to_add = this->broadcast_or_throw(to_add); - this->auto_same_mod(to_add->_ciphertext, _ciphertext); - this->tenseal_context()->evaluator->add_inplace(this->_ciphertext, - to_add->_ciphertext); + for (size_t idx = 0; idx < this->_ciphertexts.size(); ++idx) { + this->auto_same_mod(to_add->_ciphertexts[idx], _ciphertexts[idx]); + 
this->tenseal_context()->evaluator->add_inplace( + this->_ciphertexts[idx], to_add->_ciphertexts[idx]); + } return shared_from_this(); } @@ -152,10 +193,13 @@ shared_ptr CKKSVector::sub_inplace( } to_sub = this->broadcast_or_throw(to_sub); - this->auto_same_mod(to_sub->_ciphertext, _ciphertext); - this->tenseal_context()->evaluator->sub_inplace(this->_ciphertext, - to_sub->_ciphertext); + for (size_t idx = 0; idx < this->_ciphertexts.size(); ++idx) { + this->auto_same_mod(to_sub->_ciphertexts[idx], _ciphertexts[idx]); + + this->tenseal_context()->evaluator->sub_inplace( + this->_ciphertexts[idx], to_sub->_ciphertexts[idx]); + } return shared_from_this(); } @@ -170,13 +214,15 @@ shared_ptr CKKSVector::mul_inplace( } to_mul = this->broadcast_or_throw(to_mul); - this->auto_same_mod(to_mul->_ciphertext, _ciphertext); + for (size_t idx = 0; idx < this->_ciphertexts.size(); ++idx) { + this->auto_same_mod(to_mul->_ciphertexts[idx], _ciphertexts[idx]); - this->tenseal_context()->evaluator->multiply_inplace(this->_ciphertext, - to_mul->_ciphertext); + this->tenseal_context()->evaluator->multiply_inplace( + this->_ciphertexts[idx], to_mul->_ciphertexts[idx]); - this->auto_relin(_ciphertext); - this->auto_rescale(_ciphertext); + this->auto_relin(_ciphertexts[idx]); + this->auto_rescale(_ciphertexts[idx]); + } return shared_from_this(); } @@ -197,101 +243,136 @@ shared_ptr CKKSVector::dot_plain_inplace(const plain_t& to_mul) { } shared_ptr CKKSVector::sum_inplace(size_t /*axis = 0*/) { - sum_vector(this->tenseal_context(), this->_ciphertext, this->size()); - this->_size = 1; + vector interm_sum; + // TODO use multithreading for the sum + for (size_t idx = 0; idx < this->_ciphertexts.size(); ++idx) { + Ciphertext out = this->_ciphertexts[idx]; + sum_vector(this->tenseal_context(), out, this->_sizes[idx]); + interm_sum.push_back(out); + } + Ciphertext result; + tenseal_context()->evaluator->add_many(interm_sum, result); + + this->_ciphertexts = {result}; + this->_sizes = {1}; + return shared_from_this(); } -shared_ptr CKKSVector::add_plain_inplace(const plain_t& to_add) { - if (this->size() != to_add.size()) { +shared_ptr CKKSVector::add_plain_inplace( + const plain_t& vector_to_add) { + if (this->size() != vector_to_add.size()) { throw invalid_argument("can't add vectors of different sizes"); } - return this->_add_plain_inplace(to_add.data_ref()); + auto slot_count = tenseal_context()->slot_count(); + auto to_add = vector_to_add.chunks(slot_count); + + for (size_t idx = 0; idx < this->_ciphertexts.size(); ++idx) { + this->_add_plain_inplace(this->_ciphertexts[idx], + to_add[idx].data_ref()); + } + return shared_from_this(); } shared_ptr CKKSVector::add_plain_inplace(const double& to_add) { - return this->_add_plain_inplace(to_add); + for (auto& ct : this->_ciphertexts) this->_add_plain_inplace(ct, to_add); + return shared_from_this(); } template -shared_ptr CKKSVector::_add_plain_inplace(const T& to_add) { +void CKKSVector::_add_plain_inplace(Ciphertext& ct, const T& to_add) { Plaintext plaintext; this->tenseal_context()->encode(to_add, plaintext, this->_init_scale); - this->auto_same_mod(plaintext, _ciphertext); - this->tenseal_context()->evaluator->add_plain_inplace(this->_ciphertext, - plaintext); - return shared_from_this(); + this->auto_same_mod(plaintext, ct); + this->tenseal_context()->evaluator->add_plain_inplace(ct, plaintext); } -shared_ptr CKKSVector::sub_plain_inplace(const plain_t& to_sub) { - if (this->size() != to_sub.size()) { +shared_ptr CKKSVector::sub_plain_inplace( + const plain_t& 
vector_to_sub) { + if (this->size() != vector_to_sub.size()) { throw invalid_argument("can't sub vectors of different sizes"); } - return this->_sub_plain_inplace(to_sub.data_ref()); + auto slot_count = tenseal_context()->slot_count(); + auto to_sub = vector_to_sub.chunks(slot_count); + + for (size_t idx = 0; idx < this->_ciphertexts.size(); ++idx) { + this->_sub_plain_inplace(this->_ciphertexts[idx], + to_sub[idx].data_ref()); + } + + return shared_from_this(); } shared_ptr CKKSVector::sub_plain_inplace(const double& to_sub) { - return this->_sub_plain_inplace(to_sub); + for (auto& ct : this->_ciphertexts) this->_sub_plain_inplace(ct, to_sub); + return shared_from_this(); } template -shared_ptr CKKSVector::_sub_plain_inplace(const T& to_sub) { +void CKKSVector::_sub_plain_inplace(Ciphertext& ct, const T& to_sub) { Plaintext plaintext; this->tenseal_context()->encode(to_sub, plaintext, this->_init_scale); - this->auto_same_mod(plaintext, _ciphertext); - this->tenseal_context()->evaluator->sub_plain_inplace(this->_ciphertext, - plaintext); - - return shared_from_this(); + this->auto_same_mod(plaintext, ct); + this->tenseal_context()->evaluator->sub_plain_inplace(ct, plaintext); } -shared_ptr CKKSVector::mul_plain_inplace(const plain_t& to_mul) { - if (this->size() != to_mul.size()) { +shared_ptr CKKSVector::mul_plain_inplace( + const plain_t& vector_to_mul) { + if (this->size() != vector_to_mul.size()) { throw invalid_argument("can't multiply vectors of different sizes"); } + auto slot_count = tenseal_context()->slot_count(); + auto to_mul = vector_to_mul.chunks(slot_count); + + for (size_t idx = 0; idx < this->_ciphertexts.size(); ++idx) { + this->_mul_plain_inplace(this->_ciphertexts[idx], + to_mul[idx].data_ref()); + } - return this->_mul_plain_inplace(to_mul.data_ref()); + return shared_from_this(); } shared_ptr CKKSVector::mul_plain_inplace(const double& to_mul) { - return this->_mul_plain_inplace(to_mul); + for (auto& ct : this->_ciphertexts) this->_mul_plain_inplace(ct, to_mul); + return shared_from_this(); } template -shared_ptr CKKSVector::_mul_plain_inplace(const T& to_mul) { +void CKKSVector::_mul_plain_inplace(Ciphertext& ct, const T& to_mul) { Plaintext plaintext; this->tenseal_context()->encode(to_mul, plaintext, this->_init_scale); - this->auto_same_mod(plaintext, _ciphertext); + this->auto_same_mod(plaintext, ct); try { - this->tenseal_context()->evaluator->multiply_plain_inplace( - this->_ciphertext, plaintext); + this->tenseal_context()->evaluator->multiply_plain_inplace(ct, + plaintext); } catch (const std::logic_error& e) { if (strcmp(e.what(), "result ciphertext is transparent") == 0) { // replace by encryption of zero - this->tenseal_context()->encrypt_zero(this->_ciphertext); - this->_ciphertext.scale() = this->_init_scale; - return this->copy(); + this->tenseal_context()->encrypt_zero(ct); + ct.scale() = this->_init_scale; + return; } else { // Something else, need to be forwarded throw; } } - this->auto_rescale(_ciphertext); - - return this->copy(); + this->auto_rescale(ct); } shared_ptr CKKSVector::matmul_plain_inplace( const CKKSVector::plain_t& matrix) { - this->_ciphertext = this->diagonal_ct_vector_matmul(matrix); + if (this->_ciphertexts.size() != 1) + throw invalid_argument("can't execute matmul_plain on chunked vectors"); + + this->_ciphertexts = {this->diagonal_ct_vector_matmul(matrix)}; - this->_size = matrix.shape()[1]; - this->auto_rescale(_ciphertext); + this->_sizes = {matrix.shape()[1]}; + this->auto_rescale(_ciphertexts[0]); return 
shared_from_this(); } @@ -315,8 +396,10 @@ shared_ptr CKKSVector::polyval_inplace( // we can multiply by 0, or return the encryption of zero if (degree == -1) { // we set the vector to the encryption of zero - this->tenseal_context()->encrypt_zero(this->_ciphertext); - this->_ciphertext.scale() = this->_init_scale; + for (auto& ct : this->_ciphertexts) { + this->tenseal_context()->encrypt_zero(ct); + ct.scale() = this->_init_scale; + } return shared_from_this(); } @@ -344,12 +427,16 @@ shared_ptr CKKSVector::polyval_inplace( result->add_inplace(x); } - this->_ciphertext = result->ciphertext(); + this->_ciphertexts = result->ciphertext(); return shared_from_this(); } shared_ptr CKKSVector::conv2d_im2col_inplace( const CKKSVector::plain_t& kernel, const size_t windows_nb) { + if (this->_ciphertexts.size() != 1) + throw invalid_argument( + "can't execute conv2d_im2col on chunked vectors"); + if (windows_nb == 0) { throw invalid_argument("Windows number can't be zero"); } @@ -370,6 +457,10 @@ shared_ptr CKKSVector::enc_matmul_plain_inplace( throw invalid_argument("Plain vector can't be empty"); } + if (this->_ciphertexts.size() != 1) + throw invalid_argument( + "can't execute enc_matmul_plain on chunked vectors"); + // calculate the next power of 2 size_t plain_vec_size = 1 << (static_cast(ceil(log2(plain_vec.size())))); @@ -380,12 +471,12 @@ shared_ptr CKKSVector::enc_matmul_plain_inplace( size_t chunks_nb = padded_plain_vec.size(); - if (this->_size / rows_nb != chunks_nb) { + if (this->size() / rows_nb != chunks_nb) { throw invalid_argument("Matrix shape doesn't match with vector size"); } vector new_plain_vec; - new_plain_vec.reserve(this->_size); + new_plain_vec.reserve(this->size()); for (size_t i = 0; i < chunks_nb; i++) { vector tmp(rows_nb, padded_plain_vec[i]); @@ -396,7 +487,7 @@ shared_ptr CKKSVector::enc_matmul_plain_inplace( // multiplications size_t slot_count = this->tenseal_context()->slot_count(); replicate_vector(new_plain_vec, slot_count); - this->_size = slot_count; + this->_sizes = {slot_count}; this->mul_plain_inplace(new_plain_vec); @@ -413,28 +504,33 @@ shared_ptr CKKSVector::enc_matmul_plain_inplace( this->add_inplace(tmp); } - this->_size = rows_nb; + this->_sizes = {rows_nb}; return shared_from_this(); } shared_ptr CKKSVector::replicate_first_slot_inplace(size_t n) { + if (this->_ciphertexts.size() != 1) + throw invalid_argument( + "can't execute replicate_first_slot on chunked vectors"); + // mask - vector mask(this->_size, 0); + vector mask(this->size(), 0); mask[0] = 1; this->mul_plain_inplace(mask); // replicate - Ciphertext tmp = this->_ciphertext; + Ciphertext tmp = this->_ciphertexts[0]; auto galois_keys = this->tenseal_context()->galois_keys(); for (size_t i = 0; i < (size_t)ceil(log2(n)); i++) { this->tenseal_context()->evaluator->rotate_vector_inplace( tmp, static_cast(-pow(2, i)), *galois_keys); - this->tenseal_context()->evaluator->add_inplace(this->_ciphertext, tmp); - tmp = this->_ciphertext; + this->tenseal_context()->evaluator->add_inplace(this->_ciphertexts[0], + tmp); + tmp = this->_ciphertexts[0]; } - this->_size = n; + this->_sizes = {n}; return shared_from_this(); } @@ -442,17 +538,27 @@ void CKKSVector::load_proto(const CKKSVectorProto& vec) { if (this->tenseal_context() == nullptr) { throw invalid_argument("context missing for deserialization"); } - this->_size = vec.size(); - this->_ciphertext = SEALDeserialize( - *this->tenseal_context()->seal_context(), vec.ciphertext()); + + this->_sizes = vector(); + this->_ciphertexts = vector(); + + for 
(auto& sz : vec.sizes()) this->_sizes.push_back(sz); + for (auto& ct : vec.ciphertexts()) + this->_ciphertexts.push_back(SEALDeserialize( + *this->tenseal_context()->seal_context(), ct)); + this->_init_scale = vec.scale(); } CKKSVectorProto CKKSVector::save_proto() const { CKKSVectorProto buffer; - *buffer.mutable_ciphertext() = SEALSerialize(this->_ciphertext); - buffer.set_size(static_cast(this->_size)); + for (auto& ct : this->_ciphertexts) { + buffer.add_ciphertexts(SEALSerialize(ct)); + } + for (auto& sz : this->_sizes) { + buffer.add_sizes(sz); + } buffer.set_scale(this->_init_scale); return buffer; diff --git a/tenseal/cpp/tensors/ckksvector.h b/tenseal/cpp/tensors/ckksvector.h index af786f6a..9c8e4536 100644 --- a/tenseal/cpp/tensors/ckksvector.h +++ b/tenseal/cpp/tensors/ckksvector.h @@ -126,11 +126,11 @@ class CKKSVector Private evaluation functions to process both scalar and vector arguments. */ template - encrypted_t _add_plain_inplace(const T& to_add); + void _add_plain_inplace(Ciphertext& ct, const T& to_add); template - encrypted_t _sub_plain_inplace(const T& to_sub); + void _sub_plain_inplace(Ciphertext& ct, const T& to_sub); template - encrypted_t _mul_plain_inplace(const T& to_mul); + void _mul_plain_inplace(Ciphertext& ct, const T& to_mul); CKKSVector(const shared_ptr& ctx, const plain_t& vec, optional scale = {}); diff --git a/tenseal/cpp/tensors/encrypted_vector.h b/tenseal/cpp/tensors/encrypted_vector.h index 63c874ad..0fc2adc4 100644 --- a/tenseal/cpp/tensors/encrypted_vector.h +++ b/tenseal/cpp/tensors/encrypted_vector.h @@ -51,15 +51,21 @@ class EncryptedVector : public EncryptedTensor { /** * Return the size of the encrypted vector. **/ - size_t size() const { return this->_size; } - void size(size_t val) { this->_size = val; } - + size_t size() const { + return std::accumulate(this->_sizes.begin(), this->_sizes.end(), 0); + } + void chunked_size(const vector& val) { this->_sizes = val; } + vector chunked_size() const { return this->_sizes; } /** * Return information about the ciphertext. **/ - size_t ciphertext_size() const { return this->_ciphertext.size(); } - const Ciphertext& ciphertext() const { return this->_ciphertext; } - void ciphertext(Ciphertext&& other) { this->_ciphertext = other; } + vector ciphertext_size() const { + vector res; + for (auto& ct : this->_ciphertexts) res.push_back(ct.size()); + return res; + } + const vector& ciphertext() const { return this->_ciphertexts; } + void ciphertext(vector&& other) { this->_ciphertexts = other; } /** * Replicate the first slot of a ciphertext n times. Requires a *multiplication. 
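
A note on the reshaped accessors in the hunk above: size() is now the sum of the per-chunk sizes, while chunked_size() and ciphertext() expose one entry per underlying SEAL ciphertext. The following is a minimal, self-contained sketch of that bookkeeping only; chunk_sizes, total_size, slot_count and input_size are illustrative names and are not part of the patch.

    #include <algorithm>
    #include <cstddef>
    #include <numeric>
    #include <vector>

    // Illustrative only: how per-chunk sizes are derived from the logical
    // vector size, mirroring the chunking done in the BFVVector/CKKSVector
    // constructors of this patch.
    std::vector<std::size_t> chunk_sizes(std::size_t input_size,
                                         std::size_t slot_count) {
        std::vector<std::size_t> sizes;
        for (std::size_t done = 0; done < input_size; done += slot_count)
            sizes.push_back(std::min(slot_count, input_size - done));
        return sizes;
    }

    // Mirrors EncryptedVector::size() after this change: the logical size is
    // the sum of the chunk sizes, while chunked_size() returns them as-is.
    std::size_t total_size(const std::vector<std::size_t>& sizes) {
        return std::accumulate(sizes.begin(), sizes.end(), std::size_t(0));
    }
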
@@ -116,8 +122,11 @@ class EncryptedVector : public EncryptedTensor { * @param[in] The Galois keys **/ void rotate_vector_inplace(int steps, const GaloisKeys& galois_keys) { + if (this->_ciphertexts.size() != 1) + throw invalid_argument( + "Vector rotation not supported for big vectors"); this->tenseal_context()->evaluator->rotate_vector_inplace( - this->_ciphertext, steps, galois_keys); + this->_ciphertexts[0], steps, galois_keys); } /* @@ -129,6 +138,9 @@ class EncryptedVector : public EncryptedTensor { Ciphertext diagonal_ct_vector_matmul(const PlainTensor& matrix) { // matrix is organized by rows // _check_matrix(matrix, this->size()) + if (this->_ciphertexts.size() != 1) + throw invalid_argument( + "diagonal_ct_vector_matmul not supported for big vectors"); if (this->size() != matrix.size()) { throw invalid_argument( @@ -142,16 +154,16 @@ class EncryptedVector : public EncryptedTensor { Ciphertext result; // result should have the same scale and modulus as vec * pt_diag (ct) - this->tenseal_context()->encrypt_zero(this->_ciphertext.parms_id(), + this->tenseal_context()->encrypt_zero(this->_ciphertexts[0].parms_id(), result); - result.scale() = - this->_ciphertext.scale() * this->tenseal_context()->global_scale(); + result.scale() = this->_ciphertexts[0].scale() * + this->tenseal_context()->global_scale(); auto worker_func = [&](size_t start, size_t end) -> Ciphertext { Ciphertext thread_result; - this->tenseal_context()->encrypt_zero(this->_ciphertext.parms_id(), - thread_result); - thread_result.scale() = this->_ciphertext.scale() * + this->tenseal_context()->encrypt_zero( + this->_ciphertexts[0].parms_id(), thread_result); + thread_result.scale() = this->_ciphertexts[0].scale() * this->tenseal_context()->global_scale(); for (size_t local_i = start; local_i < end; ++local_i) { @@ -171,11 +183,11 @@ class EncryptedVector : public EncryptedTensor { this->tenseal_context()->template encode(diag, pt_diag); - if (this->_ciphertext.parms_id() != pt_diag.parms_id()) { - this->set_to_same_mod(pt_diag, _ciphertext); + if (this->_ciphertexts[0].parms_id() != pt_diag.parms_id()) { + this->set_to_same_mod(pt_diag, _ciphertexts[0]); } this->tenseal_context()->evaluator->multiply_plain( - this->_ciphertext, pt_diag, ct); + this->_ciphertexts[0], pt_diag, ct); this->tenseal_context()->evaluator->rotate_vector_inplace( ct, local_i, *this->tenseal_context()->galois_keys()); @@ -221,8 +233,8 @@ class EncryptedVector : public EncryptedTensor { virtual ~EncryptedVector(){}; protected: - size_t _size; - Ciphertext _ciphertext; + std::vector _sizes; + std::vector _ciphertexts; }; } // namespace tenseal diff --git a/tenseal/cpp/tensors/plain_tensor.h b/tenseal/cpp/tensors/plain_tensor.h index d2810c6b..0285ef54 100644 --- a/tenseal/cpp/tensors/plain_tensor.h +++ b/tenseal/cpp/tensors/plain_tensor.h @@ -202,6 +202,19 @@ class PlainTensor { * Casts the tensor to an 1D vector. 
*/ operator vector() const { return _data.data(); } + /** + * Split the tensor in chunks + * */ + vector> chunks(size_t max_size) const { + auto storage_chunks = _data.chunks(max_size); + + vector> result; + result.reserve(storage_chunks.size()); + + for (auto& chunk : storage_chunks) + result.push_back(PlainTensor(chunk)); + return result; + } /** * Iterator utils **/ diff --git a/tenseal/cpp/tensors/tensor_storage.h b/tenseal/cpp/tensors/tensor_storage.h index 8411596c..4df0093e 100644 --- a/tenseal/cpp/tensors/tensor_storage.h +++ b/tenseal/cpp/tensors/tensor_storage.h @@ -2,6 +2,7 @@ #define TENSEAL_TENSOR_STORAGE_H #include "gsl/span" +#include "tenseal/cpp/utils/helpers.h" #include "xtensor/xadapt.hpp" #include "xtensor/xarray.hpp" #include "xtensor/xjson.hpp" @@ -330,6 +331,20 @@ class TensorStorage { } _data = xt::adapt(flat_data, {flat_data.size()}); } + /** + * Split the internal storage in 1D chunks of max_size maximum size. + * */ + vector> chunks(size_t max_size) const { + auto flat_data = vector(_data.begin(), _data.end()); + auto storage_chunks = split_vector(flat_data, max_size); + + vector> result; + result.reserve(storage_chunks.size()); + + for (auto& chunk : storage_chunks) + result.push_back(TensorStorage(chunk)); + return result; + } static TensorStorage repeat_value(dtype_t value, vector shape) { size_t size = 1; diff --git a/tenseal/cpp/tensors/utils/utils.h b/tenseal/cpp/tensors/utils/utils.h index 5e2616e7..70fb31b2 100644 --- a/tenseal/cpp/tensors/utils/utils.h +++ b/tenseal/cpp/tensors/utils/utils.h @@ -57,6 +57,7 @@ shared_ptr compute_polynomial_term(int degree, double coeff, return x; } +// TODO support multi-ciphertext vectors template shared_ptr pack_vectors(const vector>& vectors) { size_t vectors_nb = vectors.size(); @@ -93,7 +94,7 @@ shared_ptr pack_vectors(const vector>& vectors) { replicate_vector(replicated_mask, slot_count); auto packed_vec = vectors[0]->copy(); - packed_vec->size(slot_count); + packed_vec->chunked_size({slot_count}); packed_vec->mul_plain_inplace(replicated_mask); for (size_t i = 1; i < vectors_nb; i++) { @@ -104,13 +105,13 @@ shared_ptr pack_vectors(const vector>& vectors) { // multiply with the mask vector then accumulate auto vec = vectors[i]->copy(); - vec->size(slot_count); + vec->chunked_size({slot_count}); vec->mul_plain_inplace(replicated_mask); packed_vec->add_inplace(vec); } // set packed vector size to the total size of vectors - packed_vec->size(output_size); + packed_vec->chunked_size({output_size}); return packed_vec; } diff --git a/tenseal/cpp/utils/helpers.h b/tenseal/cpp/utils/helpers.h index aab2c00d..2799fc66 100644 --- a/tenseal/cpp/utils/helpers.h +++ b/tenseal/cpp/utils/helpers.h @@ -1,11 +1,36 @@ #ifndef TENSEAL_UTILS_HELPERS_H #define TENSEAL_UTILS_HELPERS_H +#include +#include +#include + namespace tenseal { template constexpr typename std::underlying_type::type to_underlying(E e) noexcept { return static_cast::type>(e); } + +template +auto split_vector(const Vector& v, unsigned number_lines) { + using Iterator = typename Vector::const_iterator; + std::vector rtn; + Iterator it = v.cbegin(); + const Iterator end = v.cend(); + + while (it != end) { + Vector v; + std::back_insert_iterator inserter(v); + const auto num_to_copy = std::min( + static_cast(std::distance(it, end)), number_lines); + std::copy(it, it + num_to_copy, inserter); + rtn.push_back(std::move(v)); + std::advance(it, num_to_copy); + } + + return rtn; +} + } // namespace tenseal #endif diff --git a/tenseal/proto/tensors.proto 
b/tenseal/proto/tensors.proto index ef64b0d1..d65fd2c2 100644 --- a/tenseal/proto/tensors.proto +++ b/tenseal/proto/tensors.proto @@ -3,18 +3,18 @@ package tenseal; //BFVVector parameters message BFVVectorProto { - // The size of the encrypted vector - uint32 size = 1; - // The serialized ciphertext - bytes ciphertext = 2; + // The sizes of the chunks of the encrypted vector + repeated uint32 sizes = 1; + // The serialized ciphertexts + repeated bytes ciphertexts = 2; }; //CKKSVector parameters message CKKSVectorProto { - // The size of the encrypted vector - uint32 size = 1; - // The serialized ciphertext - bytes ciphertext = 2; + // The size of the chunks of the encrypted vector + repeated uint32 sizes = 1; + // The serialized ciphertexts + repeated bytes ciphertexts = 2; // Scale value double scale = 3; }; diff --git a/tenseal/version.py b/tenseal/version.py index cc0d9bf6..36a0cf67 100644 --- a/tenseal/version.py +++ b/tenseal/version.py @@ -1 +1 @@ -__version__ = "0.3.0a4" +__version__ = "0.3.0a5" diff --git a/tests/cpp/tensors/bfvvector_test.cpp b/tests/cpp/tensors/bfvvector_test.cpp index cd443e48..86895f9c 100644 --- a/tests/cpp/tensors/bfvvector_test.cpp +++ b/tests/cpp/tensors/bfvvector_test.cpp @@ -34,7 +34,7 @@ TEST_P(BFVVectorTest, TestCreateBFV) { } ASSERT_EQ(l->size(), 3); - ASSERT_EQ(l->ciphertext_size(), 2); + ASSERT_THAT(l->ciphertext_size(), ElementsAreArray({2})); } TEST_P(BFVVectorTest, TestBFVAdd) { @@ -49,7 +49,7 @@ TEST_P(BFVVectorTest, TestBFVAdd) { auto r = BFVVector::Create(ctx, vector({2, 3, 4})); auto add = l->add(r); - ASSERT_EQ(add->ciphertext_size(), 2); + ASSERT_THAT(add->ciphertext_size(), ElementsAreArray({2})); auto decr = add->decrypt(); EXPECT_THAT(decr.data(), ElementsAreArray({3, 5, 7})); @@ -63,7 +63,7 @@ TEST_P(BFVVectorTest, TestBFVAdd) { l = duplicate(l); } - ASSERT_EQ(l->ciphertext_size(), 2); + ASSERT_THAT(l->ciphertext_size(), ElementsAreArray({2})); decr = l->decrypt(); EXPECT_THAT(decr.data(), ElementsAreArray({9, 14, 19})); } @@ -80,7 +80,7 @@ TEST_P(BFVVectorTest, TestBFVMul) { auto r = BFVVector::Create(ctx, vector({2, 3, 4})); auto mul = l->mul(r); - ASSERT_EQ(mul->ciphertext_size(), 2); + ASSERT_THAT(mul->ciphertext_size(), ElementsAreArray({2})); auto decr = mul->decrypt(); EXPECT_THAT(decr.data(), ElementsAreArray({2, 6, 12})); @@ -96,7 +96,7 @@ TEST_P(BFVVectorTest, TestBFVMul) { l = duplicate(l); } - ASSERT_EQ(l->ciphertext_size(), 2); + ASSERT_THAT(l->ciphertext_size(), ElementsAreArray({2})); decr = l->decrypt(); EXPECT_THAT(decr.data(), ElementsAreArray({16, 32, 48})); @@ -215,6 +215,39 @@ TEST_F(BFVVectorTest, TestBFVSerializationSize) { ASSERT_TRUE(2 * sym_buffer.size() > pk_buffer.size()); } +TEST_P(BFVVectorTest, TestBFVAddBigVector) { + auto should_serialize_first = get<0>(GetParam()); + auto enc_type = get<1>(GetParam()); + + int poly_mod = 8192; + int input_size = 100000; + + auto ctx = TenSEALContext::Create(scheme_type::bfv, poly_mod, 1032193, {}, + enc_type); + ASSERT_TRUE(ctx != nullptr); + + vector l_input, r_input, expected; + for (int64_t i = 1; i < input_size; i++) { + l_input.push_back(2 * i); + r_input.push_back(3 * i); + expected.push_back(5 * i); + } + + auto l = BFVVector::Create(ctx, l_input); + auto r = BFVVector::Create(ctx, r_input); + + auto add = l->add(r); + + if (should_serialize_first) { + add = duplicate(add); + } + + ASSERT_EQ(add->chunked_size().size(), int(input_size / poly_mod) + 1); + + auto decr = add->decrypt(); + EXPECT_THAT(decr.data(), ElementsAreArray(expected)); +} + 
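
A short aside on the assertion in TestBFVAddBigVector above, under the assumption that the BFV batch slot count equals poly_modulus_degree (8192 here): the test loop pushes input_size - 1 = 99999 values, so the number of chunks is the ceiling of 99999 / 8192 = 13, which is exactly what int(input_size / poly_mod) + 1 evaluates to. The CKKS variant further below expects 2 * int(input_size / poly_mod) + 1 chunks instead, because the CKKS slot count is poly_modulus_degree / 2. The snippet below only restates that arithmetic; main and the local names are illustrative and not part of the test suite.

    #include <cassert>
    #include <cstddef>

    int main() {
        const std::size_t poly_mod = 8192;       // BFV slots == poly_modulus_degree
        const std::size_t values = 100000 - 1;   // the test loop pushes 99999 values
        // ceiling division: one extra, partially filled ciphertext for the tail
        const std::size_t chunks = (values + poly_mod - 1) / poly_mod;
        assert(chunks == 13 && chunks == 100000 / poly_mod + 1);
        return 0;
    }
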
INSTANTIATE_TEST_CASE_P( TestBFVVector, BFVVectorTest, ::testing::Values(make_tuple(false, encryption_type::asymmetric), diff --git a/tests/cpp/tensors/ckksvector_test.cpp b/tests/cpp/tensors/ckksvector_test.cpp index 6cdec24e..a46182c5 100644 --- a/tests/cpp/tensors/ckksvector_test.cpp +++ b/tests/cpp/tensors/ckksvector_test.cpp @@ -9,7 +9,7 @@ using namespace ::testing; using namespace std; template -bool are_close(const Iterable& l, const std::vector& r) { +bool are_close(const Iterable& l, const std::vector& r) { if (l.size() != r.size()) { return false; } @@ -45,7 +45,7 @@ TEST_P(CKKSVectorTest, TestCreateCKKS) { l = duplicate(l); } - ASSERT_EQ(l->ciphertext_size(), 2); + ASSERT_THAT(l->ciphertext_size(), ElementsAreArray({2})); } TEST_P(CKKSVectorTest, TestCreateCKKSFail) { @@ -84,7 +84,7 @@ TEST_P(CKKSVectorTest, TestCKKSAdd) { l = duplicate(l); } - ASSERT_EQ(add->ciphertext_size(), 2); + ASSERT_THAT(add->ciphertext_size(), ElementsAreArray({2})); auto decr = add->decrypt(); ASSERT_TRUE(are_close(decr.data(), {4, 6, 7})); @@ -96,7 +96,7 @@ TEST_P(CKKSVectorTest, TestCKKSAdd) { l = duplicate(l); } - ASSERT_EQ(l->ciphertext_size(), 2); + ASSERT_THAT(l->ciphertext_size(), ElementsAreArray({2})); decr = l->decrypt(); ASSERT_TRUE(are_close(decr.data(), {7, 10, 11})); } @@ -119,7 +119,7 @@ TEST_P(CKKSVectorTest, TestCKKSMul) { auto r = CKKSVector::Create(ctx, std::vector({2, 2, 2})); auto mul = l->mul(r); - ASSERT_EQ(mul->ciphertext_size(), 2); + ASSERT_THAT(mul->ciphertext_size(), ElementsAreArray({2})); auto decr = mul->decrypt(); std::cout << decr.at({0}) << std::endl; @@ -132,7 +132,7 @@ TEST_P(CKKSVectorTest, TestCKKSMul) { l = duplicate(l); } - ASSERT_EQ(l->ciphertext_size(), 2); + ASSERT_THAT(l->ciphertext_size(), ElementsAreArray({2})); decr = l->decrypt(); ASSERT_TRUE(are_close(decr.data(), {4, 8, 12})); } @@ -161,7 +161,7 @@ TEST_P(CKKSVectorTest, TestCKKSMulMany) { l = duplicate(l); } - ASSERT_EQ(l->ciphertext_size(), 2); + ASSERT_THAT(l->ciphertext_size(), ElementsAreArray({2})); auto decr = l->decrypt(); ASSERT_TRUE(are_close(decr.data(), {4, 8, 12})); } @@ -190,7 +190,7 @@ TEST_P(CKKSVectorTest, TestCKKSMulNoRelin) { l = duplicate(l); } - ASSERT_EQ(l->ciphertext_size(), 4); + ASSERT_THAT(l->ciphertext_size(), ElementsAreArray({4})); auto decr = l->decrypt(); ASSERT_TRUE(are_close(decr.data(), {4, 8, 12})); } @@ -238,7 +238,7 @@ TEST_P(CKKSVectorTest, TestCKKSPlainMatMul) { auto vec = CKKSVector::Create(ctx, std::vector({1, 2, 3})); auto matrix = PlainTensor( vector>{{1, 2, 3}, {1, 2, 3}, {1, 2, 3}}); - auto expected_result = vector{6, 12, 18}; + auto expected_result = vector{6, 12, 18}; auto result = vec->matmul_plain(matrix); @@ -288,6 +288,46 @@ TEST_F(CKKSVectorTest, TestCKKSVectorSerializationSize) { ASSERT_TRUE(pk_buffer.size() != sym_buffer.size()); ASSERT_TRUE(2 * sym_buffer.size() > pk_buffer.size()); } + +TEST_P(CKKSVectorTest, TestCKKSAddBigVector) { + auto should_serialize_first = get<0>(GetParam()); + auto enc_type = get<1>(GetParam()); + + int poly_mod = 8192; + int input_size = 100000; + + auto ctx = TenSEALContext::Create(scheme_type::ckks, poly_mod, -1, + {60, 40, 40, 60}, enc_type); + ASSERT_TRUE(ctx != nullptr); + + ctx->global_scale(std::pow(2, 40)); + + ctx->auto_relin(false); + ctx->auto_rescale(false); + ctx->auto_mod_switch(false); + + vector l_input, r_input, expected; + for (double i = 1.3; i < input_size; i++) { + l_input.push_back(2 * i); + r_input.push_back(3 * i); + expected.push_back(5 * i); + } + + auto l = CKKSVector::Create(ctx, l_input); + 
auto r = CKKSVector::Create(ctx, r_input); + + auto add = l->add(r); + + if (should_serialize_first) { + l = duplicate(l); + } + + ASSERT_EQ(add->chunked_size().size(), 2 * int(input_size / poly_mod) + 1); + + auto decr = add->decrypt(); + ASSERT_TRUE(are_close(decr.data(), expected)); +} + INSTANTIATE_TEST_CASE_P( TestCKKSVector, CKKSVectorTest, ::testing::Values(make_tuple(false, encryption_type::asymmetric), diff --git a/tests/python/tenseal/tensors/test_bfv_vector.py b/tests/python/tenseal/tensors/test_bfv_vector.py index 63de117d..83dbfbff 100644 --- a/tests/python/tenseal/tensors/test_bfv_vector.py +++ b/tests/python/tenseal/tensors/test_bfv_vector.py @@ -21,6 +21,8 @@ def context(): ([1, 2, 3, 4], [4, 3, 2, 1]), ([-1, -2], [-73, -10]), ([1, 2], [-73, -10]), + ([2 * i for i in range(10000)], [3 * i for i in range(10000)]), + ([2 * i for i in range(100000)], [3 * i for i in range(100000)]), ], ) def test_add(context, vec1, vec2): @@ -50,6 +52,8 @@ def test_add(context, vec1, vec2): ([1, 2, 3, 4], [4, 3, 2, 1]), ([-1, -2], [-73, -10]), ([1, 2], [-73, -10]), + ([2 * i for i in range(10000)], [3 * i for i in range(10000)]), + ([2 * i for i in range(100000)], [3 * i for i in range(100000)]), ], ) def test_add_inplace(context, vec1, vec2): @@ -78,6 +82,8 @@ def test_add_inplace(context, vec1, vec2): ([1, 2, 3, 4], [4, 3, 2, 1]), ([-1, -2], [-73, -10]), ([1, 2], [-73, -10]), + ([2 * i for i in range(10000)], [3 * i for i in range(10000)]), + ([2 * i for i in range(100000)], [3 * i for i in range(100000)]), ], ) def test_add_plain(context, vec1, vec2): @@ -106,6 +112,8 @@ def test_add_plain(context, vec1, vec2): ([1, 2, 3, 4], [4, 3, 2, 1]), ([-1, -2], [-73, -10]), ([1, 2], [-73, -10]), + ([2 * i for i in range(10000)], [3 * i for i in range(10000)]), + ([2 * i for i in range(100000)], [3 * i for i in range(100000)]), ], ) def test_add_plain_inplace(context, vec1, vec2): @@ -133,6 +141,8 @@ def test_add_plain_inplace(context, vec1, vec2): ([1, 2, 3, 4], [4, 3, 2, 1]), ([-1, -2], [-73, -10]), ([1, 2], [-73, -10]), + ([2 * i for i in range(10000)], [3 * i for i in range(10000)]), + ([2 * i for i in range(100000)], [3 * i for i in range(100000)]), ], ) def test_sub(context, vec1, vec2): @@ -161,6 +171,8 @@ def test_sub(context, vec1, vec2): ([1, 2, 3, 4], [4, 3, 2, 1]), ([-1, -2], [-73, -10]), ([1, 2], [-73, -10]), + ([2 * i for i in range(10000)], [3 * i for i in range(10000)]), + ([2 * i for i in range(100000)], [3 * i for i in range(100000)]), ], ) def test_sub_inplace(context, vec1, vec2): @@ -189,6 +201,8 @@ def test_sub_inplace(context, vec1, vec2): ([1, 2, 3, 4], [4, 3, 2, 1]), ([-1, -2], [-73, -10]), ([1, 2], [-73, -10]), + ([2 * i for i in range(10000)], [3 * i for i in range(10000)]), + ([2 * i for i in range(100000)], [3 * i for i in range(100000)]), ], ) def test_sub_plain(context, vec1, vec2): @@ -217,6 +231,8 @@ def test_sub_plain(context, vec1, vec2): ([1, 2, 3, 4], [4, 3, 2, 1]), ([-1, -2], [-73, -10]), ([1, 2], [-73, -10]), + ([2 * i for i in range(10000)], [3 * i for i in range(10000)]), + ([2 * i for i in range(100000)], [3 * i for i in range(100000)]), ], ) def test_sub_plain_inplace(context, vec1, vec2): @@ -243,6 +259,7 @@ def test_sub_plain_inplace(context, vec1, vec2): ([1, 2, 3, 4], [4, 3, 2, 1]), ([-1, -2], [-73, -10]), ([1, 2], [-73, -10]), + ([2 for i in range(10000)], [3 for i in range(10000)]), ], ) def test_mul(context, vec1, vec2): diff --git a/tests/python/tenseal/tensors/test_ckks_vector.py b/tests/python/tenseal/tensors/test_ckks_vector.py index 
aedc4174..bd2359ac 100644 --- a/tests/python/tenseal/tensors/test_ckks_vector.py +++ b/tests/python/tenseal/tensors/test_ckks_vector.py @@ -42,7 +42,16 @@ def precision(): @pytest.mark.parametrize( - "plain_vec", [[0], [-1], [1], [21, 81, 90], [-73, -81, -90], [-11, 82, -43, 52]] + "plain_vec", + [ + [0], + [-1], + [1], + [21, 81, 90], + [-73, -81, -90], + [-11, 82, -43, 52], + [i for i in range(100000)], + ], ) def test_negate(context, plain_vec, precision): ckks_vec = ts.ckks_vector(context, plain_vec) @@ -54,7 +63,16 @@ def test_negate(context, plain_vec, precision): @pytest.mark.parametrize( - "plain_vec", [[0], [-1], [1], [21, 81, 90], [-73, -81, -90], [-11, 82, -43, 52]] + "plain_vec", + [ + [0], + [-1], + [1], + [21, 81, 90], + [-73, -81, -90], + [-11, 82, -43, 52], + [i for i in range(100000)], + ], ) def test_negate_inplace(context, plain_vec, precision): ckks_vec = ts.ckks_vector(context, plain_vec) @@ -82,6 +100,7 @@ def test_negate_inplace(context, plain_vec, precision): ([1, -2, 3, -4], 6, 1), ([1, -2, 3, -4], 7, 0), ([1, -2, 3, -4], 8, -1), + ([2 for i in range(100000)], 3, 1), ], ) def test_power(context, plain_vec, power, precision): @@ -140,6 +159,7 @@ def test_power_inplace(context, plain_vec, power, precision): [1, -2, 3, -4], [3, -2, 5, -4], [1, -4, 3, 5], + [2 for i in range(10000)], ], ) def test_square(context, plain_vec, precision): @@ -192,6 +212,8 @@ def test_square_inplace(context, plain_vec, precision): ([1, 0, -2, 73], [-5,]), ([1, 2, 3, 4, 5], [1,]), ([1, 0, -2, 0, -8, 4, 73], [81,]), + ([2 * i for i in range(100000)], [3 * i for i in range(100000)]), + ([2 for i in range(1000000)], [3 for i in range(1000000)]), ], ) def test_add(context, vec1, vec2, precision): @@ -231,6 +253,7 @@ def test_add(context, vec1, vec2, precision): ([1, 0, -2, 73], [-5,]), ([1, 2, 3, 4, 5], [1,]), ([1, 0, -2, 0, -8, 4, 73], [81,]), + ([2 * i for i in range(100000)], [3 * i for i in range(100000)]), ], ) def test_add_inplace(context, vec1, vec2, precision): @@ -267,6 +290,7 @@ def test_add_inplace(context, vec1, vec2, precision): ([1, 2, 3, 4], 2), ([1, 2, 3, 4], 0), ([1, 2, 3, 4], -2), + ([2 * i for i in range(100000)], 5), ], ) def test_add_plain(context, vec1, vec2, precision): @@ -362,6 +386,7 @@ def test_add_plain_inplace(context, vec1, vec2, precision): ([1, 0, -2, 73], [-5,]), ([1, 2, 3, 4, 5], [1,]), ([1, 0, -2, 0, -8, 4, 73], [81,]), + ([2 * i for i in range(100000)], [3 * i for i in range(100000)]), ], ) def test_sub(context, vec1, vec2, precision): @@ -440,6 +465,7 @@ def test_sub_inplace(context, vec1, vec2, precision): ([1, 2, 3, 4], 2), ([1, 2, 3, 4], 0), ([1, 2, 3, 4], -2), + ([2 * i for i in range(100000)], 3), ], ) def test_sub_plain(context, vec1, vec2, precision): @@ -541,6 +567,7 @@ def test_sub_plain_inplace(context, vec1, vec2, precision): ([1, 0, -2, 73], [-5,]), ([1, 2, 3, 4, 5], [1,]), ([1, 0, -2, 0, -8, 4, 73], [81,]), + ([2 for i in range(10000)], [3 for i in range(10000)]), ], ) def test_mul(context, vec1, vec2, precision): @@ -619,6 +646,7 @@ def test_mul_inplace(context, vec1, vec2, precision): ([1, 2, 3, 4], 2), ([1, 2, 3, 4], 0), ([1, 2, 3, 4], -2), + ([2 * i for i in range(100000)], 3), ], ) def test_mul_plain(context, vec1, vec2, precision): @@ -856,6 +884,7 @@ def test_dot_product_plain_inplace(context, vec1, vec2, precision): ([1, 2, 3, 4, 5, 6]), ([1, 2, 3, 4, 5, 6, 7]), ([1, 2, 3, 4, 5, 6, 7, 8]), + ([2 * i for i in range(100000)]), ], ) def test_sum(context, vec1, precision): @@ -1069,6 +1098,7 @@ def generate_input(matrix_shape, vector_size): 
([0, 1, 2, 3, 4], [0, 0, 0, 1]), ([0, 1, 2, 3, 4], [3, 2, 4, 5]), ([0, -1, -2, -3, -4], [-3, -2, -4, -5, 1]), + ([2 for i in range(100000)], [-3, -2, -4, -5, 1]), ], ) def test_polynomial(context, data, polynom, precision):
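
Taken together, the patch lets callers encrypt vectors larger than a single ciphertext's slot count, at the cost of disabling the rotation-based operations (matmul, matmul_plain, enc_matmul_plain, conv2d_im2col, replicate_first_slot) on chunked vectors. A minimal end-to-end usage sketch, modeled on the TestCKKSAddBigVector case added by this patch, follows; the include path and the unqualified scheme_type/encryption_type names are assumptions about the build setup rather than something the patch prescribes.

    #include <cmath>
    #include <vector>

    #include "tenseal/cpp/tensors/ckksvector.h"  // assumed include path

    using namespace tenseal;

    int main() {
        auto ctx = TenSEALContext::Create(scheme_type::ckks, 8192, -1,
                                          {60, 40, 40, 60},
                                          encryption_type::asymmetric);
        ctx->global_scale(std::pow(2, 40));

        // 100000 values exceed the 4096 CKKS slots of one ciphertext, so the
        // constructor now splits the input into chunks and prints the warning
        // that the operations listed above are disabled for this vector.
        std::vector<double> l_input(100000, 2.0), r_input(100000, 3.0);
        auto l = CKKSVector::Create(ctx, l_input);
        auto r = CKKSVector::Create(ctx, r_input);

        auto sum = l->add(r);             // applied chunk by chunk
        auto decrypted = sum->decrypt();  // reassembled into 100000 values
        return decrypted.size() == l_input.size() ? 0 : 1;
    }
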