diff --git a/.github/workflows/nestbuildmatrix.yml b/.github/workflows/nestbuildmatrix.yml index ad31de7fb4..8423e87f6e 100644 --- a/.github/workflows/nestbuildmatrix.yml +++ b/.github/workflows/nestbuildmatrix.yml @@ -707,6 +707,8 @@ jobs: echo "backend : svg" > $HOME/.matplotlib/matplotlibrc - name: "Run NEST testsuite" + env: + DO_TESTS_SKIP_TEST_REQUIRING_MANY_CORES: ${{ contains(matrix.use, 'mpi') && contains(matrix.use, 'openmp') }} run: | pwd cd "$NEST_VPATH" @@ -757,7 +759,7 @@ jobs: - name: "Set up Python 3.x" uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 with: - python-version: 3.9 + python-version: 3.12 - name: "Install MacOS system dependencies" run: | diff --git a/README.md b/README.md index 02d2a2fbc1..03d172480a 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ [![Documentation](https://img.shields.io/readthedocs/nest-simulator?logo=readthedocs&logo=Read%20the%20Docs&label=Documentation)](https://nest-simulator.org/documentation) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/2218/badge)](https://bestpractices.coreinfrastructure.org/projects/2218) [![License](http://img.shields.io/:license-GPLv2+-green.svg)](http://www.gnu.org/licenses/gpl-2.0.html) -[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.8069926.svg)](https://doi.org/10.5281/zenodo.8069926) +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10834751.svg)](https://doi.org/10.5281/zenodo.10834751) [![Latest release](https://img.shields.io/github/release/nest/nest-simulator.svg?color=brightgreen&label=latest%20release)](https://github.com/nest/nest-simulator/releases) [![GitHub contributors](https://img.shields.io/github/contributors/nest/nest-simulator?logo=github)](https://github.com/nest/nest-simulator) diff --git a/doc/htmldoc/hpc/parallel_computing.rst b/doc/htmldoc/hpc/parallel_computing.rst index 93cfc0d4b9..6d24b365cc 100644 --- a/doc/htmldoc/hpc/parallel_computing.rst +++ b/doc/htmldoc/hpc/parallel_computing.rst @@ -161,7 +161,7 @@ Spikes between neurons and devices Synaptic plasticity models ~~~~~~~~~~~~~~~~~~~~~~~~~~ -For synapse models supporting plasticity, synapse dynamics in the +For synapse models supporting :hxt_ref:`plasticity`, synapse dynamics in the ``Connection`` object are always handled by the virtual process of the `target node`. diff --git a/doc/htmldoc/tutorials/music_tutorial/music_tutorial_1.rst b/doc/htmldoc/tutorials/music_tutorial/music_tutorial_1.rst index e4733dc189..5089b9d324 100644 --- a/doc/htmldoc/tutorials/music_tutorial/music_tutorial_1.rst +++ b/doc/htmldoc/tutorials/music_tutorial/music_tutorial_1.rst @@ -56,7 +56,7 @@ A NEST network consists of three types of elements: neurons, devices, and connections between them. Neurons are the basic building blocks, and in NEST they are generally -spiking point neuron models. Devices are supporting units that for +spiking :hxt_ref:`point neuron` models. Devices are supporting units that for instance generate inputs to neurons or record data from them. The Poisson spike generator, the spike recorder recording device, and the MUSIC input and output proxies are all devices. 
Neurons and devices are diff --git a/doc/htmldoc/tutorials/pynest_tutorial/part_3_connecting_networks_with_synapses.rst b/doc/htmldoc/tutorials/pynest_tutorial/part_3_connecting_networks_with_synapses.rst index 2ccef0b9a2..f69b183900 100644 --- a/doc/htmldoc/tutorials/pynest_tutorial/part_3_connecting_networks_with_synapses.rst +++ b/doc/htmldoc/tutorials/pynest_tutorial/part_3_connecting_networks_with_synapses.rst @@ -64,10 +64,10 @@ STDP synapses For the majority of synapses, all of their parameters are accessible via :py:func:`.GetDefaults` and :py:func:`.SetDefaults`. Synapse models implementing -spike-timing dependent plasticity are an exception to this, as their +:hxt_ref:`spike-timing dependent plasticity` are an exception to this, as their dynamics are driven by the postsynaptic :hxt_ref:`spike train` as well as the pre-synaptic one. As a consequence, the time constant of the depressing -window of :hxt_ref:`STDP` is a parameter of the postsynaptic neuron. It can be set +window of STDP is a parameter of the postsynaptic neuron. It can be set as follows: :: diff --git a/lib/sli/nest-init.sli b/lib/sli/nest-init.sli index 925eb24f03..238af19c04 100644 --- a/lib/sli/nest-init.sli +++ b/lib/sli/nest-init.sli @@ -488,9 +488,14 @@ def %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% add conversion from NodeCollection -/cva [/nodecollectiontype] - /cva_g load +% add new functions to trie if it exists, else create new +/cva dup lookup not +{ + trie +} if + [/connectiontype] /cva_C load addtotrie + [/nodecollectiontype] { /all cva_g_l } addtotrie + [/nodecollectiontype /literaltype] /cva_g_l load addtotrie def %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -1426,8 +1431,6 @@ def [/integertype /integertype] /TimeCommunicationAlltoallv_i_i load addtotrie def -/cva [/connectiontype] /cva_C load def - /abort { statusdict /exitcodes get /userabort get diff --git a/libnestutil/CMakeLists.txt b/libnestutil/CMakeLists.txt index 91063b91df..3700edea51 100644 --- a/libnestutil/CMakeLists.txt +++ b/libnestutil/CMakeLists.txt @@ -27,6 +27,7 @@ set( nestutil_sources lockptr.h logging_event.h logging_event.cpp logging.h + nest_types.h numerics.h numerics.cpp regula_falsi.h sort.h diff --git a/nestkernel/nest_types.h b/libnestutil/nest_types.h similarity index 100% rename from nestkernel/nest_types.h rename to libnestutil/nest_types.h diff --git a/libnestutil/numerics.cpp b/libnestutil/numerics.cpp index 8eff885f2a..412a5fadd4 100644 --- a/libnestutil/numerics.cpp +++ b/libnestutil/numerics.cpp @@ -20,6 +20,11 @@ * */ +#include +#include +#include + +#include "nest_types.h" #include "numerics.h" #ifndef HAVE_M_E @@ -126,3 +131,142 @@ is_integer( double n ) // factor 4 allows for two bits of rounding error return frac_part < 4 * n * std::numeric_limits< double >::epsilon(); } + +long +mod_inverse( long a, long m ) +{ + /* + The implementation here is based on the extended Euclidean algorithm which + solves + + a x + m y = gcd( a, m ) = 1 mod m + + for x and y. Note that the m y term is zero mod m, so the equation is equivalent + to + + a x = 1 mod m + + Since we only need x, we can ignore y and use just half of the algorithm. + + We can assume without loss of generality that a < m, because if a = a' + j m with + 0 < a' < m, we have + + a x mod m = (a' + j m) x mod m = a' x + j x m mod m = a' x. + + This implies that m ≥ 2. + + For details on the algorithm, see D. E. 
Knuth, The Art of Computer Programming, + ch 4.5.2, Algorithm X (vol 2), and ch 1.2.1, Algorithm E (vol 1). + */ + + assert( 0 < a ); + assert( 2 <= m ); + + const long a_orig = a; + const long m_orig = m; + + // If a ≥ m, the algorithm needs two extra rounds to transform this to + // a' < m, so we take care of this in a single step here. + a = a % m; + + // Use half of extended Euclidean algorithm required to compute inverse + long s_0 = 1; + long s_1 = 0; + + while ( a > 0 ) + { + // get quotient and remainder in one go + const auto res = div( m, a ); + m = a; + a = res.rem; + + // line ordering matters here + const long s_0_new = -res.quot * s_0 + s_1; + s_1 = s_0; + s_0 = s_0_new; + } + + // ensure positive result + s_1 = ( s_1 + m_orig ) % m_orig; + + assert( m == 1 ); // gcd() == 1 required + assert( ( a_orig * s_1 ) % m_orig == 1 ); // self-test + + return s_1; +} + +size_t +first_index( long period, long phase0, long step, long phase ) +{ + assert( period > 0 ); + assert( step > 0 ); + assert( 0 <= phase0 and phase0 < period ); + assert( 0 <= phase and phase < period ); + + /* + The implementation here is based on + https://math.stackexchange.com/questions/25390/how-to-find-the-inverse-modulo-m + + We first need to solve + + phase0 + k step = phase mod period + <=> k step = ( phase - phase0 ) = d_phase mod period + + This has a solution iff d = gcd(step, period) divides d_phase. + + Then, if d = 1, the solution is unique and given by + + k' = mod_inv(step) * d_phase mod period + + If d > 1, we need to divide the equation by it and solve + + (step / d) k_0 = d_phase / d mod (period / d) + + The set of solutions is then given by + + k_j = k_0 + j * period / d for j = 0, 1, ..., d-1 + + and we need the smallest of these. Now we are interested in + an index given by k * step with a period of lcm(step, period) + (the outer_period below, marked by | in the illustration in + the doxygen comment), we immediately run over + + k_j * step = k_0 * step + j * step * period / d mod lcm(step, period) + + below and take the smallest. But since step * period / d = lcm(step, period), + the term in j above vanishes and k_0 * step mod lcm(step, period) is actually + the solution. + + We do all calculations in signed long since we may encounter negative + values during the algorithm. The result will be non-negative and returned + as size_t. This is important because the "not found" case is signaled + by invalid_index, which is size_t. + */ + + // This check is not only a convenience: If step == k * period, we only match if + // phase == phase0 and the algorithm below will fail if we did not return here + // immediately, because we get d == period -> period_d = 1 and modular inverse + // for modulus 1 makes no sense. 
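+  /*
+     Worked example (illustrative only, using the values from the doxygen
+     illustration in numerics.h: period = 4, phase0 = 1, step = 3):
+
+       mod_inverse( 3, 4 ) == 3          because 3 * 3 = 9 = 1 mod 4
+
+       first_index( 4, 1, 3, 1 ) == 0    phase == phase0, early return below
+       first_index( 4, 1, 3, 0 ) == 3    d_phase = 3, ( 3 * 3 * 3 ) mod lcm( 4, 3 ) = 27 mod 12
+       first_index( 4, 1, 3, 3 ) == 6    d_phase = 2, ( 2 * 3 * 3 ) mod 12 = 18 mod 12
+       first_index( 4, 1, 3, 2 ) == 9    d_phase = 1, ( 1 * 3 * 3 ) mod 12 =  9 mod 12
+
+     These return values are exactly the first asterisk positions for each vp in the
+     table in numerics.h.
+  */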
+ if ( phase == phase0 ) + { + return 0; + } + + const long d_phase = ( phase - phase0 + period ) % period; + const long d = std::gcd( step, period ); + + if ( d_phase % d != 0 ) + { + return nest::invalid_index; // no solution exists + } + + // Scale by GCD, since modular inverse requires gcd==1 + const long period_d = period / d; + const long step_d = step / d; + const long d_phase_d = d_phase / d; + + // Compute k_0 and multiply by step, see explanation in introductory comment + const long idx = ( d_phase_d * mod_inverse( step_d, period_d ) * step ) % std::lcm( period, step ); + + return static_cast< size_t >( idx ); +} diff --git a/libnestutil/numerics.h b/libnestutil/numerics.h index 2513dac574..a4c077ffe6 100644 --- a/libnestutil/numerics.h +++ b/libnestutil/numerics.h @@ -131,4 +131,52 @@ double dtruncate( double ); */ bool is_integer( double ); +/** + * Returns inverse of integer a modulo m + * + * For integer a > 0, m ≥ 2, find x so that ( a * x ) mod m = 1. + */ +long mod_inverse( long a, long m ); + +/** + * Return first matching index for entry in container with double periodicity. + * + * Consider + * - a container of arbitrary length containing elements (e.g. GIDs) which map to certain values, + * e.g., VP(GID), with a given *period*, e.g., the number of virtual processes + * - that the phase (e.g., the VP number) of the first entry in the container is *phase0* + * - that we slice the container with a given *step*, causing a double periodicity + * - that we want to know the index of the first element in the container with a given *phase*, + * e.g., the first element on a given VP + * + * If such an index x exists, it is given by + * + * x = phase0 + k' mod lcm(period, step) + * k' = min_k ( phase0 + k step = phase mod period ) + * + * As an example, consider + * + * idx 0 1 2 3 4 5 6 7 8 9 10 11 | 12 13 14 15 16 17 18 + * gid 1 2 3 4 5 6 7 8 9 10 11 12 | 13 14 15 16 17 18 19 + * vp 1 2 3 0 1 2 3 0 1 2 3 0 | 1 2 3 0 1 2 3 + * * * * * | * * * + * 1 0 3 2 | + * + * Here, idx is a linear index into the container, gid neuron ids which map to the given vp numbers. + * The container is sliced with step=3, i.e., starting with the first element we take every third, as + * marked by the asterisks. The question is then at which index we find the first entry belonging to + * vp 0, 1, 2, or 3. The numbers in the bottom row show for clarity on which VP we find the respective + * asterisks. The | symbol marks where the pattern repeats itself. + * + * phase0 in this example is 1, i.e., the VP of the first element in the container. + * + * @note This function returns that index. It is the responsibility of the caller to check if the returned index + * is within the bounds of the actual container—the algorithm assumes an infinite container. + * + * @note The function returns *invalid_index* if no solution exists. + * + * See comments in the function definition for implementation details. + */ +size_t first_index( long period, long phase0, long step, long phase ); + #endif diff --git a/models/iaf_psc_delta.cpp b/models/iaf_psc_delta.cpp index ceadb3da45..4de329ed60 100644 --- a/models/iaf_psc_delta.cpp +++ b/models/iaf_psc_delta.cpp @@ -154,7 +154,7 @@ nest::iaf_psc_delta::Parameters_::set( const DictionaryDatum& d, Node* node ) } if ( c_m_ <= 0 ) { - throw BadProperty( "Capacitance must be >0." ); + throw BadProperty( "Capacitance must be > 0." 
); } if ( t_ref_ < 0 ) { diff --git a/nestkernel/CMakeLists.txt b/nestkernel/CMakeLists.txt index 50f99859f0..94aee72cc9 100644 --- a/nestkernel/CMakeLists.txt +++ b/nestkernel/CMakeLists.txt @@ -44,7 +44,6 @@ set ( nestkernel_sources histentry.h histentry.cpp model.h model.cpp model_manager.h model_manager_impl.h model_manager.cpp - nest_types.h nest_datums.h nest_datums.cpp nest_names.cpp nest_names.h nestmodule.h nestmodule.cpp diff --git a/nestkernel/conn_builder.cpp b/nestkernel/conn_builder.cpp index a67b0b95a5..12df5c03eb 100644 --- a/nestkernel/conn_builder.cpp +++ b/nestkernel/conn_builder.cpp @@ -623,7 +623,7 @@ nest::OneToOneBuilder::connect_() Node* target = n->get_node(); const size_t tnode_id = n->get_node_id(); - const long lid = targets_->get_lid( tnode_id ); + const long lid = targets_->get_nc_index( tnode_id ); if ( lid < 0 ) // Is local node in target list? { continue; @@ -823,7 +823,7 @@ nest::AllToAllBuilder::connect_() const size_t tnode_id = n->get_node_id(); // Is the local node in the targets list? - if ( targets_->get_lid( tnode_id ) < 0 ) + if ( targets_->get_nc_index( tnode_id ) < 0 ) { continue; } @@ -1106,7 +1106,7 @@ nest::FixedInDegreeBuilder::connect_() const size_t tnode_id = n->get_node_id(); // Is the local node in the targets list? - if ( targets_->get_lid( tnode_id ) < 0 ) + if ( targets_->get_nc_index( tnode_id ) < 0 ) { continue; } @@ -1538,7 +1538,7 @@ nest::BernoulliBuilder::connect_() const size_t tnode_id = n->get_node_id(); // Is the local node in the targets list? - if ( targets_->get_lid( tnode_id ) < 0 ) + if ( targets_->get_nc_index( tnode_id ) < 0 ) { continue; } @@ -1654,7 +1654,7 @@ nest::PoissonBuilder::connect_() const size_t tnode_id = n->get_node_id(); // Is the local node in the targets list? - if ( targets_->get_lid( tnode_id ) < 0 ) + if ( targets_->get_nc_index( tnode_id ) < 0 ) { continue; } @@ -1847,7 +1847,8 @@ nest::TripartiteBernoulliWithPoolBuilder::connect_() } else { - std::copy_n( third_->begin() + get_first_pool_index_( target.lid ), pool_size_, std::back_inserter( pool ) ); + std::copy_n( + third_->begin() + get_first_pool_index_( target.nc_index ), pool_size_, std::back_inserter( pool ) ); } // step 3, iterate through indegree to make connections for this target diff --git a/nestkernel/connection_creator.h b/nestkernel/connection_creator.h index 4e2debfe91..62d2671344 100644 --- a/nestkernel/connection_creator.h +++ b/nestkernel/connection_creator.h @@ -87,8 +87,6 @@ class ConnectionCreator * - "mask": Mask definition (dictionary or masktype). * - "kernel": Kernel definition (dictionary, parametertype, or double). * - "synapse_model": The synapse model to use. - * - "targets": Which targets (model or lid) to select (dictionary). - * - "sources": Which targets (model or lid) to select (dictionary). * - "weight": Synaptic weight (dictionary, parametertype, or double). * - "delay": Synaptic delays (dictionary, parametertype, or double). 
* - other parameters are interpreted as synapse parameters, and may diff --git a/nestkernel/connection_creator_impl.h b/nestkernel/connection_creator_impl.h index 7b28a80757..7aa224347f 100644 --- a/nestkernel/connection_creator_impl.h +++ b/nestkernel/connection_creator_impl.h @@ -264,7 +264,7 @@ ConnectionCreator::pairwise_bernoulli_on_source_( Layer< D >& source, if ( not tgt->is_proxy() ) { - const Position< D > target_pos = target.get_position( ( *tgt_it ).lid ); + const Position< D > target_pos = target.get_position( ( *tgt_it ).nc_index ); if ( mask_.get() ) { @@ -339,7 +339,7 @@ ConnectionCreator::pairwise_bernoulli_on_target_( Layer< D >& source, const int thread_id = kernel().vp_manager.get_thread_id(); try { - NodeCollection::const_iterator target_begin = target_nc->local_begin(); + NodeCollection::const_iterator target_begin = target_nc->thread_local_begin(); NodeCollection::const_iterator target_end = target_nc->end(); for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) @@ -348,7 +348,7 @@ ConnectionCreator::pairwise_bernoulli_on_target_( Layer< D >& source, assert( not tgt->is_proxy() ); - const Position< D > target_pos = target.get_position( ( *tgt_it ).lid ); + const Position< D > target_pos = target.get_position( ( *tgt_it ).nc_index ); if ( mask_.get() ) { @@ -423,7 +423,7 @@ ConnectionCreator::pairwise_poisson_( Layer< D >& source, if ( not tgt->is_proxy() ) { - const Position< D > target_pos = target.get_position( ( *tgt_it ).lid ); + const Position< D > target_pos = target.get_position( ( *tgt_it ).nc_index ); if ( mask_.get() ) { @@ -477,7 +477,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, throw IllegalConnection( "Spatial Connect with fixed_indegree to devices is not possible." ); } - NodeCollection::const_iterator target_begin = target_nc->MPI_local_begin(); + NodeCollection::const_iterator target_begin = target_nc->rank_local_begin(); NodeCollection::const_iterator target_end = target_nc->end(); // protect against connecting to devices without proxies @@ -504,7 +504,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, size_t target_thread = tgt->get_thread(); RngPtr rng = get_vp_specific_rng( target_thread ); - Position< D > target_pos = target.get_position( ( *tgt_it ).lid ); + Position< D > target_pos = target.get_position( ( *tgt_it ).nc_index ); // We create a source pos vector here that can be updated with the // source position. This is done to avoid creating and destroying @@ -637,7 +637,7 @@ ConnectionCreator::fixed_indegree_( Layer< D >& source, Node* const tgt = kernel().node_manager.get_node_or_proxy( target_id ); size_t target_thread = tgt->get_thread(); RngPtr rng = get_vp_specific_rng( target_thread ); - Position< D > target_pos = target.get_position( ( *tgt_it ).lid ); + Position< D > target_pos = target.get_position( ( *tgt_it ).nc_index ); unsigned long target_number_connections = std::round( number_of_connections_->value( rng, tgt ) ); @@ -769,7 +769,7 @@ ConnectionCreator::fixed_outdegree_( Layer< D >& source, throw IllegalConnection( "Spatial Connect with fixed_outdegree to devices is not possible." 
); } - NodeCollection::const_iterator target_begin = target_nc->MPI_local_begin(); + NodeCollection::const_iterator target_begin = target_nc->rank_local_begin(); NodeCollection::const_iterator target_end = target_nc->end(); for ( NodeCollection::const_iterator tgt_it = target_begin; tgt_it < target_end; ++tgt_it ) diff --git a/nestkernel/free_layer.h b/nestkernel/free_layer.h index 0267d83d10..40b1c972cc 100644 --- a/nestkernel/free_layer.h +++ b/nestkernel/free_layer.h @@ -48,9 +48,9 @@ template < int D > class FreeLayer : public Layer< D > { public: - Position< D > get_position( size_t sind ) const; - void set_status( const DictionaryDatum& ); - void get_status( DictionaryDatum& ) const; + Position< D > get_position( size_t sind ) const override; + void set_status( const DictionaryDatum& ) override; + void get_status( DictionaryDatum&, NodeCollection const* ) const override; protected: /** @@ -61,14 +61,14 @@ class FreeLayer : public Layer< D > template < class Ins > void communicate_positions_( Ins iter, NodeCollectionPTR node_collection ); - void insert_global_positions_ntree_( Ntree< D, size_t >& tree, NodeCollectionPTR node_collection ); + void insert_global_positions_ntree_( Ntree< D, size_t >& tree, NodeCollectionPTR node_collection ) override; void insert_global_positions_vector_( std::vector< std::pair< Position< D >, size_t > >& vec, - NodeCollectionPTR node_collection ); + NodeCollectionPTR node_collection ) override; /** * Calculate the index in the position vector on this MPI process based on the local ID. * - * @param lid local ID of the node + * @param lid global index of node within layer * @return index in the local position vector */ size_t lid_to_position_id_( size_t lid ) const; @@ -255,15 +255,46 @@ FreeLayer< D >::set_status( const DictionaryDatum& d ) template < int D > void -FreeLayer< D >::get_status( DictionaryDatum& d ) const +FreeLayer< D >::get_status( DictionaryDatum& d, NodeCollection const* nc ) const { - Layer< D >::get_status( d ); + Layer< D >::get_status( d, nc ); TokenArray points; - for ( typename std::vector< Position< D > >::const_iterator it = positions_.begin(); it != positions_.end(); ++it ) + + if ( not nc ) + { + // This is needed by NodeCollectionMetadata::operator==() which does not have access to the node collection + for ( const auto& pos : positions_ ) + { + points.push_back( pos.getToken() ); + } + } + else { - points.push_back( it->getToken() ); + // Selecting the right positions + // - Coordinates for all nodes in the underlying primitive node collection + // which belong to this rank are stored in positions_ + // - nc has information on which nodes actually belong to it, especially + // important for sliced collections with step > 1. + // - Use the rank-local iterator over the node collection to pick the right + // nodes, then step in lockstep through the positions_ array. + auto nc_it = nc->rank_local_begin(); + const auto nc_end = nc->end(); + if ( nc_it < nc_end ) + { + // Node index in node collection is global to NEST, so we need to scale down + // to get right indices into positions_, which has only rank-local data. 
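+      // Illustrative sketch (hypothetical two-rank setup, not part of the algorithm):
+      // suppose the layer is backed by a primitive NodeCollection of 8 nodes and this
+      // rank happens to own the nodes at nc_index 1, 3, 5, 7, whose positions are
+      // stored in positions_[ 0..3 ]. rank_local_begin() then starts at nc_index 1
+      // with an iterator step of lcm( 1, 2 ) = 2, so below pos_idx = 1 / 2 = 0 and
+      // step = 2 / 2 = 1, and we walk positions_ in lockstep with the iterator.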
+ const size_t n_procs = kernel().mpi_manager.get_num_processes(); + size_t pos_idx = ( *nc_it ).nc_index / n_procs; + size_t step = nc_it.get_step_size() / n_procs; + + for ( ; nc_it < nc->end(); pos_idx += step, ++nc_it ) + { + points.push_back( positions_.at( pos_idx ).getToken() ); + } + } } + def2< TokenArray, ArrayDatum >( d, names::positions, points ); } @@ -288,7 +319,7 @@ FreeLayer< D >::communicate_positions_( Ins iter, NodeCollectionPTR node_collect // know that all nodes in the NodeCollection have proxies. Likewise, if it returns false we know that // no nodes have proxies. NodeCollection::const_iterator nc_begin = - node_collection->has_proxies() ? node_collection->MPI_local_begin() : node_collection->begin(); + node_collection->has_proxies() ? node_collection->rank_local_begin() : node_collection->begin(); NodeCollection::const_iterator nc_end = node_collection->end(); // Reserve capacity in the vector based on number of local nodes. If the NodeCollection is sliced, @@ -299,7 +330,7 @@ FreeLayer< D >::communicate_positions_( Ins iter, NodeCollectionPTR node_collect // Push node ID into array to communicate local_node_id_pos.push_back( ( *nc_it ).node_id ); // Push coordinates one by one - const auto pos = get_position( ( *nc_it ).lid ); + const auto pos = get_position( ( *nc_it ).nc_index ); for ( int j = 0; j < D; ++j ) { local_node_id_pos.push_back( pos[ j ] ); diff --git a/nestkernel/grid_layer.h b/nestkernel/grid_layer.h index fe1c4fd67f..75fc467fb5 100644 --- a/nestkernel/grid_layer.h +++ b/nestkernel/grid_layer.h @@ -122,12 +122,12 @@ class GridLayer : public Layer< D > * @param sind index of node * @returns position of node. */ - Position< D > get_position( size_t sind ) const; + Position< D > get_position( size_t sind ) const override; /** * Get position of node. Also allowed for non-local nodes. * - * @param lid local index of node + * @param lid global index of node within layer * @returns position of node. */ Position< D > lid_to_position( size_t lid ) const; @@ -148,17 +148,17 @@ class GridLayer : public Layer< D > Position< D, size_t > get_dims() const; - void set_status( const DictionaryDatum& d ); - void get_status( DictionaryDatum& d ) const; + void set_status( const DictionaryDatum& d ) override; + void get_status( DictionaryDatum& d, NodeCollection const* ) const override; protected: Position< D, size_t > dims_; ///< number of nodes in each direction. 
template < class Ins > void insert_global_positions_( Ins iter, NodeCollectionPTR node_collection ); - void insert_global_positions_ntree_( Ntree< D, size_t >& tree, NodeCollectionPTR node_collection ); + void insert_global_positions_ntree_( Ntree< D, size_t >& tree, NodeCollectionPTR node_collection ) override; void insert_global_positions_vector_( std::vector< std::pair< Position< D >, size_t > >& vec, - NodeCollectionPTR node_collection ); + NodeCollectionPTR node_collection ) override; }; template < int D > @@ -206,9 +206,9 @@ GridLayer< D >::set_status( const DictionaryDatum& d ) template < int D > void -GridLayer< D >::get_status( DictionaryDatum& d ) const +GridLayer< D >::get_status( DictionaryDatum& d, NodeCollection const* nc ) const { - Layer< D >::get_status( d ); + Layer< D >::get_status( d, nc ); ( *d )[ names::shape ] = std::vector< size_t >( dims_.get_vector() ); } @@ -286,7 +286,7 @@ GridLayer< D >::insert_global_positions_( Ins iter, NodeCollectionPTR node_colle for ( auto gi = node_collection->begin(); gi < node_collection->end(); ++gi ) { const auto triple = *gi; - *iter++ = std::pair< Position< D >, size_t >( lid_to_position( triple.lid ), triple.node_id ); + *iter++ = std::pair< Position< D >, size_t >( lid_to_position( triple.nc_index ), triple.node_id ); } } diff --git a/nestkernel/layer.h b/nestkernel/layer.h index 81bdef7f5d..7adf212118 100644 --- a/nestkernel/layer.h +++ b/nestkernel/layer.h @@ -69,17 +69,19 @@ class AbstractLayer /** * Export properties of the layer by setting - * entries in the status dictionary. + * entries in the status dictionary, respects slicing of given NodeCollection * @param d Dictionary. + * + * @note If nullptr is passed for NodeCollection*, full metadata irrespective of any slicing is returned. */ - virtual void get_status( DictionaryDatum& ) const = 0; + virtual void get_status( DictionaryDatum&, NodeCollection const* ) const = 0; virtual unsigned int get_num_dimensions() const = 0; /** * Get position of node. Only possible for local nodes. * - * @param lid index of node within layer + * @param lid global index of node within layer * @returns position of node as std::vector */ virtual std::vector< double > get_position_vector( const size_t lid ) const = 0; @@ -234,13 +236,8 @@ class Layer : public AbstractLayer */ void set_status( const DictionaryDatum& ) override; - /** - * Export properties of the layer by setting - * entries in the status dictionary. - * - * @param d Dictionary. - */ - void get_status( DictionaryDatum& ) const override; + //! 
Retrieve status, slice according to node collection if given + void get_status( DictionaryDatum&, NodeCollection const* ) const override; unsigned int get_num_dimensions() const override diff --git a/nestkernel/layer_impl.h b/nestkernel/layer_impl.h index 5f44de8527..0b46aebc4d 100644 --- a/nestkernel/layer_impl.h +++ b/nestkernel/layer_impl.h @@ -92,7 +92,7 @@ Layer< D >::set_status( const DictionaryDatum& d ) template < int D > void -Layer< D >::get_status( DictionaryDatum& d ) const +Layer< D >::get_status( DictionaryDatum& d, NodeCollection const* nc ) const { ( *d )[ names::extent ] = std::vector< double >( extent_.get_vector() ); ( *d )[ names::center ] = std::vector< double >( ( lower_left_ + extent_ / 2 ).get_vector() ); @@ -105,6 +105,13 @@ Layer< D >::get_status( DictionaryDatum& d ) const { ( *d )[ names::edge_wrap ] = true; } + + if ( nc ) + { + // This is for backward compatibility with some tests and scripts + // TODO: Rename parameter + ( *d )[ names::network_size ] = nc->size(); + } } template < int D > @@ -286,12 +293,12 @@ template < int D > void Layer< D >::dump_nodes( std::ostream& out ) const { - for ( NodeCollection::const_iterator it = this->node_collection_->MPI_local_begin(); + for ( NodeCollection::const_iterator it = this->node_collection_->rank_local_begin(); it < this->node_collection_->end(); ++it ) { out << ( *it ).node_id << ' '; - get_position( ( *it ).lid ).print( out ); + get_position( ( *it ).nc_index ).print( out ); out << std::endl; } } @@ -345,7 +352,7 @@ Layer< D >::dump_connections( std::ostream& out, Layer< D >* tgt_layer = dynamic_cast< Layer< D >* >( target_layer.get() ); out << ' '; - const long tnode_lid = tgt_layer->node_collection_->get_lid( target_node_id ); + const long tnode_lid = tgt_layer->node_collection_->get_nc_index( target_node_id ); assert( tnode_lid >= 0 ); tgt_layer->compute_displacement( source_pos, tnode_lid ).print( out ); out << '\n'; diff --git a/nestkernel/nest.cpp b/nestkernel/nest.cpp index 00e0e78fc8..5e99aebda4 100644 --- a/nestkernel/nest.cpp +++ b/nestkernel/nest.cpp @@ -416,31 +416,4 @@ node_collection_array_index( const Datum* datum, const bool* array, unsigned lon return new NodeCollectionDatum( NodeCollection::create( node_ids ) ); } -void -slice_positions_if_sliced_nc( DictionaryDatum& dict, const NodeCollectionDatum& nc ) -{ - // If metadata contains node positions and the NodeCollection is sliced, get only positions of the sliced nodes. - if ( dict->known( names::positions ) ) - { - const auto positions = getValue< TokenArray >( dict, names::positions ); - if ( nc->size() != positions.size() ) - { - TokenArray sliced_points; - // Iterate only local nodes - NodeCollection::const_iterator nc_begin = nc->has_proxies() ? nc->MPI_local_begin() : nc->begin(); - NodeCollection::const_iterator nc_end = nc->end(); - for ( auto node = nc_begin; node < nc_end; ++node ) - { - // Because the local ID also includes non-local nodes, it must be adapted to represent - // the index for the local node position. 
- const auto index = - static_cast< size_t >( std::floor( ( *node ).lid / kernel().mpi_manager.get_num_processes() ) ); - sliced_points.push_back( positions[ index ] ); - } - def2< TokenArray, ArrayDatum >( dict, names::positions, sliced_points ); - } - } -} - - } // namespace nest diff --git a/nestkernel/nest.h b/nestkernel/nest.h index 2ecfece57b..5cdd7c2b46 100644 --- a/nestkernel/nest.h +++ b/nestkernel/nest.h @@ -191,15 +191,6 @@ std::vector< double > apply( const ParameterDatum& param, const DictionaryDatum& Datum* node_collection_array_index( const Datum* datum, const long* array, unsigned long n ); Datum* node_collection_array_index( const Datum* datum, const bool* array, unsigned long n ); -/** - * @brief Get only positions of the sliced nodes if metadata contains node positions and the NodeCollection is sliced. - * - * Puts an array of positions sliced the same way as a sliced NodeCollection into dict. - * Positions have to be sliced on introspection because metadata of a sliced NodeCollection - * for internal consistency and efficiency points to the metadata of the original - * NodeCollection. - */ -void slice_positions_if_sliced_nc( DictionaryDatum& dict, const NodeCollectionDatum& nc ); } diff --git a/nestkernel/nestmodule.cpp b/nestkernel/nestmodule.cpp index 639f886077..d5da67945f 100644 --- a/nestkernel/nestmodule.cpp +++ b/nestkernel/nestmodule.cpp @@ -510,17 +510,8 @@ NestModule::GetMetadata_gFunction::execute( SLIInterpreter* i ) const "InvalidNodeCollection: note that ResetKernel invalidates all previously created NodeCollections." ); } - NodeCollectionMetadataPTR meta = nc->get_metadata(); DictionaryDatum dict = DictionaryDatum( new Dictionary ); - - // return empty dict if NC does not have metadata - if ( meta.get() ) - { - meta->get_status( dict ); - slice_positions_if_sliced_nc( dict, nc ); - - ( *dict )[ names::network_size ] = nc->size(); - } + nc->get_metadata_status( dict ); i->OStack.pop(); i->OStack.push( dict ); @@ -1053,13 +1044,16 @@ NestModule::Cvnodecollection_ivFunction::execute( SLIInterpreter* i ) const } void -NestModule::Cva_gFunction::execute( SLIInterpreter* i ) const +NestModule::Cva_g_lFunction::execute( SLIInterpreter* i ) const { - i->assert_stack_load( 1 ); - NodeCollectionDatum nodecollection = getValue< NodeCollectionDatum >( i->OStack.pick( 0 ) ); - ArrayDatum node_ids = nodecollection->to_array(); + i->assert_stack_load( 2 ); - i->OStack.pop(); + const std::string selection = getValue< std::string >( i->OStack.pick( 0 ) ); + NodeCollectionDatum nodecollection = getValue< NodeCollectionDatum >( i->OStack.pick( 1 ) ); + + ArrayDatum node_ids = nodecollection->to_array( selection ); + + i->OStack.pop( 2 ); i->OStack.push( node_ids ); i->EStack.pop(); } @@ -1120,7 +1114,7 @@ NestModule::Find_g_iFunction::execute( SLIInterpreter* i ) const NodeCollectionDatum nodecollection = getValue< NodeCollectionDatum >( i->OStack.pick( 1 ) ); const long node_id = getValue< long >( i->OStack.pick( 0 ) ); - const auto res = nodecollection->get_lid( node_id ); + const auto res = nodecollection->get_nc_index( node_id ); i->OStack.pop( 2 ); i->OStack.push( res ); i->EStack.pop(); @@ -2160,7 +2154,7 @@ NestModule::init( SLIInterpreter* i ) i->createcommand( "cvnodecollection_i_i", &cvnodecollection_i_ifunction ); i->createcommand( "cvnodecollection_ia", &cvnodecollection_iafunction ); i->createcommand( "cvnodecollection_iv", &cvnodecollection_ivfunction ); - i->createcommand( "cva_g", &cva_gfunction ); + i->createcommand( "cva_g_l", &cva_g_lfunction ); 
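+  // Note: cva_g_l expects a NodeCollection and a selection literal (/all, /rank or
+  // /thread, cf. NodeCollection::to_array()) on the stack and leaves an array of
+  // node IDs; nest-init.sli maps the plain one-argument cva call to the /all case.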
i->createcommand( "size_g", &size_gfunction ); i->createcommand( "ValidQ_g", &validq_gfunction ); i->createcommand( "join_g_g", &join_g_gfunction ); diff --git a/nestkernel/nestmodule.h b/nestkernel/nestmodule.h index 55df2bce23..1b47b4711c 100644 --- a/nestkernel/nestmodule.h +++ b/nestkernel/nestmodule.h @@ -847,10 +847,10 @@ class NestModule : public SLIModule void execute( SLIInterpreter* ) const override; } cvnodecollection_ivfunction; - class Cva_gFunction : public SLIFunction + class Cva_g_lFunction : public SLIFunction { void execute( SLIInterpreter* ) const override; - } cva_gfunction; + } cva_g_lfunction; class Size_gFunction : public SLIFunction { @@ -1468,35 +1468,6 @@ class NestModule : public SLIModule * linear, exponential and other. * * - * Parameter name: source - * - * Type: dictionary - * - * Parameter description: - * - * The source dictionary enables us to give further detail on - * how the nodes in the source layer used in the connection function - * should be processed. - * - * Parameters: - * model* literal - * lid^ integer - * - * *modeltype (i.e. /iaf_psc_alpha) of nodes that should be connected to - * in the layer. All nodes are used if this variable isn't set. - * ^Nesting depth of nodes that should be connected to. All layers are used - * if this variable isn't set. - * - * - * Parameter name: target - * - * Type: dictionary - * - * Parameter description: - * - * See description for source dictionary. - * - * * Parameter name: number_of_connections * * Type: integer diff --git a/nestkernel/node_collection.cpp b/nestkernel/node_collection.cpp index 355831a666..b4323921f4 100644 --- a/nestkernel/node_collection.cpp +++ b/nestkernel/node_collection.cpp @@ -22,6 +22,9 @@ #include "node_collection.h" +// Includes from libnestutil +#include "numerics.h" + // Includes from nestkernel: #include "kernel_manager.h" #include "mpi_manager_impl.h" @@ -30,13 +33,18 @@ // C++ includes: #include // copy +#include // lcm #include // accumulate namespace nest { -// function object for sorting a vector of NodeCollectionPrimitives +/** + * Functor for sorting a vector of NodeCollectionPrimitives. + * + * Since primitives are contiguous, sort by GID of first element. + */ const struct PrimitiveSortOp { bool @@ -50,75 +58,210 @@ const struct PrimitiveSortOp nc_const_iterator::nc_const_iterator( NodeCollectionPTR collection_ptr, const NodeCollectionPrimitive& collection, size_t offset, - size_t step ) + size_t stride, + NCIteratorKind kind ) : coll_ptr_( collection_ptr ) , element_idx_( offset ) , part_idx_( 0 ) - , step_( step ) + , step_( kind == NCIteratorKind::RANK_LOCAL + ? std::lcm( stride, kernel().mpi_manager.get_num_processes() ) + : ( kind == NCIteratorKind::THREAD_LOCAL ? std::lcm( stride, kernel().vp_manager.get_num_virtual_processes() ) + : stride ) ) + , kind_( kind ) + , rank_or_vp_( kind == NCIteratorKind::RANK_LOCAL + ? kernel().mpi_manager.get_rank() + : ( kind == NCIteratorKind::THREAD_LOCAL ? 
kernel().vp_manager.get_vp() : invalid_thread ) ) , primitive_collection_( &collection ) , composite_collection_( nullptr ) { assert( not collection_ptr.get() or collection_ptr.get() == &collection ); - - if ( offset > collection.size() ) // allow == size() for end iterator - { - throw KernelException( "Invalid offset into NodeCollectionPrimitive" ); - } + assert( element_idx_ <= collection.size() ); // allow == for end() + + FULL_LOGGING_ONLY( + kernel().write_to_dump( String::compose( "NCIT Prim ctor rk %1, thr %2, pix %3, eix %4, step %5, kind %6, rvp %7", + kernel().mpi_manager.get_rank(), + kernel().vp_manager.get_thread_id(), + part_idx_, + element_idx_, + step_, + static_cast< int >( kind_ ), + rank_or_vp_ ) ); ) } nc_const_iterator::nc_const_iterator( NodeCollectionPTR collection_ptr, const NodeCollectionComposite& collection, size_t part, size_t offset, - size_t step ) + size_t stride, + NCIteratorKind kind ) : coll_ptr_( collection_ptr ) , element_idx_( offset ) , part_idx_( part ) - , step_( step ) + , step_( kind == NCIteratorKind::RANK_LOCAL + ? std::lcm( stride, kernel().mpi_manager.get_num_processes() ) + : ( kind == NCIteratorKind::THREAD_LOCAL ? std::lcm( stride, kernel().vp_manager.get_num_virtual_processes() ) + : stride ) ) + , kind_( kind ) + , rank_or_vp_( kind == NCIteratorKind::RANK_LOCAL + ? kernel().mpi_manager.get_rank() + : ( kind == NCIteratorKind::THREAD_LOCAL ? kernel().vp_manager.get_vp() : invalid_thread ) ) , primitive_collection_( nullptr ) , composite_collection_( &collection ) { assert( not collection_ptr.get() or collection_ptr.get() == &collection ); - if ( ( part >= collection.parts_.size() or offset >= collection.parts_[ part ].size() ) - and not( part == collection.parts_.size() and offset == 0 ) // end iterator - ) + // Allow <= for end iterator + assert( ( part < collection.parts_.size() and offset <= collection.parts_[ part ].size() ) ); + + FULL_LOGGING_ONLY( + kernel().write_to_dump( String::compose( "NCIT Comp ctor rk %1, thr %2, pix %3, eix %4, step %5, kind %6, rvp %7", + kernel().mpi_manager.get_rank(), + kernel().vp_manager.get_thread_id(), + part_idx_, + element_idx_, + step_, + static_cast< int >( kind_ ), + rank_or_vp_ ) ); ) +} + +size_t +nc_const_iterator::find_next_within_part_( size_t n ) const +{ + const size_t new_element_idx = element_idx_ + n * step_; + + if ( primitive_collection_ ) { - throw KernelException( "Invalid part or offset into NodeCollectionComposite" ); + // Avoid running over end of collection; primitive_collection_->size() is end marker + return std::min( new_element_idx, primitive_collection_->size() ); } + + if ( new_element_idx < composite_collection_->parts_[ part_idx_ ].size() ) + { + if ( composite_collection_->valid_idx_( part_idx_, new_element_idx ) ) + { + // We have found an element in the part + return new_element_idx; + } + else + { + // We have reached the end of the node collection, return index for end iterator + assert( part_idx_ == composite_collection_->last_part_ ); + return composite_collection_->last_elem_ + 1; + } + } + + // No new element found in this part and collection not exhausted + return element_idx_; } void -nc_const_iterator::composite_update_indices_() +nc_const_iterator::advance_global_iter_to_new_part_( size_t n ) { - // If we went past the size of the primitive, we need to adjust the element - // and primitive part indices. 
- size_t primitive_size = composite_collection_->parts_[ part_idx_ ].size(); - while ( element_idx_ >= primitive_size ) + if ( part_idx_ == composite_collection_->last_part_ ) + { + // No more parts, set to end() + part_idx_ = composite_collection_->last_part_; + element_idx_ = composite_collection_->last_elem_ + 1; + return; + } + + // Find new position counting from beginning of node collection + const auto part_abs_begin = part_idx_ == 0 ? 0 : composite_collection_->cumul_abs_size_[ part_idx_ - 1 ]; + const auto new_abs_idx = part_abs_begin + element_idx_ + n * composite_collection_->stride_; + + // Confirm that new position is in a new part + assert( new_abs_idx >= composite_collection_->cumul_abs_size_[ part_idx_ ] ); + + // Move to part that contains new position + do { - element_idx_ = element_idx_ - primitive_size; ++part_idx_; - if ( part_idx_ < composite_collection_->parts_.size() ) - { - primitive_size = composite_collection_->parts_[ part_idx_ ].size(); - } + } while ( part_idx_ <= composite_collection_->last_part_ + and composite_collection_->cumul_abs_size_[ part_idx_ ] <= new_abs_idx ); + + // If there is another element, it must have this index + element_idx_ = new_abs_idx - composite_collection_->cumul_abs_size_[ part_idx_ - 1 ]; + + if ( not composite_collection_->valid_idx_( part_idx_, element_idx_ ) ) + { + // Node collection exhausted + part_idx_ = composite_collection_->last_part_; + element_idx_ = composite_collection_->last_elem_ + 1; } - // If we went past the end of the composite, we need to adjust the - // position of the iterator. - if ( composite_collection_->is_sliced_ ) +} + +void +nc_const_iterator::advance_local_iter_to_new_part_( size_t n ) +{ + // We know that we need to look in another part + if ( part_idx_ == composite_collection_->last_part_ ) { - assert( composite_collection_->end_offset_ != 0 or composite_collection_->end_part_ != 0 ); - if ( part_idx_ >= composite_collection_->end_part_ and element_idx_ >= composite_collection_->end_offset_ ) + // No more parts, set to end() + part_idx_ = composite_collection_->last_part_; + element_idx_ = composite_collection_->last_elem_ + 1; + return; + } + + // {RANK,THREAD}_LOCAL iterators require phase adjustment + // which is feasible only for single steps, so unroll + for ( size_t k = 0; k < n; ++k ) + { + // Find next part that has element in underlying GLOBAL stride + do + { + ++part_idx_; + } while ( part_idx_ <= composite_collection_->last_part_ + and composite_collection_->first_in_part_[ part_idx_ ] == invalid_index ); + + if ( part_idx_ <= composite_collection_->last_part_ ) { - part_idx_ = composite_collection_->end_part_; - element_idx_ = composite_collection_->end_offset_; + // We have a candidate part and a first valid element in it, so we perform phase adjustment + + assert( composite_collection_->first_in_part_[ part_idx_ ] != invalid_index ); + element_idx_ = composite_collection_->first_in_part_[ part_idx_ ]; + + // Now perform phase adjustment + switch ( kind_ ) + { + case NCIteratorKind::RANK_LOCAL: + { + const size_t num_ranks = kernel().mpi_manager.get_num_processes(); + const size_t current_rank = kernel().mpi_manager.get_rank(); + + std::tie( part_idx_, element_idx_ ) = composite_collection_->specific_local_begin_( + num_ranks, current_rank, part_idx_, element_idx_, NodeCollectionComposite::gid_to_rank_ ); + + FULL_LOGGING_ONLY( kernel().write_to_dump( + String::compose( "ACIL rk %1, pix %2, eix %3", kernel().mpi_manager.get_rank(), part_idx_, element_idx_ ) ); ) + break; + } + case 
NCIteratorKind::THREAD_LOCAL: + { + const size_t num_vps = kernel().vp_manager.get_num_virtual_processes(); + const size_t current_vp = kernel().vp_manager.thread_to_vp( kernel().vp_manager.get_thread_id() ); + + std::tie( part_idx_, element_idx_ ) = composite_collection_->specific_local_begin_( + num_vps, current_vp, part_idx_, element_idx_, NodeCollectionComposite::gid_to_vp_ ); + + break; + } + default: + assert( false ); // should not be here, otherwise kind_ is inconsistent + break; + } + } + else + { + break; // no more parts to search } } - else if ( part_idx_ >= composite_collection_->parts_.size() ) + + // In case we did not find a solution in phase adjustment, set to end() + if ( part_idx_ == invalid_index or not composite_collection_->valid_idx_( part_idx_, element_idx_ ) ) { - auto end_of_composite = composite_collection_->end(); - part_idx_ = end_of_composite.part_idx_; - element_idx_ = end_of_composite.element_idx_; + // Node collection exhausted, set to end() + part_idx_ = composite_collection_->last_part_; + element_idx_ = composite_collection_->last_elem_ + 1; } } @@ -141,71 +284,32 @@ nc_const_iterator::operator*() const throw KernelException( "Invalid NodeCollection iterator (primitive element beyond last element)" ); } gt.model_id = primitive_collection_->model_id_; - gt.lid = element_idx_; + gt.nc_index = element_idx_; } else { - // for efficiency we check each value instead of simply checking against - // composite_collection->end() - if ( not( part_idx_ < composite_collection_->end_part_ - or ( part_idx_ == composite_collection_->end_part_ - and element_idx_ < composite_collection_->end_offset_ ) ) ) + if ( not composite_collection_->valid_idx_( part_idx_, element_idx_ ) ) { - throw KernelException( "Invalid NodeCollection iterator (composite element beyond specified end element)" ); - } - - // Add to local placement from NodeCollectionPrimitives that comes before the - // current one. - gt.lid = 0; - for ( const auto& part : composite_collection_->parts_ ) - { - // Using a stripped-down comparison of Primitives to avoid redundant and potentially expensive comparisons of - // metadata. - const auto& current_part = composite_collection_->parts_[ part_idx_ ]; - if ( part.first_ == current_part.first_ and part.last_ == current_part.last_ ) - { - break; - } - gt.lid += part.size(); + FULL_LOGGING_ONLY( kernel().write_to_dump( + String::compose( "nci::op* comp err rk %1, lp %2, le %3, pix %4, eix %5, end_pix %6, end_eix %7", + kernel().mpi_manager.get_rank(), + composite_collection_->last_part_, + composite_collection_->last_elem_, + part_idx_, + element_idx_, + composite_collection_->end().part_idx_, + composite_collection_->end().element_idx_ ) ); ) + assert( false ); + throw KernelException( "Invalid NodeCollection iterator for composite collection)" ); } + const auto part_begin_idx = part_idx_ == 0 ? 
0 : composite_collection_->cumul_abs_size_[ part_idx_ - 1 ]; + gt.nc_index = part_begin_idx + element_idx_; gt.node_id = composite_collection_->parts_[ part_idx_ ][ element_idx_ ]; gt.model_id = composite_collection_->parts_[ part_idx_ ].model_id_; - gt.lid += element_idx_; - } - return gt; -} - -nc_const_iterator& -nc_const_iterator::operator++() -{ - element_idx_ += step_; - if ( primitive_collection_ ) - { - if ( element_idx_ >= primitive_collection_->size() ) - { - element_idx_ = primitive_collection_->size(); - } } - else - { - composite_update_indices_(); - } - return *this; -} -nc_const_iterator -nc_const_iterator::operator++( int ) -{ - nc_const_iterator tmp = *this; - ++( *this ); - return tmp; -} - -NodeCollectionPTR -operator+( NodeCollectionPTR lhs, NodeCollectionPTR rhs ) -{ - return lhs->operator+( rhs ); + return gt; } NodeCollection::NodeCollection() @@ -303,7 +407,7 @@ NodeCollection::create_( const std::vector< size_t >& node_ids ) std::vector< NodeCollectionPrimitive > parts; size_t old_node_id = current_first; - for ( auto node_id = ++( node_ids.begin() ); node_id != node_ids.end(); ++node_id ) + for ( auto node_id = std::next( node_ids.begin() ); node_id < node_ids.end(); ++node_id ) { if ( *node_id == old_node_id ) { @@ -320,7 +424,7 @@ NodeCollection::create_( const std::vector< size_t >& node_ids ) } else { - // store Primitive; node goes in new Primitive + // store completed Primitive; node goes in new Primitive parts.emplace_back( current_first, current_last, current_model ); current_first = *node_id; current_last = current_first; @@ -347,6 +451,17 @@ NodeCollection::valid() const return fingerprint_ == kernel().get_fingerprint(); } +void +NodeCollection::get_metadata_status( DictionaryDatum& d ) const +{ + NodeCollectionMetadataPTR meta = get_metadata(); + if ( not meta ) + { + return; + } + meta->get_status( d, this ); +} + NodeCollectionPrimitive::NodeCollectionPrimitive( size_t first, size_t last, size_t model_id, @@ -357,9 +472,8 @@ NodeCollectionPrimitive::NodeCollectionPrimitive( size_t first, , metadata_( meta ) , nodes_have_no_proxies_( not kernel().model_manager.get_node_model( model_id_ )->has_proxies() ) { - assert_consistent_model_ids_( model_id_ ); - assert( first_ <= last_ ); + assert_consistent_model_ids_( model_id_ ); } NodeCollectionPrimitive::NodeCollectionPrimitive( size_t first, size_t last, size_t model_id ) @@ -405,14 +519,58 @@ NodeCollectionPrimitive::NodeCollectionPrimitive() } ArrayDatum -NodeCollectionPrimitive::to_array() const +NodeCollection::to_array( const std::string& selection ) const { ArrayDatum node_ids; - node_ids.reserve( size() ); - for ( auto it = begin(); it < end(); ++it ) + + if ( selection == "thread" ) + { + // We must do the folloing on the corresponding threads, but one at + // a time to fill properly. Thread beginnings are marked by 0 thread 0 +#pragma omp parallel + { +#pragma omp critical + { + // We need to defined zero explicitly here, otherwise push_back() does strange things + const size_t zero = 0; + node_ids.push_back( zero ); + node_ids.push_back( kernel().vp_manager.get_thread_id() ); + node_ids.push_back( zero ); + + const auto end_it = end(); + for ( auto it = thread_local_begin(); it < end_it; ++it ) + { + node_ids.push_back( ( *it ).node_id ); + } + } // end critical + } // end parallel + } + else { - node_ids.push_back( ( *it ).node_id ); + // Slightly repetitive code but nc_const_iterator does not have + // no-argument constructor nor copy constructor and this is a debug function only. 
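+    // Illustration of the resulting layout (hypothetical two-thread run): the
+    // "thread" selection above produces one block per thread, in the order the
+    // threads enter the critical section, each introduced by the sentinel triple
+    // 0, thread_id, 0, e.g.
+    //   [ 0, 0, 0, <node IDs on thread 0>, 0, 1, 0, <node IDs on thread 1> ]
+    // The "all" and "rank" selections handled below return a plain list of node IDs.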
+ if ( selection == "all" ) + { + for ( const auto& val : *this ) + { + node_ids.push_back( val.node_id ); + } + } + else if ( selection == "rank" ) + { + const auto end_it = end(); + for ( auto it = rank_local_begin(); it < end_it; ++it ) + { + node_ids.push_back( ( *it ).node_id ); + } + } + else + { + throw BadParameter( + String::compose( "to_array() accepts only 'all', 'rank', 'thread', but got '%1'.", selection ) ); + } } + return node_ids; } @@ -444,6 +602,7 @@ NodeCollectionPrimitive::operator+( NodeCollectionPTR rhs ) const return std::make_shared< NodeCollectionComposite >( *rhs_ptr ); } } + if ( ( get_metadata().get() or rhs->get_metadata().get() ) and not( get_metadata() == rhs->get_metadata() ) ) { throw BadProperty( "Can only join NodeCollections with same metadata." ); @@ -458,16 +617,18 @@ NodeCollectionPrimitive::operator+( NodeCollectionPTR rhs ) const throw BadProperty( "Cannot join overlapping NodeCollections." ); } if ( ( last_ + 1 ) == rhs_ptr->first_ and model_id_ == rhs_ptr->model_id_ ) - // if contiguous and homogeneous { + // contiguous and homogeneous, lhs before rhs return std::make_shared< NodeCollectionPrimitive >( first_, rhs_ptr->last_, model_id_, metadata_ ); } else if ( ( rhs_ptr->last_ + 1 ) == first_ and model_id_ == rhs_ptr->model_id_ ) { + // contiguous and homogeneous, rhs before lhs return std::make_shared< NodeCollectionPrimitive >( rhs_ptr->first_, last_, model_id_, metadata_ ); } - else // not contiguous and homogeneous + else { + // not contiguous and homogeneous std::vector< NodeCollectionPrimitive > primitives; primitives.reserve( 2 ); primitives.push_back( *this ); @@ -483,45 +644,45 @@ NodeCollectionPrimitive::operator+( NodeCollectionPTR rhs ) const } } -NodeCollectionPrimitive::const_iterator -NodeCollectionPrimitive::local_begin( NodeCollectionPTR cp ) const +NodeCollection::const_iterator +NodeCollectionPrimitive::rank_local_begin( NodeCollectionPTR cp ) const { - const size_t num_vps = kernel().vp_manager.get_num_virtual_processes(); - const size_t current_vp = kernel().vp_manager.thread_to_vp( kernel().vp_manager.get_thread_id() ); - const size_t vp_first_node = kernel().vp_manager.node_id_to_vp( first_ ); - const size_t offset = ( current_vp - vp_first_node + num_vps ) % num_vps; + const size_t num_processes = kernel().mpi_manager.get_num_processes(); + const size_t rank = kernel().mpi_manager.get_rank(); + const size_t first_elem_rank = + kernel().mpi_manager.get_process_id_of_vp( kernel().vp_manager.node_id_to_vp( first_ ) ); + const size_t elem_idx = ( rank - first_elem_rank + num_processes ) % num_processes; - if ( offset >= size() ) // Too few node IDs to be shared among all vps. + if ( elem_idx > size() ) // Too few node IDs to be shared among all MPI processes. 
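+  // Illustration (hypothetical values, assuming the usual round-robin distribution
+  // of node IDs over processes): with 4 MPI processes, if the first node of this
+  // primitive lives on rank 2 and we are on rank 1, then
+  //   elem_idx = ( 1 - 2 + 4 ) % 4 = 3,
+  // so the iterator below starts at element 3 and advances in steps of 4,
+  // visiting elements 3, 7, 11, ..., which are exactly the rank-local nodes.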
{ - return const_iterator( cp, *this, size() ); + return const_iterator( cp, *this, size(), 1, nc_const_iterator::NCIteratorKind::END ); // end iterator } else { - return const_iterator( cp, *this, offset, num_vps ); + return const_iterator( cp, *this, elem_idx, num_processes, nc_const_iterator::NCIteratorKind::RANK_LOCAL ); } } -NodeCollectionPrimitive::const_iterator -NodeCollectionPrimitive::MPI_local_begin( NodeCollectionPTR cp ) const +NodeCollection::const_iterator +NodeCollectionPrimitive::thread_local_begin( NodeCollectionPTR cp ) const { - const size_t num_processes = kernel().mpi_manager.get_num_processes(); - const size_t rank = kernel().mpi_manager.get_rank(); - const size_t rank_first_node = - kernel().mpi_manager.get_process_id_of_vp( kernel().vp_manager.node_id_to_vp( first_ ) ); - const size_t offset = ( rank - rank_first_node + num_processes ) % num_processes; + const size_t num_vps = kernel().vp_manager.get_num_virtual_processes(); + const size_t current_vp = kernel().vp_manager.thread_to_vp( kernel().vp_manager.get_thread_id() ); + const size_t vp_first_node = kernel().vp_manager.node_id_to_vp( first_ ); + const size_t offset = ( current_vp - vp_first_node + num_vps ) % num_vps; - if ( offset > size() ) // Too few node IDs to be shared among all MPI processes. + if ( offset >= size() ) // Too few node IDs to be shared among all vps. { - return const_iterator( cp, *this, size() ); + return nc_const_iterator( cp, *this, size(), 1, nc_const_iterator::NCIteratorKind::END ); // end iterator } else { - return const_iterator( cp, *this, offset, num_processes ); + return nc_const_iterator( cp, *this, offset, num_vps, nc_const_iterator::NCIteratorKind::THREAD_LOCAL ); } } NodeCollectionPTR -NodeCollectionPrimitive::slice( size_t start, size_t end, size_t step ) const +NodeCollectionPrimitive::slice( size_t start, size_t end, size_t stride ) const { if ( not( start < end ) ) { @@ -538,7 +699,7 @@ NodeCollectionPrimitive::slice( size_t start, size_t end, size_t step ) const } NodeCollectionPTR sliced_nc; - if ( step == 1 and not metadata_ ) + if ( stride == 1 and not metadata_ ) { // Create primitive NodeCollection passing node IDs. // Subtract 1 because "end" is one past last element to take while constructor expects ID of last node. @@ -546,7 +707,8 @@ NodeCollectionPrimitive::slice( size_t start, size_t end, size_t step ) const } else { - sliced_nc = std::make_shared< NodeCollectionComposite >( *this, start, end, step ); + // This is the "slicing" constructor, so we use slicing logic and pass end as it is + sliced_nc = std::make_shared< NodeCollectionComposite >( *this, start, end, stride ); } return sliced_nc; @@ -619,29 +781,33 @@ NodeCollectionPrimitive::assert_consistent_model_ids_( const size_t expected_mod NodeCollectionComposite::NodeCollectionComposite( const NodeCollectionPrimitive& primitive, size_t start, size_t end, - size_t step ) - : parts_() - , size_( ( end - start - 1 ) / step + 1 ) - , step_( step ) - , start_part_( 0 ) - , start_offset_( start ) - // If end is at the end of the primitive, set the end to the first in the next (nonexistent) part, - // for consistency with iterator comparisons. - , end_part_( end == primitive.size() ? 1 : 0 ) - , end_offset_( end == primitive.size() ? 
0 : end ) - , is_sliced_( start != 0 or end != primitive.size() or step > 1 ) + size_t stride ) + : parts_( { primitive } ) + , size_( 1 + ( end - start - 1 ) / stride ) // see comment on constructor + , stride_( stride ) + , first_part_( 0 ) + , first_elem_( start ) + , last_part_( 0 ) + , last_elem_( end - 1 ) + , is_sliced_( start != 0 or end != primitive.size() or stride > 1 ) + , cumul_abs_size_( { primitive.size() } ) + , first_in_part_( { first_elem_ } ) { - parts_.push_back( primitive ); + assert( end > 0 ); + assert( first_elem_ <= last_elem_ ); } NodeCollectionComposite::NodeCollectionComposite( const std::vector< NodeCollectionPrimitive >& parts ) - : size_( 0 ) - , step_( 1 ) - , start_part_( 0 ) - , start_offset_( 0 ) - , end_part_( parts.size() ) - , end_offset_( 0 ) + : parts_() + , size_( 0 ) + , stride_( 1 ) + , first_part_( 0 ) + , first_elem_( 0 ) + , last_part_( 0 ) + , last_elem_( 0 ) , is_sliced_( false ) + , cumul_abs_size_() + , first_in_part_() { if ( parts.size() < 1 ) { @@ -649,31 +815,58 @@ NodeCollectionComposite::NodeCollectionComposite( const std::vector< NodeCollect } NodeCollectionMetadataPTR meta = parts[ 0 ].get_metadata(); - parts_.reserve( parts.size() ); + for ( const auto& part : parts ) { if ( meta.get() and not( meta == part.get_metadata() ) ) { throw BadProperty( "all metadata in a NodeCollection must be the same" ); } - parts_.push_back( part ); - size_ += part.size(); + + if ( not part.empty() ) + { + parts_.push_back( part ); + size_ += part.size(); + } } + + const auto n_parts = parts_.size(); + if ( parts_.size() == 0 ) + { + throw BadProperty( "Cannot create composite NodeCollection from only empty parts" ); + } + std::sort( parts_.begin(), parts_.end(), primitive_sort_op ); + + // Only after sorting can we set up the remaining fields + last_part_ = n_parts - 1; + last_elem_ = parts_[ last_part_ ].size() - 1; // well defined because we allow no empty parts + + cumul_abs_size_.resize( n_parts ); + cumul_abs_size_[ 0 ] = parts_[ 0 ].size(); + for ( size_t pix = 1; pix < n_parts; ++pix ) + { + cumul_abs_size_[ pix ] = cumul_abs_size_[ pix - 1 ] + parts_[ pix ].size(); + } + + // All parts start at beginning since no slicing + std::vector< size_t >( n_parts, 0 ).swap( first_in_part_ ); } NodeCollectionComposite::NodeCollectionComposite( const NodeCollectionComposite& composite, size_t start, size_t end, - size_t step ) + size_t stride ) : parts_( composite.parts_ ) - , size_( ( end - start - 1 ) / step + 1 ) - , step_( step ) - , start_part_( 0 ) - , start_offset_( 0 ) - , end_part_( composite.parts_.size() ) - , end_offset_( 0 ) + , size_( 1 + ( end - start - 1 ) / stride ) // see comment on constructor + , stride_( stride ) + , first_part_( 0 ) + , first_elem_( 0 ) + , last_part_( 0 ) + , last_elem_( 0 ) , is_sliced_( true ) + , cumul_abs_size_( parts_.size(), 0 ) + , first_in_part_( parts_.size(), invalid_index ) { if ( end - start < 1 ) { @@ -686,28 +879,63 @@ NodeCollectionComposite::NodeCollectionComposite( const NodeCollectionComposite& if ( composite.is_sliced_ ) { - assert( composite.step_ > 1 or composite.end_part_ != 0 or composite.end_offset_ != 0 ); - // The NodeCollection is sliced if ( size_ > 1 ) { // Creating a sliced NC with more than one node ID from a sliced NC is impossible. throw BadProperty( "Cannot slice a sliced composite NodeCollection." ); } - // we have a single single node ID, must just find where it is. 
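+  /*
+     Worked example for the bookkeeping set up in the else branch below (illustrative
+     values only): slicing an unsliced composite with parts of sizes 5 and 4 using
+     start = 1, end = 9, stride = 3 selects the absolute indices 1 and 4 (part 0)
+     and 7 (part 1). The constructor then ends up with
+
+       first_part_ = 0, first_elem_ = 1, last_part_ = 1, last_elem_ = 8 - 5 = 3,
+       cumul_abs_size_ = { 5, 9 },
+       first_in_part_  = { 1, 2 },
+
+     and size_ = 1 + ( 9 - 1 - 1 ) / 3 = 3 from the initializer list above.
+  */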
- const const_iterator it = composite.begin() + start; - it.get_current_part_offset( start_part_, start_offset_ ); - end_part_ = start_part_; - end_offset_ = start_offset_ + 1; + + // we have a single node ID, must just find where it is. + const nc_const_iterator it = composite.begin() + start; + std::tie( first_part_, first_elem_ ) = it.get_part_offset(); + last_part_ = first_part_; + last_elem_ = first_elem_; + + cumul_abs_size_[ first_part_ ] = parts_[ first_part_ ].size(); // absolute size of the one valid part + first_in_part_[ first_part_ ] = first_elem_; } else { // The NodeCollection is not sliced // Update start and stop positions. - const const_iterator start_it = composite.begin() + start; - start_it.get_current_part_offset( start_part_, start_offset_ ); + const nc_const_iterator first_it = composite.begin() + start; + std::tie( first_part_, first_elem_ ) = first_it.get_part_offset(); + + const nc_const_iterator last_it = composite.begin() + ( end - 1 ); + std::tie( last_part_, last_elem_ ) = last_it.get_part_offset(); + + // Fill cumulative size/first_in data structures beginning with first_part_ + // All entries have been initialized with 0 or invalid_index, respectively + cumul_abs_size_[ first_part_ ] = parts_[ first_part_ ].size(); + first_in_part_[ first_part_ ] = first_elem_; + + for ( size_t pix = first_part_ + 1; pix <= last_part_; ++pix ) + { + const auto prev_cas = cumul_abs_size_[ pix - 1 ]; + cumul_abs_size_[ pix ] = prev_cas + parts_[ pix ].size(); + + // Compute absolute index from beginning of first_part_ for first element beyond part j-1 + const auto prev_num_elems = 1 + ( ( prev_cas - 1 - first_elem_ ) / stride_ ); + const auto next_elem_abs_idx = first_elem_ + prev_num_elems * stride_; + assert( next_elem_abs_idx >= prev_cas ); + const auto next_elem_loc_idx = next_elem_abs_idx - prev_cas; + + // We have a next element if it is in the part; if we are in last_part_, we must not have passed last_elem + if ( next_elem_abs_idx < cumul_abs_size_[ pix ] and ( pix < last_part_ or next_elem_loc_idx <= last_elem_ ) ) + { + first_in_part_[ pix ] = next_elem_loc_idx; + } + else + { + first_in_part_[ pix ] = invalid_index; + } + } + } - const const_iterator end_it = composite.begin() + end; - end_it.get_current_part_offset( end_part_, end_offset_ ); + // For consistency, fill size values of remaining entries + for ( size_t pix = last_part_ + 1; pix < parts_.size(); ++pix ) + { + cumul_abs_size_[ pix ] = cumul_abs_size_[ last_part_ ]; } } @@ -718,20 +946,24 @@ NodeCollectionComposite::operator+( NodeCollectionPTR rhs ) const { return std::make_shared< NodeCollectionComposite >( *this ); } + if ( get_metadata().get() and not( get_metadata() == rhs->get_metadata() ) ) { throw BadProperty( "can only join NodeCollections with the same metadata" ); } + if ( not valid() or not rhs->valid() ) { throw KernelException( "InvalidNodeCollection: note that ResetKernel invalidates all previously created NodeCollections." ); } + if ( is_sliced_ ) { - assert( step_ > 1 or end_part_ != 0 or end_offset_ != 0 ); + assert( stride_ > 1 or last_part_ != 0 or last_elem_ != 0 ); throw BadProperty( "Cannot add NodeCollection to a sliced composite." 
); } + auto const* const rhs_ptr = dynamic_cast< NodeCollectionPrimitive const* >( rhs.get() ); if ( rhs_ptr ) // if rhs is Primitive { @@ -751,7 +983,7 @@ NodeCollectionComposite::operator+( NodeCollectionPTR rhs ) const assert( rhs_ptr ); if ( rhs_ptr->is_sliced_ ) { - assert( rhs_ptr->step_ > 1 or rhs_ptr->end_part_ != 0 or rhs_ptr->end_offset_ != 0 ); + assert( rhs_ptr->stride_ > 1 or rhs_ptr->last_part_ != 0 or rhs_ptr->last_elem_ != 0 ); throw BadProperty( "Cannot add NodeCollection to a sliced composite." ); } @@ -762,9 +994,9 @@ NodeCollectionComposite::operator+( NodeCollectionPTR rhs ) const const auto& shortest = shortest_longest_nc.first; const auto& longest = shortest_longest_nc.second; - for ( auto short_it = shortest.begin(); short_it < shortest.end(); ++short_it ) + for ( const auto& short_elem : shortest ) { - if ( longest.contains( ( *short_it ).node_id ) ) + if ( longest.contains( short_elem.node_id ) ) { throw BadProperty( "Cannot join overlapping NodeCollections." ); } @@ -823,13 +1055,13 @@ NodeCollectionComposite::operator[]( const size_t i ) const { if ( is_sliced_ ) { - assert( step_ > 1 or start_part_ > 0 or start_offset_ > 0 or end_part_ != parts_.size() or end_offset_ > 0 ); // Composite is sliced, we use iterator arithmetic. return ( *( begin() + i ) ).node_id; } else { - // Composite is unsliced, we can do a more efficient search. + // Composite is not sliced, we can do a more efficient search. + // TODO: Is this actually more efficient? size_t tot_prev_node_ids = 0; for ( const auto& part : parts_ ) // iterate over NodeCollections { @@ -844,7 +1076,7 @@ NodeCollectionComposite::operator[]( const size_t i ) const } } // throw exception if outside of NodeCollection - throw std::out_of_range( "pos points outside of the NodeCollection" ); + throw std::out_of_range( String::compose( "pos %1 points outside of the NodeCollection", i ) ); } } @@ -860,7 +1092,7 @@ NodeCollectionComposite::operator==( NodeCollectionPTR rhs ) const return false; } auto rhs_nc = rhs_ptr->parts_.begin(); - for ( auto lhs_nc = parts_.begin(); lhs_nc != parts_.end(); ++lhs_nc, ++rhs_nc ) // iterate over NodeCollections + for ( auto lhs_nc = parts_.begin(); lhs_nc < parts_.end(); ++lhs_nc, ++rhs_nc ) // iterate over NodeCollections { if ( not( ( *lhs_nc ) == ( *rhs_nc ) ) ) { @@ -870,69 +1102,141 @@ NodeCollectionComposite::operator==( NodeCollectionPTR rhs ) const return true; } -NodeCollectionComposite::const_iterator -NodeCollectionComposite::local_begin( NodeCollectionPTR cp ) const + +std::pair< size_t, size_t > +NodeCollectionComposite::specific_local_begin_( size_t period, + size_t phase, + size_t first_part, + size_t first_elem, + gid_to_phase_fcn_ gid_to_phase ) const { - const size_t num_vps = kernel().vp_manager.get_num_virtual_processes(); - const size_t current_vp = kernel().vp_manager.thread_to_vp( kernel().vp_manager.get_thread_id() ); - const size_t vp_first_node = kernel().vp_manager.node_id_to_vp( operator[]( 0 ) ); + assert( first_elem < parts_[ first_part ].size() ); - return local_begin_( cp, num_vps, current_vp, vp_first_node ); + size_t pix = first_part; + do + { + const size_t phase_first_node = gid_to_phase( parts_[ pix ][ first_elem ] ); + + size_t elem_idx = first_index( period, phase_first_node, stride_, phase ); + /* elem_idx can now be + * - elem_idx < part.size() : we have a solution + * - invalid_index : equation not solvable in existing part (eg even thread and nc has only odd gids), must search + * in remaining parts + * - elem_idx >= part.size() : there 
would be a solution if the part had been larger with same structure + */ + + // Add starting point only if valid offset, otherwise we would invalidate invalid_index marker + if ( elem_idx != invalid_index ) + { + elem_idx += first_elem; + } + + FULL_LOGGING_ONLY( + kernel().write_to_dump( String::compose( "SPLB rk %1, thr %2, phase_first %3, offs %4, stp %5, sto %6," + " pix %7, lp %8, le %9, primsz %10, nprts: %11, this: %12", + kernel().mpi_manager.get_rank(), + kernel().vp_manager.get_thread_id(), + phase_first_node, + offset, + first_part, + first_elem, + pix, + last_part_, + last_elem_, + parts_[ pix ].size(), + parts_.size(), + this ) ); ) + + if ( elem_idx != invalid_index and elem_idx < parts_[ pix ].size() + and ( pix < last_part_ or elem_idx <= last_elem_ ) ) + { + assert( gid_to_phase( parts_[ pix ][ elem_idx ] ) == phase ); + return { pix, elem_idx }; + } + else + { + // find next part with at least one element in stride + do + { + ++pix; + } while ( pix <= last_part_ and first_in_part_[ pix ] == invalid_index ); + + if ( pix > last_part_ ) + { + // node collection exhausted + return { invalid_index, invalid_index }; + } + else + { + first_elem = first_in_part_[ pix ]; + } + } + } while ( pix <= last_part_ ); + + return { invalid_index, invalid_index }; } -NodeCollectionComposite::const_iterator -NodeCollectionComposite::MPI_local_begin( NodeCollectionPTR cp ) const +size_t +NodeCollectionComposite::gid_to_vp_( size_t gid ) { - const size_t num_processes = kernel().mpi_manager.get_num_processes(); - const size_t rank = kernel().mpi_manager.get_rank(); - const size_t rank_first_node = - kernel().mpi_manager.get_process_id_of_vp( kernel().vp_manager.node_id_to_vp( operator[]( 0 ) ) ); - - return local_begin_( cp, num_processes, rank, rank_first_node ); + return kernel().vp_manager.node_id_to_vp( gid ); } +size_t +NodeCollectionComposite::gid_to_rank_( size_t gid ) +{ + return kernel().mpi_manager.get_process_id_of_vp( kernel().vp_manager.node_id_to_vp( gid ) ); +} -NodeCollectionComposite::const_iterator -NodeCollectionComposite::local_begin_( const NodeCollectionPTR cp, - const size_t num_vp_elements, - const size_t current_vp_element, - const size_t vp_element_first_node ) const +NodeCollection::const_iterator +NodeCollectionComposite::rank_local_begin( NodeCollectionPTR cp ) const { - const size_t offset = ( current_vp_element - vp_element_first_node ) % num_vp_elements; + const size_t num_ranks = kernel().mpi_manager.get_num_processes(); + const size_t current_rank = kernel().mpi_manager.get_rank(); - if ( ( current_vp_element - vp_element_first_node ) % step_ != 0 ) - { // There are no local nodes in the NodeCollection. - return end( cp ); + const auto [ part_index, part_offset ] = + specific_local_begin_( num_ranks, current_rank, first_part_, first_elem_, gid_to_rank_ ); + if ( part_index != invalid_index and part_offset != invalid_index ) + { + return nc_const_iterator( cp, + *this, + part_index, + part_offset, + std::lcm( stride_, num_ranks ), + nc_const_iterator::NCIteratorKind::RANK_LOCAL ); } - - size_t current_part = start_part_; - size_t current_offset = start_offset_; - if ( offset ) + else { - // First create an iterator at the start position. - auto tmp_it = const_iterator( cp, *this, start_part_, start_offset_, step_ ); - tmp_it += offset; // Go forward to the offset. - // Get current position. 
- tmp_it.get_current_part_offset( current_part, current_offset ); + return end( cp ); } - - return const_iterator( cp, *this, current_part, current_offset, num_vp_elements * step_ ); } -ArrayDatum -NodeCollectionComposite::to_array() const +NodeCollection::const_iterator +NodeCollectionComposite::thread_local_begin( NodeCollectionPTR cp ) const { - ArrayDatum node_ids; - node_ids.reserve( size() ); - for ( auto it = begin(); it < end(); ++it ) + const size_t num_vps = kernel().vp_manager.get_num_virtual_processes(); + const size_t current_vp = kernel().vp_manager.thread_to_vp( kernel().vp_manager.get_thread_id() ); + + const auto [ part_index, part_offset ] = + specific_local_begin_( num_vps, current_vp, first_part_, first_elem_, gid_to_vp_ ); + + if ( part_index != invalid_index and part_offset != invalid_index ) { - node_ids.push_back( ( *it ).node_id ); + return nc_const_iterator( cp, + *this, + part_index, + part_offset, + std::lcm( stride_, num_vps ), + nc_const_iterator::NCIteratorKind::THREAD_LOCAL ); + } + else + { + return end( cp ); } - return node_ids; } NodeCollectionPTR -NodeCollectionComposite::slice( size_t start, size_t end, size_t step ) const +NodeCollectionComposite::slice( size_t start, size_t end, size_t stride ) const { if ( not( start < end ) ) { @@ -948,14 +1252,26 @@ NodeCollectionComposite::slice( size_t start, size_t end, size_t step ) const "InvalidNodeCollection: note that ResetKernel invalidates all previously created NodeCollections." ); } - const auto new_composite = NodeCollectionComposite( *this, start, end, step ); + FULL_LOGGING_ONLY( kernel().write_to_dump( "Calling NCC from slice()" ); ) + const auto new_composite = NodeCollectionComposite( *this, start, end, stride ); + FULL_LOGGING_ONLY( kernel().write_to_dump( "Calling NCC from slice() --- DONE" ); ) - if ( step == 1 and new_composite.start_part_ == new_composite.end_part_ ) + if ( stride == 1 and new_composite.first_part_ == new_composite.last_part_ ) { - // Return only the primitive - return new_composite.parts_[ new_composite.start_part_ ].slice( - new_composite.start_offset_, new_composite.end_offset_ ); + // Return only the primitive; pass last_elem_+1 because slice() expects end argument + return new_composite.parts_[ new_composite.first_part_ ].slice( + new_composite.first_elem_, new_composite.last_elem_ + 1 ); } + + FULL_LOGGING_ONLY( + kernel().write_to_dump( String::compose( "NewComposite: fp %1, fe %2, lp %3, le %4, sz %5, strd %6", + new_composite.first_part_, + new_composite.first_elem_, + new_composite.last_part_, + new_composite.last_elem_, + new_composite.size_, + new_composite.stride_ ) ); ) + return std::make_shared< NodeCollectionComposite >( new_composite ); } @@ -986,74 +1302,80 @@ NodeCollectionComposite::merge_parts_( std::vector< NodeCollectionPrimitive >& p } } -bool -NodeCollectionComposite::contains( const size_t node_id ) const -{ - return get_lid( node_id ) != -1; -} - long -NodeCollectionComposite::get_lid( const size_t node_id ) const +NodeCollectionComposite::get_nc_index( const size_t node_id ) const { - const auto add_size_op = []( const long a, const NodeCollectionPrimitive& b ) { return a + b.size(); }; + // Check if node is in node collection + if ( node_id < parts_[ first_part_ ][ first_elem_ ] or parts_[ last_part_ ][ last_elem_ ] < node_id ) + { + return -1; + } - long lower = 0; - long upper = parts_.size() - 1; - while ( lower <= upper ) + // Find part to which node belongs + size_t lower = first_part_; + size_t upper = last_part_; + while ( lower < upper ) 
{ - const size_t middle = ( lower + upper ) / 2; + // Because lower < upper, we are guaranteed that mid < upper + const size_t mid = ( lower + upper ) / 2; - if ( parts_[ middle ][ parts_[ middle ].size() - 1 ] < node_id ) + // Because mid < upper <=> mid < last_part_, we do not need to worry about last_elem_ + if ( parts_[ mid ][ parts_[ mid ].size() - 1 ] < node_id ) { - lower = middle + 1; + lower = mid + 1; } - else if ( node_id < ( parts_[ middle ][ 0 ] ) ) + // mid == first_part_ is possible, but if node_id is before first_elem_, + // we handled that at the beginning, so here we just check if the node_id + // comes before the mid part + else if ( node_id < parts_[ mid ][ 0 ] ) { - upper = middle - 1; + upper = mid - 1; } else { - // At this point we know that node_id is in parts_[middle]. - if ( is_sliced_ ) - { - assert( start_offset_ != 0 or start_part_ != 0 or end_part_ != 0 or end_offset_ != 0 or step_ > 1 ); + lower = upper = mid; + } + } - if ( middle < start_part_ or end_part_ < middle ) - { - // middle is outside of the sliced area - return -1; - } - // Need to find number of nodes in previous parts to know if the the step hits the node_id. - const auto num_prev_nodes = - std::accumulate( parts_.begin(), parts_.begin() + middle, static_cast< size_t >( 0 ), add_size_op ); - const auto absolute_pos = num_prev_nodes + parts_[ middle ].get_lid( node_id ); - - // The first or the last node can be somewhere in the middle part. - const auto absolute_part_start = start_part_ == middle ? start_offset_ : 0; - const auto absolute_part_end = end_part_ == middle ? end_offset_ : parts_[ middle ].size(); - - // Is node_id in the sliced NC? - const auto node_id_before_start = node_id < parts_[ middle ][ absolute_part_start ]; - const auto node_id_after_end = parts_[ middle ][ absolute_part_end - 1 ] < node_id; - const auto node_id_missed_by_step = ( ( absolute_pos - start_offset_ ) % step_ ) != 0; - if ( node_id_before_start or node_id_after_end or node_id_missed_by_step ) - { - return -1; - } + // If node_id is not in the NodeCollection, lower may pass upper in the loop above + // See test_regression_issue-3213.py for an example case. + assert( lower >= upper ); - // Return the calculated local ID of node_id. - return ( absolute_pos - start_offset_ ) / step_; - } - else - { - // Since NC is not sliced, we can just calculate and return the local ID. - const auto sum_pre = - std::accumulate( parts_.begin(), parts_.begin() + middle, static_cast< size_t >( 0 ), add_size_op ); - return sum_pre + parts_[ middle ].get_lid( node_id ); - } + if ( lower > upper or node_id < parts_[ lower ][ 0 ] or parts_[ lower ][ parts_[ lower ].size() - 1 ] < node_id ) + { + // node_id is in a gap of nc + return -1; + } + + // We now know that lower == upper and that the node is in this part + // if it is in the node collection. We do not need to check for first/last, + // since we did that above. + const auto part_begin_idx = lower == 0 ?
0 : cumul_abs_size_[ lower - 1 ]; + const auto node_idx = part_begin_idx + parts_[ lower ].get_nc_index( node_id ); + + if ( not is_sliced_ ) + { + // Since NC is not sliced, node_idx is the desired index + assert( this->operator[]( node_idx ) == node_id ); + return node_idx; + } + else + { + // We need to take stride into account + const auto distance_from_first = node_idx - first_elem_; + + // Exploit that same stride applies to all parts + if ( distance_from_first % stride_ == 0 ) + { + const auto sliced_node_idx = distance_from_first / stride_; + assert( this->operator[]( sliced_node_idx ) == node_id ); + return sliced_node_idx; + } + else + { + return -1; } } - return -1; } bool @@ -1072,9 +1394,6 @@ NodeCollectionComposite::print_me( std::ostream& out ) const if ( is_sliced_ ) { - assert( step_ > 1 or ( end_part_ != 0 or end_offset_ != 0 ) ); - // Sliced composite NodeCollection - size_t current_part = 0; size_t current_offset = 0; size_t previous_part = std::numeric_limits< size_t >::infinity(); @@ -1086,12 +1405,14 @@ NodeCollectionComposite::print_me( std::ostream& out ) const std::vector< std::string > string_vector; out << nc << "metadata=" << metadata << ","; - for ( const_iterator it = begin(); it < end(); ++it ) + + const auto end_it = end(); + for ( nc_const_iterator it = begin(); it < end_it; ++it ) { - it.get_current_part_offset( current_part, current_offset ); + std::tie( current_part, current_offset ) = it.get_part_offset(); if ( current_part != previous_part ) // New primitive { - if ( it != begin() ) + if ( it > begin() ) { // Need to count the primitive, so can't start at begin() out << "\n" + space @@ -1105,9 +1426,9 @@ NodeCollectionComposite::print_me( std::ostream& out ) const { out << "first=" << first_in_primitive.node_id << ", last="; out << primitive_last; - if ( step_ > 1 ) + if ( stride_ > 1 ) { - out << ", step=" << step_ << ";"; + out << ", step=" << stride_ << ";"; } } } @@ -1133,17 +1454,17 @@ NodeCollectionComposite::print_me( std::ostream& out ) const { out << "first=" << first_in_primitive.node_id << ", last="; out << primitive_last; - if ( step_ > 1 ) + if ( stride_ > 1 ) { - out << ", step=" << step_; + out << ", step=" << stride_; } } } else { - // None-sliced Composite NodeCollection + // Unsliced Composite NodeCollection out << nc << "metadata=" << metadata << ","; - for ( auto it = parts_.begin(); it != parts_.end(); ++it ) + for ( auto it = parts_.begin(); it < parts_.end(); ++it ) { if ( it == parts_.end() - 1 ) { diff --git a/nestkernel/node_collection.h b/nestkernel/node_collection.h index 98b6394cd3..0b127e574d 100644 --- a/nestkernel/node_collection.h +++ b/nestkernel/node_collection.h @@ -41,6 +41,9 @@ #include "arraydatum.h" #include "dictdatum.h" +// Includes from thirdparty: +#include "compose.hpp" + namespace nest { class Node; @@ -65,7 +68,14 @@ class NodeCollectionMetadata virtual ~NodeCollectionMetadata() = default; virtual void set_status( const DictionaryDatum&, bool ) = 0; - virtual void get_status( DictionaryDatum& ) const = 0; + + /** + * Retrieve status information sliced according to slicing of node collection + * + * @note If nullptr is passed for NodeCollection*, full metadata irrespective of any slicing is returned. + * This is used by NodeCollectionMetadata::operator==() which does not have access to the NodeCollection. 
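+ *
+ * A minimal usage sketch (illustrative only, not verbatim NEST code; assumes `meta` is a valid
+ * NodeCollectionMetadataPTR and `nc` the NodeCollectionPTR whose slicing should be respected):
+ *
+ *   DictionaryDatum d( new Dictionary );
+ *   meta->get_status( d, nc.get() );  // status restricted to the slice represented by nc
+ *   meta->get_status( d, nullptr );   // full metadata, ignoring any slicing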
+ */ + virtual void get_status( DictionaryDatum&, NodeCollection const* ) const = 0; virtual void set_first_node_id( size_t ) = 0; virtual size_t get_first_node_id() const = 0; @@ -74,12 +84,17 @@ class NodeCollectionMetadata virtual bool operator==( const NodeCollectionMetadataPTR ) const = 0; }; +/** + * Represent single node entry in node collection. + * + * These triples are not actually stored in the node collection; they are constructed when an iterator is dereferenced. + */ class NodeIDTriple { public: - size_t node_id { 0 }; - size_t model_id { 0 }; - size_t lid { 0 }; + size_t node_id { 0 }; //!< Global ID of neuron + size_t model_id { 0 }; //!< ID of neuron model + size_t nc_index { 0 }; //!< position within node collection NodeIDTriple() = default; }; @@ -88,30 +103,342 @@ class NodeIDTriple * * This iterator can iterate over primitive and composite NodeCollections. * Behavior is determined by the constructor used to create the iterator. + * + * @note In addition to a raw pointer to either a primitive or composite node collection, which is used + * for all actual work, the iterator also holds a NodeCollectionPTR to the NC it iterates over. This is necessary + * so that anonymous node collections at the SLI/Python level are not auto-destroyed when they go out + * of scope while the iterator lives on. + * + * @note We decided not to implement iterators for primitive and composite node collections or for + * stepping through rank- or thread-local elements using subclasses to avoid vtable lookups. Whether + * this indeed gives the best performance should be checked at some point. + * + * In the following discussion, we use the following terms: + * + * - **global iterator** is an iterator that steps through all elements of a node collection irrespective of the VP they + * belong to + * - **{rank,thread}-local iterator** is an iterator that steps only through those elements that belong to the + * rank/thread on which the iterator was created + * - **stride** is the user-given stride through a node collection as in ``nc[::stride]`` + * - **period** is 1 for global iterators and the number of ranks/threads for {rank/thread}-local iterators + * - **phase** is the placement of a given node within the period, thus always 0 for global iterators; + * for composite node collections, the phase needs to be determined independently for each part + * - **step** is the number of elements of the underlying primitive node collection to advance by to move to the next + * element; for global iterators, it is equal to the stride, for {rank/thread}-local iterators it is given by + * lcm(stride, period) and applies only *within* each part of a composite node collection + * + * Note further that + * - `first_`, `first_part_`, `first_elem_` and `last_`, `last_part_`, `last_elem_` refer to the + * first and last elements belonging to the node collection; in particular "last" is *inclusive* + * - For primitive NCs, the end iterator is given by `part_idx_ == 0, element_idx_ = last_ + 1` + * - For an empty primitive NC, the end iterator is given by `part_idx_ == 0, element_idx_ = 0` + * - For composite NCs, the end iterator is given by `part_idx_ == last_part_, element_idx_ == last_elem_ + 1` + * - To check whether an iterator over a composite NC is valid, use `NodeCollectionComposite::valid_idx_()` + * - Composite NCs can never be empty + * - The `end()` iterator is the same independent of whether one iterates globally or locally + * - Constructing the `end()` iterator is costly, especially for composite NCs.
In classic for loops including `... ; it + * < nc->end() ; ...`, the `end()` iterator is constructed anew for every iteration, even if `nc` is `const` (tested + * with AppleClang 15 and GCC 13, `-std=c++17`). Therefore, either construct the `end()` iterator first + * + * const auto end_it = nc->end(); + * for ( auto it = nc->thread_local_begin() ; it < end_it ; ++it ) + * + * or use range iteration (global iteration only, uses `!=` to compare iterators) + * + * for ( const auto& nc_elem : nc ) + * + * ## Iteration over primitive collections + * + * For ``NodeCollectionPrimitive``, creating a rank/thread-local ``begin()`` iterator and stepping is straightforward: + * + * 1. Since `stride == 1` by definition for a primitive NC, we have `step == period` + * 2. Find the `phase` of the first element of the NC + * 3. Use modular arithmetic to find the first element in the NC belonging to the current rank/thread, if it exists. + * 4. To move forward by `n` elements, add `n * step` and check that one is still `<= last_` + * 5. If one stepped past `last_`, set `element_idx_ = last_ + 1` to ensure that `it != nc->end()` compares correctly + * + * + * ## Constructing a composite node collection + * + * Upon construction of a `NodeCollectionComposite` instance, we need to determine its first and last entries. + * We can distinguish three different cases, mapping to three different constructors. + * + * ### Case A: Slicing a primitive collection + * + * If `nc` is a primitive collection and we slice as `nc[start:end:stride]`, we only have a single part and + * + * - `first_part_ = 0, first_elem_ = start` + * - `last_part_ = 0, last_elem_ = end - 1` + * - `stride_ = stride` + * - `size_ = 1 + ( end - start - 1 ) / stride` (integer division, see c'tor doc for derivation) + * + * ### Case B: Joining multiple primitive collections + * + * We receive a list of parts, which are all primitive collections. The new collection consists of all elements of all + * parts. + * + * 1. Collect all non-empty parts into the vector `parts_` + * 2. Ensure parts do not overlap and sort in order of increasing GIDs + * + * The collection now begins with the first element of the first part and ends with the last element of the last part, + * i.e., + * + * - `first_part_ = 0, first_elem_ = 0` + * - `last_part_ = parts_.size() - 1, last_elem_ = parts_[last_part_].size() - 1` + * - `stride_ = 1` + * - `size_ = sum_k parts_[k].size()` + * + * ### Case C: Slicing a composite node collection + * + * Here, we have two subcases: + * + * #### Case C.1: Single element of sliced composite + * + * If `nc` is already sliced, we can only pick a single element given by `start` and `end==start+1`. We proceed as + * follows: + * + * 1. Build iterator pointing to element as `it = nc.begin() + start` + * 2. Extract from this iterator + * - `first_part_ = it.part_idx_, first_elem_ = it.elem_idx_` + * 3. We further have + * - `last_part_ = first_part_, last_elem_ = first_elem_` + * - `stride_ = 1` (the constructor is called with stride==1 if we do `nc[1]`) + * - `size_ = 1` (but computed using equation above for consistency with C.2) + * + * #### Case C.2: Slicing of non-sliced composite + * + * We slice as `nc[start:end:stride]` but are guaranteed that all elements in `parts_` are in `nc` and all parts have + * stride 1. We thus proceed as follows: + * 1. Build iterator pointing to first element as `first_it = nc.begin() + start` + * 2. Build iterator pointing to last element as `last_it = nc.begin() + (end - 1)` + * 2.
Extract from these iterators + * - `first_part_ = first_it.part_idx_, first_elem_ = first_it.elem_idx_` + * - `last_part_ = last_it.part_idx_, last_elem_ = last_it.elem_idx_` + * 3. We further have + * - `stride_ = stride` (irrelevant in this case, but set thus for consistency with C.2) + * - `size_ = 1 + ( end - start - 1 ) / stride` + * + * ### Additional data structures + * + * We further construct a vector of the cumulative sizes of all parts and a vector containing for each part the + * part-local index to the first element of each part that belongs to the node collection, or `invalid_index` if there + * is no element in the part. + * **Note:** The cumulative sizes include *all* elements of the parts, including elements before `first_elem_` or after + * `last_elem_`, but they do *not* include elements in parts before `first_part_` or after `last_part_`. + * + * #### Case A + * + * - `cumul_abs_size_` has a single element equal to the size of the underlying primitive collection. + * - `first_in_part_` has a single element equal to `first_elem_` + * + * #### Case B + * + * - `cumul_abs_size_` are straightforward cumulative sums of the sizes, beginning with `parts_[0].size()` + * - `first_in_part_` has `parts_.size()` elements, all zero since `stride==1` so we start from the beginning of each + * part + * + * #### Case C.1 + * + * - `cumul_abs_size_`: zero before `first_part_`, for all subsequent parts `parts_[first_part_].size()` + * - `first_in_part_`: for `first_part_`, it is `first_elem_`, for all others `invalid_index` + * + * #### Case C.2 + * + * - `cumul_abs_size_`: + * - zero before `first_part_` + * - cumulative sums from `first_part_` on starting with `parts_[first_part_].size()` + * - from `last_part_` on all `cumul_abs_size_[last_part_]` + * - `first_in_part_`: + * - for `first_part_`, it is `first_elem_` + * - for all subsequent parts `part_idx_ <= last_part_`: + * 1. Compute number of elements in previous parts, taking stride into account (same equation as for composite size) + * `n_pe = 1 + ( cumul_abs_size_[ part_idx_ - 1 ] - 1 - first_elem_ ) / stride` + * 2. Compute absolute index of next element from beginning of first_part_ + * `next_abs_idx = first_elem_ + n_pe * stride_` + * 3. Compute part-local index + * `next_loc_idx = next_abs_idx - cumul_abs_size_[ part_idx_ - 1 ]` + * 4. If `next_loc_idx_` is valid index, store as `first_in_part_[part_idx_]`, otherwise store `invalid_index` + * - `invalid_index` for all parts before `first_part_` and after `last_part_` + * + * + * ## Iteration over composite collections + * + * - All underlying parts are primitive node collections and thus have `stride == 1`. One cannot join node collections + * with different strides. + * - Thus, if a composite NC is sliced, the same `stride` applies to all parts; by definition, also the same `period` + * applies to all parts + * - Therefore, the `step = lcm(stride, period)` is the same throughout + * - When slicing a compositve node collection, we always retain the full set of parts and mark by `first_part_` and + * `last_part_` (inclusive) which parts are relevant for the sliced collection. + * + * We now need to distinguish between global and local iteration. + * + * ### Global iteration + * + * For global iteration, we can ignore phase relations. We need to take into account slicing and possible gaps between + * parts, as well as the possibility, for `stride > 1`, that parts contain no elements. 
Consider the following node + * collection with several parts; vertical bars indicate borders between parts ( to construct such a node collection, + * join collections with different neuron models). In the table, (PartIdx, ElemIdx) show the iterator values for + * iterators pointing to the corresponding element in the collection, while PythonIdx is the index that applies for + * slicing from the Python level. Different slices are shown in the final lines of the table, with asterisks marking the + * elements belonging to the sliced collection. Note that the second slice does not contain any elements from the first + * and third parts. + * + * GID 1 2 | 3 4 5 | 6 7 8 | 9 10 11 + * PartIdx 0 0 | 1 1 1 | 2 2 2 | 3 3 3 + * ElemIdx 0 1 | 0 1 2 | 0 1 2 | 0 1 2 + * ---------------------------------------- + * PythonIdx 0 1 | 2 3 4 | 5 6 7 | 8 9 10 + * ---------------------------------------- + * [::3] * | * | * | * + * [4::5] | * | | * + * [1:11:3] * | * | * | + * + * #### Iterator initialization + * + * The `begin()` iterator is given by + * - `part_idx_ = nc.first_part_, element_idx_ = nc.first_elem_` + * - `step_ = nc.stride_` + * - `kind_ = NCIterator::GLOBAL` + * + * #### Iterator stepping + * - Assume we want to move `n` elements forward. In the `[::3]` example above, starting with `begin()` and stepping by + * `n=2` elements forward would take us from the element with GID 1 to the element with GID 7. + * - Proceed like this + * 1. Compute candidate element index `new_idx = element_idx_ + n * step_` + * 2. If we have passed the end of the collection, we set `part_idx_, element_idx_` to the end-iterator values and + * return + * 3. Otherwise, if we are still in the current part, we set `element_idx_ = new_idx` and return + * 4. Otherwise, we need to look in the next part. + * 5. We first check if there is a next part, if not, we set to end() + * 6. Otherwise, we move through remaining parts and use `cumul_abs_size_` to check if we have reached a part + * containing `new_idx`. If so, we also need to check if we ended up in `last_part_` and if so, if before + * `last_elem_`. + * 7. If we have found a valid element in a new part, we set `part_idx_, element_idx_`, otherwise, we set them to the + * end() iterator. + * + * ### Local iteration + * + * Rank-local and thread-local iteration work in exactly the same way, just that the period and phase are in one case + * given by the ranks and in the other by the threads. Thus, we discuss the algorithms only once. + * + * - For `period > 1`, the relation of `phase` to position in a given part can differ from part to part. Consider the + * following node collection in a simulation with four threads (one MPI process). The table shows the GID of the neuron, + * its thread (phase), the part_idx and the element_idx of a iterator pointing to the element. Finally, elements that + * belong to thread 1 and 2 respectively, are marked with an asterisk (only 11 elements shown for brevity): + * + * GID 1 2 3 4 5 6 7 8 9 10 11 + * PartIdx 0 0 0 0 0 0 0 0 0 0 0 + * ElemIdx 0 1 2 3 4 5 6 7 8 9 10 + * Phase 1 2 3 0 1 2 3 0 1 2 3 + * ------------------------------------------- + * On thr 1 * * * + * On thr 2 * * * + * + * The phase relation here is `phase == element_idx % period + 1`, where `1` is the phase of the node with GID 1. + * + * Now consider a new node collection constructed by + * + * nc2 = nc[:5] + nc[7:] + * + * This yields a node collection with two parts, marked with the vertical line (note that nodes 6 and 7 are not + * included). 
We consider it once in its entirety and once sliced as `nc2[::3]`. The `1stInPt` line marks the elements + * indexed by the `first_in_part_` vector. + * + * GID 1 2 3 4 5 | 8 9 10 11 12 13 14 15 16 17 18 19 20 21 + * PartIdx 0 0 0 0 0 | 1 1 1 1 1 1 1 1 1 1 1 1 1 1 + * ElemIdx 0 1 2 3 4 | 0 1 2 3 4 5 6 7 8 9 10 11 12 13 + * Phase 1 2 3 0 1 | 0 1 2 3 0 1 2 3 0 1 2 3 0 1 + * ------------------------------------------------------------------------------------- + * All 1stInPt # | # + * All thr 0 * | * * * * + * All thr 1 * * | * * * * + * All thr 2 * | * * * + * All thr 3 * | * * * + * ------------------------------------------------------------------------------------- + * [::3] 1stInPt # | # + * [::3] thr 0 * | * + * [::3] thr 1 * | * * + * [::3] thr 2 | * + * [::3] thr 3 | * + * + * The phase relation is the same as above for the first part, but for the second part, the phase relation is + * `phase == element_idx % period + 0`, where `0` is the phase of the node with GID 8, the first node in the second + * part. + * + * + * #### Local iterator initialization for primitive collection + * + * 1. Obtain the `first_phase_`, i.e., the phase (rank/thread) of the `first_` element in the collection. + * 2. Let `proc_phase_` be the phase of the rank or thread constructing the iterator (i.e., the number of the rank or + * thread) + * 3. Then set + * - `part_idx_ = 0, element_idx_ = ( proc_phase_ - first_phase_ + period ) % period` + * - `step_ = period` + * - `kind_ = NCIterator::{RANK,THREAD}_LOCAL` + * + * #### Local iterator initialization for composite collection + * + * Here, we may need to move through several parts to find the first valid entry. We proceed as follows: + * + * 1. Set `part_idx = nc.first_part_, elem_idx = nc.first_elem_` + * 2. With `elem_idx` as starting point, find index of first element in part `part_idx` that belongs to the current + * rank/thread. This is done by `first_index()` provided by `libnestutil/numerics.h`, see documentation of algorithm + * there. This step is the "phase adjustment" mentioned above. + * 3. If Step 2 did not return a valid index, increase `part_idx` until we have found a part containing a valid entry, + * i.e., one with `nc.first_in_part_[part_idx] != invalid_index` or until we have exhausted the collection + * 4. If we have exhausted the collection, set iterator to `end()`, otherwise set `elem_idx = + * nc.first_in_part_[part_idx]` and repeat Step 2. + * + * We further have + * - `step_ = lcm(nc.stride_, period)` + * - `kind_ = NCIterator::{RANK, THREAD}_LOCAL` + * + * #### Stepping a local iterator + * + * - Assume we want to move `n` elements forward. In the `[::3]` example with four threads above, + * starting with `thread_local_begin()` on thread 0 and stepping `n=1` element forward + * would take us from the element with GID 4 to the element with GID 12. + * - Proceed like this + * 1. Compute candidate element index `new_idx = element_idx_ + n * step_` + * 2. If `new_idx` is in the current part, we set `element_idx_ = new_idx` and are done. + * 3. Otherwise, if there are no more parts, set to the end iterator + * 4. If more parts are available, we need to unroll the step by `n` into `n` steps of size 1, because otherwise + * we cannot handle the phase adjustment on transition into new parts correctly. + * 5. For each individual step we do the following + * a. Advance `part_idx_` until we have a valid `nc.first_in_part_[part_idx_]` or no more parts + * b. If there are no more parts, set to end iterator and return + * c.
Otherwise, set `element_idx_ = nc.first_in_part_[part_idx_]` and then find the first element + * after that local to the current thread/rank using the same algorithm as for the local iterator initialization + * 6. Set to the end() iterator if we did not find a valid solution. */ class nc_const_iterator { friend class NodeCollectionPrimitive; friend class NodeCollectionComposite; +public: + //! Markers for kind of iterator, required by the iterator advance functions. + enum class NCIteratorKind + { + GLOBAL, //!< iterate over all elements of node collection + RANK_LOCAL, //!< iterate only over elements on owning rank + THREAD_LOCAL, //!< iterate only over elements on owning thread + END //!< end iterator, never increase + }; + private: - NodeCollectionPTR coll_ptr_; //!< holds pointer reference in safe iterators + NodeCollectionPTR coll_ptr_; //!< pointer to keep node collection alive, see note size_t element_idx_; //!< index into (current) primitive node collection size_t part_idx_; //!< index into parts vector of composite collection - size_t step_; //!< step for skipping due to e.g. slicing + size_t step_; //!< internal step also accounting for stepping over rank/thread + const NCIteratorKind kind_; //!< whether to iterate over all elements or rank/thread specific + const size_t rank_or_vp_; //!< rank or vp iterator is bound to - /** - * Pointer to primitive collection to iterate over. - * - * Zero if iterator is for composite collection. - */ + //! Pointer to primitive collection to iterate over. Zero if iterator is for composite collection. NodeCollectionPrimitive const* const primitive_collection_; - /** - * Pointer to composite collection to iterate over. - * - * Zero if iterator is for primitive collection. - */ + //! Pointer to composite collection to iterate over. Zero if iterator is for primitive collection. NodeCollectionComposite const* const composite_collection_; /** @@ -120,12 +447,13 @@ class nc_const_iterator * @param collection_ptr smart pointer to collection to keep collection alive * @param collection Collection to iterate over * @param offset Index of collection element iterator points to - * @param step Step for skipping due to e.g. slicing + * @param stride Step for skipping due to e.g. slicing; does NOT include stepping over rank/thread */ explicit nc_const_iterator( NodeCollectionPTR collection_ptr, const NodeCollectionPrimitive& collection, size_t offset, - size_t step = 1 ); + size_t stride, + NCIteratorKind kind = NCIteratorKind::GLOBAL ); /** * Create safe iterator for NodeCollectionComposite. @@ -134,18 +462,29 @@ class nc_const_iterator * @param collection Collection to iterate over * @param part Index of part of collection iterator points to * @param offset Index of element in NC part that iterator points to - * @param step Step for skipping due to e.g. slicing + * @param stride Step for skipping due to e.g. slicing; does NOT include stepping over rank/thread */ explicit nc_const_iterator( NodeCollectionPTR collection_ptr, const NodeCollectionComposite& collection, size_t part, size_t offset, - size_t step = 1 ); + size_t stride, + NCIteratorKind kind = NCIteratorKind::GLOBAL ); /** - * Conditionally update element_idx and part_idx for composite NodeCollections + * Return element_idx_ for next element if within part. Returns current element_idx_ otherwise. */ - void composite_update_indices_(); + size_t find_next_within_part_( size_t n ) const; + + /** + * Advance composite GLOBAL iterator by n elements, taking stride into account.
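+ *
+ * Reached from operator+=() only when the target element lies beyond the current part. A sketch of the
+ * calling pattern (illustrative only):
+ *
+ *   auto it = nc->begin(); // nc is a composite NodeCollectionPTR
+ *   it += 5;               // if element 5 lies in a later part, operator+=() delegates to this method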
+ */ + void advance_global_iter_to_new_part_( size_t n ); + + /** + * Advance composite {THREAD,RANK}_LOCAL iterator by n elements, taking stride into account. + */ + void advance_local_iter_to_new_part_( size_t n ); public: using iterator_category = std::forward_iterator_tag; @@ -155,22 +494,35 @@ class nc_const_iterator using reference = NodeIDTriple&; nc_const_iterator( const nc_const_iterator& nci ) = default; - void get_current_part_offset( size_t&, size_t& ) const; + std::pair< size_t, size_t > get_part_offset() const; NodeIDTriple operator*() const; bool operator==( const nc_const_iterator& rhs ) const; bool operator!=( const nc_const_iterator& rhs ) const; bool operator<( const nc_const_iterator& rhs ) const; bool operator<=( const nc_const_iterator& rhs ) const; + bool operator>( const nc_const_iterator& rhs ) const; + bool operator>=( const nc_const_iterator& rhs ) const; nc_const_iterator& operator++(); nc_const_iterator operator++( int ); // postfix nc_const_iterator& operator+=( const size_t ); nc_const_iterator operator+( const size_t ) const; + /** + * Return step size of iterator. + * + * For thread- and rank-local iterators, this takes into account stepping over all VPs / ranks. + * For stepped node collections, this takes also stepping into account. Thus if we have a + * thread-local iterator in a simulation with 4 VPs and a node-collection step of 3, then the + * iterator's step is 12. + */ + size_t get_step_size() const; + void print_me( std::ostream& ) const; }; + /** * Superclass for NodeCollections. * @@ -181,6 +533,36 @@ class nc_const_iterator * The superclass also contains handling of the fingerprint, a unique identity * the NodeCollection gets from the kernel on creation, which ensures that the * NodeCollection is not used after the kernel is reset. + * + * There are two types of NodeCollections + * + * - **Primitive NCs** contain a contiguous range of GIDs of the same neuron model and always have stride 1. + * + * - Slicing a primitive node collection in the form ``nc[j:k]`` returns a new primitive node collection, + * *except* when ``nc`` has (spatial) metadata, in which case a composite node collection is returned. + * The reason for this is that we otherwise would have to create a copy of the position information. + * + * - **Composite NCs** can contain + * + * - A single primitive node collection with metadata created by ``nc[j:k]`` slicing; the composite NC + * then represents a view on the primitive node collection with window ``j:k``. + * - Any sequence of primitive node collections with the same or different neuron types; if the node collections + * contain metadata, all must contain the same metadata and all parts of the composite are separate views + * - A striding slice over a NC in the form ``nc[j:k:s]``, where ``j`` and ``k`` are optional. Here, + * ``nc`` must have stride 1. If ``nc`` has metadata, it must be a primitive node collection. + * + * For any node collection, three types of iterators can be obtained by different ``begin()`` methods: + * + * - ``begin()`` returns an iterator iterating over all elements of the node collection + * - ``rank_local_begin()`` returns an iterator iterating over the elements of the node collection which are local to + * the rank on which it is called + * - ``thread_local_begin()`` returns an iterator iterating over the elements of the node collection which are local + * to the thread (ie VP) on which it is called + * + * There is only a single type of end iterator returned by ``end()``. 
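+ *
+ * A minimal sketch of thread-local iteration using the ``begin()`` variants above (illustrative only;
+ * assumes `nc` is a valid NodeCollectionPTR and that the loop runs on the calling thread, e.g. inside
+ * an OpenMP parallel region):
+ *
+ *   const auto end_it = nc->end();
+ *   for ( auto it = nc->thread_local_begin(); it < end_it; ++it )
+ *   {
+ *     const size_t node_id = ( *it ).node_id; // every element of nc appears on exactly one thread
+ *     // ... process the thread-local node ...
+ *   }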
+ * + * For more information on composite node collections, see the documentation for the derived class and + * `nc_const_iterator`. */ class NodeCollection { @@ -189,6 +571,7 @@ class NodeCollection public: using const_iterator = nc_const_iterator; + /** * Initializer gets current fingerprint from the kernel. */ @@ -299,12 +682,12 @@ class NodeCollection virtual const_iterator begin( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const = 0; /** - * Method to get an iterator representing the beginning of the NodeCollection. + * Return iterator stepping from first node on the thread it is called on over nodes on that thread. * * @return an iterator representing the beginning of the NodeCollection, in a * parallel context. */ - virtual const_iterator local_begin( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const = 0; + virtual const_iterator thread_local_begin( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const = 0; /** * Method to get an iterator representing the beginning of the NodeCollection. @@ -312,7 +695,7 @@ class NodeCollection * @return an iterator representing the beginning of the NodeCollection, in an * MPI-parallel context. */ - virtual const_iterator MPI_local_begin( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const = 0; + virtual const_iterator rank_local_begin( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const = 0; /** * Method to get an iterator representing the end of the NodeCollection. @@ -325,11 +708,13 @@ class NodeCollection virtual const_iterator end( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const = 0; /** - * Method that creates an ArrayDatum filled with node IDs from the NodeCollection. + * Method that creates an ArrayDatum filled with node IDs from the NodeCollection; for debugging * - * @return an ArrayDatum containing node IDs + * @param selection is "all", "rank" or "thread" + * + * @return an ArrayDatum containing node IDs ; if thread, separate thread sections by "0 thread# 0" */ - virtual ArrayDatum to_array() const = 0; + ArrayDatum to_array( const std::string& selection ) const; /** * Get the size of the NodeCollection. @@ -341,9 +726,9 @@ class NodeCollection /** * Get the step of the NodeCollection. * - * @return step between node IDs in the NodeCollection + * @return Stride between node IDs in the NodeCollection */ - virtual size_t step() const = 0; + virtual size_t stride() const = 0; /** * Check if the NodeCollection contains a specified node ID @@ -361,10 +746,10 @@ class NodeCollection * * @param start Index of the NodeCollection to start at * @param end One past the index of the NodeCollection to stop at - * @param step Number of places between node IDs to skip. Defaults to 1 + * @param stride Number of places between node IDs to skip. Defaults to 1 * @return a NodeCollection pointer to the new, sliced NodeCollection. */ - virtual NodeCollectionPTR slice( size_t start, size_t end, size_t step ) const = 0; + virtual NodeCollectionPTR slice( size_t start, size_t end, size_t stride ) const = 0; /** * Sets the metadata of the NodeCollection. @@ -392,9 +777,11 @@ class NodeCollection /** * Returns index of node with given node ID in NodeCollection. * + * Index here is into the sliced node collection, so that nc[ nc.get_nc_index( gid )].node_id == gid. + * * @return Index of node with given node ID; -1 if node not in NodeCollection. 
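+ *
+ * Illustration with hypothetical values (a primitive collection of GIDs 10..19 sliced with start 2 and
+ * stride 3, so the sliced collection contains GIDs 12, 15 and 18):
+ *
+ *   nc->get_nc_index( 15 ) == 1;  // and ( *( nc->begin() + 1 ) ).node_id == 15
+ *   nc->get_nc_index( 14 ) == -1; // 14 lies in the underlying range but is skipped by the stride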
*/ - virtual long get_lid( const size_t ) const = 0; + virtual long get_nc_index( const size_t ) const = 0; /** * Returns whether the NodeCollection contains any nodes with proxies or not. @@ -403,6 +790,11 @@ class NodeCollection */ virtual bool has_proxies() const = 0; + /** + * Collect metadata into dictionary. + */ + void get_metadata_status( DictionaryDatum& ) const; + /** * return the first stored ID (i.e, ID at index zero) inside the NodeCollection */ @@ -431,6 +823,8 @@ class NodeCollectionPrimitive : public NodeCollection friend class nc_const_iterator; private: + // Even though all members are logically const, we cannot declare them const because + // sorting or merging the parts_ array requires assignment. size_t first_; //!< The first node ID in the primitive size_t last_; //!< The last node ID in the primitive size_t model_id_; //!< Model ID of the node IDs @@ -447,8 +841,6 @@ class NodeCollectionPrimitive : public NodeCollection void assert_consistent_model_ids_( const size_t ) const; public: - using const_iterator = nc_const_iterator; - /** * Create a primitive from a range of node IDs, with provided model ID and * metadata pointer. @@ -508,21 +900,18 @@ class NodeCollectionPrimitive : public NodeCollection bool operator==( const NodeCollectionPrimitive& rhs ) const; const_iterator begin( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const override; - const_iterator local_begin( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const override; - const_iterator MPI_local_begin( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const override; + const_iterator thread_local_begin( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const override; + const_iterator rank_local_begin( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const override; const_iterator end( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const override; - //! Returns an ArrayDatum filled with node IDs from the primitive. - ArrayDatum to_array() const override; - //! Returns total number of node IDs in the primitive. size_t size() const override; - //! Returns the step between node IDs in the primitive. - size_t step() const override; + //! Returns the stride between node IDs in the primitive (always 1). + size_t stride() const override; bool contains( const size_t node_id ) const override; - NodeCollectionPTR slice( size_t start, size_t end, size_t step = 1 ) const override; + NodeCollectionPTR slice( size_t start, size_t end, size_t stride = 1 ) const override; void set_metadata( NodeCollectionMetadataPTR ) override; @@ -531,7 +920,7 @@ class NodeCollectionPrimitive : public NodeCollection bool is_range() const override; bool empty() const override; - long get_lid( const size_t ) const override; + long get_nc_index( const size_t ) const override; bool has_proxies() const override; @@ -564,20 +953,34 @@ NodeCollectionPTR operator+( NodeCollectionPTR lhs, NodeCollectionPTR rhs ); * contiguous and homogeneous with each other. If the composite is sliced, it * also holds information about what index to start at, one past the index to end at, and * the step. The endpoint is one past the last valid node. + * + * @note To avoid creating copies of Primitives (not sure that saves much), Composite keeps + * primitives as they are. These are called parts. 
It then sets markers + * + * - ``first_part_``, ``first_elem_`` to the first node belonging to the slice + * - ``last_part_``, ``last_elem_`` to the last node belonging to the slice + * + * @note + * - Any part after ``first_part_`` but before ``last_part_`` will always be in the NC in its entirety. + * - A composite node collection is never empty (in that case it would be replaced with a Primitive). Therefore, + * there is always at least one part with one element. */ class NodeCollectionComposite : public NodeCollection { friend class nc_const_iterator; private: - std::vector< NodeCollectionPrimitive > parts_; //!< Vector of primitives - size_t size_; //!< Total number of node IDs - size_t step_; //!< Step length, set when slicing. - size_t start_part_; //!< Primitive to start at, set when slicing - size_t start_offset_; //!< Element to start at, set when slicing - size_t end_part_; //!< Primitive or one past the primitive to end at, set when slicing - size_t end_offset_; //!< One past the element to end at, set when slicing + std::vector< NodeCollectionPrimitive > parts_; //!< Primitives forming composite + size_t size_; //!< Total number of node IDs, takes into account slicing + size_t stride_; //!< Step length, set when slicing. + size_t first_part_; //!< Primitive to start at, set when slicing + size_t first_elem_; //!< Element to start at, set when slicing + size_t last_part_; //!< Last entry of parts_ belonging to sliced NC + size_t last_elem_; //!< Last entry of parts_[last_part_] belonging to sliced NC bool is_sliced_; //!< Whether the NodeCollectionComposite is sliced + std::vector< size_t > cumul_abs_size_; //!< Cumulative size of parts + std::vector< size_t > + first_in_part_; //!< Local index to first element in each part when slicing is taken into account, or invalid_index /** * Goes through the vector of primitives, merging as much as possible. @@ -586,15 +989,59 @@ class NodeCollectionComposite : public NodeCollection */ void merge_parts_( std::vector< NodeCollectionPrimitive >& parts ) const; - const_iterator local_begin_( const NodeCollectionPTR cp, - const size_t num_vp_elements, - const size_t current_vp_element, - const size_t vp_element_first_node ) const; + //! Type for lambda-helper function used by {rank, thread, specific}_local_begin + typedef size_t ( *gid_to_phase_fcn_ )( size_t ); + + /** + * Abstraction of {rank, thread}_local_begin. + * + * @param period number of ranks or virtual processes + * @param phase calling rank or virtual process + * @param start_part begin search in this part of the collection + * @param start_offset begin search from this offset in start_part + * @param period_first_node function converting gid to rank or thread + * @returns { part_index, part_offset }; values are `invalid_index` if no solution found + */ + std::pair< size_t, size_t > specific_local_begin_( size_t period, + size_t phase, + size_t start_part, + size_t start_offset, + gid_to_phase_fcn_ period_first_node ) const; + + /** + * Return true if part_idx/element_idx pair indicates element of collection + */ + bool valid_idx_( const size_t part_idx, const size_t element_idx ) const; + + /** + * Find next part and offset in it after moving beyond previous part, based on stride.
+ * + * @param part_idx Part for current iterator position + * @param element_idx Element for current iterator position + * @param n Number of node collection elements we advance by (ie argument that was passed to to `operator+(n)`) + * + * @return New part-offset tuple pointing into new part, or invalid_index tuple. + */ + std::pair< size_t, size_t > find_next_part_( size_t part_idx, size_t element_idx, size_t n = 1 ) const; + + //! helper for thread_local_begin/compsite_update_indices + static size_t gid_to_vp_( size_t gid ); + + //! helper for rank_local_begin/compsite_update_indices + static size_t gid_to_rank_( size_t gid ); + public: /** * Create a composite from a primitive, with boundaries and step length. * + * Let the slicing be given by b:e:s for brevity. Then the elements of the sliced composite will be given by + * + * b, b + s, ..., b + j s < e <=> b, b + s, ..., b + j s ≤ e - 1 <=> j ≤ floor( ( e - 1 - b ) / s ) + * + * Since j = 0 is included in the sequence above, the sliced node collection has 1 + floor( ( e - 1 - b ) / s ) + * elements. Flooring is implemented via integer division. + * * @param primitive Primitive to be converted * @param start Offset in the primitive to begin at. * @param end Offset in the primitive, one past the node to end at. @@ -602,17 +1049,14 @@ class NodeCollectionComposite : public NodeCollection */ NodeCollectionComposite( const NodeCollectionPrimitive&, size_t, size_t, size_t ); - /** - * Composite copy constructor. - * - * @param comp Composite to be copied. - */ - NodeCollectionComposite( const NodeCollectionComposite& ) = default; - /** * Creates a new composite from another, with boundaries and step length. * This constructor is used only when slicing. * + * Since we do not allow slicing of sliced node collections with step > 1, the underlying node collections all + * have step one and we can calculate the size of the sliced node collection as described in the constructor + * taking a NodeCollectionPrimitive as argument. + * * @param composite Composite to slice. * @param start Index in the composite to begin at. * @param end Index in the composite one past the node to end at. @@ -623,10 +1067,20 @@ class NodeCollectionComposite : public NodeCollection /** * Create a composite from a vector of primitives. * + * Since primitives by definition contain contiguous elements, the size of the composite collection is the + * sum of the size of its parts. + * * @param parts Vector of primitives. */ explicit NodeCollectionComposite( const std::vector< NodeCollectionPrimitive >& ); + /** + * Composite copy constructor. + * + * @param comp Composite to be copied. + */ + NodeCollectionComposite( const NodeCollectionComposite& ) = default; + void print_me( std::ostream& ) const override; size_t operator[]( const size_t ) const override; @@ -646,18 +1100,15 @@ class NodeCollectionComposite : public NodeCollection bool operator==( const NodeCollectionPTR rhs ) const override; const_iterator begin( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const override; - const_iterator local_begin( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const override; - const_iterator MPI_local_begin( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const override; + const_iterator thread_local_begin( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const override; + const_iterator rank_local_begin( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const override; const_iterator end( NodeCollectionPTR = NodeCollectionPTR( nullptr ) ) const override; - //! 
Returns an ArrayDatum filled with node IDs from the composite. - ArrayDatum to_array() const override; - //! Returns total number of node IDs in the composite. size_t size() const override; - //! Returns the step between node IDs in the composite. - size_t step() const override; + //! Returns the stride between node IDs in the composite. + size_t stride() const override; bool contains( const size_t node_id ) const override; NodeCollectionPTR slice( size_t start, size_t end, size_t step = 1 ) const override; @@ -669,7 +1120,7 @@ class NodeCollectionComposite : public NodeCollection bool is_range() const override; bool empty() const override; - long get_lid( const size_t ) const override; + long get_nc_index( const size_t ) const override; bool has_proxies() const override; }; @@ -695,19 +1146,42 @@ NodeCollection::get_first() const inline size_t NodeCollection::get_last() const { - size_t offset = size() - 1; - return ( *( begin() + offset ) ).node_id; + assert( size() > 0 ); + return ( *( begin() + ( size() - 1 ) ) ).node_id; } - inline nc_const_iterator& nc_const_iterator::operator+=( const size_t n ) { - element_idx_ += n * step_; - if ( composite_collection_ ) + assert( kind_ != NCIteratorKind::END ); + + if ( n == 0 ) + { + return *this; + } + + const auto new_element_idx = find_next_within_part_( n ); + + // For a primitive collection, we either have a new element or are at the end + // For a composite collection, we may need to search through further parts, + // which is signalled by new_element_idx == element_idx_ + if ( primitive_collection_ or new_element_idx != element_idx_ ) { - composite_update_indices_(); + element_idx_ = new_element_idx; } + else + { + // We did not find a new element in the current part and have not exhausted the collection + if ( kind_ == NCIteratorKind::GLOBAL ) + { + advance_global_iter_to_new_part_( n ); + } + else + { + advance_local_iter_to_new_part_( n ); + } + } + return *this; } @@ -718,6 +1192,21 @@ nc_const_iterator::operator+( const size_t n ) const return it += n; } +inline nc_const_iterator& +nc_const_iterator::operator++() +{ + ( *this ) += 1; + return *this; +} + +inline nc_const_iterator +nc_const_iterator::operator++( int ) +{ + nc_const_iterator tmp = *this; + ++( *this ); + return tmp; +} + inline bool nc_const_iterator::operator==( const nc_const_iterator& rhs ) const { @@ -727,7 +1216,7 @@ nc_const_iterator::operator==( const nc_const_iterator& rhs ) const inline bool nc_const_iterator::operator!=( const nc_const_iterator& rhs ) const { - return not( part_idx_ == rhs.part_idx_ and element_idx_ == rhs.element_idx_ ); + return not( *this == rhs ); } inline bool @@ -739,14 +1228,37 @@ nc_const_iterator::operator<( const nc_const_iterator& rhs ) const inline bool nc_const_iterator::operator<=( const nc_const_iterator& rhs ) const { - return ( part_idx_ < rhs.part_idx_ or ( part_idx_ == rhs.part_idx_ and element_idx_ <= rhs.element_idx_ ) ); + return ( *this < rhs or *this == rhs ); } -inline void -nc_const_iterator::get_current_part_offset( size_t& part, size_t& offset ) const +inline bool +nc_const_iterator::operator>( const nc_const_iterator& rhs ) const +{ + return not( *this <= rhs ); +} + +inline bool +nc_const_iterator::operator>=( const nc_const_iterator& rhs ) const +{ + return not( *this < rhs ); +} + +inline std::pair< size_t, size_t > +nc_const_iterator::get_part_offset() const { - part = part_idx_; - offset = element_idx_; + return { part_idx_, element_idx_ }; +} + +inline size_t +nc_const_iterator::get_step_size() const +{ + 
return step_; +} + +inline NodeCollectionPTR +operator+( NodeCollectionPTR lhs, NodeCollectionPTR rhs ) +{ + return lhs->operator+( rhs ); } inline size_t @@ -755,7 +1267,7 @@ NodeCollectionPrimitive::operator[]( const size_t idx ) const // throw exception if outside of NodeCollection if ( first_ + idx > last_ ) { - throw std::out_of_range( "pos points outside of the NodeCollection" ); + throw std::out_of_range( String::compose( "pos %1 points outside of the NodeCollection", idx ) ); } return first_ + idx; } @@ -771,12 +1283,8 @@ NodeCollectionPrimitive::operator==( NodeCollectionPTR rhs ) const return false; } - // Not dereferencing rhs_ptr->metadata_ in the equality comparison because we want to avoid overloading - // operator==() of *metadata_, and to let it handle typechecking. - const bool eq_metadata = ( not metadata_ and not rhs_ptr->metadata_ ) - or ( metadata_ and rhs_ptr->metadata_ and *metadata_ == rhs_ptr->metadata_ ); - - return first_ == rhs_ptr->first_ and last_ == rhs_ptr->last_ and model_id_ == rhs_ptr->model_id_ and eq_metadata; + // We know we have a primitive collection, so forward + return *this == *rhs_ptr; } inline bool @@ -790,16 +1298,17 @@ NodeCollectionPrimitive::operator==( const NodeCollectionPrimitive& rhs ) const return first_ == rhs.first_ and last_ == rhs.last_ and model_id_ == rhs.model_id_ and eq_metadata; } -inline NodeCollectionPrimitive::const_iterator +inline NodeCollection::const_iterator NodeCollectionPrimitive::begin( NodeCollectionPTR cp ) const { - return const_iterator( cp, *this, 0 ); + return nc_const_iterator( cp, *this, /* offset */ 0, /* stride */ 1 ); } -inline NodeCollectionPrimitive::const_iterator +inline NodeCollection::const_iterator NodeCollectionPrimitive::end( NodeCollectionPTR cp ) const { - return const_iterator( cp, *this, size() ); + // The unique end() element of a primitive NC is given by (part 0, element size()) ) + return nc_const_iterator( cp, *this, /* offset */ size(), /* stride */ 1, nc_const_iterator::NCIteratorKind::END ); } inline size_t @@ -810,7 +1319,7 @@ NodeCollectionPrimitive::size() const } inline size_t -NodeCollectionPrimitive::step() const +NodeCollectionPrimitive::stride() const { return 1; } @@ -846,9 +1355,9 @@ NodeCollectionPrimitive::empty() const } inline long -NodeCollectionPrimitive::get_lid( const size_t neuron_id ) const +NodeCollectionPrimitive::get_nc_index( const size_t neuron_id ) const { - if ( neuron_id > last_ ) + if ( neuron_id < first_ or last_ < neuron_id ) { return -1; } @@ -864,23 +1373,19 @@ NodeCollectionPrimitive::has_proxies() const return not nodes_have_no_proxies_; } -inline NodeCollectionComposite::const_iterator +inline NodeCollection::const_iterator NodeCollectionComposite::begin( NodeCollectionPTR cp ) const { - return const_iterator( cp, *this, start_part_, start_offset_, step_ ); + return nc_const_iterator( cp, *this, first_part_, first_elem_, stride_ ); } -inline NodeCollectionComposite::const_iterator +inline NodeCollection::const_iterator NodeCollectionComposite::end( NodeCollectionPTR cp ) const { - if ( is_sliced_ ) - { - return const_iterator( cp, *this, end_part_, end_offset_, step_ ); - } - else - { - return const_iterator( cp, *this, parts_.size(), 0 ); - } + // The unique end() element of a composite NC is given by one past the last element + // This is the (potentially non-existing) next element irrespective of stride and step + return nc_const_iterator( + cp, *this, last_part_, last_elem_ + 1, /* stride */ 1, nc_const_iterator::NCIteratorKind::END ); } inline 
size_t @@ -890,9 +1395,9 @@ NodeCollectionComposite::size() const } inline size_t -NodeCollectionComposite::step() const +NodeCollectionComposite::stride() const { - return step_; + return stride_; } inline void @@ -922,6 +1427,19 @@ NodeCollectionComposite::empty() const // Composite NodeCollections can never be empty. return false; } + +inline bool +NodeCollectionComposite::contains( const size_t node_id ) const +{ + return get_nc_index( node_id ) != -1; +} + +inline bool +NodeCollectionComposite::valid_idx_( const size_t part_idx, const size_t element_idx ) const +{ + return part_idx < last_part_ or ( part_idx == last_part_ and element_idx <= last_elem_ ); +} + } // namespace nest #endif /* #ifndef NODE_COLLECTION_H */ diff --git a/nestkernel/spatial.h b/nestkernel/spatial.h index bd95528e9b..27534cb1b5 100644 --- a/nestkernel/spatial.h +++ b/nestkernel/spatial.h @@ -60,9 +60,9 @@ class LayerMetadata : public NodeCollectionMetadata void set_status( const DictionaryDatum&, bool ) override {}; void - get_status( DictionaryDatum& d ) const override + get_status( DictionaryDatum& d, NodeCollection const* nc ) const override { - layer_->get_status( d ); + layer_->get_status( d, nc ); } //! Returns pointer to object with layer representation @@ -102,8 +102,11 @@ class LayerMetadata : public NodeCollectionMetadata // Compare status dictionaries of this layer and rhs layer DictionaryDatum dict( new Dictionary() ); DictionaryDatum rhs_dict( new Dictionary() ); - get_status( dict ); - rhs_layer_metadata->get_status( rhs_dict ); + + // Since we do not have access to the node collection here, we + // compare based on all metadata, irrespective of any slicing + get_status( dict, /* nc */ nullptr ); + rhs_layer_metadata->get_status( rhs_dict, /* nc */ nullptr ); return *dict == *rhs_dict; } diff --git a/pynest/examples/brunel-py-ex-12502-0.gdf b/pynest/examples/brunel-py-ex-12502-0.gdf deleted file mode 100644 index 1d1e4b6469..0000000000 --- a/pynest/examples/brunel-py-ex-12502-0.gdf +++ /dev/null @@ -1,1441 +0,0 @@ -11 14.500 -18 14.500 -41 14.600 -44 14.900 -7 16.300 -8 15.300 -14 15.700 -15 15.100 -23 15.400 -26 15.200 -28 15.900 -36 15.100 -37 16.300 -39 16.500 -50 16.200 -1 17.900 -9 17.100 -40 18.000 -32 23.600 -12 26.600 -19 26.100 -25 26.200 -29 25.900 -34 26.000 -38 26.800 -16 28.100 -27 27.200 -35 28.100 -45 27.900 -48 27.900 -5 29.100 -43 29.800 -4 30.900 -33 30.100 -49 30.100 -17 32.000 -21 34.400 -24 33.100 -31 34.400 -47 33.200 -3 35.000 -6 34.700 -30 35.200 -22 36.900 -23 36.600 -39 38.900 -44 37.600 -15 40.200 -46 41.900 -20 42.400 -50 43.200 -11 44.000 -2 45.200 -13 45.400 -1 52.500 -10 51.200 -26 52.300 -5 54.100 -40 55.100 -41 54.200 -37 55.800 -7 58.400 -16 57.400 -19 58.100 -27 58.300 -9 59.800 -14 58.900 -22 58.900 -45 59.200 -21 60.500 -34 61.500 -36 60.900 -43 60.200 -12 64.200 -18 63.100 -24 64.000 -38 64.400 -49 65.900 -17 67.500 -25 68.800 -33 68.500 -35 67.900 -11 70.500 -23 70.300 -28 69.400 -42 69.500 -3 71.000 -8 70.900 -31 71.000 -32 71.700 -39 70.800 -44 71.100 -47 70.600 -15 72.700 -20 72.300 -50 73.500 -4 74.000 -29 76.200 -48 76.000 -1 77.300 -30 77.800 -13 79.500 -10 80.100 -46 80.700 -6 81.900 -26 86.800 -2 87.400 -37 89.900 -5 90.300 -14 90.700 -40 91.200 -24 92.000 -27 92.600 -30 91.600 -33 92.500 -39 92.700 -49 91.600 -3 93.300 -23 93.500 -36 94.000 -7 96.500 -11 97.000 -16 96.900 -18 97.100 -21 97.300 -19 97.600 -38 98.200 -41 98.200 -35 100.100 -47 99.500 -4 100.600 -22 100.700 -25 100.700 -43 101.300 -8 102.200 -45 103.300 -12 104.600 -28 104.600 -31 
103.700 -44 104.500 -9 106.300 -10 106.900 -32 106.600 -46 107.600 -13 108.900 -34 108.800 -50 108.500 -2 113.200 -1 114.900 -15 115.500 -20 114.300 -29 114.600 -42 115.100 -48 116.900 -17 118.000 -6 119.100 -21 118.700 -41 119.200 -5 122.500 -22 122.800 -3 123.800 -14 123.600 -23 124.500 -33 124.500 -40 123.300 -30 125.900 -36 124.900 -37 125.200 -11 126.700 -26 126.100 -35 126.500 -39 126.300 -47 126.600 -24 127.900 -38 128.800 -4 129.100 -19 130.700 -43 130.800 -7 132.300 -18 134.200 -25 134.900 -31 135.600 -45 135.300 -49 135.400 -8 136.600 -16 137.000 -29 137.200 -27 139.300 -10 139.900 -28 139.800 -32 142.200 -46 142.200 -2 142.800 -12 142.700 -13 143.600 -15 143.500 -20 142.900 -34 142.700 -1 146.100 -9 145.900 -44 147.400 -42 150.800 -21 152.000 -22 152.000 -36 153.300 -50 153.600 -3 155.300 -5 155.300 -6 156.900 -14 157.200 -27 157.500 -25 158.100 -37 157.700 -11 159.400 -30 159.500 -43 159.500 -48 159.200 -7 160.600 -17 160.600 -23 163.400 -24 163.400 -34 162.900 -38 162.300 -47 162.200 -35 163.700 -39 163.900 -8 165.900 -16 165.600 -33 166.300 -26 168.400 -40 170.500 -18 171.300 -29 171.800 -2 173.100 -4 172.900 -10 172.800 -19 173.300 -45 172.600 -12 174.100 -42 174.100 -32 176.300 -49 176.000 -11 177.600 -9 179.900 -28 179.300 -44 179.500 -6 180.600 -13 180.400 -50 181.300 -1 183.000 -14 182.700 -21 182.500 -31 182.500 -46 183.400 -3 187.000 -5 186.300 -20 186.700 -22 186.800 -24 186.600 -36 186.700 -41 186.300 -15 187.900 -17 188.300 -25 187.700 -23 189.800 -40 189.600 -47 189.900 -33 193.000 -37 192.100 -38 192.200 -48 193.900 -8 195.200 -30 195.800 -27 197.300 -4 199.400 -16 199.100 -18 199.500 -39 199.500 -19 200.900 -43 199.900 -26 202.200 -34 202.100 -42 202.300 -10 203.300 -35 202.600 -41 202.900 -2 204.100 -49 204.500 -7 206.800 -12 207.300 -6 209.200 -45 210.000 -31 210.500 -11 212.900 -17 212.300 -25 212.900 -50 212.100 -13 213.700 -9 214.900 -24 215.900 -15 216.400 -21 216.100 -32 217.000 -44 216.900 -5 218.800 -22 218.800 -1 219.800 -20 219.900 -40 219.100 -46 219.200 -29 222.000 -14 224.200 -48 225.000 -23 226.200 -36 226.200 -47 225.800 -37 226.600 -38 227.500 -3 229.000 -4 229.400 -27 229.500 -16 230.800 -8 231.200 -17 232.300 -18 231.600 -28 231.600 -34 231.400 -45 231.500 -26 234.800 -43 234.200 -30 235.700 -35 236.000 -7 238.000 -41 237.200 -31 239.100 -42 238.900 -10 241.200 -12 241.500 -33 240.300 -39 240.800 -19 244.300 -11 245.900 -2 246.700 -6 246.200 -1 248.700 -9 250.400 -24 249.300 -25 249.100 -38 251.700 -15 252.600 -21 253.100 -22 253.100 -49 253.000 -5 253.600 -14 254.600 -44 254.900 -17 255.300 -20 255.200 -29 255.100 -40 255.800 -46 257.000 -13 258.600 -27 258.200 -28 259.000 -32 258.400 -36 258.100 -4 261.000 -45 259.800 -19 261.900 -3 263.200 -34 262.600 -42 262.700 -16 265.500 -18 264.400 -37 265.100 -50 264.300 -8 266.400 -41 266.400 -47 266.800 -48 266.300 -35 267.100 -43 267.600 -11 272.200 -31 272.700 -26 273.400 -33 274.600 -12 277.000 -30 277.400 -24 280.200 -10 280.700 -15 281.000 -2 282.700 -6 283.200 -9 282.800 -23 283.800 -27 284.400 -1 285.800 -17 286.200 -20 285.500 -28 285.200 -40 286.400 -44 285.100 -49 286.200 -5 286.600 -14 287.900 -39 288.000 -22 289.500 -32 288.700 -21 290.700 -38 290.200 -42 290.500 -29 292.100 -36 291.300 -13 292.800 -46 292.900 -4 294.400 -7 295.200 -8 294.400 -16 294.600 -34 294.200 -37 294.300 -18 295.700 -19 296.200 -45 295.700 -50 296.100 -25 299.500 -31 304.100 -35 304.200 -43 304.100 -44 307.200 -3 308.600 -26 308.000 -41 308.300 -47 307.700 -22 309.500 -33 310.000 -48 309.200 -11 312.400 -1 314.300 
-2 314.600 -15 314.700 -6 315.700 -24 315.800 -5 316.800 -9 317.700 -10 316.600 -27 316.600 -28 317.000 -12 318.700 -23 319.500 -30 319.300 -49 319.500 -17 320.600 -37 319.800 -20 321.900 -39 322.200 -34 323.400 -40 323.300 -29 325.100 -36 324.900 -21 325.900 -38 325.900 -14 328.400 -16 327.200 -41 327.800 -4 328.700 -7 329.900 -8 329.300 -13 330.000 -19 329.800 -25 329.100 -45 330.400 -50 332.500 -46 334.100 -18 334.800 -35 337.000 -42 338.400 -5 340.200 -43 340.400 -11 341.100 -33 341.900 -44 341.500 -31 342.900 -32 343.500 -1 344.300 -28 344.300 -47 344.600 -2 346.100 -24 345.200 -37 345.500 -48 347.400 -29 348.800 -45 348.400 -6 349.800 -9 350.100 -15 351.000 -17 351.000 -12 352.500 -39 352.200 -41 352.000 -49 352.400 -27 353.500 -3 355.700 -30 356.000 -20 357.800 -26 358.400 -34 357.400 -22 359.500 -38 359.100 -46 358.800 -25 360.300 -14 362.400 -4 364.300 -16 364.100 -36 363.600 -10 365.500 -13 364.700 -40 364.900 -42 365.200 -19 366.600 -50 366.100 -7 368.900 -21 368.700 -8 369.900 -44 369.600 -1 371.600 -18 371.400 -23 370.700 -31 371.600 -2 373.500 -11 372.200 -28 374.000 -33 374.600 -43 373.800 -24 375.800 -48 375.600 -35 378.000 -39 377.800 -5 379.400 -32 380.400 -45 381.000 -29 382.300 -37 383.400 -41 383.800 -26 386.200 -27 386.300 -4 388.200 -9 387.300 -15 387.700 -20 387.200 -49 387.700 -16 389.000 -3 390.300 -14 391.000 -30 391.000 -34 390.200 -12 392.400 -17 392.800 -6 393.300 -42 394.200 -36 395.500 -46 394.600 -10 398.500 -23 397.600 -25 398.000 -28 398.800 -22 400.300 -38 399.800 -1 401.100 -40 401.900 -50 402.500 -2 404.600 -8 404.600 -13 406.200 -18 405.800 -20 406.200 -47 405.200 -48 405.200 -21 407.600 -29 408.000 -11 408.300 -19 408.500 -31 408.900 -45 408.100 -7 410.600 -24 410.800 -41 411.000 -43 410.800 -44 410.400 -5 411.800 -15 414.700 -26 418.600 -49 418.700 -4 420.700 -33 423.000 -6 423.600 -9 423.700 -27 423.800 -30 423.900 -39 423.900 -14 425.100 -16 424.900 -32 425.700 -37 425.000 -3 426.400 -12 426.400 -35 426.300 -42 426.200 -23 428.900 -17 430.500 -40 429.600 -10 430.600 -25 431.000 -22 432.800 -46 432.900 -36 433.800 -47 434.600 -7 436.400 -18 436.000 -38 436.500 -34 437.900 -50 437.200 -1 438.800 -8 438.900 -28 438.400 -39 439.000 -13 440.200 -45 440.500 -48 444.000 -21 444.600 -11 446.900 -26 446.800 -44 446.000 -5 447.300 -20 447.200 -29 448.000 -31 447.700 -41 447.500 -43 447.600 -2 449.200 -19 448.900 -27 448.900 -42 451.500 -15 452.900 -49 451.800 -24 453.800 -6 454.700 -4 456.400 -16 457.400 -9 458.700 -35 458.600 -37 458.500 -25 460.500 -3 461.800 -12 461.600 -23 461.300 -32 460.900 -7 462.800 -14 462.700 -17 462.200 -33 462.500 -34 462.600 -10 464.300 -22 465.100 -30 465.200 -50 465.700 -38 467.700 -45 467.500 -47 468.000 -40 468.300 -20 475.500 -39 475.300 -44 474.900 -36 475.800 -48 476.400 -13 478.400 -46 477.200 -21 479.200 -26 479.800 -28 479.000 -1 481.000 -8 481.400 -18 480.800 -31 480.600 -41 482.400 -5 483.500 -6 486.400 -11 486.100 -15 487.500 -19 486.500 -42 486.200 -37 487.800 -29 490.800 -7 492.700 -27 493.000 -43 493.500 -4 494.800 -12 494.600 -17 494.700 -22 494.400 -30 493.900 -23 495.100 -2 496.600 -3 497.600 -33 497.000 -9 498.900 -16 498.500 -24 498.800 -49 498.200 -35 500.300 -50 499.900 -14 501.600 -38 501.200 -25 503.500 -32 502.600 -39 503.000 -5 504.400 -10 505.400 -40 505.100 -45 504.900 -47 505.300 -8 506.400 -34 506.200 -44 505.800 -46 506.200 -36 508.000 -13 508.800 -21 510.400 -28 514.500 -37 514.400 -41 513.300 -18 515.300 -48 515.100 -31 516.600 -6 518.000 -11 519.300 -19 520.200 -20 519.800 -3 525.000 -7 
525.000 -22 524.100 -26 524.400 -30 523.700 -42 524.800 -43 526.300 -1 527.300 -12 527.500 -17 527.800 -27 527.700 -36 527.000 -15 528.300 -2 529.900 -38 530.800 -49 530.200 -4 531.900 -9 531.300 -14 531.600 -29 532.200 -33 532.500 -24 532.800 -50 533.400 -5 535.500 -23 534.400 -32 534.100 -10 535.800 -35 535.600 -39 536.800 -8 537.600 -13 538.300 -16 537.400 -18 538.400 -25 538.500 -46 537.700 -45 540.800 -31 543.900 -48 543.400 -34 546.000 -47 544.700 -44 547.300 -21 548.000 -40 548.700 -41 547.700 -28 550.100 -6 554.100 -19 554.500 -37 553.700 -22 556.100 -30 556.700 -3 560.300 -11 560.400 -20 559.600 -27 560.700 -49 561.000 -12 561.200 -14 561.300 -23 562.400 -24 562.200 -35 561.800 -36 561.900 -39 561.900 -42 561.200 -2 563.400 -7 563.000 -8 562.900 -18 563.100 -1 565.300 -4 564.900 -50 564.500 -15 566.600 -43 568.000 -10 569.800 -47 569.100 -5 570.300 -9 570.400 -17 570.500 -32 570.300 -29 571.800 -13 574.200 -48 573.200 -33 576.000 -16 577.200 -26 576.500 -46 576.500 -28 578.600 -25 579.900 -34 580.400 -44 579.300 -40 580.600 -41 581.400 -21 582.400 -24 582.100 -31 583.100 -45 582.500 -38 585.100 -2 587.600 -19 586.800 -30 587.300 -37 589.100 -14 589.900 -20 590.900 -12 591.300 -22 591.100 -49 591.100 -35 593.400 -39 593.200 -50 593.600 -47 595.000 -23 597.000 -32 596.300 -36 596.900 -1 597.100 -3 598.000 -8 598.300 -11 597.400 -15 597.500 -27 598.300 -43 598.000 -7 599.800 -9 600.500 -17 600.900 -48 600.800 -18 603.800 -29 604.000 -33 603.500 -46 603.900 -4 605.100 -10 608.900 -21 608.400 -6 609.200 -26 609.600 -28 610.000 -13 611.000 -16 611.600 -24 611.800 -31 611.100 -42 611.900 -44 612.000 -34 613.100 -40 613.100 -41 613.500 -5 614.200 -45 615.700 -20 617.200 -38 617.100 -2 619.400 -25 618.100 -50 619.600 -12 621.900 -47 622.000 -19 623.800 -49 623.200 -14 624.400 -32 624.300 -15 625.600 -22 627.000 -37 626.300 -48 626.600 -23 627.200 -27 628.200 -18 629.200 -35 628.900 -39 631.100 -1 632.900 -36 631.900 -7 635.500 -26 634.700 -30 635.200 -43 634.600 -3 636.200 -29 636.500 -24 638.200 -46 638.900 -17 639.200 -21 640.000 -33 640.400 -42 639.500 -4 641.300 -28 642.000 -44 641.100 -8 643.100 -16 643.300 -45 643.400 -6 644.900 -10 644.600 -11 644.100 -34 644.400 -47 643.700 -50 646.300 -20 647.300 -5 649.500 -13 649.100 -22 648.700 -40 650.600 -2 652.200 -25 652.300 -31 652.500 -32 652.200 -41 652.400 -38 652.600 -9 656.000 -35 657.000 -48 656.300 -12 657.100 -15 659.500 -27 659.900 -30 659.400 -49 659.100 -18 660.800 -19 660.300 -37 660.100 -39 661.000 -14 661.800 -23 661.700 -43 662.200 -11 664.300 -29 664.400 -7 665.100 -4 670.100 -36 671.300 -45 671.100 -1 672.300 -26 672.900 -46 672.700 -17 674.000 -21 674.500 -42 675.000 -44 674.900 -3 675.200 -8 675.200 -24 675.100 -34 675.600 -16 677.700 -31 676.900 -33 677.700 -13 679.000 -22 678.200 -28 679.500 -47 679.100 -5 679.800 -6 679.900 -38 680.700 -10 681.500 -25 683.500 -50 683.000 -20 685.200 -32 686.800 -2 687.300 -40 687.900 -23 688.700 -41 689.200 -48 689.200 -35 691.100 -12 692.900 -14 692.700 -30 692.700 -15 693.500 -37 694.900 -43 695.800 -4 696.900 -39 697.200 -11 698.400 -18 697.700 -19 697.700 -49 697.800 -29 699.400 -9 701.700 -45 702.000 -1 704.600 -10 705.900 -46 706.500 -28 706.600 -36 707.600 -13 708.800 -6 710.100 -17 710.000 -21 710.700 -42 710.200 -44 710.900 -8 712.500 -24 712.300 -3 713.200 -7 713.400 -14 713.100 -16 713.400 -32 712.600 -34 712.700 -37 712.900 -27 715.200 -33 715.900 -20 717.300 -26 717.200 -40 718.000 -47 718.100 -12 719.800 -15 719.600 -25 719.400 -31 719.900 -50 719.800 -23 721.400 -30 
721.300 -5 722.000 -48 722.500 -19 724.400 -22 723.300 -39 725.200 -4 726.300 -38 727.000 -35 728.200 -41 727.600 -49 729.800 -2 731.300 -43 732.500 -18 733.600 -16 736.000 -29 735.500 -9 737.900 -10 737.700 -28 738.900 -37 739.300 -1 740.700 -17 741.500 -24 742.400 -6 743.100 -31 743.700 -36 743.300 -42 742.800 -46 745.400 -3 746.500 -34 746.600 -13 747.600 -21 747.400 -7 748.700 -27 749.400 -38 748.600 -32 750.300 -5 753.000 -8 752.700 -15 752.100 -26 752.000 -44 752.300 -45 752.600 -11 753.200 -25 754.300 -50 753.100 -30 755.200 -4 757.400 -12 757.200 -47 757.100 -14 758.700 -19 758.700 -22 758.700 -23 759.000 -39 757.800 -41 758.800 -33 759.400 -40 760.400 -48 761.900 -49 761.300 -18 763.500 -35 762.300 -2 764.000 -42 765.900 -16 767.100 -1 769.200 -36 770.100 -43 769.800 -10 771.500 -11 772.500 -21 772.300 -31 772.400 -6 772.900 -9 772.600 -20 774.600 -24 775.000 -37 774.400 -17 776.400 -7 781.300 -13 781.200 -27 780.900 -29 781.000 -38 780.500 -41 780.900 -3 781.900 -15 782.900 -32 781.700 -46 781.600 -50 783.200 -23 787.200 -30 786.100 -49 788.100 -44 789.700 -45 790.200 -47 789.400 -4 790.800 -25 790.700 -33 792.000 -34 791.800 -40 791.300 -39 793.700 -8 796.400 -28 796.200 -6 797.600 -12 796.800 -19 797.000 -26 797.900 -35 796.600 -1 798.100 -22 799.400 -36 799.400 -5 800.100 -10 801.200 -14 801.300 -42 802.400 -48 802.000 -43 803.700 -3 806.700 -15 806.200 -9 807.400 -16 808.100 -24 808.200 -50 808.500 -2 808.600 -11 808.600 -31 809.500 -37 811.000 -38 811.000 -47 810.300 -18 812.200 -21 812.800 -27 811.800 -32 813.700 -41 814.400 -17 814.700 -29 815.500 -46 815.400 -13 816.200 -22 817.100 -4 817.700 -7 819.500 -20 819.600 -33 823.000 -30 824.900 -40 825.000 -49 825.600 -19 827.500 -25 827.300 -39 827.600 -45 827.100 -1 828.400 -23 828.700 -2 830.600 -16 830.100 -34 830.400 -36 830.600 -44 830.100 -26 832.000 -10 833.400 -8 834.200 -14 834.500 -28 834.800 -5 836.900 -6 835.600 -15 835.700 -18 835.600 -35 836.800 -42 837.000 -43 836.700 -47 838.300 -24 840.800 -31 841.500 -38 840.600 -12 841.700 -27 842.900 -3 843.600 -4 844.300 -9 844.000 -11 843.100 -37 843.100 -48 843.900 -21 847.100 -32 846.900 -41 846.100 -17 848.800 -20 848.700 -13 850.400 -29 849.500 -46 849.600 -50 849.600 -7 850.800 -33 852.700 -34 854.600 -2 856.000 -25 856.100 -30 856.900 -44 857.800 -39 859.000 -40 859.000 -49 859.400 -22 859.800 -23 860.100 -36 860.700 -8 861.800 -15 862.000 -1 864.000 -18 863.300 -19 863.500 -26 863.600 -42 864.000 -45 862.600 -16 864.800 -47 866.600 -6 867.700 -35 868.300 -10 868.800 -31 872.800 -38 872.300 -43 873.000 -14 873.800 -28 873.300 -9 874.900 -17 877.000 -5 878.300 -12 878.600 -37 878.500 -4 879.100 -11 880.300 -20 879.700 -3 881.800 -24 881.300 -29 880.800 -7 882.600 -45 882.900 -27 887.400 -32 887.000 -33 887.000 -48 887.400 -50 888.100 -13 890.100 -23 890.200 -25 889.800 -21 891.300 -30 892.100 -36 892.200 -39 892.200 -46 891.600 -2 893.400 -47 893.900 -49 893.400 -15 894.900 -17 896.900 -22 897.000 -34 896.600 -18 898.000 -19 898.100 -26 898.400 -41 898.000 -42 897.500 -44 897.100 -35 900.000 -6 901.100 -1 902.000 -38 902.200 -40 901.900 -16 903.500 -24 904.200 -31 905.700 -43 904.900 -8 908.900 -4 909.800 -12 909.100 -37 909.500 -11 911.100 -14 910.700 -28 911.400 -5 912.200 -9 912.700 -10 914.900 -45 915.100 -47 916.100 -3 919.300 -7 918.400 -48 919.400 -50 918.400 -22 920.600 -33 920.600 -41 919.900 -25 922.200 -36 922.000 -46 921.200 -30 923.200 -32 924.300 -39 924.300 -23 925.900 -29 927.000 -34 925.900 -49 926.100 -15 928.300 -1 930.000 -13 929.100 -18 930.000 
-24 930.000 -26 929.200 -2 931.200 -19 931.400 -21 930.900 -35 930.500 -42 931.000 -27 932.800 -44 932.300 -40 933.900 -8 935.200 -6 937.300 -31 937.000 -37 937.000 -47 936.100 -38 941.300 -14 943.500 -20 943.400 -28 942.900 -11 944.500 -22 945.900 -43 945.600 -46 945.200 -4 947.000 -16 947.700 -33 947.900 -45 947.700 -12 948.200 -50 948.800 -29 951.700 -5 953.000 -25 953.700 -48 954.500 -13 956.500 -1 957.500 -10 958.300 -15 958.300 -35 957.200 -41 957.600 -7 959.800 -17 959.300 -49 959.300 -23 960.400 -30 960.500 -36 960.100 -40 961.300 -9 961.600 -18 963.400 -42 963.200 -44 964.200 -47 964.300 -6 965.600 -8 964.600 -39 965.800 -2 967.500 -19 966.600 -24 966.300 -20 968.500 -21 968.300 -34 968.800 -31 969.700 -37 969.400 -3 971.600 -27 973.500 -28 973.600 -4 976.200 -16 978.900 -22 978.800 -32 978.600 -46 979.300 -48 979.500 -12 981.800 -26 981.300 -33 982.200 -5 983.200 -13 982.800 -14 982.600 -38 983.800 -43 983.500 -45 983.400 -50 983.500 -1 984.100 -23 986.400 -29 986.800 -9 988.200 -41 988.000 -21 990.400 -8 992.500 -11 992.800 -25 992.800 -37 991.800 -6 993.700 -7 993.300 -17 994.400 -49 993.100 -10 995.600 -15 994.700 -34 996.800 -2 998.300 -18 997.600 -47 998.600 diff --git a/pynest/examples/brunel-py-in-12503-0.gdf b/pynest/examples/brunel-py-in-12503-0.gdf deleted file mode 100644 index 3c47cc21e5..0000000000 --- a/pynest/examples/brunel-py-in-12503-0.gdf +++ /dev/null @@ -1,1422 +0,0 @@ -10005 14.600 -10010 14.300 -10030 14.900 -10033 14.500 -10037 14.100 -10038 14.500 -10041 14.400 -10046 14.700 -10001 15.800 -10007 15.100 -10011 15.800 -10014 15.600 -10016 15.100 -10019 15.400 -10021 16.500 -10027 15.600 -10029 15.100 -10039 16.300 -10043 15.300 -10044 16.200 -10012 16.800 -10031 22.100 -10024 23.500 -10032 25.100 -10004 26.700 -10020 27.000 -10035 27.000 -10040 25.700 -10003 27.900 -10006 27.900 -10013 27.100 -10036 27.900 -10049 27.500 -10017 28.700 -10022 28.600 -10023 29.500 -10002 32.800 -10008 32.400 -10039 31.700 -10018 33.600 -10025 33.200 -10042 34.500 -10034 34.600 -10009 37.400 -10015 36.200 -10048 37.400 -10026 37.600 -10021 39.700 -10050 40.000 -10028 40.800 -10033 40.700 -10044 41.200 -10012 43.500 -10027 42.100 -10007 45.000 -10047 43.900 -10005 45.900 -10043 46.000 -10016 46.600 -10038 47.500 -10037 48.500 -10001 51.500 -10029 51.700 -10045 53.200 -10004 55.400 -10031 54.400 -10046 55.700 -10003 57.200 -10014 57.500 -10017 57.300 -10041 58.200 -10006 59.500 -10020 59.400 -10023 58.600 -10049 60.000 -10002 61.300 -10018 60.100 -10040 60.200 -10032 62.200 -10042 63.000 -10008 64.400 -10034 63.100 -10011 65.500 -10013 64.600 -10025 66.000 -10035 66.900 -10015 68.800 -10019 70.200 -10022 69.900 -10045 69.100 -10005 71.800 -10009 72.000 -10024 70.600 -10039 70.900 -10010 72.800 -10021 73.300 -10030 72.900 -10033 73.400 -10048 72.300 -10050 73.100 -10027 74.800 -10044 76.500 -10028 78.800 -10038 79.600 -10047 81.000 -10026 81.700 -10036 81.100 -10007 82.900 -10001 86.100 -10004 86.000 -10016 88.500 -10017 88.500 -10029 87.400 -10022 89.700 -10037 89.600 -10002 90.700 -10014 90.100 -10032 90.900 -10040 91.200 -10049 90.300 -10003 92.900 -10006 92.300 -10012 92.800 -10023 92.800 -10025 93.000 -10031 92.300 -10043 92.800 -10013 94.300 -10034 93.300 -10042 93.600 -10035 96.100 -10020 98.800 -10039 100.500 -10018 101.600 -10050 101.900 -10011 103.100 -10015 103.500 -10027 102.200 -10008 103.900 -10010 104.700 -10041 104.300 -10005 107.500 -10019 106.900 -10030 106.600 -10033 107.900 -10046 107.900 -10003 108.400 -10038 108.500 -10045 108.700 -10024 109.600 -10023 
113.000 -10028 113.000 -10044 113.700 -10021 114.700 -10026 114.700 -10036 114.500 -10037 116.000 -10047 115.600 -10048 118.000 -10001 118.700 -10002 120.000 -10004 119.600 -10014 121.400 -10035 120.600 -10017 124.200 -10029 123.600 -10007 125.400 -10009 125.800 -10010 124.700 -10022 124.800 -10050 125.700 -10006 127.000 -10025 127.100 -10040 126.400 -10043 127.000 -10049 126.500 -10046 127.900 -10039 130.200 -10031 130.800 -10011 132.300 -10016 132.500 -10034 134.900 -10012 135.900 -10013 135.100 -10032 136.300 -10041 135.800 -10042 136.500 -10044 136.000 -10027 138.000 -10003 138.100 -10005 138.900 -10020 138.200 -10033 140.300 -10008 141.400 -10030 141.800 -10015 143.200 -10018 143.200 -10023 143.400 -10038 142.700 -10019 144.400 -10024 144.500 -10028 146.500 -10009 147.200 -10037 148.500 -10021 148.600 -10026 148.600 -10036 148.900 -10047 149.300 -10029 150.900 -10002 152.400 -10035 153.500 -10048 153.700 -10045 155.300 -10004 158.100 -10014 157.600 -10039 157.900 -10040 158.900 -10046 158.100 -10050 158.400 -10022 159.500 -10007 160.600 -10012 160.600 -10020 160.600 -10034 161.300 -10043 160.600 -10006 163.100 -10033 163.800 -10001 166.200 -10003 165.700 -10010 165.400 -10025 166.000 -10005 167.900 -10011 166.600 -10016 166.700 -10044 171.000 -10030 171.700 -10031 171.300 -10049 172.100 -10015 173.800 -10017 173.200 -10018 173.300 -10042 173.500 -10008 174.700 -10013 174.700 -10024 174.500 -10041 175.400 -10032 176.800 -10037 178.900 -10023 181.000 -10038 180.100 -10046 180.800 -10047 180.400 -10019 182.400 -10026 183.000 -10027 181.600 -10036 182.300 -10045 181.800 -10048 181.600 -10021 186.300 -10009 188.900 -10012 188.500 -10014 188.400 -10002 189.200 -10028 191.700 -10035 191.300 -10039 193.000 -10004 194.800 -10006 193.600 -10025 193.800 -10043 194.800 -10011 197.500 -10007 199.900 -10013 200.000 -10008 202.300 -10010 202.100 -10016 201.200 -10027 202.200 -10031 202.000 -10040 202.000 -10003 203.300 -10015 202.600 -10022 203.400 -10029 202.700 -10001 204.500 -10005 205.500 -10017 204.900 -10033 204.100 -10023 207.300 -10030 209.900 -10041 209.600 -10020 210.900 -10042 210.800 -10049 211.400 -10050 210.200 -10018 212.200 -10024 211.600 -10026 212.400 -10019 214.200 -10038 213.600 -10047 213.100 -10045 214.600 -10048 215.900 -10032 216.100 -10034 216.200 -10036 217.300 -10046 217.200 -10021 217.700 -10039 219.000 -10037 219.700 -10044 220.300 -10002 224.000 -10012 224.800 -10028 223.600 -10009 225.400 -10014 226.200 -10025 226.800 -10040 228.600 -10006 230.200 -10008 229.800 -10011 230.500 -10004 231.200 -10013 231.600 -10027 231.500 -10029 231.900 -10043 231.800 -10031 232.900 -10035 232.600 -10007 236.200 -10010 236.000 -10017 236.900 -10015 238.000 -10003 238.700 -10016 239.800 -10018 239.400 -10022 239.500 -10023 242.200 -10019 243.800 -10041 243.700 -10049 244.400 -10005 245.000 -10033 244.600 -10036 245.300 -10038 245.400 -10045 245.800 -10050 244.700 -10020 246.900 -10044 247.400 -10001 247.900 -10024 248.500 -10030 248.200 -10032 248.100 -10047 247.600 -10046 249.700 -10026 251.400 -10039 251.600 -10037 252.700 -10021 255.000 -10034 254.800 -10042 254.300 -10009 256.300 -10004 257.600 -10008 258.000 -10014 257.600 -10028 257.800 -10012 259.400 -10029 258.900 -10040 261.200 -10048 261.500 -10002 263.800 -10018 265.300 -10007 266.700 -10027 266.000 -10031 266.300 -10006 267.500 -10011 269.200 -10003 270.400 -10010 270.900 -10045 270.300 -10015 273.000 -10023 272.400 -10013 274.500 -10033 274.500 -10035 274.200 -10017 274.800 -10025 275.700 -10043 275.900 -10044 275.800 -10049 
275.800 -10005 277.500 -10016 276.100 -10019 277.300 -10041 277.300 -10047 276.600 -10032 279.000 -10030 279.400 -10050 279.400 -10022 281.500 -10020 282.600 -10024 282.100 -10042 283.400 -10001 285.500 -10004 286.300 -10012 286.300 -10036 285.100 -10039 286.000 -10046 285.900 -10026 286.800 -10037 287.400 -10029 288.100 -10034 289.700 -10008 292.000 -10028 293.800 -10031 292.900 -10021 294.600 -10038 295.200 -10040 294.200 -10048 295.600 -10009 298.100 -10014 297.800 -10007 300.600 -10011 301.400 -10023 300.500 -10002 302.100 -10013 301.600 -10016 303.000 -10027 302.000 -10035 303.500 -10022 307.500 -10033 307.300 -10043 306.700 -10006 308.900 -10017 308.600 -10018 308.700 -10019 308.900 -10045 307.600 -10003 312.000 -10005 311.900 -10034 310.600 -10036 311.000 -10026 312.200 -10044 312.600 -10001 313.600 -10024 316.200 -10030 316.500 -10042 316.200 -10047 315.600 -10049 315.600 -10008 317.100 -10010 318.000 -10015 317.100 -10029 317.900 -10032 317.000 -10050 317.600 -10041 318.600 -10037 320.600 -10046 320.100 -10028 326.300 -10012 327.600 -10021 327.500 -10048 328.200 -10007 329.500 -10016 328.900 -10018 329.600 -10031 330.400 -10035 331.400 -10038 330.200 -10013 332.600 -10019 331.600 -10009 333.600 -10034 334.600 -10003 337.100 -10039 336.800 -10040 336.500 -10002 338.500 -10004 338.400 -10017 338.000 -10001 340.500 -10011 339.400 -10020 340.500 -10025 339.100 -10033 339.100 -10023 341.900 -10043 340.600 -10045 341.300 -10027 342.200 -10044 342.300 -10036 343.700 -10005 346.400 -10014 346.400 -10026 345.800 -10037 346.600 -10006 349.100 -10042 348.300 -10010 349.700 -10022 350.000 -10032 352.100 -10047 351.400 -10050 352.300 -10028 353.400 -10049 353.600 -10019 355.300 -10029 354.100 -10041 354.100 -10013 357.400 -10031 357.900 -10018 361.500 -10035 361.400 -10048 360.800 -10015 362.100 -10011 364.800 -10016 365.500 -10008 367.000 -10012 367.200 -10038 366.100 -10021 368.800 -10043 368.600 -10046 368.600 -10001 369.700 -10002 370.200 -10003 370.000 -10007 369.300 -10024 369.900 -10039 369.500 -10017 370.700 -10044 371.900 -10009 372.400 -10030 373.000 -10045 373.000 -10040 374.200 -10004 376.900 -10005 377.400 -10023 379.300 -10042 378.600 -10028 380.100 -10020 382.400 -10037 381.400 -10041 382.400 -10014 382.600 -10026 382.900 -10027 383.800 -10034 383.300 -10036 383.400 -10006 386.700 -10019 386.400 -10022 387.000 -10033 386.600 -10050 386.100 -10010 387.500 -10049 388.000 -10029 389.400 -10047 389.400 -10016 391.100 -10043 390.900 -10013 392.700 -10031 392.000 -10025 397.000 -10032 396.700 -10048 397.300 -10002 398.600 -10018 397.800 -10035 398.200 -10038 399.800 -10044 399.900 -10001 400.600 -10009 401.400 -10012 401.000 -10021 400.800 -10017 402.200 -10040 403.500 -10030 405.000 -10003 405.100 -10008 405.800 -10011 405.100 -10015 405.200 -10046 405.400 -10050 406.400 -10039 407.000 -10004 410.000 -10007 409.600 -10045 410.300 -10005 411.100 -10037 411.400 -10026 414.000 -10034 414.800 -10006 418.100 -10029 417.600 -10024 420.000 -10027 419.600 -10033 419.500 -10049 419.500 -10014 420.600 -10047 420.300 -10022 421.900 -10023 422.900 -10042 422.800 -10019 423.900 -10020 424.500 -10028 423.500 -10036 423.500 -10043 423.100 -10010 425.300 -10041 425.400 -10008 426.900 -10013 426.600 -10031 426.700 -10032 426.900 -10016 427.900 -10001 430.600 -10009 431.900 -10018 430.900 -10025 432.600 -10002 434.900 -10044 436.000 -10004 437.500 -10021 436.900 -10030 437.000 -10038 437.600 -10048 437.300 -10012 438.200 -10046 438.200 -10050 440.100 -10015 442.300 -10003 443.400 -10007 443.900 -10035 
442.800 -10011 444.400 -10014 445.500 -10017 445.200 -10024 445.100 -10005 446.500 -10006 446.900 -10040 447.000 -10045 446.600 -10033 450.800 -10034 450.700 -10039 451.300 -10049 454.100 -10037 455.300 -10022 457.200 -10027 456.300 -10010 458.900 -10023 459.000 -10026 457.700 -10028 459.700 -10036 459.300 -10044 460.200 -10004 461.400 -10043 460.900 -10029 462.700 -10031 462.600 -10047 462.100 -10048 463.500 -10016 464.500 -10050 464.400 -10020 465.400 -10030 466.400 -10032 465.900 -10042 465.200 -10001 467.600 -10009 467.800 -10019 467.100 -10024 468.000 -10025 467.400 -10041 467.800 -10046 467.400 -10008 469.500 -10018 468.800 -10013 469.700 -10038 469.600 -10002 472.000 -10015 471.300 -10034 474.100 -10033 477.000 -10035 477.000 -10040 476.600 -10021 477.700 -10045 478.000 -10012 479.400 -10014 479.900 -10017 479.800 -10005 481.900 -10006 483.000 -10011 481.700 -10027 484.200 -10043 486.000 -10004 486.100 -10007 487.700 -10015 489.900 -10037 490.000 -10003 491.300 -10023 491.900 -10022 492.600 -10016 493.600 -10026 493.900 -10024 495.300 -10036 495.200 -10044 495.800 -10048 495.100 -10049 495.100 -10010 497.500 -10020 496.800 -10025 496.600 -10042 496.600 -10028 498.500 -10031 500.800 -10039 500.800 -10046 499.800 -10008 502.100 -10013 502.500 -10019 502.100 -10029 501.800 -10030 502.000 -10050 501.500 -10018 504.700 -10002 507.600 -10033 507.300 -10047 507.200 -10001 509.800 -10012 509.600 -10038 509.300 -10041 509.600 -10005 510.400 -10009 510.500 -10021 511.400 -10035 510.600 -10014 513.100 -10017 518.400 -10040 517.900 -10004 519.800 -10011 519.800 -10037 520.000 -10042 519.400 -10010 520.900 -10032 520.800 -10006 522.400 -10007 523.500 -10023 522.300 -10043 522.100 -10025 526.000 -10015 529.000 -10022 528.600 -10027 528.700 -10045 528.400 -10048 528.400 -10016 530.800 -10020 530.000 -10036 530.600 -10049 530.000 -10028 532.400 -10031 532.400 -10008 532.600 -10009 533.100 -10026 533.800 -10030 534.000 -10002 534.600 -10003 535.300 -10013 537.600 -10032 539.900 -10033 538.700 -10034 539.400 -10046 539.600 -10050 539.600 -10024 541.100 -10035 541.100 -10038 540.800 -10039 541.000 -10044 541.400 -10019 541.700 -10012 543.200 -10018 543.300 -10041 543.600 -10047 543.500 -10029 545.000 -10004 546.300 -10001 548.700 -10040 548.400 -10042 550.000 -10014 550.600 -10021 551.700 -10023 553.300 -10007 553.800 -10015 556.000 -10043 556.100 -10005 557.800 -10011 556.900 -10037 557.500 -10048 557.900 -10025 558.500 -10036 558.700 -10017 560.100 -10044 560.500 -10049 560.600 -10006 561.700 -10016 562.500 -10026 562.500 -10045 561.900 -10008 563.300 -10010 563.600 -10019 568.400 -10009 568.600 -10022 570.000 -10038 569.700 -10013 571.500 -10024 570.500 -10030 570.100 -10032 570.300 -10035 570.400 -10046 570.100 -10027 572.600 -10034 572.100 -10002 573.800 -10039 574.400 -10041 574.300 -10042 574.000 -10004 575.500 -10020 575.100 -10028 574.800 -10029 575.000 -10031 574.700 -10012 578.300 -10003 580.400 -10018 580.000 -10007 581.400 -10050 581.200 -10033 583.400 -10040 583.200 -10011 584.900 -10001 585.400 -10049 587.500 -10015 590.200 -10023 590.900 -10026 590.600 -10036 592.100 -10043 592.400 -10006 593.600 -10048 593.900 -10009 594.400 -10019 594.200 -10037 594.400 -10005 596.600 -10010 596.000 -10014 596.700 -10017 595.800 -10021 595.700 -10024 596.600 -10046 596.400 -10025 597.900 -10032 598.500 -10047 598.400 -10045 598.600 -10008 602.300 -10016 602.500 -10027 602.900 -10035 602.800 -10044 604.000 -10050 603.500 -10013 605.900 -10034 606.700 -10038 606.700 -10041 606.600 -10002 608.200 -10022 
607.600 -10042 607.600 -10012 609.400 -10030 609.500 -10004 611.600 -10018 611.300 -10020 611.500 -10029 613.200 -10028 613.800 -10003 615.900 -10021 616.500 -10024 616.200 -10033 615.400 -10001 617.100 -10011 616.800 -10039 617.000 -10040 619.400 -10043 619.100 -10049 618.600 -10023 620.600 -10036 620.700 -10007 622.100 -10031 621.900 -10009 625.400 -10015 627.000 -10017 627.200 -10019 628.300 -10026 627.300 -10045 628.100 -10048 627.400 -10050 628.500 -10047 631.200 -10016 634.400 -10035 634.400 -10008 634.900 -10010 635.000 -10037 635.200 -10042 635.400 -10005 637.300 -10002 638.000 -10014 638.300 -10025 638.700 -10038 638.600 -10027 641.900 -10041 641.300 -10034 642.500 -10006 644.400 -10012 644.800 -10044 643.900 -10046 643.800 -10004 645.700 -10022 645.500 -10030 646.300 -10024 647.900 -10031 647.100 -10003 648.300 -10013 648.600 -10028 649.100 -10043 648.100 -10001 650.400 -10011 650.300 -10039 650.400 -10018 652.200 -10020 651.900 -10021 652.400 -10029 651.600 -10032 652.700 -10015 654.200 -10036 654.400 -10040 654.600 -10009 656.900 -10049 657.000 -10007 660.800 -10010 661.300 -10017 660.600 -10033 660.800 -10023 662.600 -10008 663.700 -10016 664.400 -10019 663.300 -10035 663.500 -10037 663.300 -10042 665.500 -10047 665.100 -10046 667.300 -10026 669.500 -10041 669.700 -10045 669.100 -10048 670.200 -10050 669.100 -10025 671.500 -10002 673.200 -10044 673.500 -10005 673.600 -10024 673.700 -10030 674.600 -10034 674.400 -10040 674.600 -10003 677.400 -10012 677.000 -10028 677.900 -10013 679.000 -10027 678.100 -10031 678.300 -10038 680.300 -10043 680.000 -10006 681.100 -10001 683.400 -10011 683.200 -10004 685.300 -10016 686.900 -10020 685.600 -10021 685.800 -10009 689.900 -10007 690.300 -10017 690.500 -10015 692.000 -10022 691.600 -10035 692.400 -10036 692.000 -10049 691.600 -10008 694.400 -10029 693.200 -10042 694.200 -10050 696.900 -10014 699.000 -10023 697.600 -10025 698.900 -10032 697.700 -10019 700.000 -10046 699.300 -10005 702.300 -10013 702.700 -10018 702.200 -10045 702.300 -10024 703.600 -10026 705.000 -10039 704.900 -10047 704.600 -10010 705.800 -10034 705.500 -10041 706.100 -10003 707.700 -10027 707.300 -10030 707.700 -10048 708.500 -10002 710.500 -10016 710.700 -10028 710.600 -10033 709.600 -10040 709.900 -10012 711.900 -10038 712.500 -10006 713.000 -10020 714.000 -10044 712.800 -10021 715.700 -10031 715.600 -10011 718.700 -10043 719.000 -10049 719.000 -10001 720.900 -10037 720.700 -10042 721.100 -10004 721.600 -10009 723.900 -10029 728.100 -10035 728.100 -10036 728.300 -10007 729.100 -10025 729.200 -10032 729.900 -10039 731.800 -10005 732.900 -10048 732.800 -10008 734.400 -10010 733.900 -10013 733.700 -10018 733.600 -10019 734.000 -10033 733.800 -10015 739.500 -10023 740.700 -10024 740.000 -10030 740.000 -10003 742.300 -10016 742.300 -10026 741.900 -10046 741.500 -10050 741.100 -10027 743.200 -10034 743.000 -10045 743.000 -10002 748.000 -10014 748.100 -10020 748.000 -10022 748.400 -10028 747.300 -10031 747.800 -10038 747.400 -10006 749.000 -10044 749.700 -10049 748.900 -10011 750.300 -10047 750.400 -10032 751.600 -10037 752.900 -10040 752.200 -10012 754.400 -10021 753.500 -10043 753.500 -10041 756.100 -10009 759.000 -10039 760.300 -10008 761.000 -10017 761.600 -10035 761.500 -10004 762.400 -10013 763.100 -10029 763.200 -10030 762.900 -10001 764.800 -10007 764.700 -10010 764.300 -10019 763.700 -10025 763.900 -10005 767.500 -10033 767.500 -10027 768.900 -10036 769.400 -10042 770.400 -10018 772.400 -10048 772.000 -10003 773.900 -10015 773.400 -10024 774.500 -10023 777.500 -10028 
779.600 -10044 779.800 -10046 779.900 -10011 780.500 -10016 780.300 -10026 780.100 -10050 780.200 -10031 783.000 -10032 782.000 -10006 783.900 -10020 783.700 -10037 783.200 -10038 783.200 -10045 784.200 -10043 785.200 -10029 787.000 -10002 787.600 -10010 790.100 -10012 789.200 -10040 789.600 -10014 791.300 -10039 790.800 -10049 792.600 -10034 794.300 -10008 797.200 -10009 797.500 -10015 797.400 -10017 796.900 -10022 797.000 -10019 798.300 -10027 798.800 -10035 798.300 -10041 798.600 -10047 798.200 -10004 800.200 -10011 800.600 -10025 800.800 -10042 800.200 -10001 801.400 -10003 801.500 -10005 802.500 -10033 801.500 -10036 802.100 -10013 804.700 -10021 804.700 -10030 806.600 -10046 806.900 -10018 807.600 -10044 809.000 -10007 811.000 -10048 811.000 -10026 812.000 -10038 811.600 -10004 813.500 -10024 813.600 -10028 813.700 -10032 813.900 -10045 814.100 -10023 816.400 -10031 816.100 -10039 817.300 -10050 816.500 -10016 818.700 -10035 819.000 -10043 818.500 -10002 820.100 -10010 819.400 -10014 821.600 -10017 820.600 -10020 821.000 -10040 820.800 -10009 824.600 -10034 824.400 -10006 827.800 -10015 828.000 -10025 827.100 -10029 827.000 -10012 828.700 -10022 828.800 -10049 828.400 -10003 831.000 -10037 830.100 -10033 832.500 -10041 831.400 -10005 834.000 -10047 835.000 -10008 835.600 -10021 835.800 -10027 836.400 -10001 837.600 -10013 837.500 -10038 837.400 -10042 837.600 -10019 840.100 -10026 841.400 -10030 842.800 -10011 843.800 -10024 843.700 -10048 844.400 -10004 845.000 -10035 846.000 -10036 844.700 -10018 847.400 -10020 846.200 -10032 847.000 -10010 849.500 -10039 850.300 -10044 849.100 -10046 849.200 -10028 850.700 -10050 851.300 -10023 852.500 -10016 854.300 -10002 855.600 -10007 856.100 -10017 855.600 -10025 856.300 -10040 855.700 -10043 856.500 -10003 857.400 -10009 857.700 -10014 857.300 -10022 856.800 -10031 856.600 -10033 856.600 -10034 857.100 -10012 858.600 -10029 862.400 -10037 862.400 -10038 861.900 -10045 861.300 -10005 864.000 -10006 863.400 -10013 863.100 -10015 863.600 -10019 863.400 -10049 863.000 -10027 865.000 -10021 869.700 -10041 870.800 -10026 872.400 -10032 872.600 -10042 872.700 -10001 873.600 -10018 873.400 -10030 873.200 -10036 873.100 -10004 875.600 -10039 876.000 -10040 876.200 -10028 878.500 -10048 878.600 -10020 880.500 -10035 879.500 -10008 880.600 -10029 880.900 -10031 881.400 -10022 883.000 -10044 882.400 -10017 884.800 -10047 884.400 -10023 885.800 -10046 886.500 -10050 886.300 -10011 887.200 -10016 887.600 -10006 889.100 -10049 888.900 -10033 891.000 -10037 890.700 -10014 891.200 -10015 892.200 -10019 892.500 -10025 891.600 -10012 892.800 -10013 893.000 -10002 895.400 -10034 894.100 -10036 895.400 -10021 896.100 -10038 896.900 -10007 897.700 -10009 897.800 -10003 899.500 -10005 900.200 -10043 900.700 -10045 901.500 -10001 902.700 -10024 901.900 -10010 904.200 -10018 904.100 -10026 903.200 -10022 905.500 -10042 905.800 -10040 906.200 -10008 907.900 -10030 908.900 -10039 909.300 -10016 911.700 -10028 911.800 -10047 911.400 -10048 910.900 -10004 912.700 -10027 912.500 -10031 912.500 -10032 913.600 -10049 916.500 -10041 917.800 -10017 918.500 -10020 918.400 -10029 919.200 -10046 919.900 -10019 922.500 -10023 921.900 -10025 922.500 -10044 922.500 -10002 924.900 -10014 925.800 -10035 926.800 -10006 927.500 -10012 927.800 -10021 927.500 -10033 927.100 -10036 928.300 -10037 927.900 -10007 929.100 -10015 929.200 -10003 930.100 -10038 930.100 -10042 931.500 -10050 930.100 -10034 932.700 -10005 935.500 -10018 934.700 -10026 935.300 -10043 935.500 -10001 937.300 -10024 
936.300 -10045 936.800 -10008 937.700 -10030 938.100 -10011 939.700 -10022 940.000 -10031 939.200 -10040 940.300 -10010 940.700 -10013 940.600 -10039 941.900 -10004 945.800 -10027 946.400 -10047 946.100 -10019 947.200 -10032 949.200 -10044 948.400 -10041 950.400 -10023 952.500 -10049 951.500 -10026 956.900 -10028 955.800 -10017 957.600 -10025 957.300 -10002 959.500 -10009 959.200 -10016 959.400 -10018 959.500 -10037 959.800 -10048 959.200 -10020 960.700 -10006 962.000 -10012 962.900 -10014 961.700 -10046 962.100 -10038 963.600 -10042 964.100 -10050 963.800 -10015 964.800 -10029 965.200 -10036 966.900 -10043 967.300 -10003 968.900 -10005 968.400 -10001 969.100 -10007 969.300 -10045 969.900 -10010 971.400 -10024 970.600 -10008 973.200 -10030 972.400 -10031 972.900 -10039 972.100 -10013 974.300 -10019 974.500 -10021 974.400 -10033 974.400 -10035 974.800 -10044 975.400 -10034 977.100 -10011 978.900 -10022 979.200 -10040 978.600 -10016 980.700 -10027 980.100 -10028 981.300 -10032 981.400 -10041 981.700 -10017 985.200 -10037 985.500 -10042 985.400 -10047 986.600 -10049 985.900 -10004 987.200 -10001 991.200 -10002 990.300 -10009 991.800 -10025 992.400 -10023 993.900 -10026 993.500 -10006 995.200 -10020 995.000 -10046 995.300 -10035 997.300 -10048 997.300 -10012 999.200 -10043 1000.000 diff --git a/pynest/nest/lib/hl_api_types.py b/pynest/nest/lib/hl_api_types.py index 98dd9d3a80..16294e142c 100644 --- a/pynest/nest/lib/hl_api_types.py +++ b/pynest/nest/lib/hl_api_types.py @@ -39,6 +39,7 @@ is_literal, restructure_data, ) +from .hl_api_parallel_computing import Rank from .hl_api_simulation import GetKernelStatus try: @@ -514,6 +515,40 @@ def tolist(self): return list(self.get("global_id")) if len(self) > 1 else [self.get("global_id")] + def _to_array(self, selection="all"): + """ + Debugging helper to extract GIDs from node collections. + + `selection` can be `"all"`, `"rank"` or `"thread"` and extracts either all + nodes or those on the rank or thread on which it is executed. For `"thread"`, + separate lists are returned for all local threads independently. + """ + + res = sli_func("cva_g_l", self, selection) + + if selection == "all": + return {"All": res} + elif selection == "rank": + return {f"Rank {Rank()}": res} + elif selection == "thread": + t_res = {} + thr = None + ix = 0 + while ix < len(res): + while ix < len(res) and res[ix] != 0: + t_res[thr].append(res[ix]) + ix += 1 + assert ix == len(res) or ix + 3 <= len(res) + if ix < len(res): + assert res[ix] == 0 and res[ix + 2] == 0 + thr = res[ix + 1] + assert thr not in t_res + t_res[thr] = [] + ix += 3 + return t_res + else: + return res + def index(self, node_id): """ Find the index of a node ID in the `NodeCollection`. 
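To illustrate the `_to_array` debugging helper added to `NodeCollection` above, here is a minimal usage sketch. The model name, collection size, and thread count are arbitrary, and the exact dictionary contents depend on the MPI rank and thread configuration the snippet runs under.

```python
import nest

nest.ResetKernel()
nest.local_num_threads = 2

nodes = nest.Create("iaf_psc_alpha", 8)

# All global IDs, independent of the domain decomposition,
# e.g. {"All": [1, 2, 3, 4, 5, 6, 7, 8]}
print(nodes._to_array("all"))

# Global IDs owned by the calling MPI rank, keyed by rank,
# e.g. {"Rank 0": [1, 2, ..., 8]} when run on a single process
print(nodes._to_array("rank"))

# Global IDs split per local thread, e.g. {0: [1, 3, 5, 7], 1: [2, 4, 6, 8]}
print(nodes._to_array("thread"))
```

For `"thread"`, the helper decodes the flat list returned by the SLI function `cva_g_l`, in which each thread's GIDs are preceded by a `0, thread_id, 0` marker, so each dictionary value lists only the nodes hosted on that thread. The new `test_issue_3108.py` tests below rely on exactly this per-rank and per-thread view.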
diff --git a/pynest/nest/raster_plot.py b/pynest/nest/raster_plot.py index 811ecfeedd..fa6450762c 100644 --- a/pynest/nest/raster_plot.py +++ b/pynest/nest/raster_plot.py @@ -55,17 +55,16 @@ def extract_events(data, time=None, sel=None): """ val = [] + t_min, t_max = 0, float("inf") if time: t_max = time[-1] if len(time) > 1: t_min = time[0] - else: - t_min = 0 for v in data: t = v[1] node_id = v[0] - if time and (t < t_min or t >= t_max): + if not (t_min <= t < t_max): continue if not sel or node_id in sel: val.append(v) @@ -131,6 +130,7 @@ def from_file_pandas(fname, **kwargs): """Use pandas.""" data = None for f in fname: + # pylint: disable=possibly-used-before-assignment dataFrame = pandas.read_table(f, header=2, skipinitialspace=True) newdata = dataFrame.values diff --git a/sli/tokenarray.h b/sli/tokenarray.h index 5e1d57d8ee..0c2fa0ed41 100644 --- a/sli/tokenarray.h +++ b/sli/tokenarray.h @@ -262,6 +262,16 @@ class TokenArray } // Insertion, deletion + + /** + * Insert element at end. + * + * @note Calling with literal value can lead to undefined behavior. The following seems safe: + * + * TokenArray ta; + * const size_t zero = 0; + * ta.push_back( zero ); + */ void push_back( const Token& t ) { @@ -269,6 +279,15 @@ class TokenArray data->push_back( t ); } + /** + * Insert element at end. + * + * @note Calling with literal value can lead to undefined behavior. The following seems safe: + * + * TokenArray ta; + * const size_t zero = 0; + * ta.push_back( zero ); + */ void push_back( Datum* d ) { diff --git a/testsuite/do_tests.sh b/testsuite/do_tests.sh index 1a3884e8d3..09eac13637 100755 --- a/testsuite/do_tests.sh +++ b/testsuite/do_tests.sh @@ -520,8 +520,16 @@ if test "${PYTHON}"; then for numproc in $(cd ${PYNEST_TEST_DIR}/mpi/; ls -d */ | tr -d '/'); do XUNIT_FILE="${REPORTDIR}/${XUNIT_NAME}_mpi_${numproc}.xml" PYTEST_ARGS="--verbose --timeout $TIME_LIMIT --junit-xml=${XUNIT_FILE} ${PYNEST_TEST_DIR}/mpi/${numproc}" + + if "${DO_TESTS_SKIP_TEST_REQUIRING_MANY_CORES:-false}"; then + PYTEST_ARGS="${PYTEST_ARGS} -m 'not requires_many_cores'" + fi + set +e - $(sli -c "${numproc} (${PYTHON} -m pytest) (${PYTEST_ARGS}) mpirun =only") 2>&1 | tee -a "${TEST_LOGFILE}" + + # We need to use eval here because $() splits run_command in weird ways + run_command="$(sli -c "${numproc} (${PYTHON} -m pytest) (${PYTEST_ARGS}) mpirun =only")" + eval "${run_command}" 2>&1 | tee -a "${TEST_LOGFILE}" set -e done fi diff --git a/testsuite/mpitests/test_spatial_distributed_positions.sli b/testsuite/mpitests/test_spatial_distributed_positions.sli index 9b014f5006..f91116a032 100644 --- a/testsuite/mpitests/test_spatial_distributed_positions.sli +++ b/testsuite/mpitests/test_spatial_distributed_positions.sli @@ -30,18 +30,13 @@ skip_if_not_threaded ResetKernel << /total_num_virtual_procs 4 >> SetKernelStatus - << /uniform << /min 0.0 /max 1.0 >> >> CreateParameter /pos_param Set - pos_param pos_param dimension2d /pos Set - /layer_spec - << /positions pos - /n 4 + << /positions [ 1 4 ] { 0 2 arraystore } Table /edge_wrap false /elements /iaf_psc_alpha >> def /layer layer_spec CreateLayer def - layer GetMetadata /meta Set { diff --git a/testsuite/pytests/conftest.py b/testsuite/pytests/conftest.py index 163aca8ba4..0128b6eb9f 100644 --- a/testsuite/pytests/conftest.py +++ b/testsuite/pytests/conftest.py @@ -47,6 +47,18 @@ def test_gsl(): import testutil # noqa +def pytest_configure(config): + """ + Add NEST-specific markers. + + See https://docs.pytest.org/en/8.0.x/how-to/mark.html. 
+ """ + config.addinivalue_line( + "markers", + "requires_many_cores: mark tests as needing many cores (deselect with '-m \"not requires_many_cores\"')", + ) + + @pytest.fixture(scope="module", autouse=True) def safety_reset(): """ diff --git a/testsuite/pytests/connect_test_base.py b/testsuite/pytests/connect_test_base.py index 8cda8d403d..12124080ba 100644 --- a/testsuite/pytests/connect_test_base.py +++ b/testsuite/pytests/connect_test_base.py @@ -451,6 +451,9 @@ def get_degrees(fan, pop1, pop2): degrees = np.sum(M, axis=1) elif fan == "out": degrees = np.sum(M, axis=0) + else: + raise ValueError(f"fan must be 'in' or 'out', got '{fan}'.") + return degrees diff --git a/testsuite/pytests/mpi/2/test_issue_3108.py b/testsuite/pytests/mpi/2/test_issue_3108.py new file mode 120000 index 0000000000..98403dd8f7 --- /dev/null +++ b/testsuite/pytests/mpi/2/test_issue_3108.py @@ -0,0 +1 @@ +../../test_issue_3108.py \ No newline at end of file diff --git a/testsuite/pytests/mpi/3/test_issue_3108.py b/testsuite/pytests/mpi/3/test_issue_3108.py new file mode 120000 index 0000000000..98403dd8f7 --- /dev/null +++ b/testsuite/pytests/mpi/3/test_issue_3108.py @@ -0,0 +1 @@ +../../test_issue_3108.py \ No newline at end of file diff --git a/testsuite/pytests/mpi/4/test_issue_3108.py b/testsuite/pytests/mpi/4/test_issue_3108.py new file mode 120000 index 0000000000..98403dd8f7 --- /dev/null +++ b/testsuite/pytests/mpi/4/test_issue_3108.py @@ -0,0 +1 @@ +../../test_issue_3108.py \ No newline at end of file diff --git a/testsuite/pytests/sli2py_mpi/README.md b/testsuite/pytests/sli2py_mpi/README.md index cb48da01fc..6f9abee576 100644 --- a/testsuite/pytests/sli2py_mpi/README.md +++ b/testsuite/pytests/sli2py_mpi/README.md @@ -2,4 +2,4 @@ Test in this directory run NEST with different numbers of MPI ranks and compare results. -See documentation in mpi_test_wrappe.py for details. +See documentation in mpi_test_wrapper.py for details. 
diff --git a/testsuite/pytests/sli2py_mpi/mpi_test_wrapper.py b/testsuite/pytests/sli2py_mpi/mpi_test_wrapper.py index 9a76568b2b..5a052c53de 100644 --- a/testsuite/pytests/sli2py_mpi/mpi_test_wrapper.py +++ b/testsuite/pytests/sli2py_mpi/mpi_test_wrapper.py @@ -139,8 +139,11 @@ def _params_as_str(self, *args, **kwargs): return ", ".join( part for part in ( - ", ".join(f"{arg}" for arg in args), - ", ".join(f"{key}={value}" for key, value in kwargs.items()), + ", ".join(f"{arg if not inspect.isfunction(arg) else arg.__name__}" for arg in args), + ", ".join( + f"{key}={value if not inspect.isfunction(value) else value.__name__}" + for key, value in kwargs.items() + ), ) if part ) diff --git a/testsuite/pytests/test_clopath_synapse.py b/testsuite/pytests/test_clopath_synapse.py index 96e10451d4..89967e143a 100644 --- a/testsuite/pytests/test_clopath_synapse.py +++ b/testsuite/pytests/test_clopath_synapse.py @@ -127,6 +127,9 @@ def test_SynapseDepressionFacilitation(self): "tau_u_bar_plus": 114.0, "delay_u_bars": 5.0, } + else: + raise ValueError(f"Unsupported neuron model '{nrn_model}'") + syn_weights = [] # Loop over pairs of spike trains for s_t_pre, s_t_post in zip(spike_times_pre, spike_times_post): @@ -146,6 +149,8 @@ def test_SynapseDepressionFacilitation(self): conn_weight = 80.0 elif nrn_model == "hh_psc_alpha_clopath": conn_weight = 2000.0 + else: + raise ValueError(f"Unsupported neuron model '{nrn_model}'") spike_gen_post = nest.Create("spike_generator", 1, {"spike_times": s_t_post}) @@ -176,6 +181,8 @@ def test_SynapseDepressionFacilitation(self): correct_weights = [57.82638722, 72.16730112, 149.43359357, 103.30408341, 124.03640668, 157.02882555] elif nrn_model == "hh_psc_alpha_clopath": correct_weights = [70.14343863, 99.49206222, 178.1028757, 119.63314118, 167.37750688, 178.83111685] + else: + raise ValueError(f"Unsupported neuron model '{nrn_model}'") self.assertTrue(np.allclose(syn_weights, correct_weights, rtol=1e-7)) diff --git a/testsuite/pytests/test_connect_tripartite_bernoulli.py b/testsuite/pytests/test_connect_tripartite_bernoulli.py index 04ee1f003e..60d983bae2 100644 --- a/testsuite/pytests/test_connect_tripartite_bernoulli.py +++ b/testsuite/pytests/test_connect_tripartite_bernoulli.py @@ -177,6 +177,9 @@ def get_degrees(fan, pop1, pop2): degrees = np.sum(M, axis=1) elif fan == "out": degrees = np.sum(M, axis=0) + else: + raise ValueError(f"fan must be 'in' or 'out', got '{fan}'.") + return degrees diff --git a/testsuite/pytests/test_get_connections.py b/testsuite/pytests/test_get_connections.py index f08d1d40f5..06e67138e2 100644 --- a/testsuite/pytests/test_get_connections.py +++ b/testsuite/pytests/test_get_connections.py @@ -84,6 +84,20 @@ def test_get_connections_with_sliced_node_collection(): assert actual_sources == expected_sources +def test_get_connections_with_sliced_node_collection_2(): + """Test that ``GetConnections`` works with sliced ``NodeCollection``.""" + + nodes = nest.Create("iaf_psc_alpha", 11) + nest.Connect(nodes, nodes) + + # ([ 2 3 4 ] + [ 8 9 10 11 ])[::3] -> [2 8 11] + conns = nest.GetConnections((nodes[1:4] + nodes[7:])[::3]) + actual_sources = conns.get("source") + + expected_sources = [2] * 11 + [8] * 11 + [11] * 11 + assert actual_sources == expected_sources + + def test_get_connections_bad_source_raises(): """Test that ``GetConnections`` raises an error when called with 0.""" diff --git a/testsuite/pytests/test_issue_3106.py b/testsuite/pytests/test_issue_3106.py new file mode 100644 index 0000000000..afcae2a5d0 --- /dev/null +++ 
b/testsuite/pytests/test_issue_3106.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# +# test_issue_3106.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . + + +import nest +import pytest + + +@pytest.mark.skipif_missing_threads +def test_connect_with_threads_slice_and_mpi(): + """ + Test that connection with a sliced layer is possible on multiple threads. + """ + + num_neurons = 10 + nest.local_num_threads = 4 + + layer = nest.Create( + model="parrot_neuron", + n=num_neurons, + positions=nest.spatial.free(pos=nest.random.uniform(min=-1, max=1), num_dimensions=2, edge_wrap=False), + ) + + tgts = layer[::3] + + # Distance-dependent weight forces use of layer-connect + nest.Connect(layer, tgts, {"rule": "pairwise_bernoulli", "p": 1}, {"weight": nest.spatial.distance}) + # nest.Connect(layer, tgts, {"rule": "fixed_indegree", "indegree": 5}) + + assert nest.num_connections == len(layer) * len(tgts) diff --git a/testsuite/pytests/test_issue_3108.py b/testsuite/pytests/test_issue_3108.py new file mode 100644 index 0000000000..de2b178b2b --- /dev/null +++ b/testsuite/pytests/test_issue_3108.py @@ -0,0 +1,298 @@ +# -*- coding: utf-8 -*- +# +# test_issue_3108.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . + + +import itertools + +import nest +import pytest + +""" +Tests in this file were developed for regressions under three MPI processes. + +They should be run with 1, 3 and 4 MPI processes to ensure that all tests pass under various settings. + +The spatial tests test that NodeCollection::rank_local_begin() works. +The connect tests test that NodeCollection::thread_local_begin() works. +""" + +# Needs up to 16 cores when run on 4 MPI ranks.
+# Experiences severe slowdown on Github runners under Linux with MPI and OpenMP +pytestmark = pytest.mark.requires_many_cores + +if nest.ll_api.sli_func("is_threaded"): + num_threads = [1, 2, 3, 4] +else: + num_threads = [1] + + +@pytest.mark.parametrize("n_threads", num_threads) +@pytest.mark.parametrize( + "transform", + [ + lambda nc: nc[::3], + lambda nc: nc[1:], + lambda nc: nc[:5] + nc[8:], + lambda nc: (nc[:5] + nc[8:])[-1:], + lambda nc: (nc[:5] + nc[8:])[::2], + lambda nc: (nc[:5] + nc[8:])[::3], + lambda nc: (nc[:5] + nc[9:])[::2], + lambda nc: (nc[:5] + nc[9:])[::3], + lambda nc: (nc[:5] + nc[8:])[7:], + lambda nc: (nc[:5] + nc[8:])[7::3], + ], +) +def test_slice_node_collections(n_threads, transform): + nest.ResetKernel() + nest.local_num_threads = n_threads + + n_orig = nest.Create("parrot_neuron", 128) + n_orig_gids = n_orig._to_array()["All"] + + n_sliced = transform(n_orig) + n_pyslice_gids = transform(n_orig_gids) + + n_pyslice_gids_on_rank = sorted( + n.global_id for n in nest.NodeCollection(n_pyslice_gids) if n.vp % nest.NumProcesses() == nest.Rank() + ) + + assert n_sliced._to_array()["All"] == n_pyslice_gids + + n_sliced_gids_by_rank = n_sliced._to_array("rank") + assert len(n_sliced_gids_by_rank) == 1 + assert sorted(next(iter(n_sliced_gids_by_rank.values()))) == n_pyslice_gids_on_rank + + n_sliced_gids_by_thread = n_sliced._to_array("thread") + assert sorted(itertools.chain(*n_sliced_gids_by_thread.values())) == n_pyslice_gids_on_rank + + for thread, tgids in n_sliced_gids_by_thread.items(): + for node in nest.NodeCollection(tgids): + assert node.thread == thread + + +@pytest.mark.parametrize("n_threads", num_threads) +@pytest.mark.parametrize("stride", [1, 2, 3, 7, 12]) +def test_slice_single_element_parts(n_threads, stride): + """Same test as above, but on NC with single-element parts to check stepping over parts""" + + nest.ResetKernel() + nest.local_num_threads = n_threads + + # Use different models to get single-element node collections + mod_names = [f"pn{n:02d}" for n in range(91)] + for mod in mod_names: + nest.CopyModel("parrot_neuron", mod) + + n_orig = sum(nest.Create(mod) for mod in mod_names) + n_orig_gids = n_orig._to_array()["All"] + + n_sliced = n_orig[::stride] + n_pyslice_gids = n_orig_gids[::stride] + + n_pyslice_gids_on_rank = sorted( + (n.global_id for n in nest.NodeCollection(n_pyslice_gids) if n.vp % nest.NumProcesses() == nest.Rank()) + ) + + assert n_sliced._to_array()["All"] == n_pyslice_gids + + n_sliced_gids_by_rank = n_sliced._to_array("rank") + assert len(n_sliced_gids_by_rank) == 1 + assert sorted(next(iter(n_sliced_gids_by_rank.values()))) == n_pyslice_gids_on_rank + + n_sliced_gids_by_thread = n_sliced._to_array("thread") + assert sorted(itertools.chain(*n_sliced_gids_by_thread.values())) == n_pyslice_gids_on_rank + + for thread, tgids in n_sliced_gids_by_thread.items(): + for node in nest.NodeCollection(tgids): + assert node.thread == thread + + +@pytest.mark.parametrize("n_threads", num_threads) +@pytest.mark.parametrize("stride", [1, 2, 3, 7, 12]) +def test_multi_parts_slicing(n_threads, stride): + """Same test as above, but on NC from parts of different size with different gaps""" + + nest.ResetKernel() + nest.local_num_threads = n_threads + + n_orig = nest.Create("parrot_neuron", 97) + n_orig_gids = n_orig._to_array()["All"] + + n_sliced = n_orig[:5] + n_orig[6:7] + n_orig[8:18] + n_orig[23:34] + n_orig[41:] + n_pyslice_gids = n_orig_gids[:5] + n_orig_gids[6:7] + n_orig_gids[8:18] + n_orig_gids[23:34] + n_orig_gids[41:] + + 
n_pyslice_gids_on_rank = sorted( + (n.global_id for n in nest.NodeCollection(n_pyslice_gids) if n.vp % nest.NumProcesses() == nest.Rank()) + ) + + assert n_sliced._to_array()["All"] == n_pyslice_gids + + n_sliced_gids_by_rank = n_sliced._to_array("rank") + assert len(n_sliced_gids_by_rank) == 1 + assert sorted(next(iter(n_sliced_gids_by_rank.values()))) == n_pyslice_gids_on_rank + + n_sliced_gids_by_thread = n_sliced._to_array("thread") + assert sorted(itertools.chain(*n_sliced_gids_by_thread.values())) == n_pyslice_gids_on_rank + + for thread, tgids in n_sliced_gids_by_thread.items(): + for node in nest.NodeCollection(tgids): + assert node.thread == thread + + +@pytest.mark.parametrize("n_threads", num_threads) +@pytest.mark.parametrize("start, step", ([[0, 1], [0, 3]] + [[2, n] for n in range(1, 9)])) +def test_get_positions_with_mpi(n_threads, start, step): + """ + Test that correct positions can be obtained from sliced node collections. + + The first two parameter cases above start without an offset, the remaining ones use a small offset. + With the range of step values, combined with 3 and 4 MPI processes, we ensure + that we have cases where the step is half of or a multiple of the number of + processes. + """ + + num_neurons = 128 + + nest.ResetKernel() + nest.local_num_threads = n_threads + + # Need floats because NEST returns positions as floats + node_pos = [(float(x), 0.0) for x in range(num_neurons)] + + layer = nest.Create( + model="parrot_neuron", + n=num_neurons, + positions=nest.spatial.free(pos=node_pos, edge_wrap=False), + ) + + pos = layer[start::step].spatial["positions"] + node_ranks = [n.vp % nest.NumProcesses() for n in layer] + assert len(node_ranks) == num_neurons + + # pos is a tuple of tuples, so we need to create a tuple for comparison + expected_pos = tuple( + npos for npos, nrk in zip(node_pos[start::step], node_ranks[start::step]) if nrk == nest.Rank() + ) + + assert pos == expected_pos + + +@pytest.mark.parametrize("n_threads", num_threads) +@pytest.mark.parametrize("pick", range(0, 7)) +def test_get_spatial_for_single_element_and_mpi(n_threads, pick): + """ + Test that spatial information can be collected from a single layer element. + + This was an original minimal reproducer for #3108. + """ + + num_neurons = 7 + + nest.ResetKernel() + nest.local_num_threads = n_threads + + node_pos = [(float(x), 0.0) for x in range(num_neurons)] + + layer = nest.Create( + model="parrot_neuron", + n=num_neurons, + positions=nest.spatial.free(pos=node_pos, edge_wrap=False), + ) + + # We want to retrieve this on all ranks to see that it does not break NEST + sp = layer[pick].spatial["positions"] + + pick_rank = layer[pick].vp % nest.NumProcesses() + if pick_rank == nest.Rank(): + assert sp[0] == node_pos[pick] + else: + assert len(sp) == 0 + + +@pytest.mark.parametrize("n_threads", num_threads) +@pytest.mark.parametrize("pick", range(0, 5)) +def test_connect_with_single_element_slice_and_mpi(n_threads, pick): + """ + Test that connection with a single-element sliced layer is possible on multiple MPI processes. + + This was an original minimal reproducer for #3108.
+ """ + + num_neurons = 5 + + nest.ResetKernel() + nest.local_num_threads = n_threads + + layer = nest.Create( + model="parrot_neuron", + n=num_neurons, + positions=nest.spatial.free(pos=nest.random.uniform(min=-1, max=1), num_dimensions=2, edge_wrap=False), + ) + + # space-dependent syn_spec passed only to force use of ConnectLayers + nest.Connect(layer[pick], layer, {"rule": "pairwise_bernoulli", "p": 1.0}, {"weight": nest.spatial.distance}) + + local_nodes = tuple(n.global_id for n in layer if n.vp % nest.NumProcesses() == nest.Rank()) + + c = nest.GetConnections() + src = tuple(c.sources()) + tgt = tuple(c.targets()) + assert src == (layer[pick].global_id,) * len(local_nodes) + assert sorted(tgt) == sorted(local_nodes) + + +@pytest.mark.parametrize("n_threads", num_threads) +@pytest.mark.parametrize("sstep", [2, 3, 4, 6]) +@pytest.mark.parametrize("tstep", [2, 3, 4, 6]) +def test_connect_slice_to_slice_and_mpi(n_threads, sstep, tstep): + """ + Test that connection with stepped source and target layers is possible on multiple mpi processes. + + This was an original minimal reproducer for #3108. + """ + + num_neurons = 128 + + nest.ResetKernel() + nest.local_num_threads = n_threads + + layer = nest.Create( + model="parrot_neuron", + n=num_neurons, + positions=nest.spatial.free(pos=nest.random.uniform(min=-1, max=1), num_dimensions=2, edge_wrap=False), + ) + + # space-dependent syn_spec passed only to force use of ConnectLayers + nest.Connect( + layer[::sstep], layer[2::tstep], {"rule": "pairwise_bernoulli", "p": 1.0}, {"weight": nest.spatial.distance} + ) + + local_nodes = tuple(n.global_id for n in layer if n.vp % nest.NumProcesses() == nest.Rank()) + local_targets = set(n.global_id for n in layer[2::tstep] if n.vp % nest.NumProcesses() == nest.Rank()) + + c = nest.GetConnections() + src = tuple(c.sources()) + tgt = tuple(c.targets()) + + assert len(c) == len(layer[::sstep]) * len(local_targets) + assert set(tgt) == local_targets # all local neurons in layer[2::tstep] must be targets + if local_targets: + assert set(src) == set(layer[::sstep].global_id) # all neurons in layer[::sstep] neurons must be sources diff --git a/testsuite/pytests/test_nodeParametrization.py b/testsuite/pytests/test_node_parametrization.py similarity index 99% rename from testsuite/pytests/test_nodeParametrization.py rename to testsuite/pytests/test_node_parametrization.py index 857b0c4bef..e78c3c3057 100644 --- a/testsuite/pytests/test_nodeParametrization.py +++ b/testsuite/pytests/test_node_parametrization.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# test_nodeParametrization.py +# test_node_parametrization.py # # This file is part of NEST. # diff --git a/testsuite/pytests/test_regression_issue-3213.py b/testsuite/pytests/test_regression_issue-3213.py new file mode 100644 index 0000000000..2e13f9aac7 --- /dev/null +++ b/testsuite/pytests/test_regression_issue-3213.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# +# test_regression_issue-3213.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . + +import nest +import pytest + +""" +Test that GetConnections works if a NodeCollection with gaps is provided as the source argument. +""" + + +def test_get_conns_works(): + """The main concern is that GetConnections() passes; the expected number of connections follows from all-to-all connectivity.""" + + num_n = 12 + n = nest.Create("parrot_neuron", num_n) + nest.Connect(n, n) + pick = [3, 7, 9, 11] + conns = nest.GetConnections(source=n[pick]) + assert len(conns) == num_n * len(pick) diff --git a/testsuite/pytests/test_spatial/test_connect_layers.py b/testsuite/pytests/test_spatial/test_connect_layers.py index 219844d829..2357036e69 100644 --- a/testsuite/pytests/test_spatial/test_connect_layers.py +++ b/testsuite/pytests/test_spatial/test_connect_layers.py @@ -154,7 +154,7 @@ def _assert_connect_layers_multapses(self, multapses): else: self.assertEqual(num_nonunique_conns, 0) - def _assert_connect_sliced(self, pre, post): + def _assert_connect_sliced(self, pre, post, kind): """Helper function which asserts that connecting with ConnectLayers on the SLI level gives the expected number of connections.""" # Using distance based probability with zero weight to @@ -165,7 +165,9 @@ def _assert_connect_sliced(self, pre, post): nest.Connect(pre, post, conn_spec) conns = nest.GetConnections() - result = "{} ({}), pre length={}, post length={}".format(len(conns), expected_conns, len(pre), len(post)) + result = "{} ({}), pre length={}, post length={} (kind {})".format( + len(conns), expected_conns, len(pre), len(post), kind + ) print(result) self.assertEqual(len(conns), expected_conns, "pre length={}, post length={}".format(len(pre), len(post))) @@ -422,12 +424,12 @@ def test_connect_sliced_grid_layer(self): layers = self._reset_and_create_sliced(positions) layer = layers["layer"] sliced_pre = layers[sliced] - self._assert_connect_sliced(sliced_pre, layer) + self._assert_connect_sliced(sliced_pre, layer, f"{sliced} pre") for sliced in ["single", "range", "step"]: layers = self._reset_and_create_sliced(positions) layer = layers["layer"] sliced_post = layers[sliced] - self._assert_connect_sliced(layer, sliced_post) + self._assert_connect_sliced(layer, sliced_post, f"{sliced} post") def test_connect_sliced_free_layer(self): """Connecting with sliced free layer""" @@ -436,12 +438,12 @@ def test_connect_sliced_free_layer(self): layers = self._reset_and_create_sliced(positions) layer = layers["layer"] sliced_pre = layers[sliced] - self._assert_connect_sliced(sliced_pre, layer) + self._assert_connect_sliced(sliced_pre, layer, f"{sliced} pre") for sliced in ["single", "range", "step"]: layers = self._reset_and_create_sliced(positions) layer = layers["layer"] sliced_post = layers[sliced] - self._assert_connect_sliced(layer, sliced_post) + self._assert_connect_sliced(layer, sliced_post, f"{sliced} post") def test_connect_synapse_label(self): indegree = 10 diff --git a/testsuite/pytests/test_stdp_nn_synapses.py b/testsuite/pytests/test_stdp_nn_synapses.py index 8313bf6ac3..ae25882ff5 100644 --- a/testsuite/pytests/test_stdp_nn_synapses.py +++ b/testsuite/pytests/test_stdp_nn_synapses.py @@ -187,18 +187,16 @@ def reproduce_weight_drift(self, _pre_spikes, _post_spikes, _initial_weight): t = _pre_spikes[pre_spikes_forced_to_grid.index(time_in_simulation_steps)] # Evaluating the depression rule.
- if self.synapse_parameters["synapse_model"] == "stdp_nn_restr_synapse": - current_nearest_neighbour_pair_is_suitable = t_previous_post > t_previous_pre - # If '<', t_previous_post has already been paired - # with t_previous_pre, thus due to the restricted - # pairing scheme we do not account it. - if ( - self.synapse_parameters["synapse_model"] == "stdp_nn_symm_synapse" - or self.synapse_parameters["synapse_model"] == "stdp_nn_pre_centered_synapse" - ): - # The current pre-spike is simply paired with the - # nearest post-spike. - current_nearest_neighbour_pair_is_suitable = True + # For the first two rules below, simply pair current pre-spike with nearest post-spike. + # For "nn_restr" and `post < pre`, the previous post has already been paired, thus due + # to the restricted pairing scheme, we do not take it into account. + current_nearest_neighbour_pair_is_suitable = self.synapse_parameters["synapse_model"] in [ + "stdp_nn_symm_synapse", + "stdp_nn_pre_centered_synapse", + ] or ( + self.synapse_parameters["synapse_model"] == "stdp_nn_restr_synapse" + and t_previous_post > t_previous_pre + ) if current_nearest_neighbour_pair_is_suitable and t_previous_post != -1: # Otherwise, if == -1, there have been diff --git a/testsuite/pytests/test_tripartite_connect.py b/testsuite/pytests/test_tripartite_connect.py index 115c128ecd..488513d979 100644 --- a/testsuite/pytests/test_tripartite_connect.py +++ b/testsuite/pytests/test_tripartite_connect.py @@ -122,7 +122,7 @@ def test_block_pool_wide(): assert len(nest.GetConnections(third, post)) == n_primary -def test_bipartitet_raises(): +def test_bipartite_raises(): n_pre, n_post, n_third = 4, 2, 8 pre = nest.Create("parrot_neuron", n_pre) post = nest.Create("parrot_neuron", n_post) @@ -132,6 +132,26 @@ def test_bipartitet_raises(): nest.TripartiteConnect(pre, post, third, {"rule": "one_to_one"}) + +@pytest.mark.skipif_missing_threads +def test_sliced_third(): + """Test that connection works on multiple threads when using a complex node collection as the third factor.""" + + nest.local_num_threads = 4 + nrn = nest.Create("parrot_neuron", 20) + third = (nrn[:3] + nrn[5:])[::3] + + nest.TripartiteConnect( + nrn, nrn, third, {"rule": "tripartite_bernoulli_with_pool", "pool_type": "random", "pool_size": 2} + ) + + t_in = nest.GetConnections(target=third) + t_out = nest.GetConnections(source=third) + + assert len(t_in) == len(t_out) + assert set(t_in.targets()) <= set(third.global_id) + assert set(t_out.sources()) <= set(third.global_id) + + def test_connect_complex_synspecs(): n_pre, n_post, n_third = 4, 2, 3 pre = nest.Create("parrot_neuron", n_pre) diff --git a/testsuite/summarize_tests.py b/testsuite/summarize_tests.py index 7eb26426da..6d849192cf 100644 --- a/testsuite/summarize_tests.py +++ b/testsuite/summarize_tests.py @@ -71,10 +71,15 @@ def parse_result_file(fname): for pfile in sorted(glob.glob(os.path.join(test_outdir, "*.xml"))): ph_name = os.path.splitext(os.path.split(pfile)[1])[0].replace("_", " ") - ph_res = parse_result_file(pfile) - results[ph_name] = ph_res - for k, v in ph_res.items(): - totals[k] += v + try: + ph_res = parse_result_file(pfile) + results[ph_name] = ph_res + for k, v in ph_res.items(): + totals[k] += v + except Exception as err: + msg = f"ERROR: {pfile} not parsable with error {err}" + results[ph_name] = {"Tests": 0, "Skipped": 0, "Failures": 0, "Errors": 0, "Time": 0, "Failed tests": [msg]} + totals["Failed tests"].append(msg) cols = ["Tests", "Skipped", "Failures", "Errors", "Time"] @@ -97,10 +102,13 @@ def parse_result_file(fname):
print(tline) for pn, pr in results.items(): print(f"{pn:<{first_col_w}s}", end="") - for c in cols: - fmt = ".1f" if c == "Time" else "d" - print(f"{pr[c]:{col_w}{fmt}}", end="") - print() + if pr["Tests"] == 0 and pr["Failed tests"]: + print(f"{'--- XML PARSING FAILURE ---':^{len(cols) * col_w}}") + else: + for c in cols: + fmt = ".1f" if c == "Time" else "d" + print(f"{pr[c]:{col_w}{fmt}}", end="") + print() print(tline) print(f"{'Total':<{first_col_w}s}", end="") @@ -111,7 +119,8 @@ def parse_result_file(fname): print(tline) print() - if totals["Failures"] + totals["Errors"] > 0: + # Second condition handles xml parsing failures + if totals["Failures"] + totals["Errors"] > 0 or totals["Failed tests"]: print("THE NEST TESTSUITE DISCOVERED PROBLEMS") print(" The following tests failed") for t in totals["Failed tests"]:
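For context, a minimal sketch of the failure-tolerant summary loop introduced in `summarize_tests.py` above. The body of `parse_result_file`, the JUnit-style attribute names (`tests`, `skipped`, `failures`, `errors`, `time`), and the `test_outdir` path are assumptions for illustration only; the point being demonstrated is that an unparsable XML file is recorded as a failure instead of aborting the whole summary:

```python
import glob
import os
import xml.etree.ElementTree as ET


def parse_result_file(fname):
    """Hypothetical parser: read top-level counts from a JUnit-style XML result file."""
    root = ET.parse(fname).getroot()
    suite = root if root.tag == "testsuite" else root.find("testsuite")
    return {
        "Tests": int(suite.get("tests", 0)),
        "Skipped": int(suite.get("skipped", 0)),
        "Failures": int(suite.get("failures", 0)),
        "Errors": int(suite.get("errors", 0)),
        "Time": float(suite.get("time", 0.0)),
        "Failed tests": [],
    }


test_outdir = "test_outdir"  # assumed location of the XML result files
results = {}
totals = {"Tests": 0, "Skipped": 0, "Failures": 0, "Errors": 0, "Time": 0.0, "Failed tests": []}

for pfile in sorted(glob.glob(os.path.join(test_outdir, "*.xml"))):
    ph_name = os.path.splitext(os.path.basename(pfile))[0].replace("_", " ")
    try:
        ph_res = parse_result_file(pfile)
    except Exception as err:
        # An unparsable file becomes a reported failure rather than a crash of the summary
        msg = f"ERROR: {pfile} not parsable with error {err}"
        ph_res = {"Tests": 0, "Skipped": 0, "Failures": 0, "Errors": 0, "Time": 0, "Failed tests": [msg]}
    results[ph_name] = ph_res
    for key in ("Tests", "Skipped", "Failures", "Errors", "Time"):
        totals[key] += ph_res[key]
    totals["Failed tests"] += ph_res["Failed tests"]

# A non-empty "Failed tests" list now also covers XML parsing failures,
# so the final exit condition trips even when no individual test failed.
problems_found = totals["Failures"] + totals["Errors"] > 0 or bool(totals["Failed tests"])
```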