diff --git a/build_support/check_all_c_c++_cu_files.sh b/build_support/check_all_c_c++_cu_files.sh
new file mode 100755
index 000000000..bc99fb34a
--- /dev/null
+++ b/build_support/check_all_c_c++_cu_files.sh
@@ -0,0 +1,125 @@
+#!/bin/bash
+
+#
+# This file is part of NEST GPU.
+#
+# Copyright (C) 2021 The NEST Initiative
+#
+# NEST GPU is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST GPU is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST GPU. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# With this script you can easily check all C/C++/CU files contained in
+# the src directory of NEST GPU. Internally it uses clang-tidy to do
+# the actual check.
+#
+
+function make_temp_dir {
+ # Create a temporary directory and store its name in a variable.
+ TEMPD=$(mktemp -d)
+
+ # Exit if the temp directory wasn't created successfully.
+ if [ ! -e "$TEMPD" ]; then
+ >&2 echo "Error: failed to create temp directory"
+ exit 1
+ fi
+
+
+ # Make sure the temp directory gets removed on script exit.
+ trap "exit 1" HUP INT PIPE QUIT TERM
+ trap 'rm -rf "$TEMPD"' EXIT
+}
+
+if [ "$#" -ne 1 ]; then
+ # The argument placeholder was lost (stripped angle brackets); restore it.
+ echo "Usage: $0 <source-directory>"
+ exit 1
+fi
+
+# Locate the companion clang-tidy wrapper next to this script.
+CMD_DIR=$(dirname "$0")
+CLANG_TIDY=${CMD_DIR}/clang-tidy-cuda.sh
+
+if [ ! -f "$CLANG_TIDY" ]; then
+ echo "Error: $CLANG_TIDY file not found in $CMD_DIR folder"
+ exit 1
+fi
+
+SRC_DIR=$1
+if [ -d "$SRC_DIR" ]; then
+ if [ -L "$SRC_DIR" ]; then
+ # It is a symlink
+ echo "Error: cannot pass a symbolic link as source path"
+ exit 1
+ fi
+else
+ echo "Error: source path $SRC_DIR not found"
+ exit 1
+fi
+
+make_temp_dir
+CONF_DIR=${TEMPD}/config
+mkdir $CONF_DIR
+if [ ! -e "$CONF_DIR" ]; then
+ >&2 echo "Error: failed to create $CONF_DIR directory"
+ exit 1
+fi
+CONF_H=${CONF_DIR}/config.h
+:>$CONF_H
+if [ ! -f $CONF_H ]; then
+ echo "Error: cannot create temporary file $CONF_H"
+ exit 1
+fi
+
+
+cp $CLANG_TIDY $TEMPD
+CLANG_TIDY=$(basename $CLANG_TIDY)
+if [ ! -f $TEMPD/$CLANG_TIDY ]; then
+ echo "Error: cannot create temporary executable $CLANG_TIDY in folder $TEMPD"
+ exit 1
+fi
+
+pushd .
+cd $SRC_DIR
+
+for fn in $(ls *.cu *.cpp *.cc *.c *.cuh *.hpp *.h); do
+ cat $fn | sed 's:////:#if 0:;s:////:#endif:' > $TEMPD/$fn
+ if [ ! -f $TEMPD/$fn ]; then
+ echo "Error: cannot create file $TEMPD/$fn"
+ popd
+ exit 1
+ fi
+done
+
+
+cd $TEMPD
+
+PASSED_NUM=0
+for fn in $(ls *.cu *.cpp *.cc *.c | grep -v user_m); do
+ echo " - Check with $CLANG_TIDY C/C++/CUDA file: $fn"
+ #$TEMPD/$CLANG_TIDY --include-path=../../build_cmake/libnestutil/ $fn
+ echo "$TEMPD/$CLANG_TIDY --include-path=$CONF_DIR $fn"
+ $TEMPD/$CLANG_TIDY --include-path=$CONF_DIR $fn
+ if [ $? -eq 0 ]; then
+ echo PASSED
+ PASSED_NUM=$(($PASSED_NUM + 1))
+ else
+ popd
+ exit 1
+ fi
+
+done
+
+popd
+echo "Checked $PASSED_NUM files with clang-tidy-cuda.sh"
+echo "All tests PASSED"
+
+exit 0
diff --git a/build_support/clang-format-cuda.sh b/build_support/clang-format-cuda.sh
new file mode 100755
index 000000000..e31fef014
--- /dev/null
+++ b/build_support/clang-format-cuda.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# NOTE: 'return' outside a function in an executed script is an error in
+# bash and does NOT stop the script; every guard must 'exit 1' instead.
+if [ "$#" -ne 1 ]; then
+ echo "Usage: $0 input-file"
+ exit 1
+fi
+
+if [ ! -f .clang-format ]; then
+ echo "Error: .clang-format file not found in current directory"
+ exit 1
+fi
+
+if [ ! -f "$1" ]; then
+ echo "Error: input file $1 not found"
+ exit 1
+fi
+
+if grep -q '$$<' "$1"; then
+ echo 'Error: illegal character sequence in input file: "$$<"'
+ exit 1
+fi
+if grep -q '$ >' "$1"; then
+ echo 'Error: illegal character sequence in input file: "$ >"'
+ exit 1
+fi
+if grep -q '$>' "$1"; then
+ echo 'Error: illegal character sequence in input file: "$>"'
+ exit 1
+fi
+
+cat "$1" | sed 's/<<$$>>/$ >/g;' > tmp1~
+clang-format -style=file:.clang-format tmp1~ > tmp2~
+cat tmp2~ | sed 's/$$<</>>>/g;s/$>/>>>/g;' > "$1"
+rm -f tmp1~
+rm -f tmp2~
diff --git a/build_support/clang-tidy-cuda.sh b/build_support/clang-tidy-cuda.sh
new file mode 100755
index 000000000..d16af0626
--- /dev/null
+++ b/build_support/clang-tidy-cuda.sh
@@ -0,0 +1,145 @@
+#!/bin/bash
+
+#
+# This file is part of NEST GPU.
+#
+# Copyright (C) 2021 The NEST Initiative
+#
+# NEST GPU is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST GPU is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST GPU. If not, see <http://www.gnu.org/licenses/>.
+#
+
+cuda_default_path="/usr/local/cuda/include"
+
+if [ "$#" -eq 0 ]; then
+ echo "Usage: $0 [--include-path=INCLUDE_PATHS] [--cuda-path=CUDA_PATHS] [--mpi-path=MPI_PATHS] input-file"
+ echo "where INCLUDE_PATHS are optional header paths separated by colons,"
+ echo "CUDA_PATHS are the paths of CUDA headers separated by colons"
+ echo "(default: $cuda_default_path)"
+ echo "and MPI_PATHS are the paths of MPI headers separated by colons"
+ exit 0
+fi
+
+cuda_path=""
+mpi_path=""
+include_path=""
+
+for i in "$@"; do
+ case $i in
+ --include-path=*)
+ include_path="${i#*=}"
+ shift # past argument=value
+ ;;
+ --cuda-path=*)
+ cuda_path="${i#*=}"
+ shift # past argument=value
+ ;;
+ --mpi-path=*)
+ mpi_path="${i#*=}"
+ shift # past argument=value
+ ;;
+ -*|--*)
+ echo "Error: unknown option $i"
+ exit 1
+ ;;
+ *)
+ ;;
+ esac
+done
+
+if [[ -n $1 ]]; then
+ echo "Input file: $1"
+else
+ echo "Error: input file not specified."
+ exit 1
+fi
+
+if [ ! -f $1 ]; then
+ echo "Error: input file $1 not found."
+ exit 1
+fi
+
+if [ "$include_path" != "" ]; then
+ include_path=$(echo ":$include_path" | sed 's/::*/:/g;s/:$//;s/:/ -I /g')
+fi
+
+# Searches the paths of CUDA headers
+if [ "$cuda_path" == "" ]; then
+ # Reuse the declared default instead of duplicating the literal path.
+ cuda_path=":$cuda_default_path"
+else
+ cuda_path=$(echo ":$cuda_path" | sed 's/::*/:/g;s/:$//')
+fi
+
+cuda_path_spaced=$(echo $cuda_path | tr ':' ' ')
+cuda_err=1
+for dn in $cuda_path_spaced; do
+ if test -f "$dn/cuda.h" ; then
+ echo "cuda.h found in path $dn"
+ cuda_err=0
+ break
+ fi
+done
+
+if [ $cuda_err -eq 1 ]; then
+ echo "cuda.h not found in path(s) $cuda_path_spaced"
+ echo "You can specify path for CUDA headers with the option --cuda-path=CUDA_PATHS"
+ echo "where CUDA_PATHS are the paths of CUDA headers separated by colons"
+ echo "(default: $cuda_default_path)"
+ exit 1
+fi
+
+cuda_include=$(echo $cuda_path | sed 's/:/ -isystem /g')
+
+#cat $1 | sed 's:////:#if 0:;s:////:#endif:' > tmp~
+
+#cat ../build_cmake/compile_commands.json | sed "s:-Xcompiler=-fPIC::;s:-forward-unknown-to-host-compiler::;s:--compiler-options='.*'::;s:--generate-code=arch=compute_80,code=\[compute_80,sm_80\]::;s:--maxrregcount=55::" > compile_commands.json
+
+# Searches the paths of MPI headers
+if [ "$mpi_path" == "" ]; then
+ mpi_include=$( \
+ for l in $(mpicc -showme); do \
+ echo $l; \
+ done | grep '^-I')
+ if [ "$mpi_include" == "" ]; then
+ echo "Error: cannot find MPI include paths"
+ echo "You can specify path for MPI headers with the option --mpi-path=MPI_PATHS"
+ echo "where MPI_PATHS are the paths of MPI headers separated by colons"
+ exit 1
+ fi
+ mpi_path_spaced=$(echo $mpi_include | sed 's/-I/ /g')
+ mpi_include=$(echo $mpi_include | sed 's/-I/ -isystem /g')
+else
+ mpi_path=$(echo ":$mpi_path" | sed 's/::*/:/g;s/:$//')
+ mpi_path_spaced=$(echo $mpi_path | tr ':' ' ')
+ mpi_include=$(echo $mpi_path | sed 's/:/ -isystem /g')
+fi
+
+mpi_err=1
+for dn in $mpi_path_spaced; do
+ if test -f "$dn/mpi.h" ; then
+ echo "mpi.h found in path $dn"
+ mpi_err=0
+ break
+ fi
+done
+
+if [ $mpi_err -eq 1 ]; then
+ echo "mpi.h not found in path(s) $mpi_path_spaced"
+ echo "You can specify path for MPI headers with the option --mpi-path=MPI_PATHS"
+ echo "where MPI_PATHS are the paths of MPI headers separated by colons"
+ exit 1
+fi
+
+echo "clang-tidy $1 -p . -- $include_path $mpi_include $cuda_include --no-cuda-version-check"
+
+clang-tidy $1 -p . -- $include_path $mpi_include $cuda_include --no-cuda-version-check
diff --git a/build_support/format_all_c_c++_cu_files.sh b/build_support/format_all_c_c++_cu_files.sh
new file mode 100755
index 000000000..b0ffed1af
--- /dev/null
+++ b/build_support/format_all_c_c++_cu_files.sh
@@ -0,0 +1,163 @@
+#!/bin/bash
+
+#
+# This file is part of NEST GPU.
+#
+# Copyright (C) 2021 The NEST Initiative
+#
+# NEST GPU is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST GPU is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST GPU. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# With this script you can easily format all C/C++/CU files contained in
+# the src directory of NEST GPU. Internally it uses clang-format to do
+# the actual formatting.
+#
+# NEST GPU C/C++/CUDA code should be formatted according to clang-format
+# version 17.0.4. If you would like to see how the code will be formatted
+# with a different clang-format version, execute e.g.
+# `CLANG_FORMAT=clang-format-14 ./format_all_c_c++_cu_files.sh`.
+#
+# By default the script starts at the current working directory ($PWD), but
+# supply a different starting directory as the first argument to the command.
+
+CLANG_FORMAT=${CLANG_FORMAT:-clang-format}
+CLANG_FORMAT_FILE=${CLANG_FORMAT_FILE:-${PWD}/.clang-format}
+
+# Drop files that should not be checked
+FILES_TO_IGNORE="" # not used now, but could be used in the future
+DIRS_TO_IGNORE="thirdparty" # not used now, but could be used in the future
+
+CHANGE_COUNT=0
+
+function clang_format_cuda {
+ if [ ! -f "$1" ]; then
+ echo "Error: input file $1 not found"
+ return
+ fi
+
+ if grep -q '$$<' "$1"; then
+ echo 'Error: illegal character sequence in input file: "$$<"'
+ return
+ fi
+ if grep -q '$ >' "$1"; then
+ echo 'Error: illegal character sequence in input file: "$ >"'
+ return
+ fi
+ if grep -q '$>' "$1"; then
+ echo 'Error: illegal character sequence in input file: "$>"'
+ return
+ fi
+
+ cat "$1" | sed 's/<<$$>>/$ >/g;' > $TEMPD/tmp1~
+ # Honor the documented CLANG_FORMAT override (see file header) instead of a bare clang-format.
+ $CLANG_FORMAT -style=file:$CLANG_FORMAT_FILE $TEMPD/tmp1~ > $TEMPD/tmp2~
+ cat $TEMPD/tmp2~ | sed 's/$$<</>>>/g;s/$>/>>>/g;' > $TEMPD/tmp1~
+ if ! cmp -s $TEMPD/tmp1~ "$1"; then # file changed by clang-format
+ /bin/cp -f $TEMPD/tmp1~ "$1"
+ CHANGE_COUNT=$((CHANGE_COUNT+1))
+ echo " FILE CHANGED BY FORMATTING"
+ fi
+}
+
+# Recursively process all C/C++/CUDA files in all sub-directories.
+function process_dir {
+ dir=$1
+ echo "Process directory: $dir"
+
+ if [[ " $DIRS_TO_IGNORE " =~ .*[[:space:]]${dir##*/}[[:space:]].* ]]; then
+ echo " Directory explicitly ignored."
+ return
+ fi
+
+ for f in $dir/*; do
+ if [[ -d $f ]]; then
+ # Recursively process sub-directories.
+ process_dir $f
+ else
+ ignore_file=0
+
+ for FILE_TO_IGNORE in $FILES_TO_IGNORE; do
+ if [[ $f == *$FILE_TO_IGNORE* ]]; then
+ ignore_file=1
+ break
+ fi
+ done
+
+ if [ $ignore_file == 1 ] ; then
+ continue
+ fi
+
+ case $f in
+ *.cpp | *.cc | *.c | *.h | *.hpp | *.cu | *.cuh )
+ # Format C/C++/CUDA files.
+ echo " - Format C/C++/CUDA file: $f"
+ # $CLANG_FORMAT -i $f
+ clang_format_cuda $f
+ ;;
+ * )
+ # Ignore all other files.
+ esac
+ fi
+ done
+}
+
+function help_output {
+ echo "The $CLANG_FORMAT_FILE requires clang-format version 13 or later."
+ echo "Use like: [CLANG_FORMAT=] ./build_support/`basename $0` [start folder, defaults to '$PWD']"
+ exit 0
+}
+
+function make_temp_dir {
+ # Create a temporary directory and store its name in a variable.
+ TEMPD=$(mktemp -d)
+
+ # Exit if the temp directory wasn't created successfully.
+ if [ ! -e "$TEMPD" ]; then
+ >&2 echo "Failed to create temp directory"
+ exit 1
+ fi
+
+
+ # Make sure the temp directory gets removed on script exit.
+ trap "exit 1" HUP INT PIPE QUIT TERM
+ trap 'rm -rf "$TEMPD"' EXIT
+}
+
+make_temp_dir
+
+if [[ ! -f $CLANG_FORMAT_FILE ]]; then
+ echo "Cannot find $CLANG_FORMAT_FILE file. Please start '`basename $0`' from the NEST GPU base source directory."
+ help_output
+fi
+
+if [[ $# -eq 0 ]]; then
+ # Start with current directory.
+ startdir=$PWD
+elif [[ $# -eq 1 ]]; then
+ if [[ -d $1 ]]; then
+ # Start with given directory.
+ startdir=$1
+ else
+ # Not a directory.
+ help_output
+ fi
+else
+ # Two or more arguments...
+ help_output
+fi
+
+# Start formatting the $startdir and all subdirectories
+process_dir $startdir
+
+echo "$CHANGE_COUNT files have been changed by formatting"
diff --git a/build_support/log.txt b/build_support/log.txt
new file mode 100644
index 000000000..c8ec400c0
--- /dev/null
+++ b/build_support/log.txt
@@ -0,0 +1,522 @@
+~/nest-gpu-git/golosio/nest-gpu/build_support ~/nest-gpu-git/golosio/nest-gpu/build_support
+ls: cannot access '*.cc': No such file or directory
+ls: cannot access '*.c': No such file or directory
+ls: cannot access '*.cuh': No such file or directory
+ls: cannot access '*.hpp': No such file or directory
+ls: cannot access '*.cc': No such file or directory
+ls: cannot access '*.c': No such file or directory
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: aeif_cond_alpha.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config aeif_cond_alpha.cu
+Input file: aeif_cond_alpha.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy aeif_cond_alpha.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: aeif_cond_alpha_multisynapse.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config aeif_cond_alpha_multisynapse.cu
+Input file: aeif_cond_alpha_multisynapse.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy aeif_cond_alpha_multisynapse.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: aeif_cond_beta.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config aeif_cond_beta.cu
+Input file: aeif_cond_beta.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy aeif_cond_beta.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: aeif_cond_beta_multisynapse.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config aeif_cond_beta_multisynapse.cu
+Input file: aeif_cond_beta_multisynapse.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy aeif_cond_beta_multisynapse.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: aeif_psc_alpha.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config aeif_psc_alpha.cu
+Input file: aeif_psc_alpha.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy aeif_psc_alpha.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: aeif_psc_alpha_multisynapse.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config aeif_psc_alpha_multisynapse.cu
+Input file: aeif_psc_alpha_multisynapse.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy aeif_psc_alpha_multisynapse.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: aeif_psc_delta.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config aeif_psc_delta.cu
+Input file: aeif_psc_delta.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy aeif_psc_delta.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: aeif_psc_exp.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config aeif_psc_exp.cu
+Input file: aeif_psc_exp.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy aeif_psc_exp.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: aeif_psc_exp_multisynapse.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config aeif_psc_exp_multisynapse.cu
+Input file: aeif_psc_exp_multisynapse.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy aeif_psc_exp_multisynapse.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: base_neuron.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config base_neuron.cu
+Input file: base_neuron.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy base_neuron.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: connect.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config connect.cu
+Input file: connect.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy connect.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: connect_mpi.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config connect_mpi.cu
+Input file: connect_mpi.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy connect_mpi.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: connect_rules.cpp
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config connect_rules.cpp
+Input file: connect_rules.cpp
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy connect_rules.cpp -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+2 warnings generated.
+warning: argument unused during compilation: '--no-cuda-version-check' [clang-diagnostic-unused-command-line-argument]
+Suppressed 1 warnings (1 in non-user code).
+Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: dummyfile.cpp
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config dummyfile.cpp
+Input file: dummyfile.cpp
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy dummyfile.cpp -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated.
+warning: argument unused during compilation: '--no-cuda-version-check' [clang-diagnostic-unused-command-line-argument]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: ext_neuron.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config ext_neuron.cu
+Input file: ext_neuron.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy ext_neuron.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: getRealTime.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config getRealTime.cu
+Input file: getRealTime.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy getRealTime.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: get_spike.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config get_spike.cu
+Input file: get_spike.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy get_spike.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+2 warnings generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Suppressed 1 warnings (1 in non-user code).
+Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: iaf_psc_alpha.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config iaf_psc_alpha.cu
+Input file: iaf_psc_alpha.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy iaf_psc_alpha.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: iaf_psc_exp.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config iaf_psc_exp.cu
+Input file: iaf_psc_exp.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy iaf_psc_exp.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: iaf_psc_exp_g.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config iaf_psc_exp_g.cu
+Input file: iaf_psc_exp_g.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy iaf_psc_exp_g.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: iaf_psc_exp_hc.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config iaf_psc_exp_hc.cu
+Input file: iaf_psc_exp_hc.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy iaf_psc_exp_hc.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: izhikevich_cond_beta.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config izhikevich_cond_beta.cu
+Input file: izhikevich_cond_beta.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy izhikevich_cond_beta.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: izhikevich.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config izhikevich.cu
+Input file: izhikevich.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy izhikevich.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: izhikevich_psc_exp_2s.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config izhikevich_psc_exp_2s.cu
+Input file: izhikevich_psc_exp_2s.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy izhikevich_psc_exp_2s.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: izhikevich_psc_exp_5s.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config izhikevich_psc_exp_5s.cu
+Input file: izhikevich_psc_exp_5s.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy izhikevich_psc_exp_5s.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: izhikevich_psc_exp.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config izhikevich_psc_exp.cu
+Input file: izhikevich_psc_exp.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy izhikevich_psc_exp.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: locate.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config locate.cu
+Input file: locate.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy locate.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: multimeter.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config multimeter.cu
+Input file: multimeter.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy multimeter.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: nestgpu_C.cpp
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config nestgpu_C.cpp
+Input file: nestgpu_C.cpp
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy nestgpu_C.cpp -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+2 warnings generated.
+warning: argument unused during compilation: '--no-cuda-version-check' [clang-diagnostic-unused-command-line-argument]
+Suppressed 1 warnings (1 in non-user code).
+Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: nestgpu.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config nestgpu.cu
+Input file: nestgpu.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy nestgpu.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+2 warnings generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Suppressed 1 warnings (1 in non-user code).
+Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: neuron_models.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config neuron_models.cu
+Input file: neuron_models.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy neuron_models.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+2 warnings generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Suppressed 1 warnings (1 in non-user code).
+Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: node_group.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config node_group.cu
+Input file: node_group.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy node_group.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+3 warnings generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Suppressed 2 warnings (2 in non-user code).
+Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: parrot_neuron.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config parrot_neuron.cu
+Input file: parrot_neuron.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy parrot_neuron.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+2 warnings generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Suppressed 1 warnings (1 in non-user code).
+Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: poiss_gen.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config poiss_gen.cu
+Input file: poiss_gen.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy poiss_gen.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+2 warnings generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Suppressed 1 warnings (1 in non-user code).
+Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: poisson.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config poisson.cu
+Input file: poisson.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy poisson.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: prefix_scan.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config prefix_scan.cu
+Input file: prefix_scan.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy prefix_scan.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: propagator_stability.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config propagator_stability.cu
+Input file: propagator_stability.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy propagator_stability.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: random.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config random.cu
+Input file: random.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy random.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: rev_spike.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config rev_spike.cu
+Input file: rev_spike.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy rev_spike.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: rk5.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config rk5.cu
+Input file: rk5.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy rk5.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: scan.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config scan.cu
+Input file: scan.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy scan.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: send_spike.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config send_spike.cu
+Input file: send_spike.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy send_spike.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: spike_buffer.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config spike_buffer.cu
+Input file: spike_buffer.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy spike_buffer.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: spike_detector.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config spike_detector.cu
+Input file: spike_detector.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy spike_detector.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+2 warnings generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Suppressed 1 warnings (1 in non-user code).
+Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: spike_generator.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config spike_generator.cu
+Input file: spike_generator.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy spike_generator.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+2 warnings generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Suppressed 1 warnings (1 in non-user code).
+Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: spike_mpi.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config spike_mpi.cu
+Input file: spike_mpi.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy spike_mpi.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: stdp.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config stdp.cu
+Input file: stdp.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy stdp.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: syn_model.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config syn_model.cu
+Input file: syn_model.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy syn_model.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+2 warnings generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Suppressed 1 warnings (1 in non-user code).
+Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+ - Check with clang-tidy-cuda.sh C/C++/CUDA file: test_syn_model.cu
+/tmp/tmp.gXRoMuKv8t/clang-tidy-cuda.sh --include-path=/tmp/tmp.gXRoMuKv8t/config test_syn_model.cu
+Input file: test_syn_model.cu
+cuda.h found in path /usr/local/cuda/include
+mpi.h found in path /usr/lib/x86_64-linux-gnu/openmpi/include
+clang-tidy test_syn_model.cu -p . -- -I /tmp/tmp.gXRoMuKv8t/config -isystem /usr/lib/x86_64-linux-gnu/openmpi/include -isystem /usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -isystem /usr/local/cuda/include --no-cuda-version-check
+1 warning generated when compiling for host.
+warning: CUDA version is newer than the latest partially supported version 12.1 [clang-diagnostic-unknown-cuda-version]
+Resource filename: /home/golosio/.local/lib/python3.10/site-packages/clang_tidy/data/bin/clang-tidy
+PASSED
+~/nest-gpu-git/golosio/nest-gpu/build_support
+Checked 49 files with clang-tidy-cuda.sh
+All tests PASSED
diff --git a/c++/examples/brunel_mpi.cpp b/c++/examples/brunel_mpi.cpp
index fe9335e9f..be6f0d196 100644
--- a/c++/examples/brunel_mpi.cpp
+++ b/c++/examples/brunel_mpi.cpp
@@ -21,49 +21,48 @@
*/
-
-
-
-#include
+#include "nestgpu.h"
+#include
#include
+#include
#include
-#include
-#include "nestgpu.h"
using namespace std;
-int main(int argc, char *argv[])
+int
+main( int argc, char* argv[] )
{
NESTGPU ngpu;
- ngpu.ConnectMpiInit(argc, argv);
+ ngpu.ConnectMpiInit( argc, argv );
int mpi_np = ngpu.MpiNp();
- if (argc != 2 || mpi_np != 2) {
- cout << "Usage: mpirun -np 2 " << argv[0] << " n_neurons\n";
+ if ( argc != 2 || mpi_np != 2 )
+ {
+ cout << "Usage: mpirun -np 2 " << argv[ 0 ] << " n_neurons\n";
ngpu.MpiFinalize();
return 0;
}
int arg1;
- sscanf(argv[1], "%d", &arg1);
+ sscanf( argv[ 1 ], "%d", &arg1 );
int mpi_id = ngpu.MpiId();
- cout << "Building on host " << mpi_id << " ..." <
+#include "nestgpu.h"
+#include
#include
+#include
#include
-#include
-#include "nestgpu.h"
using namespace std;
-int main(int argc, char *argv[])
+int
+main( int argc, char* argv[] )
{
- if (argc != 2) {
- cout << "Usage: " << argv[0] << " n_neurons\n";
+ if ( argc != 2 )
+ {
+ cout << "Usage: " << argv[ 0 ] << " n_neurons\n";
return 0;
}
int arg1;
- sscanf(argv[1], "%d", &arg1);
+ sscanf( argv[ 1 ], "%d", &arg1 );
NESTGPU ngpu;
cout << "Building ...\n";
- ngpu.SetRandomSeed(1234ULL); // seed for GPU random numbers
-
+ ngpu.SetRandomSeed( 1234ULL ); // seed for GPU random numbers
+
int n_receptors = 2;
- int order = arg1/5;
+ int order = arg1 / 5;
int NE = 4 * order; // number of excitatory neurons
int NI = 1 * order; // number of inhibitory neurons
int n_neurons = NE + NI; // number of neurons in total
- int CE = 800; // number of excitatory synapses per neuron
- int CI = CE/4; // number of inhibitory synapses per neuron
+ int CE = 800; // number of excitatory synapses per neuron
+ int CI = CE / 4; // number of inhibitory synapses per neuron
float Wex = 0.05;
float Win = 0.35;
@@ -64,68 +63,66 @@ int main(int argc, char *argv[])
float poiss_delay = 0.2; // poisson signal delay in ms
// create poisson generator
- NodeSeq pg = ngpu.Create("poisson_generator");
- ngpu.SetNeuronParam(pg, "rate", poiss_rate);
+ NodeSeq pg = ngpu.Create( "poisson_generator" );
+ ngpu.SetNeuronParam( pg, "rate", poiss_rate );
// create n_neurons neurons with n_receptor receptor ports
- NodeSeq neuron = ngpu.Create("aeif_cond_beta", n_neurons,
- n_receptors);
- NodeSeq exc_neuron = neuron.Subseq(0,NE-1); // excitatory neuron group
- NodeSeq inh_neuron = neuron.Subseq(NE, n_neurons-1); //inhibitory neuron group
+ NodeSeq neuron = ngpu.Create( "aeif_cond_beta", n_neurons, n_receptors );
+ NodeSeq exc_neuron = neuron.Subseq( 0, NE - 1 ); // excitatory neuron group
+ NodeSeq inh_neuron = neuron.Subseq( NE, n_neurons - 1 ); // inhibitory neuron group
// neuron parameters
- float E_rev[] = {0.0, -85.0};
- float tau_decay[] = {1.0, 1.0};
- float tau_rise[] = {1.0, 1.0};
- ngpu.SetNeuronParam(neuron, "E_rev", E_rev, 2);
- ngpu.SetNeuronParam(neuron, "tau_decay", tau_decay, 2);
- ngpu.SetNeuronParam(neuron, "tau_rise", tau_rise, 2);
-
+ float E_rev[] = { 0.0, -85.0 };
+ float tau_decay[] = { 1.0, 1.0 };
+ float tau_rise[] = { 1.0, 1.0 };
+ ngpu.SetNeuronParam( neuron, "E_rev", E_rev, 2 );
+ ngpu.SetNeuronParam( neuron, "tau_decay", tau_decay, 2 );
+ ngpu.SetNeuronParam( neuron, "tau_rise", tau_rise, 2 );
+
float mean_delay = 0.5;
float std_delay = 0.25;
float min_delay = 0.1;
// Excitatory connections
// connect excitatory neurons to port 0 of all neurons
// normally distributed delays, weight Wex and CE connections per neuron
- float *exc_delays = ngpu.RandomNormalClipped(CE*n_neurons, mean_delay,
- std_delay, min_delay,
- mean_delay+3*std_delay);
-
- ConnSpec conn_spec1(FIXED_INDEGREE, CE);
+ float* exc_delays =
+ ngpu.RandomNormalClipped( CE * n_neurons, mean_delay, std_delay, min_delay, mean_delay + 3 * std_delay );
+
+ ConnSpec conn_spec1( FIXED_INDEGREE, CE );
SynSpec syn_spec1;
- syn_spec1.SetParam("receptor", 0);
- syn_spec1.SetParam("weight", Wex);
- syn_spec1.SetParam("delay_array", exc_delays);
- ngpu.Connect(exc_neuron, neuron, conn_spec1, syn_spec1);
+ syn_spec1.SetParam( "receptor", 0 );
+ syn_spec1.SetParam( "weight", Wex );
+ syn_spec1.SetParam( "delay_array", exc_delays );
+ ngpu.Connect( exc_neuron, neuron, conn_spec1, syn_spec1 );
delete[] exc_delays;
// Inhibitory connections
// connect inhibitory neurons to port 1 of all neurons
// normally distributed delays, weight Win and CI connections per neuron
- float *inh_delays = ngpu.RandomNormalClipped(CI*n_neurons, mean_delay,
- std_delay, min_delay,
- mean_delay+3*std_delay);
+ float* inh_delays =
+ ngpu.RandomNormalClipped( CI * n_neurons, mean_delay, std_delay, min_delay, mean_delay + 3 * std_delay );
- ConnSpec conn_spec2(FIXED_INDEGREE, CI);
+ ConnSpec conn_spec2( FIXED_INDEGREE, CI );
SynSpec syn_spec2;
- syn_spec2.SetParam("receptor", 1);
- syn_spec2.SetParam("weight", Win);
- syn_spec2.SetParam("delay_array", inh_delays);
- ngpu.Connect(inh_neuron, neuron, conn_spec2, syn_spec2);
+ syn_spec2.SetParam( "receptor", 1 );
+ syn_spec2.SetParam( "weight", Win );
+ syn_spec2.SetParam( "delay_array", inh_delays );
+ ngpu.Connect( inh_neuron, neuron, conn_spec2, syn_spec2 );
delete[] inh_delays;
- ConnSpec conn_spec3(ALL_TO_ALL);
- SynSpec syn_spec3(STANDARD_SYNAPSE, poiss_weight, poiss_delay, 0);
+ ConnSpec conn_spec3( ALL_TO_ALL );
+ SynSpec syn_spec3( STANDARD_SYNAPSE, poiss_weight, poiss_delay, 0 );
// connect poisson generator to port 0 of all neurons
- ngpu.Connect(pg, neuron, conn_spec3, syn_spec3);
+ ngpu.Connect( pg, neuron, conn_spec3, syn_spec3 );
char filename[] = "test_brunel_net.dat";
- int i_neuron_arr[] = {neuron[0], neuron[rand()%n_neurons],
- neuron[n_neurons-1]}; // any set of neuron indexes
+ int i_neuron_arr[] = {
+ neuron[ 0 ], neuron[ rand() % n_neurons ], neuron[ n_neurons - 1 ]
+ }; // any set of neuron indexes
// create multimeter record of V_m
- std::string var_name_arr[] = {"V_m", "V_m", "V_m"};
- ngpu.CreateRecord(string(filename), var_name_arr, i_neuron_arr, 3);
+ std::string var_name_arr[] = { "V_m", "V_m", "V_m" };
+ ngpu.CreateRecord( string( filename ), var_name_arr, i_neuron_arr, 3 );
ngpu.Simulate();
diff --git a/c++/examples/brunel_outdegree.cpp b/c++/examples/brunel_outdegree.cpp
index 3c2daf5a6..1a50f019a 100644
--- a/c++/examples/brunel_outdegree.cpp
+++ b/c++/examples/brunel_outdegree.cpp
@@ -21,39 +21,38 @@
*/
-
-
-
-#include
+#include "nestgpu.h"
+#include
#include
+#include
#include
-#include
-#include "nestgpu.h"
using namespace std;
-int main(int argc, char *argv[])
+int
+main( int argc, char* argv[] )
{
- if (argc != 2) {
- cout << "Usage: " << argv[0] << " n_neurons\n";
+ if ( argc != 2 )
+ {
+ cout << "Usage: " << argv[ 0 ] << " n_neurons\n";
return 0;
}
int arg1;
- sscanf(argv[1], "%d", &arg1);
+ sscanf( argv[ 1 ], "%d", &arg1 );
NESTGPU ngpu;
cout << "Building ...\n";
- ngpu.SetRandomSeed(12345ULL); // seed for GPU random numbers
-
+ ngpu.SetRandomSeed( 12345ULL ); // seed for GPU random numbers
+
int n_receptors = 2;
- int order = arg1/5;
+ int order = arg1 / 5;
int NE = 4 * order; // number of excitatory neurons
int NI = 1 * order; // number of inhibitory neurons
int n_neurons = NE + NI; // number of neurons in total
int CPN = 1000; // number of output connections per neuron
-
+
float Wex = 0.05;
float Win = 0.35;
@@ -63,73 +62,74 @@ int main(int argc, char *argv[])
float poiss_delay = 0.2; // poisson signal delay in ms
// create poisson generator
- NodeSeq pg = ngpu.Create("poisson_generator");
- ngpu.SetNeuronParam(pg, "rate", poiss_rate);
+ NodeSeq pg = ngpu.Create( "poisson_generator" );
+ ngpu.SetNeuronParam( pg, "rate", poiss_rate );
// create n_neurons neurons with n_receptor receptor ports
- NodeSeq neuron = ngpu.Create("aeif_cond_beta", n_neurons,
- n_receptors);
- NodeSeq exc_neuron = neuron.Subseq(0,NE-1); // excitatory neuron group
- NodeSeq inh_neuron = neuron.Subseq(NE, n_neurons-1); //inhibitory neuron group
+ NodeSeq neuron = ngpu.Create( "aeif_cond_beta", n_neurons, n_receptors );
+ NodeSeq exc_neuron = neuron.Subseq( 0, NE - 1 ); // excitatory neuron group
+ NodeSeq inh_neuron = neuron.Subseq( NE, n_neurons - 1 ); // inhibitory neuron group
// neuron parameters
- float E_rev[] = {0.0, -85.0};
- float tau_decay[] = {1.0, 1.0};
- float tau_rise[] = {1.0, 1.0};
- ngpu.SetNeuronParam(neuron, "E_rev", E_rev, 2);
- ngpu.SetNeuronParam(neuron, "tau_decay", tau_decay, 2);
- ngpu.SetNeuronParam(neuron, "tau_rise", tau_rise, 2);
-
+ float E_rev[] = { 0.0, -85.0 };
+ float tau_decay[] = { 1.0, 1.0 };
+ float tau_rise[] = { 1.0, 1.0 };
+ ngpu.SetNeuronParam( neuron, "E_rev", E_rev, 2 );
+ ngpu.SetNeuronParam( neuron, "tau_decay", tau_decay, 2 );
+ ngpu.SetNeuronParam( neuron, "tau_rise", tau_rise, 2 );
+
float mean_delay = 0.5;
float std_delay = 0.25;
float min_delay = 0.1;
// Excitatory connections
// connect excitatory neurons to port 0 of all neurons
// normally distributed delays, weight Wex and CPN connections per neuron
- float *exc_delays = ngpu.RandomNormalClipped(CPN*NE, mean_delay,
- std_delay, min_delay,
- mean_delay+3*std_delay);
-
- ConnSpec conn_spec1(FIXED_OUTDEGREE, CPN);
+ float* exc_delays =
+ ngpu.RandomNormalClipped( CPN * NE, mean_delay, std_delay, min_delay, mean_delay + 3 * std_delay );
+
+ ConnSpec conn_spec1( FIXED_OUTDEGREE, CPN );
SynSpec syn_spec1;
- syn_spec1.SetParam("receptor", 0);
- syn_spec1.SetParam("weight", Wex);
- syn_spec1.SetParam("delay_array", exc_delays);
- ngpu.Connect(exc_neuron, neuron, conn_spec1, syn_spec1);
+ syn_spec1.SetParam( "receptor", 0 );
+ syn_spec1.SetParam( "weight", Wex );
+ syn_spec1.SetParam( "delay_array", exc_delays );
+ ngpu.Connect( exc_neuron, neuron, conn_spec1, syn_spec1 );
delete[] exc_delays;
// Inhibitory connections
// connect inhibitory neurons to port 1 of all neurons
// normally distributed delays, weight Win and CPN connections per neuron
- float *inh_delays = ngpu.RandomNormalClipped(CPN*NI, mean_delay,
- std_delay, min_delay,
- mean_delay+3*std_delay);
+ float* inh_delays =
+ ngpu.RandomNormalClipped( CPN * NI, mean_delay, std_delay, min_delay, mean_delay + 3 * std_delay );
- ConnSpec conn_spec2(FIXED_OUTDEGREE, CPN);
+ ConnSpec conn_spec2( FIXED_OUTDEGREE, CPN );
SynSpec syn_spec2;
- syn_spec2.SetParam("receptor", 1);
- syn_spec2.SetParam("weight", Win);
- syn_spec2.SetParam("delay_array", inh_delays);
- ngpu.Connect(inh_neuron, neuron, conn_spec2, syn_spec2);
+ syn_spec2.SetParam( "receptor", 1 );
+ syn_spec2.SetParam( "weight", Win );
+ syn_spec2.SetParam( "delay_array", inh_delays );
+ ngpu.Connect( inh_neuron, neuron, conn_spec2, syn_spec2 );
delete[] inh_delays;
- ConnSpec conn_spec3(ALL_TO_ALL);
- SynSpec syn_spec3(STANDARD_SYNAPSE, poiss_weight, poiss_delay, 0);
+ ConnSpec conn_spec3( ALL_TO_ALL );
+ SynSpec syn_spec3( STANDARD_SYNAPSE, poiss_weight, poiss_delay, 0 );
// connect poisson generator to port 0 of all neurons
- ngpu.Connect(pg, neuron, conn_spec3, syn_spec3);
+ ngpu.Connect( pg, neuron, conn_spec3, syn_spec3 );
char filename[] = "test_brunel_outdegree.dat";
// any set of neuron indexes
- int i_neuron_arr[] = {neuron[0], neuron[rand()%n_neurons],
- neuron[rand()%n_neurons], neuron[rand()%n_neurons],
- neuron[rand()%n_neurons], neuron[rand()%n_neurons],
- neuron[rand()%n_neurons], neuron[rand()%n_neurons],
- neuron[rand()%n_neurons], neuron[n_neurons-1]};
+ int i_neuron_arr[] = { neuron[ 0 ],
+ neuron[ rand() % n_neurons ],
+ neuron[ rand() % n_neurons ],
+ neuron[ rand() % n_neurons ],
+ neuron[ rand() % n_neurons ],
+ neuron[ rand() % n_neurons ],
+ neuron[ rand() % n_neurons ],
+ neuron[ rand() % n_neurons ],
+ neuron[ rand() % n_neurons ],
+ neuron[ n_neurons - 1 ] };
// create multimeter record of V_m
- std::string var_name_arr[] = {"V_m", "V_m", "V_m", "V_m", "V_m", "V_m",
- "V_m", "V_m", "V_m", "V_m"};
- ngpu.CreateRecord(string(filename), var_name_arr, i_neuron_arr, 10);
+ std::string var_name_arr[] = { "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m" };
+ ngpu.CreateRecord( string( filename ), var_name_arr, i_neuron_arr, 10 );
ngpu.Simulate();
diff --git a/c++/examples/brunel_outdegree_mpi.cpp b/c++/examples/brunel_outdegree_mpi.cpp
index 38a8130dd..2803449f8 100644
--- a/c++/examples/brunel_outdegree_mpi.cpp
+++ b/c++/examples/brunel_outdegree_mpi.cpp
@@ -21,54 +21,53 @@
*/
-
-
-
-#include
+#include "nestgpu.h"
+#include
#include
+#include
#include
-#include
-#include "nestgpu.h"
using namespace std;
-int main(int argc, char *argv[])
+int
+main( int argc, char* argv[] )
{
NESTGPU ngpu;
- ngpu.ConnectMpiInit(argc, argv);
+ ngpu.ConnectMpiInit( argc, argv );
int mpi_np = ngpu.MpiNp();
- if (argc != 2 || mpi_np != 2) {
- cout << "Usage: mpirun -np 2 " << argv[0] << " n_neurons\n";
+ if ( argc != 2 || mpi_np != 2 )
+ {
+ cout << "Usage: mpirun -np 2 " << argv[ 0 ] << " n_neurons\n";
ngpu.MpiFinalize();
return 0;
}
int arg1;
- sscanf(argv[1], "%d", &arg1);
-
+ sscanf( argv[ 1 ], "%d", &arg1 );
+
int mpi_id = ngpu.MpiId();
- cout << "Building on host " << mpi_id << " ..." <
+#include "nestgpu.h"
+#include
#include
+#include
#include
-#include
-#include "nestgpu.h"
using namespace std;
-int main(int argc, char *argv[])
+int
+main( int argc, char* argv[] )
{
- if (argc != 2) {
- cout << "Usage: " << argv[0] << " n_neurons\n";
+ if ( argc != 2 )
+ {
+ cout << "Usage: " << argv[ 0 ] << " n_neurons\n";
return 0;
}
int arg1;
- sscanf(argv[1], "%d", &arg1);
+ sscanf( argv[ 1 ], "%d", &arg1 );
NESTGPU ngpu;
cout << "Building ...\n";
- ngpu.SetRandomSeed(1234ULL); // seed for GPU random numbers
-
+ ngpu.SetRandomSeed( 1234ULL ); // seed for GPU random numbers
+
int n_receptors = 2;
- int order = arg1/5;
+ int order = arg1 / 5;
int NE = 4 * order; // number of excitatory neurons
int NI = 1 * order; // number of inhibitory neurons
int n_neurons = NE + NI; // number of neurons in total
- int CE = 800; // number of excitatory synapses per neuron
- int CI = CE/4; // number of inhibitory synapses per neuron
+ int CE = 800; // number of excitatory synapses per neuron
+ int CI = CE / 4; // number of inhibitory synapses per neuron
float Wex = 0.05;
float Win = 0.35;
@@ -64,72 +63,70 @@ int main(int argc, char *argv[])
float poiss_delay = 0.2; // poisson signal delay in ms
// create poisson generator
- NodeSeq pg = ngpu.Create("poisson_generator");
- ngpu.SetNeuronParam(pg, "rate", poiss_rate);
- std::vector pg_vect = pg.ToVector();
+ NodeSeq pg = ngpu.Create( "poisson_generator" );
+ ngpu.SetNeuronParam( pg, "rate", poiss_rate );
+ std::vector< int > pg_vect = pg.ToVector();
// create n_neurons neurons with n_receptor receptor ports
- NodeSeq neuron = ngpu.Create("aeif_cond_beta", n_neurons,
- n_receptors);
- std::vector neuron_vect = neuron.ToVector();
- NodeSeq exc_neuron = neuron.Subseq(0,NE-1); // excitatory neuron group
- std::vector exc_neuron_vect = exc_neuron.ToVector();
- NodeSeq inh_neuron = neuron.Subseq(NE, n_neurons-1); //inhibitory neuron group
- std::vector inh_neuron_vect = inh_neuron.ToVector();
+ NodeSeq neuron = ngpu.Create( "aeif_cond_beta", n_neurons, n_receptors );
+ std::vector< int > neuron_vect = neuron.ToVector();
+ NodeSeq exc_neuron = neuron.Subseq( 0, NE - 1 ); // excitatory neuron group
+ std::vector< int > exc_neuron_vect = exc_neuron.ToVector();
+ NodeSeq inh_neuron = neuron.Subseq( NE, n_neurons - 1 ); // inhibitory neuron group
+ std::vector< int > inh_neuron_vect = inh_neuron.ToVector();
// neuron parameters
- float E_rev[] = {0.0, -85.0};
- float tau_decay[] = {1.0, 1.0};
- float tau_rise[] = {1.0, 1.0};
- ngpu.SetNeuronParam(neuron_vect, "E_rev", E_rev, 2);
- ngpu.SetNeuronParam(neuron_vect, "tau_decay", tau_decay, 2);
- ngpu.SetNeuronParam(neuron_vect, "tau_rise", tau_rise, 2);
-
+ float E_rev[] = { 0.0, -85.0 };
+ float tau_decay[] = { 1.0, 1.0 };
+ float tau_rise[] = { 1.0, 1.0 };
+ ngpu.SetNeuronParam( neuron_vect, "E_rev", E_rev, 2 );
+ ngpu.SetNeuronParam( neuron_vect, "tau_decay", tau_decay, 2 );
+ ngpu.SetNeuronParam( neuron_vect, "tau_rise", tau_rise, 2 );
+
float mean_delay = 0.5;
float std_delay = 0.25;
float min_delay = 0.1;
// Excitatory connections
// connect excitatory neurons to port 0 of all neurons
// normally distributed delays, weight Wex and CE connections per neuron
- float *exc_delays = ngpu.RandomNormalClipped(CE*n_neurons, mean_delay,
- std_delay, min_delay,
- mean_delay+3*std_delay);
-
- ConnSpec conn_spec1(FIXED_INDEGREE, CE);
+ float* exc_delays =
+ ngpu.RandomNormalClipped( CE * n_neurons, mean_delay, std_delay, min_delay, mean_delay + 3 * std_delay );
+
+ ConnSpec conn_spec1( FIXED_INDEGREE, CE );
SynSpec syn_spec1;
- syn_spec1.SetParam("receptor", 0);
- syn_spec1.SetParam("weight", Wex);
- syn_spec1.SetParam("delay_array", exc_delays);
- ngpu.Connect(exc_neuron_vect, neuron, conn_spec1, syn_spec1);
+ syn_spec1.SetParam( "receptor", 0 );
+ syn_spec1.SetParam( "weight", Wex );
+ syn_spec1.SetParam( "delay_array", exc_delays );
+ ngpu.Connect( exc_neuron_vect, neuron, conn_spec1, syn_spec1 );
delete[] exc_delays;
// Inhibitory connections
// connect inhibitory neurons to port 1 of all neurons
// normally distributed delays, weight Win and CI connections per neuron
- float *inh_delays = ngpu.RandomNormalClipped(CI*n_neurons, mean_delay,
- std_delay, min_delay,
- mean_delay+3*std_delay);
+ float* inh_delays =
+ ngpu.RandomNormalClipped( CI * n_neurons, mean_delay, std_delay, min_delay, mean_delay + 3 * std_delay );
- ConnSpec conn_spec2(FIXED_INDEGREE, CI);
+ ConnSpec conn_spec2( FIXED_INDEGREE, CI );
SynSpec syn_spec2;
- syn_spec2.SetParam("receptor", 1);
- syn_spec2.SetParam("weight", Win);
- syn_spec2.SetParam("delay_array", inh_delays);
- ngpu.Connect(inh_neuron, neuron_vect, conn_spec2, syn_spec2);
+ syn_spec2.SetParam( "receptor", 1 );
+ syn_spec2.SetParam( "weight", Win );
+ syn_spec2.SetParam( "delay_array", inh_delays );
+ ngpu.Connect( inh_neuron, neuron_vect, conn_spec2, syn_spec2 );
delete[] inh_delays;
- ConnSpec conn_spec3(ALL_TO_ALL);
- SynSpec syn_spec3(STANDARD_SYNAPSE, poiss_weight, poiss_delay, 0);
+ ConnSpec conn_spec3( ALL_TO_ALL );
+ SynSpec syn_spec3( STANDARD_SYNAPSE, poiss_weight, poiss_delay, 0 );
// connect poisson generator to port 0 of all neurons
- ngpu.Connect(pg_vect, neuron_vect, conn_spec3, syn_spec3);
+ ngpu.Connect( pg_vect, neuron_vect, conn_spec3, syn_spec3 );
char filename[] = "test_brunel_vect.dat";
-
- int i_neuron_arr[] = {neuron[0], neuron[rand()%n_neurons],
- neuron[n_neurons-1]}; // any set of neuron indexes
+
+ int i_neuron_arr[] = {
+ neuron[ 0 ], neuron[ rand() % n_neurons ], neuron[ n_neurons - 1 ]
+ }; // any set of neuron indexes
// create multimeter record of V_m
- std::string var_name_arr[] = {"V_m", "V_m", "V_m"};
- ngpu.CreateRecord(string(filename), var_name_arr, i_neuron_arr, 3);
+ std::string var_name_arr[] = { "V_m", "V_m", "V_m" };
+ ngpu.CreateRecord( string( filename ), var_name_arr, i_neuron_arr, 3 );
ngpu.Simulate();
diff --git a/c++/examples/test_aeif_cond_beta.cpp b/c++/examples/test_aeif_cond_beta.cpp
index b5e2937e8..93e855c6a 100644
--- a/c++/examples/test_aeif_cond_beta.cpp
+++ b/c++/examples/test_aeif_cond_beta.cpp
@@ -21,64 +21,63 @@
*/
-
-
-
-#include
+#include "nestgpu.h"
+#include
#include
+#include
#include
-#include
-#include "nestgpu.h"
using namespace std;
-int main(int argc, char *argv[])
+int
+main( int argc, char* argv[] )
{
NESTGPU ngpu;
cout << "Building ...\n";
-
- srand(12345);
+
+ srand( 12345 );
int n_neurons = 10000;
-
+
// create n_neurons neurons with 3 receptor ports
- NodeSeq neuron = ngpu.Create("aeif_cond_beta", n_neurons, 3);
+ NodeSeq neuron = ngpu.Create( "aeif_cond_beta", n_neurons, 3 );
// neuron parameters
- float E_rev[] = {20.0, 0.0, -85.0};
- float tau_decay[] = {40.0, 20.0, 30.0};
- float tau_rise[] = {20.0, 10.0, 5.0};
- ngpu.SetNeuronParam(neuron, "E_rev", E_rev, 3);
- ngpu.SetNeuronParam(neuron, "tau_decay", tau_decay, 3);
- ngpu.SetNeuronParam(neuron, "tau_rise", tau_rise, 3);
- ngpu.SetNeuronParam(neuron, "a", 4.0);
- ngpu.SetNeuronParam(neuron, "b", 80.5);
- ngpu.SetNeuronParam(neuron, "E_L", -70.6);
- ngpu.SetNeuronParam(neuron, "g_L", 300.0);
+ float E_rev[] = { 20.0, 0.0, -85.0 };
+ float tau_decay[] = { 40.0, 20.0, 30.0 };
+ float tau_rise[] = { 20.0, 10.0, 5.0 };
+ ngpu.SetNeuronParam( neuron, "E_rev", E_rev, 3 );
+ ngpu.SetNeuronParam( neuron, "tau_decay", tau_decay, 3 );
+ ngpu.SetNeuronParam( neuron, "tau_rise", tau_rise, 3 );
+ ngpu.SetNeuronParam( neuron, "a", 4.0 );
+ ngpu.SetNeuronParam( neuron, "b", 80.5 );
+ ngpu.SetNeuronParam( neuron, "E_L", -70.6 );
+ ngpu.SetNeuronParam( neuron, "g_L", 300.0 );
- NodeSeq sg = ngpu.Create("spike_generator"); // create spike generator
+ NodeSeq sg = ngpu.Create( "spike_generator" ); // create spike generator
- float spike_times[] = {10.0, 400.0};
- float spike_heights[] = {1.0, 0.5};
+ float spike_times[] = { 10.0, 400.0 };
+ float spike_heights[] = { 1.0, 0.5 };
int n_spikes = 2;
// set spike times and height
- ngpu.SetNeuronParam(sg, "spike_times", spike_times, n_spikes);
- ngpu.SetNeuronParam(sg, "spike_heights", spike_heights, n_spikes);
-
- float delay[] = {1.0, 100.0, 130.0};
- float weight[] = {0.1, 0.2, 0.5};
+ ngpu.SetNeuronParam( sg, "spike_times", spike_times, n_spikes );
+ ngpu.SetNeuronParam( sg, "spike_heights", spike_heights, n_spikes );
+
+ float delay[] = { 1.0, 100.0, 130.0 };
+ float weight[] = { 0.1, 0.2, 0.5 };
- for (int i_port=0; i_port<3; i_port++) {
- ConnSpec conn_spec(ALL_TO_ALL);
- SynSpec syn_spec(STANDARD_SYNAPSE, weight[i_port], delay[i_port], i_port);
- ngpu.Connect(sg, neuron, conn_spec, syn_spec);
+ for ( int i_port = 0; i_port < 3; i_port++ )
+ {
+ ConnSpec conn_spec( ALL_TO_ALL );
+ SynSpec syn_spec( STANDARD_SYNAPSE, weight[ i_port ], delay[ i_port ], i_port );
+ ngpu.Connect( sg, neuron, conn_spec, syn_spec );
}
string filename = "test_aeif_cond_beta.dat";
- int i_neuron[] = {neuron[rand()%n_neurons]}; // any set of neuron indexes
- string var_name[] = {"V_m"};
+ int i_neuron[] = { neuron[ rand() % n_neurons ] }; // any set of neuron indexes
+ string var_name[] = { "V_m" };
// create multimeter record of V_m
- ngpu.CreateRecord(filename, var_name, i_neuron, 1);
+ ngpu.CreateRecord( filename, var_name, i_neuron, 1 );
- ngpu.Simulate(800.0);
+ ngpu.Simulate( 800.0 );
return 0;
}
diff --git a/c++/examples/test_connect.cpp b/c++/examples/test_connect.cpp
index d0f229758..1acca648e 100644
--- a/c++/examples/test_connect.cpp
+++ b/c++/examples/test_connect.cpp
@@ -21,70 +21,70 @@
*/
-
-
-
-#include
#include
+#include
#include
#include "nestgpu.h"
-int main(int argc, char *argv[])
+int
+main( int argc, char* argv[] )
{
const int N = 5;
-
+
NESTGPU ngpu;
- NodeSeq neuron = ngpu.Create("aeif_cond_beta", 2*N);
- std::vector neuron_even;
- std::vector neuron_odd;
- for (int i=0; i neuron_even;
+ std::vector< int > neuron_odd;
+ for ( int i = 0; i < N; i++ )
+ {
+ neuron_even.push_back( neuron[ 2 * i ] );
+ neuron_odd.push_back( neuron[ 2 * i + 1 ] );
}
- float even_to_odd_delay[N*N];
- float even_to_odd_weight[N*N];
- float odd_to_even_delay[N*N];
- float odd_to_even_weight[N*N];
- for (int is=0; is conn_id
- = ngpu.GetConnections(neuron_even, neuron);
- std::vector conn_stat_vect
- = ngpu.GetConnectionStatus(conn_id);
+ std::vector< ConnectionId > conn_id = ngpu.GetConnections( neuron_even, neuron );
+ std::vector< ConnectionStatus > conn_stat_vect = ngpu.GetConnectionStatus( conn_id );
std::cout << "########################################\n";
std::cout << "Even to all\n";
- for (unsigned int i=0; i
+#include "nestgpu.h"
+#include
#include
+#include
#include
-#include
-#include "nestgpu.h"
using namespace std;
-int main(int argc, char *argv[])
+int
+main( int argc, char* argv[] )
{
NESTGPU ngpu;
cout << "Building ...\n";
-
- srand(12345);
+
+ srand( 12345 );
int n_neurons = 10000;
-
+
// create n_neurons neurons with 1 receptor ports
- NodeSeq neuron = ngpu.Create("aeif_cond_beta", n_neurons, 1);
+ NodeSeq neuron = ngpu.Create( "aeif_cond_beta", n_neurons, 1 );
// neuron parameters
- ngpu.SetNeuronParam(neuron, "a", 4.0);
- ngpu.SetNeuronParam(neuron, "b", 80.5);
- ngpu.SetNeuronParam(neuron, "E_L", -70.6);
- ngpu.SetNeuronParam(neuron, "I_e", 800.0);
+ ngpu.SetNeuronParam( neuron, "a", 4.0 );
+ ngpu.SetNeuronParam( neuron, "b", 80.5 );
+ ngpu.SetNeuronParam( neuron, "E_L", -70.6 );
+ ngpu.SetNeuronParam( neuron, "I_e", 800.0 );
string filename = "test_constcurr.dat";
- int i_neurons[] = {neuron[rand()%n_neurons]}; // any set of neuron indexes
- string var_name[] = {"V_m"};
+ int i_neurons[] = { neuron[ rand() % n_neurons ] }; // any set of neuron indexes
+ string var_name[] = { "V_m" };
// create multimeter record of V_m
- ngpu.CreateRecord(filename, var_name, i_neurons, 1);
+ ngpu.CreateRecord( filename, var_name, i_neurons, 1 );
ngpu.Simulate();
diff --git a/c++/examples/test_error.cpp b/c++/examples/test_error.cpp
index 32e2e9d5d..a58005257 100644
--- a/c++/examples/test_error.cpp
+++ b/c++/examples/test_error.cpp
@@ -21,118 +21,117 @@
*/
-
-
-
-#include
-#include
-#include
-#include
#include "nestgpu.h"
#include "ngpu_exception.h"
+#include
+#include
+#include
+#include
using namespace std;
-int main(int argc, char *argv[])
+int
+main( int argc, char* argv[] )
{
- BEGIN_TRY {
- if (argc != 2) {
- cout << "Usage: " << argv[0] << " n_neurons\n";
+ BEGIN_TRY
+ {
+ if ( argc != 2 )
+ {
+ cout << "Usage: " << argv[ 0 ] << " n_neurons\n";
+ return 0;
+ }
+ int arg1;
+ sscanf( argv[ 1 ], "%d", &arg1 );
+ NESTGPU ngpu;
+ cout << "Building ...\n";
+
+ ngpu.SetRandomSeed( 1234ULL ); // seed for GPU random numbers
+
+ int n_receptors = 2;
+
+ int order = arg1 / 5;
+ int NE = 4 * order; // number of excitatory neurons
+ int NI = 1 * order; // number of inhibitory neurons
+ int n_neurons = NE + NI; // number of neurons in total
+
+ int CE = 800; // number of excitatory synapses per neuron
+ int CI = CE / 4; // number of inhibitory synapses per neuron
+
+ float Wex = 0.05;
+ float Win = 0.35;
+
+ // poisson generator parameters
+ float poiss_rate = 20000.0; // poisson signal rate in Hz
+ float poiss_weight = 0.37;
+ float poiss_delay = 0.2; // poisson signal delay in ms
+ int n_pg = n_neurons; // number of poisson generators
+ // create poisson generator
+ NodeSeq pg = ngpu.CreatePoissonGenerator( n_pg, poiss_rate );
+
+ // create n_neurons neurons with n_receptor receptor ports
+ NodeSeq neuron = ngpu.Create( "aeif_cond_beta", n_neurons, n_receptors );
+
+ NodeSeq exc_neuron = neuron.Subseq( 0, NE - 1 ); // excitatory neuron group
+ NodeSeq inh_neuron = neuron.Subseq( NE, n_neurons - 1 ); // inhibitory neuron group
+
+ // neuron parameters
+ float E_rev[] = { 0.0, -85.0 };
+ float tau_decay[] = { 1.0, 1.0 };
+ float tau_rise[] = { 1.0, 1.0 };
+
+ ngpu.SetNeuronParam( neuron, "Non-existent", E_rev, 2 );
+ ngpu.SetNeuronParam( neuron, "tau_decay", tau_decay, 2 );
+ ngpu.SetNeuronParam( neuron, "tau_rise", tau_rise, 2 );
+
+ float mean_delay = 0.5;
+ float std_delay = 0.25;
+ float min_delay = 0.1;
+ // Excitatory connections
+ // connect excitatory neurons to port 0 of all neurons
+ // normally distributed delays, weight Wex and CE connections per neuron
+ float* exc_delays =
+ ngpu.RandomNormalClipped( CE * n_neurons, mean_delay, std_delay, min_delay, mean_delay + 3 * std_delay );
+
+ ConnSpec conn_spec1( FIXED_INDEGREE, CE );
+ SynSpec syn_spec1;
+ syn_spec1.SetParam( "receptor", 0 );
+ syn_spec1.SetParam( "weight", Wex );
+ syn_spec1.SetParam( "delay_array", exc_delays );
+ ngpu.Connect( exc_neuron, neuron, conn_spec1, syn_spec1 );
+ delete[] exc_delays;
+
+ // Inhibitory connections
+ // connect inhibitory neurons to port 1 of all neurons
+ // normally distributed delays, weight Win and CI connections per neuron
+ float* inh_delays =
+ ngpu.RandomNormalClipped( CI * n_neurons, mean_delay, std_delay, min_delay, mean_delay + 3 * std_delay );
+
+ ConnSpec conn_spec2( FIXED_INDEGREE, CI );
+ SynSpec syn_spec2;
+ syn_spec2.SetParam( "receptor", 1 );
+ syn_spec2.SetParam( "weight", Win );
+ syn_spec2.SetParam( "delay_array", inh_delays );
+ ngpu.Connect( inh_neuron, neuron, conn_spec2, syn_spec2 );
+
+ delete[] inh_delays;
+
+ ConnSpec conn_spec3( ONE_TO_ONE );
+ SynSpec syn_spec3( STANDARD_SYNAPSE, poiss_weight, poiss_delay, 0 );
+ // connect poisson generator to port 0 of all neurons
+ ngpu.Connect( pg, neuron, conn_spec3, syn_spec3 );
+
+ char filename[] = "test_brunel_net.dat";
+ int i_neuron_arr[] = {
+ neuron[ 0 ], neuron[ rand() % n_neurons ], neuron[ n_neurons - 1 ]
+ }; // any set of neuron indexes
+ // create multimeter record of V_m
+ std::string var_name_arr[] = { "V_m", "V_m", "V_m" };
+ ngpu.CreateRecord( string( filename ), var_name_arr, i_neuron_arr, 3 );
+
+ ngpu.Simulate();
+
return 0;
}
- int arg1;
- sscanf(argv[1], "%d", &arg1);
- NESTGPU ngpu;
- cout << "Building ...\n";
-
- ngpu.SetRandomSeed(1234ULL); // seed for GPU random numbers
-
- int n_receptors = 2;
-
- int order = arg1/5;
- int NE = 4 * order; // number of excitatory neurons
- int NI = 1 * order; // number of inhibitory neurons
- int n_neurons = NE + NI; // number of neurons in total
-
- int CE = 800; // number of excitatory synapses per neuron
- int CI = CE/4; // number of inhibitory synapses per neuron
-
- float Wex = 0.05;
- float Win = 0.35;
-
- // poisson generator parameters
- float poiss_rate = 20000.0; // poisson signal rate in Hz
- float poiss_weight = 0.37;
- float poiss_delay = 0.2; // poisson signal delay in ms
- int n_pg = n_neurons; // number of poisson generators
- // create poisson generator
- NodeSeq pg = ngpu.CreatePoissonGenerator(n_pg, poiss_rate);
-
- // create n_neurons neurons with n_receptor receptor ports
- NodeSeq neuron = ngpu.Create("aeif_cond_beta", n_neurons,
- n_receptors);
-
- NodeSeq exc_neuron = neuron.Subseq(0,NE-1); // excitatory neuron group
- NodeSeq inh_neuron = neuron.Subseq(NE, n_neurons-1); //inhibitory neuron group
-
- // neuron parameters
- float E_rev[] = {0.0, -85.0};
- float tau_decay[] = {1.0, 1.0};
- float tau_rise[] = {1.0, 1.0};
-
- ngpu.SetNeuronParam(neuron, "Non-existent", E_rev, 2);
- ngpu.SetNeuronParam(neuron, "tau_decay", tau_decay, 2);
- ngpu.SetNeuronParam(neuron, "tau_rise", tau_rise, 2);
-
- float mean_delay = 0.5;
- float std_delay = 0.25;
- float min_delay = 0.1;
- // Excitatory connections
- // connect excitatory neurons to port 0 of all neurons
- // normally distributed delays, weight Wex and CE connections per neuron
- float *exc_delays = ngpu.RandomNormalClipped(CE*n_neurons, mean_delay,
- std_delay, min_delay,
- mean_delay+3*std_delay);
-
- ConnSpec conn_spec1(FIXED_INDEGREE, CE);
- SynSpec syn_spec1;
- syn_spec1.SetParam("receptor", 0);
- syn_spec1.SetParam("weight", Wex);
- syn_spec1.SetParam("delay_array", exc_delays);
- ngpu.Connect(exc_neuron, neuron, conn_spec1, syn_spec1);
- delete[] exc_delays;
-
- // Inhibitory connections
- // connect inhibitory neurons to port 1 of all neurons
- // normally distributed delays, weight Win and CI connections per neuron
- float *inh_delays = ngpu.RandomNormalClipped(CI*n_neurons, mean_delay,
- std_delay, min_delay,
- mean_delay+3*std_delay);
-
- ConnSpec conn_spec2(FIXED_INDEGREE, CI);
- SynSpec syn_spec2;
- syn_spec2.SetParam("receptor", 1);
- syn_spec2.SetParam("weight", Win);
- syn_spec2.SetParam("delay_array", inh_delays);
- ngpu.Connect(inh_neuron, neuron, conn_spec2, syn_spec2);
-
- delete[] inh_delays;
-
- ConnSpec conn_spec3(ONE_TO_ONE);
- SynSpec syn_spec3(STANDARD_SYNAPSE, poiss_weight, poiss_delay, 0);
- // connect poisson generator to port 0 of all neurons
- ngpu.Connect(pg, neuron, conn_spec3, syn_spec3);
-
- char filename[] = "test_brunel_net.dat";
- int i_neuron_arr[] = {neuron[0], neuron[rand()%n_neurons],
- neuron[n_neurons-1]}; // any set of neuron indexes
- // create multimeter record of V_m
- std::string var_name_arr[] = {"V_m", "V_m", "V_m"};
- ngpu.CreateRecord(string(filename), var_name_arr, i_neuron_arr, 3);
-
- ngpu.Simulate();
-
- return 0;
- } END_TRY
+ END_TRY
return -1;
}
diff --git a/c++/examples/test_setvar.cpp b/c++/examples/test_setvar.cpp
index f161276c0..b51a0be01 100644
--- a/c++/examples/test_setvar.cpp
+++ b/c++/examples/test_setvar.cpp
@@ -21,68 +21,68 @@
*/
-
-
-
-#include
+#include "nestgpu.h"
+#include
#include
+#include
#include
-#include
-#include "nestgpu.h"
using namespace std;
-int main(int argc, char *argv[])
+int
+main( int argc, char* argv[] )
{
NESTGPU ngpu;
cout << "Building ...\n";
-
- srand(12345);
+
+ srand( 12345 );
int n_neurons = 3;
-
+
// create n_neurons neurons with 2 receptor ports
- NodeSeq neuron = ngpu.Create("aeif_cond_beta", n_neurons, 2);
- float tau_decay[] = {60.0, 10.0};
- float tau_rise[] = {40.0, 5.0};
- ngpu.SetNeuronParam(neuron, "tau_decay", tau_decay, 2);
- ngpu.SetNeuronParam(neuron, "tau_rise", tau_rise, 2);
-
- NodeSeq neuron0 = neuron.Subseq(0,0);
- NodeSeq neuron1 = neuron.Subseq(1,1);
- NodeSeq neuron2 = neuron.Subseq(2,2);
- float g11[] = {0.0, 0.1};
- float g12[] = {0.1, 0.0};
-
+ NodeSeq neuron = ngpu.Create( "aeif_cond_beta", n_neurons, 2 );
+ float tau_decay[] = { 60.0, 10.0 };
+ float tau_rise[] = { 40.0, 5.0 };
+ ngpu.SetNeuronParam( neuron, "tau_decay", tau_decay, 2 );
+ ngpu.SetNeuronParam( neuron, "tau_rise", tau_rise, 2 );
+
+ NodeSeq neuron0 = neuron.Subseq( 0, 0 );
+ NodeSeq neuron1 = neuron.Subseq( 1, 1 );
+ NodeSeq neuron2 = neuron.Subseq( 2, 2 );
+ float g11[] = { 0.0, 0.1 };
+ float g12[] = { 0.1, 0.0 };
+
// neuron variables
- ngpu.SetNeuronVar(neuron0, "V_m", -80.0);
- ngpu.SetNeuronVar(neuron1, "g1", g11, 2);
- ngpu.SetNeuronVar(neuron2, "g1", g12, 2);
+ ngpu.SetNeuronVar( neuron0, "V_m", -80.0 );
+ ngpu.SetNeuronVar( neuron1, "g1", g11, 2 );
+ ngpu.SetNeuronVar( neuron2, "g1", g12, 2 );
// reading parameters and variables test
- float *read_td = ngpu.GetNeuronParam(neuron, "tau_decay");
- float *read_tr = ngpu.GetNeuronParam(neuron, "tau_rise");
- float *read_Vm = ngpu.GetNeuronVar(neuron, "V_m");
- float *read_Vth = ngpu.GetNeuronParam(neuron, "V_th");
- float *read_g1 = ngpu.GetNeuronVar(neuron, "g1");
+ float* read_td = ngpu.GetNeuronParam( neuron, "tau_decay" );
+ float* read_tr = ngpu.GetNeuronParam( neuron, "tau_rise" );
+ float* read_Vm = ngpu.GetNeuronVar( neuron, "V_m" );
+ float* read_Vth = ngpu.GetNeuronParam( neuron, "V_th" );
+ float* read_g1 = ngpu.GetNeuronVar( neuron, "g1" );
- for (int in=0; in<3; in++) {
- printf("Neuron n. %d\n", in);
- printf("\tV_m: %f\n", read_Vm[in]);
- printf("\tV_th: %f\n", read_Vth[in]);
- for (int ip=0; ip<2; ip++) {
- printf("\tg1: %f\n", read_g1[in*2+ip]);
- printf("\ttau_rise: %f\n", read_tr[in*2+ip]);
- printf("\ttau_decay: %f\n", read_td[in*2+ip]);
+ for ( int in = 0; in < 3; in++ )
+ {
+ printf( "Neuron n. %d\n", in );
+ printf( "\tV_m: %f\n", read_Vm[ in ] );
+ printf( "\tV_th: %f\n", read_Vth[ in ] );
+ for ( int ip = 0; ip < 2; ip++ )
+ {
+ printf( "\tg1: %f\n", read_g1[ in * 2 + ip ] );
+ printf( "\ttau_rise: %f\n", read_tr[ in * 2 + ip ] );
+ printf( "\ttau_decay: %f\n", read_td[ in * 2 + ip ] );
}
- printf("\n");
+ printf( "\n" );
}
string filename = "test_setvar.dat";
- int i_neurons[] = {neuron[0], neuron[1], neuron[2]};
- string var_name[] = {"V_m", "V_m", "V_m"};
+ int i_neurons[] = { neuron[ 0 ], neuron[ 1 ], neuron[ 2 ] };
+ string var_name[] = { "V_m", "V_m", "V_m" };
// create multimeter record of V_m
- ngpu.CreateRecord(filename, var_name, i_neurons, 3);
+ ngpu.CreateRecord( filename, var_name, i_neurons, 3 );
ngpu.Simulate();
diff --git a/c++/tests/test_connections.cpp b/c++/tests/test_connections.cpp
index 018b1fe11..4d4b3947e 100644
--- a/c++/tests/test_connections.cpp
+++ b/c++/tests/test_connections.cpp
@@ -21,19 +21,17 @@
*/
-
-
-
+#include "nestgpu.h"
+#include
+#include
#include
#include
-#include
#include
-#include
-#include "nestgpu.h"
using namespace std;
-int main(int argc, char *argv[])
+int
+main( int argc, char* argv[] )
{
// Intializes C random number generator
// srand((unsigned) time(&t));
@@ -41,16 +39,16 @@ int main(int argc, char *argv[])
NESTGPU ngpu;
cout << "Building ...\n";
- ngpu.SetRandomSeed(1234ULL); // seed for GPU random numbers
-
+ ngpu.SetRandomSeed( 1234ULL ); // seed for GPU random numbers
+
// poisson generator parameters
float poiss_rate = 5000.0; // poisson signal rate in Hz
float poiss_weight = 1.0;
float poiss_delay = 0.2; // poisson signal delay in ms
// create poisson generator
- NodeSeq pg = ngpu.Create("poisson_generator");
- ngpu.SetNeuronParam(pg, "rate", poiss_rate);
+ NodeSeq pg = ngpu.Create( "poisson_generator" );
+ ngpu.SetNeuronParam( pg, "rate", poiss_rate );
int n_recept = 3; // number of receptors
// create 3 neuron groups
@@ -58,86 +56,83 @@ int main(int argc, char *argv[])
int n_neur2 = 20;
int n_neur3 = 50;
int n_neurons = n_neur1 + n_neur2 + n_neur3;
-
- NodeSeq neur_group = ngpu.Create("aeif_cond_beta", n_neurons, n_recept);
- NodeSeq neur_group1 = neur_group.Subseq(0, n_neur1 - 1);
- NodeSeq neur_group2 = neur_group.Subseq(n_neur1, n_neur1 + n_neur2 - 1);
- NodeSeq neur_group3 = neur_group.Subseq(n_neur1 + n_neur2, n_neurons - 1);
-
+
+ NodeSeq neur_group = ngpu.Create( "aeif_cond_beta", n_neurons, n_recept );
+ NodeSeq neur_group1 = neur_group.Subseq( 0, n_neur1 - 1 );
+ NodeSeq neur_group2 = neur_group.Subseq( n_neur1, n_neur1 + n_neur2 - 1 );
+ NodeSeq neur_group3 = neur_group.Subseq( n_neur1 + n_neur2, n_neurons - 1 );
+
// neuron parameters
- float E_rev[] = {0.0, 0.0, 0.0};
- float tau_decay[] = {1.0, 1.0, 1.0};
- float tau_rise[] = {1.0, 1.0, 1.0};
- ngpu.SetNeuronParam(neur_group1, "E_rev", E_rev, 3);
- ngpu.SetNeuronParam(neur_group1, "tau_decay", tau_decay, 3);
- ngpu.SetNeuronParam(neur_group1, "tau_rise", tau_rise, 3);
- ngpu.SetNeuronParam(neur_group2, "E_rev", E_rev, 3);
- ngpu.SetNeuronParam(neur_group2, "tau_decay", tau_decay, 3);
- ngpu.SetNeuronParam(neur_group2, "tau_rise", tau_rise, 3);
- ngpu.SetNeuronParam(neur_group3, "E_rev", E_rev, 3);
- ngpu.SetNeuronParam(neur_group3, "tau_decay", tau_decay, 3);
- ngpu.SetNeuronParam(neur_group3, "tau_rise", tau_rise, 3);
-
- int i11 = neur_group1[rand()%n_neur1];
- int i12 = neur_group2[rand()%n_neur2];
- int i13 = neur_group2[rand()%n_neur2];
- int i14 = neur_group3[rand()%n_neur3];
-
- int i21 = neur_group2[rand()%n_neur2];
-
- int i31 = neur_group1[rand()%n_neur1];
- int i32 = neur_group3[rand()%n_neur3];
-
- int it1 = neur_group1[rand()%n_neur1];
- int it2 = neur_group2[rand()%n_neur2];
- int it3 = neur_group3[rand()%n_neur3];
-
+ float E_rev[] = { 0.0, 0.0, 0.0 };
+ float tau_decay[] = { 1.0, 1.0, 1.0 };
+ float tau_rise[] = { 1.0, 1.0, 1.0 };
+ ngpu.SetNeuronParam( neur_group1, "E_rev", E_rev, 3 );
+ ngpu.SetNeuronParam( neur_group1, "tau_decay", tau_decay, 3 );
+ ngpu.SetNeuronParam( neur_group1, "tau_rise", tau_rise, 3 );
+ ngpu.SetNeuronParam( neur_group2, "E_rev", E_rev, 3 );
+ ngpu.SetNeuronParam( neur_group2, "tau_decay", tau_decay, 3 );
+ ngpu.SetNeuronParam( neur_group2, "tau_rise", tau_rise, 3 );
+ ngpu.SetNeuronParam( neur_group3, "E_rev", E_rev, 3 );
+ ngpu.SetNeuronParam( neur_group3, "tau_decay", tau_decay, 3 );
+ ngpu.SetNeuronParam( neur_group3, "tau_rise", tau_rise, 3 );
+
+ int i11 = neur_group1[ rand() % n_neur1 ];
+ int i12 = neur_group2[ rand() % n_neur2 ];
+ int i13 = neur_group2[ rand() % n_neur2 ];
+ int i14 = neur_group3[ rand() % n_neur3 ];
+
+ int i21 = neur_group2[ rand() % n_neur2 ];
+
+ int i31 = neur_group1[ rand() % n_neur1 ];
+ int i32 = neur_group3[ rand() % n_neur3 ];
+
+ int it1 = neur_group1[ rand() % n_neur1 ];
+ int it2 = neur_group2[ rand() % n_neur2 ];
+ int it3 = neur_group3[ rand() % n_neur3 ];
+
// connect poisson generator to port 0 of all neurons
- ngpu.Connect(pg[0], i11, 0, 0, poiss_weight, poiss_delay);
- ngpu.Connect(pg[0], i12, 0, 0, poiss_weight, poiss_delay);
- ngpu.Connect(pg[0], i13, 0, 0, poiss_weight, poiss_delay);
- ngpu.Connect(pg[0], i14, 0, 0, poiss_weight, poiss_delay);
- ngpu.Connect(pg[0], i21, 0, 0, poiss_weight, poiss_delay);
- ngpu.Connect(pg[0], i31, 0, 0, poiss_weight, poiss_delay);
- ngpu.Connect(pg[0], i32, 0, 0, poiss_weight, poiss_delay);
+ ngpu.Connect( pg[ 0 ], i11, 0, 0, poiss_weight, poiss_delay );
+ ngpu.Connect( pg[ 0 ], i12, 0, 0, poiss_weight, poiss_delay );
+ ngpu.Connect( pg[ 0 ], i13, 0, 0, poiss_weight, poiss_delay );
+ ngpu.Connect( pg[ 0 ], i14, 0, 0, poiss_weight, poiss_delay );
+ ngpu.Connect( pg[ 0 ], i21, 0, 0, poiss_weight, poiss_delay );
+ ngpu.Connect( pg[ 0 ], i31, 0, 0, poiss_weight, poiss_delay );
+ ngpu.Connect( pg[ 0 ], i32, 0, 0, poiss_weight, poiss_delay );
float weight = 0.01; // connection weight
- float delay = 0.2; // connection delay in ms
+ float delay = 0.2; // connection delay in ms
// connect neurons to target neuron n. 1
- ngpu.Connect(i11, it1, 0, 0, weight, delay);
- ngpu.Connect(i12, it1, 1, 0, weight, delay);
- ngpu.Connect(i13, it1, 1, 0, weight, delay);
- ngpu.Connect(i14, it1, 2, 0, weight, delay);
+ ngpu.Connect( i11, it1, 0, 0, weight, delay );
+ ngpu.Connect( i12, it1, 1, 0, weight, delay );
+ ngpu.Connect( i13, it1, 1, 0, weight, delay );
+ ngpu.Connect( i14, it1, 2, 0, weight, delay );
// connect neuron to target neuron n. 2
- ngpu.Connect(i21, it2, 0, 0, weight, delay);
+ ngpu.Connect( i21, it2, 0, 0, weight, delay );
+
+ // connect neurons to target neuron n. 3
+ ngpu.Connect( i31, it3, 0, 0, weight, delay );
+ ngpu.Connect( i32, it3, 1, 0, weight, delay );
- // connect neurons to target neuron n. 3
- ngpu.Connect(i31, it3, 0, 0, weight, delay);
- ngpu.Connect(i32, it3, 1, 0, weight, delay);
-
// create multimeter record n.1
string filename1 = "test_connections_voltage.dat";
- int i_neuron_arr1[] = {i11, i12, i13, i14, i21, i31, i32, it1, it2, it3};
- std::string var_name_arr1[] = {"V_m", "V_m", "V_m", "V_m", "V_m", "V_m",
- "V_m", "V_m", "V_m", "V_m"};
- ngpu.CreateRecord(filename1, var_name_arr1, i_neuron_arr1, 10);
+ int i_neuron_arr1[] = { i11, i12, i13, i14, i21, i31, i32, it1, it2, it3 };
+ std::string var_name_arr1[] = { "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m" };
+ ngpu.CreateRecord( filename1, var_name_arr1, i_neuron_arr1, 10 );
// create multimeter record n.2
string filename2 = "test_connections_g1.dat";
- int i_neuron_arr2[] = {it1, it1, it1, it2, it3, it3};
- int i_receptor_arr[] = {0, 1, 2, 0, 0, 1};
- std::string var_name_arr2[] = {"g1", "g1", "g1", "g1", "g1", "g1"};
- ngpu.CreateRecord(filename2, var_name_arr2, i_neuron_arr2,
- i_receptor_arr, 6);
+ int i_neuron_arr2[] = { it1, it1, it1, it2, it3, it3 };
+ int i_receptor_arr[] = { 0, 1, 2, 0, 0, 1 };
+ std::string var_name_arr2[] = { "g1", "g1", "g1", "g1", "g1", "g1" };
+ ngpu.CreateRecord( filename2, var_name_arr2, i_neuron_arr2, i_receptor_arr, 6 );
// create multimeter record n.3
string filename3 = "test_connections_spikes.dat";
- int i_neuron_arr3[] = {i11, i12, i13, i14, i21, i31, i32};
- std::string var_name_arr3[] = {"spike", "spike", "spike", "spike", "spike",
- "spike", "spike"};
- ngpu.CreateRecord(filename3, var_name_arr3, i_neuron_arr3, 7);
+ int i_neuron_arr3[] = { i11, i12, i13, i14, i21, i31, i32 };
+ std::string var_name_arr3[] = { "spike", "spike", "spike", "spike", "spike", "spike", "spike" };
+ ngpu.CreateRecord( filename3, var_name_arr3, i_neuron_arr3, 7 );
ngpu.Simulate();
diff --git a/c++/tests/test_neuron_groups.cpp b/c++/tests/test_neuron_groups.cpp
index 5a824105b..7bf6fe9fc 100644
--- a/c++/tests/test_neuron_groups.cpp
+++ b/c++/tests/test_neuron_groups.cpp
@@ -21,28 +21,26 @@
*/
-
-
-
+#include "nestgpu.h"
+#include
+#include
#include
#include
-#include
#include
-#include
#include
-#include "nestgpu.h"
using namespace std;
-int main(int argc, char *argv[])
+int
+main( int argc, char* argv[] )
{
// Intializes C random number generator
// srand((unsigned) time(&t));
NESTGPU ngpu;
cout << "Building ...\n";
-
- ngpu.SetRandomSeed(1234ULL); // seed for GPU random numbers
+
+ ngpu.SetRandomSeed( 1234ULL ); // seed for GPU random numbers
// poisson generator parameters
float poiss_rate = 5000.0; // poisson signal rate in Hz
@@ -50,113 +48,109 @@ int main(int argc, char *argv[])
float poiss_delay = 0.2; // poisson signal delay in ms
// create poisson generator
- NodeSeq pg = ngpu.Create("poisson_generator");
- ngpu.SetNeuronParam(pg, "rate", poiss_rate);
+ NodeSeq pg = ngpu.Create( "poisson_generator" );
+ ngpu.SetNeuronParam( pg, "rate", poiss_rate );
// create 3 neuron groups
int n_neur1 = 100; // number of neurons
int n_recept1 = 3; // number of receptors
- NodeSeq neur_group1 = ngpu.Create("aeif_cond_beta", n_neur1, n_recept1);
- int n_neur2 = 20; // number of neurons
+ NodeSeq neur_group1 = ngpu.Create( "aeif_cond_beta", n_neur1, n_recept1 );
+ int n_neur2 = 20; // number of neurons
int n_recept2 = 1; // number of receptors
- NodeSeq neur_group2 = ngpu.Create("aeif_cond_beta", n_neur2, n_recept2);
- int n_neur3 = 50; // number of neurons
+ NodeSeq neur_group2 = ngpu.Create( "aeif_cond_beta", n_neur2, n_recept2 );
+ int n_neur3 = 50; // number of neurons
int n_recept3 = 2; // number of receptors
- NodeSeq neur_group3 = ngpu.Create("aeif_cond_beta", n_neur3, n_recept3);
-
+ NodeSeq neur_group3 = ngpu.Create( "aeif_cond_beta", n_neur3, n_recept3 );
+
// neuron parameters
- float E_rev[] = {0.0, 0.0, 0.0};
- float tau_decay[] = {1.0, 1.0, 1.0};
- float tau_rise[] = {1.0, 1.0, 1.0};
- ngpu.SetNeuronParam(neur_group1, "E_rev", E_rev, 3);
- ngpu.SetNeuronParam(neur_group1, "tau_decay", tau_decay, 3);
- ngpu.SetNeuronParam(neur_group1, "tau_rise", tau_rise, 3);
- ngpu.SetNeuronParam(neur_group2, "E_rev", E_rev, 1);
- ngpu.SetNeuronParam(neur_group2, "tau_decay", tau_decay, 1);
- ngpu.SetNeuronParam(neur_group2, "tau_rise", tau_rise, 1);
- ngpu.SetNeuronParam(neur_group3, "E_rev", E_rev, 2);
- ngpu.SetNeuronParam(neur_group3, "tau_decay", tau_decay, 2);
- ngpu.SetNeuronParam(neur_group3, "tau_rise", tau_rise, 2);
-
- int i11 = neur_group1[rand()%n_neur1];
- int i12 = neur_group2[rand()%n_neur2];
- int i13 = neur_group2[rand()%n_neur2];
- int i14 = neur_group3[rand()%n_neur3];
-
- int i21 = neur_group2[rand()%n_neur2];
-
- int i31 = neur_group1[rand()%n_neur1];
- int i32 = neur_group3[rand()%n_neur3];
-
- int it1 = neur_group1[rand()%n_neur1];
- int it2 = neur_group2[rand()%n_neur2];
- int it3 = neur_group3[rand()%n_neur3];
-
+ float E_rev[] = { 0.0, 0.0, 0.0 };
+ float tau_decay[] = { 1.0, 1.0, 1.0 };
+ float tau_rise[] = { 1.0, 1.0, 1.0 };
+ ngpu.SetNeuronParam( neur_group1, "E_rev", E_rev, 3 );
+ ngpu.SetNeuronParam( neur_group1, "tau_decay", tau_decay, 3 );
+ ngpu.SetNeuronParam( neur_group1, "tau_rise", tau_rise, 3 );
+ ngpu.SetNeuronParam( neur_group2, "E_rev", E_rev, 1 );
+ ngpu.SetNeuronParam( neur_group2, "tau_decay", tau_decay, 1 );
+ ngpu.SetNeuronParam( neur_group2, "tau_rise", tau_rise, 1 );
+ ngpu.SetNeuronParam( neur_group3, "E_rev", E_rev, 2 );
+ ngpu.SetNeuronParam( neur_group3, "tau_decay", tau_decay, 2 );
+ ngpu.SetNeuronParam( neur_group3, "tau_rise", tau_rise, 2 );
+
+ int i11 = neur_group1[ rand() % n_neur1 ];
+ int i12 = neur_group2[ rand() % n_neur2 ];
+ int i13 = neur_group2[ rand() % n_neur2 ];
+ int i14 = neur_group3[ rand() % n_neur3 ];
+
+ int i21 = neur_group2[ rand() % n_neur2 ];
+
+ int i31 = neur_group1[ rand() % n_neur1 ];
+ int i32 = neur_group3[ rand() % n_neur3 ];
+
+ int it1 = neur_group1[ rand() % n_neur1 ];
+ int it2 = neur_group2[ rand() % n_neur2 ];
+ int it3 = neur_group3[ rand() % n_neur3 ];
+
// connect poisson generator to port 0 of all neurons
- ngpu.Connect(pg[0], i11, 0, 0, poiss_weight, poiss_delay);
- ngpu.Connect(pg[0], i12, 0, 0, poiss_weight, poiss_delay);
- ngpu.Connect(pg[0], i13, 0, 0, poiss_weight, poiss_delay);
- ngpu.Connect(pg[0], i14, 0, 0, poiss_weight, poiss_delay);
- ngpu.Connect(pg[0], i21, 0, 0, poiss_weight, poiss_delay);
- ngpu.Connect(pg[0], i31, 0, 0, poiss_weight, poiss_delay);
- ngpu.Connect(pg[0], i32, 0, 0, poiss_weight, poiss_delay);
+ ngpu.Connect( pg[ 0 ], i11, 0, 0, poiss_weight, poiss_delay );
+ ngpu.Connect( pg[ 0 ], i12, 0, 0, poiss_weight, poiss_delay );
+ ngpu.Connect( pg[ 0 ], i13, 0, 0, poiss_weight, poiss_delay );
+ ngpu.Connect( pg[ 0 ], i14, 0, 0, poiss_weight, poiss_delay );
+ ngpu.Connect( pg[ 0 ], i21, 0, 0, poiss_weight, poiss_delay );
+ ngpu.Connect( pg[ 0 ], i31, 0, 0, poiss_weight, poiss_delay );
+ ngpu.Connect( pg[ 0 ], i32, 0, 0, poiss_weight, poiss_delay );
float weight = 0.01; // connection weight
- float delay = 0.2; // connection delay in ms
+ float delay = 0.2; // connection delay in ms
// connect neurons to target neuron n. 1
- ngpu.Connect(i11, it1, 0, 0, weight, delay);
- ngpu.Connect(i12, it1, 1, 0, weight, delay);
- ngpu.Connect(i13, it1, 1, 0, weight, delay);
- ngpu.Connect(i14, it1, 2, 0, weight, delay);
+ ngpu.Connect( i11, it1, 0, 0, weight, delay );
+ ngpu.Connect( i12, it1, 1, 0, weight, delay );
+ ngpu.Connect( i13, it1, 1, 0, weight, delay );
+ ngpu.Connect( i14, it1, 2, 0, weight, delay );
// connect neuron to target neuron n. 2
- ngpu.Connect(i21, it2, 0, 0, weight, delay);
+ ngpu.Connect( i21, it2, 0, 0, weight, delay );
+
+ // connect neurons to target neuron n. 3
+ ngpu.Connect( i31, it3, 0, 0, weight, delay );
+ ngpu.Connect( i32, it3, 1, 0, weight, delay );
- // connect neurons to target neuron n. 3
- ngpu.Connect(i31, it3, 0, 0, weight, delay);
- ngpu.Connect(i32, it3, 1, 0, weight, delay);
-
// create multimeter record n.1
string filename1 = "test_neuron_groups_voltage.dat";
- int i_neuron_arr1[] = {i11, i12, i13, i14, i21, i31, i32, it1, it2, it3};
- string var_name_arr1[] = {"V_m", "V_m", "V_m", "V_m", "V_m", "V_m",
- "V_m", "V_m", "V_m", "V_m"};
- int record1 = ngpu.CreateRecord(filename1, var_name_arr1,
- i_neuron_arr1, 10);
+ int i_neuron_arr1[] = { i11, i12, i13, i14, i21, i31, i32, it1, it2, it3 };
+ string var_name_arr1[] = { "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m", "V_m" };
+ int record1 = ngpu.CreateRecord( filename1, var_name_arr1, i_neuron_arr1, 10 );
// create multimeter record n.2
string filename2 = "test_neuron_groups_g1.dat";
- int i_neuron_arr2[] = {it1, it1, it1, it2, it3, it3};
- int i_receptor_arr[] = {0, 1, 2, 0, 0, 1};
- string var_name_arr2[] = {"g1", "g1", "g1", "g1", "g1", "g1"};
- //int record2 =
- ngpu.CreateRecord(filename2, var_name_arr2,
- i_neuron_arr2, i_receptor_arr, 6);
+ int i_neuron_arr2[] = { it1, it1, it1, it2, it3, it3 };
+ int i_receptor_arr[] = { 0, 1, 2, 0, 0, 1 };
+ string var_name_arr2[] = { "g1", "g1", "g1", "g1", "g1", "g1" };
+ // int record2 =
+ ngpu.CreateRecord( filename2, var_name_arr2, i_neuron_arr2, i_receptor_arr, 6 );
// create multimeter record n.3
string filename3 = "test_neuron_groups_spikes.dat";
- int i_neuron_arr3[] = {i11, i12, i13, i14, i21, i31, i32};
- string var_name_arr3[] = {"spike", "spike", "spike", "spike", "spike",
- "spike", "spike"};
- //int record3 =
- ngpu.CreateRecord(filename3, var_name_arr3,
- i_neuron_arr3, 7);
+ int i_neuron_arr3[] = { i11, i12, i13, i14, i21, i31, i32 };
+ string var_name_arr3[] = { "spike", "spike", "spike", "spike", "spike", "spike", "spike" };
+ // int record3 =
+ ngpu.CreateRecord( filename3, var_name_arr3, i_neuron_arr3, 7 );
ngpu.Simulate();
- std::vector> data_vect1 =
- *ngpu.GetRecordData(record1);
+ std::vector< std::vector< float > > data_vect1 = *ngpu.GetRecordData( record1 );
- FILE *fp=fopen("test_neuron_group_record.dat", "w");
- for (uint i=0; i vect = data_vect1[i];
- for (uint j=0; j vect = data_vect1[ i ];
+ for ( uint j = 0; j < vect.size() - 1; j++ )
+ {
+ fprintf( fp, "%f\t", vect[ j ] );
}
- fprintf(fp,"%f\n", vect[vect.size()-1]);
+ fprintf( fp, "%f\n", vect[ vect.size() - 1 ] );
}
- fclose(fp);
-
+ fclose( fp );
+
return 0;
}
diff --git a/python/test/test_stdp/cases/test_all.sh b/python/test/test_stdp/cases/test_all.sh
index 3cf5c02d1..1e2c6c71e 100755
--- a/python/test/test_stdp/cases/test_all.sh
+++ b/python/test/test_stdp/cases/test_all.sh
@@ -1 +1 @@
-for i in $(seq 1 10); do python3 case$i.py | tail -1; done
+for i in $(seq 1 10); do python3 case$i.py | grep 'dw/w'; done
diff --git a/src/aeif_cond_alpha.cu b/src/aeif_cond_alpha.cu
index 65cccda6f..d5606150e 100644
--- a/src/aeif_cond_alpha.cu
+++ b/src/aeif_cond_alpha.cu
@@ -21,24 +21,20 @@
*/
-
-
-
-#include
-#include
-#include
+#include "aeif_cond_alpha.h"
#include "aeif_cond_alpha_kernel.h"
#include "rk5.h"
-#include "aeif_cond_alpha.h"
+#include
+#include
+#include
namespace aeif_cond_alpha_ns
{
-__device__
-void NodeInit(int n_var, int n_param, double x, float *y, float *param,
- aeif_cond_alpha_rk5 data_struct)
+__device__ void
+NodeInit( int n_var, int n_param, double x, float* y, float* param, aeif_cond_alpha_rk5 data_struct )
{
- //int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
+ // int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
V_th = -50.4;
Delta_T = 2.0;
@@ -67,11 +63,10 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param,
g1_in = 0;
}
-__device__
-void NodeCalibrate(int n_var, int n_param, double x, float *y,
- float *param, aeif_cond_alpha_rk5 data_struct)
+__device__ void
+NodeCalibrate( int n_var, int n_param, double x, float* y, float* param, aeif_cond_alpha_rk5 data_struct )
{
- //int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
+ // int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
refractory_step = 0;
// use normalization for alpha function
@@ -80,27 +75,26 @@ void NodeCalibrate(int n_var, int n_param, double x, float *y,
}
}
-
-__device__
-void NodeInit(int n_var, int n_param, double x, float *y,
- float *param, aeif_cond_alpha_rk5 data_struct)
+
+__device__ void
+NodeInit( int n_var, int n_param, double x, float* y, float* param, aeif_cond_alpha_rk5 data_struct )
{
- aeif_cond_alpha_ns::NodeInit(n_var, n_param, x, y, param, data_struct);
+ aeif_cond_alpha_ns::NodeInit( n_var, n_param, x, y, param, data_struct );
}
-__device__
-void NodeCalibrate(int n_var, int n_param, double x, float *y,
- float *param, aeif_cond_alpha_rk5 data_struct)
+__device__ void
+NodeCalibrate( int n_var, int n_param, double x, float* y, float* param, aeif_cond_alpha_rk5 data_struct )
{
- aeif_cond_alpha_ns::NodeCalibrate(n_var, n_param, x, y, param, data_struct);
+ aeif_cond_alpha_ns::NodeCalibrate( n_var, n_param, x, y, param, data_struct );
}
using namespace aeif_cond_alpha_ns;
-int aeif_cond_alpha::Init(int i_node_0, int n_node, int n_port,
- int i_group, unsigned long long *seed) {
- BaseNeuron::Init(i_node_0, n_node, 2 /*n_port*/, i_group, seed);
+int
+aeif_cond_alpha::Init( int i_node_0, int n_node, int n_port, int i_group, unsigned long long* seed )
+{
+ BaseNeuron::Init( i_node_0, n_node, 2 /*n_port*/, i_group, seed );
node_type_ = i_aeif_cond_alpha_model;
n_scal_var_ = N_SCAL_VAR;
n_var_ = n_scal_var_;
@@ -108,45 +102,48 @@ int aeif_cond_alpha::Init(int i_node_0, int n_node, int n_port,
n_param_ = n_scal_param_;
n_group_param_ = N_GROUP_PARAM;
- group_param_ = new float[N_GROUP_PARAM];
-
+ group_param_ = new float[ N_GROUP_PARAM ];
+
scal_var_name_ = aeif_cond_alpha_scal_var_name;
scal_param_name_ = aeif_cond_alpha_scal_param_name;
group_param_name_ = aeif_cond_alpha_group_param_name;
- //rk5_data_struct_.node_type_ = i_aeif_cond_alpha_model;
+ // rk5_data_struct_.node_type_ = i_aeif_cond_alpha_model;
rk5_data_struct_.i_node_0_ = i_node_0_;
- SetGroupParam("h_min_rel", 1.0e-3);
- SetGroupParam("h0_rel", 1.0e-2);
- h_ = h0_rel_* 0.1;
-
- rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_);
+ SetGroupParam( "h_min_rel", 1.0e-3 );
+ SetGroupParam( "h0_rel", 1.0e-2 );
+ h_ = h0_rel_ * 0.1;
+
+ rk5_.Init( n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_ );
var_arr_ = rk5_.GetYArr();
param_arr_ = rk5_.GetParamArr();
- port_weight_arr_ = GetParamArr() + GetScalParamIdx("g0_ex");
+ port_weight_arr_ = GetParamArr() + GetScalParamIdx( "g0_ex" );
port_weight_arr_step_ = n_param_;
port_weight_port_step_ = 1;
- port_input_arr_ = GetVarArr() + GetScalVarIdx("g1_ex");
+ port_input_arr_ = GetVarArr() + GetScalVarIdx( "g1_ex" );
port_input_arr_step_ = n_var_;
port_input_port_step_ = 1;
- den_delay_arr_ = GetParamArr() + GetScalParamIdx("den_delay");
+ den_delay_arr_ = GetParamArr() + GetScalParamIdx( "den_delay" );
return 0;
}
-int aeif_cond_alpha::Calibrate(double time_min, float time_resolution)
+int
+aeif_cond_alpha::Calibrate( double time_min, float time_resolution )
{
- h_min_ = h_min_rel_* time_resolution;
- h_ = h0_rel_* time_resolution;
- rk5_.Calibrate(time_min, h_, rk5_data_struct_);
-
+ h_min_ = h_min_rel_ * time_resolution;
+ h_ = h0_rel_ * time_resolution;
+ rk5_.Calibrate( time_min, h_, rk5_data_struct_ );
+
return 0;
}
-int aeif_cond_alpha::Update(long long it, double t1) {
- rk5_.Update(t1, h_min_, rk5_data_struct_);
+int
+aeif_cond_alpha::Update( long long it, double t1 )
+{
+ rk5_.Update< N_SCAL_VAR, N_SCAL_PARAM >( t1, h_min_, rk5_data_struct_ );
return 0;
}
diff --git a/src/aeif_cond_alpha.h b/src/aeif_cond_alpha.h
index 226241fe9..0338879a6 100644
--- a/src/aeif_cond_alpha.h
+++ b/src/aeif_cond_alpha.h
@@ -21,19 +21,16 @@
*/
-
-
-
#ifndef AEIFCONDALPHA_H
#define AEIFCONDALPHA_H
-#include
-#include
-#include "cuda_error.h"
-#include "rk5.h"
-#include "node_group.h"
#include "base_neuron.h"
+#include "cuda_error.h"
#include "neuron_models.h"
+#include "node_group.h"
+#include "rk5.h"
+#include
+#include
/* BeginUserDocs: neuron, integrate-and-fire, adaptive threshold, conductance-based
@@ -45,7 +42,7 @@ Conductance-based adaptive exponential integrate-and-fire neuron model
Description
+++++++++++
-``aeif_cond_alpha`` is a conductance-based adaptive exponential
+``aeif_cond_alpha`` is a conductance-based adaptive exponential
integrate-and-fire neuron model according to [1]_ with synaptic
conductance modeled by an alpha function, as described in [2]_
@@ -123,9 +120,9 @@ tau_syn_in ms Time constant of inhibitory synaptic conductance
============= ======= =========================================================
**Integration parameters**
-------------------------------------------------------------------------------
-h0_rel real Starting step in ODE integration relative to time
+h0_rel real Starting step in ODE integration relative to time
resolution
-h_min_rel real Minimum step in ODE integration relative to time
+h_min_rel real Minimum step in ODE integration relative to time
resolution
============= ======= =========================================================
@@ -148,7 +145,7 @@ aeif_cond_alpha_multisynapse, aeif_cond_beta
EndUserDocs */
-//#define MAX_PORT_NUM 20
+// #define MAX_PORT_NUM 20
struct aeif_cond_alpha_rk5
{
@@ -157,30 +154,32 @@ struct aeif_cond_alpha_rk5
class aeif_cond_alpha : public BaseNeuron
{
- public:
- RungeKutta5 rk5_;
+public:
+ RungeKutta5< aeif_cond_alpha_rk5 > rk5_;
float h_min_;
float h_;
aeif_cond_alpha_rk5 rk5_data_struct_;
-
- int Init(int i_node_0, int n_neuron, int n_port, int i_group,
- unsigned long long *seed);
-
- int Calibrate(double time_min, float time_resolution);
-
- int Update(long long it, double t1);
-
- int GetX(int i_neuron, int n_node, double *x) {
- return rk5_.GetX(i_neuron, n_node, x);
+
+ int Init( int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long* seed );
+
+ int Calibrate( double time_min, float time_resolution );
+
+ int Update( long long it, double t1 );
+
+ int
+ GetX( int i_neuron, int n_node, double* x )
+ {
+ return rk5_.GetX( i_neuron, n_node, x );
}
-
- int GetY(int i_var, int i_neuron, int n_node, float *y) {
- return rk5_.GetY(i_var, i_neuron, n_node, y);
+
+ int
+ GetY( int i_var, int i_neuron, int n_node, float* y )
+ {
+ return rk5_.GetY( i_var, i_neuron, n_node, y );
}
-
- template
- int UpdateNR(long long it, double t1);
+ template < int N_PORT >
+ int UpdateNR( long long it, double t1 );
};
#endif
diff --git a/src/aeif_cond_alpha_kernel.h b/src/aeif_cond_alpha_kernel.h
index 52a31ec17..80c529251 100644
--- a/src/aeif_cond_alpha_kernel.h
+++ b/src/aeif_cond_alpha_kernel.h
@@ -21,25 +21,23 @@
*/
-
-
-
#ifndef AEIFCONDALPHAKERNEL_H
#define AEIFCONDALPHAKERNEL_H
#include
- //#include
-#include "spike_buffer.h"
-#include "node_group.h"
+// #include
#include "aeif_cond_alpha.h"
+#include "node_group.h"
+#include "spike_buffer.h"
-#define MIN(a,b) (((a)<(b))?(a):(b))
+#define MIN( a, b ) ( ( ( a ) < ( b ) ) ? ( a ) : ( b ) )
extern __constant__ float NESTGPUTimeResolution;
namespace aeif_cond_alpha_ns
{
-enum ScalVarIndexes {
+enum ScalVarIndexes
+{
i_g_ex = 0,
i_g_in,
i_g1_ex,
@@ -49,7 +47,8 @@ enum ScalVarIndexes {
N_SCAL_VAR
};
-enum ScalParamIndexes {
+enum ScalParamIndexes
+{
i_g0_ex = 0,
i_g0_in,
i_E_rev_ex,
@@ -73,22 +72,16 @@ enum ScalParamIndexes {
N_SCAL_PARAM
};
-enum GroupParamIndexes {
- i_h_min_rel = 0, // Min. step in ODE integr. relative to time resolution
- i_h0_rel, // Starting step in ODE integr. relative to time resolution
+enum GroupParamIndexes
+{
+ i_h_min_rel = 0, // Min. step in ODE integr. relative to time resolution
+ i_h0_rel, // Starting step in ODE integr. relative to time resolution
N_GROUP_PARAM
};
-const std::string aeif_cond_alpha_scal_var_name[N_SCAL_VAR] = {
- "g_ex",
- "g_in",
- "g1_ex",
- "g1_in",
- "V_m",
- "w"
-};
+const std::string aeif_cond_alpha_scal_var_name[ N_SCAL_VAR ] = { "g_ex", "g_in", "g1_ex", "g1_in", "V_m", "w" };
-const std::string aeif_cond_alpha_scal_param_name[N_SCAL_PARAM] = {
+const std::string aeif_cond_alpha_scal_param_name[ N_SCAL_PARAM ] = {
"g0_ex",
"g0_in",
"E_rev_ex",
@@ -111,75 +104,70 @@ const std::string aeif_cond_alpha_scal_param_name[N_SCAL_PARAM] = {
"den_delay",
};
-const std::string aeif_cond_alpha_group_param_name[N_GROUP_PARAM] = {
- "h_min_rel",
- "h0_rel"
-};
+const std::string aeif_cond_alpha_group_param_name[ N_GROUP_PARAM ] = { "h_min_rel", "h0_rel" };
//
// I know that defines are "bad", but the defines below make the
// following equations much more readable.
// For every rule there is some exceptions!
//
-#define g_ex y[i_g_ex]
-#define g1_ex y[i_g1_ex]
-#define g_in y[i_g_in]
-#define g1_in y[i_g1_in]
-#define V_m y[i_V_m]
-#define w y[i_w]
-
-#define dg_exdt dydx[i_g_ex]
-#define dg1_exdt dydx[i_g1_ex]
-#define dg_indt dydx[i_g_in]
-#define dg1_indt dydx[i_g1_in]
-#define dVdt dydx[i_V_m]
-#define dwdt dydx[i_w]
-
-#define g0_ex param[i_g0_ex]
-#define g0_in param[i_g0_in]
-#define E_rev_ex param[i_E_rev_ex]
-#define E_rev_in param[i_E_rev_in]
-#define tau_syn_ex param[i_tau_syn_ex]
-#define tau_syn_in param[i_tau_syn_in]
-#define V_th param[i_V_th]
-#define Delta_T param[i_Delta_T]
-#define g_L param[i_g_L]
-#define E_L param[i_E_L]
-#define C_m param[i_C_m]
-#define a param[i_a]
-#define b param[i_b]
-#define tau_w param[i_tau_w]
-#define I_e param[i_I_e]
-#define V_peak param[i_V_peak]
-#define V_reset param[i_V_reset]
-#define t_ref param[i_t_ref]
-#define refractory_step param[i_refractory_step]
-#define den_delay param[i_den_delay]
-
-#define h_min_rel_ group_param_[i_h_min_rel]
-#define h0_rel_ group_param_[i_h0_rel]
-
-
- template //, class DataStruct>
-__device__
- void Derivatives(double x, float *y, float *dydx, float *param,
- aeif_cond_alpha_rk5 data_struct)
+#define g_ex y[ i_g_ex ]
+#define g1_ex y[ i_g1_ex ]
+#define g_in y[ i_g_in ]
+#define g1_in y[ i_g1_in ]
+#define V_m y[ i_V_m ]
+#define w y[ i_w ]
+
+#define dg_exdt dydx[ i_g_ex ]
+#define dg1_exdt dydx[ i_g1_ex ]
+#define dg_indt dydx[ i_g_in ]
+#define dg1_indt dydx[ i_g1_in ]
+#define dVdt dydx[ i_V_m ]
+#define dwdt dydx[ i_w ]
+
+#define g0_ex param[ i_g0_ex ]
+#define g0_in param[ i_g0_in ]
+#define E_rev_ex param[ i_E_rev_ex ]
+#define E_rev_in param[ i_E_rev_in ]
+#define tau_syn_ex param[ i_tau_syn_ex ]
+#define tau_syn_in param[ i_tau_syn_in ]
+#define V_th param[ i_V_th ]
+#define Delta_T param[ i_Delta_T ]
+#define g_L param[ i_g_L ]
+#define E_L param[ i_E_L ]
+#define C_m param[ i_C_m ]
+#define a param[ i_a ]
+#define b param[ i_b ]
+#define tau_w param[ i_tau_w ]
+#define I_e param[ i_I_e ]
+#define V_peak param[ i_V_peak ]
+#define V_reset param[ i_V_reset ]
+#define t_ref param[ i_t_ref ]
+#define refractory_step param[ i_refractory_step ]
+#define den_delay param[ i_den_delay ]
+
+#define h_min_rel_ group_param_[ i_h_min_rel ]
+#define h0_rel_ group_param_[ i_h0_rel ]
+
+
+template < int NVAR, int NPARAM > //, class DataStruct>
+__device__ void
+Derivatives( double x, float* y, float* dydx, float* param, aeif_cond_alpha_rk5 data_struct )
{
float I_syn_ex = 0.0;
float I_syn_in = 0.0;
- float V = ( refractory_step > 0 ) ? V_reset : MIN(V_m, V_peak);
+ float V = ( refractory_step > 0 ) ? V_reset : MIN( V_m, V_peak );
- I_syn_ex += g_ex*(E_rev_ex - V);
- I_syn_in += g_in*(E_rev_in - V);
+ I_syn_ex += g_ex * ( E_rev_ex - V );
+ I_syn_in += g_in * ( E_rev_in - V );
- float V_spike = Delta_T*exp((V - V_th)/Delta_T);
+ float V_spike = Delta_T * exp( ( V - V_th ) / Delta_T );
- dVdt = ( refractory_step > 0 ) ? 0 :
- ( -g_L*(V - E_L - V_spike) + I_syn_ex + I_syn_in - w + I_e) / C_m;
+ dVdt = ( refractory_step > 0 ) ? 0 : ( -g_L * ( V - E_L - V_spike ) + I_syn_ex + I_syn_in - w + I_e ) / C_m;
// Adaptation current w.
- dwdt = (a*(V - E_L) - w) / tau_w;
+ dwdt = ( a * ( V - E_L ) - w ) / tau_w;
// Synaptic conductance derivative
dg1_exdt = -g1_ex / tau_syn_ex;
dg_exdt = g1_ex - g_ex / tau_syn_ex;
@@ -187,39 +175,44 @@ __device__
dg_indt = g1_in - g_in / tau_syn_in;
}
- template //, class DataStruct>
-__device__
- void ExternalUpdate
- (double x, float *y, float *param, bool end_time_step,
- aeif_cond_alpha_rk5 data_struct)
+template < int NVAR, int NPARAM > //, class DataStruct>
+__device__ void
+ExternalUpdate( double x, float* y, float* param, bool end_time_step, aeif_cond_alpha_rk5 data_struct )
{
- if ( V_m < -1.0e3) { // numerical instability
- printf("V_m out of lower bound\n");
+ if ( V_m < -1.0e3 )
+ { // numerical instability
+ printf( "V_m out of lower bound\n" );
V_m = V_reset;
- w=0;
+ w = 0;
return;
}
- if ( w < -1.0e6 || w > 1.0e6) { // numerical instability
- printf("w out of bound\n");
+ if ( w < -1.0e6 || w > 1.0e6 )
+ { // numerical instability
+ printf( "w out of bound\n" );
V_m = V_reset;
- w=0;
+ w = 0;
return;
}
- if (refractory_step > 0.0) {
+ if ( refractory_step > 0.0 )
+ {
V_m = V_reset;
- if (end_time_step) {
+ if ( end_time_step )
+ {
refractory_step -= 1.0;
}
}
- else {
- if ( V_m >= V_peak ) { // send spike
+ else
+ {
+ if ( V_m >= V_peak )
+ { // send spike
int neuron_idx = threadIdx.x + blockIdx.x * blockDim.x;
- PushSpike(data_struct.i_node_0_ + neuron_idx, 1.0);
+ PushSpike( data_struct.i_node_0_ + neuron_idx, 1.0 );
V_m = V_reset;
w += b; // spike-driven adaptation
- refractory_step = (int)::round(t_ref/NESTGPUTimeResolution);
- if (refractory_step<0) {
- refractory_step = 0;
+ refractory_step = ( int ) ::round( t_ref / NESTGPUTimeResolution );
+ if ( refractory_step < 0 )
+ {
+ refractory_step = 0;
}
}
}
@@ -228,25 +221,20 @@ __device__
};
-int Update(long long it, double t1);
+int Update( long long it, double t1 );
-template
-__device__
-void Derivatives(double x, float *y, float *dydx, float *param,
- aeif_cond_alpha_rk5 data_struct)
+template < int NVAR, int NPARAM >
+__device__ void
+Derivatives( double x, float* y, float* dydx, float* param, aeif_cond_alpha_rk5 data_struct )
{
- aeif_cond_alpha_ns::Derivatives(x, y, dydx, param,
- data_struct);
+ aeif_cond_alpha_ns::Derivatives< NVAR, NPARAM >( x, y, dydx, param, data_struct );
}
-template
-__device__
-void ExternalUpdate(double x, float *y, float *param, bool end_time_step,
- aeif_cond_alpha_rk5 data_struct)
+template < int NVAR, int NPARAM >
+__device__ void
+ExternalUpdate( double x, float* y, float* param, bool end_time_step, aeif_cond_alpha_rk5 data_struct )
{
- aeif_cond_alpha_ns::ExternalUpdate(x, y, param,
- end_time_step,
- data_struct);
+ aeif_cond_alpha_ns::ExternalUpdate< NVAR, NPARAM >( x, y, param, end_time_step, data_struct );
}
diff --git a/src/aeif_cond_alpha_multisynapse.cu b/src/aeif_cond_alpha_multisynapse.cu
index 3a03604a5..fbec4db7a 100644
--- a/src/aeif_cond_alpha_multisynapse.cu
+++ b/src/aeif_cond_alpha_multisynapse.cu
@@ -21,25 +21,21 @@
*/
-
-
-
-#include
-#include
-#include
+#include "aeif_cond_alpha_multisynapse.h"
#include "aeif_cond_alpha_multisynapse_kernel.h"
#include "rk5.h"
-#include "aeif_cond_alpha_multisynapse.h"
+#include
+#include
+#include
namespace aeif_cond_alpha_multisynapse_ns
{
-__device__
-void NodeInit(int n_var, int n_param, double x, float *y, float *param,
- aeif_cond_alpha_multisynapse_rk5 data_struct)
+__device__ void
+NodeInit( int n_var, int n_param, double x, float* y, float* param, aeif_cond_alpha_multisynapse_rk5 data_struct )
{
- //int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
- int n_port = (n_var-N_SCAL_VAR)/N_PORT_VAR;
+ // int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
+ int n_port = ( n_var - N_SCAL_VAR ) / N_PORT_VAR;
V_th = -50.4;
Delta_T = 2.0;
@@ -54,54 +50,54 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param,
V_reset = -60.0;
t_ref = 0.0;
den_delay = 0.0;
-
+
V_m = E_L;
w = 0;
refractory_step = 0;
- for (int i = 0; i
-int aeif_cond_alpha_multisynapse::UpdateNR<0>(long long it, double t1)
+int
+aeif_cond_alpha_multisynapse::UpdateNR< 0 >( long long it, double t1 )
{
return 0;
}
-int aeif_cond_alpha_multisynapse::Update(long long it, double t1) {
- UpdateNR(it, t1);
+int
+aeif_cond_alpha_multisynapse::Update( long long it, double t1 )
+{
+ UpdateNR< MAX_PORT_NUM >( it, t1 );
return 0;
}
diff --git a/src/aeif_cond_alpha_multisynapse.h b/src/aeif_cond_alpha_multisynapse.h
index 4a72b895d..63fc34238 100644
--- a/src/aeif_cond_alpha_multisynapse.h
+++ b/src/aeif_cond_alpha_multisynapse.h
@@ -21,19 +21,16 @@
*/
-
-
-
#ifndef AEIFCONDALPHAMULTISYNAPSE_H
#define AEIFCONDALPHAMULTISYNAPSE_H
-#include
-#include
-#include "cuda_error.h"
-#include "rk5.h"
-#include "node_group.h"
#include "base_neuron.h"
+#include "cuda_error.h"
#include "neuron_models.h"
+#include "node_group.h"
+#include "rk5.h"
+#include
+#include
/* BeginUserDocs: neuron, integrate-and-fire, adaptive threshold, conductance-based
@@ -46,7 +43,7 @@ Conductance-based adaptive exponential integrate-and-fire neuron model
Description
+++++++++++
-``aeif_cond_alpha_multisynapse`` is a conductance-based adaptive exponential
+``aeif_cond_alpha_multisynapse`` is a conductance-based adaptive exponential
integrate-and-fire neuron model according to [1]_ with multiple
synaptic time constants, and synaptic conductance modeled by an
alpha function.
@@ -134,9 +131,9 @@ tau_syn list of ms Time constant of synaptic conductance
============= ======= =========================================================
**Integration parameters**
-------------------------------------------------------------------------------
-h0_rel real Starting step in ODE integration relative to time
+h0_rel real Starting step in ODE integration relative to time
resolution
-h_min_rel real Minimum step in ODE integration relative to time
+h_min_rel real Minimum step in ODE integration relative to time
resolution
============= ======= =========================================================
@@ -169,30 +166,32 @@ struct aeif_cond_alpha_multisynapse_rk5
class aeif_cond_alpha_multisynapse : public BaseNeuron
{
- public:
- RungeKutta5 rk5_;
+public:
+ RungeKutta5< aeif_cond_alpha_multisynapse_rk5 > rk5_;
float h_min_;
float h_;
aeif_cond_alpha_multisynapse_rk5 rk5_data_struct_;
-
- int Init(int i_node_0, int n_neuron, int n_port, int i_group,
- unsigned long long *seed);
-
- int Calibrate(double time_min, float time_resolution);
-
- int Update(long long it, double t1);
-
- int GetX(int i_neuron, int n_node, double *x) {
- return rk5_.GetX(i_neuron, n_node, x);
+
+ int Init( int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long* seed );
+
+ int Calibrate( double time_min, float time_resolution );
+
+ int Update( long long it, double t1 );
+
+ int
+ GetX( int i_neuron, int n_node, double* x )
+ {
+ return rk5_.GetX( i_neuron, n_node, x );
}
-
- int GetY(int i_var, int i_neuron, int n_node, float *y) {
- return rk5_.GetY(i_var, i_neuron, n_node, y);
+
+ int
+ GetY( int i_var, int i_neuron, int n_node, float* y )
+ {
+ return rk5_.GetY( i_var, i_neuron, n_node, y );
}
-
- template
- int UpdateNR(long long it, double t1);
+ template < int N_PORT >
+ int UpdateNR( long long it, double t1 );
};
#endif
diff --git a/src/aeif_cond_alpha_multisynapse_kernel.h b/src/aeif_cond_alpha_multisynapse_kernel.h
index b4d206f85..fc9424a35 100644
--- a/src/aeif_cond_alpha_multisynapse_kernel.h
+++ b/src/aeif_cond_alpha_multisynapse_kernel.h
@@ -21,37 +21,37 @@
*/
-
-
-
#ifndef AEIFCONDALPHAMULTISYNAPSEKERNEL_H
#define AEIFCONDALPHAMULTISYNAPSEKERNEL_H
#include
- //#include
-#include "spike_buffer.h"
-#include "node_group.h"
+// #include
#include "aeif_cond_alpha_multisynapse.h"
+#include "node_group.h"
+#include "spike_buffer.h"
-#define MIN(a,b) (((a)<(b))?(a):(b))
+#define MIN( a, b ) ( ( ( a ) < ( b ) ) ? ( a ) : ( b ) )
extern __constant__ float NESTGPUTimeResolution;
namespace aeif_cond_alpha_multisynapse_ns
{
-enum ScalVarIndexes {
+enum ScalVarIndexes
+{
i_V_m = 0,
i_w,
N_SCAL_VAR
};
-enum PortVarIndexes {
+enum PortVarIndexes
+{
i_g = 0,
i_g1,
N_PORT_VAR
};
-enum ScalParamIndexes {
+enum ScalParamIndexes
+{
i_V_th = 0,
i_Delta_T,
i_g_L,
@@ -69,31 +69,26 @@ enum ScalParamIndexes {
N_SCAL_PARAM
};
-enum PortParamIndexes {
+enum PortParamIndexes
+{
i_E_rev = 0,
i_tau_syn,
i_g0,
N_PORT_PARAM
};
-enum GroupParamIndexes {
- i_h_min_rel = 0, // Min. step in ODE integr. relative to time resolution
- i_h0_rel, // Starting step in ODE integr. relative to time resolution
+enum GroupParamIndexes
+{
+ i_h_min_rel = 0, // Min. step in ODE integr. relative to time resolution
+ i_h0_rel, // Starting step in ODE integr. relative to time resolution
N_GROUP_PARAM
};
-const std::string aeif_cond_alpha_multisynapse_scal_var_name[N_SCAL_VAR] = {
- "V_m",
- "w"
-};
+const std::string aeif_cond_alpha_multisynapse_scal_var_name[ N_SCAL_VAR ] = { "V_m", "w" };
-const std::string aeif_cond_alpha_multisynapse_port_var_name[N_PORT_VAR] = {
- "g",
- "g1"
-};
+const std::string aeif_cond_alpha_multisynapse_port_var_name[ N_PORT_VAR ] = { "g", "g1" };
-const std::string aeif_cond_alpha_multisynapse_scal_param_name[N_SCAL_PARAM] = {
- "V_th",
+const std::string aeif_cond_alpha_multisynapse_scal_param_name[ N_SCAL_PARAM ] = { "V_th",
"Delta_T",
"g_L",
"E_L",
@@ -106,117 +101,117 @@ const std::string aeif_cond_alpha_multisynapse_scal_param_name[N_SCAL_PARAM] = {
"V_reset",
"t_ref",
"refractory_step",
- "den_delay"
-};
+ "den_delay" };
-const std::string aeif_cond_alpha_multisynapse_port_param_name[N_PORT_PARAM] = {
- "E_rev",
- "tau_syn",
- "g0"
-};
+const std::string aeif_cond_alpha_multisynapse_port_param_name[ N_PORT_PARAM ] = { "E_rev", "tau_syn", "g0" };
-const std::string aeif_cond_alpha_multisynapse_group_param_name[N_GROUP_PARAM] = {
- "h_min_rel",
- "h0_rel"
-};
+const std::string aeif_cond_alpha_multisynapse_group_param_name[ N_GROUP_PARAM ] = { "h_min_rel", "h0_rel" };
//
// I know that defines are "bad", but the defines below make the
// following equations much more readable.
// For every rule there is some exceptions!
//
-#define V_m y[i_V_m]
-#define w y[i_w]
-#define g(i) y[N_SCAL_VAR + N_PORT_VAR*i + i_g]
-#define g1(i) y[N_SCAL_VAR + N_PORT_VAR*i + i_g1]
-
-#define dVdt dydx[i_V_m]
-#define dwdt dydx[i_w]
-#define dgdt(i) dydx[N_SCAL_VAR + N_PORT_VAR*i + i_g]
-#define dg1dt(i) dydx[N_SCAL_VAR + N_PORT_VAR*i + i_g1]
-
-#define V_th param[i_V_th]
-#define Delta_T param[i_Delta_T]
-#define g_L param[i_g_L]
-#define E_L param[i_E_L]
-#define C_m param[i_C_m]
-#define a param[i_a]
-#define b param[i_b]
-#define tau_w param[i_tau_w]
-#define I_e param[i_I_e]
-#define V_peak param[i_V_peak]
-#define V_reset param[i_V_reset]
-#define t_ref param[i_t_ref]
-#define refractory_step param[i_refractory_step]
-#define den_delay param[i_den_delay]
-
-#define E_rev(i) param[N_SCAL_PARAM + N_PORT_PARAM*i + i_E_rev]
-#define tau_syn(i) param[N_SCAL_PARAM + N_PORT_PARAM*i + i_tau_syn]
-#define g0(i) param[N_SCAL_PARAM + N_PORT_PARAM*i + i_g0]
-
-#define h_min_rel_ group_param_[i_h_min_rel]
-#define h0_rel_ group_param_[i_h0_rel]
-
-
- template //, class DataStruct>
-__device__
- void Derivatives(double x, float *y, float *dydx, float *param,
- aeif_cond_alpha_multisynapse_rk5 data_struct)
+#define V_m y[ i_V_m ]
+#define w y[ i_w ]
+#define g( i ) y[ N_SCAL_VAR + N_PORT_VAR * i + i_g ]
+#define g1( i ) y[ N_SCAL_VAR + N_PORT_VAR * i + i_g1 ]
+
+#define dVdt dydx[ i_V_m ]
+#define dwdt dydx[ i_w ]
+#define dgdt( i ) dydx[ N_SCAL_VAR + N_PORT_VAR * i + i_g ]
+#define dg1dt( i ) dydx[ N_SCAL_VAR + N_PORT_VAR * i + i_g1 ]
+
+#define V_th param[ i_V_th ]
+#define Delta_T param[ i_Delta_T ]
+#define g_L param[ i_g_L ]
+#define E_L param[ i_E_L ]
+#define C_m param[ i_C_m ]
+#define a param[ i_a ]
+#define b param[ i_b ]
+#define tau_w param[ i_tau_w ]
+#define I_e param[ i_I_e ]
+#define V_peak param[ i_V_peak ]
+#define V_reset param[ i_V_reset ]
+#define t_ref param[ i_t_ref ]
+#define refractory_step param[ i_refractory_step ]
+#define den_delay param[ i_den_delay ]
+
+#define E_rev( i ) param[ N_SCAL_PARAM + N_PORT_PARAM * i + i_E_rev ]
+#define tau_syn( i ) param[ N_SCAL_PARAM + N_PORT_PARAM * i + i_tau_syn ]
+#define g0( i ) param[ N_SCAL_PARAM + N_PORT_PARAM * i + i_g0 ]
+
+#define h_min_rel_ group_param_[ i_h_min_rel ]
+#define h0_rel_ group_param_[ i_h0_rel ]
+
+
+template < int NVAR, int NPARAM > //, class DataStruct>
+__device__ void
+Derivatives( double x, float* y, float* dydx, float* param, aeif_cond_alpha_multisynapse_rk5 data_struct )
{
- enum { n_port = (NVAR-N_SCAL_VAR)/N_PORT_VAR };
+ enum
+ {
+ n_port = ( NVAR - N_SCAL_VAR ) / N_PORT_VAR
+ };
float I_syn = 0.0;
- float V = ( refractory_step > 0 ) ? V_reset : MIN(V_m, V_peak);
- for (int i = 0; i 0 ) ? V_reset : MIN( V_m, V_peak );
+ for ( int i = 0; i < n_port; i++ )
+ {
+ I_syn += g( i ) * ( E_rev( i ) - V );
}
- float V_spike = Delta_T*exp((V - V_th)/Delta_T);
+ float V_spike = Delta_T * exp( ( V - V_th ) / Delta_T );
- dVdt = ( refractory_step > 0 ) ? 0 :
- ( -g_L*(V - E_L - V_spike) + I_syn - w + I_e) / C_m;
+ dVdt = ( refractory_step > 0 ) ? 0 : ( -g_L * ( V - E_L - V_spike ) + I_syn - w + I_e ) / C_m;
// Adaptation current w.
- dwdt = (a*(V - E_L) - w) / tau_w;
- for (int i=0; i //, class DataStruct>
-__device__
- void ExternalUpdate
- (double x, float *y, float *param, bool end_time_step,
- aeif_cond_alpha_multisynapse_rk5 data_struct)
+template < int NVAR, int NPARAM > //, class DataStruct>
+__device__ void
+ExternalUpdate( double x, float* y, float* param, bool end_time_step, aeif_cond_alpha_multisynapse_rk5 data_struct )
{
- if ( V_m < -1.0e3) { // numerical instability
- printf("V_m out of lower bound\n");
+ if ( V_m < -1.0e3 )
+ { // numerical instability
+ printf( "V_m out of lower bound\n" );
V_m = V_reset;
- w=0;
+ w = 0;
return;
}
- if ( w < -1.0e6 || w > 1.0e6) { // numerical instability
- printf("w out of bound\n");
+ if ( w < -1.0e6 || w > 1.0e6 )
+ { // numerical instability
+ printf( "w out of bound\n" );
V_m = V_reset;
- w=0;
+ w = 0;
return;
}
- if (refractory_step > 0.0) {
+ if ( refractory_step > 0.0 )
+ {
V_m = V_reset;
- if (end_time_step) {
+ if ( end_time_step )
+ {
refractory_step -= 1.0;
}
}
- else {
- if ( V_m >= V_peak ) { // send spike
+ else
+ {
+ if ( V_m >= V_peak )
+ { // send spike
int neuron_idx = threadIdx.x + blockIdx.x * blockDim.x;
- PushSpike(data_struct.i_node_0_ + neuron_idx, 1.0);
+ PushSpike( data_struct.i_node_0_ + neuron_idx, 1.0 );
V_m = V_reset;
w += b; // spike-driven adaptation
- refractory_step = (int)::round(t_ref/NESTGPUTimeResolution);
- if (refractory_step<0) {
- refractory_step = 0;
+ refractory_step = ( int ) ::round( t_ref / NESTGPUTimeResolution );
+ if ( refractory_step < 0 )
+ {
+ refractory_step = 0;
}
}
}
@@ -226,43 +221,40 @@ __device__
};
template <>
-int aeif_cond_alpha_multisynapse::UpdateNR<0>(long long it, double t1);
+int aeif_cond_alpha_multisynapse::UpdateNR< 0 >( long long it, double t1 );
-template
-int aeif_cond_alpha_multisynapse::UpdateNR(long long it, double t1)
+template < int N_PORT >
+int
+aeif_cond_alpha_multisynapse::UpdateNR( long long it, double t1 )
{
- if (N_PORT == n_port_) {
- const int NVAR = aeif_cond_alpha_multisynapse_ns::N_SCAL_VAR
- + aeif_cond_alpha_multisynapse_ns::N_PORT_VAR*N_PORT;
- const int NPARAM = aeif_cond_alpha_multisynapse_ns::N_SCAL_PARAM
- + aeif_cond_alpha_multisynapse_ns::N_PORT_PARAM*N_PORT;
+ if ( N_PORT == n_port_ )
+ {
+ const int NVAR = aeif_cond_alpha_multisynapse_ns::N_SCAL_VAR + aeif_cond_alpha_multisynapse_ns::N_PORT_VAR * N_PORT;
+ const int NPARAM =
+ aeif_cond_alpha_multisynapse_ns::N_SCAL_PARAM + aeif_cond_alpha_multisynapse_ns::N_PORT_PARAM * N_PORT;
- rk5_.Update(t1, h_min_, rk5_data_struct_);
+ rk5_.Update< NVAR, NPARAM >( t1, h_min_, rk5_data_struct_ );
}
- else {
- UpdateNR(it, t1);
+ else
+ {
+ UpdateNR< N_PORT - 1 >( it, t1 );
}
return 0;
}
-template
-__device__
-void Derivatives(double x, float *y, float *dydx, float *param,
- aeif_cond_alpha_multisynapse_rk5 data_struct)
+template < int NVAR, int NPARAM >
+__device__ void
+Derivatives( double x, float* y, float* dydx, float* param, aeif_cond_alpha_multisynapse_rk5 data_struct )
{
- aeif_cond_alpha_multisynapse_ns::Derivatives(x, y, dydx, param,
- data_struct);
+ aeif_cond_alpha_multisynapse_ns::Derivatives< NVAR, NPARAM >( x, y, dydx, param, data_struct );
}
-template
-__device__
-void ExternalUpdate(double x, float *y, float *param, bool end_time_step,
- aeif_cond_alpha_multisynapse_rk5 data_struct)
+template < int NVAR, int NPARAM >
+__device__ void
+ExternalUpdate( double x, float* y, float* param, bool end_time_step, aeif_cond_alpha_multisynapse_rk5 data_struct )
{
- aeif_cond_alpha_multisynapse_ns::ExternalUpdate(x, y, param,
- end_time_step,
- data_struct);
+ aeif_cond_alpha_multisynapse_ns::ExternalUpdate< NVAR, NPARAM >( x, y, param, end_time_step, data_struct );
}
diff --git a/src/aeif_cond_alpha_multisynapse_rk5.h b/src/aeif_cond_alpha_multisynapse_rk5.h
index 159a18eca..433b0e4e1 100644
--- a/src/aeif_cond_alpha_multisynapse_rk5.h
+++ b/src/aeif_cond_alpha_multisynapse_rk5.h
@@ -21,31 +21,24 @@
*/
-
-
-
#ifndef AEIFCONDALPHAMULTISYNAPSERK5_H
#define AEIFCONDALPHAMULTISYNAPSERK5_H
struct aeif_cond_alpha_multisynapse_rk5;
-template
-__device__
-void Derivatives(double x, float *y, float *dydx, float *param,
- aeif_cond_alpha_multisynapse_rk5 data_struct);
+template < int NVAR, int NPARAM >
+__device__ void
+Derivatives( double x, float* y, float* dydx, float* param, aeif_cond_alpha_multisynapse_rk5 data_struct );
-template
-__device__
-void ExternalUpdate(double x, float *y, float *param, bool end_time_step,
- aeif_cond_alpha_multisynapse_rk5 data_struct);
+template < int NVAR, int NPARAM >
+__device__ void
+ExternalUpdate( double x, float* y, float* param, bool end_time_step, aeif_cond_alpha_multisynapse_rk5 data_struct );
-__device__
-void NodeInit(int n_var, int n_param, double x, float *y,
- float *param, aeif_cond_alpha_multisynapse_rk5 data_struct);
+__device__ void
+NodeInit( int n_var, int n_param, double x, float* y, float* param, aeif_cond_alpha_multisynapse_rk5 data_struct );
-__device__
-void NodeCalibrate(int n_var, int n_param, double x, float *y,
- float *param, aeif_cond_alpha_multisynapse_rk5 data_struct);
+__device__ void
+NodeCalibrate( int n_var, int n_param, double x, float* y, float* param, aeif_cond_alpha_multisynapse_rk5 data_struct );
#endif
diff --git a/src/aeif_cond_beta.cu b/src/aeif_cond_beta.cu
index 7d9b196d9..2749a75c6 100644
--- a/src/aeif_cond_beta.cu
+++ b/src/aeif_cond_beta.cu
@@ -21,25 +21,21 @@
*/
-
-
-
-#include
-#include
-#include
+#include "aeif_cond_beta.h"
#include "aeif_cond_beta_kernel.h"
#include "rk5.h"
-#include "aeif_cond_beta.h"
+#include
+#include
+#include
namespace aeif_cond_beta_ns
{
-__device__
-void NodeInit(int n_var, int n_param, double x, float *y, float *param,
- aeif_cond_beta_rk5 data_struct)
+__device__ void
+NodeInit( int n_var, int n_param, double x, float* y, float* param, aeif_cond_beta_rk5 data_struct )
{
- //int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
-
+ // int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
+
V_th = -50.4;
Delta_T = 2.0;
g_L = 30.0;
@@ -59,7 +55,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param,
tau_decay_in = 20.0;
tau_rise_ex = 2.0;
tau_rise_in = 2.0;
-
+
V_m = E_L;
w = 0;
refractory_step = 0;
@@ -69,71 +65,75 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param,
g1_in = 0;
}
-__device__
-void NodeCalibrate(int n_var, int n_param, double x, float *y,
- float *param, aeif_cond_beta_rk5 data_struct)
+__device__ void
+NodeCalibrate( int n_var, int n_param, double x, float* y, float* param, aeif_cond_beta_rk5 data_struct )
{
- //int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
-
+ // int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
+
refractory_step = 0;
-
+
// denominator is computed here to check that it is != 0
float denom1 = tau_decay_ex - tau_rise_ex;
float denom2 = 0;
- if (denom1 != 0) {
+ if ( denom1 != 0 )
+ {
// peak time
- float t_p = tau_decay_ex*tau_rise_ex*log(tau_decay_ex/tau_rise_ex) / denom1;
+ float t_p = tau_decay_ex * tau_rise_ex * log( tau_decay_ex / tau_rise_ex ) / denom1;
// another denominator is computed here to check that it is != 0
- denom2 = exp(-t_p / tau_decay_ex) - exp(-t_p / tau_rise_ex);
+ denom2 = exp( -t_p / tau_decay_ex ) - exp( -t_p / tau_rise_ex );
}
- if (denom2 == 0) { // if rise time == decay time use alpha function
+ if ( denom2 == 0 )
+ { // if rise time == decay time use alpha function
// use normalization for alpha function in this case
g0_ex = M_E / tau_decay_ex;
}
- else { // if rise time != decay time use beta function
+ else
+ { // if rise time != decay time use beta function
// normalization factor for conductance
g0_ex = ( 1. / tau_rise_ex - 1. / tau_decay_ex ) / denom2;
}
denom1 = tau_decay_in - tau_rise_in;
denom2 = 0;
- if (denom1 != 0) {
+ if ( denom1 != 0 )
+ {
// peak time
- float t_p = tau_decay_in*tau_rise_in*log(tau_decay_in/tau_rise_in) / denom1;
+ float t_p = tau_decay_in * tau_rise_in * log( tau_decay_in / tau_rise_in ) / denom1;
// another denominator is computed here to check that it is != 0
- denom2 = exp(-t_p / tau_decay_in) - exp(-t_p / tau_rise_in);
+ denom2 = exp( -t_p / tau_decay_in ) - exp( -t_p / tau_rise_in );
}
- if (denom2 == 0) { // if rise time == decay time use alpha function
+ if ( denom2 == 0 )
+ { // if rise time == decay time use alpha function
// use normalization for alpha function in this case
g0_in = M_E / tau_decay_in;
}
- else { // if rise time != decay time use beta function
+ else
+ { // if rise time != decay time use beta function
// normalization factor for conductance
g0_in = ( 1. / tau_rise_in - 1. / tau_decay_in ) / denom2;
}
}
}
-
-__device__
-void NodeInit(int n_var, int n_param, double x, float *y,
- float *param, aeif_cond_beta_rk5 data_struct)
+
+__device__ void
+NodeInit( int n_var, int n_param, double x, float* y, float* param, aeif_cond_beta_rk5 data_struct )
{
- aeif_cond_beta_ns::NodeInit(n_var, n_param, x, y, param, data_struct);
+ aeif_cond_beta_ns::NodeInit( n_var, n_param, x, y, param, data_struct );
}
-__device__
-void NodeCalibrate(int n_var, int n_param, double x, float *y,
- float *param, aeif_cond_beta_rk5 data_struct)
+__device__ void
+NodeCalibrate( int n_var, int n_param, double x, float* y, float* param, aeif_cond_beta_rk5 data_struct )
{
- aeif_cond_beta_ns::NodeCalibrate(n_var, n_param, x, y, param, data_struct);
+ aeif_cond_beta_ns::NodeCalibrate( n_var, n_param, x, y, param, data_struct );
}
using namespace aeif_cond_beta_ns;
-int aeif_cond_beta::Init(int i_node_0, int n_node, int n_port,
- int i_group, unsigned long long *seed) {
- BaseNeuron::Init(i_node_0, n_node, 2 /*n_port*/, i_group, seed);
+int
+aeif_cond_beta::Init( int i_node_0, int n_node, int n_port, int i_group, unsigned long long* seed )
+{
+ BaseNeuron::Init( i_node_0, n_node, 2 /*n_port*/, i_group, seed );
node_type_ = i_aeif_cond_beta_model;
n_scal_var_ = N_SCAL_VAR;
n_scal_param_ = N_SCAL_PARAM;
@@ -142,45 +142,48 @@ int aeif_cond_beta::Init(int i_node_0, int n_node, int n_port,
n_var_ = n_scal_var_;
n_param_ = n_scal_param_;
- group_param_ = new float[N_GROUP_PARAM];
+ group_param_ = new float[ N_GROUP_PARAM ];
scal_var_name_ = aeif_cond_beta_scal_var_name;
scal_param_name_ = aeif_cond_beta_scal_param_name;
group_param_name_ = aeif_cond_beta_group_param_name;
- //rk5_data_struct_.node_type_ = i_aeif_cond_beta_model;
+ // rk5_data_struct_.node_type_ = i_aeif_cond_beta_model;
rk5_data_struct_.i_node_0_ = i_node_0_;
- SetGroupParam("h_min_rel", 1.0e-3);
- SetGroupParam("h0_rel", 1.0e-2);
- h_ = h0_rel_* 0.1;
-
- rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_);
+ SetGroupParam( "h_min_rel", 1.0e-3 );
+ SetGroupParam( "h0_rel", 1.0e-2 );
+ h_ = h0_rel_ * 0.1;
+
+ rk5_.Init( n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_ );
var_arr_ = rk5_.GetYArr();
param_arr_ = rk5_.GetParamArr();
- port_weight_arr_ = GetParamArr() + GetScalParamIdx("g0_ex");
+ port_weight_arr_ = GetParamArr() + GetScalParamIdx( "g0_ex" );
port_weight_arr_step_ = n_param_;
port_weight_port_step_ = 1;
- port_input_arr_ = GetVarArr() + GetScalVarIdx("g1_ex");
+ port_input_arr_ = GetVarArr() + GetScalVarIdx( "g1_ex" );
port_input_arr_step_ = n_var_;
port_input_port_step_ = 1;
- den_delay_arr_ = GetParamArr() + GetScalParamIdx("den_delay");
+ den_delay_arr_ = GetParamArr() + GetScalParamIdx( "den_delay" );
return 0;
}
-int aeif_cond_beta::Calibrate(double time_min, float time_resolution)
+int
+aeif_cond_beta::Calibrate( double time_min, float time_resolution )
{
- h_min_ = h_min_rel_* time_resolution;
- h_ = h0_rel_* time_resolution;
- rk5_.Calibrate(time_min, h_, rk5_data_struct_);
-
+ h_min_ = h_min_rel_ * time_resolution;
+ h_ = h0_rel_ * time_resolution;
+ rk5_.Calibrate( time_min, h_, rk5_data_struct_ );
+
return 0;
}
-int aeif_cond_beta::Update(long long it, double t1) {
- rk5_.Update(t1, h_min_, rk5_data_struct_);
+int
+aeif_cond_beta::Update( long long it, double t1 )
+{
+ rk5_.Update< N_SCAL_VAR, N_SCAL_PARAM >( t1, h_min_, rk5_data_struct_ );
return 0;
}
diff --git a/src/aeif_cond_beta.h b/src/aeif_cond_beta.h
index b76f9cdf9..c761d7f53 100644
--- a/src/aeif_cond_beta.h
+++ b/src/aeif_cond_beta.h
@@ -21,19 +21,16 @@
*/
-
-
-
#ifndef AEIFCONDBETA_H
#define AEIFCONDBETA_H
-#include
-#include
-#include "cuda_error.h"
-#include "rk5.h"
-#include "node_group.h"
#include "base_neuron.h"
+#include "cuda_error.h"
#include "neuron_models.h"
+#include "node_group.h"
+#include "rk5.h"
+#include
+#include
/* BeginUserDocs: neuron, adaptive threshold, integrate-and-fire, conductance-based
@@ -46,7 +43,7 @@ Conductance-based adaptive exponential integrate-and-fire neuron model
Description
+++++++++++
-``aeif_cond_beta`` is a conductance-based adaptive exponential
+``aeif_cond_beta`` is a conductance-based adaptive exponential
integrate-and-fire neuron model according to [1]_ with synaptic
conductance modeled by a beta function, as described in [2]_.
@@ -126,9 +123,9 @@ tau_decay_in ms Decay time constant of inhibitory synaptic conductanc
========= ======= =========================================================
**Integration parameters**
---------------------------------------------------------------------------
-h0_rel real Starting step in ODE integration relative to time
+h0_rel real Starting step in ODE integration relative to time
resolution
-h_min_rel real Minimum step in ODE integration relative to time
+h_min_rel real Minimum step in ODE integration relative to time
resolution
========= ======= =========================================================
@@ -152,7 +149,7 @@ aeif_cond_beta_multisynapse, aeif_cond_alpha
EndUserDocs */
-//#define MAX_PORT_NUM 20
+// #define MAX_PORT_NUM 20
struct aeif_cond_beta_rk5
{
@@ -161,30 +158,32 @@ struct aeif_cond_beta_rk5
class aeif_cond_beta : public BaseNeuron
{
- public:
- RungeKutta5 rk5_;
+public:
+ RungeKutta5< aeif_cond_beta_rk5 > rk5_;
float h_min_;
float h_;
aeif_cond_beta_rk5 rk5_data_struct_;
-
- int Init(int i_node_0, int n_neuron, int n_port, int i_group,
- unsigned long long *seed);
-
- int Calibrate(double time_min, float time_resolution);
-
- int Update(long long it, double t1);
-
- int GetX(int i_neuron, int n_node, double *x) {
- return rk5_.GetX(i_neuron, n_node, x);
+
+ int Init( int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long* seed );
+
+ int Calibrate( double time_min, float time_resolution );
+
+ int Update( long long it, double t1 );
+
+ int
+ GetX( int i_neuron, int n_node, double* x )
+ {
+ return rk5_.GetX( i_neuron, n_node, x );
}
-
- int GetY(int i_var, int i_neuron, int n_node, float *y) {
- return rk5_.GetY(i_var, i_neuron, n_node, y);
+
+ int
+ GetY( int i_var, int i_neuron, int n_node, float* y )
+ {
+ return rk5_.GetY( i_var, i_neuron, n_node, y );
}
-
- template
- int UpdateNR(long long it, double t1);
+ template < int N_PORT >
+ int UpdateNR( long long it, double t1 );
};
#endif
diff --git a/src/aeif_cond_beta_kernel.h b/src/aeif_cond_beta_kernel.h
index f324342c5..d6af173c7 100644
--- a/src/aeif_cond_beta_kernel.h
+++ b/src/aeif_cond_beta_kernel.h
@@ -21,25 +21,23 @@
*/
-
-
-
#ifndef AEIFCONDBETAKERNEL_H
#define AEIFCONDBETAKERNEL_H
-#include
-#include
-#include "spike_buffer.h"
-#include "node_group.h"
#include "aeif_cond_beta.h"
+#include "node_group.h"
+#include "spike_buffer.h"
+#include
+#include
-#define MIN(a,b) (((a)<(b))?(a):(b))
+#define MIN( a, b ) ( ( ( a ) < ( b ) ) ? ( a ) : ( b ) )
extern __constant__ float NESTGPUTimeResolution;
namespace aeif_cond_beta_ns
{
-enum ScalVarIndexes {
+enum ScalVarIndexes
+{
i_g_ex = 0,
i_g_in,
i_g1_ex,
@@ -49,7 +47,8 @@ enum ScalVarIndexes {
N_SCAL_VAR
};
-enum ScalParamIndexes {
+enum ScalParamIndexes
+{
i_g0_ex = 0,
i_g0_in,
i_E_rev_ex,
@@ -75,24 +74,17 @@ enum ScalParamIndexes {
N_SCAL_PARAM
};
-enum GroupParamIndexes {
- i_h_min_rel = 0, // Min. step in ODE integr. relative to time resolution
- i_h0_rel, // Starting step in ODE integr. relative to time resolution
+enum GroupParamIndexes
+{
+ i_h_min_rel = 0, // Min. step in ODE integr. relative to time resolution
+ i_h0_rel, // Starting step in ODE integr. relative to time resolution
N_GROUP_PARAM
};
-const std::string aeif_cond_beta_scal_var_name[N_SCAL_VAR] = {
- "g_ex",
- "g_in",
- "g1_ex",
- "g1_in",
- "V_m",
- "w"
-};
+const std::string aeif_cond_beta_scal_var_name[ N_SCAL_VAR ] = { "g_ex", "g_in", "g1_ex", "g1_in", "V_m", "w" };
-const std::string aeif_cond_beta_scal_param_name[N_SCAL_PARAM] = {
- "g0_ex",
+const std::string aeif_cond_beta_scal_param_name[ N_SCAL_PARAM ] = { "g0_ex",
"g0_in",
"E_rev_ex",
"E_rev_in",
@@ -113,117 +105,116 @@ const std::string aeif_cond_beta_scal_param_name[N_SCAL_PARAM] = {
"V_reset",
"t_ref",
"refractory_step",
- "den_delay"
-};
+ "den_delay" };
-const std::string aeif_cond_beta_group_param_name[N_GROUP_PARAM] = {
- "h_min_rel",
- "h0_rel"
-};
+const std::string aeif_cond_beta_group_param_name[ N_GROUP_PARAM ] = { "h_min_rel", "h0_rel" };
//
// I know that defines are "bad", but the defines below make the
// following equations much more readable.
// For every rule there is some exceptions!
//
-#define g_ex y[i_g_ex]
-#define g1_ex y[i_g1_ex]
-#define g_in y[i_g_in]
-#define g1_in y[i_g1_in]
-#define V_m y[i_V_m]
-#define w y[i_w]
-
-#define dg_exdt dydx[i_g_ex]
-#define dg1_exdt dydx[i_g1_ex]
-#define dg_indt dydx[i_g_in]
-#define dg1_indt dydx[i_g1_in]
-#define dVdt dydx[i_V_m]
-#define dwdt dydx[i_w]
-
-#define g0_ex param[i_g0_ex]
-#define g0_in param[i_g0_in]
-#define E_rev_ex param[i_E_rev_ex]
-#define E_rev_in param[i_E_rev_in]
-#define tau_rise_ex param[i_tau_rise_ex]
-#define tau_rise_in param[i_tau_rise_in]
-#define tau_decay_ex param[i_tau_decay_ex]
-#define tau_decay_in param[i_tau_decay_in]
-#define V_th param[i_V_th]
-#define Delta_T param[i_Delta_T]
-#define g_L param[i_g_L]
-#define E_L param[i_E_L]
-#define C_m param[i_C_m]
-#define a param[i_a]
-#define b param[i_b]
-#define tau_w param[i_tau_w]
-#define I_e param[i_I_e]
-#define V_peak param[i_V_peak]
-#define V_reset param[i_V_reset]
-#define t_ref param[i_t_ref]
-#define refractory_step param[i_refractory_step]
-#define den_delay param[i_den_delay]
-
-#define h_min_rel_ group_param_[i_h_min_rel]
-#define h0_rel_ group_param_[i_h0_rel]
-
-
- template //, class DataStruct>
-__device__
- void Derivatives(double x, float *y, float *dydx, float *param,
- aeif_cond_beta_rk5 data_struct)
+#define g_ex y[ i_g_ex ]
+#define g1_ex y[ i_g1_ex ]
+#define g_in y[ i_g_in ]
+#define g1_in y[ i_g1_in ]
+#define V_m y[ i_V_m ]
+#define w y[ i_w ]
+
+#define dg_exdt dydx[ i_g_ex ]
+#define dg1_exdt dydx[ i_g1_ex ]
+#define dg_indt dydx[ i_g_in ]
+#define dg1_indt dydx[ i_g1_in ]
+#define dVdt dydx[ i_V_m ]
+#define dwdt dydx[ i_w ]
+
+#define g0_ex param[ i_g0_ex ]
+#define g0_in param[ i_g0_in ]
+#define E_rev_ex param[ i_E_rev_ex ]
+#define E_rev_in param[ i_E_rev_in ]
+#define tau_rise_ex param[ i_tau_rise_ex ]
+#define tau_rise_in param[ i_tau_rise_in ]
+#define tau_decay_ex param[ i_tau_decay_ex ]
+#define tau_decay_in param[ i_tau_decay_in ]
+#define V_th param[ i_V_th ]
+#define Delta_T param[ i_Delta_T ]
+#define g_L param[ i_g_L ]
+#define E_L param[ i_E_L ]
+#define C_m param[ i_C_m ]
+#define a param[ i_a ]
+#define b param[ i_b ]
+#define tau_w param[ i_tau_w ]
+#define I_e param[ i_I_e ]
+#define V_peak param[ i_V_peak ]
+#define V_reset param[ i_V_reset ]
+#define t_ref param[ i_t_ref ]
+#define refractory_step param[ i_refractory_step ]
+#define den_delay param[ i_den_delay ]
+
+#define h_min_rel_ group_param_[ i_h_min_rel ]
+#define h0_rel_ group_param_[ i_h0_rel ]
+
+
+template < int NVAR, int NPARAM > //, class DataStruct>
+__device__ void
+Derivatives( double x, float* y, float* dydx, float* param, aeif_cond_beta_rk5 data_struct )
{
float I_syn_in = 0.0;
float I_syn_ex = 0.0;
- float V = ( refractory_step > 0 ) ? V_reset : MIN(V_m, V_peak);
- I_syn_ex += g_ex*(E_rev_ex - V);
- I_syn_in += g_in*(E_rev_in - V);
+ float V = ( refractory_step > 0 ) ? V_reset : MIN( V_m, V_peak );
+ I_syn_ex += g_ex * ( E_rev_ex - V );
+ I_syn_in += g_in * ( E_rev_in - V );
- float V_spike = Delta_T*exp((V - V_th)/Delta_T);
+ float V_spike = Delta_T * exp( ( V - V_th ) / Delta_T );
- dVdt = ( refractory_step > 0 ) ? 0 :
- ( -g_L*(V - E_L - V_spike) + I_syn_ex + I_syn_in - w + I_e) / C_m;
+ dVdt = ( refractory_step > 0 ) ? 0 : ( -g_L * ( V - E_L - V_spike ) + I_syn_ex + I_syn_in - w + I_e ) / C_m;
// Adaptation current w.
- dwdt = (a*(V - E_L) - w) / tau_w;
+ dwdt = ( a * ( V - E_L ) - w ) / tau_w;
dg1_exdt = -g1_ex / tau_rise_ex;
dg_exdt = g1_ex - g_ex / tau_decay_ex;
dg1_indt = -g1_in / tau_rise_in;
dg_indt = g1_in - g_in / tau_decay_in;
}
- template //, class DataStruct>
-__device__
- void ExternalUpdate
- (double x, float *y, float *param, bool end_time_step,
- aeif_cond_beta_rk5 data_struct)
+template < int NVAR, int NPARAM > //, class DataStruct>
+__device__ void
+ExternalUpdate( double x, float* y, float* param, bool end_time_step, aeif_cond_beta_rk5 data_struct )
{
- if ( V_m < -1.0e3) { // numerical instability
- printf("V_m out of lower bound\n");
+ if ( V_m < -1.0e3 )
+ { // numerical instability
+ printf( "V_m out of lower bound\n" );
V_m = V_reset;
- w=0;
+ w = 0;
return;
}
- if ( w < -1.0e6 || w > 1.0e6) { // numerical instability
- printf("w out of bound\n");
+ if ( w < -1.0e6 || w > 1.0e6 )
+ { // numerical instability
+ printf( "w out of bound\n" );
V_m = V_reset;
- w=0;
+ w = 0;
return;
}
- if (refractory_step > 0.0) {
+ if ( refractory_step > 0.0 )
+ {
V_m = V_reset;
- if (end_time_step) {
+ if ( end_time_step )
+ {
refractory_step -= 1.0;
}
}
- else {
- if ( V_m >= V_peak ) { // send spike
+ else
+ {
+ if ( V_m >= V_peak )
+ { // send spike
int neuron_idx = threadIdx.x + blockIdx.x * blockDim.x;
- PushSpike(data_struct.i_node_0_ + neuron_idx, 1.0);
+ PushSpike( data_struct.i_node_0_ + neuron_idx, 1.0 );
V_m = V_reset;
w += b; // spike-driven adaptation
- refractory_step = (int)round(t_ref/NESTGPUTimeResolution);
- if (refractory_step<0) {
- refractory_step = 0;
+ refractory_step = ( int ) round( t_ref / NESTGPUTimeResolution );
+ if ( refractory_step < 0 )
+ {
+ refractory_step = 0;
}
}
}
@@ -232,25 +223,20 @@ __device__
};
-int Update(long long it, double t1);
+int Update( long long it, double t1 );
-template
-__device__
-void Derivatives(double x, float *y, float *dydx, float *param,
- aeif_cond_beta_rk5 data_struct)
+template < int NVAR, int NPARAM >
+__device__ void
+Derivatives( double x, float* y, float* dydx, float* param, aeif_cond_beta_rk5 data_struct )
{
- aeif_cond_beta_ns::Derivatives(x, y, dydx, param,
- data_struct);
+ aeif_cond_beta_ns::Derivatives< NVAR, NPARAM >( x, y, dydx, param, data_struct );
}
-template
-__device__
-void ExternalUpdate(double x, float *y, float *param, bool end_time_step,
- aeif_cond_beta_rk5 data_struct)
+template < int NVAR, int NPARAM >
+__device__ void
+ExternalUpdate( double x, float* y, float* param, bool end_time_step, aeif_cond_beta_rk5 data_struct )
{
- aeif_cond_beta_ns::ExternalUpdate(x, y, param,
- end_time_step,
- data_struct);
+ aeif_cond_beta_ns::ExternalUpdate< NVAR, NPARAM >( x, y, param, end_time_step, data_struct );
}
diff --git a/src/aeif_cond_beta_multisynapse.cu b/src/aeif_cond_beta_multisynapse.cu
index f184dab7e..afe3d4347 100644
--- a/src/aeif_cond_beta_multisynapse.cu
+++ b/src/aeif_cond_beta_multisynapse.cu
@@ -21,25 +21,21 @@
*/
-
-
-
-#include
-#include
-#include
+#include "aeif_cond_beta_multisynapse.h"
#include "aeif_cond_beta_multisynapse_kernel.h"
#include "rk5.h"
-#include "aeif_cond_beta_multisynapse.h"
+#include
+#include
+#include
namespace aeif_cond_beta_multisynapse_ns
{
-__device__
-void NodeInit(int n_var, int n_param, double x, float *y, float *param,
- aeif_cond_beta_multisynapse_rk5 data_struct)
+__device__ void
+NodeInit( int n_var, int n_param, double x, float* y, float* param, aeif_cond_beta_multisynapse_rk5 data_struct )
{
- //int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
- int n_port = (n_var-N_SCAL_VAR)/N_PORT_VAR;
+ // int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
+ int n_port = ( n_var - N_SCAL_VAR ) / N_PORT_VAR;
V_th = -50.4;
Delta_T = 2.0;
@@ -54,72 +50,73 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param,
V_reset = -60.0;
t_ref = 0.0;
den_delay = 0.0;
-
+
V_m = E_L;
w = 0;
refractory_step = 0;
- for (int i = 0; i
-int aeif_cond_beta_multisynapse::UpdateNR<0>(long long it, double t1)
+int
+aeif_cond_beta_multisynapse::UpdateNR< 0 >( long long it, double t1 )
{
return 0;
}
-int aeif_cond_beta_multisynapse::Update(long long it, double t1) {
- UpdateNR(it, t1);
+int
+aeif_cond_beta_multisynapse::Update( long long it, double t1 )
+{
+ UpdateNR< MAX_PORT_NUM >( it, t1 );
return 0;
}
diff --git a/src/aeif_cond_beta_multisynapse.h b/src/aeif_cond_beta_multisynapse.h
index e207a8326..0195e7b90 100644
--- a/src/aeif_cond_beta_multisynapse.h
+++ b/src/aeif_cond_beta_multisynapse.h
@@ -21,19 +21,16 @@
*/
-
-
-
#ifndef AEIFCONDBETAMULTISYNAPSE_H
#define AEIFCONDBETAMULTISYNAPSE_H
-#include
-#include
-#include "cuda_error.h"
-#include "rk5.h"
-#include "node_group.h"
#include "base_neuron.h"
+#include "cuda_error.h"
#include "neuron_models.h"
+#include "node_group.h"
+#include "rk5.h"
+#include
+#include
/* BeginUserDocs: neuron, adaptive threshold, integrate-and-fire, conductance-based
@@ -46,7 +43,7 @@ Conductance-based adaptive exponential integrate-and-fire neuron model
Description
+++++++++++
-``aeif_cond_beta_multisynapse`` is a conductance-based adaptive exponential
+``aeif_cond_beta_multisynapse`` is a conductance-based adaptive exponential
integrate-and-fire neuron model according to [1]_ with
multiple synaptic rise time and decay time constants, and synaptic conductance
modeled by a beta function.
@@ -136,9 +133,9 @@ tau_decay list of ms Decay time constant of synaptic conductance
========= ======= =========================================================
**Integration parameters**
---------------------------------------------------------------------------
-h0_rel real Starting step in ODE integration relative to time
+h0_rel real Starting step in ODE integration relative to time
resolution
-h_min_rel real Minimum step in ODE integration relative to time
+h_min_rel real Minimum step in ODE integration relative to time
resolution
========= ======= =========================================================
@@ -171,30 +168,32 @@ struct aeif_cond_beta_multisynapse_rk5
class aeif_cond_beta_multisynapse : public BaseNeuron
{
- public:
- RungeKutta5 rk5_;
+public:
+ RungeKutta5< aeif_cond_beta_multisynapse_rk5 > rk5_;
float h_min_;
float h_;
aeif_cond_beta_multisynapse_rk5 rk5_data_struct_;
-
- int Init(int i_node_0, int n_neuron, int n_port, int i_group,
- unsigned long long *seed);
-
- int Calibrate(double time_min, float time_resolution);
-
- int Update(long long it, double t1);
-
- int GetX(int i_neuron, int n_node, double *x) {
- return rk5_.GetX(i_neuron, n_node, x);
+
+ int Init( int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long* seed );
+
+ int Calibrate( double time_min, float time_resolution );
+
+ int Update( long long it, double t1 );
+
+ int
+ GetX( int i_neuron, int n_node, double* x )
+ {
+ return rk5_.GetX( i_neuron, n_node, x );
}
-
- int GetY(int i_var, int i_neuron, int n_node, float *y) {
- return rk5_.GetY(i_var, i_neuron, n_node, y);
+
+ int
+ GetY( int i_var, int i_neuron, int n_node, float* y )
+ {
+ return rk5_.GetY( i_var, i_neuron, n_node, y );
}
-
- template
- int UpdateNR(long long it, double t1);
+ template < int N_PORT >
+ int UpdateNR( long long it, double t1 );
};
#endif
diff --git a/src/aeif_cond_beta_multisynapse_kernel.h b/src/aeif_cond_beta_multisynapse_kernel.h
index 798cfa871..625aa3f00 100644
--- a/src/aeif_cond_beta_multisynapse_kernel.h
+++ b/src/aeif_cond_beta_multisynapse_kernel.h
@@ -21,37 +21,37 @@
*/
-
-
-
#ifndef AEIFCONDBETAMULTISYNAPSEKERNEL_H
#define AEIFCONDBETAMULTISYNAPSEKERNEL_H
-#include
-#include
-#include "spike_buffer.h"
-#include "node_group.h"
#include "aeif_cond_beta_multisynapse.h"
+#include "node_group.h"
+#include "spike_buffer.h"
+#include
+#include
-#define MIN(a,b) (((a)<(b))?(a):(b))
+#define MIN( a, b ) ( ( ( a ) < ( b ) ) ? ( a ) : ( b ) )
extern __constant__ float NESTGPUTimeResolution;
namespace aeif_cond_beta_multisynapse_ns
{
-enum ScalVarIndexes {
+enum ScalVarIndexes
+{
i_V_m = 0,
i_w,
N_SCAL_VAR
};
-enum PortVarIndexes {
+enum PortVarIndexes
+{
i_g = 0,
i_g1,
N_PORT_VAR
};
-enum ScalParamIndexes {
+enum ScalParamIndexes
+{
i_V_th = 0,
i_Delta_T,
i_g_L,
@@ -69,7 +69,8 @@ enum ScalParamIndexes {
N_SCAL_PARAM
};
-enum PortParamIndexes {
+enum PortParamIndexes
+{
i_E_rev = 0,
i_tau_rise,
i_tau_decay,
@@ -77,25 +78,19 @@ enum PortParamIndexes {
N_PORT_PARAM
};
-enum GroupParamIndexes {
- i_h_min_rel = 0, // Min. step in ODE integr. relative to time resolution
- i_h0_rel, // Starting step in ODE integr. relative to time resolution
+enum GroupParamIndexes
+{
+ i_h_min_rel = 0, // Min. step in ODE integr. relative to time resolution
+ i_h0_rel, // Starting step in ODE integr. relative to time resolution
N_GROUP_PARAM
};
-const std::string aeif_cond_beta_multisynapse_scal_var_name[N_SCAL_VAR] = {
- "V_m",
- "w"
-};
+const std::string aeif_cond_beta_multisynapse_scal_var_name[ N_SCAL_VAR ] = { "V_m", "w" };
-const std::string aeif_cond_beta_multisynapse_port_var_name[N_PORT_VAR] = {
- "g",
- "g1"
-};
+const std::string aeif_cond_beta_multisynapse_port_var_name[ N_PORT_VAR ] = { "g", "g1" };
-const std::string aeif_cond_beta_multisynapse_scal_param_name[N_SCAL_PARAM] = {
- "V_th",
+const std::string aeif_cond_beta_multisynapse_scal_param_name[ N_SCAL_PARAM ] = { "V_th",
"Delta_T",
"g_L",
"E_L",
@@ -108,118 +103,120 @@ const std::string aeif_cond_beta_multisynapse_scal_param_name[N_SCAL_PARAM] = {
"V_reset",
"t_ref",
"refractory_step",
- "den_delay"
-};
+ "den_delay" };
-const std::string aeif_cond_beta_multisynapse_port_param_name[N_PORT_PARAM] = {
- "E_rev",
+const std::string aeif_cond_beta_multisynapse_port_param_name[ N_PORT_PARAM ] = { "E_rev",
"tau_rise",
"tau_decay",
- "g0"
-};
+ "g0" };
-const std::string aeif_cond_beta_multisynapse_group_param_name[N_GROUP_PARAM] = {
- "h_min_rel",
- "h0_rel"
-};
+const std::string aeif_cond_beta_multisynapse_group_param_name[ N_GROUP_PARAM ] = { "h_min_rel", "h0_rel" };
//
// I know that defines are "bad", but the defines below make the
// following equations much more readable.
// For every rule there is some exceptions!
//
-#define V_m y[i_V_m]
-#define w y[i_w]
-#define g(i) y[N_SCAL_VAR + N_PORT_VAR*i + i_g]
-#define g1(i) y[N_SCAL_VAR + N_PORT_VAR*i + i_g1]
-
-#define dVdt dydx[i_V_m]
-#define dwdt dydx[i_w]
-#define dgdt(i) dydx[N_SCAL_VAR + N_PORT_VAR*i + i_g]
-#define dg1dt(i) dydx[N_SCAL_VAR + N_PORT_VAR*i + i_g1]
-
-#define V_th param[i_V_th]
-#define Delta_T param[i_Delta_T]
-#define g_L param[i_g_L]
-#define E_L param[i_E_L]
-#define C_m param[i_C_m]
-#define a param[i_a]
-#define b param[i_b]
-#define tau_w param[i_tau_w]
-#define I_e param[i_I_e]
-#define V_peak param[i_V_peak]
-#define V_reset param[i_V_reset]
-#define t_ref param[i_t_ref]
-#define refractory_step param[i_refractory_step]
-#define den_delay param[i_den_delay]
-
-#define E_rev(i) param[N_SCAL_PARAM + N_PORT_PARAM*i + i_E_rev]
-#define tau_rise(i) param[N_SCAL_PARAM + N_PORT_PARAM*i + i_tau_rise]
-#define tau_decay(i) param[N_SCAL_PARAM + N_PORT_PARAM*i + i_tau_decay]
-#define g0(i) param[N_SCAL_PARAM + N_PORT_PARAM*i + i_g0]
-
-#define h_min_rel_ group_param_[i_h_min_rel]
-#define h0_rel_ group_param_[i_h0_rel]
-
-
- template //, class DataStruct>
-__device__
- void Derivatives(double x, float *y, float *dydx, float *param,
- aeif_cond_beta_multisynapse_rk5 data_struct)
+#define V_m y[ i_V_m ]
+#define w y[ i_w ]
+#define g( i ) y[ N_SCAL_VAR + N_PORT_VAR * i + i_g ]
+#define g1( i ) y[ N_SCAL_VAR + N_PORT_VAR * i + i_g1 ]
+
+#define dVdt dydx[ i_V_m ]
+#define dwdt dydx[ i_w ]
+#define dgdt( i ) dydx[ N_SCAL_VAR + N_PORT_VAR * i + i_g ]
+#define dg1dt( i ) dydx[ N_SCAL_VAR + N_PORT_VAR * i + i_g1 ]
+
+#define V_th param[ i_V_th ]
+#define Delta_T param[ i_Delta_T ]
+#define g_L param[ i_g_L ]
+#define E_L param[ i_E_L ]
+#define C_m param[ i_C_m ]
+#define a param[ i_a ]
+#define b param[ i_b ]
+#define tau_w param[ i_tau_w ]
+#define I_e param[ i_I_e ]
+#define V_peak param[ i_V_peak ]
+#define V_reset param[ i_V_reset ]
+#define t_ref param[ i_t_ref ]
+#define refractory_step param[ i_refractory_step ]
+#define den_delay param[ i_den_delay ]
+
+#define E_rev( i ) param[ N_SCAL_PARAM + N_PORT_PARAM * i + i_E_rev ]
+#define tau_rise( i ) param[ N_SCAL_PARAM + N_PORT_PARAM * i + i_tau_rise ]
+#define tau_decay( i ) param[ N_SCAL_PARAM + N_PORT_PARAM * i + i_tau_decay ]
+#define g0( i ) param[ N_SCAL_PARAM + N_PORT_PARAM * i + i_g0 ]
+
+#define h_min_rel_ group_param_[ i_h_min_rel ]
+#define h0_rel_ group_param_[ i_h0_rel ]
+
+
+template < int NVAR, int NPARAM > //, class DataStruct>
+__device__ void
+Derivatives( double x, float* y, float* dydx, float* param, aeif_cond_beta_multisynapse_rk5 data_struct )
{
- enum { n_port = (NVAR-N_SCAL_VAR)/N_PORT_VAR };
+ enum
+ {
+ n_port = ( NVAR - N_SCAL_VAR ) / N_PORT_VAR
+ };
float I_syn = 0.0;
- float V = ( refractory_step > 0 ) ? V_reset : MIN(V_m, V_peak);
- for (int i = 0; i 0 ) ? V_reset : MIN( V_m, V_peak );
+ for ( int i = 0; i < n_port; i++ )
+ {
+ I_syn += g( i ) * ( E_rev( i ) - V );
}
- float V_spike = Delta_T*exp((V - V_th)/Delta_T);
+ float V_spike = Delta_T * exp( ( V - V_th ) / Delta_T );
- dVdt = ( refractory_step > 0 ) ? 0 :
- ( -g_L*(V - E_L - V_spike) + I_syn - w + I_e) / C_m;
+ dVdt = ( refractory_step > 0 ) ? 0 : ( -g_L * ( V - E_L - V_spike ) + I_syn - w + I_e ) / C_m;
// Adaptation current w.
- dwdt = (a*(V - E_L) - w) / tau_w;
- for (int i=0; i //, class DataStruct>
-__device__
- void ExternalUpdate
- (double x, float *y, float *param, bool end_time_step,
- aeif_cond_beta_multisynapse_rk5 data_struct)
+template < int NVAR, int NPARAM > //, class DataStruct>
+__device__ void
+ExternalUpdate( double x, float* y, float* param, bool end_time_step, aeif_cond_beta_multisynapse_rk5 data_struct )
{
- if ( V_m < -1.0e3) { // numerical instability
- printf("V_m out of lower bound\n");
+ if ( V_m < -1.0e3 )
+ { // numerical instability
+ printf( "V_m out of lower bound\n" );
V_m = V_reset;
- w=0;
+ w = 0;
return;
}
- if ( w < -1.0e6 || w > 1.0e6) { // numerical instability
- printf("w out of bound\n");
+ if ( w < -1.0e6 || w > 1.0e6 )
+ { // numerical instability
+ printf( "w out of bound\n" );
V_m = V_reset;
- w=0;
+ w = 0;
return;
}
- if (refractory_step > 0.0) {
+ if ( refractory_step > 0.0 )
+ {
V_m = V_reset;
- if (end_time_step) {
+ if ( end_time_step )
+ {
refractory_step -= 1.0;
}
}
- else {
- if ( V_m >= V_peak ) { // send spike
+ else
+ {
+ if ( V_m >= V_peak )
+ { // send spike
int neuron_idx = threadIdx.x + blockIdx.x * blockDim.x;
- PushSpike(data_struct.i_node_0_ + neuron_idx, 1.0);
+ PushSpike( data_struct.i_node_0_ + neuron_idx, 1.0 );
V_m = V_reset;
w += b; // spike-driven adaptation
- refractory_step = (int)round(t_ref/NESTGPUTimeResolution);
- if (refractory_step<0) {
- refractory_step = 0;
+ refractory_step = ( int ) round( t_ref / NESTGPUTimeResolution );
+ if ( refractory_step < 0 )
+ {
+ refractory_step = 0;
}
}
}
@@ -229,43 +226,40 @@ __device__
};
template <>
-int aeif_cond_beta_multisynapse::UpdateNR<0>(long long it, double t1);
+int aeif_cond_beta_multisynapse::UpdateNR< 0 >( long long it, double t1 );
-template
-int aeif_cond_beta_multisynapse::UpdateNR(long long it, double t1)
+template < int N_PORT >
+int
+aeif_cond_beta_multisynapse::UpdateNR( long long it, double t1 )
{
- if (N_PORT == n_port_) {
- const int NVAR = aeif_cond_beta_multisynapse_ns::N_SCAL_VAR
- + aeif_cond_beta_multisynapse_ns::N_PORT_VAR*N_PORT;
- const int NPARAM = aeif_cond_beta_multisynapse_ns::N_SCAL_PARAM
- + aeif_cond_beta_multisynapse_ns::N_PORT_PARAM*N_PORT;
+ if ( N_PORT == n_port_ )
+ {
+ const int NVAR = aeif_cond_beta_multisynapse_ns::N_SCAL_VAR + aeif_cond_beta_multisynapse_ns::N_PORT_VAR * N_PORT;
+ const int NPARAM =
+ aeif_cond_beta_multisynapse_ns::N_SCAL_PARAM + aeif_cond_beta_multisynapse_ns::N_PORT_PARAM * N_PORT;
- rk5_.Update(t1, h_min_, rk5_data_struct_);
+ rk5_.Update< NVAR, NPARAM >( t1, h_min_, rk5_data_struct_ );
}
- else {
- UpdateNR(it, t1);
+ else
+ {
+ UpdateNR< N_PORT - 1 >( it, t1 );
}
return 0;
}
-template
-__device__
-void Derivatives(double x, float *y, float *dydx, float *param,
- aeif_cond_beta_multisynapse_rk5 data_struct)
+template < int NVAR, int NPARAM >
+__device__ void
+Derivatives( double x, float* y, float* dydx, float* param, aeif_cond_beta_multisynapse_rk5 data_struct )
{
- aeif_cond_beta_multisynapse_ns::Derivatives(x, y, dydx, param,
- data_struct);
+ aeif_cond_beta_multisynapse_ns::Derivatives< NVAR, NPARAM >( x, y, dydx, param, data_struct );
}
-template
-__device__
-void ExternalUpdate(double x, float *y, float *param, bool end_time_step,
- aeif_cond_beta_multisynapse_rk5 data_struct)
+template < int NVAR, int NPARAM >
+__device__ void
+ExternalUpdate( double x, float* y, float* param, bool end_time_step, aeif_cond_beta_multisynapse_rk5 data_struct )
{
- aeif_cond_beta_multisynapse_ns::ExternalUpdate(x, y, param,
- end_time_step,
- data_struct);
+ aeif_cond_beta_multisynapse_ns::ExternalUpdate< NVAR, NPARAM >( x, y, param, end_time_step, data_struct );
}
diff --git a/src/aeif_cond_beta_multisynapse_rk5.h b/src/aeif_cond_beta_multisynapse_rk5.h
index 543ed5879..e931e2bc9 100644
--- a/src/aeif_cond_beta_multisynapse_rk5.h
+++ b/src/aeif_cond_beta_multisynapse_rk5.h
@@ -21,31 +21,24 @@
*/
-
-
-
#ifndef AEIFCONDBETAMULTISYNAPSERK5_H
#define AEIFCONDBETAMULTISYNAPSERK5_H
struct aeif_cond_beta_multisynapse_rk5;
-template
-__device__
-void Derivatives(double x, float *y, float *dydx, float *param,
- aeif_cond_beta_multisynapse_rk5 data_struct);
+template < int NVAR, int NPARAM >
+__device__ void
+Derivatives( double x, float* y, float* dydx, float* param, aeif_cond_beta_multisynapse_rk5 data_struct );
-template
-__device__
-void ExternalUpdate(double x, float *y, float *param, bool end_time_step,
- aeif_cond_beta_multisynapse_rk5 data_struct);
+template < int NVAR, int NPARAM >
+__device__ void
+ExternalUpdate( double x, float* y, float* param, bool end_time_step, aeif_cond_beta_multisynapse_rk5 data_struct );
-__device__
-void NodeInit(int n_var, int n_param, double x, float *y,
- float *param, aeif_cond_beta_multisynapse_rk5 data_struct);
+__device__ void
+NodeInit( int n_var, int n_param, double x, float* y, float* param, aeif_cond_beta_multisynapse_rk5 data_struct );
-__device__
-void NodeCalibrate(int n_var, int n_param, double x, float *y,
- float *param, aeif_cond_beta_multisynapse_rk5 data_struct);
+__device__ void
+NodeCalibrate( int n_var, int n_param, double x, float* y, float* param, aeif_cond_beta_multisynapse_rk5 data_struct );
#endif
diff --git a/src/aeif_psc_alpha.cu b/src/aeif_psc_alpha.cu
index 401c59dc8..27083a7a9 100644
--- a/src/aeif_psc_alpha.cu
+++ b/src/aeif_psc_alpha.cu
@@ -21,24 +21,20 @@
*/
-
-
-
-#include
-#include
-#include
+#include "aeif_psc_alpha.h"
#include "aeif_psc_alpha_kernel.h"
#include "rk5.h"
-#include "aeif_psc_alpha.h"
+#include
+#include
+#include
namespace aeif_psc_alpha_ns
{
-__device__
-void NodeInit(int n_var, int n_param, double x, float *y, float *param,
- aeif_psc_alpha_rk5 data_struct)
+__device__ void
+NodeInit( int n_var, int n_param, double x, float* y, float* param, aeif_psc_alpha_rk5 data_struct )
{
- //int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
+ // int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
V_th = -50.4;
Delta_T = 2.0;
@@ -53,7 +49,7 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param,
V_reset = -60.0;
t_ref = 0.0;
den_delay = 0.0;
-
+
V_m = E_L;
w = 0.0;
refractory_step = 0;
@@ -65,14 +61,14 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param,
tau_syn_in = 2.0;
}
-__device__
-void NodeCalibrate(int n_var, int n_param, double x, float *y,
- float *param, aeif_psc_alpha_rk5 data_struct)
+__device__ void
+NodeCalibrate( int n_var, int n_param, double x, float* y, float* param, aeif_psc_alpha_rk5 data_struct )
{
- //int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
+ // int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
refractory_step = 0;
// set the right threshold depending on Delta_T
- if (Delta_T <= 0.0) {
+ if ( Delta_T <= 0.0 )
+ {
V_peak = V_th; // same as IAF dynamics for spikes if Delta_T == 0.
}
I0_ex = M_E / tau_syn_ex;
@@ -81,73 +77,75 @@ void NodeCalibrate(int n_var, int n_param, double x, float *y,
}
-__device__
-void NodeInit(int n_var, int n_param, double x, float *y,
- float *param, aeif_psc_alpha_rk5 data_struct)
+__device__ void
+NodeInit( int n_var, int n_param, double x, float* y, float* param, aeif_psc_alpha_rk5 data_struct )
{
- aeif_psc_alpha_ns::NodeInit(n_var, n_param, x, y, param, data_struct);
+ aeif_psc_alpha_ns::NodeInit( n_var, n_param, x, y, param, data_struct );
}
-__device__
-void NodeCalibrate(int n_var, int n_param, double x, float *y,
- float *param, aeif_psc_alpha_rk5 data_struct)
+__device__ void
+NodeCalibrate( int n_var, int n_param, double x, float* y, float* param, aeif_psc_alpha_rk5 data_struct )
{
- aeif_psc_alpha_ns::NodeCalibrate(n_var, n_param, x, y, param, data_struct);
+ aeif_psc_alpha_ns::NodeCalibrate( n_var, n_param, x, y, param, data_struct );
}
using namespace aeif_psc_alpha_ns;
-int aeif_psc_alpha::Init(int i_node_0, int n_node, int n_port,
- int i_group, unsigned long long *seed) {
- BaseNeuron::Init(i_node_0, n_node, 2 /*n_port*/, i_group, seed);
+int
+aeif_psc_alpha::Init( int i_node_0, int n_node, int n_port, int i_group, unsigned long long* seed )
+{
+ BaseNeuron::Init( i_node_0, n_node, 2 /*n_port*/, i_group, seed );
node_type_ = i_aeif_psc_alpha_model;
n_scal_var_ = N_SCAL_VAR;
n_scal_param_ = N_SCAL_PARAM;
- n_group_param_ = N_GROUP_PARAM;
+ n_group_param_ = N_GROUP_PARAM;
n_var_ = n_scal_var_;
n_param_ = n_scal_param_;
- group_param_ = new float[N_GROUP_PARAM];
+ group_param_ = new float[ N_GROUP_PARAM ];
scal_var_name_ = aeif_psc_alpha_scal_var_name;
scal_param_name_ = aeif_psc_alpha_scal_param_name;
group_param_name_ = aeif_psc_alpha_group_param_name;
- //rk5_data_struct_.node_type_ = i_aeif_psc_alpha_model;
+ // rk5_data_struct_.node_type_ = i_aeif_psc_alpha_model;
rk5_data_struct_.i_node_0_ = i_node_0_;
- SetGroupParam("h_min_rel", 1.0e-3);
- SetGroupParam("h0_rel", 1.0e-2);
- h_ = h0_rel_* 0.1;
+ SetGroupParam( "h_min_rel", 1.0e-3 );
+ SetGroupParam( "h0_rel", 1.0e-2 );
+ h_ = h0_rel_ * 0.1;
- rk5_.Init(n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_);
+ rk5_.Init( n_node, n_var_, n_param_, 0.0, h_, rk5_data_struct_ );
var_arr_ = rk5_.GetYArr();
param_arr_ = rk5_.GetParamArr();
- port_weight_arr_ = GetParamArr() + GetScalParamIdx("I0_ex");
+ port_weight_arr_ = GetParamArr() + GetScalParamIdx( "I0_ex" );
port_weight_arr_step_ = n_param_;
port_weight_port_step_ = 1;
-
- port_input_arr_ = GetVarArr() + GetScalVarIdx("I1_syn_ex");
+
+ port_input_arr_ = GetVarArr() + GetScalVarIdx( "I1_syn_ex" );
port_input_arr_step_ = n_var_;
port_input_port_step_ = 1;
- den_delay_arr_ = GetParamArr() + GetScalParamIdx("den_delay");
+ den_delay_arr_ = GetParamArr() + GetScalParamIdx( "den_delay" );
return 0;
}
-int aeif_psc_alpha::Calibrate(double time_min, float time_resolution)
+int
+aeif_psc_alpha::Calibrate( double time_min, float time_resolution )
{
- h_min_ = h_min_rel_* time_resolution;
- h_ = h0_rel_* time_resolution;
- rk5_.Calibrate(time_min, h_, rk5_data_struct_);
-
+ h_min_ = h_min_rel_ * time_resolution;
+ h_ = h0_rel_ * time_resolution;
+ rk5_.Calibrate( time_min, h_, rk5_data_struct_ );
+
return 0;
}
-int aeif_psc_alpha::Update(long long it, double t1) {
- rk5_.Update(t1, h_min_, rk5_data_struct_);
+int
+aeif_psc_alpha::Update( long long it, double t1 )
+{
+ rk5_.Update< N_SCAL_VAR, N_SCAL_PARAM >( t1, h_min_, rk5_data_struct_ );
return 0;
}
diff --git a/src/aeif_psc_alpha.h b/src/aeif_psc_alpha.h
index be7e7e6a0..55694cfca 100644
--- a/src/aeif_psc_alpha.h
+++ b/src/aeif_psc_alpha.h
@@ -21,19 +21,16 @@
*/
-
-
-
#ifndef AEIFPSCALPHA_H
#define AEIFPSCALPHA_H
-#include
-#include
-#include "cuda_error.h"
-#include "rk5.h"
-#include "node_group.h"
#include "base_neuron.h"
+#include "cuda_error.h"
#include "neuron_models.h"
+#include "node_group.h"
+#include "rk5.h"
+#include
+#include
/* BeginUserDocs: neuron, adaptive threshold, integrate-and-fire, current-based
@@ -121,9 +118,9 @@ The following parameters can be set in the status dictionary.
============= ======= =========================================================
**Integration parameters**
-------------------------------------------------------------------------------
-h0_rel real Starting step in ODE integration relative to time
+h0_rel real Starting step in ODE integration relative to time
resolution
-h_min_rel real Minimum step in ODE integration relative to time
+h_min_rel real Minimum step in ODE integration relative to time
resolution
============= ======= =========================================================
@@ -142,7 +139,7 @@ aeif_psc_alpha_multisynapse, iaf_psc_alpha, aeif_cond_alpha
EndUserDocs */
-//#define MAX_PORT_NUM 20
+// #define MAX_PORT_NUM 20
struct aeif_psc_alpha_rk5
{
@@ -151,30 +148,32 @@ struct aeif_psc_alpha_rk5
class aeif_psc_alpha : public BaseNeuron
{
- public:
- RungeKutta5 rk5_;
+public:
+ RungeKutta5< aeif_psc_alpha_rk5 > rk5_;
float h_min_;
float h_;
aeif_psc_alpha_rk5 rk5_data_struct_;
-
- int Init(int i_node_0, int n_neuron, int n_port, int i_group,
- unsigned long long *seed);
-
- int Calibrate(double time_min, float time_resolution);
-
- int Update(long long it, double t1);
-
- int GetX(int i_neuron, int n_node, double *x) {
- return rk5_.GetX(i_neuron, n_node, x);
+
+ int Init( int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long* seed );
+
+ int Calibrate( double time_min, float time_resolution );
+
+ int Update( long long it, double t1 );
+
+ int
+ GetX( int i_neuron, int n_node, double* x )
+ {
+ return rk5_.GetX( i_neuron, n_node, x );
}
-
- int GetY(int i_var, int i_neuron, int n_node, float *y) {
- return rk5_.GetY(i_var, i_neuron, n_node, y);
+
+ int
+ GetY( int i_var, int i_neuron, int n_node, float* y )
+ {
+ return rk5_.GetY( i_var, i_neuron, n_node, y );
}
-
- template
- int UpdateNR(long long it, double t1);
+ template < int N_PORT >
+ int UpdateNR( long long it, double t1 );
};
#endif
diff --git a/src/aeif_psc_alpha_kernel.h b/src/aeif_psc_alpha_kernel.h
index c396c02b1..f33690de3 100644
--- a/src/aeif_psc_alpha_kernel.h
+++ b/src/aeif_psc_alpha_kernel.h
@@ -21,25 +21,23 @@
*/
-
-
-
#ifndef AEIFPSCALPHAKERNEL_H
#define AEIFPSCALPHAKERNEL_H
-#include
-#include
-#include "spike_buffer.h"
-#include "node_group.h"
#include "aeif_psc_alpha.h"
+#include "node_group.h"
+#include "spike_buffer.h"
+#include
+#include
-#define MIN(a,b) (((a)<(b))?(a):(b))
+#define MIN( a, b ) ( ( ( a ) < ( b ) ) ? ( a ) : ( b ) )
extern __constant__ float NESTGPUTimeResolution;
namespace aeif_psc_alpha_ns
{
-enum ScalVarIndexes {
+enum ScalVarIndexes
+{
i_I_syn_ex = 0,
i_I_syn_in,
i_I1_syn_ex,
@@ -49,7 +47,8 @@ enum ScalVarIndexes {
N_SCAL_VAR
};
-enum ScalParamIndexes {
+enum ScalParamIndexes
+{
i_tau_syn_ex = 0,
i_tau_syn_in,
i_I0_ex,
@@ -71,24 +70,22 @@ enum ScalParamIndexes {
N_SCAL_PARAM
};
-enum GroupParamIndexes {
- i_h_min_rel = 0, // Min. step in ODE integr. relative to time resolution
- i_h0_rel, // Starting step in ODE integr. relative to time resolution
+enum GroupParamIndexes
+{
+ i_h_min_rel = 0, // Min. step in ODE integr. relative to time resolution
+ i_h0_rel, // Starting step in ODE integr. relative to time resolution
N_GROUP_PARAM
};
-const std::string aeif_psc_alpha_scal_var_name[N_SCAL_VAR] = {
- "I_syn_ex",
+const std::string aeif_psc_alpha_scal_var_name[ N_SCAL_VAR ] = { "I_syn_ex",
"I_syn_in",
"I1_syn_ex",
"I1_syn_in",
"V_m",
- "w"
-};
+ "w" };
-const std::string aeif_psc_alpha_scal_param_name[N_SCAL_PARAM] = {
- "tau_syn_ex",
+const std::string aeif_psc_alpha_scal_param_name[ N_SCAL_PARAM ] = { "tau_syn_ex",
"tau_syn_in",
"I0_ex",
"I0_in",
@@ -105,112 +102,111 @@ const std::string aeif_psc_alpha_scal_param_name[N_SCAL_PARAM] = {
"V_reset",
"t_ref",
"refractory_step",
- "den_delay"
-};
+ "den_delay" };
-const std::string aeif_psc_alpha_group_param_name[N_GROUP_PARAM] = {
- "h_min_rel",
- "h0_rel"
-};
+const std::string aeif_psc_alpha_group_param_name[ N_GROUP_PARAM ] = { "h_min_rel", "h0_rel" };
//
// I know that defines are "bad", but the defines below make the
// following equations much more readable.
// For every rule there is some exceptions!
//
-#define I_syn_ex y[i_I_syn_ex]
-#define I_syn_in y[i_I_syn_in]
-#define I1_syn_ex y[i_I1_syn_ex]
-#define I1_syn_in y[i_I1_syn_in]
-#define V_m y[i_V_m]
-#define w y[i_w]
-
-#define dI_syn_exdt dydx[i_I_syn_ex]
-#define dI_syn_indt dydx[i_I_syn_in]
-#define dI1_syn_exdt dydx[i_I1_syn_ex]
-#define dI1_syn_indt dydx[i_I1_syn_in]
-#define dVdt dydx[i_V_m]
-#define dwdt dydx[i_w]
-
-#define I0_ex param[i_I0_ex]
-#define I0_in param[i_I0_in]
-#define tau_syn_ex param[i_tau_syn_ex]
-#define tau_syn_in param[i_tau_syn_in]
-#define V_th param[i_V_th]
-#define Delta_T param[i_Delta_T]
-#define g_L param[i_g_L]
-#define E_L param[i_E_L]
-#define C_m param[i_C_m]
-#define a param[i_a]
-#define b param[i_b]
-#define tau_w param[i_tau_w]
-#define I_e param[i_I_e]
-#define V_peak param[i_V_peak]
-#define V_reset param[i_V_reset]
-#define t_ref param[i_t_ref]
-#define refractory_step param[i_refractory_step]
-#define den_delay param[i_den_delay]
-
-#define h_min_rel_ group_param_[i_h_min_rel]
-#define h0_rel_ group_param_[i_h0_rel]
-
-
- template //, class DataStruct>
-__device__
- void Derivatives(double x, float *y, float *dydx, float *param,
- aeif_psc_alpha_rk5 data_struct)
+#define I_syn_ex y[ i_I_syn_ex ]
+#define I_syn_in y[ i_I_syn_in ]
+#define I1_syn_ex y[ i_I1_syn_ex ]
+#define I1_syn_in y[ i_I1_syn_in ]
+#define V_m y[ i_V_m ]
+#define w y[ i_w ]
+
+#define dI_syn_exdt dydx[ i_I_syn_ex ]
+#define dI_syn_indt dydx[ i_I_syn_in ]
+#define dI1_syn_exdt dydx[ i_I1_syn_ex ]
+#define dI1_syn_indt dydx[ i_I1_syn_in ]
+#define dVdt dydx[ i_V_m ]
+#define dwdt dydx[ i_w ]
+
+#define I0_ex param[ i_I0_ex ]
+#define I0_in param[ i_I0_in ]
+#define tau_syn_ex param[ i_tau_syn_ex ]
+#define tau_syn_in param[ i_tau_syn_in ]
+#define V_th param[ i_V_th ]
+#define Delta_T param[ i_Delta_T ]
+#define g_L param[ i_g_L ]
+#define E_L param[ i_E_L ]
+#define C_m param[ i_C_m ]
+#define a param[ i_a ]
+#define b param[ i_b ]
+#define tau_w param[ i_tau_w ]
+#define I_e param[ i_I_e ]
+#define V_peak param[ i_V_peak ]
+#define V_reset param[ i_V_reset ]
+#define t_ref param[ i_t_ref ]
+#define refractory_step param[ i_refractory_step ]
+#define den_delay param[ i_den_delay ]
+
+#define h_min_rel_ group_param_[ i_h_min_rel ]
+#define h0_rel_ group_param_[ i_h0_rel ]
+
+
+template < int NVAR, int NPARAM > //, class DataStruct>
+__device__ void
+Derivatives( double x, float* y, float* dydx, float* param, aeif_psc_alpha_rk5 data_struct )
{
float I_syn_tot = 0.0;
-
- float V = ( refractory_step > 0 ) ? V_reset : MIN(V_m, V_peak);
+
+ float V = ( refractory_step > 0 ) ? V_reset : MIN( V_m, V_peak );
I_syn_tot += I_syn_ex - I_syn_in;
- float V_spike = Delta_T == 0. ? 0. : Delta_T*exp((V - V_th)/Delta_T);
+ float V_spike = Delta_T == 0. ? 0. : Delta_T * exp( ( V - V_th ) / Delta_T );
- dVdt = ( refractory_step > 0 ) ? 0 :
- ( -g_L*(V - E_L - V_spike) + I_syn_tot - w + I_e) / C_m;
+ dVdt = ( refractory_step > 0 ) ? 0 : ( -g_L * ( V - E_L - V_spike ) + I_syn_tot - w + I_e ) / C_m;
// Adaptation current w.
- dwdt = (a*(V - E_L) - w) / tau_w;
- dI1_syn_exdt = -I1_syn_ex/tau_syn_ex;
- dI1_syn_indt = -I1_syn_in/tau_syn_in;
- dI_syn_exdt = I1_syn_ex - I_syn_ex/tau_syn_ex;
- dI_syn_indt = I1_syn_in - I_syn_in/tau_syn_in;
+ dwdt = ( a * ( V - E_L ) - w ) / tau_w;
+ dI1_syn_exdt = -I1_syn_ex / tau_syn_ex;
+ dI1_syn_indt = -I1_syn_in / tau_syn_in;
+ dI_syn_exdt = I1_syn_ex - I_syn_ex / tau_syn_ex;
+ dI_syn_indt = I1_syn_in - I_syn_in / tau_syn_in;
}
- template //, class DataStruct>
-__device__
- void ExternalUpdate
- (double x, float *y, float *param, bool end_time_step,
- aeif_psc_alpha_rk5 data_struct)
+template < int NVAR, int NPARAM > //, class DataStruct>
+__device__ void
+ExternalUpdate( double x, float* y, float* param, bool end_time_step, aeif_psc_alpha_rk5 data_struct )
{
- if ( V_m < -1.0e3) { // numerical instability
- printf("V_m out of lower bound\n");
+ if ( V_m < -1.0e3 )
+ { // numerical instability
+ printf( "V_m out of lower bound\n" );
V_m = V_reset;
- w=0;
+ w = 0;
return;
}
- if ( w < -1.0e6 || w > 1.0e6) { // numerical instability
- printf("w out of bound\n");
+ if ( w < -1.0e6 || w > 1.0e6 )
+ { // numerical instability
+ printf( "w out of bound\n" );
V_m = V_reset;
- w=0;
+ w = 0;
return;
}
- if (refractory_step > 0.0) {
+ if ( refractory_step > 0.0 )
+ {
V_m = V_reset;
- if (end_time_step) {
+ if ( end_time_step )
+ {
refractory_step -= 1.0;
}
}
- else {
- if ( V_m >= V_peak ) { // send spike
+ else
+ {
+ if ( V_m >= V_peak )
+ { // send spike
int neuron_idx = threadIdx.x + blockIdx.x * blockDim.x;
- PushSpike(data_struct.i_node_0_ + neuron_idx, 1.0);
+ PushSpike( data_struct.i_node_0_ + neuron_idx, 1.0 );
V_m = V_reset;
w += b; // spike-driven adaptation
- refractory_step = (int)round(t_ref/NESTGPUTimeResolution);
- if (refractory_step<0) {
- refractory_step = 0;
+ refractory_step = ( int ) round( t_ref / NESTGPUTimeResolution );
+ if ( refractory_step < 0 )
+ {
+ refractory_step = 0;
}
}
}
@@ -219,25 +215,20 @@ __device__
};
-int Update(long long it, double t1);
+int Update( long long it, double t1 );
-template
-__device__
-void Derivatives(double x, float *y, float *dydx, float *param,
- aeif_psc_alpha_rk5 data_struct)
+template < int NVAR, int NPARAM >
+__device__ void
+Derivatives( double x, float* y, float* dydx, float* param, aeif_psc_alpha_rk5 data_struct )
{
- aeif_psc_alpha_ns::Derivatives(x, y, dydx, param,
- data_struct);
+ aeif_psc_alpha_ns::Derivatives< NVAR, NPARAM >( x, y, dydx, param, data_struct );
}
-template
-__device__
-void ExternalUpdate(double x, float *y, float *param, bool end_time_step,
- aeif_psc_alpha_rk5 data_struct)
+template < int NVAR, int NPARAM >
+__device__ void
+ExternalUpdate( double x, float* y, float* param, bool end_time_step, aeif_psc_alpha_rk5 data_struct )
{
- aeif_psc_alpha_ns::ExternalUpdate(x, y, param,
- end_time_step,
- data_struct);
+ aeif_psc_alpha_ns::ExternalUpdate< NVAR, NPARAM >( x, y, param, end_time_step, data_struct );
}
diff --git a/src/aeif_psc_alpha_multisynapse.cu b/src/aeif_psc_alpha_multisynapse.cu
index db8d2db14..fa69108b6 100644
--- a/src/aeif_psc_alpha_multisynapse.cu
+++ b/src/aeif_psc_alpha_multisynapse.cu
@@ -21,25 +21,21 @@
*/
-
-
-
-#include
-#include
-#include
+#include "aeif_psc_alpha_multisynapse.h"
#include "aeif_psc_alpha_multisynapse_kernel.h"
#include "rk5.h"
-#include "aeif_psc_alpha_multisynapse.h"
+#include
+#include
+#include
namespace aeif_psc_alpha_multisynapse_ns
{
-__device__
-void NodeInit(int n_var, int n_param, double x, float *y, float *param,
- aeif_psc_alpha_multisynapse_rk5 data_struct)
+__device__ void
+NodeInit( int n_var, int n_param, double x, float* y, float* param, aeif_psc_alpha_multisynapse_rk5 data_struct )
{
- //int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
- int n_port = (n_var-N_SCAL_VAR)/N_PORT_VAR;
+ // int array_idx = threadIdx.x + blockIdx.x * blockDim.x;
+ int n_port = ( n_var - N_SCAL_VAR ) / N_PORT_VAR;
V_th = -50.4;
Delta_T = 2.0;
@@ -54,56 +50,57 @@ void NodeInit(int n_var, int n_param, double x, float *y, float *param,
V_reset = -60.0;
t_ref = 0.0;
den_delay = 0.0;
-
+
V_m = E_L;
w = 0.0;
refractory_step = 0;
- for (int i = 0; i
-int aeif_psc_alpha_multisynapse::UpdateNR<0>(long long it, double t1)
+int
+aeif_psc_alpha_multisynapse::UpdateNR< 0 >( long long it, double t1 )
{
return 0;
}
-int aeif_psc_alpha_multisynapse::Update(long long it, double t1) {
- UpdateNR(it, t1);
+int
+aeif_psc_alpha_multisynapse::Update( long long it, double t1 )
+{
+ UpdateNR< MAX_PORT_NUM >( it, t1 );
return 0;
}
diff --git a/src/aeif_psc_alpha_multisynapse.h b/src/aeif_psc_alpha_multisynapse.h
index d01e8f223..6235c6be6 100644
--- a/src/aeif_psc_alpha_multisynapse.h
+++ b/src/aeif_psc_alpha_multisynapse.h
@@ -21,19 +21,16 @@
*/
-
-
-
#ifndef AEIFPSCALPHAMULTISYNAPSE_H
#define AEIFPSCALPHAMULTISYNAPSE_H
-#include
-#include
-#include "cuda_error.h"
-#include "rk5.h"
-#include "node_group.h"
#include "base_neuron.h"
+#include "cuda_error.h"
#include "neuron_models.h"
+#include "node_group.h"
+#include "rk5.h"
+#include
+#include
/* BeginUserDocs: neuron, adaptive threshold, integrate-and-fire, current-based
@@ -120,9 +117,9 @@ The following parameters can be set in the status dictionary.
============= ======= =========================================================
**Integration parameters**
-------------------------------------------------------------------------------
-h0_rel real Starting step in ODE integration relative to time
+h0_rel real Starting step in ODE integration relative to time
resolution
-h_min_rel real Minimum step in ODE integration relative to time
+h_min_rel real Minimum step in ODE integration relative to time
resolution
============= ======= =========================================================
@@ -150,30 +147,32 @@ struct aeif_psc_alpha_multisynapse_rk5
class aeif_psc_alpha_multisynapse : public BaseNeuron
{
- public:
- RungeKutta5 rk5_;
+public:
+ RungeKutta5< aeif_psc_alpha_multisynapse_rk5 > rk5_;
float h_min_;
float h_;
aeif_psc_alpha_multisynapse_rk5 rk5_data_struct_;
-
- int Init(int i_node_0, int n_neuron, int n_port, int i_group,
- unsigned long long *seed);
-
- int Calibrate(double time_min, float time_resolution);
-
- int Update(long long it, double t1);
-
- int GetX(int i_neuron, int n_node, double *x) {
- return rk5_.GetX(i_neuron, n_node, x);
+
+ int Init( int i_node_0, int n_neuron, int n_port, int i_group, unsigned long long* seed );
+
+ int Calibrate( double time_min, float time_resolution );
+
+ int Update( long long it, double t1 );
+
+ int
+ GetX( int i_neuron, int n_node, double* x )
+ {
+ return rk5_.GetX( i_neuron, n_node, x );
}
-
- int GetY(int i_var, int i_neuron, int n_node, float *y) {
- return rk5_.GetY(i_var, i_neuron, n_node, y);
+
+ int
+ GetY( int i_var, int i_neuron, int n_node, float* y )
+ {
+ return rk5_.GetY( i_var, i_neuron, n_node, y );
}
-
- template
- int UpdateNR(long long it, double t1);
+ template < int N_PORT >
+ int UpdateNR( long long it, double t1 );
};
#endif
diff --git a/src/aeif_psc_alpha_multisynapse_kernel.h b/src/aeif_psc_alpha_multisynapse_kernel.h
index 61a34e895..225a85f01 100644
--- a/src/aeif_psc_alpha_multisynapse_kernel.h
+++ b/src/aeif_psc_alpha_multisynapse_kernel.h
@@ -21,37 +21,37 @@
*/
-
-
-
#ifndef AEIFPSCALPHAMULTISYNAPSEKERNEL_H
#define AEIFPSCALPHAMULTISYNAPSEKERNEL_H
-#include <string>
-#include <cmath>
-#include "spike_buffer.h"
-#include "node_group.h"
#include "aeif_psc_alpha_multisynapse.h"
+#include "node_group.h"
+#include "spike_buffer.h"
+#include <cmath>
+#include <string>
-#define MIN(a,b) (((a)<(b))?(a):(b))
+#define MIN( a, b ) ( ( ( a ) < ( b ) ) ? ( a ) : ( b ) )
extern __constant__ float NESTGPUTimeResolution;
namespace aeif_psc_alpha_multisynapse_ns
{
-enum ScalVarIndexes {
+enum ScalVarIndexes
+{
i_V_m = 0,
i_w,
N_SCAL_VAR
};
-enum PortVarIndexes {
+enum PortVarIndexes
+{
i_I_syn = 0,
i_I1_syn,
N_PORT_VAR
};
-enum ScalParamIndexes {
+enum ScalParamIndexes
+{
i_V_th = 0,
i_Delta_T,
i_g_L,
@@ -69,31 +69,26 @@ enum ScalParamIndexes {
N_SCAL_PARAM
};
-enum PortParamIndexes {
+enum PortParamIndexes
+{
i_tau_syn = 0,
i_I0,
N_PORT_PARAM
};
-enum GroupParamIndexes {
- i_h_min_rel = 0, // Min. step in ODE integr. relative to time resolution
- i_h0_rel, // Starting step in ODE integr. relative to time resolution
+enum GroupParamIndexes
+{
+ i_h_min_rel = 0, // Min. step in ODE integr. relative to time resolution
+ i_h0_rel, // Starting step in ODE integr. relative to time resolution
N_GROUP_PARAM
};
-const std::string aeif_psc_alpha_multisynapse_scal_var_name[N_SCAL_VAR] = {
- "V_m",
- "w"
-};
+const std::string aeif_psc_alpha_multisynapse_scal_var_name[ N_SCAL_VAR ] = { "V_m", "w" };
-const std::string aeif_psc_alpha_multisynapse_port_var_name[N_PORT_VAR] = {
- "I_syn",
- "I1_syn"
-};
+const std::string aeif_psc_alpha_multisynapse_port_var_name[ N_PORT_VAR ] = { "I_syn", "I1_syn" };
-const std::string aeif_psc_alpha_multisynapse_scal_param_name[N_SCAL_PARAM] = {
- "V_th",
+const std::string aeif_psc_alpha_multisynapse_scal_param_name[ N_SCAL_PARAM ] = { "V_th",
"Delta_T",
"g_L",
"E_L",
@@ -106,115 +101,116 @@ const std::string aeif_psc_alpha_multisynapse_scal_param_name[N_SCAL_PARAM] = {
"V_reset",
"t_ref",
"refractory_step",
- "den_delay"
-};
+ "den_delay" };
-const std::string aeif_psc_alpha_multisynapse_port_param_name[N_PORT_PARAM] = {
- "tau_syn",
- "I0"
-};
+const std::string aeif_psc_alpha_multisynapse_port_param_name[ N_PORT_PARAM ] = { "tau_syn", "I0" };
-const std::string aeif_psc_alpha_multisynapse_group_param_name[N_GROUP_PARAM] = {
- "h_min_rel",
- "h0_rel"
-};
+const std::string aeif_psc_alpha_multisynapse_group_param_name[ N_GROUP_PARAM ] = { "h_min_rel", "h0_rel" };
//
// I know that defines are "bad", but the defines below make the
// following equations much more readable.
// For every rule there is some exceptions!
//
-#define V_m y[i_V_m]
-#define w y[i_w]
-#define I_syn(i) y[N_SCAL_VAR + N_PORT_VAR*i + i_I_syn]
-#define I1_syn(i) y[N_SCAL_VAR + N_PORT_VAR*i + i_I1_syn]
-
-#define dVdt dydx[i_V_m]
-#define dwdt dydx[i_w]
-#define dI_syndt(i) dydx[N_SCAL_VAR + N_PORT_VAR*i + i_I_syn]
-#define dI1_syndt(i) dydx[N_SCAL_VAR + N_PORT_VAR*i + i_I1_syn]
-#define I0(i) param[N_SCAL_PARAM + N_PORT_PARAM*i + i_I0]
-
-#define V_th param[i_V_th]
-#define Delta_T param[i_Delta_T]
-#define g_L param[i_g_L]
-#define E_L param[i_E_L]
-#define C_m param[i_C_m]
-#define a param[i_a]
-#define b param[i_b]
-#define tau_w param[i_tau_w]
-#define I_e param[i_I_e]
-#define V_peak param[i_V_peak]
-#define V_reset param[i_V_reset]
-#define t_ref param[i_t_ref]
-#define refractory_step param[i_refractory_step]
-#define den_delay param[i_den_delay]
-
-#define tau_syn(i) param[N_SCAL_PARAM + N_PORT_PARAM*i + i_tau_syn]
-
-#define h_min_rel_ group_param_[i_h_min_rel]
-#define h0_rel_ group_param_[i_h0_rel]
-
-
- template <int NVAR, int NPARAM> //, class DataStruct>
-__device__
- void Derivatives(double x, float *y, float *dydx, float *param,
- aeif_psc_alpha_multisynapse_rk5 data_struct)
+#define V_m y[ i_V_m ]
+#define w y[ i_w ]
+#define I_syn( i ) y[ N_SCAL_VAR + N_PORT_VAR * i + i_I_syn ]
+#define I1_syn( i ) y[ N_SCAL_VAR + N_PORT_VAR * i + i_I1_syn ]
+
+#define dVdt dydx[ i_V_m ]
+#define dwdt dydx[ i_w ]
+#define dI_syndt( i ) dydx[ N_SCAL_VAR + N_PORT_VAR * i + i_I_syn ]
+#define dI1_syndt( i ) dydx[ N_SCAL_VAR + N_PORT_VAR * i + i_I1_syn ]
+#define I0( i ) param[ N_SCAL_PARAM + N_PORT_PARAM * i + i_I0 ]
+
+#define V_th param[ i_V_th ]
+#define Delta_T param[ i_Delta_T ]
+#define g_L param[ i_g_L ]
+#define E_L param[ i_E_L ]
+#define C_m param[ i_C_m ]
+#define a param[ i_a ]
+#define b param[ i_b ]
+#define tau_w param[ i_tau_w ]
+#define I_e param[ i_I_e ]
+#define V_peak param[ i_V_peak ]
+#define V_reset param[ i_V_reset ]
+#define t_ref param[ i_t_ref ]
+#define refractory_step param[ i_refractory_step ]
+#define den_delay param[ i_den_delay ]
+
+#define tau_syn( i ) param[ N_SCAL_PARAM + N_PORT_PARAM * i + i_tau_syn ]
+
+#define h_min_rel_ group_param_[ i_h_min_rel ]
+#define h0_rel_ group_param_[ i_h0_rel ]
+
+
+template < int NVAR, int NPARAM > //, class DataStruct>
+__device__ void
+Derivatives( double x, float* y, float* dydx, float* param, aeif_psc_alpha_multisynapse_rk5 data_struct )
{
- enum { n_port = (NVAR-N_SCAL_VAR)/N_PORT_VAR };
+ enum
+ {
+ n_port = ( NVAR - N_SCAL_VAR ) / N_PORT_VAR
+ };
float I_syn_tot = 0.0;
-
- float V = ( refractory_step > 0 ) ? V_reset : MIN(V_m, V_peak);
-  for (int i = 0; i<n_port; i++) {
-    I_syn_tot += I_syn(i);
+  float V = ( refractory_step > 0 ) ? V_reset : MIN( V_m, V_peak );
+ for ( int i = 0; i < n_port; i++ )
+ {
+ I_syn_tot += I_syn( i );
}
- float V_spike = Delta_T == 0. ? 0. : Delta_T*exp((V - V_th)/Delta_T);
+ float V_spike = Delta_T == 0. ? 0. : Delta_T * exp( ( V - V_th ) / Delta_T );
- dVdt = ( refractory_step > 0 ) ? 0 :
- ( -g_L*(V - E_L - V_spike) + I_syn_tot - w + I_e) / C_m;
+ dVdt = ( refractory_step > 0 ) ? 0 : ( -g_L * ( V - E_L - V_spike ) + I_syn_tot - w + I_e ) / C_m;
// Adaptation current w.
- dwdt = (a*(V - E_L) - w) / tau_w;
-  for (int i=0; i<n_port; i++) {
-    dI1_syndt(i) = -I1_syn(i) / tau_syn(i);
-    dI_syndt(i) = I1_syn(i) - I_syn(i) / tau_syn(i);
+  dwdt = ( a * ( V - E_L ) - w ) / tau_w;
+  for ( int i = 0; i < n_port; i++ )
+  {
+    dI1_syndt( i ) = -I1_syn( i ) / tau_syn( i );
+    dI_syndt( i ) = I1_syn( i ) - I_syn( i ) / tau_syn( i );
   }
 }
 
-template <int NVAR, int NPARAM> //, class DataStruct>
-__device__
- void ExternalUpdate
- (double x, float *y, float *param, bool end_time_step,
- aeif_psc_alpha_multisynapse_rk5 data_struct)
+template < int NVAR, int NPARAM > //, class DataStruct>
+__device__ void
+ExternalUpdate( double x, float* y, float* param, bool end_time_step, aeif_psc_alpha_multisynapse_rk5 data_struct )
{
- if ( V_m < -1.0e3) { // numerical instability
- printf("V_m out of lower bound\n");
+ if ( V_m < -1.0e3 )
+ { // numerical instability
+ printf( "V_m out of lower bound\n" );
V_m = V_reset;
- w=0;
+ w = 0;
return;
}
- if ( w < -1.0e6 || w > 1.0e6) { // numerical instability
- printf("w out of bound\n");
+ if ( w < -1.0e6 || w > 1.0e6 )
+ { // numerical instability
+ printf( "w out of bound\n" );
V_m = V_reset;
- w=0;
+ w = 0;
return;
}
- if (refractory_step > 0.0) {
+ if ( refractory_step > 0.0 )
+ {
V_m = V_reset;
- if (end_time_step) {
+ if ( end_time_step )
+ {
refractory_step -= 1.0;
}
}
- else {
- if ( V_m >= V_peak ) { // send spike
+ else
+ {
+ if ( V_m >= V_peak )
+ { // send spike
int neuron_idx = threadIdx.x + blockIdx.x * blockDim.x;
- PushSpike(data_struct.i_node_0_ + neuron_idx, 1.0);
+ PushSpike( data_struct.i_node_0_ + neuron_idx, 1.0 );
V_m = V_reset;
w += b; // spike-driven adaptation
- refractory_step = (int)round(t_ref/NESTGPUTimeResolution);
- if (refractory_step<0) {
- refractory_step = 0;
+ refractory_step = ( int ) round( t_ref / NESTGPUTimeResolution );
+ if ( refractory_step < 0 )
+ {
+ refractory_step = 0;
}
}
}
@@ -224,43 +220,40 @@ __device__
};
template <>
-int aeif_psc_alpha_multisynapse::UpdateNR<0>(long long it, double t1);
+int aeif_psc_alpha_multisynapse::UpdateNR< 0 >( long long it, double t1 );
-template <int N_PORT>
-int aeif_psc_alpha_multisynapse::UpdateNR(long long it, double t1)
+template < int N_PORT >
+int
+aeif_psc_alpha_multisynapse::UpdateNR( long long it, double t1 )
{
- if (N_PORT == n_port_) {
- const int NVAR = aeif_psc_alpha_multisynapse_ns::N_SCAL_VAR
- + aeif_psc_alpha_multisynapse_ns::N_PORT_VAR*N_PORT;
- const int NPARAM = aeif_psc_alpha_multisynapse_ns::N_SCAL_PARAM
- + aeif_psc_alpha_multisynapse_ns::N_PORT_PARAM*N_PORT;
+ if ( N_PORT == n_port_ )
+ {
+ const int NVAR = aeif_psc_alpha_multisynapse_ns::N_SCAL_VAR + aeif_psc_alpha_multisynapse_ns::N_PORT_VAR * N_PORT;
+ const int NPARAM =
+ aeif_psc_alpha_multisynapse_ns::N_SCAL_PARAM + aeif_psc_alpha_multisynapse_ns::N_PORT_PARAM * N_PORT;
- rk5_.Update<NVAR, NPARAM>(t1, h_min_, rk5_data_struct_);
+ rk5_.Update< NVAR, NPARAM >( t1, h_min_, rk5_data_struct_ );
}
- else {
- UpdateNR<N_PORT-1>(it, t1);
+ else
+ {
+ UpdateNR< N_PORT - 1 >( it, t1 );
}
return 0;
}
-template <int NVAR, int NPARAM>
-__device__
-void Derivatives(double x, float *y, float *dydx, float *param,
- aeif_psc_alpha_multisynapse_rk5 data_struct)
+template < int NVAR, int NPARAM >
+__device__ void
+Derivatives( double x, float* y, float* dydx, float* param, aeif_psc_alpha_multisynapse_rk5 data_struct )
{
- aeif_psc_alpha_multisynapse_ns::Derivatives<NVAR, NPARAM>(x, y, dydx, param,
- data_struct);
+ aeif_psc_alpha_multisynapse_ns::Derivatives< NVAR, NPARAM >( x, y, dydx, param, data_struct );
}
-template <int NVAR, int NPARAM>
-__device__
-void ExternalUpdate(double x, float *y, float *param, bool end_time_step,
- aeif_psc_alpha_multisynapse_rk5 data_struct)
+template < int NVAR, int NPARAM >
+__device__ void
+ExternalUpdate( double x, float* y, float* param, bool end_time_step, aeif_psc_alpha_multisynapse_rk5 data_struct )
{
- aeif_psc_alpha_multisynapse_ns::ExternalUpdate<NVAR, NPARAM>(x, y, param,
- end_time_step,
- data_struct);
+ aeif_psc_alpha_multisynapse_ns::ExternalUpdate< NVAR, NPARAM >( x, y, param, end_time_step, data_struct );
}
diff --git a/src/aeif_psc_alpha_multisynapse_rk5.h b/src/aeif_psc_alpha_multisynapse_rk5.h
index 3f0c82cad..8baf73324 100644
--- a/src/aeif_psc_alpha_multisynapse_rk5.h
+++ b/src/aeif_psc_alpha_multisynapse_rk5.h
@@ -21,31 +21,24 @@
*/
-
-
-
#ifndef AEIFPSCALPHAMULTISYNAPSERK5_H
#define AEIFPSCALPHAMULTISYNAPSERK5_H
struct aeif_psc_alpha_multisynapse_rk5;
-template <int NVAR, int NPARAM>
-__device__
-void Derivatives(double x, float *y, float *dydx, float *param,
- aeif_psc_alpha_multisynapse_rk5 data_struct);
+template < int NVAR, int NPARAM >
+__device__ void
+Derivatives( double x, float* y, float* dydx, float* param, aeif_psc_alpha_multisynapse_rk5 data_struct );
-template <int NVAR, int NPARAM>
-__device__
-void ExternalUpdate(double x, float *y, float *param, bool end_time_step,
- aeif_psc_alpha_multisynapse_rk5 data_struct);
+template < int NVAR, int NPARAM >
+__device__ void
+ExternalUpdate( double x, float* y, float* param, bool end_time_step, aeif_psc_alpha_multisynapse_rk5 data_struct );
-__device__
-void NodeInit(int n_var, int n_param, double x, float *y,
- float *param, aeif_psc_alpha_multisynapse_rk5 data_struct);
+__device__ void
+NodeInit( int n_var, int n_param, double x, float* y, float* param, aeif_psc_alpha_multisynapse_rk5 data_struct );
-__device__
-void NodeCalibrate(int n_var, int n_param, double x, float *y,
- float *param, aeif_psc_alpha_multisynapse_rk5 data_struct);
+__device__ void
+NodeCalibrate( int n_var, int n_param, double x, float* y, float* param, aeif_psc_alpha_multisynapse_rk5 data_struct );
#endif
diff --git a/src/aeif_psc_delta.cu b/src/aeif_psc_delta.cu
index 2b4d6adb1..8bcc58e30 100644
--- a/src/aeif_psc_delta.cu
+++ b/src/aeif_psc_delta.cu
@@ -21,24 +21,20 @@
*/
-
-
-
-#include <config.h>
-#include <cmath>
-#include <iostream>
+#include "aeif_psc_delta.h"
#include "aeif_psc_delta_kernel.h"
#include "rk5.h"
-#include "aeif_psc_delta.h"
+#include <cmath>
+#include <iostream>