[med-svn] [cnrun] 14/25: WIP (libcn mostly good)
andrei zavada
hmmr-guest at moszumanska.debian.org
Thu Nov 6 22:08:31 UTC 2014
This is an automated email from the git hooks/post-receive script.
hmmr-guest pushed a commit to branch WIP
in repository cnrun.
commit cacd07f4635db84e6542cd9e518b01c311ed6afd
Author: andrei zavada <johnhommer at gmail.com>
Date: Fri Sep 19 09:07:18 2014 +0400
WIP (libcn mostly good)
---
.gitignore | 30 -
.gitignore => upstream/.gitignore | 4 -
upstream/src/cnrun/main.cc | 85 +-
upstream/src/cnrun/runner.hh | 4 +-
upstream/src/libcn/Makefile.am | 2 +
upstream/src/libcn/base-neuron.hh | 384 +++---
upstream/src/libcn/base-synapse.hh | 98 +-
upstream/src/libcn/base-unit.cc | 834 ++++++------
upstream/src/libcn/base-unit.hh | 444 +++----
upstream/src/libcn/forward-decls.hh | 43 +
upstream/src/libcn/hosted-neurons.cc | 876 ++++++------
upstream/src/libcn/hosted-neurons.hh | 374 +++---
upstream/src/libcn/hosted-synapses.cc | 329 ++---
upstream/src/libcn/hosted-synapses.hh | 298 +++--
upstream/src/libcn/integrate-base.hh | 63 +-
upstream/src/libcn/integrate-rk65.hh | 38 +-
upstream/src/libcn/model-cycle.cc | 744 +++++------
upstream/src/libcn/model-nmlio.cc | 735 +++++-----
upstream/src/libcn/model-struct.cc | 2072 +++++++++++++++--------------
upstream/src/libcn/model.hh | 835 ++++++------
upstream/src/libcn/mx-attr.hh | 31 +-
upstream/src/libcn/param-unit-literals.hh | 12 +-
upstream/src/libcn/sources.cc | 186 +--
upstream/src/libcn/sources.hh | 211 +--
upstream/src/libcn/standalone-neurons.cc | 408 +++---
upstream/src/libcn/standalone-neurons.hh | 212 +--
upstream/src/libcn/standalone-synapses.cc | 74 +-
upstream/src/libcn/standalone-synapses.hh | 97 +-
upstream/src/libcn/types.cc | 912 ++++++-------
upstream/src/libcn/types.hh | 158 +--
upstream/src/libstilton/containers.hh | 11 +-
upstream/src/libstilton/exprparser.cc | 487 +++----
upstream/src/libstilton/exprparser.hh | 129 +-
upstream/src/libstilton/lang.hh | 10 +-
upstream/src/libstilton/libcommon.cc | 38 +-
upstream/src/libstilton/string.hh | 2 +
36 files changed, 5683 insertions(+), 5587 deletions(-)
diff --git a/.gitignore b/.gitignore
index c7569fb..5176126 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,33 +1,3 @@
.DirIcon
.backups
-Doxygen
-.libs
-.deps
-autom4te*
-aclocal*
-config.h
-config.h.in
-config.guess
-config.log
-config.sub
-config.status
-configure
-depcomp
-libtool
-install-sh
-ltmain.sh
-m4
-missing
-stamp-h1
-Makefile
-Makefile.in
-*.o
-*.lo
-*.la
-*.a
-*.pc
-*.gch
-cscope.*
-TAGS
-
*~
diff --git a/.gitignore b/upstream/.gitignore
similarity index 91%
copy from .gitignore
copy to upstream/.gitignore
index c7569fb..c2333f0 100644
--- a/.gitignore
+++ b/upstream/.gitignore
@@ -1,5 +1,3 @@
-.DirIcon
-.backups
Doxygen
.libs
.deps
@@ -29,5 +27,3 @@ Makefile.in
*.gch
cscope.*
TAGS
-
-*~
diff --git a/upstream/src/cnrun/main.cc b/upstream/src/cnrun/main.cc
index 5047c4e..52486dd 100644
--- a/upstream/src/cnrun/main.cc
+++ b/upstream/src/cnrun/main.cc
@@ -29,7 +29,6 @@ using namespace std;
using namespace cnrun;
-
// argparse
const char
@@ -46,13 +45,13 @@ enum TOptChar {
dir = 'C',
listen_dt = 'E',
log_spiking = 'k',
- log_spiking_threshold = 0,
+ log_spiking_threshold = '\0',
log_option = 'L',
precision = 'e',
dt_max = 'M',
dt_min = 'm',
- dt_xcap = 0,
- sort_units = 0,
+ dt_xcap = '\0',
+ sort_units = '\0',
verbosely = 'v',
help = 'h',
};
@@ -180,8 +179,8 @@ parse_opt( int key, const char *arg, struct argp_state *state)
case opt::log_spiking:
Q.log_spikers = true;
switch ( *arg ) {
- case '0': Q.log_spikers_use_serial_id = true; break;
- case 'l': Q.log_spikers_use_serial_id = false; break;
+ case '0': Q.log_spikers_use_serial_id = true; break;
+ case 'l': Q.log_spikers_use_serial_id = false; break;
default:
fprintf( stderr, "Expecting a '0' or 'l' for log-spiking spec, got \"%s\"", arg);
return (error_t)ARGP_ERR_UNKNOWN;
@@ -208,7 +207,7 @@ parse_opt( int key, const char *arg, struct argp_state *state)
break;
case opt::defvar:
- {
+ {
double unused;
CExpression
expr;
@@ -244,7 +243,7 @@ lprintf( int level, int verbosely, const char* fmt, ...)
va_start (ap, fmt);
vprintf( fmt, ap);
va_end (ap);
- }
+ }
}
@@ -300,41 +299,41 @@ main( int argc, char *argv[])
// void
// usage( const char *argv0)
// {
-// cout << "Usage: " << argv0 << "\n" <<
-// " -e <script_fname>\tExecute script\n"
-// " -D \t\t\tDump all unit types in the model and exit\n"
-// " -C <dir>\t\tWork in dir\n"
-// " -s \t\t\tSort units\n"
-// "\n"
-// " -L[d1Lx] \t\tLogging & listeners:\n"
-// " d \t\t\tdefer writing to disk until done rather than writing continuously\n"
-// " 1\t\t\tonly log the first variable\n"
-// " x\t\t\twrite in native binary form rather than in ASCII\n"
-// " L\t\t\tlog integrator dt\n"
-// " -E<double>\t\tListen at this interval (default " << Options.listen_dt << ";\n"
-// "\t\t\t set to 0 to listen every cycle)\n"
-// "\n"
-// " -kl \t\t\tWrite a model-wide log of spiking neurons, using labels\n"
-// " -k0 \t\t\t... use unit id instead\n"
-// " -kS<double>\t\tSpike detection threshold (default " << Options.spike_threshold << ")\n"
-// "\n"
-// " -e <uint>\t\tSet precision for all output (default " << Options.precision << ")\n"
-// "\n"
-// " -tT <double>\t\tdt_max (default " << Options.integration_dt_max << ")\n"
-// " -tt <double>\t\tdt_min (default " << Options.integration_dt_min << ")\n"
-// " -tx <double>\t\tCap dt by current dt value x this (default " << Options.integration_dt_max_cap << ")\n"
-// "\n"
-// " -D EXPR\t\tAny valid expression, will inject whatever variables get assigned in it\n"
-// "\n"
-// " -v <int>\t\tSet verbosity level (default " << Options.verbosely << ")\n"
-// "\t\t\t Use a negative value to show the progress percentage only,\n"
-// " -v[%[-]t[-]]\t\tDisplay (with -, suppress) progress percent and/or time\n"
-// "\t\t\t indented on the line at 8 x (minus) this value.\n"
-// "\n"
-// " -U \t\t\tList available unit types with parameter names\n"
-// "\t\t\t and values, and exit\n"
-// " -h \t\t\tDisplay this help\n"
-// "\n";
+// cout << "Usage: " << argv0 << "\n" <<
+// " -e <script_fname>\tExecute script\n"
+// " -D \t\t\tDump all unit types in the model and exit\n"
+// " -C <dir>\t\tWork in dir\n"
+// " -s \t\t\tSort units\n"
+// "\n"
+// " -L[d1Lx] \t\tLogging & listeners:\n"
+// " d \t\t\tdefer writing to disk until done rather than writing continuously\n"
+// " 1\t\t\tonly log the first variable\n"
+// " x\t\t\twrite in native binary form rather than in ASCII\n"
+// " L\t\t\tlog integrator dt\n"
+// " -E<double>\t\tListen at this interval (default " << Options.listen_dt << ";\n"
+// "\t\t\t set to 0 to listen every cycle)\n"
+// "\n"
+// " -kl \t\t\tWrite a model-wide log of spiking neurons, using labels\n"
+// " -k0 \t\t\t... use unit id instead\n"
+// " -kS<double>\t\tSpike detection threshold (default " << Options.spike_threshold << ")\n"
+// "\n"
+// " -e <uint>\t\tSet precision for all output (default " << Options.precision << ")\n"
+// "\n"
+// " -tT <double>\t\tdt_max (default " << Options.integration_dt_max << ")\n"
+// " -tt <double>\t\tdt_min (default " << Options.integration_dt_min << ")\n"
+// " -tx <double>\t\tCap dt by current dt value x this (default " << Options.integration_dt_max_cap << ")\n"
+// "\n"
+// " -D EXPR\t\tAny valid expression, will inject whatever variables get assigned in it\n"
+// "\n"
+// " -v <int>\t\tSet verbosity level (default " << Options.verbosely << ")\n"
+// "\t\t\t Use a negative value to show the progress percentage only,\n"
+// " -v[%[-]t[-]]\t\tDisplay (with -, suppress) progress percent and/or time\n"
+// "\t\t\t indented on the line at 8 x (minus) this value.\n"
+// "\n"
+// " -U \t\t\tList available unit types with parameter names\n"
+// "\t\t\t and values, and exit\n"
+// " -h \t\t\tDisplay this help\n"
+// "\n";
// }
// } // namespace
diff --git a/upstream/src/cnrun/runner.hh b/upstream/src/cnrun/runner.hh
index 123d8d3..c9f7419 100644
--- a/upstream/src/cnrun/runner.hh
+++ b/upstream/src/cnrun/runner.hh
@@ -9,8 +9,8 @@
* License: GPL
*/
-#ifndef CN_CNRUN_RUNNER_H_
-#define CN_CNRUN_RUNNER_H_
+#ifndef CNRUN_CNRUN_RUNNER_H_
+#define CNRUN_CNRUN_RUNNER_H_
#include <list>
#include <string>
diff --git a/upstream/src/libcn/Makefile.am b/upstream/src/libcn/Makefile.am
index 7679188..5691f66 100644
--- a/upstream/src/libcn/Makefile.am
+++ b/upstream/src/libcn/Makefile.am
@@ -3,6 +3,7 @@ include $(top_srcdir)/src/Common.mk
pkglib_LTLIBRARIES = libcn.la
libcn_la_SOURCES = \
+ forward-decls.hh \
sources.cc \
types.cc \
base-unit.cc \
@@ -31,6 +32,7 @@ libcn_la_LDFLAGS = \
if DO_PCH
BUILT_SOURCES = \
+ forward-decls.hh.gch \
sources.hh.gch \
types.hh.gch \
param-unit-literals.hh.gch \
diff --git a/upstream/src/libcn/base-neuron.hh b/upstream/src/libcn/base-neuron.hh
index 47a6ee0..f0927b8 100644
--- a/upstream/src/libcn/base-neuron.hh
+++ b/upstream/src/libcn/base-neuron.hh
@@ -9,15 +9,16 @@
*/
-#ifndef LIBCN_BASE_NEURON_H
-#define LIBCN_BASE_NEURON_H
+#ifndef CNRUN_LIBCN_BASENEURON_H_
+#define CNRUN_LIBCN_BASENEURON_H_
#include <list>
#include <cstring>
#include <cmath>
#include <map>
-#include <numeric>
+#include <tuple>
+#include "forward-decls.hh"
#include "base-unit.hh"
#include "base-synapse.hh"
@@ -26,121 +27,122 @@
#endif
-
using namespace std;
namespace cnrun {
#define CN_MIN_DISTANCE .1
-
-
-
-class CModel;
struct SSpikeloggerService;
-
typedef map<C_BaseSynapse*, double> SCleft;
inline double operator+ ( double a, const pair<C_BaseSynapse*, double>& b) { return a + b.second; }
class C_BaseNeuron
: public C_BaseUnit {
- struct SCoord {
- double x, y, z;
-
- SCoord( const double& inx, const double& iny, const double& inz)
- : x (inx), y (iny), z (inz)
- {}
-
- // distance
- double operator - ( const SCoord &p)
- {
- return sqrt( pow(x - p.x, 2) + pow(y - p.y, 2) + pow(z - p.z, 2));
- }
-
- bool operator == ( const SCoord &p) const
- {
- return x == p.x && y == p.y && z == p.z;
- }
- bool operator != ( const SCoord &p) const
- {
- return x != p.x || y != p.y || z != p.z;
- }
- bool too_close( const SCoord& p, double mindist = CN_MIN_DISTANCE) const
- {
- return fabs(x - p.x) < mindist &&
- fabs(y - p.y) < mindist &&
- fabs(z - p.z) < mindist;
- }
- };
-
- friend class CModel;
- friend class C_BaseSynapse;
+ DELETE_DEFAULT_METHODS (C_BaseNeuron)
+
+ friend class CModel;
+ friend class C_BaseSynapse;
protected:
- C_BaseNeuron();
+ C_BaseNeuron (TUnitType intype, const string& inlabel,
+ double inx, double iny, double inz,
+ CModel* inM, int s_mask = 0)
+ : C_BaseUnit (intype, inlabel, inM, s_mask),
+ pos (inx, iny, inz),
+ _spikelogger_agent (nullptr)
+ {}
+
+ virtual ~C_BaseNeuron();
+
+ struct SCoord {
+
+ DELETE_DEFAULT_METHODS (SCoord)
+
+ double x, y, z;
+
+ SCoord( double inx, double iny, double inz)
+ : x (inx), y (iny), z (inz)
+ {}
+
+ SCoord& operator= ( tuple<double, double, double> v)
+ {
+ tie(x, y, z) = v;
+ return *this;
+ }
+
+ // distance
+ double operator- ( const SCoord &p)
+ {
+ return sqrt( pow(x - p.x, 2) + pow(y - p.y, 2) + pow(z - p.z, 2));
+ }
+
+ bool operator== ( const SCoord &p) const
+ {
+ return x == p.x && y == p.y && z == p.z;
+ }
+ bool operator!= ( const SCoord &p) const
+ {
+ return x != p.x || y != p.y || z != p.z;
+ }
+ bool too_close( const SCoord& p, double mindist = CN_MIN_DISTANCE) const
+ {
+ return fabs(x - p.x) < mindist &&
+ fabs(y - p.y) < mindist &&
+ fabs(z - p.z) < mindist;
+ }
+ };
- SCleft _dendrites;
- list<C_BaseSynapse*>
- _axonal_harbour;
public:
- SCoord pos;
+ SCoord pos;
- size_t axonal_conns() const { return _axonal_harbour.size(); }
- size_t dendrites() const { return _dendrites.size(); }
+ size_t axonal_conns() const { return _axonal_harbour.size(); }
+ size_t dendrites() const { return _dendrites.size(); }
- bool connects_to( const C_BaseNeuron &to) const __attribute__ ((pure));
- C_BaseSynapse *connects_via( C_BaseNeuron &to,
- SCleft::mapped_type *g_ptr = nullptr) const;
+ bool
+ connects_to( const C_BaseNeuron &to) const;
- protected:
- C_BaseNeuron( TUnitType intype, const char *inlabel,
- double inx, double iny, double inz,
- CModel* inM, int s_mask = 0)
- : C_BaseUnit (intype, inlabel, inM, s_mask),
- pos (inx, iny, inz),
- _spikelogger_agent (nullptr)
- {}
-
- virtual ~C_BaseNeuron();
+ C_BaseSynapse*
+ connects_via( const C_BaseNeuron &to,
+ SCleft::mapped_type *g_ptr = nullptr) const;
- public:
- void reset_state();
+ void reset_state();
// even though for rate-based neurons, E is not meaningful
// leave these here to make the method available to synapses wanting _target-E
- virtual double E() const
- { return __cn_dummy_double; }
- virtual double E( vector<double>&) const
- { return __cn_dummy_double; }
+ virtual double E() const
+ { return 0; }
+ virtual double E( vector<double>&) const
+ { return 0; }
// likewise, for those needing _source->F
- virtual double F() const
- { return __cn_dummy_double; }
- virtual double F( vector<double>&) const
- { return __cn_dummy_double; }
-
- // struct __SCleft_second_plus {
- // double operator() ( double a, const SCleft::value_type &i) { return a + i.second; }
- // };
- double Isyn() const // is the sum of Isyn() on all dendrites
- {
- double I = 0.;
- for ( auto &Y : _dendrites )
- I += Y.first->Isyn(*this, Y.second);
- return I;
- }
-
- double Isyn( vector<double> &x) const // an honourable mention
- {
- double I = 0.;
- for ( auto &Y : _dendrites )
- I += Y.first->Isyn(x, *this, Y.second);
- return I;
- }
-
- virtual void possibly_fire()
- {}
+ virtual double F() const
+ { return 0; }
+ virtual double F( vector<double>&) const
+ { return 0; }
+
+ // struct __SCleft_second_plus {
+ // double operator() ( double a, const SCleft::value_type &i) { return a + i.second; }
+ // };
+ double Isyn() const // is the sum of Isyn() on all dendrites
+ {
+ double I = 0.;
+ for ( auto &Y : _dendrites )
+ I += Y.first->Isyn(*this, Y.second);
+ return I;
+ }
+
+ double Isyn( vector<double> &x) const // an honourable mention
+ {
+ double I = 0.;
+ for ( auto &Y : _dendrites )
+ I += Y.first->Isyn(x, *this, Y.second);
+ return I;
+ }
+
+ virtual void possibly_fire()
+ {}
// Even though rate-based neurons do not track individual spikes,
// we can estimate a probability of such a neuron spiking as F*dt*rand().
@@ -148,110 +150,116 @@ class C_BaseNeuron
// Note this assumes P[0] is F for all rate-based neurons, and E
// for those conductance-based, which by now is hard-coded for all neurons.
- virtual unsigned n_spikes_in_last_dt() const
- { return 0; }
- virtual void do_detect_spike_or_whatever()
- {}
+ virtual size_t n_spikes_in_last_dt() const
+ { return 0; }
+ virtual void do_detect_spike_or_whatever()
+ {}
- protected:
- SSpikeloggerService *_spikelogger_agent;
+ SSpikeloggerService* spikelogger_agent() { return _spikelogger_agent; }
+ SSpikeloggerService*
+ enable_spikelogging_service( int s_mask = 0);
+ SSpikeloggerService*
+ enable_spikelogging_service( double sample_period, double sigma, double from = 0.,
+ int s_mask = 0);
+ void disable_spikelogging_service();
+ void sync_spikelogging_history();
- public:
- SSpikeloggerService* spikelogger_agent() { return _spikelogger_agent; }
- SSpikeloggerService* enable_spikelogging_service( int s_mask = 0);
- SSpikeloggerService* enable_spikelogging_service( double sample_period, double sigma, double from = 0.,
- int s_mask = 0);
- void disable_spikelogging_service();
- void sync_spikelogging_history();
+ double distance_to( C_BaseNeuron*) const; // will do on demand
- double distance_to( C_BaseNeuron*) const; // will do on demand
+ void dump( bool with_params = false, FILE *strm = stdout) const;
- void dump( bool with_params = false, FILE *strm = stdout) const;
+ protected:
+ SCleft _dendrites;
+ list<C_BaseSynapse*>
+ _axonal_harbour;
+
+ SSpikeloggerService
+ *_spikelogger_agent;
};
-#define CN_KL_COMPUTESDF (1 << 0)
-#define CN_KL_ISSPIKINGNOW (1 << 1)
-#define CN_KL_PERSIST (1 << 2) // should not be deleted at disable_spikelogging_service
-#define CN_KL_IDLE (1 << 3) // should not be placed on spikelogging_neu_list on enable_spikelogging_service
+#define CN_KL_COMPUTESDF (1 << 0)
+#define CN_KL_ISSPIKINGNOW (1 << 1)
+#define CN_KL_PERSIST (1 << 2) // should not be deleted at disable_spikelogging_service
+#define CN_KL_IDLE (1 << 3) // should not be placed on spikelogging_neurons on enable_spikelogging_service
struct SSpikeloggerService {
- friend class C_BaseNeuron;
- friend class C_HostedConductanceBasedNeuron; // accesses _status from do_spikelogging_or_whatever
- friend class COscillatorDotPoisson; // same
- friend class COscillatorPoisson; // same
- friend class CModel; // checks CN_KL_IDLE in include_unit
- private:
- SSpikeloggerService();
+ DELETE_DEFAULT_METHODS (SSpikeloggerService)
- int _status;
+ friend class C_BaseNeuron;
+ friend class C_HostedConductanceBasedNeuron; // accesses _status from do_spikelogging_or_whatever
+ friend class COscillatorDotPoisson; // same
+ friend class COscillatorPoisson; // same
+ friend class CModel; // checks CN_KL_IDLE in include_unit
public:
- SSpikeloggerService( C_BaseNeuron *client,
- int s_mask = 0)
- : _status (s_mask & ~CN_KL_COMPUTESDF),
- _client (client),
- t_last_spike_start (-INFINITY), t_last_spike_end (-INFINITY),
- sample_period (42), sigma (42), start_delay (0.)
- {}
- SSpikeloggerService( C_BaseNeuron *client,
- double insample_period, double insigma, double instart_delay = 0.,
- int s_mask = 0)
- : _status (s_mask | CN_KL_COMPUTESDF),
- _client (client),
- t_last_spike_start (-INFINITY), t_last_spike_end (-INFINITY),
- sample_period (insample_period), sigma (insigma), start_delay (instart_delay)
- {}
-
- C_BaseNeuron *_client;
-
- double t_last_spike_start,
- t_last_spike_end;
-
- size_t n_spikes_since( double since = 0.) const __attribute__ ((pure));
-
- double sample_period,
- sigma,
- start_delay;
-
-// void spike_detect(); // multiplexing units will have a different version
- // replaced by do_spikelogging_or_whatever on the client side
-
- vector<double> spike_history;
-
- void reset()
- {
- _status &= ~CN_KL_ISSPIKINGNOW;
- t_last_spike_start = t_last_spike_end
- /*= t_firing_started = t_firing_ended */ = -INFINITY;
- spike_history.clear();
- }
-
- protected:
- void sync_history();
+ SSpikeloggerService (C_BaseNeuron *client,
+ int s_mask = 0)
+ : _client (client),
+ t_last_spike_start (-INFINITY), t_last_spike_end (-INFINITY),
+ sample_period (42), sigma (42), start_delay (0.),
+ _status (s_mask & ~CN_KL_COMPUTESDF)
+ {}
+ SSpikeloggerService (C_BaseNeuron *client,
+ double insample_period, double insigma, double instart_delay = 0.,
+ int s_mask = 0)
+ : _client (client),
+ t_last_spike_start (-INFINITY), t_last_spike_end (-INFINITY),
+ sample_period (insample_period), sigma (insigma), start_delay (instart_delay),
+ _status (s_mask | CN_KL_COMPUTESDF)
+ {}
+
+ C_BaseNeuron *_client;
+
+ double t_last_spike_start,
+ t_last_spike_end;
+
+ double sample_period,
+ sigma,
+ start_delay;
+
+// void spike_detect(); // multiplexing units will have a different version
+ // replaced by do_spikelogging_or_whatever on the client side
+
+ vector<double> spike_history;
+
+ void reset()
+ {
+ _status &= ~CN_KL_ISSPIKINGNOW;
+ t_last_spike_start = t_last_spike_end
+ /*= t_firing_started = t_firing_ended */ = -INFINITY;
+ spike_history.clear();
+ }
+
+ size_t n_spikes_since( double since = 0.) const;
- public:
// spike density function
- double sdf( double at, double sample_length, double sigma, unsigned* nspikes = nullptr) const;
+ double sdf( double at, double sample_length, double sigma, size_t* nspikes = nullptr) const;
// spike homogeneity function
- double shf( double at, double sample_length) const;
+ double shf( double at, double sample_length) const;
// why not allow custom sampling?
- size_t get_sxf_vector_custom( vector<double> *sdf_buf, vector<double> *shf_buf, vector<unsigned> *nsp_buf,
- double sample_period_custom, double sigma_custom,
- double from = 0., double to = 0.) const; // "to == 0." for model_time()
- size_t get_sxf_vector( vector<double> *sdf_buf, vector<double> *shf_buf, vector<unsigned> *nsp_buf,
- double from = 0., double to = 0.) const
- {
- return get_sxf_vector_custom( sdf_buf, shf_buf, nsp_buf,
- sample_period, sigma,
- from, to);
- }
+ size_t get_sxf_vector_custom( vector<double> *sdf_buf, vector<double> *shf_buf, vector<size_t> *nsp_buf,
+ double sample_period_custom, double sigma_custom,
+ double from = 0., double to = 0.) const; // "to == 0." for model_time()
+ size_t get_sxf_vector( vector<double> *sdf_buf, vector<double> *shf_buf, vector<size_t> *nsp_buf,
+ double from = 0., double to = 0.) const
+ {
+ return get_sxf_vector_custom( sdf_buf, shf_buf, nsp_buf,
+ sample_period, sigma,
+ from, to);
+ }
+
+ protected:
+ void sync_history();
+
+ private:
+ int _status;
};
@@ -260,9 +268,9 @@ struct SSpikeloggerService {
inline void
C_BaseNeuron::reset_state()
{
- C_BaseUnit::reset_state();
- if ( _spikelogger_agent )
- _spikelogger_agent->reset();
+ C_BaseUnit::reset_state();
+ if ( _spikelogger_agent )
+ _spikelogger_agent->reset();
}
@@ -270,23 +278,22 @@ C_BaseNeuron::reset_state()
inline void
C_BaseNeuron::sync_spikelogging_history()
{
- if ( _spikelogger_agent )
- _spikelogger_agent->sync_history();
+ if ( _spikelogger_agent )
+ _spikelogger_agent->sync_history();
}
inline double
-C_BaseSynapse::g_on_target( const C_BaseNeuron &which) const
+C_BaseSynapse::g_on_target( C_BaseNeuron &neuron) const
{
- return (find( _targets.begin(), _targets.end(), &which) != _targets.end())
- ? which._dendrites.at(const_cast<C_BaseSynapse*>(this)) : __cn_dummy_double;
+ return neuron._dendrites.at(
+ const_cast<C_BaseSynapse*>(this));
}
inline void
-C_BaseSynapse::set_g_on_target( C_BaseNeuron &which, double g)
+C_BaseSynapse::set_g_on_target( C_BaseNeuron &neuron, double g)
{
- if ( find( _targets.begin(), _targets.end(), &which) != _targets.end() )
- which._dendrites[this] = g;
+ neuron._dendrites[this] = g;
}
@@ -294,4 +301,9 @@ C_BaseSynapse::set_g_on_target( C_BaseNeuron &which, double g)
#endif
-// EOF
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/base-synapse.hh b/upstream/src/libcn/base-synapse.hh
index 286b5b1..3ed2aab 100644
--- a/upstream/src/libcn/base-synapse.hh
+++ b/upstream/src/libcn/base-synapse.hh
@@ -10,15 +10,17 @@
*/
-#ifndef LIBCN_BASE_SYNAPSE_H
-#define LIBCN_BASE_SYNAPSE_H
+#ifndef CNRUN_LIBCN_BASE_SYNAPSE_H_
+#define CNRUN_LIBCN_BASE_SYNAPSE_H_
#include <cmath>
#include <vector>
#include <list>
#include <map>
-#include <algorithm>
+#include "libstilton/lang.hh"
+#include "libstilton/containers.hh"
+#include "forward-decls.hh"
#include "base-unit.hh"
#if HAVE_CONFIG_H && !defined(VERSION)
@@ -28,74 +30,68 @@
using namespace std;
-
namespace cnrun {
-class C_BaseSynapse;
-class C_BaseNeuron;
-class CModel;
-
-
-typedef map<C_BaseSynapse*, double> SCleft;
-
class C_BaseSynapse
: public C_BaseUnit {
- friend class CModel;
- friend class C_BaseNeuron;
-
- protected:
- C_BaseSynapse(); // not constructible without parameters
+ DELETE_DEFAULT_METHODS (C_BaseSynapse)
- C_BaseNeuron *_source;
- list<C_BaseNeuron*>
- _targets;
- typedef list<C_BaseNeuron*>::iterator lni;
- typedef list<C_BaseNeuron*>::reverse_iterator lnri;
- typedef list<C_BaseNeuron*>::const_iterator lnci;
- bool has_target( const C_BaseNeuron *tgt) __attribute__ ((pure))
- {
- return find( _targets.begin(), _targets.end(), tgt) != _targets.end();
- }
+ friend class CModel;
+ friend class C_BaseNeuron;
- double t_last_release_started;
+ protected:
+ C_BaseSynapse( TUnitType intype,
+ C_BaseNeuron *insource, C_BaseNeuron *intarget,
+ double ing, CModel *inM, int s_mask = 0);
+ virtual ~C_BaseSynapse();
public:
- C_BaseNeuron
- *source() { return _source; }
+ bool has_target( const C_BaseNeuron& tgt) const __attribute__ ((pure))
+ {
+ return cnrun::alg::member(
+ const_cast<C_BaseNeuron*>(&tgt), _targets);
+ }
+ C_BaseNeuron* source() { return _source; }
- double g_on_target( const C_BaseNeuron &which) const;
- void set_g_on_target( C_BaseNeuron &which, double g);
+ double g_on_target( C_BaseNeuron&) const;
+ void set_g_on_target( C_BaseNeuron&, double);
- C_BaseSynapse *clone_to_target( C_BaseNeuron *nt, double g);
- C_BaseSynapse *make_clone_independent( C_BaseNeuron *target);
+ C_BaseSynapse *clone_to_target( C_BaseNeuron *nt, double g);
+ C_BaseSynapse *make_clone_independent( C_BaseNeuron *target);
- protected:
- C_BaseSynapse( TUnitType intype,
- C_BaseNeuron *insource, C_BaseNeuron *intarget,
- double ing, CModel *inM, int s_mask = 0);
- virtual ~C_BaseSynapse();
+ void reset_state()
+ {
+ C_BaseUnit::reset_state();
+ t_last_release_started = -INFINITY;
+ }
- public:
- void reset_state()
- {
- C_BaseUnit::reset_state();
- t_last_release_started = -INFINITY;
- }
+ virtual double Isyn( const C_BaseNeuron &with_neuron, double g) const = 0;
+ virtual double Isyn( vector<double> &base, const C_BaseNeuron &with_neuron, double g) const = 0;
+ // no gsyn known to the synapse: now C_BaseNeuron::SCleft knows it
- virtual double Isyn( const C_BaseNeuron &with_neuron, double g) const = 0;
- virtual double Isyn( vector<double> &base, const C_BaseNeuron &with_neuron, double g) const = 0;
- // no gsyn known to the synapse: now C_BaseNeuron::SCleft knows it
+ void dump( bool with_params = false, FILE *strm = stdout) const;
+
+ protected:
+ C_BaseNeuron
+ *_source;
+ list<C_BaseNeuron*>
+ _targets;
- void dump( bool with_params = false, FILE *strm = stdout) const;
+ double t_last_release_started;
private:
- virtual void update_queue()
- {}
+ virtual void update_queue()
+ {}
};
}
#endif
-// EOF
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/base-unit.cc b/upstream/src/libcn/base-unit.cc
index 92bdf36..4929cf5 100644
--- a/upstream/src/libcn/base-unit.cc
+++ b/upstream/src/libcn/base-unit.cc
@@ -13,10 +13,11 @@
#include <unistd.h>
#include <iostream>
#include <limits>
-#include <algorithm>
+#include <functional>
#include <gsl/gsl_statistics_double.h>
+#include "libstilton/containers.hh"
#include "base-unit.hh"
#include "model.hh"
@@ -26,29 +27,29 @@
using namespace std;
-
+using cnrun::alg::member;
cnrun::C_BaseUnit::
-C_BaseUnit( TUnitType intype, const char *inlabel,
- CModel* inM, int s_mask)
- : _type (intype), _status (0 |/* CN_UENABLED |*/ s_mask),
- M (inM),
- _binwrite_handle (-1), _listener_disk (nullptr), _listener_mem (nullptr),
- precision (__cn_default_unit_precision)
+C_BaseUnit( TUnitType intype, const string& inlabel,
+ CModel* inM, int s_mask)
+ : precision (cn_default_unit_precision),
+ _type (intype), _status (0 |/* CN_UENABLED |*/ s_mask),
+ M (inM),
+ _binwrite_handle (-1), _listener_disk (nullptr), _listener_mem (nullptr)
{
- memset( _label, 0, CN_MAX_LABEL_SIZE);
- if ( inlabel ) {
- strncpy( _label, inlabel, CN_MAX_LABEL_SIZE);
- if ( inM && inM->unit_by_label( _label) ) {
- fprintf( stderr, "Model %s already has a unit labelled \"%s\"\n", inM->name.c_str(), _label);
- _status |= CN_UERROR;
- }
- } else
- snprintf( _label, CN_MAX_LABEL_SIZE-1, "fafa%p", this);
-
- reset_params();
- // don't have field idx to do reset_vars() safely
+ memset( _label, 0, max_label_size);
+ if ( inlabel.size() ) {
+ strncpy( _label, inlabel.c_str(), max_label_size);
+ if ( inM && inM->unit_by_label( _label) ) {
+ fprintf( stderr, "Model %s already has a unit labelled \"%s\"\n", inM->name.c_str(), _label);
+ _status |= CN_UERROR;
+ }
+ } else
+ snprintf( _label, max_label_size-1, "fafa%p", this);
+
+ reset_params();
+ // don't have field idx to do reset_vars() safely
}
@@ -57,32 +58,32 @@ void
cnrun::C_BaseUnit::
reset_state()
{
- if ( M && M->verbosely > 3 )
- fprintf( stderr, "Resetting \"%s\"\n", _label);
- reset_vars();
- if ( is_listening() )
- restart_listening();
+ if ( M && M->options.verbosely > 3 )
+ fprintf( stderr, "Resetting \"%s\"\n", _label);
+ reset_vars();
+ if ( is_listening() )
+ restart_listening();
}
int
cnrun::C_BaseUnit::
-param_idx_by_sym( const char *sym) const
+param_idx_by_sym( const string& sym) const
{
- for ( int i = 0; i < p_no(); ++i )
- if ( strcmp( sym, __CNUDT[_type].stock_param_syms[i]) == 0 )
- return i;
- return -1;
+ for ( size_t i = 0; i < p_no(); ++i )
+ if ( sym == __CNUDT[_type].stock_param_syms[i] )
+ return i;
+ return -1;
}
int
cnrun::C_BaseUnit::
-var_idx_by_sym( const char *sym) const
+var_idx_by_sym( const string& sym) const
{
- for ( unsigned short i = 0; i < v_no(); ++i )
- if ( strcmp( sym, __CNUDT[_type].stock_var_syms[i]) == 0 )
- return i;
- return -1;
+ for ( size_t i = 0; i < v_no(); ++i )
+ if ( sym == __CNUDT[_type].stock_var_syms[i] )
+ return i;
+ return -1;
}
@@ -96,55 +97,55 @@ void
cnrun::C_BaseUnit::
start_listening( int mask)
{
- if ( !M ) {
- fprintf( stderr, "start_listening() called for an unattached unit \"%s\"\n", _label);
- return;
- }
- if ( _listener_disk || _listener_mem || _binwrite_handle != -1 ) { // listening already; check if user wants us to listen differently
- if ( (_status | (mask & (CN_ULISTENING_DISK | CN_ULISTENING_MEM | CN_ULISTENING_BINARY | CN_ULISTENING_1VARONLY | CN_ULISTENING_DEFERWRITE)))
- != mask ) {
- stop_listening(); // this will nullptrify _listener_{mem,disk}, avoiding recursion
- start_listening( mask);
- if ( M->verbosely > 4 )
- fprintf( stderr, "Unit \"%s\" was already listening\n", _label);
- return;
- }
- }
+ if ( !M ) {
+ fprintf( stderr, "start_listening() called for an unattached unit \"%s\"\n", _label);
+ return;
+ }
+ if ( _listener_disk || _listener_mem || _binwrite_handle != -1 ) { // listening already; check if user wants us to listen differently
+ if ( (_status | (mask & (CN_ULISTENING_DISK | CN_ULISTENING_MEM | CN_ULISTENING_BINARY | CN_ULISTENING_1VARONLY | CN_ULISTENING_DEFERWRITE)))
+ != mask ) {
+ stop_listening(); // this will nullptrify _listener_{mem,disk}, avoiding recursion
+ start_listening( mask);
+ if ( M->options.verbosely > 4 )
+ fprintf( stderr, "Unit \"%s\" was already listening\n", _label);
+ return;
+ }
+ }
// deferred write implies a mem listener
- if ( mask & CN_ULISTENING_DEFERWRITE && !(mask & CN_ULISTENING_MEM) )
- mask |= CN_ULISTENING_MEM;
-
- if ( mask & CN_ULISTENING_MEM )
- _listener_mem = new vector<double>;
-
- if ( mask & CN_ULISTENING_DISK ) {
- if ( M->_status & CN_MDL_DISKLESS )
- fprintf( stderr, "Cannot get Unit \"%s\" to listen to disk in a diskless model\n", _label);
- else {
- _listener_disk = new ofstream( (string(_label)+".var").c_str(), ios_base::trunc);
- _listener_disk->precision( precision);
-
- *_listener_disk << "# " << _label << " variables\n#<time>";
- if ( mask & CN_ULISTENING_1VARONLY )
- *_listener_disk << "\t<" << var_sym(0) << ">";
- else
- for ( unsigned short v = 0; v < v_no(); ++v )
- *_listener_disk << "\t<" << var_sym(v) << ">";
- *_listener_disk << endl;
- if ( M->verbosely > 4 )
- fprintf( stderr, "Unit \"%s\" now listening\n", _label);
- }
- }
-
- if ( mask & CN_ULISTENING_BINARY )
- _binwrite_handle = open( (string(_label)+".varx").c_str(), O_WRONLY|O_CREAT|O_TRUNC, S_IRUSR | S_IWUSR);
-
- _status |= (mask & (CN_ULISTENING_DISK | CN_ULISTENING_MEM | CN_ULISTENING_BINARY |
- CN_ULISTENING_1VARONLY | CN_ULISTENING_DEFERWRITE));
+ if ( mask & CN_ULISTENING_DEFERWRITE && !(mask & CN_ULISTENING_MEM) )
+ mask |= CN_ULISTENING_MEM;
+
+ if ( mask & CN_ULISTENING_MEM )
+ _listener_mem = new vector<double>;
+
+ if ( mask & CN_ULISTENING_DISK ) {
+ if ( M->is_diskless )
+ fprintf( stderr, "Cannot get Unit \"%s\" to listen to disk in a diskless model\n", _label);
+ else {
+ _listener_disk = new ofstream( (string(_label)+".var").c_str(), ios_base::trunc);
+ _listener_disk->precision( precision);
+
+ *_listener_disk << "# " << _label << " variables\n#<time>";
+ if ( mask & CN_ULISTENING_1VARONLY )
+ *_listener_disk << "\t<" << var_sym(0) << ">";
+ else
+ for ( size_t v = 0; v < v_no(); ++v )
+ *_listener_disk << "\t<" << var_sym(v) << ">";
+ *_listener_disk << endl;
+ if ( M->options.verbosely > 4 )
+ fprintf( stderr, "Unit \"%s\" now listening\n", _label);
+ }
+ }
+
+ if ( mask & CN_ULISTENING_BINARY )
+ _binwrite_handle = open( (string(_label)+".varx").c_str(), O_WRONLY|O_CREAT|O_TRUNC, S_IRUSR | S_IWUSR);
+
+ _status |= (mask & (CN_ULISTENING_DISK | CN_ULISTENING_MEM | CN_ULISTENING_BINARY |
+ CN_ULISTENING_1VARONLY | CN_ULISTENING_DEFERWRITE));
// inform the model
- M->register_listener( this);
+ M->register_listener( this);
}
@@ -153,46 +154,46 @@ cnrun::C_BaseUnit::
stop_listening()
{
// do deferred write
- if ( _status & CN_ULISTENING_DEFERWRITE && _listener_mem ) {
- if ( _listener_disk ) {
- for ( auto mI = _listener_mem->begin(); mI != _listener_mem->end(); ) {
- *_listener_disk << *(mI++);
- if ( _status & CN_ULISTENING_1VARONLY )
- *_listener_disk << "\t" << *(mI++);
- else
- for ( size_t v = 0; v < v_no(); ++v )
- *_listener_disk << "\t" << *(mI++);
- *_listener_disk << endl;
- }
- }
- if ( _binwrite_handle != -1 )
- if ( write( _binwrite_handle, _listener_mem->data(),
- sizeof(double) * _listener_mem->size()) < 1 )
- fprintf( stderr, "write() failed on \"%s.varx\"\n", _label);
- }
-
- if ( _listener_mem ) {
- delete _listener_mem;
- _listener_mem = nullptr;
- }
-
- if ( _listener_disk ) {
- _listener_disk->close();
- delete _listener_disk;
- _listener_disk = nullptr;
- }
-
- if ( _binwrite_handle != -1 ) {
- close( _binwrite_handle);
- _binwrite_handle = -1;
- }
-
- _status &= ~(CN_ULISTENING_MEM | CN_ULISTENING_DISK | CN_ULISTENING_BINARY);
-
- if ( M )
- M->unregister_listener( this);
- if ( M->verbosely > 4 )
- fprintf( stderr, "Unit \"%s\" not listening now\n", _label);
+ if ( _status & CN_ULISTENING_DEFERWRITE && _listener_mem ) {
+ if ( _listener_disk ) {
+ for ( auto mI = _listener_mem->begin(); mI != _listener_mem->end(); ) {
+ *_listener_disk << *(mI++);
+ if ( _status & CN_ULISTENING_1VARONLY )
+ *_listener_disk << "\t" << *(mI++);
+ else
+ for ( size_t v = 0; v < v_no(); ++v )
+ *_listener_disk << "\t" << *(mI++);
+ *_listener_disk << endl;
+ }
+ }
+ if ( _binwrite_handle != -1 )
+ if ( write( _binwrite_handle, _listener_mem->data(),
+ sizeof(double) * _listener_mem->size()) < 1 )
+ fprintf( stderr, "write() failed on \"%s.varx\"\n", _label);
+ }
+
+ if ( _listener_mem ) {
+ delete _listener_mem;
+ _listener_mem = nullptr;
+ }
+
+ if ( _listener_disk ) {
+ _listener_disk->close();
+ delete _listener_disk;
+ _listener_disk = nullptr;
+ }
+
+ if ( _binwrite_handle != -1 ) {
+ close( _binwrite_handle);
+ _binwrite_handle = -1;
+ }
+
+ _status &= ~(CN_ULISTENING_MEM | CN_ULISTENING_DISK | CN_ULISTENING_BINARY);
+
+ if ( M )
+ M->unregister_listener( this);
+ if ( M->options.verbosely > 4 )
+ fprintf( stderr, "Unit \"%s\" not listening now\n", _label);
}
@@ -203,32 +204,32 @@ void
cnrun::C_BaseUnit::
tell()
{
- if ( _binwrite_handle != -1 && !(_status & CN_ULISTENING_DEFERWRITE) ) {
- if ( write( _binwrite_handle, &M->V[0], sizeof(double)) < 1 ||
- write( _binwrite_handle, &var_value(0),
- sizeof(double) * ((_status & CN_ULISTENING_1VARONLY) ? 1 : v_no())) < 1 )
- fprintf( stderr, "write() failed in tell() for \"%s\"\n", _label);
- }
-
- if ( _listener_disk && !(_status & CN_ULISTENING_DEFERWRITE) ) {
- *_listener_disk << model_time();
- if ( _status & CN_ULISTENING_1VARONLY )
- *_listener_disk << "\t" << var_value(0);
- else
- for ( size_t v = 0; v < v_no(); ++v )
- *_listener_disk << "\t" << var_value(v);
- *_listener_disk << endl;
- }
-
- if ( _listener_mem ) {
-// _listener_mem->push_back( 999);
- _listener_mem->push_back( model_time());
- if ( _status & CN_ULISTENING_1VARONLY )
- _listener_mem->push_back( var_value(0));
- else
- for ( size_t v = 0; v < v_no(); ++v )
- _listener_mem->push_back( var_value(v));
- }
+ if ( _binwrite_handle != -1 && !(_status & CN_ULISTENING_DEFERWRITE) ) {
+ if ( write( _binwrite_handle, &M->V[0], sizeof(double)) < 1 ||
+ write( _binwrite_handle, &var_value(0),
+ sizeof(double) * ((_status & CN_ULISTENING_1VARONLY) ? 1 : v_no())) < 1 )
+ fprintf( stderr, "write() failed in tell() for \"%s\"\n", _label);
+ }
+
+ if ( _listener_disk && !(_status & CN_ULISTENING_DEFERWRITE) ) {
+ *_listener_disk << model_time();
+ if ( _status & CN_ULISTENING_1VARONLY )
+ *_listener_disk << "\t" << var_value(0);
+ else
+ for ( size_t v = 0; v < v_no(); ++v )
+ *_listener_disk << "\t" << var_value(v);
+ *_listener_disk << endl;
+ }
+
+ if ( _listener_mem ) {
+// _listener_mem->push_back( 999);
+ _listener_mem->push_back( model_time());
+ if ( _status & CN_ULISTENING_1VARONLY )
+ _listener_mem->push_back( var_value(0));
+ else
+ for ( size_t v = 0; v < v_no(); ++v )
+ _listener_mem->push_back( var_value(v));
+ }
}
@@ -240,36 +241,36 @@ void
cnrun::C_BaseUnit::
dump( bool with_params, FILE *strm) const
{
- fprintf( strm, "[%lu] (%s) \"%s\"\n", _serial_id, species(), _label);
-
- if ( with_params ) {
- fprintf( strm, " Pp: ");
- for ( size_t p = 0; p < p_no(); ++p )
- if ( *param_sym(p) != '.' || M->verbosely > 5 )
- fprintf( strm, "%s = %g; ", param_sym(p), get_param_value(p));
- fprintf( strm, "\n");
- }
- fprintf( strm, " Vv: ");
- for ( size_t v = 0; v < v_no(); ++v )
- if ( *var_sym(v) != '.' || M->verbosely > 5 )
- fprintf( strm, "%s = %g; ", var_sym(v), get_var_value(v));
- fprintf( strm, "\n");
-
- if ( sources.size() ) {
- fprintf( strm, " has sources: ");
- for ( auto &S : sources )
- fprintf( strm, "%s << %s; ",
- (S.sink_type == SINK_PARAM) ? param_sym(S.idx) : var_sym(S.idx),
- S.source->name.c_str());
- fprintf( strm, "\n");
- }
-
- if ( is_listening() ) {
- fprintf( strm, " listening to %s%s%s\n",
- _listener_mem ? "mem" : "",
- _listener_mem && _listener_disk ? ", " : "",
- _listener_disk ? "disk" : "");
- }
+ fprintf( strm, "[%lu] (%s) \"%s\"\n", _serial_id, species(), _label);
+
+ if ( with_params ) {
+ fprintf( strm, " Pp: ");
+ for ( size_t p = 0; p < p_no(); ++p )
+ if ( *param_sym(p) != '.' || M->options.verbosely > 5 )
+ fprintf( strm, "%s = %g; ", param_sym(p), get_param_value(p));
+ fprintf( strm, "\n");
+ }
+ fprintf( strm, " Vv: ");
+ for ( size_t v = 0; v < v_no(); ++v )
+ if ( *var_sym(v) != '.' || M->options.verbosely > 5 )
+ fprintf( strm, "%s = %g; ", var_sym(v), get_var_value(v));
+ fprintf( strm, "\n");
+
+ if ( sources.size() ) {
+ fprintf( strm, " has sources: ");
+ for ( auto &S : sources )
+ fprintf( strm, "%s << %s; ",
+ (S.sink_type == SINK_PARAM) ? param_sym(S.idx) : var_sym(S.idx),
+ S.source->name.c_str());
+ fprintf( strm, "\n");
+ }
+
+ if ( is_listening() ) {
+ fprintf( strm, " listening to %s%s%s\n",
+ _listener_mem ? "mem" : "",
+ _listener_mem && _listener_disk ? ", " : "",
+ _listener_disk ? "disk" : "");
+ }
}
@@ -281,13 +282,14 @@ dump( bool with_params, FILE *strm) const
void
cnrun::C_BaseUnit::
-detach_source( C_BaseSource *s, TSinkType sink_type, unsigned short idx)
+detach_source( C_BaseSource *s, TSinkType sink_type, size_t idx)
{
- list <SSourceInterface <C_BaseSource> >::iterator K;
- while ( (K = find( sources.begin(), sources.end(),
- SSourceInterface<C_BaseSource> (s, sink_type, idx))) != sources.end() )
- sources.erase( K);
- M->unregister_unit_with_sources(this);
+ // list <SSourceInterface<C_BaseSource>>::iterator K;
+ // while ( (K = find( sources.begin(), sources.end(),
+ // )) != sources.end() )
+ // sources.erase( K);
+ sources.remove( SSourceInterface<C_BaseSource> (s, sink_type, idx));
+ M->unregister_unit_with_sources( this);
}
@@ -295,33 +297,34 @@ void
cnrun::C_BaseUnit::
apprise_from_sources()
{
- for ( auto &S : sources )
- switch ( S.sink_type ) {
- case SINK_PARAM:
-// printf( "apprise_from_sources() for %s{%d} = %g\n", _label, S->idx, (*S->source)( model_time()));
- param_value( S.idx) = (*S.source)( model_time());
- param_changed_hook();
- break;
- case SINK_VAR:
- var_value( S.idx) = (*S.source)( model_time());
- break;
- }
+ for ( auto &S : sources )
+ switch ( S.sink_type ) {
+ case SINK_PARAM:
+// printf( "apprise_from_sources() for %s{%d} = %g\n", _label, S->idx, (*S->source)( model_time()));
+ param_value( S.idx) = (*S.source)( model_time());
+ param_changed_hook();
+ break;
+ case SINK_VAR:
+ var_value( S.idx) = (*S.source)( model_time());
+ break;
+ }
}
+
cnrun::C_BaseUnit::
~C_BaseUnit()
{
- if ( M && M->verbosely > 5 )
- fprintf( stderr, " deleting base unit \"%s\"\n", _label);
-
- if ( is_listening() ) {
- stop_listening();
- if ( M && M->model_time() == 0. )
- // nothing has been written yet, delete the files on disk
- unlink( (string(_label) + ".var").c_str());
- }
- if ( M )
- M->exclude_unit( this, false);
+ if ( M && M->options.verbosely > 5 )
+ fprintf( stderr, " deleting base unit \"%s\"\n", _label);
+
+ if ( is_listening() ) {
+ stop_listening();
+ if ( M && M->model_time() == 0. )
+ // nothing has been written yet, delete the files on disk
+ unlink( (string(_label) + ".var").c_str());
+ }
+ if ( M )
+ M->exclude_unit( this, CModel::TExcludeOption::no_delete);
}
@@ -329,8 +332,6 @@ cnrun::C_BaseUnit::
-
-
// ----- C_BaseNeuron
@@ -338,233 +339,219 @@ bool
cnrun::C_BaseNeuron::
connects_to( const C_BaseNeuron &to) const
{
- for ( auto &A : _axonal_harbour )
- if ( A->has_target( &to) )
- return true;
- return false;
+ for ( auto &A : _axonal_harbour )
+ if ( A->has_target( to) )
+ return true;
+ return false;
}
cnrun::C_BaseSynapse*
cnrun::C_BaseNeuron::
-connects_via( C_BaseNeuron &to,
- SCleft::mapped_type *g_ptr) const
+connects_via( const C_BaseNeuron &to,
+ SCleft::mapped_type *g_ptr) const
{
- for ( auto &A : _axonal_harbour )
- if ( A->has_target( &to) ) {
- if ( g_ptr ) *g_ptr = to._dendrites[A];
- return A;
- }
- if ( g_ptr ) *g_ptr = NAN;
- return nullptr;
+ for ( auto &A : _axonal_harbour )
+ if ( A->has_target( to) ) {
+ if ( g_ptr )
+ *g_ptr = to._dendrites.at(A);
+ return A;
+ }
+ if ( g_ptr )
+ *g_ptr = NAN;
+ return nullptr;
}
-
-
-
-
void
cnrun::C_BaseNeuron::
dump( bool with_params, FILE *strm) const
{
- C_BaseUnit::dump( with_params);
- if ( _spikelogger_agent && !(_spikelogger_agent->_status & CN_KL_IDLE) )
- fprintf( strm, " logging spikes at %g:%g\n", _spikelogger_agent->sample_period, _spikelogger_agent->sigma);
- fprintf( strm, "\n");
+ C_BaseUnit::dump( with_params);
+ if ( _spikelogger_agent && !(_spikelogger_agent->_status & CN_KL_IDLE) )
+ fprintf( strm, " logging spikes at %g:%g\n", _spikelogger_agent->sample_period, _spikelogger_agent->sigma);
+ fprintf( strm, "\n");
}
-
-
-
-
-
-
cnrun::C_BaseNeuron::
~C_BaseNeuron()
{
- if ( M && M->verbosely > 4 )
- fprintf( stderr, " deleting base neuron \"%s\"\n", _label);
+ if ( M && M->options.verbosely > 4 )
+ fprintf( stderr, " deleting base neuron \"%s\"\n", _label);
// kill all efferents
- for ( auto Y = _axonal_harbour.rbegin(); Y != _axonal_harbour.rend(); ++Y ) {
- (*Y) -> _source = nullptr;
- delete (*Y);
- }
+ for ( auto Y = _axonal_harbour.rbegin(); Y != _axonal_harbour.rend(); ++Y ) {
+ (*Y) -> _source = nullptr;
+ delete (*Y);
+ }
// unlink ourselves from all afferents
- for ( auto Y = _dendrites.rbegin(); Y != _dendrites.rend(); ++Y )
- Y->first->_targets.remove( this);
-
- if ( _spikelogger_agent ) {
- if ( M && !(_spikelogger_agent->_status & CN_KL_IDLE) )
- M->unregister_spikelogger( this);
- delete _spikelogger_agent;
- _spikelogger_agent = nullptr;
- }
+ for ( auto Y = _dendrites.rbegin(); Y != _dendrites.rend(); ++Y )
+ Y->first->_targets.remove( this);
+
+ if ( _spikelogger_agent ) {
+ if ( M && !(_spikelogger_agent->_status & CN_KL_IDLE) )
+ M->unregister_spikelogger( this);
+ delete _spikelogger_agent;
+ _spikelogger_agent = nullptr;
+ }
}
-
-
// --- SSpikeloggerService
double
cnrun::SSpikeloggerService::
-sdf( double at, double sample_width, double sigma, unsigned *nspikes) const
+sdf( double at, double sample_width, double sigma, size_t *nspikes) const
{
- if ( nspikes )
- *nspikes = 0;
-
- double dt,
- result = 0.;
- for ( auto &T : spike_history ) {
- dt = T - at;
- if ( dt < -sample_width/2. )
- continue;
- if ( dt > sample_width/2. )
- break;
- if ( nspikes )
- (*nspikes)++;
- result += exp( -dt*dt/(sigma * sigma));
- }
- return result;
+ if ( nspikes )
+ *nspikes = 0;
+
+ double dt,
+ result = 0.;
+ for ( auto &T : spike_history ) {
+ dt = T - at;
+ if ( dt < -sample_width/2. )
+ continue;
+ if ( dt > sample_width/2. )
+ break;
+ if ( nspikes )
+ ++(*nspikes);
+ result += exp( -dt*dt/(sigma * sigma));
+ }
+ return result;
}
-
double
cnrun::SSpikeloggerService::
shf( double at, double sample_width) const
{
- double dt,
- last_spike_at;
- vector<double> intervals;
- bool counted_one = false;
- for ( auto &T : spike_history ) {
- dt = T - at;
- if ( dt < -sample_width/2. )
- continue;
- if ( dt > sample_width/2. )
- break;
-
- if ( counted_one )
- intervals.push_back( last_spike_at - T);
- else
- counted_one = true;
-
- last_spike_at = T;
- }
-
- return (intervals.size() < 3) ? 0 : gsl_stats_sd( intervals.data(), 1, intervals.size());
+ double dt,
+ last_spike_at;
+ vector<double>
+ intervals;
+ bool counted_one = false;
+ for ( auto &T : spike_history ) {
+ dt = T - at;
+ if ( dt < -sample_width/2. )
+ continue;
+ if ( dt > sample_width/2. )
+ break;
+
+ if ( counted_one )
+ intervals.push_back( last_spike_at - T);
+ else
+ counted_one = true;
+
+ last_spike_at = T;
+ }
+
+ return (intervals.size() < 3)
+ ? 0
+ : gsl_stats_sd( intervals.data(), 1, intervals.size());
}
-
-
-
size_t
cnrun::SSpikeloggerService::
get_sxf_vector_custom( vector<double> *sdf_buffer, vector<double> *shf_buffer,
- vector<unsigned> *nspikes_buffer,
- double sample_period_custom, double sigma_custom,
- double from, double to) const
+ vector<size_t> *nspikes_buffer,
+ double sample_period_custom, double sigma_custom,
+ double from, double to) const
{
- if ( to == 0. )
- to = _client->M->model_time();
-
- if ( sdf_buffer ) sdf_buffer->clear();
- if ( shf_buffer ) shf_buffer->clear();
- if ( nspikes_buffer) nspikes_buffer->clear();
-
- for ( double t = from; t <= to; t += sample_period_custom ) {
- unsigned nspikes = 0;
- double sdf_value = sdf( t, sample_period_custom, sigma_custom, &nspikes);
- if ( sdf_buffer ) sdf_buffer->push_back( sdf_value);
- if ( shf_buffer ) shf_buffer->push_back( shf( t, sample_period_custom));
- if ( nspikes_buffer ) nspikes_buffer->push_back( nspikes);
- }
-
- return (to - from) / sample_period_custom;
+ if ( to == 0. )
+ to = _client->M->model_time();
+
+ if ( sdf_buffer )
+ sdf_buffer->clear();
+ if ( shf_buffer )
+ shf_buffer->clear();
+ if ( nspikes_buffer )
+ nspikes_buffer->clear();
+
+ for ( double t = from; t <= to; t += sample_period_custom ) {
+ size_t nspikes = 0;
+ double sdf_value = sdf(
+ t, sample_period_custom,
+ sigma_custom, &nspikes);
+ if ( sdf_buffer )
+ sdf_buffer->push_back( sdf_value);
+ if ( shf_buffer )
+ shf_buffer->push_back( shf( t, sample_period_custom));
+ if ( nspikes_buffer )
+ nspikes_buffer->push_back( nspikes);
+ }
+
+ return (to - from) / sample_period_custom;
}
-
-
-
-
-
void
cnrun::SSpikeloggerService::
sync_history()
{
- if ( !_client->M || (_client->M && _client->M->_status & CN_MDL_DISKLESS) )
- return;
-
- ofstream spikecnt_strm( (string(_client->_label) + ".spikes").c_str());
- spikecnt_strm.precision( _client->precision);
- spikecnt_strm << "#spike time\n";
-
- for ( auto &V : spike_history )
- spikecnt_strm << V << endl;
-
- if ( _status & CN_KL_COMPUTESDF ) {
- ofstream sdf_strm( (string(_client->_label) + ".sxf").c_str());
- sdf_strm.precision( _client->precision);
- sdf_strm << "#<time>\t<sdf>\t<shf>\t<nspikes>\n";
-
- vector<double> sdf_vector, shf_vector;
- vector<unsigned> nspikes_vector;
- get_sxf_vector( &sdf_vector, &shf_vector, &nspikes_vector,
- start_delay, 0);
-
- double t = start_delay;
- for ( size_t i = 0; i < sdf_vector.size(); ++i, t += sample_period )
- sdf_strm << t << "\t"
- << sdf_vector[i] << "\t"
- << shf_vector[i] << "\t"
- << nspikes_vector[i] << endl;
- }
+ if ( !_client->M || (_client->M && _client->M->is_diskless) )
+ return;
+
+ ofstream spikecnt_strm( (string(_client->_label) + ".spikes").c_str());
+ spikecnt_strm.precision( _client->precision);
+ spikecnt_strm << "#spike time\n";
+
+ for ( auto &V : spike_history )
+ spikecnt_strm << V << endl;
+
+ if ( _status & CN_KL_COMPUTESDF ) {
+ ofstream sdf_strm( (string(_client->_label) + ".sxf").c_str());
+ sdf_strm.precision( _client->precision);
+ sdf_strm << "#<time>\t<sdf>\t<shf>\t<nspikes>\n";
+
+ vector<double> sdf_vector, shf_vector;
+ vector<size_t> nspikes_vector;
+ get_sxf_vector( &sdf_vector, &shf_vector, &nspikes_vector,
+ start_delay, 0);
+
+ double t = start_delay;
+ for ( size_t i = 0; i < sdf_vector.size(); ++i, t += sample_period )
+ sdf_strm << t << "\t"
+ << sdf_vector[i] << "\t"
+ << shf_vector[i] << "\t"
+ << nspikes_vector[i] << endl;
+ }
}
-
-
-
size_t
cnrun::SSpikeloggerService::
n_spikes_since( double since) const
{
- size_t i = 0;
- for ( auto &K : spike_history )
- if ( K > since )
- return spike_history.size() - i++;
- return 0;
+ size_t i = 0;
+ for ( auto &K : spike_history )
+ if ( K > since )
+ return spike_history.size() - i++;
+ return 0;
}
-
-
// ----- CSynapse
-
cnrun::C_BaseSynapse::
C_BaseSynapse( TUnitType intype,
- C_BaseNeuron *insource, C_BaseNeuron *intarget,
- double ing, CModel *inM, int s_mask)
+ C_BaseNeuron *insource, C_BaseNeuron *intarget,
+ double ing, CModel *inM, int s_mask)
: C_BaseUnit (intype, "overwrite-me", inM, s_mask),
- _source (insource),
- t_last_release_started (-INFINITY)
+ _source (insource),
+ t_last_release_started (-INFINITY)
{
- if ( M && M->verbosely > 5 )
- printf( "Creating a \"%s\" base synapse\n", species());
- _targets.push_back( intarget);
- intarget->_dendrites[this] = ing;
- _source->_axonal_harbour.push_back( this);
- snprintf( _label, CN_MAX_LABEL_SIZE-1, "%s:1", _source->_label);
+ if ( M && M->options.verbosely > 5 )
+ printf( "Creating a \"%s\" base synapse\n", species());
+ _targets.push_back( intarget);
+ intarget->_dendrites[this] = ing;
+ _source->_axonal_harbour.push_back( this);
+ snprintf( _label, max_label_size-1, "%s:1", _source->_label);
}
@@ -577,18 +564,18 @@ cnrun::C_BaseSynapse::
clone_to_target( C_BaseNeuron *tgt, double g)
{
// check if we have no existing connection already to tgt
- if ( find( _targets.begin(), _targets.end(), tgt) != _targets.end() ) {
- fprintf( stderr, "Neuron \"%s\" already synapsing onto \"%s\"\n",
- _source->_label, tgt->_label);
- return nullptr;
- }
+ if ( member( tgt, _targets) ) {
+ fprintf( stderr, "Neuron \"%s\" already synapsing onto \"%s\"\n",
+ _source->_label, tgt->_label);
+ return nullptr;
+ }
- tgt -> _dendrites[this] = g;
- _targets.push_back( tgt);
+ tgt -> _dendrites[this] = g;
+ _targets.push_back( tgt);
- snprintf( _label, CN_MAX_LABEL_SIZE-1, "%s:%zu", _source->_label, _targets.size());
+ snprintf( _label, max_label_size-1, "%s:%zu", _source->_label, _targets.size());
- return this;
+ return this;
}
@@ -598,33 +585,36 @@ cnrun::C_BaseSynapse*
cnrun::C_BaseSynapse::
make_clone_independent( C_BaseNeuron *tgt)
{
- double g = g_on_target( *tgt);
- if ( !isfinite(g) || !M )
- return nullptr;
-
- if ( M && M->verbosely > 4 )
- printf( "promoting a clone of %s synapse from \"%s\" to \"%s\"\n", species(), _label, tgt->_label);
- if ( find( _targets.begin(), _targets.end(), tgt) == _targets.end() )
- fprintf( stderr, "ебать!\n");
- _targets.erase( find( _targets.begin(), _targets.end(), tgt));
-
- if ( tgt->_dendrites.find(this) == tgt->_dendrites.end() )
- fprintf( stderr, "ебать-колотить!\n");
- tgt -> _dendrites.erase( tgt->_dendrites.find(this));
-
- snprintf( _label, CN_MAX_LABEL_SIZE-1, "%s:%zu", _source->_label, _targets.size());
-
- C_BaseSynapse* ret = M -> add_synapse_species( _type, _source, tgt, g, false /* prevents re-creation of a clone we have just excised */,
- true);
- // the newly added synapse has stock paramaters yet: copy ours
- if ( ret ) {
- ret->P = P;
- // also see to vars
- for ( size_t i = 0; i < v_no(); ++i )
- ret->var_value(i) = get_var_value(i);
- return ret;
- }
- return nullptr;
+ double g = g_on_target( *tgt);
+ if ( !isfinite(g) || !M )
+ return nullptr;
+
+ if ( M && M->options.verbosely > 4 )
+ printf( "promoting a clone of %s synapse from \"%s\" to \"%s\"\n",
+ species(), _label, tgt->_label);
+ if ( member( tgt, _targets ) )
+ fprintf( stderr, "ебать!\n");
+ _targets.remove( tgt);
+
+ if ( member( this, tgt->_dendrites ) )
+ fprintf( stderr, "ебать-колотить!\n");
+ tgt -> _dendrites.erase( this);
+
+ snprintf( _label, max_label_size-1, "%s:%zu", _source->_label, _targets.size());
+
+ C_BaseSynapse* ret = M -> add_synapse_species(
+ _type, _source, tgt, g,
+ CModel::TSynapseCloningOption::no /* prevents re-creation of a clone we have just excised */,
+ TIncludeOption::is_last);
+ // the newly added synapse has stock paramaters yet: copy ours
+ if ( ret ) {
+ ret->P = P;
+ // also see to vars
+ for ( size_t i = 0; i < v_no(); ++i )
+ ret->var_value(i) = get_var_value(i);
+ return ret;
+ }
+ return nullptr;
}
@@ -636,11 +626,11 @@ void
cnrun::C_BaseSynapse::
dump( bool with_params, FILE *strm) const
{
- C_BaseUnit::dump( with_params);
- fprintf( strm, " gsyn on targets (%zu): ", _targets.size());
- for ( auto &T : _targets )
- fprintf( strm, "%s: %g; ", T->_label, g_on_target( *T));
- fprintf( strm, "\n\n");
+ C_BaseUnit::dump( with_params);
+ fprintf( strm, " gsyn on targets (%zu): ", _targets.size());
+ for ( auto &T : _targets )
+ fprintf( strm, "%s: %g; ", T->_label, g_on_target( *T));
+ fprintf( strm, "\n\n");
}
@@ -650,22 +640,24 @@ dump( bool with_params, FILE *strm) const
cnrun::C_BaseSynapse::
~C_BaseSynapse()
{
- if ( M && M->verbosely > 4 )
- fprintf( stderr, " deleting base synapse \"%s\"\n", _label);
-
- for ( auto &T : _targets )
- if ( T )
- T->_dendrites.erase( this);
-
- if ( _source ) {
- _source->_axonal_harbour.erase(
- find( _source->_axonal_harbour.begin(), _source->_axonal_harbour.end(), this));
- if ( M && M->verbosely > 5 )
- printf( " removing ourselves from \"%s\" axonals (%zu still there)\n",
- _source->_label, _source->_axonal_harbour.size());
- }
+ if ( M && M->options.verbosely > 4 )
+ fprintf( stderr, " deleting base synapse \"%s\"\n", _label);
+
+ for ( auto &T : _targets )
+ if ( T )
+ T->_dendrites.erase( this);
+
+ if ( _source ) {
+ _source->_axonal_harbour.remove( this);
+ if ( M && M->options.verbosely > 5 )
+ printf( " removing ourselves from \"%s\" axonals (%zu still there)\n",
+ _source->_label, _source->_axonal_harbour.size());
+ }
}
-
-
-// eof
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/base-unit.hh b/upstream/src/libcn/base-unit.hh
index ef8fb72..9710cf0 100644
--- a/upstream/src/libcn/base-unit.hh
+++ b/upstream/src/libcn/base-unit.hh
@@ -9,14 +9,17 @@
*/
-#ifndef LIBCN_BASE_UNIT_H
-#define LIBCN_BASE_UNIT_H
+#ifndef CNRUN_LIBCN_BASEUNIT_H_
+#define CNRUN_LIBCN_BASEUNIT_H_
#include <fstream>
#include <cstring>
#include <vector>
#include <list>
+#include "libstilton/lang.hh"
+#include "libstilton/string.hh"
+#include "forward-decls.hh"
#include "types.hh"
#include "sources.hh"
@@ -26,41 +29,27 @@
using namespace std;
+using cnrun::stilton::str::sasprintf;
namespace cnrun {
-// this gets referenced in case of out-of-bounds idx or misspelled sym
-// in param accessors
-extern double __cn_dummy_double;
-
-
-
-// forward decls
-class CModel;
-class C_BaseUnit;
-
-
-
-
-
+extern unsigned short cn_default_unit_precision;
+extern int cn_verbosely;
// for all units
-#define CN_UERROR (1 << 0)
-#define CN_UOWNED (1 << 1)
-#define CN_UHASPARAMRANGE (1 << 2)
-#define CN_ULISTENING_MEM (1 << 3)
-#define CN_ULISTENING_DISK (1 << 4)
-#define CN_ULISTENING_1VARONLY (1 << 5)
-#define CN_ULISTENING_DEFERWRITE (1 << 6)
-#define CN_ULISTENING_BINARY (1 << 7)
-//#define CN_NDYNPARAMS (1 << 8)
+#define CN_UERROR (1 << 0)
+#define CN_UOWNED (1 << 1)
+#define CN_UHASPARAMRANGE (1 << 2)
+#define CN_ULISTENING_MEM (1 << 3)
+#define CN_ULISTENING_DISK (1 << 4)
+#define CN_ULISTENING_1VARONLY (1 << 5)
+#define CN_ULISTENING_DEFERWRITE (1 << 6)
+#define CN_ULISTENING_BINARY (1 << 7)
+//#define CN_NDYNPARAMS (1 << 8)
// only for neurons
-#define CN_NFIRING (1 << 9) // firing now
-#define CN_NREFRACT (1 << 10) // in refractory phase now
-
-
-#define CN_MAX_LABEL_SIZE 40
+#define CN_NFIRING (1 << 9) // firing now
+#define CN_NREFRACT (1 << 10) // in refractory phase now
// the base unit provides the methods for the following:
@@ -70,224 +59,231 @@ class C_BaseUnit;
// * listening, i.e., keeping a history of vars along a timeline;
class C_BaseUnit {
- private:
- C_BaseUnit(); // not callable
+ DELETE_DEFAULT_METHODS (C_BaseUnit)
- protected:
- TUnitType
- _type; // will look up p, pno and vno from __CNUDT using _type as index
- public:
- TUnitType
- type() const { return _type; }
-
- // classification
- const int traits() const { return __CNUDT[_type].traits; }
- const bool is_hostable() const { return __CNUDT[_type].traits & UT_HOSTED; }
- const bool is_ddtbound() const { return __CNUDT[_type].traits & UT_DDTSET; }
- const bool is_neuron() const { return _type >= NT_FIRST && _type <= NT_LAST; }
- const bool is_synapse() const { return _type >= YT_FIRST && _type <= YT_LAST; }
- const bool is_oscillator() const { return __CNUDT[_type].traits & UT_OSCILLATOR; }
- const bool is_conscious() const { return is_oscillator(); }
-
- const char *class_name() const
- { return is_neuron() ? "Neuron" : "Synapse"; }
- const char *species() const
- { return __CNUDT[_type].species; }
- const char *family() const
- { return __CNUDT[_type].family; }
- const char *type_description() const
- { return __CNUDT[_type].description; }
+ friend class CModel;
+ friend class SSpikeloggerService;
- // parameter & variable names and symbols
- const char *const param_name( size_t i) const { return __CNUDT[_type].stock_param_names[i]; }
- const char *const param_sym( size_t i) const { return __CNUDT[_type].stock_param_syms[i]; }
- int param_idx_by_sym( const char*) const __attribute__ ((pure));
- const char *const var_name( size_t i) const { return __CNUDT[_type].stock_var_names[i]; }
- const char *const var_sym( size_t i) const { return __CNUDT[_type].stock_var_syms[i]; }
- int var_idx_by_sym( const char*) const __attribute__ ((pure));
- unsigned short v_no() const { return __CNUDT[_type].vno; }
- unsigned short p_no() const { return __CNUDT[_type].pno; }
-
- protected:
- // identification
- unsigned long
- _serial_id; // assigned incrementally as read by import_NetworkML
- char _label[CN_MAX_LABEL_SIZE];
public:
- unsigned long serial() const
- { return _serial_id; }
- const char *label() const // for synapses, it is "%s:%d", src->label, targets.size()
- { return _label; }
- void set_label( const char *new_label)
- { strncpy( _label, new_label, CN_MAX_LABEL_SIZE-1); }
-
- // status bitfield & properties
- protected:
- int _status;
- public:
- int status() { return _status; }
+ static const constexpr size_t max_label_size = 40;
- // ctor & dtor
protected:
- C_BaseUnit( TUnitType, const char *label,
- CModel*, int s_mask);
+ C_BaseUnit (TUnitType, const string& label,
+ CModel*, int s_mask);
public:
- virtual ~C_BaseUnit(); // surely virtual
+ virtual ~C_BaseUnit(); // surely virtual
- // parent model
- friend class CModel;
- friend class SSpikeloggerService;
- protected:
- CModel *M;
- public:
- const CModel&
- parent_model() const { return *M; }
- bool is_owned() const { return _status & CN_UOWNED; }
- const double&
- model_time() const; // defined in model.h
+ // written variables precision
+ unsigned short precision;
- public:
- // private copy of params
- vector<double> P;
- double get_param_value( size_t p) const
- { return P[p]; }
- double get_param_value( const char *sym) const
- {
- int id = param_idx_by_sym( sym);
- return (id == -1) ? __cn_dummy_double : P[id];
- }
- double ¶m_value( size_t p) { return P[p]; }
- double ¶m_value( const char *sym)
- {
- int id = param_idx_by_sym( sym);
- return (id == -1) ? __cn_dummy_double : P[id];
- }
- void reset_params()
- {
- P.resize( p_no());
- memcpy( P.data(), __CNUDT[_type].stock_param_values,
- sizeof(double) * p_no());
- param_changed_hook();
- }
+ int status() const { return _status; }
+ TUnitType type() const { return _type; }
- // purity checks
- bool is_not_altered() const
- {
- return (memcmp( P.data(), __CNUDT[_type].stock_param_values,
- sizeof (double) * p_no()) == 0) &&
- !has_sources();
- }
- bool has_same_params( const C_BaseUnit &rv) const
- {
- return _type == rv._type &&
- memcmp( P.data(), rv.P.data(), sizeof (double) * p_no()) == 0;
- }
- bool has_sources() const __attribute__ ((pure))
- {
- return not sources.empty();
- }
- bool has_same_sources( const C_BaseUnit &rv) const __attribute__ ((pure))
- {
- return sources == rv.sources;
- // not sure taking the order of otherwise identical sources should matter
- }
- bool is_identical( const C_BaseUnit &rv) const __attribute__ ((pure))
- {
- return _type == rv._type && has_same_params(rv) &&
- ((has_sources() && has_same_sources(rv)) ||
- (!has_sources() && !rv.has_sources()));
- }
- virtual void dump( bool with_params = false, FILE *strm = stdout) const;
-
-
- // Source interface
- enum TSinkType { SINK_PARAM, SINK_VAR };
-
- template <class T>
- struct SSourceInterface {
- friend class C_BaseUnit;
- friend class CModel;
- private:
- C_BaseSource *source;
- TSinkType sink_type;
- unsigned short idx;
-
- SSourceInterface( T *insource, TSinkType insink_type, unsigned short inidx)
- : source (insource), sink_type (insink_type), idx (inidx)
- {}
- public:
- bool operator== ( const SSourceInterface &rv) const
- { return source == rv.source && sink_type == rv.sink_type && idx == rv.idx; }
- };
- list <SSourceInterface <C_BaseSource> > sources;
- template <class T> void attach_source( T *s, TSinkType t, unsigned short idx);
-
- void detach_source( C_BaseSource*, TSinkType, unsigned short idx);
-
- void apprise_from_sources();
- virtual void param_changed_hook()
- {}
-
-
- // access to state variables: differs per hosted or standalone
- virtual double &var_value( size_t) = 0;
- virtual const double &get_var_value( size_t) const = 0;
- virtual void reset_vars() = 0;
- virtual void reset_state();
+ // classification
+ int traits() const { return __CNUDT[_type].traits; }
+ bool is_hostable() const { return __CNUDT[_type].traits & UT_HOSTED; }
+ bool is_ddtbound() const { return __CNUDT[_type].traits & UT_DDTSET; }
+ bool is_neuron() const { return _type >= NT_FIRST && _type <= NT_LAST; }
+ bool is_synapse() const { return _type >= YT_FIRST && _type <= YT_LAST; }
+ bool is_oscillator() const { return __CNUDT[_type].traits & UT_OSCILLATOR; }
+ bool is_conscious() const { return is_oscillator(); }
+
+ unsigned long serial() const
+ { return _serial_id; }
+ const char *label() const // for synapses, it is "%s:%d", src->label, targets.size()
+ { return _label; }
+ void set_label( const string& new_label)
+ { strncpy( _label, new_label.c_str(), max_label_size-1); }
+
+ const char *class_name() const
+ { return is_neuron() ? "Neuron" : "Synapse"; }
+ const char *species() const
+ { return __CNUDT[_type].species; }
+ const char *family() const
+ { return __CNUDT[_type].family; }
+ const char *type_description() const
+ { return __CNUDT[_type].description; }
- // state history
- bool is_listening() const
- {
- return _status & (CN_ULISTENING_DISK | CN_ULISTENING_MEM);
- }
- void start_listening( int mask = 0 | CN_ULISTENING_DISK);
- void stop_listening();
- void restart_listening()
- {
- int lbits = _status & (CN_ULISTENING_DISK | CN_ULISTENING_MEM
- | CN_ULISTENING_1VARONLY | CN_ULISTENING_DEFERWRITE);
- stop_listening();
- start_listening( lbits);
- }
- void pause_listening();
- void resume_listening();
+ // parent model
+ const CModel&
+ parent_model() const { return *M; }
+ const double
+ model_time() const; // defined in model.h
- private:
- // where vars are written by tell()
- int _binwrite_handle;
- ofstream *_listener_disk;
- // ... and/or stored, in a diskless model
- vector<double> *_listener_mem;
- public:
- // by this method
- void tell();
- const vector<double> *listener_mem() const { return _listener_mem; }
+ bool is_owned() const { return _status & CN_UOWNED; }
+
+ // parameter & variable names and symbols
+ const char *const param_name( size_t i) const { return __CNUDT[_type].stock_param_names[i]; }
+ const char *const param_sym( size_t i) const { return __CNUDT[_type].stock_param_syms[i]; }
+ int param_idx_by_sym( const string&) const __attribute__ ((pure));
- unsigned short precision;
+ const char *const var_name( size_t i) const { return __CNUDT[_type].stock_var_names[i]; }
+ const char *const var_sym( size_t i) const { return __CNUDT[_type].stock_var_syms[i]; }
+ int var_idx_by_sym( const string&) const __attribute__ ((pure));
- // one common method for all descendants
-};
+ unsigned short v_no() const { return __CNUDT[_type].vno; }
+ unsigned short p_no() const { return __CNUDT[_type].pno; }
+ // purity checks
+ bool is_not_altered() const
+ {
+ return (memcmp( P.data(), __CNUDT[_type].stock_param_values,
+ sizeof (double) * p_no()) == 0) &&
+ !has_sources();
+ }
+ bool has_same_params( const C_BaseUnit &rv) const
+ {
+ return _type == rv._type &&
+ memcmp( P.data(), rv.P.data(), sizeof (double) * p_no()) == 0;
+ }
+ bool has_sources() const __attribute__ ((pure))
+ {
+ return not sources.empty();
+ }
+ bool has_same_sources( const C_BaseUnit &rv) const __attribute__ ((pure))
+ {
+ return sources == rv.sources;
+ // not sure taking the order of otherwise identical sources should matter
+ }
+ bool is_identical( const C_BaseUnit &rv) const __attribute__ ((pure))
+ {
+ return _type == rv._type && has_same_params(rv) &&
+ ((has_sources() && has_same_sources(rv)) ||
+ (!has_sources() && !rv.has_sources()));
+ }
+
+ // parameters
+ double
+ get_param_value( size_t p) const
+ { return P[p]; }
+
+ double
+ get_param_value( const string& sym) const
+ {
+ int id = param_idx_by_sym( sym);
+ if ( id == -1 )
+ throw sasprintf( "Bad parameter name \"%s\" for unit \"%s\"", sym.c_str(), _label);
+ return P[id];
+ }
+
+ double&
+ param_value( size_t p)
+ {
+ return P[p];
+ }
+
+ double&
+ param_value( const string& sym)
+ {
+ int id = param_idx_by_sym( sym);
+ if ( id == -1 )
+ throw sasprintf( "Bad parameter name \"%s\" for unit \"%s\"", sym.c_str(), _label);
+ return P[id];
+ }
+
+ void
+ reset_params()
+ {
+ P.resize( p_no());
+ memcpy( P.data(), __CNUDT[_type].stock_param_values,
+ sizeof(double) * p_no());
+ param_changed_hook();
+ }
+
+ // variables: differs per hosted or standalone
+ virtual double &var_value( size_t) = 0;
+ virtual const double &get_var_value( size_t) const = 0;
+ virtual void reset_vars() = 0;
+ virtual void reset_state();
+
+ virtual void dump( bool with_params = false, FILE *strm = stdout) const;
+ // state history
+ bool is_listening() const
+ {
+ return _status & (CN_ULISTENING_DISK | CN_ULISTENING_MEM);
+ }
+ void start_listening( int mask = 0 | CN_ULISTENING_DISK);
+ void stop_listening();
+ void restart_listening()
+ {
+ int lbits = _status & (CN_ULISTENING_DISK | CN_ULISTENING_MEM
+ | CN_ULISTENING_1VARONLY | CN_ULISTENING_DEFERWRITE);
+ stop_listening();
+ start_listening( lbits);
+ }
+ void pause_listening();
+ void resume_listening();
+
+ void tell();
+
+ const vector<double>*
+ listener_mem() const
+ { return _listener_mem; }
+
+ // source interface
+ enum TSinkType { SINK_PARAM, SINK_VAR };
+
+ template <class T>
+ struct SSourceInterface {
+ friend class C_BaseUnit;
+ friend class CModel;
+ private:
+ C_BaseSource *source;
+ TSinkType sink_type;
+ unsigned short idx;
+
+ SSourceInterface (T *insource, TSinkType insink_type, unsigned short inidx)
+ : source (insource), sink_type (insink_type), idx (inidx)
+ {}
+ public:
+ bool operator== ( const SSourceInterface &rv) const
+ {
+ return source == rv.source &&
+ sink_type == rv.sink_type &&
+ idx == rv.idx;
+ }
+ };
+ template <class T>
+ void attach_source( T *s, TSinkType t, unsigned short idx);
+ void detach_source( C_BaseSource*, TSinkType, size_t idx);
+
+ void apprise_from_sources();
+ virtual void param_changed_hook()
+ {}
-extern unsigned short __cn_default_unit_precision;
+ protected:
+ TUnitType
+ _type; // will look up p, pno and vno from __CNUDT using _type as index
+ int _status;
-extern int __cn_verbosely;
+ unsigned long
+ _serial_id; // assigned incrementally as read by import_NetworkML
+ char _label[max_label_size];
+ CModel *M;
+ // private copy of params
+ vector<double> P;
+ list<SSourceInterface<C_BaseSource>>
+ sources;
-class __C_BaseUnitCompareByLabel {
- public:
- bool operator () ( C_BaseUnit *&lv, C_BaseUnit *&rv)
- {
- return strcmp( lv->label(), rv->label()) < 0;
- }
+ private:
+ // where vars are written by tell()
+ int _binwrite_handle;
+ ofstream *_listener_disk;
+ // ... and/or stored, in a diskless model
+ vector<double> *_listener_mem;
};
-
}
#endif
-// EOF
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/forward-decls.hh b/upstream/src/libcn/forward-decls.hh
new file mode 100644
index 0000000..3e20242
--- /dev/null
+++ b/upstream/src/libcn/forward-decls.hh
@@ -0,0 +1,43 @@
+/*
+ * Author: Andrei Zavada <johnhommer at gmail.com>
+ * building on original work by Thomas Nowotny <tnowotny at ucsd.edu>
+ *
+ * License: GPL-2+
+ *
+ * Initial version: 2014-09-16
+ *
+ * Purpose: forward declarations
+ */
+
+
+#ifndef CNRUN_LIBCN_FORWARD_DECLS_H
+#define CNRUN_LIBCN_FORWARD_DECLS_H
+
+namespace cnrun {
+
+class C_BaseUnit;
+class C_BaseNeuron;
+class C_BaseSynapse;
+class C_HostedNeuron;
+class C_HostedSynapse;
+class C_StandaloneNeuron;
+class C_StandaloneSynapse;
+
+class C_HostedConductanceBasedNeuron;
+class C_HostedRateBasedNeuron;
+
+class CNeuronMap;
+class CSynapseMap;
+
+class CModel;
+
+}
+
+#endif
+
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/hosted-neurons.cc b/upstream/src/libcn/hosted-neurons.cc
index 2a036e0..5a9c6eb 100644
--- a/upstream/src/libcn/hosted-neurons.cc
+++ b/upstream/src/libcn/hosted-neurons.cc
@@ -24,25 +24,25 @@
cnrun::C_HostedNeuron::
-C_HostedNeuron( TUnitType intype, const char *inlabel,
- double inx, double iny, double inz,
- CModel* inM, int s_mask,
- bool do_allocations_immediately)
+C_HostedNeuron (TUnitType intype, const string& inlabel,
+ double inx, double iny, double inz,
+ CModel* inM, int s_mask,
+ TIncludeOption include_option)
: C_BaseNeuron (intype, inlabel, inx, iny, inz, inM, s_mask)
{
- if ( M )
- M->include_unit( this, do_allocations_immediately);
- else {
-// _status &= ~CN_UENABLED;
- idx = (unsigned long)-1;
- }
+ if ( M )
+ M->include_unit( this, include_option);
+ else {
+// _status &= ~CN_UENABLED;
+ idx = (unsigned long)-1;
+ }
}
// C_HostedNeuron::~C_HostedNeuron()
// {
-// if ( __cn_verbosely > 5 )
-// cout << " deleting hosted neuron " << label << endl;
+// if ( __cn_verbosely > 5 )
+// cout << " deleting hosted neuron " << label << endl;
// }
@@ -54,18 +54,18 @@ void
cnrun::C_HostedConductanceBasedNeuron::
do_detect_spike_or_whatever()
{
- if ( unlikely (E() >= M->spike_threshold) ) {
- if ( !(_spikelogger_agent->_status & CN_KL_ISSPIKINGNOW ) ) {
- _spikelogger_agent->spike_history.push_back(
- _spikelogger_agent->t_last_spike_start = model_time());
- _spikelogger_agent->_status |= CN_KL_ISSPIKINGNOW;
- }
- } else
-// if ( model_time() - t_last_spike_end > M->spike_lapse ) {
- if ( _spikelogger_agent->_status & CN_KL_ISSPIKINGNOW ) {
- _spikelogger_agent->_status &= ~CN_KL_ISSPIKINGNOW;
- _spikelogger_agent->t_last_spike_end = model_time();
- }
+ if ( unlikely (E() >= M->options.spike_threshold) ) {
+ if ( !(_spikelogger_agent->_status & CN_KL_ISSPIKINGNOW ) ) {
+ _spikelogger_agent->spike_history.push_back(
+ _spikelogger_agent->t_last_spike_start = model_time());
+ _spikelogger_agent->_status |= CN_KL_ISSPIKINGNOW;
+ }
+ } else
+// if ( model_time() - t_last_spike_end > M->spike_lapse ) {
+ if ( _spikelogger_agent->_status & CN_KL_ISSPIKINGNOW ) {
+ _spikelogger_agent->_status &= ~CN_KL_ISSPIKINGNOW;
+ _spikelogger_agent->t_last_spike_end = model_time();
+ }
}
@@ -80,75 +80,75 @@ do_detect_spike_or_whatever()
// ===== HH and variations
const char* const cnrun::__CN_ParamNames_NeuronHH_d[] = {
- "Na conductance, " __CN_PU_CONDUCTANCE,
- "Na equi potential, " __CN_PU_POTENTIAL,
- "K conductance, " __CN_PU_CONDUCTANCE,
- "K equi potential, " __CN_PU_POTENTIAL,
- "Leak conductance, " __CN_PU_CONDUCTANCE,
- "Leak equi potential, " __CN_PU_POTENTIAL,
- "Membrane specific capacitance, " __CN_PU_CAPACITY_DENSITY,
+ "Na conductance, " __CN_PU_CONDUCTANCE,
+ "Na equi potential, " __CN_PU_POTENTIAL,
+ "K conductance, " __CN_PU_CONDUCTANCE,
+ "K equi potential, " __CN_PU_POTENTIAL,
+ "Leak conductance, " __CN_PU_CONDUCTANCE,
+ "Leak equi potential, " __CN_PU_POTENTIAL,
+ "Membrane specific capacitance, " __CN_PU_CAPACITY_DENSITY,
- ".alpha_m_a", ".alpha_m_b", ".alpha_m_c", ".beta_m_a", ".beta_m_b", ".beta_m_c",
- ".alpha_h_a", ".alpha_h_b", ".alpha_h_c", ".beta_h_a", ".beta_h_b", ".beta_h_c",
- ".alpha_n_a", ".alpha_n_b", ".alpha_n_c", ".beta_n_a", ".beta_n_b", ".beta_n_c",
+ ".alpha_m_a", ".alpha_m_b", ".alpha_m_c", ".beta_m_a", ".beta_m_b", ".beta_m_c",
+ ".alpha_h_a", ".alpha_h_b", ".alpha_h_c", ".beta_h_a", ".beta_h_b", ".beta_h_c",
+ ".alpha_n_a", ".alpha_n_b", ".alpha_n_c", ".beta_n_a", ".beta_n_b", ".beta_n_c",
- "Externally applied DC, " __CN_PU_CURRENT,
+ "Externally applied DC, " __CN_PU_CURRENT,
};
const char* const cnrun::__CN_ParamSyms_NeuronHH_d[] = {
- "gNa",
- "ENa",
- "gK",
- "EK",
- "gl",
- "El",
- "Cmem",
+ "gNa",
+ "ENa",
+ "gK",
+ "EK",
+ "gl",
+ "El",
+ "Cmem",
- ".alpha_m_a", ".alpha_m_b", ".alpha_m_c", ".beta_m_a", ".beta_m_b", ".beta_m_c",
- ".alpha_h_a", ".alpha_h_b", ".alpha_h_c", ".beta_h_a", ".beta_h_b", ".beta_h_c",
- ".alpha_n_a", ".alpha_n_b", ".alpha_n_c", ".beta_n_a", ".beta_n_b", ".beta_n_c",
+ ".alpha_m_a", ".alpha_m_b", ".alpha_m_c", ".beta_m_a", ".beta_m_b", ".beta_m_c",
+ ".alpha_h_a", ".alpha_h_b", ".alpha_h_c", ".beta_h_a", ".beta_h_b", ".beta_h_c",
+ ".alpha_n_a", ".alpha_n_b", ".alpha_n_c", ".beta_n_a", ".beta_n_b", ".beta_n_c",
- "Idc",
+ "Idc",
};
const double cnrun::__CN_Params_NeuronHH_d[] = {
- 7.15, // gNa: Na conductance in 1/(mOhms * cm^2)
- 50.0, // ENa: Na equi potential in mV
- 1.430, // gK: K conductance in 1/(mOhms * cm^2)
- -95.0, // EK: K equi potential in mV
- 0.0267, // gl: leak conductance in 1/(mOhms * cm^2)
- -63.563, // El: leak equi potential in mV
- 0.143, // Cmem: membr. specific capacitance, muF/cm^2
+ 7.15, // gNa: Na conductance in 1/(mOhms * cm^2)
+ 50.0, // ENa: Na equi potential in mV
+ 1.430, // gK: K conductance in 1/(mOhms * cm^2)
+ -95.0, // EK: K equi potential in mV
+ 0.0267, // gl: leak conductance in 1/(mOhms * cm^2)
+ -63.563, // El: leak equi potential in mV
+ 0.143, // Cmem: membr. specific capacitance, muF/cm^2
- 0.32, 52., 4.,
- 0.28, 25., 5.,
- 0.128, 48., 18.,
- 4.0, 25., 5.,
- 0.032, 50., 5.,
- 0.5, 55., 40.,
+ 0.32, 52., 4.,
+ 0.28, 25., 5.,
+ 0.128, 48., 18.,
+ 4.0, 25., 5.,
+ 0.032, 50., 5.,
+ 0.5, 55., 40.,
- 0. // Externally applied constant current
+ 0. // Externally applied constant current
};
const double cnrun::__CN_Vars_NeuronHH_d[] = {
- -66.81, // 0 - membrane potential E
- 0.023, // 1 - prob. for Na channel activation m
- 0.800, // 2 - prob. for not Na channel blocking h
- 0.220, // 3 - prob. for K channel activation n
+ -66.81, // 0 - membrane potential E
+ 0.023, // 1 - prob. for Na channel activation m
+ 0.800, // 2 - prob. for not Na channel blocking h
+ 0.220, // 3 - prob. for K channel activation n
};
const char* const cnrun::__CN_VarNames_NeuronHH_d[] = {
- "Membrane potential, " __CN_PU_POTENTIAL,
- "Prob. of Na channel activation",
- "1-Prob. of Na channel blocking",
- "Prob. of K channel activation",
+ "Membrane potential, " __CN_PU_POTENTIAL,
+ "Prob. of Na channel activation",
+ "1-Prob. of Na channel blocking",
+ "Prob. of K channel activation",
};
const char* const cnrun::__CN_VarSyms_NeuronHH_d[] = {
- "E",
- ".m",
- ".h",
- ".n"
+ "E",
+ ".m",
+ ".h",
+ ".n"
};
@@ -159,73 +159,73 @@ cnrun::CNeuronHH_d::
derivative( vector<double>& x, vector<double>& dx)
{
// differential eqn for E, the membrane potential
- dE(dx) = (
- P[gNa] * gsl_pow_3(m(x)) * h(x) * (P[ENa] - E(x))
- + P[gK] * gsl_pow_4(n(x)) * (P[EK] - E(x))
- + P[gl] * (P[El] - E(x)) + (Isyn(x) + P[Idc])
- ) / P[Cmem];
+ dE(dx) = (
+ P[gNa] * gsl_pow_3(m(x)) * h(x) * (P[ENa] - E(x))
+ + P[gK] * gsl_pow_4(n(x)) * (P[EK] - E(x))
+ + P[gl] * (P[El] - E(x)) + (Isyn(x) + P[Idc])
+ ) / P[Cmem];
- double _a, _b, K;
+ double _a, _b, K;
// differential eqn for m, the probability for one Na channel activation
// particle
- K = -P[alpha_m_b] - E(x),
- _a = P[alpha_m_a] * K / expm1( K / P[alpha_m_c]);
-// _a = 0.32 * (13.0 - E(x) - P[V0]) / expm1( (13.0 - E(x) - P[V0]) / 4.0);
- K = P[beta_m_b] + E(x),
- _b = P[beta_m_a] * K / expm1( K / P[beta_m_c]);
-// _b = 0.28 * (E(x) + P[V0] - 40.0) / expm1( (E(x) + P[V0] - 40.0) / 5.0);
- dm(dx) = _a * (1 - m(x)) - _b * m(x);
+ K = -P[alpha_m_b] - E(x),
+ _a = P[alpha_m_a] * K / expm1( K / P[alpha_m_c]);
+// _a = 0.32 * (13.0 - E(x) - P[V0]) / expm1( (13.0 - E(x) - P[V0]) / 4.0);
+ K = P[beta_m_b] + E(x),
+ _b = P[beta_m_a] * K / expm1( K / P[beta_m_c]);
+// _b = 0.28 * (E(x) + P[V0] - 40.0) / expm1( (E(x) + P[V0] - 40.0) / 5.0);
+ dm(dx) = _a * (1 - m(x)) - _b * m(x);
// differential eqn for h, the probability for the Na channel blocking
// particle to be absent
- K = -P[alpha_h_b] - E(x),
- _a = P[alpha_h_a] * exp( K / P[alpha_h_c]);
-// _a = 0.128 * exp( (17.0 - E(x) - P[V0]) / 18.0);
- K = -P[beta_h_b] - E(x),
- _b = P[beta_h_a] / (exp( K / P[beta_h_c]) + 1);
-// _b = 4.0 / (exp( (40.0 - E(x) - P[V0]) / 5.0) + 1.0);
- dh(dx) = _a * (1 - h(x)) - _b * h(x);
+ K = -P[alpha_h_b] - E(x),
+ _a = P[alpha_h_a] * exp( K / P[alpha_h_c]);
+// _a = 0.128 * exp( (17.0 - E(x) - P[V0]) / 18.0);
+ K = -P[beta_h_b] - E(x),
+ _b = P[beta_h_a] / (exp( K / P[beta_h_c]) + 1);
+// _b = 4.0 / (exp( (40.0 - E(x) - P[V0]) / 5.0) + 1.0);
+ dh(dx) = _a * (1 - h(x)) - _b * h(x);
// differential eqn for n, the probability for one K channel activation
// particle
- K = -P[alpha_n_b] - E(x),
- _a = P[alpha_n_a] * K / expm1( K / P[alpha_n_c]);
-// _a = 0.032 * (15.0 - E(x) - P[V0]) / (exp( (15.0 - E(x) - P[V0]) / 5.0) - 1.0);
- K = -P[beta_n_b] - E(x),
- _b = P[beta_n_a] * exp( K / P[beta_n_c]);
-// _b = 0.5 * exp( (10.0 - E(x) - P[V0]) / 40.0);
- dn(dx)= _a * (1 - n(x)) -_b * n(x);
+ K = -P[alpha_n_b] - E(x),
+ _a = P[alpha_n_a] * K / expm1( K / P[alpha_n_c]);
+// _a = 0.032 * (15.0 - E(x) - P[V0]) / (exp( (15.0 - E(x) - P[V0]) / 5.0) - 1.0);
+ K = -P[beta_n_b] - E(x),
+ _b = P[beta_n_a] * exp( K / P[beta_n_c]);
+// _b = 0.5 * exp( (10.0 - E(x) - P[V0]) / 40.0);
+ dn(dx)= _a * (1 - n(x)) -_b * n(x);
}
// void
// CNeuronHH::derivative( vector<double>& x, vector<double>& dx)
// {
-// enum TParametersNeuronHH {
-// gNa, ENa, gK, EK, gl, El, Cmem, Idc
-// };
+// enum TParametersNeuronHH {
+// gNa, ENa, gK, EK, gl, El, Cmem, Idc
+// };
// // differential eqn for E, the membrane potential
-// dE(dx) = (
-// P[gNa] * ___pow3(m(x)) * h(x) * (P[ENa] - E(x))
-// + P[gK] * ___pow4(n(x)) * (P[EK] - E(x))
-// + P[gl] * (P[El] - E(x)) + (Isyn(x) + P[Idc])
-// ) / P[Cmem];
+// dE(dx) = (
+// P[gNa] * ___pow3(m(x)) * h(x) * (P[ENa] - E(x))
+// + P[gK] * ___pow4(n(x)) * (P[EK] - E(x))
+// + P[gl] * (P[El] - E(x)) + (Isyn(x) + P[Idc])
+// ) / P[Cmem];
-// double _a, _b;
+// double _a, _b;
// // differential eqn for m, the probability for Na channel activation
-// _a = (3.5 + 0.1 * E(x)) / -expm1( -3.5 - 0.1 * E(x));
-// _b = 4.0 * exp( -(E(x) + 60.0) / 18.0);
-// dm(dx) = _a * (1.0 - m(x)) - _b * m(x);
+// _a = (3.5 + 0.1 * E(x)) / -expm1( -3.5 - 0.1 * E(x));
+// _b = 4.0 * exp( -(E(x) + 60.0) / 18.0);
+// dm(dx) = _a * (1.0 - m(x)) - _b * m(x);
// // differential eqn for h, the probability for Na channel inactivation
-// _a = 0.07 * exp( -E(x) / 20.0 - 3.0);
-// _b = 1.0 / (exp( -3.0 - 0.1 * E(x)) + 1.0);
-// dh(dx) = _a * (1.0 - h(x)) -_b * h(x);
+// _a = 0.07 * exp( -E(x) / 20.0 - 3.0);
+// _b = 1.0 / (exp( -3.0 - 0.1 * E(x)) + 1.0);
+// dh(dx) = _a * (1.0 - h(x)) -_b * h(x);
// // differential eqn for n, the probability for K channel activation
-// _a = (-0.5 - 0.01 * E(x)) / expm1( -5.0 - 0.1 * E(x));
-// _b = 0.125 * exp( -(E(x) + 60.0) / 80.0);
-// dn(dx) = _a * (1.0 - n(x)) - _b * n(x);
+// _a = (-0.5 - 0.01 * E(x)) / expm1( -5.0 - 0.1 * E(x));
+// _b = 0.125 * exp( -(E(x) + 60.0) / 80.0);
+// dn(dx) = _a * (1.0 - n(x)) - _b * n(x);
// }
@@ -236,79 +236,79 @@ derivative( vector<double>& x, vector<double>& dx)
const char* const cnrun::__CN_ParamNames_NeuronHH2_d[] = {
- "Na conductance, " __CN_PU_CONDUCTANCE,
- "Na equi potential, " __CN_PU_POTENTIAL,
- "K conductance, " __CN_PU_CONDUCTANCE,
- "K equi potential, " __CN_PU_POTENTIAL,
- "Leak conductance, " __CN_PU_CONDUCTANCE,
- "Leak equi potential, " __CN_PU_POTENTIAL,
- "Membrane specific capacitance, " __CN_PU_CAPACITY_DENSITY,
- "K leakage conductance, " __CN_PU_CONDUCTANCE,
- "K leakage equi potential, " __CN_PU_POTENTIAL,
+ "Na conductance, " __CN_PU_CONDUCTANCE,
+ "Na equi potential, " __CN_PU_POTENTIAL,
+ "K conductance, " __CN_PU_CONDUCTANCE,
+ "K equi potential, " __CN_PU_POTENTIAL,
+ "Leak conductance, " __CN_PU_CONDUCTANCE,
+ "Leak equi potential, " __CN_PU_POTENTIAL,
+ "Membrane specific capacitance, " __CN_PU_CAPACITY_DENSITY,
+ "K leakage conductance, " __CN_PU_CONDUCTANCE,
+ "K leakage equi potential, " __CN_PU_POTENTIAL,
- ".alpha_m_a", ".alpha_m_b", ".alpha_m_c", ".beta_m_a", ".beta_m_b", ".beta_m_c",
- ".alpha_h_a", ".alpha_h_b", ".alpha_h_c", ".beta_h_a", ".beta_h_b", ".beta_h_c",
- ".alpha_n_a", ".alpha_n_b", ".alpha_n_c", ".beta_n_a", ".beta_n_b", ".beta_n_c",
+ ".alpha_m_a", ".alpha_m_b", ".alpha_m_c", ".beta_m_a", ".beta_m_b", ".beta_m_c",
+ ".alpha_h_a", ".alpha_h_b", ".alpha_h_c", ".beta_h_a", ".beta_h_b", ".beta_h_c",
+ ".alpha_n_a", ".alpha_n_b", ".alpha_n_c", ".beta_n_a", ".beta_n_b", ".beta_n_c",
-// "Total equi potential (?), " __CN_PU_POTENTIAL,
+// "Total equi potential (?), " __CN_PU_POTENTIAL,
- "Externally applied DC, " __CN_PU_CURRENT,
+ "Externally applied DC, " __CN_PU_CURRENT,
};
const char* const cnrun::__CN_ParamSyms_NeuronHH2_d[] = {
- "gNa",
- "ENa",
- "gK",
- "EK",
- "gl",
- "El",
- "Cmem",
- "gKl",
- "EKl",
+ "gNa",
+ "ENa",
+ "gK",
+ "EK",
+ "gl",
+ "El",
+ "Cmem",
+ "gKl",
+ "EKl",
- ".alpha_m_a", ".alpha_m_b", ".alpha_m_c", ".beta_m_a", ".beta_m_b", ".beta_m_c",
- ".alpha_h_a", ".alpha_h_b", ".alpha_h_c", ".beta_h_a", ".beta_h_b", ".beta_h_c",
- ".alpha_n_a", ".alpha_n_b", ".alpha_n_c", ".beta_n_a", ".beta_n_b", ".beta_n_c",
+ ".alpha_m_a", ".alpha_m_b", ".alpha_m_c", ".beta_m_a", ".beta_m_b", ".beta_m_c",
+ ".alpha_h_a", ".alpha_h_b", ".alpha_h_c", ".beta_h_a", ".beta_h_b", ".beta_h_c",
+ ".alpha_n_a", ".alpha_n_b", ".alpha_n_c", ".beta_n_a", ".beta_n_b", ".beta_n_c",
-// "V0",
+// "V0",
- "Idc",
+ "Idc",
};
const double cnrun::__CN_Params_NeuronHH2_d[] = {
- 7.15, // gNa: Na conductance in 1/(mOhms * cm^2)
- 50.0, // ENa: Na equi potential in mV
- 1.43, // gK: K conductance in 1/(mOhms * cm^2)
- -95.0, // EK: K equi potential in mV
- 0.0267, // gl: leak conductance in 1/(mOhms * cm^2)
- -63.56, // El: leak equi potential in mV
- 0.143, // Cmem: membr. specific capacitance, muF/cm^2
- 0.00572, // gKl: potassium leakage conductivity
- -95.0, // EKl: potassium leakage equi pot in mV
+ 7.15, // gNa: Na conductance in 1/(mOhms * cm^2)
+ 50.0, // ENa: Na equi potential in mV
+ 1.43, // gK: K conductance in 1/(mOhms * cm^2)
+ -95.0, // EK: K equi potential in mV
+ 0.0267, // gl: leak conductance in 1/(mOhms * cm^2)
+ -63.56, // El: leak equi potential in mV
+ 0.143, // Cmem: membr. specific capacitance, muF/cm^2
+ 0.00572, // gKl: potassium leakage conductivity
+ -95.0, // EKl: potassium leakage equi pot in mV
- 0.32, 52., 4.,
- 0.28, 25., 5.,
- 0.128, 48., 18.,
- 4.0, 25., 5.,
- 0.032, 50., 5.,
- 0.5, 55., 40.,
+ 0.32, 52., 4.,
+ 0.28, 25., 5.,
+ 0.128, 48., 18.,
+ 4.0, 25., 5.,
+ 0.032, 50., 5.,
+ 0.5, 55., 40.,
-// 65.0, // V0: ~ total equi potential (?)
+// 65.0, // V0: ~ total equi potential (?)
- 0., // Idc: constant, externally applied current
+ 0., // Idc: constant, externally applied current
};
const double cnrun::__CN_Vars_NeuronHH2_d[] = {
// as in a single-neuron run
- -66.56, // 0 - membrane potential E
- 0.0217, // 1 - prob. for Na channel activation m
- 0.993, // 2 - prob. for not Na channel blocking h
- 0.051, // 3 - prob. for K channel activation n
+ -66.56, // 0 - membrane potential E
+ 0.0217, // 1 - prob. for Na channel activation m
+ 0.993, // 2 - prob. for not Na channel blocking h
+ 0.051, // 3 - prob. for K channel activation n
// previously thought to be resting state values
-// -60.0, // 0 - membrane potential E
-// 0.0529324, // 1 - prob. for Na channel activation m
-// 0.3176767, // 2 - prob. for not Na channel blocking h
-// 0.5961207, // 3 - prob. for K channel activation n
+// -60.0, // 0 - membrane potential E
+// 0.0529324, // 1 - prob. for Na channel activation m
+// 0.3176767, // 2 - prob. for not Na channel blocking h
+// 0.5961207, // 3 - prob. for K channel activation n
};
@@ -319,56 +319,56 @@ void
cnrun::CNeuronHH2_d::
derivative( vector<double>& x, vector<double>& dx)
{
- enum TParametersNeuronHH2 {
- gNa, ENa, gK, EK, gl, El, Cmem,
- gKl, EKl, //V0,
- alpha_m_a, alpha_m_b, alpha_m_c,
- beta_m_a, beta_m_b, beta_m_c,
- alpha_h_a, alpha_h_b, alpha_h_c,
- beta_h_a, beta_h_b, beta_h_c,
- alpha_n_a, alpha_n_b, alpha_n_c,
- beta_n_a, beta_n_b, beta_n_c,
- Idc,
- };
+ enum TParametersNeuronHH2 {
+ gNa, ENa, gK, EK, gl, El, Cmem,
+ gKl, EKl, //V0,
+ alpha_m_a, alpha_m_b, alpha_m_c,
+ beta_m_a, beta_m_b, beta_m_c,
+ alpha_h_a, alpha_h_b, alpha_h_c,
+ beta_h_a, beta_h_b, beta_h_c,
+ alpha_n_a, alpha_n_b, alpha_n_c,
+ beta_n_a, beta_n_b, beta_n_c,
+ Idc,
+ };
// differential eqn for E, the membrane potential
- dE(dx) = (
- P[gNa] * gsl_pow_3(m(x)) * h(x) * (P[ENa] - E(x))
- + P[gK] * gsl_pow_4(n(x)) * (P[EK] - E(x))
- + P[gl] * (P[El] - E(x))
- + P[gKl] * (P[EKl] - E(x)) + (Isyn(x) + P[Idc])
- ) / P[Cmem];
-
- double _a, _b, K;
+ dE(dx) = (
+ P[gNa] * gsl_pow_3(m(x)) * h(x) * (P[ENa] - E(x))
+ + P[gK] * gsl_pow_4(n(x)) * (P[EK] - E(x))
+ + P[gl] * (P[El] - E(x))
+ + P[gKl] * (P[EKl] - E(x)) + (Isyn(x) + P[Idc])
+ ) / P[Cmem];
+
+ double _a, _b, K;
// differential eqn for m, the probability for one Na channel activation
// particle
- K = -P[alpha_m_b] - E(x),
- _a = P[alpha_m_a] * K / expm1( K / P[alpha_m_c]);
-// _a = 0.32 * (13.0 - E(x) - P[V0]) / expm1( (13.0 - E(x) - P[V0]) / 4.0);
- K = P[beta_m_b] + E(x),
- _b = P[beta_m_a] * K / expm1( K / P[beta_m_c]);
-// _b = 0.28 * (E(x) + P[V0] - 40.0) / expm1( (E(x) + P[V0] - 40.0) / 5.0);
- dm(dx) = _a * (1 - m(x)) - _b * m(x);
+ K = -P[alpha_m_b] - E(x),
+ _a = P[alpha_m_a] * K / expm1( K / P[alpha_m_c]);
+// _a = 0.32 * (13.0 - E(x) - P[V0]) / expm1( (13.0 - E(x) - P[V0]) / 4.0);
+ K = P[beta_m_b] + E(x),
+ _b = P[beta_m_a] * K / expm1( K / P[beta_m_c]);
+// _b = 0.28 * (E(x) + P[V0] - 40.0) / expm1( (E(x) + P[V0] - 40.0) / 5.0);
+ dm(dx) = _a * (1 - m(x)) - _b * m(x);
// differential eqn for h, the probability for the Na channel blocking
// particle to be absent
- K = -P[alpha_h_b] - E(x),
- _a = P[alpha_h_a] * exp( K / P[alpha_h_c]);
-// _a = 0.128 * exp( (17.0 - E(x) - P[V0]) / 18.0);
- K = -P[beta_h_b] - E(x),
- _b = P[beta_h_a] / (exp( K / P[beta_h_c]) + 1);
-// _b = 4.0 / (exp( (40.0 - E(x) - P[V0]) / 5.0) + 1.0);
- dh(dx) = _a * (1 - h(x)) - _b * h(x);
+ K = -P[alpha_h_b] - E(x),
+ _a = P[alpha_h_a] * exp( K / P[alpha_h_c]);
+// _a = 0.128 * exp( (17.0 - E(x) - P[V0]) / 18.0);
+ K = -P[beta_h_b] - E(x),
+ _b = P[beta_h_a] / (exp( K / P[beta_h_c]) + 1);
+// _b = 4.0 / (exp( (40.0 - E(x) - P[V0]) / 5.0) + 1.0);
+ dh(dx) = _a * (1 - h(x)) - _b * h(x);
// differential eqn for n, the probability for one K channel activation
// particle
- K = -P[alpha_n_b] - E(x),
- _a = P[alpha_n_a] * K / expm1( K / P[alpha_n_c]);
-// _a = 0.032 * (15.0 - E(x) - P[V0]) / (exp( (15.0 - E(x) - P[V0]) / 5.0) - 1.0);
- K = -P[beta_n_b] - E(x),
- _b = P[beta_n_a] * exp( K / P[beta_n_c]);
-// _b = 0.5 * exp( (10.0 - E(x) - P[V0]) / 40.0);
- dn(dx)= _a * (1 - n(x)) -_b * n(x);
+ K = -P[alpha_n_b] - E(x),
+ _a = P[alpha_n_a] * K / expm1( K / P[alpha_n_c]);
+// _a = 0.032 * (15.0 - E(x) - P[V0]) / (exp( (15.0 - E(x) - P[V0]) / 5.0) - 1.0);
+ K = -P[beta_n_b] - E(x),
+ _b = P[beta_n_a] * exp( K / P[beta_n_c]);
+// _b = 0.5 * exp( (10.0 - E(x) - P[V0]) / 40.0);
+ dn(dx)= _a * (1 - n(x)) -_b * n(x);
}
@@ -382,77 +382,77 @@ derivative( vector<double>& x, vector<double>& dx)
const char* const cnrun::__CN_ParamNames_NeuronEC_d[] = {
- "Na conductance, " __CN_PU_CONDUCTANCE,
- "Na equi potential, " __CN_PU_POTENTIAL,
- "K conductance, " __CN_PU_CONDUCTANCE,
- "K equi potential, " __CN_PU_POTENTIAL,
- "Leak conductance, " __CN_PU_CONDUCTANCE,
- "Leak equi potential, " __CN_PU_POTENTIAL,
- "Membrane capacity density, " __CN_PU_CAPACITY_DENSITY,
- "Externally applied DC, " __CN_PU_CURRENT,
- "K leakage conductance, " __CN_PU_CONDUCTANCE,
- "K leakage equi potential, " __CN_PU_POTENTIAL,
- "Total equi potential, " __CN_PU_POTENTIAL,
- "gh1",
- "gh2",
- "Vh, " __CN_PU_POTENTIAL
+ "Na conductance, " __CN_PU_CONDUCTANCE,
+ "Na equi potential, " __CN_PU_POTENTIAL,
+ "K conductance, " __CN_PU_CONDUCTANCE,
+ "K equi potential, " __CN_PU_POTENTIAL,
+ "Leak conductance, " __CN_PU_CONDUCTANCE,
+ "Leak equi potential, " __CN_PU_POTENTIAL,
+ "Membrane capacity density, " __CN_PU_CAPACITY_DENSITY,
+ "Externally applied DC, " __CN_PU_CURRENT,
+ "K leakage conductance, " __CN_PU_CONDUCTANCE,
+ "K leakage equi potential, " __CN_PU_POTENTIAL,
+ "Total equi potential, " __CN_PU_POTENTIAL,
+ "gh1",
+ "gh2",
+ "Vh, " __CN_PU_POTENTIAL
};
const char* const cnrun::__CN_ParamSyms_NeuronEC_d[] = {
- "gNa",
- "ENa",
- "gK",
- "EK",
- "gl",
- "El",
- "Cmem",
- "Idc",
- "gKl",
- "EKl",
- "V0",
- "gh1",
- "gh2",
- "Vh"
+ "gNa",
+ "ENa",
+ "gK",
+ "EK",
+ "gl",
+ "El",
+ "Cmem",
+ "Idc",
+ "gKl",
+ "EKl",
+ "V0",
+ "gh1",
+ "gh2",
+ "Vh"
};
const double cnrun::__CN_Params_NeuronEC_d[] = {
- 7.15, // 0 - gNa: Na conductance in 1/(mOhms * cm^2)
- 50.0, // 1 - ENa: Na equi potential in mV
- 1.43, // 2 - gK: K conductance in 1/(mOhms * cm^2)
- -95.0, // 3 - EK: K equi potential in mV
- 0.021, // 4 - gl: leak conductance in 1/(mOhms * cm^2)
- -55.0, // 5 - El: leak equi potential in mV
- 0.286, // 6 - Cmem: membr. capacity density in muF/cm^2 // 0.143
- 0., // 7 - Externally applied constant current
- 0.035, // 8 - gKl: potassium leakage conductivity
- -95.0, // 9 - EKl: potassium leakage equi pot in mV
- 65.0, // 10 - V0: ~ total equi potential (?)
- 0.0185, // 11 - gh1 // 1.85
- 0.01, // 12 - gh2
- -20.0, // 13 - Vh
+ 7.15, // 0 - gNa: Na conductance in 1/(mOhms * cm^2)
+ 50.0, // 1 - ENa: Na equi potential in mV
+ 1.43, // 2 - gK: K conductance in 1/(mOhms * cm^2)
+ -95.0, // 3 - EK: K equi potential in mV
+ 0.021, // 4 - gl: leak conductance in 1/(mOhms * cm^2)
+ -55.0, // 5 - El: leak equi potential in mV
+ 0.286, // 6 - Cmem: membr. capacity density in muF/cm^2 // 0.143
+ 0., // 7 - Externally applied constant current
+ 0.035, // 8 - gKl: potassium leakage conductivity
+ -95.0, // 9 - EKl: potassium leakage equi pot in mV
+ 65.0, // 10 - V0: ~ total equi potential (?)
+ 0.0185, // 11 - gh1 // 1.85
+ 0.01, // 12 - gh2
+ -20.0, // 13 - Vh
};
const char* const cnrun::__CN_VarNames_NeuronEC_d[] = {
- "Membrane potential",
- "Prob. of Na channel activation",
- "Prob. of not Na channel blocking",
- "Prob. of K channel activation",
- "Ih1 activation",
- "Ih2 activation"
+ "Membrane potential",
+ "Prob. of Na channel activation",
+ "Prob. of not Na channel blocking",
+ "Prob. of K channel activation",
+ "Ih1 activation",
+ "Ih2 activation"
};
const char* const cnrun::__CN_VarSyms_NeuronEC_d[] = {
- "E",
- ".m",
- ".h",
- ".n",
- ".Ih1",
- ".Ih2"
+ "E",
+ ".m",
+ ".h",
+ ".n",
+ ".Ih1",
+ ".Ih2"
};
const double cnrun::__CN_Vars_NeuronEC_d[] = {
- -64.1251, // 0 - membrane potential E
- 0.0176331, // 1 - prob. for Na channel activation m
- 0.994931, // 2 - prob. for not Na channel blocking h
- 0.0433969, // 3 - prob. for K channel activation n
- 0.443961, // 4 - Ih1 activation
- 0.625308 // 5 - Ih2 activation
+ -64.1251, // 0 - membrane potential E
+ 0.0176331, // 1 - prob. for Na channel activation m
+ 0.994931, // 2 - prob. for not Na channel blocking h
+ 0.0433969, // 3 - prob. for K channel activation n
+ 0.443961, // 4 - Ih1 activation
+ 0.625308 // 5 - Ih2 activation
};
@@ -464,44 +464,44 @@ void
cnrun::CNeuronEC_d::
derivative( vector<double>& x, vector<double>& dx)
{
- enum TParametersNeuronEC {
- gNa, ENa, gK, EK, gl, El, Cmem, Idc,
- gKl, EKl, V0,
- gh1, gh2,
- Vh
- };
-
- double _a, _b;
+ enum TParametersNeuronEC {
+ gNa, ENa, gK, EK, gl, El, Cmem, Idc,
+ gKl, EKl, V0,
+ gh1, gh2,
+ Vh
+ };
+
+ double _a, _b;
// differential eqn for E, the membrane potential
- dE(dx) = -(gsl_pow_3( m(x)) * h(x) * P[gNa] * (E(x) - P[ENa]) +
- gsl_pow_4( n(x)) * P[gK] * (E(x) - P[EK]) +
- (Ih1(x) * P[gh1] + Ih2(x) * P[gh2]) * (E(x) - P[Vh])+
- P[gl] * (E(x) - P[El]) + P[gKl] * (E(x) - P[EKl]) - Isyn(x)) / P[Cmem];
+ dE(dx) = -(gsl_pow_3( m(x)) * h(x) * P[gNa] * (E(x) - P[ENa]) +
+ gsl_pow_4( n(x)) * P[gK] * (E(x) - P[EK]) +
+ (Ih1(x) * P[gh1] + Ih2(x) * P[gh2]) * (E(x) - P[Vh])+
+ P[gl] * (E(x) - P[El]) + P[gKl] * (E(x) - P[EKl]) - Isyn(x)) / P[Cmem];
// differential eqn for m, the probability for one Na channel activation particle
- _a = 0.32 * (13.0 - E(x) - P[V0]) / expm1( (13.0 - E(x) - P[V0]) / 4.0);
- _b = 0.28 * (E(x) + P[V0] - 40.0) / expm1( (E(x) + P[V0] - 40.0) / 5.0);
- dm(dx) = _a * (1.0 - m(x)) - _b * m(x);
+ _a = 0.32 * (13.0 - E(x) - P[V0]) / expm1( (13.0 - E(x) - P[V0]) / 4.0);
+ _b = 0.28 * (E(x) + P[V0] - 40.0) / expm1( (E(x) + P[V0] - 40.0) / 5.0);
+ dm(dx) = _a * (1.0 - m(x)) - _b * m(x);
// differential eqn for h, the probability for the Na channel blocking particle to be absent
- _a = 0.128 * exp( (17.0 - E(x) - P[V0]) / 18.0);
- _b = 4.0 / (exp( (40.0 - E(x) - P[V0]) / 5.0) + 1.0);
- dh(dx) = _a * (1.0 - h(x)) - _b * h(x);
+ _a = 0.128 * exp( (17.0 - E(x) - P[V0]) / 18.0);
+ _b = 4.0 / (exp( (40.0 - E(x) - P[V0]) / 5.0) + 1.0);
+ dh(dx) = _a * (1.0 - h(x)) - _b * h(x);
// differential eqn for n, the probability for one K channel activation particle
- _a = 0.032 * (15.0 - E(x) - P[V0]) / expm1( (15.0 - E(x) - P[V0]) / 5.0);
- _b = 0.5 * exp( (10.0 - E(x) - P[V0]) / 40.0);
- dn(dx) = _a * (1.0 - n(x)) - _b * n(x);
+ _a = 0.032 * (15.0 - E(x) - P[V0]) / expm1( (15.0 - E(x) - P[V0]) / 5.0);
+ _b = 0.5 * exp( (10.0 - E(x) - P[V0]) / 40.0);
+ dn(dx) = _a * (1.0 - n(x)) - _b * n(x);
// differential equation for the Ih1 activation variable
- _a = _xfunc (-2.89e-3, -0.445, 24.02, E(x));
- _b = _xfunc ( 2.71e-2, -1.024, -17.40, E(x));
- dIh1(dx) = _a * (1.0 - Ih1(x)) - _b * Ih1(x);
+ _a = _xfunc (-2.89e-3, -0.445, 24.02, E(x));
+ _b = _xfunc ( 2.71e-2, -1.024, -17.40, E(x));
+ dIh1(dx) = _a * (1.0 - Ih1(x)) - _b * Ih1(x);
// differential equation for the Ih2 activation variable
- _a = _xfunc (-3.18e-3, -0.695, 26.72, E(x));
- _b = _xfunc ( 2.16e-2, -1.065, -14.25, E(x));
- dIh2(dx) = _a * (1.0 - Ih2(x)) - _b * Ih2(x);
+ _a = _xfunc (-3.18e-3, -0.695, 26.72, E(x));
+ _b = _xfunc ( 2.16e-2, -1.065, -14.25, E(x));
+ dIh2(dx) = _a * (1.0 - Ih2(x)) - _b * Ih2(x);
}
#undef _xfunc
@@ -518,71 +518,71 @@ derivative( vector<double>& x, vector<double>& dx)
const char* const cnrun::__CN_ParamNames_NeuronECA_d[] = {
- "Na conductance, " __CN_PU_CONDUCTANCE,
- "Na equi potential, " __CN_PU_POTENTIAL,
- "K conductance, " __CN_PU_CONDUCTANCE,
- "K equi potential, " __CN_PU_POTENTIAL,
- "Leak conductance, " __CN_PU_CONDUCTANCE,
- "Leak equi potential, " __CN_PU_POTENTIAL,
- "Membrane capacity density, " __CN_PU_CAPACITY_DENSITY,
- "Externally applied DC, " __CN_PU_CURRENT,
- "gNap",
- "gh",
- "Vh",
+ "Na conductance, " __CN_PU_CONDUCTANCE,
+ "Na equi potential, " __CN_PU_POTENTIAL,
+ "K conductance, " __CN_PU_CONDUCTANCE,
+ "K equi potential, " __CN_PU_POTENTIAL,
+ "Leak conductance, " __CN_PU_CONDUCTANCE,
+ "Leak equi potential, " __CN_PU_POTENTIAL,
+ "Membrane capacity density, " __CN_PU_CAPACITY_DENSITY,
+ "Externally applied DC, " __CN_PU_CURRENT,
+ "gNap",
+ "gh",
+ "Vh",
};
const char* const cnrun::__CN_ParamSyms_NeuronECA_d[] = {
- "gNa",
- "ENa",
- "gK",
- "EK",
- "gl",
- "El",
- "Cmem",
- "Idc",
- "gNap",
- "gh",
- "Vh",
+ "gNa",
+ "ENa",
+ "gK",
+ "EK",
+ "gl",
+ "El",
+ "Cmem",
+ "Idc",
+ "gNap",
+ "gh",
+ "Vh",
};
const double cnrun::__CN_Params_NeuronECA_d[] = {
- 52.0, // 0 - Na conductance in 1/(mOhms * cm^2)
- 55.0, // 1 - Na equi potential in mV
- 11.0, // 2 - K conductance in 1/(mOhms * cm^2)
- -90.0, // 3 - K equi potential in mV
- 0.5, // 4 - Leak conductance in 1/(mOhms * cm^2)
- -65.0, // 5 - Leak equi potential in mV
- 1.5, // 6 - Membr. capacity density in muF/cm^2
- 0., // 7 - Externally applied constant current
- 0.5, // 8 - gNap
- 1.5, // 9 - gh
- -20.0, // 10 - Vh
+ 52.0, // 0 - Na conductance in 1/(mOhms * cm^2)
+ 55.0, // 1 - Na equi potential in mV
+ 11.0, // 2 - K conductance in 1/(mOhms * cm^2)
+ -90.0, // 3 - K equi potential in mV
+ 0.5, // 4 - Leak conductance in 1/(mOhms * cm^2)
+ -65.0, // 5 - Leak equi potential in mV
+ 1.5, // 6 - Membr. capacity density in muF/cm^2
+ 0., // 7 - Externally applied constant current
+ 0.5, // 8 - gNap
+ 1.5, // 9 - gh
+ -20.0, // 10 - Vh
};
const char* const cnrun::__CN_VarNames_NeuronECA_d[] = {
- "Membrane potential",
- "Prob. of Na channel activation",
- "Prob. of Na channel blocking",
- "Prob. of K channel activation",
- "mNap",
- "Ih1 activation",
- "Ih2 activation"
+ "Membrane potential",
+ "Prob. of Na channel activation",
+ "Prob. of Na channel blocking",
+ "Prob. of K channel activation",
+ "mNap",
+ "Ih1 activation",
+ "Ih2 activation"
};
const char* const cnrun::__CN_VarSyms_NeuronECA_d[] = {
- "E",
- ".m",
- ".h",
- ".n",
- ".mNap",
- ".Ih1",
- ".Ih2"
+ "E",
+ ".m",
+ ".h",
+ ".n",
+ ".mNap",
+ ".Ih1",
+ ".Ih2"
};
const double cnrun::__CN_Vars_NeuronECA_d[] = {
- -53.77902178, // E
- 0.0262406368, // prob. for Na channel activation m
- 0.9461831106, // prob. for not Na channel blocking h
- 0.1135915933, // prob. for K channel activation n
- 0.08109646237, // Nap
- 0.06918464221, // Ih1 activation
- 0.09815937825 // Ih2 activation
+ -53.77902178, // E
+ 0.0262406368, // prob. for Na channel activation m
+ 0.9461831106, // prob. for not Na channel blocking h
+ 0.1135915933, // prob. for K channel activation n
+ 0.08109646237, // Nap
+ 0.06918464221, // Ih1 activation
+ 0.09815937825 // Ih2 activation
};
@@ -591,47 +591,47 @@ void
cnrun::CNeuronECA_d::
derivative( vector<double>& x, vector<double>& dx)
{
- enum TParametersNeuronECA { // lacks SParametersNeuronEC's gKl and EKl, so derives directly from HH
- gNa, ENa, gK, EK, gl, El, Cmem, Idc,
- gNap, gh,
- Vh
- };
+ enum TParametersNeuronECA { // lacks SParametersNeuronEC's gKl and EKl, so derives directly from HH
+ gNa, ENa, gK, EK, gl, El, Cmem, Idc,
+ gNap, gh,
+ Vh
+ };
// differential eqn for E, the membrane potential
- dE(dx) = -((gsl_pow_3( m(x)) * h(x) * P[gNa] + P[gNap] * mNap(x)) * (E(x) - P[ENa]) +
- gsl_pow_4( n(x)) * P[gK] * (E(x) - P[EK]) +
- P[gh] * (Ih1(x) * 0.65 + Ih2(x) * 0.35) * (E(x) - P[Vh]) +
- P[gl] * (E(x) - P[El]) - (Isyn(x) + P[Idc]) + 2.85) / P[Cmem];
+ dE(dx) = -((gsl_pow_3( m(x)) * h(x) * P[gNa] + P[gNap] * mNap(x)) * (E(x) - P[ENa]) +
+ gsl_pow_4( n(x)) * P[gK] * (E(x) - P[EK]) +
+ P[gh] * (Ih1(x) * 0.65 + Ih2(x) * 0.35) * (E(x) - P[Vh]) +
+ P[gl] * (E(x) - P[El]) - (Isyn(x) + P[Idc]) + 2.85) / P[Cmem];
- double _a, _b;
+ double _a, _b;
// diferential eqn for m, the probability for one Na channel activation particle
- _a = -0.1 * (E(x) + 23) / expm1( -0.1 * (E(x) + 23));
- _b = 4. * exp( -(E(x) + 48) / 18);
- dm(dx) = _a * (1. - m(x)) - _b * m(x);
+ _a = -0.1 * (E(x) + 23) / expm1( -0.1 * (E(x) + 23));
+ _b = 4. * exp( -(E(x) + 48) / 18);
+ dm(dx) = _a * (1. - m(x)) - _b * m(x);
// differential eqn for h, the probability for the Na channel blocking particle to be absent
- _a = 0.07 * exp( -(E(x) + 37.0) / 20.0);
- _b = 1. / (exp( -0.1 * (E(x) + 7.)) + 1.0);
- dh(dx) = _a * (1.0 - h(x)) - _b * h(x);
+ _a = 0.07 * exp( -(E(x) + 37.0) / 20.0);
+ _b = 1. / (exp( -0.1 * (E(x) + 7.)) + 1.0);
+ dh(dx) = _a * (1.0 - h(x)) - _b * h(x);
// differential eqn for n, the probability for one K channel activation particle
- _a = -0.01 * (E(x) + 27) / expm1( -0.1 * (E(x) + 27));
- _b = 0.125 * exp( -(E(x) + 37) / 80);
- dn(dx) = _a * (1.0 - n(x)) - _b * n(x);
+ _a = -0.01 * (E(x) + 27) / expm1( -0.1 * (E(x) + 27));
+ _b = 0.125 * exp( -(E(x) + 37) / 80);
+ dn(dx) = _a * (1.0 - n(x)) - _b * n(x);
- _a = 1. / (0.15 * (1 + exp( -(E(x) + 38) / 6.5)));
- _b = exp( -(E(x) + 38) / 6.5) / (0.15 * (1 + exp( -(E(x) + 38) / 6.5)));
- dmNap(dx) = _a * (1.0 - mNap(x)) - _b * mNap(x);
+ _a = 1. / (0.15 * (1 + exp( -(E(x) + 38) / 6.5)));
+ _b = exp( -(E(x) + 38) / 6.5) / (0.15 * (1 + exp( -(E(x) + 38) / 6.5)));
+ dmNap(dx) = _a * (1.0 - mNap(x)) - _b * mNap(x);
// differential equation for the Ihf activation variable
- _a = 1. / (1 + exp( (E(x) + 79.2) / 9.78));
- _b = 0.51 / (exp( (E(x) - 1.7) / 10) + exp( -(E(x) + 340) / 52)) + 1;
- dIh1(dx) = (_a - Ih1(x)) / _b;
+ _a = 1. / (1 + exp( (E(x) + 79.2) / 9.78));
+ _b = 0.51 / (exp( (E(x) - 1.7) / 10) + exp( -(E(x) + 340) / 52)) + 1;
+ dIh1(dx) = (_a - Ih1(x)) / _b;
// differential equation for the Ihs activation variable
- _a = 1. / (1 + exp( (E(x) + 71.3) / 7.9));
- _b = 5.6 / (exp( (E(x) - 1.7) / 14) + exp( -(E(x) + 260) / 43)) + 1;
- dIh2(dx) = (_a - Ih2(x)) / _b;
+ _a = 1. / (1 + exp( (E(x) + 71.3) / 7.9));
+ _b = 5.6 / (exp( (E(x) - 1.7) / 14) + exp( -(E(x) + 260) / 43)) + 1;
+ dIh2(dx) = (_a - Ih2(x)) / _b;
}
@@ -640,38 +640,38 @@ derivative( vector<double>& x, vector<double>& dx)
// =========== oscillators
const char* const cnrun::__CN_ParamNames_OscillatorColpitts[] = {
- "a",
- "g",
- "q",
- "\316\267"
+ "a",
+ "g",
+ "q",
+ "\316\267"
};
const char* const cnrun::__CN_ParamSyms_OscillatorColpitts[] = {
- "a",
- "g",
- "q",
- "eta"
+ "a",
+ "g",
+ "q",
+ "eta"
};
const double cnrun::__CN_Params_OscillatorColpitts[] = {
- 1.0, // a
- 0.0797, // g
- 0.6898, // q
- 6.2723 // eta
+ 1.0, // a
+ 0.0797, // g
+ 0.6898, // q
+ 6.2723 // eta
};
const char* const cnrun::__CN_VarNames_OscillatorColpitts[] = {
- "x0",
- "x1",
- "x2"
+ "x0",
+ "x1",
+ "x2"
};
const char* const cnrun::__CN_VarSyms_OscillatorColpitts[] = {
- "x0",
- "x1",
- "x2"
+ "x0",
+ "x1",
+ "x2"
};
const double cnrun::__CN_Vars_OscillatorColpitts[] = {
- 0.02,
- 0.69,
+ 0.02,
+ 0.69,
-0.53
};
@@ -680,17 +680,17 @@ void
cnrun::COscillatorColpitts::
derivative( vector<double>& x, vector<double>& dx)
{
- enum TParametersOscilColpitts {
- a, g, q,
- eta
- };
-
- dx0(dx) = P[a] * x1(x) + Isyn(x);
- dx1(dx) = -P[g] * (x0(x) + x2(x)) - P[q] * x1(x);
- dx2(dx) = P[eta] * (x1(x) + 1.0 - exp( -x0(x)));
-// dx[idx ] = p[0] * x[idx+1] + Isyn;
-// dx[idx+1] = -p[1] * (x[idx ] + x[idx+2]) - p[2] * x[idx+1];
-// dx[idx+2] = p[3] * (x[idx+1] + 1.0 - exp(-x[idx]));
+ enum TParametersOscilColpitts {
+ a, g, q,
+ eta
+ };
+
+ dx0(dx) = P[a] * x1(x) + Isyn(x);
+ dx1(dx) = -P[g] * (x0(x) + x2(x)) - P[q] * x1(x);
+ dx2(dx) = P[eta] * (x1(x) + 1.0 - exp( -x0(x)));
+// dx[idx ] = p[0] * x[idx+1] + Isyn;
+// dx[idx+1] = -p[1] * (x[idx ] + x[idx+2]) - p[2] * x[idx+1];
+// dx[idx+2] = p[3] * (x[idx+1] + 1.0 - exp(-x[idx]));
}
@@ -701,27 +701,27 @@ derivative( vector<double>& x, vector<double>& dx)
/*
const char* const __CN_ParamNames_OscillatorLV[] = {
- "Self inhibition",
+ "Self inhibition",
};
const char* const __CN_ParamSyms_OscillatorLV[] = {
- "rho_ii",
+ "rho_ii",
};
const double __CN_Params_OscillatorLV[] = {
- 1.0, // 0 - rho_ii: "self inhibition"
+ 1.0, // 0 - rho_ii: "self inhibition"
};
const char* const __CN_VarNames_OscillatorLV[] = {
- "Membrane potential, " __CN_PU_POTENTIAL,
- "Firing rate"
+ "Membrane potential, " __CN_PU_POTENTIAL,
+ "Firing rate"
};
const char* const __CN_VarSyms_OscillatorLV[] = {
- "E",
- "fr"
+ "E",
+ "fr"
};
const double __CN_Vars_OscillatorLV[] = {
- 0., // 0 - added a place for E
- 0.1 // 1 - firing rate
+ 0., // 0 - added a place for E
+ 0.1 // 1 - firing rate
};
@@ -734,32 +734,32 @@ const double __CN_Vars_OscillatorLV[] = {
const char* const cnrun::__CN_ParamNames_OscillatorVdPol[] = {
- "\316\267",
- "\317\211\302\262",
-// "\317\203"
+ "\316\267",
+ "\317\211\302\262",
+// "\317\203"
};
const char* const cnrun::__CN_ParamSyms_OscillatorVdPol[] = {
- "eta",
- "omegasq", // omega^2
-// "sigma"
+ "eta",
+ "omegasq", // omega^2
+// "sigma"
};
const double cnrun::__CN_Params_OscillatorVdPol[] = {
- 1.0, // eta
- 0.1, // omega^2
-// 0.0 // noise level
+ 1.0, // eta
+ 0.1, // omega^2
+// 0.0 // noise level
};
const char* const cnrun::__CN_VarNames_OscillatorVdPol[] = {
- "Amplitude",
- "v"
+ "Amplitude",
+ "v"
};
const char* const cnrun::__CN_VarSyms_OscillatorVdPol[] = {
- "A",
- "v"
+ "A",
+ "v"
};
const double cnrun::__CN_Vars_OscillatorVdPol[] = {
- 0.1, // amplitude
- 0.0 // internal var
+ 0.1, // amplitude
+ 0.0 // internal var
};
@@ -767,5 +767,9 @@ const double cnrun::__CN_Vars_OscillatorVdPol[] = {
//#endif // CN_WANT_MORE_NEURONS
-
-// eof
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/hosted-neurons.hh b/upstream/src/libcn/hosted-neurons.hh
index 18c2398..f71955c 100644
--- a/upstream/src/libcn/hosted-neurons.hh
+++ b/upstream/src/libcn/hosted-neurons.hh
@@ -10,11 +10,12 @@
-#ifndef LIBCN_HOSTED_NEURONS_H
-#define LIBCN_HOSTED_NEURONS_H
+#ifndef CNRUN_LIBCN_HOSTEDNEURONS_H_
+#define CNRUN_LIBCN_HOSTEDNEURONS_H_
#include "gsl/gsl_math.h"
+#include "forward-decls.hh"
#include "base-neuron.hh"
#include "hosted-attr.hh"
@@ -24,23 +25,22 @@
namespace cnrun {
-class CModel;
+enum class TIncludeOption { is_last, is_notlast, };
class C_HostedNeuron
: public C_BaseNeuron, public C_HostedAttributes {
- private:
- C_HostedNeuron();
+ DELETE_DEFAULT_METHODS (C_HostedNeuron)
protected:
- C_HostedNeuron (TUnitType intype, const char *inlabel,
- double x, double y, double z,
- CModel*, int s_mask,
- bool do_allocations_immediately);
+ C_HostedNeuron (TUnitType intype, const string& inlabel,
+ double x, double y, double z,
+ CModel*, int s_mask,
+ TIncludeOption include_option);
public:
- void reset_vars();
- double &var_value( size_t);
- const double &get_var_value( size_t) const;
+ void reset_vars();
+ double &var_value( size_t);
+ const double &get_var_value( size_t) const;
};
@@ -50,24 +50,24 @@ class C_HostedNeuron
class C_HostedConductanceBasedNeuron
: public C_HostedNeuron {
- private:
- C_HostedConductanceBasedNeuron();
+ DELETE_DEFAULT_METHODS (C_HostedConductanceBasedNeuron)
+
protected:
- C_HostedConductanceBasedNeuron (TUnitType intype, const char *inlabel,
- double inx, double iny, double inz,
- CModel* inM, int s_mask,
- bool do_allocations_immediately)
- : C_HostedNeuron (intype, inlabel, inx, iny, inz, inM, s_mask, do_allocations_immediately)
- {}
- public:
+ C_HostedConductanceBasedNeuron (TUnitType intype, const string& inlabel,
+ double inx, double iny, double inz,
+ CModel* inM, int s_mask,
+ TIncludeOption include_option)
+ : C_HostedNeuron (intype, inlabel, inx, iny, inz, inM, s_mask, include_option)
+ {}
- double E() const; // needs access to parent model var vector, defined in model.h
- double E( vector<double> &b) const { return b[idx+0]; }
- double& dE( vector<double> &b) { return b[idx+0]; }
+ public:
+ double E() const; // needs access to parent model var vector, defined in model.h
+ double E( vector<double> &b) const { return b[idx+0]; }
+ double& dE( vector<double> &b) { return b[idx+0]; }
- unsigned n_spikes_in_last_dt() const;
+ unsigned n_spikes_in_last_dt() const;
- void do_detect_spike_or_whatever();
+ void do_detect_spike_or_whatever();
};
@@ -78,19 +78,18 @@ class C_HostedConductanceBasedNeuron
class C_HostedRateBasedNeuron
: public C_HostedNeuron {
- private:
- C_HostedRateBasedNeuron();
-// {}
+ DELETE_DEFAULT_METHODS (C_HostedRateBasedNeuron)
+
protected:
- C_HostedRateBasedNeuron (TUnitType intype, const char *inlabel,
- double inx, double iny, double inz,
- CModel* inM, int s_mask,
- bool do_allocations_immediately)
- : C_HostedNeuron (intype, inlabel, inx, iny, inz, inM, s_mask, do_allocations_immediately)
- {}
+ C_HostedRateBasedNeuron (TUnitType intype, const string& inlabel,
+ double inx, double iny, double inz,
+ CModel* inM, int s_mask,
+ TIncludeOption include_option)
+ : C_HostedNeuron (intype, inlabel, inx, iny, inz, inM, s_mask, include_option)
+ {}
public:
- unsigned n_spikes_in_last_dt() const;
+ unsigned n_spikes_in_last_dt() const;
};
@@ -107,35 +106,37 @@ class C_HostedRateBasedNeuron
class CNeuronHH_d
: public C_HostedConductanceBasedNeuron {
+ DELETE_DEFAULT_METHODS (CNeuronHH_d)
+
public:
+ CNeuronHH_d (const string& inlabel,
+ double x, double y, double z,
+ CModel *inM, int s_mask = 0,
+ TIncludeOption include_option = TIncludeOption::is_last)
+ : C_HostedConductanceBasedNeuron (NT_HH_D, inlabel, x, y, z,
+ inM, s_mask, include_option)
+ {}
+
// parameters (since gcc 4.4, accessible from within member functions defined outside class definition, gee!)
- enum {
- gNa, ENa, gK, EK, gl, El, Cmem,
- alpha_m_a, alpha_m_b, alpha_m_c, beta_m_a, beta_m_b, beta_m_c,
- alpha_h_a, alpha_h_b, alpha_h_c, beta_h_a, beta_h_b, beta_h_c,
- alpha_n_a, alpha_n_b, alpha_n_c, beta_n_a, beta_n_b, beta_n_c,
- Idc,
- };
+ enum {
+ gNa, ENa, gK, EK, gl, El, Cmem,
+ alpha_m_a, alpha_m_b, alpha_m_c, beta_m_a, beta_m_b, beta_m_c,
+ alpha_h_a, alpha_h_b, alpha_h_c, beta_h_a, beta_h_b, beta_h_c,
+ alpha_n_a, alpha_n_b, alpha_n_c, beta_n_a, beta_n_b, beta_n_c,
+ Idc,
+ };
// current state
// these wrappers mainly for code legibility in derivative(); otherwise, not used
// for reporting, CModel accesses vars as V[idx+n]
- double m( vector<double>& b) const { return b[idx+1]; }
- double h( vector<double>& b) const { return b[idx+2]; }
- double n( vector<double>& b) const { return b[idx+3]; }
- double& dm( vector<double>& b) { return b[idx+1]; }
- double& dh( vector<double>& b) { return b[idx+2]; }
- double& dn( vector<double>& b) { return b[idx+3]; }
-
- CNeuronHH_d( const char *inlabel,
- double x, double y, double z,
- CModel *inM, int s_mask = 0,
- bool do_allocations_immediately = true)
- : C_HostedConductanceBasedNeuron (NT_HH_D, inlabel, x, y, z,
- inM, s_mask, do_allocations_immediately)
- {}
-
- void derivative( vector<double>&, vector<double>&) __attribute__ ((hot));
+ double m( vector<double>& b) const { return b[idx+1]; }
+ double h( vector<double>& b) const { return b[idx+2]; }
+ double n( vector<double>& b) const { return b[idx+3]; }
+ double& dm( vector<double>& b) { return b[idx+1]; }
+ double& dh( vector<double>& b) { return b[idx+2]; }
+ double& dn( vector<double>& b) { return b[idx+3]; }
+
+ void derivative( vector<double>&, vector<double>&) __attribute__ ((hot));
};
@@ -147,23 +148,25 @@ class CNeuronHH_d
class CNeuronHH2_d
: public C_HostedConductanceBasedNeuron {
+ DELETE_DEFAULT_METHODS (CNeuronHH2_d)
+
public:
- double m( vector<double>& b) const { return b[idx+1]; }
- double h( vector<double>& b) const { return b[idx+2]; }
- double n( vector<double>& b) const { return b[idx+3]; }
- double& dm( vector<double>& b) { return b[idx+1]; }
- double& dh( vector<double>& b) { return b[idx+2]; }
- double& dn( vector<double>& b) { return b[idx+3]; }
-
- CNeuronHH2_d( const char *inlabel,
- double x, double y, double z,
- CModel *inM, int s_mask = 0,
- bool do_allocations_immediately = true)
- : C_HostedConductanceBasedNeuron( NT_HH2_D, inlabel, x, y, z,
- inM, s_mask, do_allocations_immediately)
- {}
-
- void derivative( vector<double>&, vector<double>&);
+ CNeuronHH2_d (const string& inlabel,
+ double x, double y, double z,
+ CModel *inM, int s_mask = 0,
+ TIncludeOption include_option = TIncludeOption::is_last)
+ : C_HostedConductanceBasedNeuron( NT_HH2_D, inlabel, x, y, z,
+ inM, s_mask, include_option)
+ {}
+
+ double m( vector<double>& b) const { return b[idx+1]; }
+ double h( vector<double>& b) const { return b[idx+2]; }
+ double n( vector<double>& b) const { return b[idx+3]; }
+ double& dm( vector<double>& b) { return b[idx+1]; }
+ double& dh( vector<double>& b) { return b[idx+2]; }
+ double& dn( vector<double>& b) { return b[idx+3]; }
+
+ void derivative( vector<double>&, vector<double>&);
};
@@ -175,28 +178,30 @@ class CNeuronHH2_d
class CNeuronEC_d
: public C_HostedConductanceBasedNeuron {
+ DELETE_DEFAULT_METHODS (CNeuronEC_d)
+
public:
- double m( vector<double>& b) const { return b[idx+1]; }
- double h( vector<double>& b) const { return b[idx+2]; }
- double n( vector<double>& b) const { return b[idx+3]; }
- double Ih1( vector<double>& b) const { return b[idx+4]; }
- double Ih2( vector<double>& b) const { return b[idx+5]; }
- double& dm( vector<double>& b) { return b[idx+1]; }
- double& dh( vector<double>& b) { return b[idx+2]; }
- double& dn( vector<double>& b) { return b[idx+3]; }
- double& dIh1( vector<double>& b) { return b[idx+4]; }
- double& dIh2( vector<double>& b) { return b[idx+5]; }
-
-
- CNeuronEC_d( const char *inlabel,
- double x, double y, double z,
- CModel *inM, int s_mask = 0,
- bool do_allocations_immediately = true)
- : C_HostedConductanceBasedNeuron (NT_EC_D, inlabel, x, y, z,
- inM, s_mask, do_allocations_immediately)
- {}
-
- void derivative( vector<double>&, vector<double>&);
+ CNeuronEC_d( const string& inlabel,
+ double x, double y, double z,
+ CModel *inM, int s_mask = 0,
+ TIncludeOption include_option = TIncludeOption::is_last)
+ : C_HostedConductanceBasedNeuron (NT_EC_D, inlabel, x, y, z,
+ inM, s_mask, include_option)
+ {}
+
+ double m ( vector<double>& b) const { return b[idx+1]; }
+ double h ( vector<double>& b) const { return b[idx+2]; }
+ double n ( vector<double>& b) const { return b[idx+3]; }
+ double Ih1 ( vector<double>& b) const { return b[idx+4]; }
+ double Ih2 ( vector<double>& b) const { return b[idx+5]; }
+ double& dm ( vector<double>& b) { return b[idx+1]; }
+ double& dh ( vector<double>& b) { return b[idx+2]; }
+ double& dn ( vector<double>& b) { return b[idx+3]; }
+ double& dIh1 ( vector<double>& b) { return b[idx+4]; }
+ double& dIh2 ( vector<double>& b) { return b[idx+5]; }
+
+
+ void derivative( vector<double>&, vector<double>&);
};
@@ -207,30 +212,32 @@ class CNeuronEC_d
class CNeuronECA_d
: public C_HostedConductanceBasedNeuron {
+ DELETE_DEFAULT_METHODS (CNeuronECA_d)
+
public:
- double m( vector<double>& b) const { return b[idx+1]; }
- double h( vector<double>& b) const { return b[idx+2]; }
- double n( vector<double>& b) const { return b[idx+3]; }
- double mNap( vector<double>& b) const { return b[idx+4]; }
- double Ih1( vector<double>& b) const { return b[idx+5]; }
- double Ih2( vector<double>& b) const { return b[idx+6]; }
-
- double& dm( vector<double>& b) { return b[idx+1]; }
- double& dh( vector<double>& b) { return b[idx+2]; }
- double& dn( vector<double>& b) { return b[idx+3]; }
- double& dmNap( vector<double>& b) { return b[idx+4]; }
- double& dIh1( vector<double>& b) { return b[idx+5]; }
- double& dIh2( vector<double>& b) { return b[idx+6]; }
-
- CNeuronECA_d( const char *inlabel,
- double x, double y, double z,
- CModel *inM, int s_mask = 0,
- bool do_allocations_immediately = true)
- : C_HostedConductanceBasedNeuron( NT_ECA_D, inlabel, x, y, z,
- inM, s_mask, do_allocations_immediately)
- {}
-
- void derivative( vector<double>&, vector<double>&);
+ CNeuronECA_d( const string& inlabel,
+ double x, double y, double z,
+ CModel *inM, int s_mask = 0,
+ TIncludeOption include_option = TIncludeOption::is_last)
+ : C_HostedConductanceBasedNeuron( NT_ECA_D, inlabel, x, y, z,
+ inM, s_mask, include_option)
+ {}
+
+ double m( vector<double>& b) const { return b[idx+1]; }
+ double h( vector<double>& b) const { return b[idx+2]; }
+ double n( vector<double>& b) const { return b[idx+3]; }
+ double mNap( vector<double>& b) const { return b[idx+4]; }
+ double Ih1( vector<double>& b) const { return b[idx+5]; }
+ double Ih2( vector<double>& b) const { return b[idx+6]; }
+
+ double& dm( vector<double>& b) { return b[idx+1]; }
+ double& dh( vector<double>& b) { return b[idx+2]; }
+ double& dn( vector<double>& b) { return b[idx+3]; }
+ double& dmNap( vector<double>& b) { return b[idx+4]; }
+ double& dIh1( vector<double>& b) { return b[idx+5]; }
+ double& dIh2( vector<double>& b) { return b[idx+6]; }
+
+ void derivative( vector<double>&, vector<double>&);
};
//#endif // CN_WANT_MORE_NEURONS
@@ -242,33 +249,30 @@ class CNeuronECA_d
-
-
-
-
-
//#ifdef CN_WANT_MORE_NEURONS
class COscillatorColpitts
: public C_HostedConductanceBasedNeuron {
+ DELETE_DEFAULT_METHODS (COscillatorColpitts)
+
public:
- double x0( vector<double>& b) const { return b[idx+0]; } // there's no E() for this one
- double x1( vector<double>& b) const { return b[idx+1]; }
- double x2( vector<double>& b) const { return b[idx+2]; }
- double& dx0( vector<double>& b) { return b[idx+0]; }
- double& dx1( vector<double>& b) { return b[idx+1]; }
- double& dx2( vector<double>& b) { return b[idx+2]; }
-
- COscillatorColpitts( const char *inlabel,
- double x, double y, double z,
- CModel *inM, int s_mask = 0,
- bool do_allocations_immediately = true)
- : C_HostedConductanceBasedNeuron (NT_COLPITTS, inlabel, x, y, z,
- inM, s_mask, do_allocations_immediately)
- {}
-
- virtual void derivative( vector<double>&, vector<double>&);
+ COscillatorColpitts( const string& inlabel,
+ double x, double y, double z,
+ CModel *inM, int s_mask = 0,
+ TIncludeOption include_option = TIncludeOption::is_last)
+ : C_HostedConductanceBasedNeuron (NT_COLPITTS, inlabel, x, y, z,
+ inM, s_mask, include_option)
+ {}
+
+ double x0( vector<double>& b) const { return b[idx+0]; } // there's no E() for this one
+ double x1( vector<double>& b) const { return b[idx+1]; }
+ double x2( vector<double>& b) const { return b[idx+2]; }
+ double& dx0( vector<double>& b) { return b[idx+0]; }
+ double& dx1( vector<double>& b) { return b[idx+1]; }
+ double& dx2( vector<double>& b) { return b[idx+2]; }
+
+ virtual void derivative( vector<double>&, vector<double>&);
};
@@ -284,24 +288,24 @@ class COscillatorLV
: public C_HostedConductanceBasedNeuron {
public:
- double fr( vector<double>& b) const { return b[idx+1]; }
- double& dfr( vector<double>& b) { return b[idx+1]; }
-
- COscillatorLV( const char *inlabel,
- double x, double y, double z,
- CModel *inM, int s_mask = 0,
- bool do_allocations_immediately = true)
- : C_HostedConductanceBasedNeuron( NT_LV, inlabel, x, y, z,
- inM, s_mask, do_allocations_immediately)
- {}
-
- enum TParametersOscilLV {
- rho
- };
- void derivative( vector<double>& x, vector<double>& dx)
- {
- dE(dx) = fr(x) * (1.0 - P[rho] * fr(x)) - Isyn(x);
- }
+ double fr( vector<double>& b) const { return b[idx+1]; }
+ double& dfr( vector<double>& b) { return b[idx+1]; }
+
+        COscillatorLV( const string& inlabel,
+                       double x, double y, double z,
+                       CModel *inM, int s_mask = 0,
+                       TIncludeOption include_option = TIncludeOption::is_last)
+              : C_HostedConductanceBasedNeuron( NT_LV, inlabel, x, y, z,
+                                                inM, s_mask, include_option)
+            {}
+
+ enum TParametersOscilLV {
+ rho
+ };
+ void derivative( vector<double>& x, vector<double>& dx)
+ {
+ dE(dx) = fr(x) * (1.0 - P[rho] * fr(x)) - Isyn(x);
+ }
};
@@ -313,37 +317,41 @@ class COscillatorLV
class COscillatorVdPol
: public C_HostedConductanceBasedNeuron {
+ DELETE_DEFAULT_METHODS (COscillatorVdPol)
+
public:
- double amp( vector<double>& b) const { return b[idx+0]; }
- double _x( vector<double>& b) const { return b[idx+1]; }
- double& damp( vector<double>& b) { return b[idx+0]; }
- double& d_x( vector<double>& b) { return b[idx+1]; }
-
- COscillatorVdPol( const char *inlabel,
- double x, double y, double z,
- CModel *inM, int s_mask = 0,
- bool do_allocations_immediately = true)
- : C_HostedConductanceBasedNeuron (NT_VDPOL, inlabel, x, y, z,
- inM, s_mask, do_allocations_immediately)
- {}
-
- enum TParametersOscilVdPol {
- eta, omega2
- };
- void derivative( vector<double> &x, vector<double> &dx)
- {
- damp(dx) = _x(x);
- d_x(dx) = (P[eta] - gsl_pow_2( amp(x))) * _x(x) - P[omega2] * amp(x) + Isyn(x);
- }
+ COscillatorVdPol (const string& inlabel,
+ double x, double y, double z,
+ CModel *inM, int s_mask = 0,
+ TIncludeOption include_option = TIncludeOption::is_last)
+ : C_HostedConductanceBasedNeuron (NT_VDPOL, inlabel, x, y, z,
+ inM, s_mask, include_option)
+ {}
+
+ double amp( vector<double>& b) const { return b[idx+0]; }
+ double _x( vector<double>& b) const { return b[idx+1]; }
+ double& damp( vector<double>& b) { return b[idx+0]; }
+ double& d_x( vector<double>& b) { return b[idx+1]; }
+
+ enum TParametersOscilVdPol {
+ eta, omega2
+ };
+ void derivative( vector<double> &x, vector<double> &dx)
+ {
+ damp(dx) = _x(x);
+ d_x(dx) = (P[eta] - gsl_pow_2( amp(x))) * _x(x) - P[omega2] * amp(x) + Isyn(x);
+ }
};
-
//#endif // CN_WANT_MORE_NEURONS
-
-
}
#endif
-// EOF
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/hosted-synapses.cc b/upstream/src/libcn/hosted-synapses.cc
index cda2029..7d340e1 100644
--- a/upstream/src/libcn/hosted-synapses.cc
+++ b/upstream/src/libcn/hosted-synapses.cc
@@ -27,16 +27,16 @@ using namespace std;
// the base synapse here
cnrun::C_HostedSynapse::
C_HostedSynapse( TUnitType intype,
- C_BaseNeuron *insource, C_BaseNeuron *intarget,
- double ing, CModel *inM, int s_mask,
- bool do_allocations_immediately)
+ C_BaseNeuron *insource, C_BaseNeuron *intarget,
+ double ing, CModel *inM, int s_mask,
+ TIncludeOption include_option)
: C_BaseSynapse( intype, insource, intarget, ing, inM, s_mask),
- C_HostedAttributes()
+ C_HostedAttributes()
{
- if ( M )
- M->include_unit( this, do_allocations_immediately);
- else
- idx = (unsigned long)-1;
+ if ( M )
+ M->include_unit( this, include_option);
+ else
+ idx = (unsigned long)-1;
}
@@ -44,8 +44,8 @@ C_HostedSynapse( TUnitType intype,
cnrun::C_HostedSynapse::
~C_HostedSynapse()
{
- if ( __cn_verbosely > 5 )
- fprintf( stderr, " deleting hosted synapse \"%s\"\n", _label);
+ if ( cn_verbosely > 5 )
+ fprintf( stderr, " deleting hosted synapse \"%s\"\n", _label);
}
@@ -56,84 +56,84 @@ cnrun::C_HostedSynapse::
// -- parameters
const char* const cnrun::__CN_ParamNames_SynapseAB_dd[] = {
-// "Synaptic strength g, " __CN_PU_CONDUCTANCE,
- "Reversal potential Esyn, " __CN_PU_POTENTIAL,
- "Presyn threshold potential Epre, " __CN_PU_POTENTIAL,
- "Rise rate \316\261, " __CN_PU_RATE,
- "Decay rate \316\262, " __CN_PU_RATE,
- "Time of transmitter release, " __CN_PU_TIME,
-// "Noise level \317\203",
+// "Synaptic strength g, " __CN_PU_CONDUCTANCE,
+ "Reversal potential Esyn, " __CN_PU_POTENTIAL,
+ "Presyn threshold potential Epre, " __CN_PU_POTENTIAL,
+ "Rise rate \316\261, " __CN_PU_RATE,
+ "Decay rate \316\262, " __CN_PU_RATE,
+ "Time of transmitter release, " __CN_PU_TIME,
+// "Noise level \317\203",
};
const char* const cnrun::__CN_ParamSyms_SynapseAB_dd[] = {
-// "gsyn",
- "Esyn",
- "Epre",
- "alpha",
- "beta",
- "trel",
-// "sigma",
+// "gsyn",
+ "Esyn",
+ "Epre",
+ "alpha",
+ "beta",
+ "trel",
+// "sigma",
};
const double cnrun::__CN_Params_SynapseAB_dd[] = {
-// 0.12,
- 0,
+// 0.12,
+ 0,
-20,
- 0.5,
- 0.05,
- 5.0,
-// 0.
+ 0.5,
+ 0.05,
+ 5.0,
+// 0.
};
const double cnrun::__CN_Params_SynapseABMinus_dd[] = {
-// 0.12,
- 0,
+// 0.12,
+ 0,
-20,
- 0.27785150819749,
- 0.05,
- 5.0,
-// 0.
+ 0.27785150819749,
+ 0.05,
+ 5.0,
+// 0.
};
const double cnrun::__CN_Params_SynapseMxAB_dd[] = {
-// 0.12,
- 0,
+// 0.12,
+ 0,
-20,
- 0.27785150819749, // the only parameter differing from its AB namesake,
- // which is also by principle the same as in the ABMinus variation
- 0.05,
- 5.0,
-// 0.
+ 0.27785150819749, // the only parameter differing from its AB namesake,
+ // which is also by principle the same as in the ABMinus variation
+ 0.05,
+ 5.0,
+// 0.
};
const char* const cnrun::__CN_ParamNames_SynapseAB_dr[] = {
-// "Synaptic strength g, " __CN_PU_CONDUCTANCE,
- "Assumed (target->E - Esyn), " __CN_PU_POTENTIAL,
- "Presyn threshold potential Epre, " __CN_PU_POTENTIAL,
- "Rise rate \316\261, " __CN_PU_RATE,
- "Decay rate \316\262, " __CN_PU_RATE,
- "Time of transmitter release, " __CN_PU_TIME,
-// "Noise level \317\203",
+// "Synaptic strength g, " __CN_PU_CONDUCTANCE,
+ "Assumed (target->E - Esyn), " __CN_PU_POTENTIAL,
+ "Presyn threshold potential Epre, " __CN_PU_POTENTIAL,
+ "Rise rate \316\261, " __CN_PU_RATE,
+ "Decay rate \316\262, " __CN_PU_RATE,
+ "Time of transmitter release, " __CN_PU_TIME,
+// "Noise level \317\203",
};
const char* const cnrun::__CN_ParamSyms_SynapseAB_dr[] = {
-// "gsyn",
- "Ediff",
- "Epre",
- "alpha",
- "beta",
- "trel",
-// "sigma",
+// "gsyn",
+ "Ediff",
+ "Epre",
+ "alpha",
+ "beta",
+ "trel",
+// "sigma",
};
const double cnrun::__CN_Params_SynapseMxAB_dr[] = {
-// 0.12,
+// 0.12,
-60 - 0, // Ediff: a reasonable Esyn - target->E, the latter being -60 mV at rest
-20,
- 0.27785150819749,
- 0.05,
- 5.0,
-// 0.
+ 0.27785150819749,
+ 0.05,
+ 5.0,
+// 0.
};
@@ -143,52 +143,52 @@ const double cnrun::__CN_Params_SynapseMxAB_dr[] = {
const char* const cnrun::__CN_ParamNames_SynapseAB_rr[] = {
-// "Synaptic strength g, " __CN_PU_CONDUCTANCE,
- "Assumed (target->E - Esyn), " __CN_PU_VOLTAGE,
- "Rise rate \316\261, " __CN_PU_RATE,
- "Decay rate \316\262, " __CN_PU_RATE,
- "Refractory period T, " __CN_PU_TIME,
-// "Noise level \317\203",
+// "Synaptic strength g, " __CN_PU_CONDUCTANCE,
+ "Assumed (target->E - Esyn), " __CN_PU_VOLTAGE,
+ "Rise rate \316\261, " __CN_PU_RATE,
+ "Decay rate \316\262, " __CN_PU_RATE,
+ "Refractory period T, " __CN_PU_TIME,
+// "Noise level \317\203",
};
const char* const cnrun::__CN_ParamSyms_SynapseAB_rr[] = {
-// "gsyn",
- "Ediff",
- "alpha",
- "beta",
- "T",
-// "sigma",
+// "gsyn",
+ "Ediff",
+ "alpha",
+ "beta",
+ "T",
+// "sigma",
};
const double cnrun::__CN_Params_SynapseAB_rr[] = {
-// 0.12,
+// 0.12,
-60 - 0,
- 0.27785150819749,
- 0.05,
- 5,
-// 0.
+ 0.27785150819749,
+ 0.05,
+ 5,
+// 0.
};
const char* const cnrun::__CN_ParamNames_SynapseRall_dd[] = {
-// "Synaptic strength g, " __CN_PU_CONDUCTANCE,
- "Reversal potential, " __CN_PU_POTENTIAL,
- "Presynaptic threshold potential, " __CN_PU_POTENTIAL,
- "\317\204, " __CN_PU_RATE,
-// "Noise level \317\203",
+// "Synaptic strength g, " __CN_PU_CONDUCTANCE,
+ "Reversal potential, " __CN_PU_POTENTIAL,
+ "Presynaptic threshold potential, " __CN_PU_POTENTIAL,
+ "\317\204, " __CN_PU_RATE,
+// "Noise level \317\203",
};
const char* const cnrun::__CN_ParamSyms_SynapseRall_dd[] = {
-// "gsyn",
- "Esyn",
- "Epre",
- "tau",
-// "sigma",
+// "gsyn",
+ "Esyn",
+ "Epre",
+ "tau",
+// "sigma",
};
const double cnrun::__CN_Params_SynapseRall_dd[] = {
-// 0.12,
- 0,
+// 0.12,
+ 0,
-20,
- 2,
-// 0.
+ 2,
+// 0.
};
@@ -197,27 +197,27 @@ const double cnrun::__CN_Params_SynapseRall_dd[] = {
// -- variables
const char* const cnrun::__CN_VarNames_SynapseAB[] = {
- "Amount of neurotransmitter released S"
+ "Amount of neurotransmitter released S"
};
const char* const cnrun::__CN_VarSyms_SynapseAB[] = {
- "S"
+ "S"
};
const double cnrun::__CN_Vars_SynapseAB[] = {
- 0.
+ 0.
};
const char* const cnrun::__CN_VarNames_SynapseRall[] = {
- "Amount of neurotransmitter released S",
- "Amount of neurotransmitter absorbed R",
+ "Amount of neurotransmitter released S",
+ "Amount of neurotransmitter absorbed R",
};
const char* const cnrun::__CN_VarSyms_SynapseRall[] = {
- "S",
- "R",
+ "S",
+ "R",
};
const double cnrun::__CN_Vars_SynapseRall[] = {
- 0.,
- 0.
+ 0.,
+ 0.
};
@@ -230,18 +230,18 @@ void
cnrun::CSynapseAB_dd::
derivative( vector<double>& x, vector<double>& dx)
{
- if ( x[0] - t_last_release_started <= P[_rtime_] ) {
- // continue release from an old spike
- dS(dx) = P[_alpha_] * (1 - S(x)) - P[_beta_] * S(x);
- } else
- if ( _source->E(x) > P[_Epre_] ) {
- // new spike ... start releasing
- t_last_release_started = x[0];
- dS(dx) = P[_alpha_] * (1 - S(x)) - P[_beta_] * S(x);
- } else {
- // no release
- dS(dx) = -P[_beta_] * S(x);
- }
+ if ( x[0] - t_last_release_started <= P[_rtime_] ) {
+ // continue release from an old spike
+ dS(dx) = P[_alpha_] * (1 - S(x)) - P[_beta_] * S(x);
+ } else
+ if ( _source->E(x) > P[_Epre_] ) {
+ // new spike ... start releasing
+ t_last_release_started = x[0];
+ dS(dx) = P[_alpha_] * (1 - S(x)) - P[_beta_] * S(x);
+ } else {
+ // no release
+ dS(dx) = -P[_beta_] * S(x);
+ }
}
@@ -251,18 +251,18 @@ void
cnrun::CSynapseABMinus_dd::
derivative( vector<double>& x, vector<double>& dx)
{
- if ( x[0] - t_last_release_started <= P[_rtime_] ) {
- // continue release from an old spike
- dS(dx) = P[_alpha_] * 1 - P[_beta_] * S(x);
- } else
- if ( _source->E(x) > P[_Epre_] ) {
- // new spike ... start releasing
- t_last_release_started = x[0];
- dS(dx) = P[_alpha_] * 1 - P[_beta_] * S(x);
- } else {
- // no release
- dS(dx) = -P[_beta_] * S(x);
- }
+ if ( x[0] - t_last_release_started <= P[_rtime_] ) {
+ // continue release from an old spike
+ dS(dx) = P[_alpha_] * 1 - P[_beta_] * S(x);
+ } else
+ if ( _source->E(x) > P[_Epre_] ) {
+ // new spike ... start releasing
+ t_last_release_started = x[0];
+ dS(dx) = P[_alpha_] * 1 - P[_beta_] * S(x);
+ } else {
+ // no release
+ dS(dx) = -P[_beta_] * S(x);
+ }
}
@@ -274,26 +274,26 @@ void
cnrun::CSynapseMxAB_dd::
derivative( vector<double>& x, vector<double>& dx)
{
-// printf( "%s %lu %d %g\n", _source->label, _source->serial_id, _source->idx, _source->E(x));
-
- if ( q() > 0 ) {
- unsigned effective_q = q();
- // as we nudge along a little within RK's operational
- // dt, some spikes can expire in that brief while:
- // decrement q then, just for this while
- while ( effective_q && M->model_time(x) - _kq[q()-effective_q] > P[_rtime_] )
- --effective_q;
+// printf( "%s %lu %d %g\n", _source->label, _source->serial_id, _source->idx, _source->E(x));
+
+ if ( q() > 0 ) {
+ size_t effective_q = q();
+ // as we nudge along a little within RK's operational
+ // dt, some spikes can expire in that brief while:
+ // decrement q then, just for this while
+ while ( effective_q && M->model_time(x) - _kq[q()-effective_q] > P[_rtime_] )
+ --effective_q;
#ifdef __CN_MORECODE__
- if ( effective_q < q() && M->verbosely > 6 )
- printf( "YMxAB %s smacks %u spike(s) of %u at %g(+%g)\n", label,
- (unsigned)q() - effective_q, (unsigned)q(),
- M->model_time(),
- M->model_time(x) - M->model_time());
+ if ( effective_q < q() && M->verbosely > 6 )
+ printf( "YMxAB %s smacks %zu spike(s) of %zu at %g(+%g)\n", label,
+ (size_t)q() - effective_q, (size_t)q(),
+ M->model_time(),
+ M->model_time(x) - M->model_time());
#endif
- dS(dx) = P[_alpha_] * effective_q - P[_beta_] * S(x);
- } else
- // no release, decay
- dS(dx) = -P[_beta_] * S(x);
+ dS(dx) = P[_alpha_] * effective_q - P[_beta_] * S(x);
+ } else
+ // no release, decay
+ dS(dx) = -P[_beta_] * S(x);
}
@@ -302,20 +302,20 @@ void
cnrun::CSynapseMxAB_dd::
update_queue()
{
- unsigned k = _source -> n_spikes_in_last_dt();
- while ( k-- )
- _kq.push_back( model_time());
+ size_t k = _source -> n_spikes_in_last_dt();
+ while ( k-- )
+ _kq.push_back( model_time());
// see if the oldest spike has gone past synapse release time
// disregard spike duration, measure time from saved spike_start
// (which is == spike_end)
- while ( true ) {
- if ( q() > 0 && model_time() - _kq.front() > P[_rtime_] )
- _kq.erase( _kq.begin());
- else
- break;
-// cout << "q--\n";
- }
+ while ( true ) {
+ if ( q() > 0 && model_time() - _kq.front() > P[_rtime_] )
+ _kq.erase( _kq.begin());
+ else
+ break;
+// cout << "q--\n";
+ }
}
@@ -331,10 +331,10 @@ void
cnrun::CSynapseAB_rr::
derivative( vector<double>& x, vector<double>& dx)
{
- // if ( source()->F(x) > 0 )
- // printf( "%s->F(x) = %g\n", _source->label, source()->F(x));
- dS(dx) = -P[_beta_] * S(x)
- + P[_alpha_] * _numerator / (exp( P[_beta_] / source()->F(x)) + 1);
+ // if ( source()->F(x) > 0 )
+ // printf( "%s->F(x) = %g\n", _source->label, source()->F(x));
+ dS(dx) = -P[_beta_] * S(x)
+ + P[_alpha_] * _numerator / (exp( P[_beta_] / source()->F(x)) + 1);
}
@@ -349,10 +349,13 @@ void
cnrun::CSynapseRall_dd::
derivative( vector<double>& x, vector<double>& dx)
{
- dR(dx) = 1 / P[_tau_] * (-R(x) + Heaviside( _source->E(x) - P[_Epre_]));
- dS(dx) = 1 / P[_tau_] * (-S(x) + R(x));
+ dR(dx) = 1 / P[_tau_] * (-R(x) + Heaviside( _source->E(x) - P[_Epre_]));
+ dS(dx) = 1 / P[_tau_] * (-S(x) + R(x));
}
-
-
-// eof
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/hosted-synapses.hh b/upstream/src/libcn/hosted-synapses.hh
index f709133..075d321 100644
--- a/upstream/src/libcn/hosted-synapses.hh
+++ b/upstream/src/libcn/hosted-synapses.hh
@@ -10,8 +10,8 @@
*/
-#ifndef LIBCN_HOSTED_SYNAPSES_H
-#define LIBCN_HOSTED_SYNAPSES_H
+#ifndef CNRUN_LIBCN_HOSTEDSYNAPSES_H_
+#define CNRUN_LIBCN_HOSTEDSYNAPSES_H_
#include <vector>
#include <queue>
@@ -35,23 +35,23 @@ namespace cnrun {
class C_HostedSynapse
: public C_BaseSynapse, public C_HostedAttributes {
- private:
- C_HostedSynapse();
+ DELETE_DEFAULT_METHODS (C_HostedSynapse)
+
protected:
- C_HostedSynapse( TUnitType intype,
- C_BaseNeuron *insource, C_BaseNeuron *intarget,
- double ing, CModel*, int s_mask = 0,
- bool do_allocations_immediately = true);
+ C_HostedSynapse (TUnitType intype,
+ C_BaseNeuron *insource, C_BaseNeuron *intarget,
+ double ing, CModel*, int s_mask = 0,
+ TIncludeOption include_option = TIncludeOption::is_last);
public:
~C_HostedSynapse();
- void reset_vars();
- double &var_value( size_t);
- const double &get_var_value( size_t) const;
+ void reset_vars();
+ double &var_value( size_t);
+ const double &get_var_value( size_t) const;
- double S() const; // needs access to parent model var vector, defined in model.h
- double S( vector<double> &b) const { return b[idx+0]; }
- double& dS( vector<double> &b) const { return b[idx+0]; }
+ double S() const; // needs access to parent model var vector, defined in model.h
+ double S( vector<double> &b) const { return b[idx+0]; }
+ double& dS( vector<double> &b) const { return b[idx+0]; }
};
@@ -85,31 +85,33 @@ class C_HostedSynapse
class CSynapseAB_dd
: public C_HostedSynapse {
+ DELETE_DEFAULT_METHODS (CSynapseAB_dd)
+
public:
- CSynapseAB_dd( C_BaseNeuron *insource, C_BaseNeuron *intarget,
- double ing, CModel *inM, int s_mask = 0,
- bool do_allocations_immediately = true,
- TUnitType alt_type = YT_AB_DD)
- : C_HostedSynapse( alt_type, insource, intarget,
- ing, inM, s_mask, do_allocations_immediately)
- {}
-
- enum {
- _Esyn_, _Epre_, _alpha_, _beta_, _rtime_
- };
-
- double Isyn( const C_BaseNeuron &with_neuron, double g) const __attribute__ ((hot))
- {
- return -g * S() * (with_neuron.E() - P[_Esyn_]);
-// return -P[_gsyn_] * S() * (_target->E() - P[_Esyn_]);
- }
- double Isyn( vector<double>& b, const C_BaseNeuron &with_neuron, double g) const __attribute__ ((hot))
- {
- return -g * S(b) * (with_neuron.E(b) - P[_Esyn_]);
-// return -P[_gsyn_] * S(b) * (_target->E(b) - P[_Esyn_]);
- }
-
- void derivative( vector<double>&, vector<double>&) __attribute__ ((hot));
+ CSynapseAB_dd (C_BaseNeuron *insource, C_BaseNeuron *intarget,
+ double ing, CModel *inM, int s_mask = 0,
+ TIncludeOption include_option = TIncludeOption::is_last,
+ TUnitType alt_type = YT_AB_DD)
+ : C_HostedSynapse (alt_type, insource, intarget,
+ ing, inM, s_mask, include_option)
+ {}
+
+ enum {
+ _Esyn_, _Epre_, _alpha_, _beta_, _rtime_
+ };
+
+ double Isyn( const C_BaseNeuron &with_neuron, double g) const __attribute__ ((hot))
+ {
+ return -g * S() * (with_neuron.E() - P[_Esyn_]);
+// return -P[_gsyn_] * S() * (_target->E() - P[_Esyn_]);
+ }
+ double Isyn( vector<double>& b, const C_BaseNeuron &with_neuron, double g) const __attribute__ ((hot))
+ {
+ return -g * S(b) * (with_neuron.E(b) - P[_Esyn_]);
+// return -P[_gsyn_] * S(b) * (_target->E(b) - P[_Esyn_]);
+ }
+
+ void derivative( vector<double>&, vector<double>&) __attribute__ ((hot));
};
@@ -123,37 +125,39 @@ class CSynapseAB_rd;
class CSynapseAB_rr
: public C_HostedSynapse {
+ DELETE_DEFAULT_METHODS (CSynapseAB_rr)
+
public:
- CSynapseAB_rr( C_BaseNeuron *insource, C_BaseNeuron *intarget,
- double ing, CModel *inM, int s_mask = 0,
- bool do_allocations_immediately = true,
- TUnitType alt_type = YT_AB_RR)
- : C_HostedSynapse( alt_type, insource, intarget,
- ing, inM, s_mask, do_allocations_immediately)
- {}
-
- enum {
- _Ediff_, _alpha_, _beta_, _T_, _sigma_
- };
+ CSynapseAB_rr (C_BaseNeuron *insource, C_BaseNeuron *intarget,
+ double ing, CModel *inM, int s_mask = 0,
+ TIncludeOption include_option = TIncludeOption::is_last,
+ TUnitType alt_type = YT_AB_RR)
+ : C_HostedSynapse( alt_type, insource, intarget,
+ ing, inM, s_mask, include_option)
+ {}
+
+ enum {
+ _Ediff_, _alpha_, _beta_, _T_, _sigma_
+ };
// supply own Isyn to avoid referencing target->E
- double Isyn( const C_BaseNeuron &with_neuron, double g) const
- {
- return -g * S() * P[_Ediff_];
- }
- double Isyn( vector<double>& x, const C_BaseNeuron &with_neuron, double g) const
- {
- return -g * S(x) * P[_Ediff_];
- }
-
- void derivative( vector<double>&, vector<double>&);
-
- void param_changed_hook()
- {
- _numerator = exp( P[_beta_] * P[_T_]) + 1;
- }
+ double Isyn( const C_BaseNeuron &with_neuron, double g) const
+ {
+ return -g * S() * P[_Ediff_];
+ }
+ double Isyn( vector<double>& x, const C_BaseNeuron &with_neuron, double g) const
+ {
+ return -g * S(x) * P[_Ediff_];
+ }
+
+ void derivative( vector<double>&, vector<double>&);
+
+ void param_changed_hook()
+ {
+ _numerator = exp( P[_beta_] * P[_T_]) + 1;
+ }
private:
- double _numerator;
+ double _numerator;
};
@@ -166,33 +170,35 @@ class CSynapseAB_rr
class CSynapseMxAB_dd
: public CSynapseAB_dd, public C_MultiplexingAttributes {
+ DELETE_DEFAULT_METHODS (CSynapseMxAB_dd)
+
public:
- CSynapseMxAB_dd( C_BaseNeuron *insource, C_BaseNeuron *intarget,
- double ing, CModel *inM, int s_mask = 0,
- bool do_allocations_immediately = true,
- TUnitType alt_type = YT_MXAB_DD)
- : CSynapseAB_dd( insource, intarget,
- ing, inM, s_mask, do_allocations_immediately,
- alt_type)
- {}
-
- void reset_state()
- {
- C_HostedSynapse::reset_state();
- C_MultiplexingAttributes::reset();
- }
+ CSynapseMxAB_dd (C_BaseNeuron *insource, C_BaseNeuron *intarget,
+ double ing, CModel *inM, int s_mask = 0,
+ TIncludeOption include_option = TIncludeOption::is_last,
+ TUnitType alt_type = YT_MXAB_DD)
+ : CSynapseAB_dd (insource, intarget,
+ ing, inM, s_mask, include_option,
+ alt_type)
+ {}
+
+ void reset_state()
+ {
+ C_HostedSynapse::reset_state();
+ C_MultiplexingAttributes::reset();
+ }
// because Mx*'s synapse source is always a standalone, non-integratable neuron,
// which don't propagate vars onto M->V, we fold S(x) to make the actual S value available
// from within the integrator
- double S() const { return C_HostedSynapse::S(); }
- double S( vector<double> &unused) const { return C_HostedSynapse::S(); }
+ double S() const { return C_HostedSynapse::S(); }
+ double S( vector<double> &unused) const { return C_HostedSynapse::S(); }
- void derivative( vector<double>&, vector<double>&) __attribute__ ((hot));
+ void derivative( vector<double>&, vector<double>&) __attribute__ ((hot));
private:
- friend class CModel;
- void update_queue();
+ friend class CModel;
+ void update_queue();
};
@@ -202,24 +208,26 @@ class CSynapseMxAB_dd
class CSynapseMxAB_dr
: public CSynapseMxAB_dd {
+ DELETE_DEFAULT_METHODS (CSynapseMxAB_dr)
+
public:
- CSynapseMxAB_dr( C_BaseNeuron *insource, C_BaseNeuron *intarget,
- double ing, CModel *inM, int s_mask = 0,
- bool do_allocations_immediately = true)
- : CSynapseMxAB_dd( insource, intarget,
- ing, inM, s_mask, do_allocations_immediately,
- YT_MXAB_DR)
- {}
-
- enum { _Ediff_, /* ... */ };
- double Isyn( const C_BaseNeuron &with_neuron, double g) const
- {
- return -g * S() * P[_Ediff_];
- }
- double Isyn( vector<double>& unused, const C_BaseNeuron &with_neuron, double g) const
- {
- return -g * S() * P[_Ediff_];
- }
+ CSynapseMxAB_dr (C_BaseNeuron *insource, C_BaseNeuron *intarget,
+ double ing, CModel *inM, int s_mask = 0,
+ TIncludeOption include_option = TIncludeOption::is_last)
+ : CSynapseMxAB_dd (insource, intarget,
+ ing, inM, s_mask, include_option,
+ YT_MXAB_DR)
+ {}
+
+ enum { _Ediff_, /* ... */ };
+ double Isyn( const C_BaseNeuron &with_neuron, double g) const
+ {
+ return -g * S() * P[_Ediff_];
+ }
+ double Isyn( vector<double>& unused, const C_BaseNeuron &with_neuron, double g) const
+ {
+ return -g * S() * P[_Ediff_];
+ }
};
@@ -230,26 +238,25 @@ class CSynapseMxAB_dr
-
-
-
class CSynapseABMinus_dd
: public CSynapseAB_dd {
+ DELETE_DEFAULT_METHODS (CSynapseABMinus_dd)
+
public:
- CSynapseABMinus_dd( C_BaseNeuron *insource, C_BaseNeuron *intarget,
- double ing, CModel *inM, int s_mask = 0,
- bool do_allocations_immediately = true)
- : CSynapseAB_dd( insource, intarget,
- ing, inM, s_mask, do_allocations_immediately,
- YT_ABMINUS_DD)
- {}
-
- enum {
- _Esyn_, _Epre_, _alpha_, _beta_, _rtime_, _sigma_
- };
-
- void derivative( vector<double>&, vector<double>&);
+ CSynapseABMinus_dd (C_BaseNeuron *insource, C_BaseNeuron *intarget,
+ double ing, CModel *inM, int s_mask = 0,
+ TIncludeOption include_option = TIncludeOption::is_last)
+ : CSynapseAB_dd (insource, intarget,
+ ing, inM, s_mask, include_option,
+ YT_ABMINUS_DD)
+ {}
+
+ enum {
+ _Esyn_, _Epre_, _alpha_, _beta_, _rtime_, _sigma_
+ };
+
+ void derivative( vector<double>&, vector<double>&);
};
@@ -266,31 +273,33 @@ class CSynapseABMinus_rr;
class CSynapseRall_dd
: public C_HostedSynapse {
+ DELETE_DEFAULT_METHODS (CSynapseRall_dd)
+
public:
- CSynapseRall_dd( C_BaseNeuron *insource, C_BaseNeuron *intarget,
- double ing, CModel *inM, int s_mask = 0,
- bool do_allocations_immediately = true)
- : C_HostedSynapse( YT_RALL_DD, insource, intarget,
- ing, inM, s_mask, do_allocations_immediately)
- {}
-
- double& R( vector<double>& b) { return b[idx+1]; }
- double& dR( vector<double>& b) { return b[idx+1]; }
-
- enum {
- _Esyn_, _Epre_, _tau_, _sigma_
- };
-
- double Isyn( const C_BaseNeuron &with_neuron, double g) const
- {
- return -g * S() * (with_neuron.E() - P[_Esyn_]);
- }
- double Isyn( vector<double>&b, const C_BaseNeuron &with_neuron, double g) const
- {
- return -g * S(b) * (with_neuron.E(b) - P[_Esyn_]);
- }
-
- void derivative( vector<double>&, vector<double>&);
+ CSynapseRall_dd (C_BaseNeuron *insource, C_BaseNeuron *intarget,
+ double ing, CModel *inM, int s_mask = 0,
+ TIncludeOption include_option = TIncludeOption::is_last)
+ : C_HostedSynapse (YT_RALL_DD, insource, intarget,
+ ing, inM, s_mask, include_option)
+ {}
+
+ double& R( vector<double>& b) { return b[idx+1]; }
+ double& dR( vector<double>& b) { return b[idx+1]; }
+
+ enum {
+ _Esyn_, _Epre_, _tau_, _sigma_
+ };
+
+ double Isyn( const C_BaseNeuron &with_neuron, double g) const
+ {
+ return -g * S() * (with_neuron.E() - P[_Esyn_]);
+ }
+ double Isyn( vector<double>&b, const C_BaseNeuron &with_neuron, double g) const
+ {
+ return -g * S(b) * (with_neuron.E(b) - P[_Esyn_]);
+ }
+
+ void derivative( vector<double>&, vector<double>&);
};
// TODO
@@ -303,4 +312,9 @@ class CSynapseRall_rr;
#endif
-// EOF
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/integrate-base.hh b/upstream/src/libcn/integrate-base.hh
index 558a180..e48522b 100644
--- a/upstream/src/libcn/integrate-base.hh
+++ b/upstream/src/libcn/integrate-base.hh
@@ -9,9 +9,11 @@
* A base class for integrators, to be plugged into CModel
*/
+#ifndef CNRUN_LIBCN_INTEGRATE_BASE_H_
+#define CNRUN_LIBCN_INTEGRATE_BASE_H_
-#ifndef LIBCN_INTEGRATE_BASE_H
-#define LIBCN_INTEGRATE_BASE_H
+#include "libstilton/lang.hh"
+#include "forward-decls.hh"
#if HAVE_CONFIG_H && !defined(VERSION)
# include "config.h"
@@ -20,39 +22,42 @@
namespace cnrun {
-class CModel;
-
class CIntegrate_base {
-// friend class CModel;
-// protected:
+ DELETE_DEFAULT_METHODS (CIntegrate_base)
+
public:
- double _dt_min, _dt_max,
- _eps, _eps_abs, _eps_rel,
- dt; // that which is current
-
- bool is_owned;
-
- CModel *model;
-
- CIntegrate_base( double dt_min, double dt_max,
- double eps, double eps_abs, double eps_rel,
- bool inis_owned)
- : _dt_min (dt_min), _dt_max (dt_max),
- _eps (eps), _eps_abs (eps_abs), _eps_rel (eps_rel),
- dt (dt_min),
- is_owned (inis_owned)
- {}
- virtual ~CIntegrate_base()
- {}
-
- virtual void cycle() = 0;
- virtual void fixate() = 0;
- virtual void prepare() = 0;
+ double _dt_min, _dt_max,
+ _eps, _eps_abs, _eps_rel,
+ dt; // that which is current
+
+ bool is_owned;
+
+ CModel *model;
+
+ CIntegrate_base (double dt_min, double dt_max,
+ double eps, double eps_abs, double eps_rel,
+ bool inis_owned)
+ : _dt_min (dt_min), _dt_max (dt_max),
+ _eps (eps), _eps_abs (eps_abs), _eps_rel (eps_rel),
+ dt (dt_min),
+ is_owned (inis_owned)
+ {}
+ virtual ~CIntegrate_base()
+ {}
+
+ virtual void cycle() = 0;
+ virtual void fixate() = 0;
+ virtual void prepare() = 0;
};
}
#endif
-// EOF
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/integrate-rk65.hh b/upstream/src/libcn/integrate-rk65.hh
index ee37e74..8f84941 100644
--- a/upstream/src/libcn/integrate-rk65.hh
+++ b/upstream/src/libcn/integrate-rk65.hh
@@ -10,10 +10,12 @@
*/
-#ifndef LIBCN_INTEGRATE_RK65_H
-#define LIBCN_INTEGRATE_RK65_H
+#ifndef CNRUN_LIBCN_INTEGRATERK65_H_
+#define CNRUN_LIBCN_INTEGRATERK65_H_
#include <vector>
+#include "libstilton/lang.hh"
+#include "forward-decls.hh"
#include "integrate-base.hh"
#if HAVE_CONFIG_H && !defined(VERSION)
@@ -25,27 +27,33 @@ namespace cnrun {
class CIntegrateRK65
: public CIntegrate_base {
+ DELETE_DEFAULT_METHODS (CIntegrateRK65)
+
public:
- double _dt_max_cap;
+ double _dt_max_cap;
- CIntegrateRK65( double dt_min = 1e-6, double dt_max = .5, double dt_max_cap = 5,
- double eps = 1e-8, double eps_abs = 1e-12, double eps_rel = 1e-6,
- bool inis_owned = true)
- : CIntegrate_base (dt_min, dt_max, eps, eps_abs, eps_rel, is_owned),
- _dt_max_cap (dt_max_cap)
- {}
+ CIntegrateRK65( double dt_min = 1e-6, double dt_max = .5, double dt_max_cap = 5,
+ double eps = 1e-8, double eps_abs = 1e-12, double eps_rel = 1e-6,
+ bool inis_owned = true)
+        : CIntegrate_base (dt_min, dt_max, eps, eps_abs, eps_rel, inis_owned),
+ _dt_max_cap (dt_max_cap)
+ {}
- void cycle() __attribute__ ((hot));
- void fixate() __attribute__ ((hot));
- void prepare();
+ void cycle() __attribute__ ((hot));
+ void fixate() __attribute__ ((hot));
+ void prepare();
private:
- std::vector<double> Y[9], F[9], y5;
+ std::vector<double> Y[9], F[9], y5;
};
-
}
#endif
-// EOF
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/model-cycle.cc b/upstream/src/libcn/model-cycle.cc
index e5de3d9..741c322 100644
--- a/upstream/src/libcn/model-cycle.cc
+++ b/upstream/src/libcn/model-cycle.cc
@@ -38,28 +38,28 @@ using namespace std;
inline namespace {
double __Butchers_a[9][8] = {
- { },
- { 1./9 },
- { .5/9, .5/9 },
- { 0.416666666666667e-1, 0., 0.125 },
- { 1./6, 0., -0.5, 2./3 },
- { 0.1875e+1, 0., -0.7875e+1, 0.7e+1, -0.5 },
- { -0.4227272727272727e+1, 0., 0.176995738636364e+2, -0.142883522727273e+2, 0.522017045454545, 0.104403409090909e+1 },
- { 0.840622673179752e+1, 0., -0.337303717185049e+2, 0.271460231129622e+2, 0.342046929709216, -0.184653767923258e+1, 0.577349465373733 },
- { 0.128104575163399, 0., 0., -0.108433734939759, 0.669375, -0.146666666666667, 0.284444444444444, 0.173176381998583 },
+ { },
+ { 1./9 },
+ { .5/9, .5/9 },
+ { 0.416666666666667e-1, 0., 0.125 },
+ { 1./6, 0., -0.5, 2./3 },
+ { 0.1875e+1, 0., -0.7875e+1, 0.7e+1, -0.5 },
+ { -0.4227272727272727e+1, 0., 0.176995738636364e+2, -0.142883522727273e+2, 0.522017045454545, 0.104403409090909e+1 },
+ { 0.840622673179752e+1, 0., -0.337303717185049e+2, 0.271460231129622e+2, 0.342046929709216, -0.184653767923258e+1, 0.577349465373733 },
+ { 0.128104575163399, 0., 0., -0.108433734939759, 0.669375, -0.146666666666667, 0.284444444444444, 0.173176381998583 },
};
double __Butchers_b[9] = {
- 0.567119155354449e-1,
- 0.,
- 0.,
- 0.210909572355356,
- 0.141490384615385,
- 0.202051282051282,
- 0.253186813186813,
- 0.843679809736684e-1,
- 0.512820512820513e-1
+ 0.567119155354449e-1,
+ 0.,
+ 0.,
+ 0.210909572355356,
+ 0.141490384615385,
+ 0.202051282051282,
+ 0.253186813186813,
+ 0.843679809736684e-1,
+ 0.512820512820513e-1
};
} // inline namespace
@@ -69,16 +69,16 @@ void
cnrun::CIntegrateRK65::
prepare()
{
- for ( unsigned short i = 0; i < 9; ++i )
- Y[i].resize( model->_var_cnt), F[i].resize( model->_var_cnt);
- y5.resize( model->_var_cnt);
-
- if ( model->standalone_unit_cnt() > 0 )
- if ( _dt_max > model->_discrete_dt ) {
- _dt_max = model->_discrete_dt;
- if ( model->verbosely > 1 )
- cout << "CIntegrateRK65: Set dt_max to model->discrete_dt: " << _dt_max << endl;
- }
+ for ( unsigned short i = 0; i < 9; ++i )
+ Y[i].resize( model->_var_cnt), F[i].resize( model->_var_cnt);
+ y5.resize( model->_var_cnt);
+
+ if ( model->n_standalone_units() > 0 )
+ if ( _dt_max > model->_discrete_dt ) {
+ _dt_max = model->_discrete_dt;
+ if ( model->options.verbosely > 1 )
+ cout << "CIntegrateRK65: Set dt_max to model->discrete_dt: " << _dt_max << endl;
+ }
}
@@ -89,70 +89,70 @@ cycle()
{
// omp stuff found inapplicable due to considerable overhead in sys time
// (thread creation)
- unsigned int i, j, k;
+ unsigned int i, j, k;
- double aF;
+ double aF;
// calculate iterative terms rk65_Y[__i] and rk65_F[__i] (to sixth order)
- for ( i = 0; i < 9; ++i ) {
+ for ( i = 0; i < 9; ++i ) {
//#pragma omp parallel for schedule(static,model->_var_cnt/2+1) firstprivate(aF,j,i)
- for ( k = 0; k < model->_var_cnt; ++k ) {
- aF = 0.0;
- for ( j = 0; j < i; ++j )
- aF += __Butchers_a[i][j] * F[j][k];
- Y[i][k] = model->V[k] + dt * aF;
- }
- // see to this vector's dt
- F[i][0] = 1.;
+ for ( k = 0; k < model->_var_cnt; ++k ) {
+ aF = 0.0;
+ for ( j = 0; j < i; ++j )
+ aF += __Butchers_a[i][j] * F[j][k];
+ Y[i][k] = model->V[k] + dt * aF;
+ }
+ // see to this vector's dt
+ F[i][0] = 1.;
//#pragma omp consider...
- for_model_hosted_neurons (model,N)
- (*N) -> derivative( Y[i], F[i]);
- for_model_hosted_synapses (model,S)
- (*S) -> derivative( Y[i], F[i]);
- }
+ for ( auto& N : model->hosted_neurons )
+ N -> derivative( Y[i], F[i]);
+ for ( auto& S : model->hosted_synapses )
+ S -> derivative( Y[i], F[i]);
+ }
// sum up Y[i] and F[i] to build 5th order scheme -> y5
//#pragma omp parallel for private(aF,j)
- for ( k = 0; k < model->_var_cnt; ++k ) {
- aF = 0.0;
- for ( j = 0; j < 8; ++j )
- aF += __Butchers_a[8][j] * F[j][k];
- y5[k] = model->V[k] + dt * aF;
- }
+ for ( k = 0; k < model->_var_cnt; ++k ) {
+ aF = 0.0;
+ for ( j = 0; j < 8; ++j )
+ aF += __Butchers_a[8][j] * F[j][k];
+ y5[k] = model->V[k] + dt * aF;
+ }
// sum up Y[i] and F[i] to build 6th order scheme -> W
//#pragma omp parallel for schedule(static,model->_var_cnt/2+1) private(aF,j)
- for ( k = 0; k < model->_var_cnt; ++k ) {
- aF = 0.0;
- for ( j = 0; j < 9; ++j )
- aF += __Butchers_b[j] * F[j][k];
- model->W[k] = model->V[k] + dt * aF;
- }
+ for ( k = 0; k < model->_var_cnt; ++k ) {
+ aF = 0.0;
+ for ( j = 0; j < 9; ++j )
+ aF += __Butchers_b[j] * F[j][k];
+ model->W[k] = model->V[k] + dt * aF;
+ }
// kinkiness in synapses causes dt to rocket
- double dtx = min( _dt_max, dt * _dt_max_cap);
+ double dtx = min( _dt_max, dt * _dt_max_cap);
// determine minimal necessary new dt to get error < eps based on the
// difference between results in y5 and W
- double try_eps, delta, try_dt;
+ double try_eps, delta, try_dt;
// exclude time (at index 0)
//#pragma omp parallel for private(try_eps,delta,try_dtj)
- for ( k = 1; k < model->_var_cnt; ++k ) {
- try_eps = max (_eps_abs, min (_eps, abs(_eps_rel * model->W[k])));
- delta = abs (model->W[k] - y5[k]);
- if ( delta > DBL_EPSILON * y5[k] ) {
- try_dt = exp( (log(try_eps) - log(delta)) / 6) * dt;
- if ( try_dt < dtx )
- dtx = try_dt;
- }
- }
+ for ( k = 1; k < model->_var_cnt; ++k ) {
+ try_eps = max (_eps_abs, min (_eps, abs(_eps_rel * model->W[k])));
+ delta = abs (model->W[k] - y5[k]);
+ if ( delta > DBL_EPSILON * y5[k] ) {
+ try_dt = exp( (log(try_eps) - log(delta)) / 6) * dt;
+ if ( try_dt < dtx )
+ dtx = try_dt;
+ }
+ }
// make sure we don't grind to a halt
- if ( dtx < _dt_min )
- dtx = _dt_min;
+ if ( dtx < _dt_min )
+ dtx = _dt_min;
// set the new step
- dt = dtx;
+ dt = dtx;
}
@@ -163,43 +163,46 @@ cycle()
// -------------- CModel::advance and dependents
+inline namespace {
volatile sig_atomic_t chris_at_kbd;
void
ctrl_c_handler( int signum)
{
- chris_at_kbd = true;
+ chris_at_kbd = true;
+}
+
}
unsigned int
cnrun::CModel::
-advance( double dist, double *cpu_time_used_p)
+advance( const double dist, double * const cpu_time_used_p)
{
- chris_at_kbd = 0;
- signal( SIGINT, ctrl_c_handler);
-
- if ( unit_list.size() == 0 ) {
- fprintf( stderr, "Model is empty\n");
- return 0;
- }
- if ( _status & CN_MDL_NOTREADY )
- prepare_advance();
-
- bool have_hosted_units = (hosted_unit_cnt() > 0),
- have_standalone_units = (standalone_unit_cnt() > 0),
- have_ddtbound_units = (ddtbound_unit_cnt() > 0);
-
- if ( have_hosted_units && !have_standalone_units && !have_ddtbound_units )
- return _do_advance_on_pure_hosted( dist, cpu_time_used_p);
- if ( !have_hosted_units && have_standalone_units && !have_ddtbound_units )
- return _do_advance_on_pure_standalone( dist, cpu_time_used_p);
- if ( !have_hosted_units && !have_standalone_units && have_ddtbound_units )
- return _do_advance_on_pure_ddtbound( dist, cpu_time_used_p);
-
- unsigned int retval = _do_advance_on_mixed( dist, cpu_time_used_p);
- signal( SIGINT, SIG_IGN);
- return retval;
+        chris_at_kbd = 0;
+
+        if ( units.size() == 0 ) {
+                fprintf( stderr, "Model is empty\n");
+                return 0;
+        }
+        signal( SIGINT, ctrl_c_handler);
+        if ( !is_ready )
+ prepare_advance();
+
+ bool have_hosted_units = !!n_hosted_units(),
+ have_standalone_units = !!n_standalone_units(),
+ have_ddtbound_units = !!n_ddtbound_units();
+
+        unsigned int retval;
+        if ( have_hosted_units && !have_standalone_units && !have_ddtbound_units )
+                retval = _do_advance_on_pure_hosted( dist, cpu_time_used_p);
+        else if ( !have_hosted_units && have_standalone_units && !have_ddtbound_units )
+                retval = _do_advance_on_pure_standalone( dist, cpu_time_used_p);
+        else if ( !have_hosted_units && !have_standalone_units && have_ddtbound_units )
+                retval = _do_advance_on_pure_ddtbound( dist, cpu_time_used_p);
+        else retval = _do_advance_on_mixed( dist, cpu_time_used_p);
+        signal( SIGINT, SIG_IGN);
+        return retval;
}
void
@@ -207,28 +210,29 @@ __attribute__ ((hot))
cnrun::CModel::
_setup_schedulers()
{
- regular_periods.clear();
- regular_periods_last_checked.clear();
- if ( units_with_periodic_sources.size() > 0 ) { // determine period(s) at which to wake up reader update loop
- for_all_units_with_periodic_sources (U)
- for ( auto S = (*U) -> sources.begin(); S != (*U)->sources.end(); ++S )
- regular_periods.push_back( (reinterpret_cast<CSourcePeriodic*>(S->source)) -> period);
- regular_periods.unique();
- regular_periods.sort();
- regular_periods_last_checked.resize( regular_periods.size());
- }
-
- if ( verbosely > 2 && regular_periods.size() > 0 ) {
- printf( "%zd timepoint(s) in scheduler_update_periods: ", regular_periods.size());
- auto I = regular_periods.begin();
- for ( size_t i = 0; i < regular_periods.size()-1; ++i, ++I )
- printf( "%g, ", *I);
- printf( "%g\n\n", regular_periods.back());
- }
+ regular_periods.clear();
+ regular_periods_last_checked.clear();
+ if ( units_with_periodic_sources.size() ) { // determine period(s) at which to wake up reader update loop
+ for ( auto& U : units_with_periodic_sources )
+ for ( auto& S : U -> sources )
+ regular_periods.push_back(
+ (reinterpret_cast<CSourcePeriodic*>(S.source)) -> period);
+ regular_periods.unique();
+ regular_periods.sort();
+ regular_periods_last_checked.resize( regular_periods.size());
+ }
+
+ if ( options.verbosely > 2 && regular_periods.size() > 0 ) {
+ printf( "%zd timepoint(s) in scheduler_update_periods: ", regular_periods.size());
+ auto I = regular_periods.begin();
+ for ( size_t i = 0; i < regular_periods.size()-1; ++i, ++I )
+ printf( "%g, ", *I);
+ printf( "%g\n\n", regular_periods.back());
+ }
// ensure all schedulers are effective at the beginning, too
- for_all_units_with_periodic_sources (U)
- (*U) -> apprise_from_sources();
+ for ( auto& U : units_with_periodic_sources )
+ U->apprise_from_sources();
}
@@ -236,29 +240,20 @@ void
cnrun::CModel::
prepare_advance()
{
- if ( _status & CN_MDL_LOGDT && !_dt_logger ) {
- string fname = name + ".dt";
- _dt_logger = new ofstream( fname.data());
- }
- if ( _status & CN_MDL_LOGSPIKERS && !_spike_logger ) {
- string fname = name + ".spikes";
- _spike_logger = new ofstream( fname.data());
- }
+ if ( options.log_dt && !_dt_logger )
+ _dt_logger = new ofstream( string(name + ".dt").c_str());
+ if ( options.log_spikers && !_spike_logger )
+ _spike_logger = new ofstream( string(name + ".spikes").c_str());
- _setup_schedulers();
+ _setup_schedulers();
- if ( !hosted_unit_cnt() )
- _integrator->dt = _discrete_dt;
+ if ( !n_hosted_units() )
+ _integrator->dt = _discrete_dt;
- if ( ddtbound_unit_cnt() )
- _status |= CN_MDL_HAS_DDTB_UNITS;
- else
- _status &= ~CN_MDL_HAS_DDTB_UNITS;
+ is_ready = true;
- _status &= ~CN_MDL_NOTREADY;
-
- if ( verbosely > 5 )
- fprintf( stderr, "Model prepared\n");
+ if ( options.verbosely > 5 )
+ fprintf( stderr, "Model prepared\n");
}
@@ -269,97 +264,92 @@ prepare_advance()
// they, logically, have no inputs
#define _DO_ADVANCE_COMMON_INLOOP_BEGIN \
- if ( chris_at_kbd ) { \
- printf( "\nInterrupted\n"); \
- break; \
- } \
- for_all_units_with_contiuous_sources (U) \
- (*U)->apprise_from_sources(); \
- { \
- auto I = regular_periods.begin(); \
- auto Ic = regular_periods_last_checked.begin(); \
- for ( ; I != regular_periods.end(); ++I, ++Ic ) \
- if ( unlikely(model_time() >= *I * (*Ic + 1)) ) { \
- (*Ic)++; \
- for_all_units_with_periodic_sources (U) \
- (*U)->apprise_from_sources(); \
- } \
- } \
- for_all_conscious_neurons (N) \
- (*N) -> possibly_fire(); \
- \
- for ( auto Yc = mx_syn_list.begin(); Yc != mx_syn_list.end(); ++Yc ) \
- if ( (*Yc)->_source ) \
- (*Yc) -> update_queue();
+ if ( chris_at_kbd ) { \
+ printf( "\nInterrupted\n"); \
+ break; \
+ } \
+ make_units_with_continuous_sources_apprise_from_sources(); \
+ { \
+ auto I = regular_periods.begin(); \
+ auto Ic = regular_periods_last_checked.begin(); \
+ for ( ; I != regular_periods.end(); ++I, ++Ic ) \
+ if ( unlikely(model_time() >= *I * (*Ic + 1)) ) { \
+ (*Ic)++; \
+ make_units_with_periodic_sources_apprise_from_sources(); \
+ } \
+ } \
+ make_conscious_neurons_possibly_fire(); \
+ \
+ for ( auto& Y : multiplexing_synapses ) \
+ if ( Y->_source ) \
+ Y->update_queue();
#define _DO_ADVANCE_COMMON_INLOOP_MID \
- if ( have_listeners ) { \
- if ( have_discrete_listen_dt ) { \
- if ( model_time() - last_made_listen >= listen_dt ) { \
- for_all_listening_units (U) \
- (*U) -> tell(); \
- last_made_listen += listen_dt; \
- } \
- } else \
- for_all_listening_units (U) \
- (*U) -> tell(); \
- } \
- if ( unlikely (_status & CN_MDL_LOGDT) ) \
- (*_dt_logger) << model_time() << "\t" << dt() << endl; \
- \
- for_all_spikelogging_neurons (N) { \
- (*N) -> do_detect_spike_or_whatever(); \
- if ( !(_status & CN_MDL_DISKLESS) && \
- (*N)->n_spikes_in_last_dt() && \
- _status & CN_MDL_LOGSPIKERS ) { \
- (*_spike_logger) << model_time() << "\t"; \
- if ( _status & CN_MDL_LOGUSINGID ) \
- (*_spike_logger) << (*N)->_serial_id << endl; \
- else \
- (*_spike_logger) << (*N)->_label << endl; \
- } \
- }
+ if ( have_listeners ) { \
+ if ( have_discrete_listen_dt ) { \
+ if ( model_time() - last_made_listen >= options.listen_dt ) { \
+ make_listening_units_tell(); \
+ last_made_listen += options.listen_dt; \
+ } \
+ } else \
+ make_listening_units_tell(); \
+ } \
+ if ( unlikely (options.log_dt) ) \
+ (*_dt_logger) << model_time() << "\t" << dt() << endl; \
+ \
+ for ( auto& N : spikelogging_neurons ) { \
+ N -> do_detect_spike_or_whatever(); \
+ if ( !is_diskless && \
+ N->n_spikes_in_last_dt() && \
+ options.log_spikers ) { \
+ (*_spike_logger) << model_time() << "\t"; \
+ if ( options.log_spikers_use_serial_id ) \
+ (*_spike_logger) << N->_serial_id << endl; \
+ else \
+ (*_spike_logger) << N->_label << endl; \
+ } \
+ }
#define _DO_ADVANCE_COMMON_INLOOP_END \
- ++_cycle; \
- ++steps; \
- if ( verbosely != 0 ) { \
- if ( unlikely (((double)(clock() - cpu_time_lastchecked)) / CLOCKS_PER_SEC > 2) ) { \
- cpu_time_lastchecked = clock(); \
- if ( _status & CN_MDL_DISPLAY_PROGRESS_PERCENT && !(_status & CN_MDL_DISPLAY_PROGRESS_TIME) ) \
- fprintf( stderr, "\r\033[%dC%4.1f%%\r", \
- (verbosely < 0) ? -(verbosely+1)*8 : 0, \
- 100 - (model_time() - time_ending) / (time_started - time_ending) * 100); \
- else if ( _status & CN_MDL_DISPLAY_PROGRESS_TIME && !(_status & CN_MDL_DISPLAY_PROGRESS_PERCENT) ) \
- fprintf( stderr, "\r\033[%dC%'6.0fms\r", \
- (verbosely < 0) ? -(verbosely+1)*16 : 0, \
- model_time()); \
- else if ( _status & CN_MDL_DISPLAY_PROGRESS_PERCENT && _status & CN_MDL_DISPLAY_PROGRESS_TIME ) \
- fprintf( stderr, "\r\033[%dC%'6.0fms %4.1f%%\r", \
- (verbosely < 0) ? -(verbosely+1)*24 : 0, \
- model_time(), \
- 100 - (model_time() - time_ending) / (time_started - time_ending) * 100); \
- fflush( stderr); \
- } \
- }
+ ++_cycle; \
+ ++steps; \
+ if ( options.verbosely != 0 ) { \
+ if ( unlikely (((double)(clock() - cpu_time_lastchecked)) / CLOCKS_PER_SEC > 2) ) { \
+ cpu_time_lastchecked = clock(); \
+ if ( options.display_progress_percent && !options.display_progress_time ) \
+ fprintf( stderr, "\r\033[%dC%4.1f%%\r", \
+ (options.verbosely < 0) ? -(options.verbosely+1)*8 : 0, \
+ 100 - (model_time() - time_ending) / (time_started - time_ending) * 100); \
+ else if ( options.display_progress_time && !options.display_progress_percent ) \
+ fprintf( stderr, "\r\033[%dC%'6.0fms\r", \
+ (options.verbosely < 0) ? -(options.verbosely+1)*16 : 0, \
+ model_time()); \
+ else if ( options.display_progress_percent && options.display_progress_time ) \
+ fprintf( stderr, "\r\033[%dC%'6.0fms %4.1f%%\r", \
+ (options.verbosely < 0) ? -(options.verbosely+1)*24 : 0, \
+ model_time(), \
+ 100 - (model_time() - time_ending) / (time_started - time_ending) * 100); \
+ fflush( stderr); \
+ } \
+ }
#define _DO_ADVANCE_COMMON_EPILOG \
- cpu_time_ended = clock(); \
- double cpu_time_taken_seconds = ((double) (cpu_time_ended - cpu_time_started)) / CLOCKS_PER_SEC; \
- if ( cpu_time_used_p ) \
- *cpu_time_used_p = cpu_time_taken_seconds; \
- if ( verbosely > 0 || verbosely <= -1 ) { \
- fprintf( stderr, "\r\033[K"); \
- fflush( stderr); \
- } \
- if ( verbosely > 0 ) \
- printf( "@%.1fmsec (+%.1f in %lu cycles in %.2f sec CPU time:" \
- " avg %.3g \302\265s/cyc, ratio to CPU time %.2g)\n\n", \
- model_time(), dist, steps, cpu_time_taken_seconds, \
- model_time()/_cycle * 1e3, model_time() / cpu_time_taken_seconds / 1e3);
+ cpu_time_ended = clock(); \
+ double cpu_time_taken_seconds = ((double) (cpu_time_ended - cpu_time_started)) / CLOCKS_PER_SEC; \
+ if ( cpu_time_used_p ) \
+ *cpu_time_used_p = cpu_time_taken_seconds; \
+ if ( options.verbosely > 0 || options.verbosely <= -1 ) { \
+ fprintf( stderr, "\r\033[K"); \
+ fflush( stderr); \
+ } \
+ if ( options.verbosely > 0 ) \
+ printf( "@%.1fmsec (+%.1f in %lu cycles in %.2f sec CPU time:" \
+ " avg %.3g \302\265s/cyc, ratio to CPU time %.2g)\n\n", \
+ model_time(), dist, steps, cpu_time_taken_seconds, \
+ model_time()/_cycle * 1e3, model_time() / cpu_time_taken_seconds / 1e3);
@@ -368,38 +358,38 @@ prepare_advance()
unsigned int
__attribute__ ((hot))
cnrun::CModel::
-_do_advance_on_pure_hosted( double dist, double *cpu_time_used_p)
+_do_advance_on_pure_hosted( const double dist, double * const cpu_time_used_p)
{
- bool have_listeners = (lisn_unit_list.size() > 0),
- have_discrete_listen_dt = (listen_dt > 0.);
+ bool have_listeners = (listening_units.size() > 0),
+ have_discrete_listen_dt = (options.listen_dt > 0.);
- clock_t cpu_time_started = clock(),
- cpu_time_ended,
- cpu_time_lastchecked = cpu_time_started;
+ clock_t cpu_time_started = clock(),
+ cpu_time_ended,
+ cpu_time_lastchecked = cpu_time_started;
- double time_started = model_time(),
- time_ending = time_started + dist,
- last_made_listen = time_started;
+ double time_started = model_time(),
+ time_ending = time_started + dist,
+ last_made_listen = time_started;
- unsigned long steps = 0;
- do {
- _DO_ADVANCE_COMMON_INLOOP_BEGIN
+ unsigned long steps = 0;
+ do {
+ _DO_ADVANCE_COMMON_INLOOP_BEGIN
- _integrator->cycle();
+ _integrator->cycle();
- _DO_ADVANCE_COMMON_INLOOP_MID
+ _DO_ADVANCE_COMMON_INLOOP_MID
- // fixate
- _integrator->fixate();
+ // fixate
+ _integrator->fixate();
- _DO_ADVANCE_COMMON_INLOOP_END
+ _DO_ADVANCE_COMMON_INLOOP_END
- // model_time is advanced implicitly in _integrator->cycle()
- } while ( model_time() < time_ending );
+ // model_time is advanced implicitly in _integrator->cycle()
+ } while ( model_time() < time_ending );
- _DO_ADVANCE_COMMON_EPILOG
+ _DO_ADVANCE_COMMON_EPILOG
- return steps;
+ return steps;
}
@@ -407,51 +397,51 @@ _do_advance_on_pure_hosted( double dist, double *cpu_time_used_p)
unsigned int
__attribute__ ((hot))
cnrun::CModel::
-_do_advance_on_pure_standalone( double dist, double *cpu_time_used_p)
+_do_advance_on_pure_standalone( const double dist, double * const cpu_time_used_p)
{
- bool have_listeners = (lisn_unit_list.size() > 0),
- have_discrete_listen_dt = (listen_dt > 0.);
+ bool have_listeners = !!listening_units.size(),
+ have_discrete_listen_dt = (options.listen_dt > 0.);
- clock_t cpu_time_started = clock(),
- cpu_time_ended,
- cpu_time_lastchecked = cpu_time_started;
+ clock_t cpu_time_started = clock(),
+ cpu_time_ended,
+ cpu_time_lastchecked = cpu_time_started;
- double time_started = model_time(),
- time_ending = time_started + dist,
- last_made_listen = time_started;
+ double time_started = model_time(),
+ time_ending = time_started + dist,
+ last_made_listen = time_started;
- unsigned long steps = 0;
- do {
- _DO_ADVANCE_COMMON_INLOOP_BEGIN
+ unsigned long steps = 0;
+ do {
+ _DO_ADVANCE_COMMON_INLOOP_BEGIN
- // service simple units w/out any vars on the integration vector V
- for_all_standalone_neurons (N)
- if ( !(*N)->is_conscious() )
- (*N) -> preadvance();
- for_all_standalone_synapses (Y)
- (*Y) -> preadvance();
+ // service simple units w/out any vars on the integration vector V
+ for ( auto& N : standalone_neurons )
+ if ( !N->is_conscious() )
+ N -> preadvance();
+ for ( auto& Y : standalone_synapses )
+ Y -> preadvance();
- // even in the case of n_hosted_{neurons,units} == 0, we would need _integrator->cycle() to advance V[0],
- // which is our model_time(); which is kind of expensive, so here's a shortcut
- V[0] += _discrete_dt;
- // _discrete_time += _discrete_dt; // not necessary
+ // even in the case of n_hosted_{neurons,units} == 0, we would need _integrator->cycle() to advance V[0],
+ // which is our model_time(); which is kind of expensive, so here's a shortcut
+ V[0] += _discrete_dt;
+ // _discrete_time += _discrete_dt; // not necessary
- _DO_ADVANCE_COMMON_INLOOP_MID
+ _DO_ADVANCE_COMMON_INLOOP_MID
- // fixate
- for_all_standalone_neurons (N)
- if ( !(*N)->is_conscious() )
- (*N) -> fixate();
- for_all_standalone_synapses (Y)
- (*Y) -> fixate();
+ // fixate
+ for ( auto& N : standalone_neurons )
+ if ( !N->is_conscious() )
+ N -> fixate();
+ for ( auto& Y : standalone_synapses )
+ Y -> fixate();
- _DO_ADVANCE_COMMON_INLOOP_END
+ _DO_ADVANCE_COMMON_INLOOP_END
- } while ( model_time() < time_ending );
+ } while ( model_time() < time_ending );
- _DO_ADVANCE_COMMON_EPILOG
+ _DO_ADVANCE_COMMON_EPILOG
- return steps;
+ return steps;
}
@@ -463,49 +453,49 @@ _do_advance_on_pure_standalone( double dist, double *cpu_time_used_p)
unsigned int
__attribute__ ((hot))
cnrun::CModel::
-_do_advance_on_pure_ddtbound( double dist, double *cpu_time_used_p)
+_do_advance_on_pure_ddtbound( const double dist, double * const cpu_time_used_p)
{
- bool have_listeners = (lisn_unit_list.size() > 0),
- have_discrete_listen_dt = (listen_dt > 0.);
+ bool have_listeners = (listening_units.size() > 0),
+ have_discrete_listen_dt = (options.listen_dt > 0.);
- clock_t cpu_time_started = clock(),
- cpu_time_ended,
- cpu_time_lastchecked = cpu_time_started;
+ clock_t cpu_time_started = clock(),
+ cpu_time_ended,
+ cpu_time_lastchecked = cpu_time_started;
- double time_started = model_time(),
- time_ending = time_started + dist,
- last_made_listen = time_started;
+ double time_started = model_time(),
+ time_ending = time_started + dist,
+ last_made_listen = time_started;
- unsigned long steps = 0;
- do {
- _DO_ADVANCE_COMMON_INLOOP_BEGIN
+ unsigned long steps = 0;
+ do {
+ _DO_ADVANCE_COMMON_INLOOP_BEGIN
- // lastly, service units only serviceable at discrete dt
- for_all_ddtbound_neurons (N)
- if ( !(*N)->is_conscious() )
- (*N) -> preadvance();
- for_all_ddtbound_synapses (Y)
- (*Y) -> preadvance();
+ // lastly, service units only serviceable at discrete dt
+ for ( auto& N : ddtbound_neurons )
+ if ( !N->is_conscious() )
+ N -> preadvance();
+ for ( auto& Y : ddtbound_synapses )
+ Y -> preadvance();
- V[0] += _discrete_dt;
- _discrete_time += _discrete_dt;
+ V[0] += _discrete_dt;
+ _discrete_time += _discrete_dt;
- _DO_ADVANCE_COMMON_INLOOP_MID
+ _DO_ADVANCE_COMMON_INLOOP_MID
- // fixate
- for_all_ddtbound_neurons (N)
- if ( !(*N)->is_conscious() )
- (*N) -> fixate();
- for_all_ddtbound_synapses (Y)
- (*Y) -> fixate();
+ // fixate
+ for ( auto& N : ddtbound_neurons )
+ if ( !N->is_conscious() )
+ N -> fixate();
+ for ( auto& Y : ddtbound_synapses )
+ Y -> fixate();
- _DO_ADVANCE_COMMON_INLOOP_END
+ _DO_ADVANCE_COMMON_INLOOP_END
- } while ( model_time() < time_ending );
+ } while ( model_time() < time_ending );
- _DO_ADVANCE_COMMON_EPILOG
+ _DO_ADVANCE_COMMON_EPILOG
- return steps;
+ return steps;
}
@@ -515,81 +505,81 @@ _do_advance_on_pure_ddtbound( double dist, double *cpu_time_used_p)
unsigned int
__attribute__ ((hot))
cnrun::CModel::
-_do_advance_on_mixed( double dist, double *cpu_time_used_p)
+_do_advance_on_mixed( const double dist, double * const cpu_time_used_p)
{
- bool have_hosted_units = (hosted_unit_cnt() > 0),
- is_discrete_dt_bound = _status & CN_MDL_HAS_DDTB_UNITS,
- have_listeners = (lisn_unit_list.size() > 0),
- have_discrete_listen_dt = (listen_dt > 0.),
- need_fixate_ddtbound_units;
-
- clock_t cpu_time_started = clock(),
- cpu_time_ended,
- cpu_time_lastchecked = cpu_time_started;
-
- double time_started = model_time(),
- time_ending = time_started + dist,
- last_made_listen = time_started;
-
- unsigned long steps = 0;
- do {
- _DO_ADVANCE_COMMON_INLOOP_BEGIN
-
- _integrator->cycle();
-
- // service simple units w/out any vars on the integration vector V
- for_all_standalone_neurons (N)
- if ( !(*N)->is_conscious() )
- (*N) -> preadvance();
- for_all_standalone_synapses (Y)
- (*Y) -> preadvance();
-
- // lastly, service units only serviceable at discrete dt
- if ( is_discrete_dt_bound && model_time() >= _discrete_time ) {
- for_all_ddtbound_neurons (N)
- if ( !(*N)->is_conscious() )
- (*N) -> preadvance();
- for_all_ddtbound_synapses (Y)
- (*Y) -> preadvance();
-
- _discrete_time += _discrete_dt;
- need_fixate_ddtbound_units = true;
- } else
- need_fixate_ddtbound_units = false;
-
- if ( !have_hosted_units )
- V[0] += _discrete_dt;
-
-
- _DO_ADVANCE_COMMON_INLOOP_MID
-
-
- // fixate
- _integrator->fixate();
-
- for_all_standalone_neurons (N)
- if ( !(*N)->is_conscious() )
- (*N) -> fixate();
- for_all_standalone_synapses (Y)
- (*Y) -> fixate();
-
- if ( need_fixate_ddtbound_units ) {
- for_all_ddtbound_neurons (N)
- if ( !(*N)->is_conscious() )
- (*N) -> fixate();
- for_all_ddtbound_synapses (Y)
- (*Y) -> fixate();
- }
-
-
- _DO_ADVANCE_COMMON_INLOOP_END
-
- } while ( model_time() < time_ending );
-
- _DO_ADVANCE_COMMON_EPILOG
-
- return steps;
+ bool have_hosted_units = !!n_hosted_units(),
+ have_listeners = !!listening_units.size(),
+ have_discrete_listen_dt = (options.listen_dt > 0.),
+ need_fixate_ddtbound_units;
+
+ clock_t cpu_time_started = clock(),
+ cpu_time_ended,
+ cpu_time_lastchecked = cpu_time_started;
+
+ double time_started = model_time(),
+ time_ending = time_started + dist,
+ last_made_listen = time_started;
+
+ unsigned long steps = 0;
+ do {
+ _DO_ADVANCE_COMMON_INLOOP_BEGIN
+
+ _integrator->cycle();
+
+ // service simple units w/out any vars on the integration vector V
+ for ( auto& N : standalone_neurons )
+ if ( !N->is_conscious() )
+ N -> preadvance();
+ for ( auto& Y : standalone_synapses )
+ Y -> preadvance();
+
+ // lastly, service units only serviceable at discrete dt
+ if ( this->have_ddtb_units && model_time() >= _discrete_time ) {
+ for ( auto& N : ddtbound_neurons )
+ if ( !N->is_conscious() )
+ N -> preadvance();
+ for ( auto& Y : ddtbound_synapses )
+ Y -> preadvance();
+
+ _discrete_time += _discrete_dt;
+ need_fixate_ddtbound_units = true;
+ } else
+ need_fixate_ddtbound_units = false;
+
+ if ( !have_hosted_units )
+ V[0] += _discrete_dt;
+
+ _DO_ADVANCE_COMMON_INLOOP_MID
+
+ // fixate
+ _integrator->fixate();
+
+ for ( auto& N : standalone_neurons )
+ if ( !N->is_conscious() )
+ N -> fixate();
+ for ( auto& Y : standalone_synapses )
+ Y -> fixate();
+
+ if ( need_fixate_ddtbound_units ) {
+ for ( auto& N : ddtbound_neurons )
+ if ( !N->is_conscious() )
+ N -> fixate();
+ for ( auto& Y : ddtbound_synapses )
+ Y -> fixate();
+ }
+
+ _DO_ADVANCE_COMMON_INLOOP_END
+
+ } while ( model_time() < time_ending );
+
+ _DO_ADVANCE_COMMON_EPILOG
+
+ return steps;
}
-
-// eof
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/model-nmlio.cc b/upstream/src/libcn/model-nmlio.cc
index 5006468..7e2cc22 100644
--- a/upstream/src/libcn/model-nmlio.cc
+++ b/upstream/src/libcn/model-nmlio.cc
@@ -5,13 +5,14 @@
*
* Initial version: 2008-09-02
*
- * NeuroML import/export methods for CModel
+ * Purpose: NeuroML import/export methods for CModel
*/
#include <string>
#include <iostream>
#include <regex.h>
+#include "forward-decls.hh"
#include "model.hh"
#if HAVE_CONFIG_H && !defined(VERSION)
@@ -24,24 +25,21 @@ using namespace std;
#ifdef LIBXML_READER_ENABLED
-
-
-
int
cnrun::CModel::
-import_NetworkML( const char *fname, bool appending)
+import_NetworkML( const string& fname, TNMLImportOption import_option)
{
- LIBXML_TEST_VERSION;
+ // LIBXML_TEST_VERSION;
- xmlDoc *doc = xmlReadFile( fname, nullptr, 0);
- if ( !doc )
- return CN_NMLIN_NOFILE;
+ xmlDoc *doc = xmlReadFile( fname.c_str(), nullptr, 0);
+ if ( !doc )
+ return TNMLIOResult::nofile;
- int retval = import_NetworkML( doc, fname, appending);
+ int retval = import_NetworkML( doc, fname, import_option);
- xmlFreeDoc( doc);
+ xmlFreeDoc( doc);
- return retval;
+ return retval;
}
@@ -52,105 +50,106 @@ inline namespace {
xmlNode*
find_named_root_child_elem( xmlNode *node, // node to start search from
- const char *elem) // name of the element searched for
+ const char *elem) // name of the element searched for
{
- xmlNode *n;
-
- for ( n = node->children; n; n = n->next ) {
- if ( n->type == XML_ELEMENT_NODE ) {
- if ( xmlStrEqual( n->name, BAD_CAST elem) )
- return n;
+ xmlNode *n;
+ for ( n = node->children; n; n = n->next ) {
+ if ( n->type == XML_ELEMENT_NODE ) {
+ if ( xmlStrEqual( n->name, BAD_CAST elem) )
+ return n;
// the <populations> and <projections> nodes are expected to appear as
// direct children of the root node; so don't go search deeper than that
-// if ( n->children ) { // go search deeper
-// ni = find_named_elem( n->children, elem);
-// if ( ni )
-// return ni;
-// }
- }
- }
- return nullptr;
+// if ( n->children ) { // go search deeper
+// ni = find_named_elem( n->children, elem);
+// if ( ni )
+// return ni;
+// }
+ }
+ }
+ return nullptr;
}
}
int
cnrun::CModel::
-import_NetworkML( xmlDoc *doc, const char *fname, bool appending)
+import_NetworkML( xmlDoc *doc, const string& fname,
+ TNMLImportOption import_option)
{
- int retval = 0;
+ int retval = 0;
- // we pass up on validation (for which we would need to keep a
- // .dtd or Schema at hand), and proceed to extracting elements
+ // we pass up on validation (for which we would need to keep a
+ // .dtd or Schema at hand), and proceed to extracting elements
- xmlNode *root_node = xmlDocGetRootElement( doc),
- *n;
+ xmlNode *root_node = xmlDocGetRootElement( doc),
+ *n;
// read meta:notes and make out a name for the model
- if ( !root_node ) {
- fprintf( stderr, "Failed to obtain root element\n");
- retval = CN_NMLIN_NOELEM;
- goto out;
- }
+ if ( !root_node ) {
+ fprintf( stderr, "Failed to obtain root element\n");
+ retval = TNMLIOResult::noelem;
+ goto out;
+ }
// give it a name: assume it's generated by neuroConstruct for now
- if ( !appending ) {
- reset();
- if ( !(n = find_named_root_child_elem( root_node, "notes")) ) {
- if ( verbosely > 1 )
- fprintf( stderr, "<notes> element not found; model will be unnamed\n");
- // this is not critical, so just keep the user
- // informed and proceed
- } else
- if ( n->type == XML_ELEMENT_NODE ) { // only concern ourselves with nodes of this type
- xmlChar *notes_s = xmlNodeGetContent( n);
- // look for a substring specific to neuroConstruct, which is obviously speculative
- regex_t RE;
- regcomp( &RE, ".*project: (\\w*).*", REG_EXTENDED);
- regmatch_t M[1+1];
- name = (0 == regexec( &RE, (char*)notes_s, 1+1, M, 0))
+ if ( import_option == TNMLImportOption::reset ) {
+ reset();
+ if ( !(n = find_named_root_child_elem( root_node, "notes")) ) {
+ if ( options.verbosely > 1 )
+ fprintf( stderr, "<notes> element not found; model will be unnamed\n");
+ // this is not critical, so just keep the user
+ // informed and proceed
+ } else
+ if ( n->type == XML_ELEMENT_NODE ) { // only concern ourselves with nodes of this type
+ xmlChar *notes_s = xmlNodeGetContent( n);
+ // look for a substring specific to neuroConstruct, which is obviously speculative
+ regex_t RE;
+ regcomp( &RE, ".*project: (\\w*).*", REG_EXTENDED);
+ regmatch_t M[1+1];
+ name = (0 == regexec( &RE, (char*)notes_s, 1+1, M, 0))
? string ((char*)notes_s + M[1].rm_so, M[1].rm_eo - M[1].rm_so)
: "(unnamed)";
- xmlFree( notes_s);
- } else
- name = "(unnamed)";
- }
+ xmlFree( notes_s);
+ } else
+ name = "(unnamed)";
+ }
- if ( verbosely > 0 )
- printf( "Model \"%s\": %sing topology from %s\n",
- name.c_str(), (appending ?"Append" :"Import"), fname);
+ if ( options.verbosely > 0 )
+ printf( "Model \"%s\": %s topology from %s\n",
+ name.c_str(), (import_option == TNMLImportOption::merge) ?"Merging" :"Importing",
+ fname.c_str());
- // In the following calls to _process_{populations,instances}
- // functions, the actual order of appearance of these nodes in
- // the xml file doesn't matter, thanks to the xml contents
- // being already wholly read and available to us as a tree.
+ // In the following calls to _process_{populations,instances}
+ // functions, the actual order of appearance of these nodes in
+ // the xml file doesn't matter, thanks to the xml contents
+ // being already wholly read and available to us as a tree.
// process <populations>
- if ( !(n = find_named_root_child_elem( root_node, "populations")) ) {
- retval = CN_NMLIN_NOELEM;
- goto out;
- } // assume there is only one <populations> element: don't loop to catch more
- if ( (retval = _process_populations( n->children)) < 0) // note n->children, which is in fact a pointer to the first child
- goto out;
+ if ( !(n = find_named_root_child_elem( root_node, "populations")) ) {
+ retval = TNMLIOResult::noelem;
+ goto out;
+ } // assume there is only one <populations> element: don't loop to catch more
+ if ( (retval = _process_populations( n->children)) < 0) // note n->children, which is in fact a pointer to the first child
+ goto out;
// process <projections>
// don't strictly require any projections as long as there are some neurons
- if ( (n = find_named_root_child_elem( root_node, "projections")) ) {
- if ( (retval = _process_projections( n->children)) < 0 )
- goto out;
- } else
- if ( verbosely > 2 )
- cout << "No projections found\n";
+ if ( (n = find_named_root_child_elem( root_node, "projections")) ) {
+ if ( (retval = _process_projections( n->children)) < 0 )
+ goto out;
+ } else
+ if ( options.verbosely > 2 )
+ cout << "No projections found\n";
out:
- // we are done with topology; now put units' variables on a vector
- finalize_additions();
- // can call time_step only after finalize_additions
+ // we are done with topology; now put units' variables on a vector
+ finalize_additions();
+ // can call time_step only after finalize_additions
- cout << endl;
+ cout << endl;
- return retval;
+ return retval;
}
@@ -161,65 +160,68 @@ int
cnrun::CModel::
_process_populations( xmlNode *n)
{
- xmlChar *group_id_s = nullptr,
- *cell_type_s = nullptr;
-
- int pop_cnt = 0;
-
- try {
- for ( ; n; n = n->next ) // if is nullptr (parent had no children), we won't do a single loop
- if ( n->type == XML_ELEMENT_NODE && xmlStrEqual( n->name, BAD_CAST "population") ) {
-
- group_id_s = xmlGetProp( n, BAD_CAST "name"); // BAD_CAST is just a cast to xmlChar*
- // with a catch that libxml functions
- // expect strings pointed to to be good UTF
- if ( !group_id_s ) {
- fprintf( stderr, "<population> element missing a \"name\" attribute near line %d\n", n->line);
- return CN_NMLIN_BADATTR;
- }
- // probably having an unnamed popuation isn't an error so serious as to abort the
- // operation, but discipline is above all
-
- cell_type_s = xmlGetProp( n, BAD_CAST "cell_type");
- // now we know the type of cells included in this population; remember it to pass on to
- // _process_population_instances, where it is used to select an appropriate unit type
- // when actually adding a neuron to the model
-
- // but well, let's check if we have units of that species in stock
- if ( !unit_species_is_neuron((char*)cell_type_s) && !unit_family_is_neuron((char*)cell_type_s) ) {
- fprintf( stderr, "Bad cell species or family (\"%s\") in population \"%s\"\n",
- (char*)cell_type_s, group_id_s);
- throw CN_NMLIN_BADCELLTYPE;
- }
-
- xmlNode *nin = n->children; // again, ->children means ->first
- if ( nin )
- for ( ; nin; nin = nin->next ) // deal with multiple <instances> nodes
- if ( nin->type == XML_ELEMENT_NODE && xmlStrEqual( nin->name, BAD_CAST "instances") ) {
- int subretval = _process_population_instances( nin->children,
- group_id_s, cell_type_s);
- if ( subretval < 0 )
- throw subretval;
-
- if ( verbosely > 2 )
- printf( " %5d instance(s) of type \"%s\" in population \"%s\"\n",
- subretval, cell_type_s, group_id_s);
- pop_cnt++;
- }
-
- xmlFree( cell_type_s), xmlFree( group_id_s);
- }
-
- if ( verbosely > 1 )
- printf( "\tTotal %d population(s)\n", pop_cnt);
-
- } catch (int ex) {
- xmlFree( cell_type_s), xmlFree( group_id_s);
-
- return ex;
- }
-
- return pop_cnt;
+ xmlChar *group_id_s = nullptr,
+ *cell_type_s = nullptr;
+
+ int pop_cnt = 0;
+
+ try {
+                for ( ; n; n = n->next ) { // if is nullptr (parent had no children), we won't do a single loop
+                for ( ; n; n = n->next ) { // if is nullptr (parent had no children), we won't do a single loop
+ continue;
+
+ group_id_s = xmlGetProp( n, BAD_CAST "name");
+ // BAD_CAST is just a cast to xmlChar*
+ // with a catch that libxml functions
+ // expect strings pointed to to be good UTF
+ if ( !group_id_s ) {
+ fprintf( stderr, "<population> element missing a \"name\" attribute near line %d\n", n->line);
+ return TNMLIOResult::badattr;
+ }
+ // probably having an unnamed popuation isn't an error so serious as to abort the
+ // operation, but discipline is above all
+
+ cell_type_s = xmlGetProp( n, BAD_CAST "cell_type");
+ // now we know the type of cells included in this population; remember it to pass on to
+ // _process_population_instances, where it is used to select an appropriate unit type
+ // when actually adding a neuron to the model
+
+ // but well, let's check if we have units of that species in stock
+ if ( !unit_species_is_neuron((char*)cell_type_s) && !unit_family_is_neuron((char*)cell_type_s) ) {
+ fprintf( stderr, "Bad cell species or family (\"%s\") in population \"%s\"\n",
+ (char*)cell_type_s, group_id_s);
+ throw TNMLIOResult::badcelltype;
+ }
+
+ xmlNode *nin = n->children; // again, ->children means ->first
+ if ( nin )
+ for ( ; nin; nin = nin->next ) // deal with multiple <instances> nodes
+ if ( nin->type == XML_ELEMENT_NODE && xmlStrEqual( nin->name, BAD_CAST "instances") ) {
+ int subretval = _process_population_instances(
+ nin->children,
+ group_id_s, cell_type_s);
+ if ( subretval < 0 )
+ throw subretval;
+
+ if ( options.verbosely > 2 )
+ printf( " %5d instance(s) of type \"%s\" in population \"%s\"\n",
+ subretval, cell_type_s, group_id_s);
+ ++pop_cnt;
+ }
+
+ xmlFree( cell_type_s), xmlFree( group_id_s);
+ }
+
+ if ( options.verbosely > 1 )
+ printf( "\tTotal %d population(s)\n", pop_cnt);
+
+ } catch (int ex) {
+ xmlFree( cell_type_s), xmlFree( group_id_s);
+
+ return ex;
+ }
+
+ return pop_cnt;
}
@@ -231,75 +233,76 @@ int
cnrun::CModel::
_process_projections( xmlNode *n)
{
- // much the same code as in _process_populations
-
- xmlChar *prj_name_s = nullptr,
- *prj_src_s = nullptr,
- *prj_tgt_s = nullptr,
- *synapse_type_s = nullptr;
-
- size_t pop_cnt = 0;
-
- try {
- for ( ; n; n = n->next ) {
- if ( n->type != XML_ELEMENT_NODE || !xmlStrEqual( n->name, BAD_CAST "projection") )
- continue;
-
- prj_name_s = xmlGetProp( n, BAD_CAST "name");
- if ( !prj_name_s ) {
- fprintf( stderr, "<projection> element missing a \"name\" attribute near line %u\n", n->line);
- return CN_NMLIN_BADATTR;
- }
-
- prj_src_s = xmlGetProp( n, BAD_CAST "source");
- prj_tgt_s = xmlGetProp( n, BAD_CAST "target");
- if ( !prj_src_s || !prj_tgt_s ) {
- fprintf( stderr, "Projection \"%s\" missing a \"source\" and/or \"target\" attribute near line %u\n",
- prj_name_s, n->line);
- throw CN_NMLIN_BADATTR;
- }
-
- xmlNode *nin;
- nin = n->children;
- if ( !nin )
- fprintf( stderr, "Empty <projection> node near line %d\n", n->line);
-
- for ( ; nin; nin = nin->next )
- if ( nin->type == XML_ELEMENT_NODE && xmlStrEqual( nin->name, BAD_CAST "synapse_props") ) {
- synapse_type_s = xmlGetProp( nin, BAD_CAST "synapse_type");
- if ( !unit_species_is_synapse((char*)synapse_type_s) &&
- !unit_family_is_synapse((char*)synapse_type_s) ) {
- fprintf( stderr, "Bad synapse type \"%s\" near line %u\n",
- (char*)synapse_type_s, nin->line);
- throw CN_NMLIN_BADCELLTYPE;
- }
- }
-
- for ( nin = n->children; nin; nin = nin->next )
- if ( nin->type == XML_ELEMENT_NODE && xmlStrEqual( nin->name, BAD_CAST "connections") ) {
- int subretval = _process_projection_connections( nin->children,
- prj_name_s, synapse_type_s,
- prj_src_s, prj_tgt_s);
- if ( subretval < 0 )
- throw subretval;
-
- if ( verbosely > 2 )
- printf( " %5d connection(s) of type \"%s\" in projection \"%s\"\n",
- subretval, synapse_type_s, prj_name_s);
- pop_cnt++;
- }
- xmlFree( prj_name_s), xmlFree( prj_src_s), xmlFree( prj_tgt_s);
- }
-
- if ( verbosely > 1 )
- printf( "\tTotal %zd projection(s)\n", pop_cnt);
-
- } catch (int ex) {
- xmlFree( prj_name_s), xmlFree( prj_src_s), xmlFree( prj_tgt_s);
- return ex;
- }
-
- return (int)pop_cnt;
+ // much the same code as in _process_populations
+
+ xmlChar *prj_name_s = nullptr,
+ *prj_src_s = nullptr,
+ *prj_tgt_s = nullptr,
+ *synapse_type_s = nullptr;
+
+ size_t pop_cnt = 0;
+
+ try {
+ for ( ; n; n = n->next ) {
+ if ( n->type != XML_ELEMENT_NODE || !xmlStrEqual( n->name, BAD_CAST "projection") )
+ continue;
+
+ prj_name_s = xmlGetProp( n, BAD_CAST "name");
+ if ( !prj_name_s ) {
+ fprintf( stderr, "<projection> element missing a \"name\" attribute near line %u\n", n->line);
+ return TNMLIOResult::badattr;
+ }
+
+ prj_src_s = xmlGetProp( n, BAD_CAST "source");
+ prj_tgt_s = xmlGetProp( n, BAD_CAST "target");
+ if ( !prj_src_s || !prj_tgt_s ) {
+ fprintf( stderr, "Projection \"%s\" missing a \"source\" and/or \"target\" attribute near line %u\n",
+ prj_name_s, n->line);
+ throw TNMLIOResult::badattr;
+ }
+
+ xmlNode *nin;
+ nin = n->children;
+ if ( !nin )
+ fprintf( stderr, "Empty <projection> node near line %d\n", n->line);
+
+ for ( ; nin; nin = nin->next )
+ if ( nin->type == XML_ELEMENT_NODE && xmlStrEqual( nin->name, BAD_CAST "synapse_props") ) {
+ synapse_type_s = xmlGetProp( nin, BAD_CAST "synapse_type");
+ if ( !unit_species_is_synapse( (char*)synapse_type_s) &&
+ !unit_family_is_synapse( (char*)synapse_type_s) ) {
+ fprintf( stderr, "Bad synapse type \"%s\" near line %u\n",
+ (char*)synapse_type_s, nin->line);
+ throw TNMLIOResult::badcelltype;
+ }
+ }
+
+ for ( nin = n->children; nin; nin = nin->next )
+ if ( nin->type == XML_ELEMENT_NODE && xmlStrEqual( nin->name, BAD_CAST "connections") ) {
+ int subretval = _process_projection_connections(
+ nin->children,
+ prj_name_s, synapse_type_s,
+ prj_src_s, prj_tgt_s);
+ if ( subretval < 0 )
+ throw subretval;
+
+ if ( options.verbosely > 2 )
+ printf( " %5d connection(s) of type \"%s\" in projection \"%s\"\n",
+ subretval, synapse_type_s, prj_name_s);
+ ++pop_cnt;
+ }
+ xmlFree( prj_name_s), xmlFree( prj_src_s), xmlFree( prj_tgt_s);
+ }
+
+ if ( options.verbosely > 1 )
+ printf( "\tTotal %zd projection(s)\n", pop_cnt);
+
+ } catch (int ex) {
+ xmlFree( prj_name_s), xmlFree( prj_src_s), xmlFree( prj_tgt_s);
+ return ex;
+ }
+
+ return (int)pop_cnt;
}
@@ -310,83 +313,90 @@ _process_projections( xmlNode *n)
int
cnrun::CModel::
-_process_population_instances( xmlNode *n, const xmlChar *group_prefix, const xmlChar *type_s)
+_process_population_instances(
+ xmlNode *n,
+ const xmlChar *group_prefix,
+ const xmlChar *type_s)
{
- int retval = 0; // also keeps a count of added neurons
-
- double x, y, z;
- char cell_id[CN_MAX_LABEL_SIZE];
-
- xmlNode *nin;
-
- xmlChar *id_s = nullptr;
- try {
- for ( ; n; n = n->next ) {
- if ( n->type != XML_ELEMENT_NODE || !xmlStrEqual( n->name, BAD_CAST "instance") )
- continue;
-
- xmlChar *id_s = xmlGetProp( n, BAD_CAST "id");
- if ( !id_s ) {
- // could be less strict here and allow empty ids, which will then be composed
- // from group_prefix + id (say, "LN0", "LN1" and so on); but then, as
- // individual <projection>s would have to reference both endpoints by explicit
- // ids, it is obviously prone to error to have <instance> ids depend solely on
- // their order of appearance.
- // So we bark at empty ids.
- fprintf( stderr, "<instance> element without an \"id\" attribute near line %u\n", n->line);
- return CN_NMLIN_BADATTR;
- }
-
- size_t total_len = xmlStrlen( group_prefix) + xmlStrlen( id_s);
- if ( total_len >= CN_MAX_LABEL_SIZE ) {
- fprintf( stderr, "Combined label for an <instance> (\"%s%s\") exceeding %d characters near line %u\n",
- group_prefix, id_s, CN_MAX_LABEL_SIZE, n->line);
- throw CN_NMLIN_BIGLABEL;
- }
- _longest_label = max( _longest_label,
- (unsigned short)snprintf( cell_id, CN_MAX_LABEL_SIZE-1, "%s.%s",
- group_prefix, id_s)); // here, a new instance is given a name
- xmlFree( id_s);
-
- if ( !(nin = n->children) )
- return retval;
-
- for ( ; nin; nin = nin->next ) {
- if ( !(nin->type == XML_ELEMENT_NODE &&
- xmlStrEqual( nin->name, BAD_CAST "location")) )
- continue;
-
- xmlChar *x_s = xmlGetProp( nin, BAD_CAST "x"),
- *y_s = xmlGetProp( nin, BAD_CAST "y"),
- *z_s = xmlGetProp( nin, BAD_CAST "z");
- // here we do actually insert neurons into the model
- if ( !(x_s && y_s && z_s) )
- if ( verbosely > 1 )
- fprintf( stderr, "<location> element missing full set of coordinates near line %d\n", nin->line);
- // not an error
- x = strtod( (char*)x_s, nullptr), y = strtod( (char*)y_s, nullptr), z = strtod( (char*)z_s, nullptr);
- xmlFree( x_s), xmlFree( y_s), xmlFree( z_s);
-
- C_BaseNeuron *neu = add_neuron_species( (char*)type_s, cell_id, false);
-
- if ( !neu || neu->_status & CN_UERROR ) {
- if ( neu )
- delete neu;
- fprintf( stderr, "Failed to add a neuron \"%s\" near line %u\n", cell_id, n->line);
- return CN_NMLIN_STRUCTERROR;
- } else {
- neu->_serial_id = _global_unit_id_reservoir++;
- neu->pos = {x, y, z};
- retval++;
- }
- }
- }
- } catch (int ex) {
- xmlFree( id_s);
- return ex;
- }
-
- return retval;
+ int retval = 0; // also keeps a count of added neurons
+
+ double x, y, z;
+ char cell_id[C_BaseUnit::max_label_size];
+
+ xmlNode *nin;
+
+ xmlChar *id_s = nullptr;
+ try {
+ for ( ; n; n = n->next ) {
+ if ( n->type != XML_ELEMENT_NODE || !xmlStrEqual( n->name, BAD_CAST "instance") )
+ continue;
+
+ xmlChar *id_s = xmlGetProp( n, BAD_CAST "id");
+ if ( !id_s ) {
+ // could be less strict here and allow empty ids, which will then be composed
+ // from group_prefix + id (say, "LN0", "LN1" and so on); but then, as
+ // individual <projection>s would have to reference both endpoints by explicit
+ // ids, it is obviously prone to error to have <instance> ids depend solely on
+ // their order of appearance.
+ // So we bark at empty ids.
+ fprintf( stderr, "<instance> element without an \"id\" attribute near line %u\n", n->line);
+ return TNMLIOResult::badattr;
+ }
+
+ size_t total_len = xmlStrlen( group_prefix) + xmlStrlen( id_s);
+ if ( total_len >= C_BaseUnit::max_label_size ) {
+ fprintf( stderr, "Combined label for an <instance> (\"%s%s\") exceeding %d characters near line %u\n",
+ group_prefix, id_s, C_BaseUnit::max_label_size, n->line);
+ throw TNMLIOResult::biglabel;
+ }
+ _longest_label = max(
+ _longest_label,
+ (unsigned short)snprintf(
+ cell_id, C_BaseUnit::max_label_size-1, "%s.%s",
+ group_prefix, id_s)); // here, a new instance is given a name
+ xmlFree( id_s);
+
+ if ( !(nin = n->children) )
+ return retval;
+
+ for ( ; nin; nin = nin->next ) {
+ if ( !(nin->type == XML_ELEMENT_NODE &&
+ xmlStrEqual( nin->name, BAD_CAST "location")) )
+ continue;
+
+ xmlChar *x_s = xmlGetProp( nin, BAD_CAST "x"),
+ *y_s = xmlGetProp( nin, BAD_CAST "y"),
+ *z_s = xmlGetProp( nin, BAD_CAST "z");
+ // here we do actually insert neurons into the model
+ if ( !(x_s && y_s && z_s) )
+ if ( options.verbosely > 1 )
+ fprintf( stderr, "<location> element missing full set of coordinates near line %d\n", nin->line);
+ // not an error
+ x = strtod( (char*)x_s, nullptr), y = strtod( (char*)y_s, nullptr), z = strtod( (char*)z_s, nullptr);
+ xmlFree( x_s), xmlFree( y_s), xmlFree( z_s);
+
+ C_BaseNeuron *neu = add_neuron_species(
+ (char*)type_s, cell_id,
+ TIncludeOption::is_notlast);
+
+ if ( !neu || neu->_status & CN_UERROR ) {
+ if ( neu )
+ delete neu;
+ fprintf( stderr, "Failed to add a neuron \"%s\" near line %u\n", cell_id, n->line);
+ return TNMLIOResult::structerror;
+ } else {
+ neu->_serial_id = _global_unit_id_reservoir++;
+ neu->pos = make_tuple( x, y, z);
+ ++retval;
+ }
+ }
+ }
+ } catch (int ex) {
+ xmlFree( id_s);
+ return ex;
+ }
+
+ return retval;
}
@@ -394,94 +404,101 @@ _process_population_instances( xmlNode *n, const xmlChar *group_prefix, const xm
int
cnrun::CModel::
-_process_projection_connections( xmlNode *n,
- const xmlChar *synapse_name, const xmlChar *type_s,
- const xmlChar *src_grp_prefix, const xmlChar *tgt_grp_prefix)
+_process_projection_connections(
+ xmlNode *n,
+ const xmlChar *synapse_name,
+ const xmlChar *type_s,
+ const xmlChar *src_grp_prefix,
+ const xmlChar *tgt_grp_prefix)
{
- // similar to _process_population_instances, except that we read some more attributes (source and
- // target units)
-
- int retval = 0; // is also a counter of synapses
-
- char //synapse_id [CN_MAX_LABEL_SIZE],
- src_s[CN_MAX_LABEL_SIZE],
- tgt_s[CN_MAX_LABEL_SIZE];
- double weight;
-
- C_BaseSynapse *y;
-
- xmlChar *src_cell_id_s = nullptr,
- *tgt_cell_id_s = nullptr,
- *weight_s = nullptr;
- try {
- for ( ; n; n = n->next ) {
- if ( n->type != XML_ELEMENT_NODE || !xmlStrEqual( n->name, BAD_CAST "connection") )
- continue;
-
- src_cell_id_s = xmlGetProp( n, BAD_CAST "pre_cell_id"),
- tgt_cell_id_s = xmlGetProp( n, BAD_CAST "post_cell_id"),
- weight_s = xmlGetProp( n, BAD_CAST "weight");
- if ( /*!synapse_id_s || */ !src_cell_id_s || !tgt_cell_id_s ) {
- fprintf( stderr, "A <connection> element without \"pre_cell_id\" and/or \"post_cell_id\" attribute near line %u\n", n->line);
- throw CN_NMLIN_BADATTR;
- }
-
- snprintf( src_s, CN_MAX_LABEL_SIZE-1, "%s.%s", src_grp_prefix, src_cell_id_s);
- snprintf( tgt_s, CN_MAX_LABEL_SIZE-1, "%s.%s", tgt_grp_prefix, tgt_cell_id_s);
-
- if ( !weight_s ) {
- if ( verbosely > 1 )
- fprintf( stderr, "Assuming 0 for a synapse of \"%s.%s\" to \"%s%s\" without a \"weight\" attribute near line %u\n",
- src_grp_prefix, src_cell_id_s, tgt_grp_prefix, tgt_cell_id_s, n->line);
- weight = 0.;
- }
- /* xmlFree( synapse_id_s), */ xmlFree( src_cell_id_s), xmlFree( tgt_cell_id_s),
- xmlFree( weight_s);
-
- y = add_synapse_species( (char*)type_s, src_s, tgt_s, weight, true, false);
-
- if ( !y || y->_status & CN_UERROR ) {
- if ( y )
- delete y;
- fprintf( stderr, "Failed to add an \"%s\" synapse from \"%s\" to \"%s\" near line %u\n",
- (char*)type_s, src_s, tgt_s, n->line);
- return CN_NMLIN_STRUCTERROR;
- } else
- retval++;
- }
- } catch (int ex) {
- /* xmlFree( synapse_id_s), */ xmlFree( src_cell_id_s), xmlFree( tgt_cell_id_s),
- xmlFree( weight_s);
- return ex;
- }
-
- return retval;
+ // similar to _process_population_instances, except that we read some more attributes (source and
+ // target units)
+
+ int retval = 0; // is also a counter of synapses
+
+ char //synapse_id [C_BaseUnit::max_label_size],
+ src_s[C_BaseUnit::max_label_size],
+ tgt_s[C_BaseUnit::max_label_size];
+ double weight;
+
+ C_BaseSynapse *y;
+
+ xmlChar *src_cell_id_s = nullptr,
+ *tgt_cell_id_s = nullptr,
+ *weight_s = nullptr;
+ try {
+ for ( ; n; n = n->next ) {
+ if ( n->type != XML_ELEMENT_NODE || !xmlStrEqual( n->name, BAD_CAST "connection") )
+ continue;
+
+ src_cell_id_s = xmlGetProp( n, BAD_CAST "pre_cell_id"),
+ tgt_cell_id_s = xmlGetProp( n, BAD_CAST "post_cell_id"),
+ weight_s = xmlGetProp( n, BAD_CAST "weight");
+ if ( /*!synapse_id_s || */ !src_cell_id_s || !tgt_cell_id_s ) {
+ fprintf( stderr, "A <connection> element without \"pre_cell_id\" and/or \"post_cell_id\" attribute near line %u\n", n->line);
+ throw TNMLIOResult::badattr;
+ }
+
+ snprintf( src_s, C_BaseUnit::max_label_size-1, "%s.%s", src_grp_prefix, src_cell_id_s);
+ snprintf( tgt_s, C_BaseUnit::max_label_size-1, "%s.%s", tgt_grp_prefix, tgt_cell_id_s);
+
+ if ( !weight_s ) {
+ if ( options.verbosely > 1 )
+ fprintf( stderr, "Assuming 0 for a synapse of \"%s.%s\" to \"%s%s\" without a \"weight\" attribute near line %u\n",
+ src_grp_prefix, src_cell_id_s, tgt_grp_prefix, tgt_cell_id_s, n->line);
+ weight = 0.;
+ }
+ /* xmlFree( synapse_id_s), */ xmlFree( src_cell_id_s), xmlFree( tgt_cell_id_s),
+ xmlFree( weight_s);
+
+ y = add_synapse_species(
+ (char*)type_s, src_s, tgt_s,
+ weight,
+ TSynapseCloningOption::yes,
+ TIncludeOption::is_notlast);
+
+ if ( !y || y->_status & CN_UERROR ) {
+ if ( y )
+ delete y;
+ fprintf( stderr, "Failed to add an \"%s\" synapse from \"%s\" to \"%s\" near line %u\n",
+ (char*)type_s, src_s, tgt_s, n->line);
+ return TNMLIOResult::structerror;
+ } else
+ ++retval;
+ }
+
+ } catch (int ex) {
+ /* xmlFree( synapse_id_s), */ xmlFree( src_cell_id_s), xmlFree( tgt_cell_id_s),
+ xmlFree( weight_s);
+ return ex;
+ }
+
+ return retval;
}
int
cnrun::CModel::
-export_NetworkML( const char *fname)
+export_NetworkML( const string& fname)
{
- int retval = 0;
+ int retval = 0;
- LIBXML_TEST_VERSION;
+ LIBXML_TEST_VERSION;
- fprintf( stderr, "export_NetworkML() not implemented yet\n");
+ fprintf( stderr, "export_NetworkML() not implemented yet\n");
- return retval;
+ return retval;
}
-
-
-
-
-
#else
-#error Need an XMLREADER-enabled libxml2 (>2.6)
-
+# error Need an XMLREADER-enabled libxml2 (>2.6)
#endif // LIBXML_READER_ENABLED
-// eof
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/model-struct.cc b/upstream/src/libcn/model-struct.cc
index d571672..1bffa66 100644
--- a/upstream/src/libcn/model-struct.cc
+++ b/upstream/src/libcn/model-struct.cc
@@ -13,6 +13,7 @@
#include <iostream>
#include <set>
#include <algorithm>
+#include <functional>
#include <regex.h>
@@ -26,101 +27,101 @@
using namespace std;
-
+using namespace cnrun::stilton::str;
cnrun::CModel::
-CModel( const char *inname, CIntegrate_base *inintegrator, int instatus)
+CModel( const string& inname, CIntegrate_base *inintegrator, const SModelOptions& inoptions)
: name (inname),
- _status (instatus | CN_MDL_NOTREADY),
- _global_unit_id_reservoir (0l),
- _longest_label (1),
- _var_cnt (1), // reserve [0] for model_time
- _cycle (0),
- _discrete_time (0.), _discrete_dt (NAN),
- spike_threshold (0.),
- spike_lapse (5.),
- listen_dt (0),
- _dt_logger (nullptr),
- _spike_logger (nullptr), // open these streams at first write instead in prepare_advance()
- verbosely (1)
+ options (inoptions),
+ _global_unit_id_reservoir (0l),
+ V (1),
+ W (1),
+ _var_cnt (1), // reserve [0] for model_time
+ _cycle (0),
+ _discrete_time (0.), _discrete_dt (NAN),
+ _dt_logger (nullptr),
+ _spike_logger (nullptr), // open these streams at first write instead in prepare_advance()
+ is_ready (false),
+ is_diskless (false),
+ have_ddtb_units (false),
+ _longest_label (1)
{
- V.resize( _var_cnt), W.resize( _var_cnt);
- V[0] = 0.;
-
- (_integrator = inintegrator) -> model = this;
-
- {
- const gsl_rng_type * T;
- gsl_rng_env_setup();
- T = gsl_rng_default;
- if ( gsl_rng_default_seed == 0 ) {
- struct timeval tp = { 0L, 0L };
- gettimeofday( &tp, nullptr);
- gsl_rng_default_seed = tp.tv_usec;
- }
- _rng = gsl_rng_alloc( T);
- }
-
- signal( SIGINT, SIG_IGN);
+ V[0] = 0.;
+
+ (_integrator = inintegrator) -> model = this;
+
+ {
+ const gsl_rng_type * T;
+ gsl_rng_env_setup();
+ T = gsl_rng_default;
+ if ( gsl_rng_default_seed == 0 ) {
+ struct timeval tp = { 0L, 0L };
+ gettimeofday( &tp, nullptr);
+ gsl_rng_default_seed = tp.tv_usec;
+ }
+ _rng = gsl_rng_alloc( T);
+ }
+
+ // don't abort interpreter with ^C
+ signal( SIGINT, SIG_IGN);
}
cnrun::CModel::
~CModel()
{
- if ( verbosely > 4 )
- fprintf( stdout, "Deleting all units...\n");
+ if ( options.verbosely > 4 )
+ fprintf( stdout, "Deleting all units...\n");
- while (unit_list.size())
- if ( unit_list.back() -> is_owned() )
- delete unit_list.back();
- else
- unit_list.pop_back();
+ while (units.size())
+ if ( units.back() -> is_owned() )
+ delete units.back();
+ else
+ units.pop_back();
- if ( _integrator->is_owned )
- delete _integrator;
+ if ( _integrator->is_owned )
+ delete _integrator;
- delete _dt_logger;
- delete _spike_logger;
+ delete _dt_logger;
+ delete _spike_logger;
- while ( Sources.size() ) {
- delete Sources.back();
- Sources.pop_back();
- }
+ while ( sources.size() ) {
+ delete sources.back();
+ sources.pop_back();
+ }
- gsl_rng_free( _rng);
+ gsl_rng_free( _rng);
}
void
cnrun::CModel::
-reset( bool also_reset_params)
+reset( TResetOption option)
{
- _cycle = 0, V[0] = 0.;
+ _cycle = 0;
+ V[0] = 0.;
- _integrator->dt = _integrator->_dt_min;
+ _integrator->dt = _integrator->_dt_min;
- reset_state_all_units();
- if ( also_reset_params )
- for_all_units (U)
- (*U)->reset_params();
+ reset_state_all_units();
+ if ( option == TResetOption::with_params )
+ for_each ( units.begin(), units.end(),
+ [] (C_BaseUnit* u) { u->reset_params(); });
- regular_periods.clear();
- regular_periods_last_checked.clear();
+ regular_periods.clear();
+ regular_periods_last_checked.clear();
// this will cause scheduler_update_periods_* to be recomputed by prepare_advance()
- _status |= CN_MDL_NOTREADY;
-
- if ( _status & CN_MDL_LOGDT ) {
- delete _dt_logger;
- string fname = name + ".dtlog";
- _dt_logger = new ofstream( fname.data());
- }
- if ( _status & CN_MDL_LOGSPIKERS ) {
- delete _spike_logger;
- string fname = name + ".spikes";
- _spike_logger = new ofstream( fname.data());
- }
+ is_ready = false;
+
+ if ( options.log_dt ) {
+ delete _dt_logger;
+ _dt_logger = new ofstream( (name + ".dtlog").data());
+ }
+ if ( options.log_spikers ) {
+ delete _spike_logger;
+ _spike_logger = new ofstream( (name + ".spikes").data());
+ }
}
@@ -132,32 +133,32 @@ reset( bool also_reset_params)
cnrun::C_BaseUnit*
cnrun::CModel::
-unit_by_label( const char *inlabel) const
+unit_by_label( const string& label) const
{
- for_all_units_const (U)
- if ( strcmp( (*U)->_label, inlabel) == 0 )
- return *U;
- return nullptr;
+ for ( const auto& U : units )
+ if ( label == U->_label )
+ return U;
+ return nullptr;
}
cnrun::C_BaseNeuron*
cnrun::CModel::
-neuron_by_label( const char *inlabel) const
+neuron_by_label( const string& label) const
{
- for_all_units_const (U)
- if ( (*U)->is_neuron() && strcmp( (*U)->_label, inlabel) == 0 )
- return static_cast<C_BaseNeuron*>(*U);
- return nullptr;
+ for ( const auto& U : units )
+ if ( U->is_neuron() && label == U->label() )
+ return static_cast<C_BaseNeuron*>(U);
+ return nullptr;
}
cnrun::C_BaseSynapse*
cnrun::CModel::
-synapse_by_label( const char *inlabel) const
+synapse_by_label( const string& label) const
{
- for_all_units_const (U)
- if ( (*U)->is_synapse() && strcmp( (*U)->_label, inlabel) == 0 )
- return static_cast<C_BaseSynapse*>(*U);
- return nullptr;
+ for ( const auto& U : units )
+ if ( U->is_synapse() && label == U->label() )
+ return static_cast<C_BaseSynapse*>(U);
+ return nullptr;
}
@@ -171,33 +172,29 @@ void
cnrun::CModel::
_include_base_unit( C_BaseUnit* u)
{
- for_all_units (U)
- if ( (*U) == u ) {
- fprintf( stderr, "Unit %s found already included in model %s\n", u->_label, name.c_str());
- goto skip_ul_pushback;
- }
- unit_list.push_back( u);
-skip_ul_pushback:
-
- if ( verbosely > 5 )
- fprintf( stdout, " registered base unit %s\n", u->_label);
-
- if ( u->has_sources() )
- register_unit_with_sources( u);
-
- if ( u->is_listening() ) {
- for_all_listening_units (U)
- if ( (*U) == u ) {
- fprintf( stderr, "Unit \"%s\" already on listening list\n", u->_label);
- goto skip_lisn_reg;
- }
- lisn_unit_list.push_back( u);
- }
-skip_lisn_reg:
-
- u->M = this;
-
- u->_serial_id = _global_unit_id_reservoir++;
+ if ( any_of( units.begin(), units.end(),
+ bind(equal_to<C_BaseUnit*>(), placeholders::_1, u)) )
+ fprintf( stderr, "Unit %s found already included in model %s\n",
+ u->_label, name.c_str());
+ else
+ units.push_back( u);
+
+ if ( options.verbosely > 5 )
+ fprintf( stdout, " registered base unit %s\n", u->_label);
+
+ if ( u->has_sources() )
+ register_unit_with_sources( u);
+
+ if ( u->is_listening() ) {
+ if ( count( listening_units.begin(), listening_units.end(), u) )
+ fprintf( stderr, "Unit \"%s\" already on listening list\n",
+ u->_label);
+ else
+ listening_units.push_back( u);
+ }
+
+ u->M = this;
+ u->_serial_id = _global_unit_id_reservoir++;
}
@@ -205,45 +202,45 @@ skip_lisn_reg:
int
cnrun::CModel::
-include_unit( C_HostedNeuron *u, bool is_last)
+include_unit( C_HostedNeuron *u, const TIncludeOption option)
{
- _include_base_unit( u);
+ _include_base_unit( u);
- u->idx = _var_cnt;
- _var_cnt += u->v_no();
+ u->idx = _var_cnt;
+ _var_cnt += u->v_no();
- hosted_neu_list.push_back( u);
+ hosted_neurons.push_back( u);
- // if ( u->_spikelogger_agent && !(u->_spikelogger_agent->_status & CN_KL_IDLE) )
- // spikelogging_neu_list.push_back( u);
+ // if ( u->_spikelogger_agent && !(u->_spikelogger_agent->_status & CN_KL_IDLE) )
+ // spikelogging_neurons.push_back( u);
- if ( u->is_conscious() )
- conscious_neu_list.push_back( u);
+ if ( u->is_conscious() )
+ conscious_neurons.push_back( u);
- if ( is_last )
- finalize_additions();
+ if ( option == TIncludeOption::is_last )
+ finalize_additions();
- return 0;
+ return 0;
}
int
cnrun::CModel::
-include_unit( C_HostedSynapse *u, bool is_last)
+include_unit( C_HostedSynapse *u, const TIncludeOption option)
{
- _include_base_unit( u);
+ _include_base_unit( u);
- u->idx = _var_cnt;
- _var_cnt += u->v_no();
+ u->idx = _var_cnt;
+ _var_cnt += u->v_no();
- hosted_syn_list.push_back( u);
+ hosted_synapses.push_back( u);
- if ( u->traits() & UT_MULTIPLEXING )
- mx_syn_list.push_back( u);
+ if ( u->traits() & UT_MULTIPLEXING )
+ multiplexing_synapses.push_back( u);
- if ( is_last )
- finalize_additions();
+ if ( option == TIncludeOption::is_last )
+ finalize_additions();
- return 0;
+ return 0;
}
@@ -252,20 +249,20 @@ int
cnrun::CModel::
include_unit( C_StandaloneNeuron *u)
{
- _include_base_unit( u);
+ _include_base_unit( u);
- // if ( u->_spikelogger_agent && !(u->_spikelogger_agent->_status & CN_KL_IDLE) )
- // spikelogging_neu_list.push_back( u);
+ // if ( u->_spikelogger_agent && !(u->_spikelogger_agent->_status & CN_KL_IDLE) )
+ // spikelogging_neurons.push_back( u);
- if ( u->is_conscious() )
- conscious_neu_list.push_back( u);
+ if ( u->is_conscious() )
+ conscious_neurons.push_back( u);
- if ( u->is_ddtbound() )
- ddtbound_neu_list.push_back( u);
- else
- standalone_neu_list.push_back( u);
+ if ( u->is_ddtbound() )
+ ddtbound_neurons.push_back( u);
+ else
+ standalone_neurons.push_back( u);
- return 0;
+ return 0;
}
@@ -274,23 +271,23 @@ cnrun::CModel::
include_unit( C_StandaloneSynapse *u)
{
/*
- if ( _check_new_synapse( u) ) {
-// u->enable( false);
- u->M = nullptr;
- return -1;
- }
+ if ( _check_new_synapse( u) ) {
+// u->enable( false);
+ u->M = nullptr;
+ return -1;
+ }
*/
- _include_base_unit( u);
+ _include_base_unit( u);
- if ( u->is_ddtbound() )
- ddtbound_syn_list.push_back( u);
- else
- standalone_syn_list.push_back( u);
+ if ( u->is_ddtbound() )
+ ddtbound_synapses.push_back( u);
+ else
+ standalone_synapses.push_back( u);
- if ( u->traits() & UT_MULTIPLEXING )
- mx_syn_list.push_back( u);
+ if ( u->traits() & UT_MULTIPLEXING )
+ multiplexing_synapses.push_back( u);
- return 0;
+ return 0;
}
@@ -298,69 +295,74 @@ include_unit( C_StandaloneSynapse *u)
// preserve the unit if !do_delete, so it can be re-included again
cnrun::C_BaseUnit*
cnrun::CModel::
-exclude_unit( C_BaseUnit *u, bool do_delete)
+exclude_unit( C_BaseUnit *u, const TExcludeOption option)
{
- if ( __cn_verbosely > 5 )
- fprintf( stderr, "-excluding unit \"%s\"", u->_label);
-
- if ( u->has_sources() )
- unregister_unit_with_sources( u);
-
- if ( u->is_listening() )
- u->stop_listening(); // also calls unregister_listener
-
- if ( u->is_synapse() && u->traits() & UT_MULTIPLEXING )
- mx_syn_list.erase( find( mx_syn_list.begin(), mx_syn_list.end(), u));
-
- if ( u->is_conscious() )
- conscious_neu_list.erase( find(conscious_neu_list.begin(), conscious_neu_list.end(), u));
-
- if ( u->is_hostable() ) {
- size_t our_idx;
- if ( u->is_neuron() ) {
- hosted_neu_list.erase( find( hosted_neu_list.begin(), hosted_neu_list.end(), u));
- our_idx = ((C_HostedNeuron*)u) -> idx;
- } else {
- hosted_syn_list.erase( find( hosted_syn_list.begin(), hosted_syn_list.end(), u));
- our_idx = ((C_HostedSynapse*)u) -> idx;
- }
-
- // shrink V
- if ( __cn_verbosely > 5 )
- fprintf( stderr, " (shrink V by %d)", u->v_no());
- for_all_hosted_neurons (N)
- if ( (*N)->idx > our_idx )
- (*N)->idx -= u->v_no();
- for_all_hosted_synapses (Y)
- if ( (*Y)->idx > our_idx )
- (*Y)->idx -= u->v_no();
- memmove( &V[our_idx], &V[our_idx+u->v_no()], (_var_cnt - our_idx - u->v_no()) * sizeof(double));
- V.resize( _var_cnt -= u->v_no());
- }
- if ( u->is_ddtbound() ) {
- if ( u->is_neuron() )
- ddtbound_neu_list.erase( find( ddtbound_neu_list.begin(), ddtbound_neu_list.end(), u));
- else
- ddtbound_syn_list.erase( find( ddtbound_syn_list.begin(), ddtbound_syn_list.end(), u));
- }
- if ( !u->is_hostable() ) {
- if ( u->is_neuron() )
- standalone_neu_list.erase( find( standalone_neu_list.begin(), standalone_neu_list.end(), u));
- else
- standalone_syn_list.erase( find( standalone_syn_list.begin(), standalone_syn_list.end(), u));
- }
-
- unit_list.erase( find( unit_list.begin(), unit_list.end(), u));
-
- if ( do_delete ) {
- delete u;
- u = nullptr;
- } else
- u->M = nullptr;
-
- if ( __cn_verbosely > 5 )
- fprintf( stderr, ".\n");
- return u;
+ if ( options.verbosely > 5 )
+ fprintf( stderr, "-excluding unit \"%s\"", u->_label);
+
+ if ( u->has_sources() )
+ unregister_unit_with_sources( u);
+
+ if ( u->is_listening() )
+ u->stop_listening(); // also calls unregister_listener
+
+ if ( u->is_synapse() && u->traits() & UT_MULTIPLEXING )
+ multiplexing_synapses.erase( find( multiplexing_synapses.begin(), multiplexing_synapses.end(), u));
+
+ if ( u->is_conscious() )
+ conscious_neurons.erase(
+ find( conscious_neurons.begin(), conscious_neurons.end(),
+ u));
+
+ if ( u->is_hostable() ) {
+ size_t our_idx;
+ if ( u->is_neuron() ) {
+ hosted_neurons.erase( find( hosted_neurons.begin(), hosted_neurons.end(), u));
+ our_idx = ((C_HostedNeuron*)u) -> idx;
+ } else {
+ hosted_synapses.erase( find( hosted_synapses.begin(), hosted_synapses.end(), u));
+ our_idx = ((C_HostedSynapse*)u) -> idx;
+ }
+
+ // shrink V
+ if ( options.verbosely > 5 )
+ fprintf( stderr, " (shrink V by %d)", u->v_no());
+ for ( auto& N : hosted_neurons )
+ if ( N->idx > our_idx )
+ N->idx -= u->v_no();
+ for ( auto& Y : hosted_synapses )
+ if ( Y->idx > our_idx )
+ Y->idx -= u->v_no();
+ memmove( &V[our_idx], &V[our_idx+u->v_no()],
+ (_var_cnt - our_idx - u->v_no()) * sizeof(double));
+ V.resize( _var_cnt -= u->v_no());
+ }
+ if ( u->is_ddtbound() ) {
+ if ( u->is_neuron() )
+ ddtbound_neurons.erase( find( ddtbound_neurons.begin(), ddtbound_neurons.end(), u));
+ else
+ ddtbound_synapses.erase( find( ddtbound_synapses.begin(), ddtbound_synapses.end(), u));
+ }
+ if ( !u->is_hostable() ) {
+ if ( u->is_neuron() )
+ standalone_neurons.remove(
+ static_cast<C_StandaloneNeuron*>(u));
+ else
+ standalone_synapses.remove(
+ static_cast<C_StandaloneSynapse*>(u));
+ }
+
+ units.remove( u);
+
+ if ( option == TExcludeOption::with_delete ) {
+ delete u;
+ u = nullptr;
+ } else
+ u->M = nullptr;
+
+ if ( options.verbosely > 5 )
+ fprintf( stderr, ".\n");
+ return u;
}
@@ -375,17 +377,15 @@ void
cnrun::CModel::
register_listener( C_BaseUnit *u)
{
- if ( find( lisn_unit_list.begin(), lisn_unit_list.end(), u) == lisn_unit_list.end() )
- lisn_unit_list.push_back( u);
+ if ( not count( listening_units.begin(), listening_units.end(), u) )
+ listening_units.push_back( u);
}
void
cnrun::CModel::
unregister_listener( C_BaseUnit *u)
{
- const auto& U = find( lisn_unit_list.begin(), lisn_unit_list.end(), u);
- if ( U != lisn_unit_list.end() )
- lisn_unit_list.erase( U);
+ listening_units.remove( u);
}
@@ -398,20 +398,17 @@ void
cnrun::CModel::
register_spikelogger( C_BaseNeuron *n)
{
- spikelogging_neu_list.push_back( n);
- spikelogging_neu_list.sort();
- spikelogging_neu_list.unique();
+ spikelogging_neurons.push_back( n);
+ spikelogging_neurons.sort();
+ spikelogging_neurons.unique();
}
void
cnrun::CModel::
unregister_spikelogger( C_BaseNeuron *n)
{
- for_all_spikelogging_neurons (N)
- if ( (*N) == n ) {
- spikelogging_neu_list.erase( N);
- return;
- }
+ spikelogging_neurons.remove(
+ static_cast<decltype(spikelogging_neurons)::value_type>(n));
}
@@ -429,35 +426,23 @@ void
cnrun::CModel::
register_unit_with_sources( C_BaseUnit *u)
{
- for ( auto& I : u->sources )
- if ( I.source->is_periodic() )
- units_with_periodic_sources.push_back( u);
- else
- units_with_continuous_sources.push_back( u);
- units_with_continuous_sources.unique();
- units_with_periodic_sources.unique();
+ for ( auto& I : u->sources )
+ if ( I.source->is_periodic() )
+ units_with_periodic_sources.push_back( u);
+ else
+ units_with_continuous_sources.push_back( u);
+ units_with_continuous_sources.unique();
+ units_with_periodic_sources.unique();
}
void
cnrun::CModel::
unregister_unit_with_sources( C_BaseUnit *u)
{
-start_over_1:
- for_all_units_with_contiuous_sources (U)
- if ( (*U) == u ) {
- units_with_continuous_sources.erase( U);
- if ( verbosely > 5 )
- fprintf( stderr, " (removed \"%s\" instance from units w/ continuous sources list)\n", u->_label);
- goto start_over_1;
- }
-start_over_2:
- for_all_units_with_periodic_sources (U)
- if ( (*U) == u ) {
- units_with_periodic_sources.erase( U);
- if ( verbosely > 5 )
- fprintf( stderr, " (removed \"%s\" instance from units w/ periodic sources list)\n", u->_label);
- goto start_over_2;
- }
+ units_with_continuous_sources.remove(
+ static_cast<decltype(units_with_continuous_sources)::value_type>(u));
+ units_with_periodic_sources.remove(
+ static_cast<decltype(units_with_periodic_sources)::value_type>(u));
}
@@ -469,79 +454,81 @@ start_over_2:
cnrun::C_BaseNeuron*
cnrun::CModel::
-add_neuron_species( const char *type_s, const char *label, bool finalize,
- double x, double y, double z)
+add_neuron_species( const string& type_s, const string& label,
+ const TIncludeOption include_option,
+ const double x, const double y, const double z)
{
- TUnitType t = unit_species_by_string( type_s);
- if ( t == NT_VOID || !unit_species_is_neuron(type_s) ) {
- fprintf( stderr, "Unrecognised neuron species: \"%s\"\n", type_s);
- return nullptr;
- } else
- return add_neuron_species( t, label, finalize, x, y, z);
+ TUnitType t = unit_species_by_string( type_s);
+ if ( unlikely (t == NT_VOID || !unit_species_is_neuron(type_s)) ) {
+ fprintf( stderr, "Unrecognised neuron species: \"%s\"\n", type_s.c_str());
+ return nullptr;
+ } else
+ return add_neuron_species( t, label, include_option, x, y, z);
}
cnrun::C_BaseNeuron*
cnrun::CModel::
-add_neuron_species( TUnitType type, const char *label, bool finalize,
- double x, double y, double z)
+add_neuron_species( TUnitType type, const string& label,
+ const TIncludeOption include_option,
+ double x, double y, double z)
{
- C_BaseNeuron *n;
- switch ( type ) {
- case NT_HH_D:
- n = new CNeuronHH_d( label, x, y, z, this, CN_UOWNED, finalize);
- break;
- case NT_HH_R:
- n = new CNeuronHH_r( label, x, y, z, this, CN_UOWNED);
- break;
-
- case NT_HH2_D:
- n = new CNeuronHH2_d( label, x, y, z, this, CN_UOWNED, finalize);
- break;
- // case NT_HH2_R:
- // n = new CNeuronHH2_r( label, x, y, z, this, CN_UOWNED, finalize);
- // break;
+ C_BaseNeuron *n;
+ switch ( type ) {
+ case NT_HH_D:
+ n = new CNeuronHH_d( label, x, y, z, this, CN_UOWNED, include_option);
+ break;
+ case NT_HH_R:
+ n = new CNeuronHH_r( label, x, y, z, this, CN_UOWNED);
+ break;
+
+ case NT_HH2_D:
+ n = new CNeuronHH2_d( label, x, y, z, this, CN_UOWNED, include_option);
+ break;
+ // case NT_HH2_R:
+ // n = new CNeuronHH2_r( label, x, y, z, this, CN_UOWNED, include_option);
+ // break;
//#ifdef CN_WANT_MORE_NEURONS
- case NT_EC_D:
- n = new CNeuronEC_d( label, x, y, z, this, CN_UOWNED, finalize);
- break;
- case NT_ECA_D:
- n = new CNeuronECA_d( label, x, y, z, this, CN_UOWNED, finalize);
- break;
+ case NT_EC_D:
+ n = new CNeuronEC_d( label, x, y, z, this, CN_UOWNED, include_option);
+ break;
+ case NT_ECA_D:
+ n = new CNeuronECA_d( label, x, y, z, this, CN_UOWNED, include_option);
+ break;
/*
- case NT_LV:
- n = new COscillatorLV( label, x, y, z, this, CN_UOWNED, finalize);
- break;
+ case NT_LV:
+ n = new COscillatorLV( label, x, y, z, this, CN_UOWNED, include_option);
+ break;
*/
- case NT_COLPITTS:
- n = new COscillatorColpitts( label, x, y, z, this, CN_UOWNED, finalize);
- break;
- case NT_VDPOL:
- n = new COscillatorVdPol( label, x, y, z, this, CN_UOWNED, finalize);
- break;
+ case NT_COLPITTS:
+ n = new COscillatorColpitts( label, x, y, z, this, CN_UOWNED, include_option);
+ break;
+ case NT_VDPOL:
+ n = new COscillatorVdPol( label, x, y, z, this, CN_UOWNED, include_option);
+ break;
//#endif
- case NT_DOTPOISSON:
- n = new COscillatorDotPoisson( label, x, y, z, this, CN_UOWNED);
- break;
- case NT_POISSON:
- n = new COscillatorPoisson( label, x, y, z, this, CN_UOWNED);
- break;
-
- case NT_DOTPULSE:
- n = new CNeuronDotPulse( label, x, y, z, this, CN_UOWNED);
- break;
-
- case NT_MAP:
- n = new CNeuronMap( label, x, y, z, this, CN_UOWNED);
- break;
-
- default:
- return nullptr;
- }
- if ( n && n->_status & CN_UERROR ) {
- delete n;
- return nullptr;
- }
- return n;
+ case NT_DOTPOISSON:
+ n = new COscillatorDotPoisson( label, x, y, z, this, CN_UOWNED);
+ break;
+ case NT_POISSON:
+ n = new COscillatorPoisson( label, x, y, z, this, CN_UOWNED);
+ break;
+
+ case NT_DOTPULSE:
+ n = new CNeuronDotPulse( label, x, y, z, this, CN_UOWNED);
+ break;
+
+ case NT_MAP:
+ n = new CNeuronMap( label, x, y, z, this, CN_UOWNED);
+ break;
+
+ default:
+ return nullptr;
+ }
+ if ( n && n->_status & CN_UERROR ) {
+ delete n;
+ return nullptr;
+ }
+ return n;
}
@@ -553,97 +540,100 @@ add_neuron_species( TUnitType type, const char *label, bool finalize,
cnrun::C_BaseSynapse*
cnrun::CModel::
-add_synapse_species( const char *type_s, const char *src_l, const char *tgt_l,
- double g, bool allow_clone, bool finalize)
+add_synapse_species( const string& type_s,
+ const string& src_l, const string& tgt_l,
+ const double g,
+ const TSynapseCloningOption cloning_option,
+ const TIncludeOption include_option)
{
- TUnitType ytype = unit_species_by_string( type_s);
- bool given_species = true;
- if ( ytype == NT_VOID && (given_species = false, ytype = unit_family_by_string( type_s)) == NT_VOID ) {
- fprintf( stderr, "Unrecognised synapse species or family: \"%s\"\n", type_s);
- return nullptr;
- }
-
- C_BaseNeuron
- *src = neuron_by_label( src_l),
- *tgt = neuron_by_label( tgt_l);
- if ( !src || !tgt ) {
- fprintf( stderr, "Phoney source (\"%s\") or target (\"%s\")\n", src_l, tgt_l);
- return nullptr;
- }
-
- if ( given_species ) // let lower function do the checking
- return add_synapse_species( ytype, src, tgt, g, allow_clone, finalize);
-
- switch ( ytype ) {
+ TUnitType ytype = unit_species_by_string( type_s);
+ bool given_species = true;
+ if ( ytype == NT_VOID && (given_species = false, ytype = unit_family_by_string( type_s)) == NT_VOID ) {
+ fprintf( stderr, "Unrecognised synapse species or family: \"%s\"\n", type_s.c_str());
+ return nullptr;
+ }
+
+ C_BaseNeuron
+ *src = neuron_by_label( src_l),
+ *tgt = neuron_by_label( tgt_l);
+ if ( !src || !tgt ) {
+ fprintf( stderr, "Phoney source (\"%s\") or target (\"%s\")\n", src_l.c_str(), tgt_l.c_str());
+ return nullptr;
+ }
+
+ if ( given_species ) // let lower function do the checking
+ return add_synapse_species( ytype, src, tgt, g, cloning_option, include_option);
+
+ switch ( ytype ) {
// catch by first entry in __CNUDT, assign proper species per source and target traits
- case YT_AB_DD:
- if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED )
- ytype = YT_AB_RR;
- else if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) )
- ytype = YT_AB_RD;
- else if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED )
- if ( src->traits() & UT_DOT )
- ytype = YT_MXAB_DR;
- else
- ytype = YT_AB_DR;
- else
- if ( src->traits() & UT_DOT )
- ytype = YT_MXAB_DD;
- else
- ytype = YT_AB_DD;
- break;
-
- case YT_ABMINUS_DD:
- if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED )
- ytype = YT_ABMINUS_RR;
- else if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) )
- ytype = YT_ABMINUS_RD;
- else if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED )
- if ( src->traits() & UT_DOT )
- ytype = YT_MXABMINUS_DR;
- else
- ytype = YT_ABMINUS_DR;
- else
- if ( src->traits() & UT_DOT )
- ytype = YT_MXABMINUS_DD;
- else
- ytype = YT_ABMINUS_DD;
- break;
-
- case YT_RALL_DD:
- if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED )
- ytype = YT_RALL_RR;
- else if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) )
- ytype = YT_RALL_RD;
- else if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED )
- if ( src->traits() & UT_DOT )
- ytype = YT_MXRALL_DR;
- else
- ytype = YT_RALL_DR;
- else
- if ( src->traits() & UT_DOT )
- ytype = YT_MXRALL_DD;
- else
- ytype = YT_RALL_DD;
- break;
-
- case YT_MAP:
- if ( src->traits() & UT_DDTSET)
- if ( src->traits() & UT_DOT )
- ytype = YT_MXMAP;
- else
- ytype = YT_MAP;
- else {
- fprintf( stderr, "Map synapses can only connect Map neurons\n");
- return nullptr;
- }
- break;
- default:
- printf( "Teleporting is fun!\n");
- return nullptr;
- }
-
- return add_synapse_species( ytype, src, tgt, g, allow_clone, finalize);
+ case YT_AB_DD:
+ if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED )
+ ytype = YT_AB_RR;
+ else if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) )
+ ytype = YT_AB_RD;
+ else if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED )
+ if ( src->traits() & UT_DOT )
+ ytype = YT_MXAB_DR;
+ else
+ ytype = YT_AB_DR;
+ else
+ if ( src->traits() & UT_DOT )
+ ytype = YT_MXAB_DD;
+ else
+ ytype = YT_AB_DD;
+ break;
+
+ case YT_ABMINUS_DD:
+ if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED )
+ ytype = YT_ABMINUS_RR;
+ else if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) )
+ ytype = YT_ABMINUS_RD;
+ else if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED )
+ if ( src->traits() & UT_DOT )
+ ytype = YT_MXABMINUS_DR;
+ else
+ ytype = YT_ABMINUS_DR;
+ else
+ if ( src->traits() & UT_DOT )
+ ytype = YT_MXABMINUS_DD;
+ else
+ ytype = YT_ABMINUS_DD;
+ break;
+
+ case YT_RALL_DD:
+ if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED )
+ ytype = YT_RALL_RR;
+ else if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) )
+ ytype = YT_RALL_RD;
+ else if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED )
+ if ( src->traits() & UT_DOT )
+ ytype = YT_MXRALL_DR;
+ else
+ ytype = YT_RALL_DR;
+ else
+ if ( src->traits() & UT_DOT )
+ ytype = YT_MXRALL_DD;
+ else
+ ytype = YT_RALL_DD;
+ break;
+
+ case YT_MAP:
+ if ( src->traits() & UT_DDTSET)
+ if ( src->traits() & UT_DOT )
+ ytype = YT_MXMAP;
+ else
+ ytype = YT_MAP;
+ else {
+ fprintf( stderr, "Map synapses can only connect Map neurons\n");
+ return nullptr;
+ }
+ break;
+ default:
+ printf( "Teleporting is fun!\n");
+ return nullptr;
+ }
+
+ return add_synapse_species( ytype, src, tgt, g, cloning_option, include_option);
}
@@ -651,226 +641,225 @@ add_synapse_species( const char *type_s, const char *src_l, const char *tgt_l,
cnrun::C_BaseSynapse*
cnrun::CModel::
-add_synapse_species( TUnitType ytype, C_BaseNeuron *src, C_BaseNeuron *tgt,
- double g, bool allow_clone, bool finalize)
+add_synapse_species( TUnitType ytype,
+ C_BaseNeuron *src, C_BaseNeuron *tgt,
+ double g,
+ TSynapseCloningOption cloning_option, TIncludeOption include_option)
{
- if ( verbosely > 5 )
- printf( "add_synapse_species( \"%s\", \"%s\", \"%s\", %g, %d, %d)\n",
- __CNUDT[ytype].species, src->_label, tgt->_label, g, allow_clone, finalize);
+ if ( options.verbosely > 5 )
+ printf( "add_synapse_species( \"%s\", \"%s\", \"%s\", %g, %d, %d)\n",
+ __CNUDT[ytype].species, src->_label, tgt->_label, g, cloning_option, include_option);
- C_BaseSynapse *y = nullptr;
+ C_BaseSynapse *y = nullptr;
// consider cloning
- if ( !(_status & CN_MDL_DONT_COALESCE) && allow_clone && src->_axonal_harbour.size() )
- for ( auto& L : src->_axonal_harbour )
- if ( L->_type == ytype &&
- L->is_not_altered() )
- return L->clone_to_target( tgt, g);
+ if ( cloning_option == TSynapseCloningOption::yes && src->_axonal_harbour.size() )
+ for ( auto& L : src->_axonal_harbour )
+ if ( L->_type == ytype &&
+ L->is_not_altered() )
+ return L->clone_to_target( tgt, g);
- switch ( ytype ) {
+ switch ( ytype ) {
// the __CNUDT entry at first TUnitType element whose
// 'name' matches the type id supplied, captures all cases for a given synapse family
- case YT_AB_RR:
- if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
- y = new CSynapseAB_rr( src, tgt, g, this, CN_UOWNED, finalize);
- break;
- case YT_AB_RD:
- if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
- // y = new CSynapseAB_rd( synapse_id, src, tgt, this, CN_UOWNED, false);
- fprintf( stderr, "AB_rd not implemented\n");
- break;
- case YT_AB_DR:
- if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
- // y = new CSynapseAB_rr( synapse_id, src, tgt, this, CN_UOWNED, false);
- fprintf( stderr, "AB_dr not implemented\n");
- break;
- case YT_AB_DD:
- if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
- y = new CSynapseAB_dd( src, tgt, g, this, CN_UOWNED, finalize);
- break;
- case YT_MXAB_DR:
- if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED && src->traits() & UT_DOT )
- y = new CSynapseMxAB_dr( src, tgt, g, this, CN_UOWNED, finalize);
- break;
- case YT_MXAB_DD:
- if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && src->traits() & UT_DOT )
- y = new CSynapseMxAB_dd( src, tgt, g, this, CN_UOWNED, finalize);
- break;
-
-
- case YT_ABMINUS_RR:
- if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
- // y = new CSynapseABMINUS_rr( src, tgt, g, this, CN_UOWNED, finalize);
- fprintf( stderr, "ABMINUS_rr not implemented\n");
- break;
- case YT_ABMINUS_RD:
- if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
- // y = new CSynapseABMINUS_rd( synapse_id, src, tgt, this, CN_UOWNED, false);
- fprintf( stderr, "ABMINUS_rd not implemented\n");
- break;
- case YT_ABMINUS_DR:
- if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
- // y = new CSynapseABMINUS_rr( synapse_id, src, tgt, this, CN_UOWNED, false);
- fprintf( stderr, "ABMINUS_dr not implemented\n");
- break;
- case YT_ABMINUS_DD:
- if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
- y = new CSynapseABMinus_dd( src, tgt, g, this, CN_UOWNED, finalize);
- break;
- case YT_MXABMINUS_DR:
- if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED && src->traits() & UT_DOT )
- // y = new CSynapseMxABMinus_dr( src, tgt, g, this, CN_UOWNED, finalize);
- fprintf( stderr, "MxABMinus_dr not implemented\n");
- break;
- case YT_MXABMINUS_DD:
- if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && src->traits() & UT_DOT )
- // y = new CSynapseMxABMinus_dd( src, tgt, g, this, CN_UOWNED, finalize);
- fprintf( stderr, "MxABMinus_dd not implemented\n");
- break;
-
-
- case YT_RALL_RR:
- if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
- // y = new CSynapseRall_rr( src, tgt, g, this, CN_UOWNED, finalize);
- fprintf( stderr, "Rall_rr not implemented\n");
- break;
- case YT_RALL_RD:
- if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
- // y = new CSynapseRall_rd( synapse_id, src, tgt, this, CN_UOWNED, false);
- fprintf( stderr, "Rall_rd not implemented\n");
- break;
- case YT_RALL_DR:
- if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
- // y = new CSynapseRall_rr( synapse_id, src, tgt, this, CN_UOWNED, false);
- fprintf( stderr, "Rall_dr not implemented\n");
- break;
- case YT_RALL_DD:
- if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
- y = new CSynapseRall_dd( src, tgt, g, this, CN_UOWNED, finalize);
- break;
- case YT_MXRALL_DR:
- if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED && src->traits() & UT_DOT )
- // y = new CSynapseMxRall_dr( src, tgt, g, this, CN_UOWNED, finalize);
- fprintf( stderr, "MxRall_dr not implemented\n");
- break;
- case YT_MXRALL_DD:
- if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && src->traits() & UT_DOT )
- // y = new CSynapseMxRall_dd( src, tgt, g, this, CN_UOWNED, finalize);
- fprintf( stderr, "MxRall_dd not implemented\n");
- break;
-
-
- case YT_MAP:
- if ( src->traits() & UT_DDTSET)
- if ( src->traits() & UT_DOT )
- y = new CSynapseMxMap( src, tgt, g, this, CN_UOWNED);
- else
- y = new CSynapseMap( src, tgt, g, this, CN_UOWNED);
- else
- fprintf( stderr, "Map synapses can only connect Map neurons\n");
- break;
-
- default:
- return nullptr;
- }
-
- if ( !y || y->_status & CN_UERROR ) {
- if ( y )
- delete y;
- return nullptr;
- }
-
- if ( verbosely > 5 )
- printf( "new synapse \"%s->%s\"\n", y->_label, tgt->label());
- y->set_g_on_target( *tgt, g);
-
- return y;
+ case YT_AB_RR:
+ if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
+ y = new CSynapseAB_rr( src, tgt, g, this, CN_UOWNED, include_option);
+ break;
+ case YT_AB_RD:
+ if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
+ // y = new CSynapseAB_rd( synapse_id, src, tgt, this, CN_UOWNED, false);
+ fprintf( stderr, "AB_rd not implemented\n");
+ break;
+ case YT_AB_DR:
+ if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
+ // y = new CSynapseAB_rr( synapse_id, src, tgt, this, CN_UOWNED, false);
+ fprintf( stderr, "AB_dr not implemented\n");
+ break;
+ case YT_AB_DD:
+ if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
+ y = new CSynapseAB_dd( src, tgt, g, this, CN_UOWNED, include_option);
+ break;
+ case YT_MXAB_DR:
+ if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED && src->traits() & UT_DOT )
+ y = new CSynapseMxAB_dr( src, tgt, g, this, CN_UOWNED, include_option);
+ break;
+ case YT_MXAB_DD:
+ if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && src->traits() & UT_DOT )
+ y = new CSynapseMxAB_dd( src, tgt, g, this, CN_UOWNED, include_option);
+ break;
+
+
+ case YT_ABMINUS_RR:
+ if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
+ // y = new CSynapseABMINUS_rr( src, tgt, g, this, CN_UOWNED, include_option);
+ fprintf( stderr, "ABMINUS_rr not implemented\n");
+ break;
+ case YT_ABMINUS_RD:
+ if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
+ // y = new CSynapseABMINUS_rd( synapse_id, src, tgt, this, CN_UOWNED, false);
+ fprintf( stderr, "ABMINUS_rd not implemented\n");
+ break;
+ case YT_ABMINUS_DR:
+ if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
+ // y = new CSynapseABMINUS_rr( synapse_id, src, tgt, this, CN_UOWNED, false);
+ fprintf( stderr, "ABMINUS_dr not implemented\n");
+ break;
+ case YT_ABMINUS_DD:
+ if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
+ y = new CSynapseABMinus_dd( src, tgt, g, this, CN_UOWNED, include_option);
+ break;
+ case YT_MXABMINUS_DR:
+ if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED && src->traits() & UT_DOT )
+ // y = new CSynapseMxABMinus_dr( src, tgt, g, this, CN_UOWNED, include_option);
+ fprintf( stderr, "MxABMinus_dr not implemented\n");
+ break;
+ case YT_MXABMINUS_DD:
+ if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && src->traits() & UT_DOT )
+ // y = new CSynapseMxABMinus_dd( src, tgt, g, this, CN_UOWNED, include_option);
+ fprintf( stderr, "MxABMinus_dd not implemented\n");
+ break;
+
+
+ case YT_RALL_RR:
+ if ( src->traits() & UT_RATEBASED && tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
+ // y = new CSynapseRall_rr( src, tgt, g, this, CN_UOWNED, include_option);
+ fprintf( stderr, "Rall_rr not implemented\n");
+ break;
+ case YT_RALL_RD:
+ if ( src->traits() & UT_RATEBASED && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
+ // y = new CSynapseRall_rd( synapse_id, src, tgt, this, CN_UOWNED, false);
+ fprintf( stderr, "Rall_rd not implemented\n");
+ break;
+ case YT_RALL_DR:
+ if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED && !(src->traits() & UT_DOT) )
+ // y = new CSynapseRall_rr( synapse_id, src, tgt, this, CN_UOWNED, false);
+ fprintf( stderr, "Rall_dr not implemented\n");
+ break;
+ case YT_RALL_DD:
+ if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && !(src->traits() & UT_DOT) )
+ y = new CSynapseRall_dd( src, tgt, g, this, CN_UOWNED, include_option);
+ break;
+ case YT_MXRALL_DR:
+ if ( !(src->traits() & UT_RATEBASED) && tgt->traits() & UT_RATEBASED && src->traits() & UT_DOT )
+ // y = new CSynapseMxRall_dr( src, tgt, g, this, CN_UOWNED, include_option);
+ fprintf( stderr, "MxRall_dr not implemented\n");
+ break;
+ case YT_MXRALL_DD:
+ if ( !(src->traits() & UT_RATEBASED) && !(tgt->traits() & UT_RATEBASED) && src->traits() & UT_DOT )
+ // y = new CSynapseMxRall_dd( src, tgt, g, this, CN_UOWNED, include_option);
+ fprintf( stderr, "MxRall_dd not implemented\n");
+ break;
+
+
+ case YT_MAP:
+ if ( src->traits() & UT_DDTSET)
+ if ( src->traits() & UT_DOT )
+ y = new CSynapseMxMap( src, tgt, g, this, CN_UOWNED);
+ else
+ y = new CSynapseMap( src, tgt, g, this, CN_UOWNED);
+ else
+ fprintf( stderr, "Map synapses can only connect Map neurons\n");
+ break;
+
+ default:
+ return nullptr;
+ }
+
+ if ( !y || y->_status & CN_UERROR ) {
+ if ( y )
+ delete y;
+ return nullptr;
+ }
+
+ if ( options.verbosely > 5 )
+ printf( "new synapse \"%s->%s\"\n", y->_label, tgt->label());
+ y->set_g_on_target( *tgt, g);
+
+ return y;
}
-
-
-
-
void
cnrun::CModel::
finalize_additions()
{
- V.resize( _var_cnt), W.resize( _var_cnt);
-
- for_all_hosted_neurons (N)
- (*N) -> reset_vars();
- for_all_hosted_synapses (Y)
- (*Y) -> reset_vars();
-
- if ( _status & CN_MDL_SORTUNITS ) {
- __C_BaseUnitCompareByLabel cmp;
- unit_list.sort( cmp);
- // hosted_neu_list.sort( cmp);
- // hosted_syn_list.sort( cmp);
- // standalone_neu_list.sort( cmp);
- // standalone_syn_list.sort( cmp);
- }
-
- _integrator->prepare();
+ V.resize( _var_cnt);
+ W.resize( _var_cnt);
+
+ for ( auto& U : hosted_neurons )
+ U->reset_vars();
+ for ( auto& U : hosted_synapses )
+ U->reset_vars();
+
+ if ( options.sort_units ) {
+ units.sort(
+ [] (C_BaseUnit *&lv, C_BaseUnit *&rv) {
+ return strcmp( lv->label(), rv->label()) < 0;
+ });
+ // hosted_neurons.sort( cmp);
+ // hosted_synapses.sort( cmp);
+ // standalone_neurons.sort( cmp);
+ // standalone_synapses.sort( cmp);
+ }
+
+ _integrator->prepare();
}
-
-
-
-
-
-
void
cnrun::CModel::
cull_deaf_synapses()
{
- // needs fixing
- // 1. Need to traverse syn_list backwards due to shifts its vector will undergo on element deletions;
- // 2. Omit those with a param reader, scheduler or range, but only if it is connected to parameter "gsyn"
-grand_restart:
- for_all_hosted_synapses (Y)
- if ( !(*Y)->has_sources() ) {
- restart:
- for ( C_BaseSynapse::lni T = (*Y)->_targets.begin(); T != (*Y)->_targets.end(); T++ ) {
- if ( (*Y)->g_on_target( **T) == 0 ) {
- if ( verbosely > 3 )
- fprintf( stderr, " (deleting dendrite to \"%s\" of a synapse \"%s\" with gsyn == 0)\n",
- (*T)->_label, (*Y)->_label);
- (*T)->_dendrites.erase( *Y);
- (*Y)->_targets.erase( find( (*Y)->_targets.begin(), (*Y)->_targets.end(), *T));
-
- snprintf( (*Y)->_label, CN_MAX_LABEL_SIZE-1, "%s:%zu", (*Y)->_source->_label, (*Y)->_targets.size());
- goto restart;
- }
- }
- if ( (*Y)->_targets.size() == 0 ) {
- delete (*Y);
- goto grand_restart;
- }
- }
-
- // older stuff
+ // 1. Need to traverse synapses backwards due to shifts its
+ // vector will undergo on element deletions;
+ // 2. Omit those with a param reader, scheduler or range, but
+ // only if it is connected to parameter "gsyn"
+ auto Yi = hosted_synapses.rbegin();
+ while ( Yi != hosted_synapses.rend() ) {
+ auto& Y = **Yi;
+ if ( Y.has_sources() )
+ continue;
+ auto Ti = Y._targets.begin();
+ while ( Ti != Y._targets.end() ) {
+ auto& T = **Ti;
+ if ( Y.g_on_target( T) == 0 ) {
+ if ( options.verbosely > 3 )
+ fprintf( stderr, " (deleting dendrite to \"%s\" of a synapse \"%s\" with gsyn == 0)\n",
+ T._label, Y._label);
+ T._dendrites.erase( &Y);
+ ++Ti;
+ Y._targets.erase( prev(Ti));
+
+ snprintf( Y._label, C_BaseUnit::max_label_size-1,
+ "%s:%zu", Y._source->_label, Y._targets.size());
+ }
+ }
+ ++Yi;
+ if ( (*prev(Yi))->_targets.empty() )
+ delete *prev(Yi);
+ }
+
+ // older stuff
/*
- for_all_synapses_reversed (Y) {
- int gsyn_pidx = (*Y) -> param_idx_by_sym( "gsyn");
- if ( ((*Y)->param_schedulers && device_list_concerns_parm( (*Y)->param_schedulers, gsyn_pidx)) ||
- ((*Y)->param_readers && device_list_concerns_parm( (*Y)->param_readers, gsyn_pidx)) ||
- ((*Y)->param_ranges && device_list_concerns_parm( (*Y)->param_ranges, gsyn_pidx)) ) {
- if ( verbosely > 2 )
- printf( " (preserving doped synapse with zero gsyn: \"%s\")\n", (*Y)->_label);
- continue;
- }
- if ( gsyn_pidx > -1 && (*Y)->param_value( gsyn_pidx) == 0. ) {
- if ( verbosely > 2 )
- printf( " (deleting synapse with zero gsyn: \"%s\")\n", (*Y)->_label);
- delete (*Y);
- cnt++;
- }
- }
- if ( verbosely > 0 && cnt )
- printf( "Deleted %zd deaf synapses\n", cnt);
+ for_all_synapses_reversed (Y) {
+ int gsyn_pidx = (*Y) -> param_idx_by_sym( "gsyn");
+ if ( ((*Y)->param_schedulers && device_list_concerns_parm( (*Y)->param_schedulers, gsyn_pidx)) ||
+ ((*Y)->param_readers && device_list_concerns_parm( (*Y)->param_readers, gsyn_pidx)) ||
+ ((*Y)->param_ranges && device_list_concerns_parm( (*Y)->param_ranges, gsyn_pidx)) ) {
+ if ( verbosely > 2 )
+ printf( " (preserving doped synapse with zero gsyn: \"%s\")\n", (*Y)->_label);
+ continue;
+ }
+ if ( gsyn_pidx > -1 && (*Y)->param_value( gsyn_pidx) == 0. ) {
+ if ( verbosely > 2 )
+ printf( " (deleting synapse with zero gsyn: \"%s\")\n", (*Y)->_label);
+ delete (*Y);
+ ++cnt;
+ }
+ }
+ if ( verbosely > 0 && cnt )
+ printf( "Deleted %zd deaf synapses\n", cnt);
*/
}
@@ -881,65 +870,69 @@ void
cnrun::CModel::
cull_blind_synapses()
{
- for_all_hosted_synapses_reversed (Y)
- if ( (*Y)->_source == nullptr && !(*Y)->has_sources() ) {
- if ( verbosely > 3 )
- printf( " (deleting synapse with nullptr source: \"%s\")\n", (*Y)->_label);
- delete (*Y);
- }
- for_all_standalone_synapses_reversed (Y)
- if ( (*Y)->_source == nullptr && !(*Y)->has_sources() ) {
- if ( verbosely > 3 )
- printf( " (deleting synapse with nullptr source: \"%s\")\n", (*Y)->_label);
- delete (*Y);
- }
+ auto Yi = hosted_synapses.rbegin();
+ // units remove themselves from all lists, including the one
+ // iterated here
+ while ( Yi != hosted_synapses.rend() ) {
+ auto& Y = **Yi;
+ if ( !Y._source && !Y.has_sources() ) {
+ if ( options.verbosely > 3 )
+ printf( " (deleting synapse with NULL source: \"%s\")\n", Y._label);
+ delete &Y;
+ }
+ }
+ auto Zi = standalone_synapses.rbegin();
+ while ( Zi != standalone_synapses.rend() ) {
+ auto& Y = **Zi;
+ if ( !Y._source && !Y.has_sources() ) {
+ if ( options.verbosely > 3 )
+ printf( " (deleting synapse with NULL source: \"%s\")\n", Y._label);
+ delete &Y;
+ }
+ }
}
-
void
cnrun::CModel::
reset_state_all_units()
{
- for_all_units (U)
- (*U) -> reset_state();
+ for ( auto& U : units )
+ U -> reset_state();
}
-
-
-
-
// tags
int
cnrun::CModel::
process_listener_tags( const list<STagGroupListener> &Listeners)
{
- regex_t RE;
- for ( auto& P : Listeners ) {
- if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
- fprintf( stderr, "Invalid regexp in process_listener_tags: \"%s\"\n", P.pattern.c_str());
- return -1;
- }
- for_all_units (U) {
- if ( regexec( &RE, (*U)->_label, 0, 0, 0) == 0 ) {
- if ( P.enable ) {
- (*U) -> start_listening( P.bits);
- if ( verbosely > 3 )
- printf( " (unit \"%s\" listening%s)\n",
- (*U)->_label, P.bits & CN_ULISTENING_1VARONLY ? ", to one var only" :"");
- } else {
- (*U) -> stop_listening();
- if ( verbosely > 3 )
- printf( " (unit \"%s\" not listening)\n", (*U)->_label);
- }
- }
- }
- }
-
- return 0;
+ regex_t RE;
+ for ( auto& P : Listeners ) {
+ if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
+ fprintf( stderr, "Invalid regexp in process_listener_tags: \"%s\"\n", P.pattern.c_str());
+ continue;
+ }
+ for ( auto& Ui : units ) {
+ auto& U = *Ui;
+ if ( regexec( &RE, U._label, 0, 0, 0) == 0 ) {
+ if ( P.enable ) {
+ U.start_listening( P.bits);
+ if ( options.verbosely > 3 )
+ printf( " (unit \"%s\" listening%s)\n",
+ U._label, P.bits & CN_ULISTENING_1VARONLY ? ", to one var only" :"");
+ } else {
+ U.stop_listening();
+ if ( options.verbosely > 3 )
+ printf( " (unit \"%s\" not listening)\n", U._label);
+ }
+ }
+ }
+ }
+
+ return 0;
}
@@ -947,53 +940,56 @@ int
cnrun::CModel::
process_spikelogger_tags( const list<STagGroupSpikelogger> &Spikeloggers)
{
- regex_t RE;
- for ( auto& P : Spikeloggers ) {
- if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
- fprintf( stderr, "Invalid regexp in process_spikelogger_tags: \"%s\"\n", P.pattern.c_str());
- return -1;
- }
- for_all_standalone_neurons (N) {
- if ( regexec( &RE, (*N)->_label, 0, 0, 0) == 0 ) {
- if ( P.enable ) {
- bool log_sdf = !(P.period == 0. || P.sigma == 0.);
- if ( ( log_sdf && !(*N)->enable_spikelogging_service( P.period, P.sigma, P.from))
- ||
- (!log_sdf && !(*N)->enable_spikelogging_service()) ) {
- fprintf( stderr, "Cannot have \"%s\" log spikes because it is not a conductance-based neuron (of type %s)\n",
- (*N)->_label, (*N)->species());
- return -1;
- }
- } else
- (*N)->disable_spikelogging_service();
-
- if ( verbosely > 3 )
- printf( " (%sabling spike logging for standalone neuron \"%s\")\n",
- P.enable ? "en" : "dis", (*N)->_label);
- }
- }
- for_all_hosted_neurons (N) {
- if ( regexec( &RE, (*N)->_label, 0, 0, 0) == 0 ) {
- if ( P.enable ) {
- bool log_sdf = !(P.period == 0. || P.sigma == 0.);
- if ( ( log_sdf && !(*N)->enable_spikelogging_service( P.period, P.sigma, P.from))
- ||
- (!log_sdf && !(*N)->enable_spikelogging_service()) ) {
- fprintf( stderr, "Cannot have \"%s\" log spikes because it is not a conductance-based neuron (of type %s)\n",
- (*N)->_label, (*N)->species());
- return -1;
- }
- } else
- (*N)->disable_spikelogging_service();
-
- if ( verbosely > 3 )
- printf( " (%sabling spike logging for hosted neuron \"%s\")\n",
- P.enable ? "en" : "dis", (*N)->_label);
- }
- }
- }
-
- return 0;
+ regex_t RE;
+ for ( auto& P : Spikeloggers ) {
+ if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
+ fprintf( stderr, "Invalid regexp in process_spikelogger_tags: \"%s\"\n", P.pattern.c_str());
+ continue;
+ }
+ for ( auto& Ni : standalone_neurons ) {
+ auto& N = *Ni;
+ if ( regexec( &RE, N._label, 0, 0, 0) == 0 ) {
+ if ( P.enable ) {
+ bool log_sdf = !(P.period == 0. || P.sigma == 0.);
+ if ( ( log_sdf && !N.enable_spikelogging_service(
+ P.period, P.sigma, P.from))
+ or
+ (!log_sdf && !N.enable_spikelogging_service()) ) {
+ fprintf( stderr, "Cannot have \"%s\" log spikes because it is not a conductance-based neuron (of type %s)\n",
+ N._label, N.species());
+ return -1;
+ }
+ } else
+ N.disable_spikelogging_service();
+
+ if ( options.verbosely > 3 )
+ printf( " (%sabling spike logging for standalone neuron \"%s\")\n",
+ P.enable ? "en" : "dis", N._label);
+ }
+ }
+ for ( auto& Ni : hosted_neurons ) {
+ auto& N = *Ni;
+ if ( regexec( &RE, N._label, 0, 0, 0) == 0 ) {
+ if ( P.enable ) {
+ bool log_sdf = !(P.period == 0. || P.sigma == 0.);
+ if ( ( log_sdf && !N.enable_spikelogging_service( P.period, P.sigma, P.from))
+ or
+ (!log_sdf && !N.enable_spikelogging_service()) ) {
+ fprintf( stderr, "Cannot have \"%s\" log spikes because it is not a conductance-based neuron (of type %s)\n",
+ N._label, N.species());
+ return -1;
+ }
+ } else
+ N.disable_spikelogging_service();
+
+ if ( options.verbosely > 3 )
+ printf( " (%sabling spike logging for hosted neuron \"%s\")\n",
+ P.enable ? "en" : "dis", N._label);
+ }
+ }
+ }
+
+ return 0;
}
@@ -1002,29 +998,28 @@ cnrun::CModel::
process_putout_tags( const list<STagGroup> &ToRemove)
{
// execute some
- regex_t RE;
- for ( auto& P : ToRemove ) {
- if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
- fprintf( stderr, "Invalid regexp in process_putout_tags: \"%s\"\n", P.pattern.c_str());
- return -1;
- }
- for_all_units (U) {
- if ( regexec( &RE, (*U)->_label, 0, 0, 0) == 0 ) {
- if ( verbosely > 2 )
- printf( " (put out unit \"%s\")\n",
- (*U)->_label);
- delete (*U);
- if ( units() > 0 )
- U = ulist_begin();
- else
- break;
- }
- }
- }
-
- cull_blind_synapses();
-
- return 0;
+ regex_t RE;
+ for ( auto& P : ToRemove ) {
+ if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
+ fprintf( stderr, "Invalid regexp in process_putout_tags: \"%s\"\n", P.pattern.c_str());
+ continue;
+ }
+ auto Ui = units.rbegin();
+ while ( Ui != units.rend() ) {
+ ++Ui;
+ auto& U = **prev(Ui);
+ if ( regexec( &RE, U._label, 0, 0, 0) == 0 ) {
+ if ( options.verbosely > 2 )
+ printf( " (put out unit \"%s\")\n",
+ U._label);
+ delete &U;
+ }
+ }
+ }
+
+ cull_blind_synapses();
+
+ return 0;
}
@@ -1033,33 +1028,34 @@ cnrun::CModel::
process_decimate_tags( const list<STagGroupDecimate> &ToDecimate)
{
// decimate others
- regex_t RE;
- for ( auto& P : ToDecimate ) {
- if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
- fprintf( stderr, "Invalid regexp in process_decimate_tags: \"%s\"\n", P.pattern.c_str());
- return -1;
- }
-
- // collect group
- vector<C_BaseUnit*> dcmgroup;
- for_all_units (U)
- if ( regexec( &RE, (*U)->_label, 0, 0, 0) == 0 )
- dcmgroup.push_back( *U);
- random_shuffle( dcmgroup.begin(), dcmgroup.end());
-
- // execute
- size_t to_execute = rint( dcmgroup.size() * P.fraction), n = to_execute;
- while ( n-- )
- delete dcmgroup[n];
-
- if ( verbosely > 3 )
- printf( " (decimated %4.1f%% (%zu units) of %s)\n", P.fraction*100, to_execute, P.pattern.c_str());
-
- }
-
- cull_blind_synapses();
-
- return 0;
+ regex_t RE;
+ for ( auto& P : ToDecimate ) {
+ if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
+ fprintf( stderr, "Invalid regexp in process_decimate_tags: \"%s\"\n", P.pattern.c_str());
+ continue;
+ }
+
+ // collect group
+ vector<C_BaseUnit*> dcmgroup;
+ for ( auto& U : units )
+ if ( regexec( &RE, U->_label, 0, 0, 0) == 0 )
+ dcmgroup.push_back( U);
+ random_shuffle( dcmgroup.begin(), dcmgroup.end());
+
+ // execute
+ size_t to_execute = rint( dcmgroup.size() * P.fraction), n = to_execute;
+ while ( n-- )
+ delete dcmgroup[n];
+
+ if ( options.verbosely > 3 )
+ printf( " (decimated %4.1f%% (%zu units) of %s)\n",
+ P.fraction*100, to_execute, P.pattern.c_str());
+
+ }
+
+ cull_blind_synapses();
+
+ return 0;
}
@@ -1071,60 +1067,60 @@ int
cnrun::CModel::
process_paramset_static_tags( const list<STagGroupNeuronParmSet> &tags)
{
- regex_t RE;
- for ( auto& P : tags ) {
- if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
- fprintf( stderr, "Invalid regexp in process_paramset_static_tags: \"%s\"\n", P.pattern.c_str());
- return -1;
- }
-
- vector<string> current_tag_assigned_labels;
-
- for_all_neurons (U) {
- if ( regexec( &RE, (*U)->_label, 0, 0, 0) == 0 )
- continue;
- // because a named parameter can map to a different param_id in different units, rather
- // do lookup every time
-
- int p_d = -1;
- C_BaseUnit::TSinkType kind = (C_BaseUnit::TSinkType)-1;
- if ( (p_d = (*U)->param_idx_by_sym( P.parm.c_str())) > -1 )
- kind = C_BaseUnit::SINK_PARAM;
- else if ( (p_d = (*U)->var_idx_by_sym( P.parm.c_str())) > -1 )
- kind = C_BaseUnit::SINK_VAR;
- if ( p_d == -1 ) {
- fprintf( stderr, "%s \"%s\" (type \"%s\") has no parameter or variable named \"%s\"\n",
- (*U)->class_name(), (*U)->label(), (*U)->species(), P.parm.c_str());
- continue;
- }
-
- switch ( kind ) {
- case C_BaseUnit::SINK_PARAM:
- (*U)->param_value(p_d) = P.enable ? P.value : __CNUDT[(*U)->type()].stock_param_values[p_d];
- (*U)->param_changed_hook();
- break;
- case C_BaseUnit::SINK_VAR:
- (*U)-> var_value(p_d) = P.value;
- break;
- }
-
- current_tag_assigned_labels.push_back( (*U)->label());
- }
-
- if ( current_tag_assigned_labels.empty() ) {
- fprintf( stderr, "No neuron labelled matching \"%s\"\n", P.pattern.c_str());
- return -2;
- }
-
- if ( verbosely > 3 ) {
- printf( " set ");
- for ( auto S = current_tag_assigned_labels.begin(); S != current_tag_assigned_labels.end(); S++ )
- printf( "%s%s",
- (S == current_tag_assigned_labels.begin()) ? "" : ", ", S->c_str());
- printf( " {%s} = %g\n", P.parm.c_str(), P.value);
- }
- }
- return 0;
+ regex_t RE;
+ for ( auto& P : tags ) {
+ if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
+ fprintf( stderr, "Invalid regexp in process_paramset_static_tags: \"%s\"\n", P.pattern.c_str());
+ continue;
+ }
+
+ vector<string> current_tag_assigned_labels;
+
+ for ( auto& Ui : units ) {
+ if ( not Ui->is_neuron() )
+ continue;
+ auto& N = *static_cast<C_BaseNeuron*>(Ui);
+ if ( regexec( &RE, N.label(), 0, 0, 0) == REG_NOMATCH )
+ continue;
+ // because a named parameter can map to a different param_id in different units, rather
+ // do lookup every time
+
+ int p_d = -1;
+ C_BaseUnit::TSinkType kind = (C_BaseUnit::TSinkType)-1;
+ if ( (p_d = N.param_idx_by_sym( P.parm)) != -1 )
+ kind = C_BaseUnit::SINK_PARAM;
+ else if ( (p_d = N.var_idx_by_sym( P.parm)) != -1 )
+ kind = C_BaseUnit::SINK_VAR;
+ if ( p_d == -1 ) {
+ fprintf( stderr, "%s \"%s\" (type \"%s\") has no parameter or variable named \"%s\"\n",
+ N.class_name(), N.label(), N.species(), P.parm.c_str());
+ continue;
+ }
+
+ switch ( kind ) {
+ case C_BaseUnit::SINK_PARAM:
+ N.param_value(p_d) = P.enable ? P.value : __CNUDT[N.type()].stock_param_values[p_d];
+ N.param_changed_hook();
+ break;
+ case C_BaseUnit::SINK_VAR:
+ N.var_value(p_d) = P.value;
+ break;
+ }
+
+ current_tag_assigned_labels.push_back( N.label());
+ }
+
+ if ( current_tag_assigned_labels.empty() ) {
+ fprintf( stderr, "No neuron labelled matching \"%s\"\n", P.pattern.c_str());
+ continue;
+ }
+
+ if ( options.verbosely > 3 )
+ printf( " set [%s]{%s} = %g\n",
+ join(current_tag_assigned_labels, ", ").c_str(),
+ P.parm.c_str(), P.value);
+ }
+ return 0;
}
@@ -1135,87 +1131,94 @@ int
cnrun::CModel::
process_paramset_static_tags( const list<STagGroupSynapseParmSet> &tags)
{
- for ( auto& P : tags ) {
- regex_t REsrc, REtgt;
- if (0 != regcomp( &REsrc, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB) ) { // P->pattern acting as src
- fprintf( stderr, "Invalid regexp in process_paramset_static_tags (src): \"%s\"\n", P.pattern.c_str());
- return -1;
- }
- if (0 != regcomp( &REtgt, P.target.c_str(), REG_EXTENDED | REG_NOSUB) ) {
- fprintf( stderr, "Invalid regexp in process_paramset_static_tags (tgt): \"%s\"\n", P.target.c_str());
- return -1;
- }
-
- vector<string> current_tag_assigned_labels;
-
- bool do_gsyn = (P.parm == "gsyn");
-
- if ( verbosely > 5 )
- printf( "== setting %s -> %s {%s} = %g...\n", P.pattern.c_str(), P.target.c_str(), P.parm.c_str(), P.value);
-
- for_all_neurons (Us) {
- if ( regexec( &REsrc, (*Us)->label(), 0, 0, 0) == 0 )
- continue;
-
- for_all_neurons (Ut) {
- if ( regexec( &REtgt, (*Ut)->label(), 0, 0, 0) == 0 ) /* || Us == Ut */
- continue;
- C_BaseSynapse *y = static_cast<C_BaseNeuron*>(*Us) -> connects_via( *static_cast<C_BaseNeuron*>(*Ut));
- if ( !y )
- continue;
-
- if ( do_gsyn ) {
- y->set_g_on_target( *static_cast<C_BaseNeuron*>(*Ut), P.value);
- current_tag_assigned_labels.push_back( y->label());
- continue;
- }
-
- int p_d = -1;
- C_BaseUnit::TSinkType kind = (C_BaseUnit::TSinkType)-1;
- if ( (p_d = y->param_idx_by_sym( P.parm.c_str())) > -1 )
- kind = C_BaseUnit::SINK_PARAM;
- else if ( (p_d = y->var_idx_by_sym( P.parm.c_str())) > -1 )
- kind = C_BaseUnit::SINK_VAR;
- if ( p_d == -1 ) {
- fprintf( stderr, "%s \"%s\" (type \"%s\") has no parameter or variable named \"%s\"\n",
- y->class_name(), y->label(), y->species(), P.parm.c_str());
- continue;
- }
-
- switch ( kind ) {
- case C_BaseUnit::SINK_PARAM:
- if ( y->_targets.size() > 1 ) {
- y = y->make_clone_independent( static_cast<C_BaseNeuron*>(*Ut)); // lest brethren synapses to other targets be clobbered
- }
- y->param_value(p_d) = P.enable ? P.value : __CNUDT[y->type()].stock_param_values[p_d];
- y->param_changed_hook();
- break;
- case C_BaseUnit::SINK_VAR:
- y-> var_value(p_d) = P.value;
- break;
- }
-
- current_tag_assigned_labels.push_back( y->label());
- }
- }
- if ( current_tag_assigned_labels.empty() ) {
- fprintf( stderr, "No synapse connecting any of \"%s\" to \"%s\"\n", P.pattern.c_str(), P.target.c_str());
- return -2;
- }
-
- if ( verbosely > 3 ) {
- printf( " set ");
- for ( auto S = current_tag_assigned_labels.begin(); S != current_tag_assigned_labels.end(); S++ )
- printf( "%s%s",
- (S == current_tag_assigned_labels.begin()) ? "" : ", ", S->c_str());
- printf( " {%s} = %g\n", P.parm.c_str(), P.value);
- }
- }
-
- if ( !(_status & CN_MDL_DONT_COALESCE) )
- coalesce_synapses();
-
- return 0;
+ auto process_tag = [&] (const STagGroupSynapseParmSet& P,
+ regex_t& REsrc, regex_t& REtgt) -> void {
+ vector<string> current_tag_assigned_labels;
+
+ bool do_gsyn = (P.parm == "gsyn");
+
+ if ( options.verbosely > 5 )
+ printf( "== setting %s -> %s {%s} = %g...\n", P.pattern.c_str(), P.target.c_str(), P.parm.c_str(), P.value);
+
+ for ( auto& Uai : units ) {
+ if ( not Uai->is_neuron() )
+ continue;
+ if ( regexec( &REsrc, Uai->label(), 0, 0, 0) == REG_NOMATCH )
+ continue;
+ auto& Ua = *static_cast<C_BaseNeuron*>(Uai);
+
+ for ( auto& Ubi : units ) {
+ if ( not Ubi->is_neuron() )
+ continue;
+ if ( regexec( &REtgt, Ubi->label(), 0, 0, 0) == REG_NOMATCH ) /* || Ua == Ub */
+ continue;
+ auto& Ub = *static_cast<C_BaseNeuron*>(Ubi);
+ auto y = Ua.connects_via(Ub);
+ if ( !y )
+ continue;
+
+ if ( do_gsyn ) {
+ y->set_g_on_target( Ub, P.value);
+ current_tag_assigned_labels.push_back( y->label());
+ continue;
+ }
+
+ int p_d = -1;
+ C_BaseUnit::TSinkType kind = (C_BaseUnit::TSinkType)-1;
+ if ( (p_d = y->param_idx_by_sym( P.parm)) > -1 )
+ kind = C_BaseUnit::SINK_PARAM;
+ else if ( (p_d = y->var_idx_by_sym( P.parm)) > -1 )
+ kind = C_BaseUnit::SINK_VAR;
+ if ( p_d == -1 ) {
+ fprintf( stderr, "%s \"%s\" (type \"%s\") has no parameter or variable named \"%s\"\n",
+ y->class_name(), y->label(), y->species(), P.parm.c_str());
+ continue;
+ }
+
+ switch ( kind ) {
+ case C_BaseUnit::SINK_PARAM:
+ if ( y->_targets.size() > 1 )
+ y = y->make_clone_independent(
+ &Ub); // lest brethren synapses to other targets be clobbered
+ y->param_value(p_d) = P.enable ? P.value : __CNUDT[y->type()].stock_param_values[p_d];
+ y->param_changed_hook();
+ break;
+ case C_BaseUnit::SINK_VAR:
+ y->var_value(p_d) = P.value;
+ break;
+ }
+
+ current_tag_assigned_labels.push_back( y->label());
+ }
+ }
+ if ( current_tag_assigned_labels.empty() ) {
+ fprintf( stderr, "No synapse connecting any of \"%s\" to \"%s\"\n", P.pattern.c_str(), P.target.c_str());
+ return;
+ }
+
+ if ( options.verbosely > 3 )
+ printf( " set [%s]{%s} = %g\n",
+ join(current_tag_assigned_labels, ", ").c_str(),
+ P.parm.c_str(), P.value);
+ };
+
+ for ( auto& P : tags ) {
+ regex_t REsrc, REtgt;
+ if (0 != regcomp( &REsrc, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB) ) { // P->pattern acting as src
+ fprintf( stderr, "Invalid regexp in process_paramset_static_tags (src): \"%s\"\n", P.pattern.c_str());
+ continue;
+ }
+ if (0 != regcomp( &REtgt, P.target.c_str(), REG_EXTENDED | REG_NOSUB) ) {
+ fprintf( stderr, "Invalid regexp in process_paramset_static_tags (tgt): \"%s\"\n", P.target.c_str());
+ continue;
+ }
+
+ process_tag( P, REsrc, REtgt);
+ }
+
+ coalesce_synapses();
+
+ return 0;
}
@@ -1224,78 +1227,77 @@ cnrun::CModel::
coalesce_synapses()
{
startover:
- for_all_synapses (U1) {
- C_BaseSynapse *y1 = static_cast<C_BaseSynapse*>(*U1);
- for_all_synapses (U2) {
- if ( *U2 == *U1 )
- continue;
-
- C_BaseSynapse *y2 = static_cast<C_BaseSynapse*>(*U2);
- if ( y1->_source == y2->_source &&
- (*U1) -> is_identical( **U2) ) {
-
- if ( verbosely > 5 )
- printf( "coalescing \"%s\" and \"%s\"\n", y1->_label, y2->_label);
- for ( C_BaseSynapse::lni T = y2->_targets.begin(); T != y2->_targets.end(); T++ ) {
- y1->_targets.push_back( *T);
- (*T)->_dendrites[y1] = (*T)->_dendrites[y2];
- }
- snprintf( y1->_label, CN_MAX_LABEL_SIZE-1, "%s:%zu", y1->_source->_label, y1->_targets.size());
-
- delete y2;
-
- goto startover;
- }
- }
- }
+ for ( auto& U1i : units ) {
+ if ( not U1i->is_synapse() )
+ continue;
+ auto& U1 = *static_cast<C_BaseSynapse*>(U1i);
+ for ( auto& U2i : units ) {
+                auto& U2 = *static_cast<C_BaseSynapse*>(U2i);  // FIXME(review): U2i lacks an is_synapse() guard (the outer loop has one; the removed for_all_synapses visited synapses only) — a neuron here makes this cast, and the _source access below, invalid
+ if ( &U2 == &U1 )
+ continue;
+
+ if ( U1._source == U2._source && U1.is_identical( U2) ) {
+ if ( options.verbosely > 5 )
+ printf( "coalescing \"%s\" and \"%s\"\n", U1.label(), U2.label());
+ for ( auto& T : U2._targets ) {
+ U1._targets.push_back( T);
+ T->_dendrites[&U1] = T->_dendrites[&U2];
+ }
+ snprintf( U1._label, C_BaseUnit::max_label_size-1,
+ "%s:%zu", U1._source->label(), U1._targets.size());
+
+ delete &U2;
+
+ goto startover; // because we have messed with both iterators
+ }
+ }
+ }
}
-
-
int
cnrun::CModel::
process_paramset_source_tags( const list<STagGroupSource> &tags)
{
- regex_t RE;
- for ( auto& P : tags ) {
- if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {
- fprintf( stderr, "Invalid regexp in process_paramset_source_tags: \"%s\"\n", P.pattern.c_str());
- return -1;
- }
-
- for_all_units (U) {
- if ( regexec( &RE, (*U)->label(), 0, 0, 0) == 0 )
- continue;
-
- int p_d = -1;
- C_BaseUnit::TSinkType kind = (C_BaseUnit::TSinkType)-1;
- if ( (p_d = (*U)->param_idx_by_sym( P.parm.c_str())) > -1 )
- kind = C_BaseUnit::SINK_PARAM;
- else if ( (p_d = (*U)->var_idx_by_sym( P.parm.c_str())) > -1 )
- kind = C_BaseUnit::SINK_VAR;
- if ( p_d == -1 ) {
- fprintf( stderr, "%s \"%s\" (type \"%s\") has no parameter or variable named \"%s\"\n",
- (*U)->class_name(), (*U)->label(), (*U)->species(), P.parm.c_str());
- continue;
- }
-
- if ( P.enable ) {
- (*U) -> attach_source( P.source, kind, p_d);
- if ( verbosely > 3 )
- printf( "Connected source \"%s\" to \"%s\"{%s}\n",
- P.source->name.c_str(), (*U)->label(), P.parm.c_str());
- } else {
- (*U) -> detach_source( P.source, kind, p_d);
- if ( verbosely > 3 )
- printf( "Disconnected source \"%s\" from \"%s\"{%s}\n",
- P.source->name.c_str(), (*U)->label(), P.parm.c_str());
- }
- }
- }
-
- return 0;
+ regex_t RE;
+ for ( auto& P : tags ) {
+                if (0 != regcomp( &RE, P.pattern.c_str(), REG_EXTENDED | REG_NOSUB)) {  // FIXME(review): RE is recompiled per tag group but never regfree()d — leaks once per iteration
+ fprintf( stderr, "Invalid regexp in process_paramset_source_tags: \"%s\"\n", P.pattern.c_str());
+ continue;
+ }
+
+ for ( auto& U : units ) {
+ if ( regexec( &RE, U->label(), 0, 0, 0) == REG_NOMATCH )
+ continue;
+
+ int p_d = -1;
+ C_BaseUnit::TSinkType kind = (C_BaseUnit::TSinkType)-1;
+ if ( (p_d = U->param_idx_by_sym( P.parm)) > -1 )
+ kind = C_BaseUnit::SINK_PARAM;
+ else if ( (p_d = U->var_idx_by_sym( P.parm)) > -1 )
+ kind = C_BaseUnit::SINK_VAR;
+ if ( p_d == -1 ) {
+ fprintf( stderr, "%s \"%s\" (type \"%s\") has no parameter or variable named \"%s\"\n",
+ U->class_name(), U->label(), U->species(), P.parm.c_str());
+ continue;
+ }
+
+ if ( P.enable ) {
+ U -> attach_source( P.source, kind, p_d);
+ if ( options.verbosely > 3 )
+ printf( "Connected source \"%s\" to \"%s\"{%s}\n",
+ P.source->name.c_str(), U->label(), P.parm.c_str());
+ } else {
+ U -> detach_source( P.source, kind, p_d);
+ if ( options.verbosely > 3 )
+ printf( "Disconnected source \"%s\" from \"%s\"{%s}\n",
+ P.source->name.c_str(), U->label(), P.parm.c_str());
+ }
+ }
+ }
+
+ return 0;
}
@@ -1305,112 +1307,118 @@ inline const char*
__attribute__ ((pure))
pl_ending( size_t cnt)
{
- return cnt == 1 ? "" : "s";
+ return cnt == 1 ? "" : "s";
}
void
cnrun::CModel::
-dump_metrics( FILE *strm)
+dump_metrics( FILE *strm) const
{
- fprintf( strm,
- "\nModel \"%s\"%s:\n"
- " %5zd unit%s total (%zd Neuron%s, %zd Synapse%s):\n"
- " %5zd hosted,\n"
- " %5zd standalone\n"
- " %5zd discrete dt-bound\n"
- " %5zd Listening unit%s\n"
- " %5zd Spikelogging neuron%s\n"
- " %5zd Unit%s being tuned continuously\n"
- " %5zd Unit%s being tuned periodically\n"
- " %5zd Spontaneously firing neuron%s\n"
- " %5zd Multiplexing synapse%s\n"
- " %6zd vars on integration vector\n\n",
- name.c_str(), (_status & CN_MDL_DISKLESS) ? " (diskless)" : "",
- units(), pl_ending(units()),
- total_neuron_cnt(), pl_ending(total_neuron_cnt()),
- total_synapse_cnt(), pl_ending(total_synapse_cnt()),
- hosted_unit_cnt(),
- standalone_unit_cnt(),
- ddtbound_unit_cnt(),
- listening_unit_cnt(), pl_ending(listening_unit_cnt()),
- spikelogging_neuron_cnt(), pl_ending(spikelogging_neuron_cnt()),
- unit_with_continuous_sources_cnt(), pl_ending(unit_with_continuous_sources_cnt()),
- unit_with_periodic_sources_cnt(), pl_ending(unit_with_periodic_sources_cnt()),
- conscious_neuron_cnt(), pl_ending(conscious_neuron_cnt()),
- mx_syn_list.size(), pl_ending(mx_syn_list.size()),
- _var_cnt-1);
- if ( _status & CN_MDL_HAS_DDTB_UNITS )
- fprintf( strm, "Discrete dt: %g msec\n", discrete_dt());
+ fprintf( strm,
+ "\nModel \"%s\"%s:\n"
+ " %5zd unit%s total (%zd Neuron%s, %zd Synapse%s):\n"
+ " %5zd hosted,\n"
+ " %5zd standalone\n"
+ " %5zd discrete dt-bound\n"
+ " %5zd Listening unit%s\n"
+ " %5zd Spikelogging neuron%s\n"
+ " %5zd Unit%s being tuned continuously\n"
+ " %5zd Unit%s being tuned periodically\n"
+ " %5zd Spontaneously firing neuron%s\n"
+ " %5zd Multiplexing synapse%s\n"
+ " %6zd vars on integration vector\n\n",
+ name.c_str(), is_diskless ? " (diskless)" : "",
+ units.size(), pl_ending(units.size()),
+ n_total_neurons(), pl_ending(n_total_neurons()),
+ n_total_synapses(), pl_ending(n_total_synapses()),
+ n_hosted_units(),
+ n_standalone_units(),
+ ddtbound_neurons.size() + ddtbound_synapses.size(),
+ listening_units.size(), pl_ending(listening_units.size()),
+ spikelogging_neurons.size(), pl_ending(spikelogging_neurons.size()),
+ units_with_continuous_sources.size(), pl_ending(units_with_continuous_sources.size()),
+ units_with_periodic_sources.size(), pl_ending(units_with_periodic_sources.size()),
+ conscious_neurons.size(), pl_ending(conscious_neurons.size()),
+ multiplexing_synapses.size(), pl_ending(multiplexing_synapses.size()),
+ _var_cnt-1);
+ if ( have_ddtb_units )
+ fprintf( strm, "Discrete dt: %g msec\n", discrete_dt());
}
void
cnrun::CModel::
-dump_state( FILE *strm)
+dump_state( FILE *strm) const
{
- fprintf( strm,
- "Model time: %g msec\n"
- "Integrator dt_min: %g msec, dt_max: %g msec\n"
- "Logging at: %g msec\n\n",
- model_time(),
- dt_min(), dt_max(),
- listen_dt);
+ fprintf( strm,
+ "Model time: %g msec\n"
+ "Integrator dt_min: %g msec, dt_max: %g msec\n"
+ "Logging at: %g msec\n\n",
+ model_time(),
+ dt_min(), dt_max(),
+ options.listen_dt);
}
void
cnrun::CModel::
-dump_units( FILE *strm)
+dump_units( FILE *strm) const
{
- fprintf( strm, "\nUnit types in the model:\n");
-
- set<int> found_unit_types;
- unsigned p = 0;
-
- fprintf( strm, "\n===== Neurons:\n");
- for_all_units (U)
- if ( (*U)->is_neuron() && found_unit_types.count( (*U)->type()) == 0 ) {
- found_unit_types.insert( (*U)->type());
-
- fprintf( strm, "--- %s: %s\nParameters: ---\n",
- (*U)->species(), (*U)->type_description());
- for ( p = 0; p < (*U)->p_no(); p++ )
- if ( *(*U)->param_sym(p) != '.' || verbosely > 5 )
- fprintf( strm, "%2d: %-5s\t= %s %s\n",
- p, (*U)->param_sym(p),
- cnrun::str::double_dot_aligned_s( (*U)->param_value(p), 4, 6).c_str(),
- (*U)->param_name(p));
- fprintf( strm, "Variables: ---\n");
- for ( p = 0; p < (*U)->v_no(); p++ )
- if ( *(*U)->var_sym(p) != '.' || verbosely > 5 )
- fprintf( strm, "%2d: %-5s\t= %s %s\n",
- p, (*U)->var_sym(p),
- cnrun::str::double_dot_aligned_s( (*U)->var_value(p), 4, 6).c_str(),
- (*U)->var_name(p));
- }
- fprintf( strm, "\n===== Synapses:\n");
- for_all_units (U)
- if ( (*U)->is_synapse() && found_unit_types.count( (*U)->type()) == 0 ) {
- found_unit_types.insert( (*U)->type());
-
- fprintf( strm, "--- %s: %s\nParameters: ---\n",
- (*U)->species(), (*U)->type_description());
- fprintf( strm, " parameters:\n");
- for ( p = 0; p < (*U)->p_no(); p++ )
- if ( *(*U)->param_sym(p) != '.' || verbosely > 5 )
- fprintf( strm, "%2d: %-5s\t= %s %s\n",
- p, (*U)->param_sym(p),
- cnrun::str::double_dot_aligned_s( (*U)->param_value(p), 4, 6).c_str(),
- (*U)->param_name(p));
- fprintf( strm, "Variables: ---\n");
- for ( p = 0; p < (*U)->v_no(); p++ )
- if ( *(*U)->var_sym(p) != '.' || verbosely > 5 )
- fprintf( strm, "%2d: %-5s\t= %s %s\n",
- p, (*U)->var_sym(p),
- cnrun::str::double_dot_aligned_s( (*U)->var_value(p), 4, 6).c_str(),
- (*U)->var_name(p));
-
- }
- fprintf( strm, "\n");
+ fprintf( strm, "\nUnit types in the model:\n");
+
+ set<int> found_unit_types;
+ unsigned p = 0;
+
+ fprintf( strm, "\n===== Neurons:\n");
+ for ( auto& U : units )
+ if ( U->is_neuron() && found_unit_types.count( U->type()) == 0 ) {
+ found_unit_types.insert( U->type());
+
+ fprintf( strm, "--- %s: %s\nParameters: ---\n",
+ U->species(), U->type_description());
+ for ( p = 0; p < U->p_no(); ++p )
+ if ( *U->param_sym(p) != '.' || options.verbosely > 5 )
+ fprintf( strm, "%2d: %-5s\t= %s %s\n",
+ p, U->param_sym(p),
+ double_dot_aligned_s( U->param_value(p), 4, 6).c_str(),
+ U->param_name(p));
+ fprintf( strm, "Variables: ---\n");
+ for ( p = 0; p < U->v_no(); ++p )
+ if ( *U->var_sym(p) != '.' || options.verbosely > 5 )
+ fprintf( strm, "%2d: %-5s\t= %s %s\n",
+ p, U->var_sym(p),
+ double_dot_aligned_s( U->var_value(p), 4, 6).c_str(),
+ U->var_name(p));
+ }
+ fprintf( strm, "\n===== Synapses:\n");
+ for ( auto& U : units )
+ if ( U->is_synapse() && found_unit_types.count( U->type()) == 0 ) {
+ found_unit_types.insert( U->type());
+
+ fprintf( strm, "--- %s: %s\nParameters: ---\n",
+ U->species(), U->type_description());
+ fprintf( strm, " parameters:\n");
+ for ( p = 0; p < U->p_no(); ++p )
+ if ( *U->param_sym(p) != '.' || options.verbosely > 5 )
+ fprintf( strm, "%2d: %-5s\t= %s %s\n",
+ p, U->param_sym(p),
+ double_dot_aligned_s( U->param_value(p), 4, 6).c_str(),
+ U->param_name(p));
+ fprintf( strm, "Variables: ---\n");
+ for ( p = 0; p < U->v_no(); ++p )
+ if ( *U->var_sym(p) != '.' || options.verbosely > 5 )
+ fprintf( strm, "%2d: %-5s\t= %s %s\n",
+ p, U->var_sym(p),
+ double_dot_aligned_s( U->var_value(p), 4, 6).c_str(),
+ U->var_name(p));
+
+ }
+ fprintf( strm, "\n");
}
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/model.hh b/upstream/src/libcn/model.hh
index a1b2926..08ddab7 100644
--- a/upstream/src/libcn/model.hh
+++ b/upstream/src/libcn/model.hh
@@ -20,8 +20,8 @@ parameters.
--------------------------------------------------------------------------*/
-#ifndef CN_LIBCN_MODEL_H_
-#define CN_LIBCN_MODEL_H_
+#ifndef CNRUN_LIBCN_MODEL_H_
+#define CNRUN_LIBCN_MODEL_H_
#include <csignal>
#include <list>
@@ -33,6 +33,8 @@ parameters.
#include "gsl/gsl_rng.h"
+#include "libstilton/exprparser.hh"
+#include "forward-decls.hh"
#include "base-neuron.hh"
#include "base-synapse.hh"
#include "hosted-neurons.hh"
@@ -47,8 +49,8 @@ parameters.
using namespace std;
-namespace cnrun {
+namespace cnrun {
struct SModelOptions {
bool listen_1varonly:1,
@@ -62,7 +64,7 @@ struct SModelOptions {
display_progress_percent:1,
display_progress_time:1;
int precision;
- float spike_threshold,
+ double spike_threshold,
spike_lapse,
listen_dt;
double //discrete_dt,
@@ -74,10 +76,10 @@ struct SModelOptions {
sdf_sigma;
int verbosely;
- list<SVariable>
+ list<cnrun::stilton::SVariable>
variables;
- SOptions ()
+ SModelOptions ()
: listen_1varonly (true), listen_deferwrite (false), listen_binary (false),
sort_units (true),
log_dt (false),
@@ -85,7 +87,6 @@ struct SModelOptions {
log_sdf (false),
display_progress_percent (true),
display_progress_time (false),
- dont_coalesce (false),
precision (8),
spike_threshold (0.), spike_lapse (3.),
listen_dt(1.),
@@ -100,445 +101,384 @@ struct SModelOptions {
class CModel {
public:
- string name;
+ // ctor, dtor
+ CModel( const string& name, CIntegrate_base*, const SModelOptions&);
+ ~CModel();
- private:
- int _status;
- public:
- int status() const
+ string name;
+
+ SModelOptions
+ options;
+
+ // Unit lookup
+ C_BaseUnit *unit_by_label( const string&) const;
+ C_BaseNeuron *neuron_by_label( const string&) const;
+ C_BaseSynapse *synapse_by_label( const string&) const;
+ unsigned short longest_label() const
+ { return _longest_label; }
+
+ // Unit tally
+ size_t n_hosted_units() const
+ { return hosted_neurons.size() + hosted_synapses.size(); }
+ size_t n_standalone_units() const
+ { return standalone_neurons.size() + standalone_synapses.size(); }
+ size_t n_ddtbound_units() const
+ { return ddtbound_neurons.size() + ddtbound_synapses.size(); }
+ size_t n_total_neurons() const
{
- return _status;
+ return hosted_neurons.size()
+ + standalone_neurons.size()
+ + ddtbound_neurons.size();
+ }
+ size_t n_total_synapses() const
+ {
+ return hosted_synapses.size()
+ + standalone_synapses.size()
+ + ddtbound_synapses.size();
}
- // structure ------
- friend class C_BaseSynapse;
- friend class C_HostedNeuron;
- friend class C_HostedConductanceBasedNeuron;
- friend class C_HostedRateBasedNeuron;
- friend class C_HostedSynapse;
- friend class CNeuronMap;
- friend class CSynapseMap;
+ // 0. Model composition
+
+ // There are two ways of adding units:
+ // - create units outside, then 'include' them in a model;
+        //  - specify which unit you want, by type, and create
+        //    them directly in the model ('add').
+
+ //enum class TIncludeOption { is_last, is_notlast, }; // defined in hosted-unit.hh
+ // if option == is_last, do allocations of hosted units' vars immediately
+ // otherwise defer until addition is done with option == is_notlast
+ // or the user calls finalize_additions
+ int include_unit( C_HostedNeuron*, TIncludeOption option = TIncludeOption::is_last);
+ int include_unit( C_HostedSynapse*, TIncludeOption option = TIncludeOption::is_last);
+ int include_unit( C_StandaloneNeuron*);
+ int include_unit( C_StandaloneSynapse*);
+
+ C_BaseNeuron*
+ add_neuron_species( TUnitType, const string& label,
+ TIncludeOption = TIncludeOption::is_last,
+ double x = 0., double y = 0., double z = 0.);
+ C_BaseNeuron*
+ add_neuron_species( const string& type, const string& label,
+ TIncludeOption = TIncludeOption::is_last,
+ double x = 0., double y = 0., double z = 0.);
+
+ enum class TSynapseCloningOption { yes, no, };
+ C_BaseSynapse*
+ add_synapse_species( const string& type, const string& src_l, const string& tgt_l,
+ double g,
+ TSynapseCloningOption = TSynapseCloningOption::yes,
+ TIncludeOption = TIncludeOption::is_last);
+ void finalize_additions();
+
+ C_BaseSynapse*
+ add_synapse_species( TUnitType, C_BaseNeuron *src, C_BaseNeuron *tgt,
+ double g,
+ TSynapseCloningOption = TSynapseCloningOption::yes,
+ TIncludeOption = TIncludeOption::is_last);
+
+ enum class TExcludeOption { with_delete, no_delete, };
+ C_BaseUnit*
+ exclude_unit( C_BaseUnit*, TExcludeOption option = TExcludeOption::no_delete);
+        // return nullptr if option == with_delete, the excluded unit otherwise, even if it was not owned
+ void delete_unit( C_BaseUnit* u)
+ { exclude_unit( u, TExcludeOption::with_delete); }
+
+ // 1. NeuroMl interface
+ enum class TNMLImportOption { merge, reset, };
+ enum TNMLIOResult {
+ ok = 0, nofile, noelem, badattr, badcelltype, biglabel, structerror,
+ };
+ int import_NetworkML( const string& fname, TNMLImportOption);
+ int import_NetworkML( xmlDoc*, const string& fname, TNMLImportOption); // fname is merely informational here
+ int export_NetworkML( const string& fname);
+ int export_NetworkML( xmlDoc*);
+
+ // 2. Bulk operations
+ enum class TResetOption { with_params, no_params, };
+ void reset( TResetOption = TResetOption::no_params);
+ void reset_state_all_units();
+
+ void cull_deaf_synapses(); // those with gsyn == 0
+ void cull_blind_synapses(); // those with _source == nullptr
+
+ // 3. Informational
+ size_t vars() const { return _var_cnt; }
+ void dump_metrics( FILE *strm = stdout) const;
+ void dump_state( FILE *strm = stdout) const;
+ void dump_units( FILE *strm = stdout) const;
+
+ // 4. Set unit parameters
+ // high-level functions to manipulate unit behaviour, set params, & connect sources
+ struct STagGroup {
+ string pattern;
+ bool enable;
+ STagGroup( const string& a, bool b = true)
+ : pattern (a), enable (b)
+ {}
+ };
+ struct STagGroupListener : STagGroup {
+ int bits;
+ STagGroupListener( const string& a, bool b, int c = 0)
+ : STagGroup (a, b), bits (c)
+ {}
+ };
+ int process_listener_tags( const list<STagGroupListener>&);
+
+ struct STagGroupSpikelogger : STagGroup {
+ double period, sigma, from;
+ STagGroupSpikelogger( const string& a, bool b,
+ double c = 0., double d = 0., double e = 0.) // defaults disable sdf computation
+ : STagGroup (a, b), period (c), sigma (d), from (e)
+ {}
+ };
+ int process_spikelogger_tags( const list<STagGroupSpikelogger>&);
+ int process_putout_tags( const list<STagGroup>&);
+
+ struct STagGroupDecimate : STagGroup {
+ float fraction;
+ STagGroupDecimate( const string& a, double c)
+ : STagGroup (a), fraction (c)
+ {}
+ };
+ int process_decimate_tags( const list<STagGroupDecimate>&);
+
+ struct STagGroupNeuronParmSet : STagGroup {
+ string parm;
+ double value;
+ STagGroupNeuronParmSet( const string& a, bool b, const string& c, double d) // b == false to revert to stock
+ : STagGroup (a, b), parm (c), value (d)
+ {}
+ };
+ struct STagGroupSynapseParmSet : STagGroupNeuronParmSet {
+ string target;
+ STagGroupSynapseParmSet( const string& a, const string& z, bool b, const string& c, double d)
+ : STagGroupNeuronParmSet (a, b, c, d), target (z)
+ {}
+ };
+ int process_paramset_static_tags( const list<STagGroupNeuronParmSet>&);
+ int process_paramset_static_tags( const list<STagGroupSynapseParmSet>&);
+
+ struct STagGroupSource : STagGroup {
+ string parm;
+ C_BaseSource *source;
+ STagGroupSource( const string& a, bool b, const string& c, C_BaseSource *d) // b == false to revert to stock
+ : STagGroup (a, b), parm (c), source (d)
+ {}
+ };
+ int process_paramset_source_tags( const list<STagGroupSource>&);
+
+ C_BaseSource*
+ source_by_id( const string& id) const
+ {
+ for ( auto& S : sources )
+ if ( S->name == id )
+ return S;
+ return nullptr;
+ }
+ // 5. Running
+ unsigned advance( double dist, double *cpu_time_p = nullptr) __attribute__ ((hot));
+ double model_time() const { return V[0]; }
- public:
- size_t units() const __attribute__ ((pure))
- { return unit_list.size(); }
+ double dt() const { return _integrator->dt; }
+ double dt_min() const { return _integrator->_dt_min; }
+ double dt_max() const { return _integrator->_dt_max; }
+ unsigned long cycle() const { return _cycle; }
+ const double& model_discrete_time() const { return _discrete_time; }
+ const double& discrete_dt() const { return _discrete_dt; }
+
+ // 9. misc
+ double rng_sample() const
+ {
+ return gsl_rng_uniform_pos( _rng);
+ }
private:
- unsigned long
- _global_unit_id_reservoir;
- private:
- list<C_BaseUnit*> unit_list; // all units together
+ friend class C_BaseUnit;
+ friend class C_BaseNeuron;
+ friend class C_BaseSynapse;
+ friend class C_HostedNeuron;
+ friend class C_HostedConductanceBasedNeuron;
+ friend class C_HostedRateBasedNeuron;
+ friend class C_HostedSynapse;
+ friend class CNeuronMap;
+ friend class CSynapseMap;
+ friend class CSynapseMxAB_dd;
+ friend class SSpikeloggerService;
+
+ friend class CIntegrate_base;
+ friend class CIntegrateRK65;
+
+ // supporting functions
+ void register_listener( C_BaseUnit*);
+ void unregister_listener( C_BaseUnit*);
+ void register_spikelogger( C_BaseNeuron*);
+ void unregister_spikelogger( C_BaseNeuron*);
+ void register_mx_synapse( C_BaseSynapse*);
+ void unregister_mx_synapse( C_BaseSynapse*);
+
+ void register_unit_with_sources( C_BaseUnit*);
+ void unregister_unit_with_sources( C_BaseUnit*);
+ void _include_base_unit( C_BaseUnit*);
+
+ int _process_populations( xmlNode*);
+ int _process_population_instances( xmlNode*, const xmlChar*, const xmlChar*);
+
+ int _process_projections( xmlNode*);
+ int _process_projection_connections( xmlNode*, const xmlChar*, const xmlChar*,
+ const xmlChar *src_grp_prefix,
+ const xmlChar *tgt_grp_prefix);
+
+ void _setup_schedulers();
+ void coalesce_synapses();
+ void prepare_advance();
+ unsigned _do_advance_on_pure_hosted( double, double*) __attribute__ ((hot));
+ unsigned _do_advance_on_pure_standalone( double, double*) __attribute__ ((hot));
+ unsigned _do_advance_on_pure_ddtbound( double, double*) __attribute__ ((hot));
+ unsigned _do_advance_on_mixed( double, double*) __attribute__ ((hot));
+
+ void make_listening_units_tell()
+ {
+ for ( auto& U : listening_units )
+ U -> tell();
+ }
+ void make_conscious_neurons_possibly_fire()
+ {
+ for ( auto& U : conscious_neurons )
+ U->possibly_fire();
+ }
+ void make_units_with_periodic_sources_apprise_from_sources()
+ {
+ for ( auto& U : units_with_periodic_sources )
+ U->apprise_from_sources();
+ }
+ void make_units_with_continuous_sources_apprise_from_sources()
+ {
+ for ( auto& U : units_with_continuous_sources )
+ U->apprise_from_sources();
+ }
+
+ static double
+ model_time( vector<double> &x)
+ {
+ return x[0];
+ }
+
+ // contents
+ list<C_BaseUnit*>
+ units; // all units together
// these have derivative(), are churned in _integrator->cycle()
- list<C_HostedNeuron*> hosted_neu_list;
- list<C_HostedSynapse*> hosted_syn_list;
+ list<C_HostedNeuron*>
+ hosted_neurons;
+ list<C_HostedSynapse*>
+ hosted_synapses;
// these need preadvance() and fixate()
- list<C_StandaloneNeuron*> standalone_neu_list;
- list<C_StandaloneSynapse*> standalone_syn_list;
+ list<C_StandaloneNeuron*>
+ standalone_neurons;
+ list<C_StandaloneSynapse*>
+ standalone_synapses;
// ... also these, but at discrete dt only
// (only the standalone map units currently)
- list<C_StandaloneNeuron*> ddtbound_neu_list;
- list<C_StandaloneSynapse*> ddtbound_syn_list;
+ list<C_StandaloneNeuron*>
+ ddtbound_neurons;
+ list<C_StandaloneSynapse*>
+ ddtbound_synapses;
// neurons that can possibly_fire() (various oscillators), and
// have no inputs, and hence not dependent on anything else
- list<C_BaseNeuron*> conscious_neu_list;
+ list<C_BaseNeuron*>
+ conscious_neurons;
- // various lists to avoid traversing all of them in unit_list:
+ // various lists to avoid traversing all of them in units:
// listeners, spikeloggers & readers
- list<C_BaseUnit*> lisn_unit_list;
+ list<C_BaseUnit*>
+ listening_units;
// uses a meaningful do_spikelogging_or_whatever
- list<C_BaseNeuron*> spikelogging_neu_list;
+ list<C_BaseNeuron*>
+ spikelogging_neurons;
// `Multiplexing AB' synapses are treated very specially
- list<C_BaseSynapse*> mx_syn_list;
+ list<C_BaseSynapse*>
+ multiplexing_synapses;
// those for which apprise_from_source( model_time()) will be called
- list<C_BaseUnit*> units_with_continuous_sources;
+ list<C_BaseUnit*>
+ units_with_continuous_sources;
// same, but not every cycle
- list<C_BaseUnit*> units_with_periodic_sources;
- list<double> regular_periods;
- list<unsigned> regular_periods_last_checked;
-
- public:
- C_BaseUnit *unit_by_label( const char *) const __attribute__ ((pure));
- C_BaseNeuron *neuron_by_label( const char *) const __attribute__ ((pure));
- C_BaseSynapse *synapse_by_label( const char *) const __attribute__ ((pure));
-
- size_t hosted_unit_cnt() const __attribute__ ((pure))
- { return hosted_neu_list.size() + hosted_syn_list.size(); }
- size_t standalone_unit_cnt() const __attribute__ ((pure))
- { return standalone_neu_list.size() + standalone_syn_list.size(); }
- size_t ddtbound_unit_cnt() const __attribute__ ((pure))
- { return ddtbound_neu_list.size() + ddtbound_syn_list.size(); }
- size_t total_neuron_cnt() const __attribute__ ((pure))
- { return hosted_neu_list.size()
- + standalone_neu_list.size()
- + ddtbound_neu_list.size(); }
- size_t total_synapse_cnt() const __attribute__ ((pure))
- { return hosted_syn_list.size()
- + standalone_syn_list.size()
- + ddtbound_syn_list.size(); }
- size_t conscious_neuron_cnt() const __attribute__ ((pure))
- { return conscious_neu_list.size(); }
- size_t listening_unit_cnt() const __attribute__ ((pure))
- { return lisn_unit_list.size(); }
- size_t spikelogging_neuron_cnt() const __attribute__ ((pure))
- { return spikelogging_neu_list.size(); }
-
- size_t unit_with_continuous_sources_cnt() const __attribute__ ((pure))
- { return units_with_continuous_sources.size(); }
- size_t unit_with_periodic_sources_cnt() const __attribute__ ((pure))
- { return units_with_periodic_sources.size(); }
-
- // if is_last == true, do allocations of hosted units' vars immediately
- // otherwise defer until addition is done with is_last == true
- // or the user calls finalize_additions
- int include_unit( C_HostedNeuron*, bool is_last = true);
- int include_unit( C_HostedSynapse*, bool is_last = true);
- int include_unit( C_StandaloneNeuron*);
- int include_unit( C_StandaloneSynapse*);
-
- C_BaseNeuron *add_neuron_species( TUnitType type, const char *label, bool finalize = true,
- double x = 0., double y = 0., double z = 0.);
- C_BaseNeuron *add_neuron_species( const char *type, const char *label, bool finalize = true,
- double x = 0., double y = 0., double z = 0.);
- C_BaseSynapse *add_synapse_species( const char *type, const char *src_l, const char *tgt_l,
- double g, bool allow_clone = true, bool finalize = true);
- void finalize_additions();
- private:
- C_BaseSynapse *add_synapse_species( TUnitType type, C_BaseNeuron *src, C_BaseNeuron *tgt,
- double g, bool allow_clone, bool finalize);
- void _include_base_unit( C_BaseUnit*);
-// int _check_new_synapse( C_BaseSynapse*);
- public:
- C_BaseUnit* exclude_unit( C_BaseUnit*, bool do_delete = false);
- // return nullptr if do_delete == true, the excluded unit otherwise, even if it was not owned
- void delete_unit( C_BaseUnit* u)
- { exclude_unit( u, true); }
- private:
- friend class C_BaseUnit;
- void register_listener( C_BaseUnit*);
- void unregister_listener( C_BaseUnit*);
- friend class C_BaseNeuron;
- friend class SSpikeloggerService;
- void register_spikelogger( C_BaseNeuron*);
- void unregister_spikelogger( C_BaseNeuron*);
- void register_mx_synapse( C_BaseSynapse*);
- void unregister_mx_synapse( C_BaseSynapse*);
-
- void register_unit_with_sources( C_BaseUnit*);
- void unregister_unit_with_sources( C_BaseUnit*);
+ list<C_BaseUnit*>
+ units_with_periodic_sources;
+ list<double>
+ regular_periods;
+ list<unsigned>
+ regular_periods_last_checked;
- private:
- unsigned short _longest_label;
- public:
- unsigned short longest_label() { return _longest_label; }
-
- public:
- // ctor, dtor
- CModel( const char *inname, CIntegrate_base *inRK65Setup, int instatus);
- ~CModel();
-
- void reset( bool also_reset_params = false);
- void reset_state_all_units();
-
- public:
- // NeuroMl interface
- int import_NetworkML( const char *fname, bool appending = false);
- int import_NetworkML( xmlDoc *doc, const char *fname, bool appending = false); // fname is merely informational here
- void cull_deaf_synapses(); // those with gsyn == 0
- void cull_blind_synapses(); // those with _source == nullptr
- int export_NetworkML( const char *fname);
- int export_NetworkML( xmlDoc *doc);
- void dump_metrics( FILE *strm = stdout);
- void dump_state( FILE *strm = stdout);
- void dump_units( FILE *strm = stdout);
- private:
- int _process_populations( xmlNode*);
- int _process_population_instances( xmlNode*, const xmlChar*, const xmlChar*);
-
- int _process_projections( xmlNode*);
- int _process_projection_connections( xmlNode*, const xmlChar*, const xmlChar*,
- const xmlChar *src_grp_prefix, const xmlChar *tgt_grp_prefix);
+ unsigned long
+ _global_unit_id_reservoir;
// the essential mechanical parts: ----
// hosted unit variables
- private:
- vector<double> V, // contains catenated var vectors of all constituent neurons and synapses
- W; // V and W alternate in the capacity of the main vector, so avoiding many a memcpy
- size_t _var_cnt; // total # of variables (to be) allocated in V an W, plus one for model_time
- public:
- size_t vars() { return _var_cnt; }
+ vector<double> V, // contains catenated var vectors of all constituent neurons and synapses
+ W; // V and W alternate in the capacity of the main vector, so avoiding many a memcpy
+        size_t _var_cnt;           // total # of variables (to be) allocated in V and W, plus one for model_time
// integrator interface
- private:
- friend class CIntegrate_base;
- friend class CIntegrateRK65;
- public:
- CIntegrate_base
- *_integrator;
- const double& model_time() const { return V[0]; }
-
- double& dt() const { return _integrator->dt; }
- double& dt_min() const { return _integrator->_dt_min; }
- double& dt_max() const { return _integrator->_dt_max; }
- // this one is internal
- friend class CSynapseMxAB_dd;
- private:
- const double& model_time( vector<double> &x) { return x[0]; }
-
- private:
- unsigned long
- _cycle;
- double _discrete_time;
- double _discrete_dt;
- public:
- unsigned long cycle() { return _cycle; }
- const double& model_discrete_time() { return _discrete_time; }
- const double& discrete_dt() { return _discrete_dt; }
-
- // simulation
- private:
- void _setup_schedulers();
- void prepare_advance();
-// void ctrl_c_handler( int);
- unsigned _do_advance_on_pure_hosted( double, double*) __attribute__ ((hot));
- unsigned _do_advance_on_pure_standalone( double, double*) __attribute__ ((hot));
- unsigned _do_advance_on_pure_ddtbound( double, double*) __attribute__ ((hot));
- unsigned _do_advance_on_mixed( double, double*) __attribute__ ((hot));
- public:
- unsigned advance( double dist, double *cpu_time_p = nullptr) __attribute__ ((hot));
-
- public:
- double spike_threshold, // above which neurons will detect a spike
- spike_lapse; // if occurs less than this after the unit's _last_spike_end
- // (begs to be moved to SSpikeloggerService)
-
- public:
- float listen_dt;
- private:
- ofstream
- *_dt_logger, *_spike_logger;
-
- public:
- // high-level functions to manipulate unit behaviour, set params, & connect sources
- struct STagGroup {
- string pattern;
- bool enable;
- STagGroup( const char *a, bool b = true)
- : pattern (a), enable (b) {}
- };
- struct STagGroupListener : STagGroup {
- int bits;
- STagGroupListener( const char *a, bool b, int c = 0)
- : STagGroup (a, b), bits (c) {}
- };
- int process_listener_tags( const list<STagGroupListener>&);
-
- struct STagGroupSpikelogger : STagGroup {
- double period, sigma, from;
- STagGroupSpikelogger( const char *a, bool b,
- double c = 0., double d = 0., double e = 0.) // defaults disable sdf computation
- : STagGroup (a, b), period (c), sigma (d), from (e) {}
- };
- int process_spikelogger_tags( const list<STagGroupSpikelogger>&);
-
- int process_putout_tags( const list<STagGroup>&);
-
- struct STagGroupDecimate : STagGroup {
- float fraction;
- STagGroupDecimate( const char *a, double c)
- : STagGroup (a), fraction (c) {}
- };
- int process_decimate_tags( const list<STagGroupDecimate>&);
-
- struct STagGroupNeuronParmSet : STagGroup {
- string parm;
- double value;
- STagGroupNeuronParmSet( const char *a, bool b, const char *c, double d) // b == false to revert to stock
- : STagGroup (a, b), parm (c), value (d)
- {}
- };
- struct STagGroupSynapseParmSet : STagGroupNeuronParmSet {
- string target;
- STagGroupSynapseParmSet( const char *a, const char *z, bool b, const char *c, double d)
- : STagGroupNeuronParmSet (a, b, c, d), target (z)
- {}
- };
- int process_paramset_static_tags( const list<STagGroupNeuronParmSet>&);
- int process_paramset_static_tags( const list<STagGroupSynapseParmSet>&);
- private:
- void coalesce_synapses(); // those which used to be clones then made independent
-
- public:
- struct STagGroupSource : STagGroup {
- string parm;
- C_BaseSource *source;
- STagGroupSource( const char *a, bool b, const char *c, C_BaseSource *d) // b == false to revert to stock
- : STagGroup (a, b), parm (c), source (d)
- {}
- };
- int process_paramset_source_tags( const list<STagGroupSource>&);
-
- list<C_BaseSource*> Sources;
- C_BaseSource* source_by_id( const char *id)
- {
- auto K = Sources.begin();
- while ( K != Sources.end() ) {
- if ( (*K)->name == id )
- return *K;
- K++;
- }
- return nullptr;
- }
-
- public:
- int verbosely;
+ CIntegrate_base
+ *_integrator;
- gsl_rng *_rng;
+ unsigned long
+ _cycle;
+ double _discrete_time;
+ double _discrete_dt;
- double rng_sample()
- {
- return gsl_rng_uniform_pos( _rng);
- }
+ list<C_BaseSource*>
+ sources;
- // various convenience fields and members
- typedef list<C_BaseUnit*>::iterator lBUi;
- typedef list<C_BaseUnit*>::const_iterator lBUci;
- typedef list<C_BaseUnit*>::reverse_iterator lBUri;
- typedef list<C_BaseUnit*>::const_reverse_iterator lBUcri;
+ ofstream
+ *_dt_logger,
+ *_spike_logger;
- lBUi ulist_begin() { return unit_list.begin(); }
- lBUi ulist_end() { return unit_list.end(); }
- lBUri ulist_rbegin() { return unit_list.rbegin(); }
- lBUri ulist_rend() { return unit_list.rend(); }
+ bool is_ready:1,
+ is_diskless:1,
+ have_ddtb_units:1;
- lBUi lulist_begin() { return lisn_unit_list.begin(); }
- lBUi lulist_end() { return lisn_unit_list.end(); }
- list<C_BaseNeuron*>:: iterator knlist_begin() { return spikelogging_neu_list.begin(); }
- list<C_BaseNeuron*>:: iterator knlist_end() { return spikelogging_neu_list.end(); }
+ unsigned short
+ _longest_label;
- // lBUi rlist_rbegin() { return reader_unit_list.rbegin(); }
- // lBUi rlist_rend() { return reader_unit_list.rend(); }
+ gsl_rng *_rng;
};
-// by popular demand
-#define for_all_units(U) \
- for ( auto U = ulist_begin(); U != ulist_end(); ++U )
-#define for_all_units_const(U) \
- for ( auto U = unit_list.begin(); U != unit_list.end(); ++U )
-#define for_all_neurons(U) \
- for ( auto U = ulist_begin(); U != ulist_end(); ++U ) if ( (*U)->is_neuron() )
-#define for_all_synapses(U) \
- for ( auto U = ulist_begin(); U != ulist_end(); ++U ) if ( (*U)->is_synapse() )
-#define for_all_neurons_reversed(U) \
- for ( auto U = ulist_rbegin(); U != ulist_rend(); ++U ) if ( (*U)->is_neuron() )
-#define for_all_synapses_reversed(U) \
- for ( auto U = ulist_rbegin(); U != ulist_rend(); ++U ) if ( (*U)->is_synapse() )
-
-#define for_all_hosted_neurons(U) \
- for ( auto U = hosted_neu_list.begin(); U != hosted_neu_list.end(); ++U )
-#define for_all_hosted_synapses(U) \
- for ( auto U = hosted_syn_list.begin(); U != hosted_syn_list.end(); ++U )
-#define for_all_standalone_neurons(U) \
- for ( auto U = standalone_neu_list.begin(); U != standalone_neu_list.end(); ++U )
-#define for_all_standalone_synapses(U) \
- for ( auto U = standalone_syn_list.begin(); U != standalone_syn_list.end(); ++U )
-#define for_all_ddtbound_neurons(U) \
- for ( auto U = ddtbound_neu_list.begin(); U != ddtbound_neu_list.end(); ++U )
-#define for_all_ddtbound_synapses(U) \
- for ( auto U = ddtbound_syn_list.begin(); U != ddtbound_syn_list.end(); ++U )
-
-#define for_all_units_with_contiuous_sources(U) \
- for ( auto U = units_with_continuous_sources.begin(); U != units_with_continuous_sources.end(); ++U )
-#define for_all_units_with_periodic_sources(U) \
- for ( auto U = units_with_periodic_sources.begin(); U != units_with_periodic_sources.end(); ++U )
-
-#define for_all_units_reversed(U) \
- for ( auto U = ulist_rbegin(); U != ulist_rend(); ++U )
-#define for_all_readers_reversed(U) \
- for ( auto U = rlist_rbegin(); U != rlist_rend(); ++U )
-
-#define for_all_hosted_neurons_reversed(U) for ( auto U = hosted_neu_list.rbegin(); U != hosted_neu_list.rend(); ++U )
-#define for_all_hosted_synapses_reversed(U) for ( auto U = hosted_syn_list.rbegin(); U != hosted_syn_list.rend(); ++U )
-#define for_all_standalone_synapses_reversed(U) for ( auto U = standalone_syn_list.rbegin(); U != standalone_syn_list.rend(); ++U )
-
-#define for_all_listening_units(U) \
- for ( auto U = lulist_begin(); U != lulist_end(); ++U )
-#define for_all_conscious_neurons(N) \
- for ( auto N = conscious_neu_list.begin(); N != conscious_neu_list.end(); ++N )
-#define for_all_spikelogging_neurons(N) \
- for ( auto N = knlist_begin(); N != knlist_end(); ++N )
-#define for_all_mx_synapses(N) \
- for ( auto Y = mx_syn_list.begin(); Y != mx_syn_list.end(); ++Y )
-
-
-#define for_model_units(M,U) \
- for ( auto U = M->ulist_begin(); U != M->ulist_end(); ++U )
-#define for_model_units_reversed(M,U) \
- for ( auto U = M->ulist_rbegin(); U != M->ulist_rend(); ++U )
-
-#define for_model_hosted_neurons(M,U) \
- for ( auto U = M->hosted_neu_list.begin(); U != M->hosted_neu_list.end(); ++U )
-#define for_model_hosted_synapses(M,U) \
- for ( auto U = M->hosted_syn_list.begin(); U != M->hosted_syn_list.end(); ++U )
-#define for_model_hosted_neurons_reversed(M,U) for ( auto U = M->hnlist_rbegin(); U != M->hnlist_rend(); ++U )
-#define for_model_hosted_synapses_reversed(M,U) for ( auto U = M->hylist_rbegin(); U != M->hylist_rend(); ++U )
-
-#define for_model_standalone_neurons(M,U) for ( auto U = M->snlist_begin(); U != M->snlist_end(); ++U )
-#define for_model_standalone_synapses(M,U) for ( auto U = M->sylist_begin(); U != M->sylist_end(); ++U )
-#define for_model_standalone_neurons_reversed(M,U) for ( auto U = M->snlist_rbegin(); U != M->snlist_rend(); ++U )
-#define for_model_standalone_synapses_reversed(M,U) for ( auto U = M->sylist_rbegin(); U != M->sylist_rend(); ++U )
-
-#define for_model_neuron_units(M,U) for_model_units(M,U) if ( (*U)->is_neuron() )
-#define for_model_synapse_units(M,U) for_model_units(M,U) if ( (*U)->is_synapse() )
-
-#define for_model_spikelogging_neurons(M,N) for ( auto N = M->knlist_begin(); N != M->knlist_end(); ++N )
-
-
-// return values for import_NetworkML
-#define CN_NMLIN_NOFILE -1
-#define CN_NMLIN_NOELEM -2
-#define CN_NMLIN_BADATTR -3
-#define CN_NMLIN_BADCELLTYPE -4
-#define CN_NMLIN_BIGLABEL -5
-#define CN_NMLIN_STRUCTERROR -6
-
-
-
-
-
inline void
CIntegrateRK65::fixate()
{
- swap( model->V, model->W);
+ swap( model->V, model->W);
}
// various CUnit & CNeuron methods accessing CModel members
// that we want to have inline
-inline const double&
+inline const double
C_BaseUnit::model_time() const
{
- return M->model_time();
+ return M->model_time();
}
inline void
C_BaseUnit::pause_listening()
{
- if ( M )
- M->unregister_listener( this);
+ if ( !M )
+ throw "pause_listening() called on NULL model";
+ M->unregister_listener( this);
}
inline void
C_BaseUnit::resume_listening()
{
- if ( M )
- M->register_listener( this);
+ if ( !M )
+ throw "resume_listening() called on NULL model";
+ M->register_listener( this);
}
@@ -547,8 +487,8 @@ template <class T>
void
C_BaseUnit::attach_source( T *s, TSinkType t, unsigned short idx)
{
- sources.push_back( SSourceInterface<T>( s, t, idx));
- M->register_unit_with_sources(this);
+ sources.push_back( SSourceInterface<T>( s, t, idx));
+ M->register_unit_with_sources(this);
}
@@ -558,30 +498,30 @@ C_BaseUnit::attach_source( T *s, TSinkType t, unsigned short idx)
inline SSpikeloggerService*
C_BaseNeuron::enable_spikelogging_service( int s_mask)
{
- if ( !_spikelogger_agent )
- _spikelogger_agent = new SSpikeloggerService( this, s_mask);
- M->register_spikelogger( this);
- return _spikelogger_agent;
+ if ( !_spikelogger_agent )
+ _spikelogger_agent = new SSpikeloggerService( this, s_mask);
+ M->register_spikelogger( this);
+ return _spikelogger_agent;
}
inline SSpikeloggerService*
C_BaseNeuron::enable_spikelogging_service( double sample_period, double sigma, double from, int s_mask)
{
- if ( !_spikelogger_agent )
- _spikelogger_agent = new SSpikeloggerService( this, sample_period, sigma, from, s_mask);
- M->register_spikelogger( this);
- return _spikelogger_agent;
+ if ( !_spikelogger_agent )
+ _spikelogger_agent = new SSpikeloggerService( this, sample_period, sigma, from, s_mask);
+ M->register_spikelogger( this);
+ return _spikelogger_agent;
}
inline void
C_BaseNeuron::disable_spikelogging_service()
{
- if ( _spikelogger_agent && !(_spikelogger_agent->_status & CN_KL_PERSIST)) {
- _spikelogger_agent->sync_history();
- M->unregister_spikelogger( this);
+ if ( _spikelogger_agent && !(_spikelogger_agent->_status & CN_KL_PERSIST)) {
+ _spikelogger_agent->sync_history();
+ M->unregister_spikelogger( this);
- delete _spikelogger_agent;
- _spikelogger_agent = nullptr;
- }
+ delete _spikelogger_agent;
+ _spikelogger_agent = nullptr;
+ }
}
@@ -592,23 +532,22 @@ C_BaseNeuron::disable_spikelogging_service()
inline void
C_HostedNeuron::reset_vars()
{
-// cout << "reset_vars() on " << label << " (idx " << idx << ")\n";
- if ( M && idx < M->_var_cnt )
- memcpy( &M->V[idx],
- __CNUDT[_type].stock_var_values,
- __CNUDT[_type].vno * sizeof(double));
+ if ( M && idx < M->_var_cnt )
+ memcpy( &M->V[idx],
+ __CNUDT[_type].stock_var_values,
+ __CNUDT[_type].vno * sizeof(double));
}
inline double&
C_HostedNeuron::var_value( size_t v)
{
- return M->V[idx + v];
+ return M->V[idx + v];
}
inline const double&
C_HostedNeuron::get_var_value( size_t v) const
{
- return M->V[idx + v];
+ return M->V[idx + v];
}
@@ -616,43 +555,40 @@ C_HostedNeuron::get_var_value( size_t v) const
inline unsigned
C_HostedConductanceBasedNeuron::n_spikes_in_last_dt() const
{
- return E() >= M->spike_threshold;
+ return E() >= M->options.spike_threshold;
}
inline unsigned
C_HostedRateBasedNeuron::n_spikes_in_last_dt() const
{
- return round(E() * M->dt() * M->rng_sample());
+ return round(E() * M->dt() * M->rng_sample());
}
inline unsigned
C_StandaloneConductanceBasedNeuron::n_spikes_in_last_dt() const
{
- return E() >= M->spike_threshold;
+ return E() >= M->options.spike_threshold;
}
inline unsigned
C_StandaloneRateBasedNeuron::n_spikes_in_last_dt() const
{
- return round(E() * M->dt() * M->rng_sample());
+ return round(E() * M->dt() * M->rng_sample());
}
-
-
-
inline void
C_HostedSynapse::reset_vars()
{
-// cout << "reset_vars() on " << label << " (idx " << idx << ")\n";
- if ( M && M->_var_cnt > idx )
- memcpy( &M->V[idx],
- __CNUDT[_type].stock_var_values,
- __CNUDT[_type].vno * sizeof(double));
+// cout << "reset_vars() on " << label << " (idx " << idx << ")\n";
+ if ( M && M->_var_cnt > idx )
+ memcpy( &M->V[idx],
+ __CNUDT[_type].stock_var_values,
+ __CNUDT[_type].vno * sizeof(double));
}
@@ -660,13 +596,13 @@ C_HostedSynapse::reset_vars()
inline double&
C_HostedSynapse::var_value( size_t v)
{
- return M->V[idx + v];
+ return M->V[idx + v];
}
inline const double&
C_HostedSynapse::get_var_value( size_t v) const
{
- return M->V[idx + v];
+ return M->V[idx + v];
}
@@ -674,7 +610,7 @@ C_HostedSynapse::get_var_value( size_t v) const
inline double
C_HostedConductanceBasedNeuron::E() const
{
- return M->V[idx+0];
+ return M->V[idx+0];
}
// F is computed on the fly, so far usually
@@ -683,54 +619,50 @@ C_HostedConductanceBasedNeuron::E() const
inline double
C_HostedSynapse::S() const
{
- return M->V[idx+0];
+ return M->V[idx+0];
}
-
inline
-CNeuronMap::CNeuronMap( const char *inlabel, double x, double y, double z, CModel *inM, int s_mask)
+CNeuronMap::CNeuronMap( const string& inlabel, double x, double y, double z, CModel *inM, int s_mask)
: C_StandaloneConductanceBasedNeuron( NT_MAP, inlabel, x, y, z, inM, s_mask)
{
- if ( inM ) {
- if ( isfinite( inM->_discrete_dt) && inM->_discrete_dt != fixed_dt ) {
- printf( "Inappropriate discrete dt\n");
- _status |= CN_UERROR;
- }
- inM -> _discrete_dt = fixed_dt;
- }
+ if ( inM ) {
+ if ( isfinite( inM->_discrete_dt) && inM->_discrete_dt != fixed_dt ) {
+ printf( "Inappropriate discrete dt\n");
+ _status |= CN_UERROR;
+ }
+ inM -> _discrete_dt = fixed_dt;
+ }
}
inline
CSynapseMap::CSynapseMap( C_BaseNeuron *insource, C_BaseNeuron *intarget,
- double ing, CModel *inM, int s_mask, TUnitType alt_type)
+ double ing, CModel *inM, int s_mask, TUnitType alt_type)
: C_StandaloneSynapse( alt_type, insource, intarget, ing, inM, s_mask),
- _source_was_spiking (false)
+ _source_was_spiking (false)
{
- if ( !inM )
- fprintf( stderr, "A MxMap synapse is created unattached to a model: preadvance() will cause a segfault!\n");
- else {
- if ( isfinite( inM->_discrete_dt) && inM->_discrete_dt != fixed_dt ) {
- printf( "Inappropriate discrete dt\n");
- _status |= CN_UERROR;
- }
- inM -> _discrete_dt = fixed_dt;
- }
+ if ( !inM )
+ fprintf( stderr, "A MxMap synapse is created unattached to a model: preadvance() will cause a segfault!\n");
+ else {
+ if ( isfinite( inM->_discrete_dt) && inM->_discrete_dt != fixed_dt ) {
+ printf( "Inappropriate discrete dt\n");
+ _status |= CN_UERROR;
+ }
+ inM -> _discrete_dt = fixed_dt;
+ }
}
inline void
CSynapseMap::preadvance()
{
-// printf( "fafa %s\n", label);
- V_next[0] = S() * exp( -M->discrete_dt() / P[_tau_])
- + (_source->n_spikes_in_last_dt() ? P[_delta_] : 0);
-
-// V_next[1] = ;
+ V_next[0] = S() * exp( -M->discrete_dt() / P[_tau_])
+ + (_source->n_spikes_in_last_dt() ? P[_delta_] : 0);
}
@@ -738,7 +670,7 @@ CSynapseMap::preadvance()
inline void
CSynapseMxMap::preadvance()
{
- V_next[0] = S() * exp( -M->discrete_dt() / P[_tau_]) + q() * P[_delta_];
+ V_next[0] = S() * exp( -M->discrete_dt() / P[_tau_]) + q() * P[_delta_];
}
}
@@ -746,6 +678,7 @@ CSynapseMxMap::preadvance()
#endif
// Local Variables:
+// Mode: c++
// indent-tabs-mode: nil
// tab-width: 8
// c-basic-offset: 8
diff --git a/upstream/src/libcn/mx-attr.hh b/upstream/src/libcn/mx-attr.hh
index 3ecb3e0..5be9a34 100644
--- a/upstream/src/libcn/mx-attr.hh
+++ b/upstream/src/libcn/mx-attr.hh
@@ -9,8 +9,8 @@
*/
-#ifndef LIBCN_MX_ATTR_H
-#define LIBCN_MX_ATTR_H
+#ifndef CNRUN_LIBCN_MXATTR_H_
+#define CNRUN_LIBCN_MXATTR_H_
#include <vector>
@@ -24,26 +24,33 @@ namespace cnrun {
class C_MultiplexingAttributes {
protected:
- friend class CModel;
- virtual void update_queue() = 0;
- vector<double> _kq;
+ virtual void update_queue() = 0;
+ vector<double> _kq;
public:
- double q() const { return _kq.size(); }
- void reset()
- {
- _kq.clear();
- }
+ double q() const
+ {
+ return _kq.size();
+ }
+ void reset()
+ {
+ _kq.clear();
+ }
};
class C_DotAttributes {
public:
- virtual double& spikes_fired_in_last_dt() = 0;
+ virtual double& spikes_fired_in_last_dt() = 0;
};
}
#endif
-// EOF
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/param-unit-literals.hh b/upstream/src/libcn/param-unit-literals.hh
index 6bbfa5f..5d73e29 100644
--- a/upstream/src/libcn/param-unit-literals.hh
+++ b/upstream/src/libcn/param-unit-literals.hh
@@ -7,14 +7,13 @@
*
*/
-#ifndef LIBCN_PARAM_UNIT_LITERALS_H
-#define LIBCN_PARAM_UNIT_LITERALS_H
+#ifndef CNRUN_LIBCN_PARAMUNITLITERALS_H_
+#define CNRUN_LIBCN_PARAMUNITLITERALS_H_
#if HAVE_CONFIG_H && !defined(VERSION)
# include "config.h"
#endif
-
#define __CN_PU_CONDUCTANCE "\316\274S/cm\302\262"
#define __CN_PU_RESISTANCE "M\316\251"
#define __CN_PU_POTENTIAL "mV"
@@ -29,4 +28,9 @@
#endif
-// EOF
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/sources.cc b/upstream/src/libcn/sources.cc
index 78ad7b2..374a8c6 100644
--- a/upstream/src/libcn/sources.cc
+++ b/upstream/src/libcn/sources.cc
@@ -8,7 +8,6 @@
*
*/
-
#include <cmath>
#include <sys/time.h>
#include <iostream>
@@ -23,41 +22,41 @@
using namespace std;
-const char * const cnrun::__SourceTypes[] = {
- "Null",
- "Tape",
- "Periodic",
- "Function",
- "Noise",
+const char* const cnrun::C_BaseSource::types[] = {
+ "Null",
+ "Tape",
+ "Periodic",
+ "Function",
+ "Noise",
};
cnrun::CSourceTape::
-CSourceTape( const char *id, const char *infname, bool inis_looping)
+CSourceTape( const string& id, const string& infname, bool inis_looping)
: C_BaseSource (id, SRC_TAPE), is_looping (inis_looping)
{
- ifstream ins( infname);
- if ( !ins.good() ) {
- name = "";
- return;
- }
- skipws(ins);
-
- while ( !ins.eof() && ins.good() ) {
- while ( ins.peek() == '#' || ins.peek() == '\n' )
- ins.ignore( numeric_limits<streamsize>::max(), '\n');
- double timestamp, datum;
- ins >> timestamp >> datum;
- values.push_back( pair<double,double>(timestamp, datum));
- }
-
- if ( values.size() == 0 ) {
- fprintf( stderr, "No usable values in \"%s\"\n", infname);
- return;
- }
-
- fname = infname;
- I = values.begin();
+ ifstream ins( infname);
+ if ( !ins.good() ) {
+ name = "";
+ return;
+ }
+ skipws(ins);
+
+ while ( !ins.eof() && ins.good() ) {
+ while ( ins.peek() == '#' || ins.peek() == '\n' )
+ ins.ignore( numeric_limits<streamsize>::max(), '\n');
+ double timestamp, datum;
+ ins >> timestamp >> datum;
+ values.push_back( pair<double,double>(timestamp, datum));
+ }
+
+ if ( values.size() == 0 ) {
+ fprintf( stderr, "No usable values in \"%s\"\n", infname.c_str());
+ return;
+ }
+
+ fname = infname;
+ I = values.begin();
}
double
@@ -65,13 +64,13 @@ cnrun::CSourceTape::
operator() ( double t)
{
// position I such that *I < t < *(I+1)
- while ( I+1 != values.end() && (I+1)->first < t )
- ++I;
+ while ( next(I) != values.end() && (I+1)->first < t )
+ ++I;
- if ( I+1 == values.end() && is_looping )
- I = values.begin();
+ if ( next(I) == values.end() && is_looping )
+ I = values.begin();
- return I->second;
+ return I->second;
}
@@ -80,46 +79,46 @@ operator() ( double t)
cnrun::CSourcePeriodic::
-CSourcePeriodic( const char *id, const char *infname, bool inis_looping, double inperiod)
+CSourcePeriodic( const string& id, const string& infname, bool inis_looping, double inperiod)
: C_BaseSource (id, SRC_PERIODIC), is_looping (inis_looping)
{
- ifstream ins( infname);
- if ( !ins.good() ) {
- name = "";
- return;
- }
- skipws(ins);
-
- while ( ins.peek() == '#' || ins.peek() == '\n' )
- ins.ignore( numeric_limits<streamsize>::max(), '\n');
-
- if ( !isfinite(inperiod) || inperiod <= 0. ) {
- ins >> inperiod;
- if ( !isfinite(inperiod) || inperiod <= 0. ) {
- fprintf( stderr, "Bad interval for \"%s\"\n", infname);
- name = "";
- return;
- }
- }
- period = inperiod;
-
- while ( true ) {
- while ( ins.peek() == '#' || ins.peek() == '\n' )
- ins.ignore( numeric_limits<streamsize>::max(), '\n');
- double datum;
- ins >> datum;
- if ( ins.eof() || !ins.good() )
- break;
- values.push_back( datum);
- }
-
- if ( values.size() < 2 ) {
- fprintf( stderr, "Need at least 2 scheduled values in \"%s\"\n", infname);
- name = "";
- return;
- }
-
- fname = infname;
+ ifstream ins( infname);
+ if ( !ins.good() ) {
+ name = "";
+ return;
+ }
+ skipws(ins);
+
+ while ( ins.peek() == '#' || ins.peek() == '\n' )
+ ins.ignore( numeric_limits<streamsize>::max(), '\n');
+
+ if ( !isfinite(inperiod) || inperiod <= 0. ) {
+ ins >> inperiod;
+ if ( !isfinite(inperiod) || inperiod <= 0. ) {
+ fprintf( stderr, "Bad interval for \"%s\"\n", infname.c_str());
+ name = "";
+ return;
+ }
+ }
+ period = inperiod;
+
+ while ( true ) {
+ while ( ins.peek() == '#' || ins.peek() == '\n' )
+ ins.ignore( numeric_limits<streamsize>::max(), '\n');
+ double datum;
+ ins >> datum;
+ if ( ins.eof() || !ins.good() )
+ break;
+ values.push_back( datum);
+ }
+
+ if ( values.size() < 2 ) {
+ fprintf( stderr, "Need at least 2 scheduled values in \"%s\"\n", infname.c_str());
+ name = "";
+ return;
+ }
+
+ fname = infname;
}
@@ -127,29 +126,36 @@ CSourcePeriodic( const char *id, const char *infname, bool inis_looping, double
const char * const cnrun::distribution_names[] = { "uniform", "gaussian" };
cnrun::CSourceNoise::
-CSourceNoise( const char *id,
- double in_min, double in_max,
- TDistribution indist_type,
- int seed)
- : C_BaseSource (id, SRC_NOISE), _min (in_min), _max (in_max), _sigma (in_max - in_min), dist_type (indist_type)
+CSourceNoise( const string& id,
+ double in_min, double in_max,
+ TDistribution indist_type,
+ int seed)
+ : C_BaseSource (id, SRC_NOISE),
+ _min (in_min), _max (in_max),
+ _sigma (in_max - in_min),
+ dist_type (indist_type)
{
- const gsl_rng_type *T;
- gsl_rng_env_setup();
- T = gsl_rng_default;
- if ( gsl_rng_default_seed == 0 ) {
- struct timeval tp = { 0L, 0L };
- gettimeofday( &tp, nullptr);
- gsl_rng_default_seed = tp.tv_usec;
- }
- rng = gsl_rng_alloc( T);
+ const gsl_rng_type *T;
+ gsl_rng_env_setup();
+ T = gsl_rng_default;
+ if ( gsl_rng_default_seed == 0 ) {
+ struct timeval tp = { 0L, 0L };
+ gettimeofday( &tp, nullptr);
+ gsl_rng_default_seed = tp.tv_usec;
+ }
+ rng = gsl_rng_alloc( T);
}
cnrun::CSourceNoise::
~CSourceNoise()
{
- gsl_rng_free( rng);
+ gsl_rng_free( rng);
}
-
-// eof
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/sources.hh b/upstream/src/libcn/sources.hh
index 6b8ec1b..207f44c 100644
--- a/upstream/src/libcn/sources.hh
+++ b/upstream/src/libcn/sources.hh
@@ -7,13 +7,14 @@
*
*/
-
-#ifndef LIBCN_SOURCES_H
-#define LIBCN_SOURCES_H
+#ifndef CNRUN_LIBCN_SOURCES_H_
+#define CNRUN_LIBCN_SOURCES_H_
#include <string>
#include <vector>
+#include <utility>
+#include "forward-decls.hh"
#include "gsl/gsl_rng.h"
#include "gsl/gsl_randist.h"
@@ -26,105 +27,104 @@ using namespace std;
namespace cnrun {
-
-typedef enum { SRC_NULL, SRC_TAPE, SRC_PERIODIC, SRC_FUNCTION, SRC_NOISE } TSourceType;
-extern const char * const __SourceTypes[];
-
class C_BaseSource {
public:
- string name;
- TSourceType type;
- C_BaseSource( const char *id, TSourceType intype = SRC_NULL)
- : name (id), type (intype)
- {}
- virtual ~C_BaseSource()
- {}
-
- virtual double operator() ( double)
- { return 0.; }
- virtual bool is_periodic()
- { return false; }
- bool operator== ( const C_BaseSource &rv)
- { return name == rv.name; }
- bool operator== ( const char *rv)
- { return name == name; }
- virtual void dump( FILE *strm = stdout)
- {
- fprintf( strm, "%s (%s)\n", name.c_str(), __SourceTypes[type]);
- }
+ enum TSourceType { SRC_NULL, SRC_TAPE, SRC_PERIODIC, SRC_FUNCTION, SRC_NOISE };
+ static const char * const types[];
+
+ string name;
+ TSourceType type;
+ C_BaseSource( const string& id, TSourceType intype = SRC_NULL)
+ : name (id), type (intype)
+ {}
+ virtual ~C_BaseSource()
+ {}
+
+ virtual double operator() ( double)
+ { return 0.; }
+ virtual bool is_periodic()
+ { return false; }
+ bool operator== ( const C_BaseSource &rv)
+ { return name == rv.name; }
+ bool operator== ( const string& rv)
+ { return name == name; }
+ virtual void dump( FILE *strm = stdout)
+ {
+ fprintf( strm, "%s (%s)\n", name.c_str(), types[type]);
+ }
};
class CSourceTape : public C_BaseSource {
private:
- CSourceTape();
+ CSourceTape();
public:
- string fname;
- vector< pair<double, double> > values;
- bool is_looping;
+ string fname;
+ vector<pair<double, double>> values;
+ bool is_looping;
- CSourceTape( const char *id, const char *infname, bool is_looping = false);
+ CSourceTape( const string& id, const string& infname, bool is_looping = false);
- double operator() ( double at);
+ double operator() ( double at);
- void dump( FILE *strm = stdout)
- {
- fprintf( strm, "%s (%s) %zu values from %s%s\n",
- name.c_str(), __SourceTypes[type],
- values.size(), fname.c_str(), is_looping ? "" : " (looping)");
- }
+ void dump( FILE *strm = stdout)
+ {
+ fprintf( strm, "%s (%s) %zu values from %s%s\n",
+ name.c_str(), types[type],
+ values.size(), fname.c_str(), is_looping ? "" : " (looping)");
+ }
private:
- vector< pair<double, double> >::iterator I;
+ vector<pair<double, double>>::iterator I;
};
class CSourcePeriodic : public C_BaseSource {
private:
- CSourcePeriodic();
+ CSourcePeriodic();
public:
- string fname;
- vector<double> values;
- double period;
- bool is_looping;
-
- CSourcePeriodic( const char *id, const char *fname, bool is_looping = false, double period = 0.);
-
- double operator() ( double at)
- {
- size_t i_abs = (size_t)(at / period),
- i_eff = is_looping
- ? i_abs % values.size()
- : min (i_abs, values.size() - 1);
- return values[i_eff];
- }
-
- void dump( FILE *strm = stdout)
- {
- fprintf( strm, "%s (%s) %zu values at %g from %s%s\n",
- name.c_str(), __SourceTypes[type],
- values.size(), period, fname.c_str(), is_looping ? "" : " (looping)");
- }
-
- bool is_periodic()
- { return true; }
+ string fname;
+ vector<double> values;
+ double period;
+ bool is_looping;
+
+ CSourcePeriodic( const string& id, const string& fname, bool is_looping = false, double period = 0.);
+
+ double operator() ( double at)
+ {
+ size_t i_abs = (size_t)(at / period),
+ i_eff = is_looping
+ ? i_abs % values.size()
+ : min (i_abs, values.size() - 1);
+ return values[i_eff];
+ }
+
+ void dump( FILE *strm = stdout)
+ {
+ fprintf( strm, "%s (%s) %zu values at %g from %s%s\n",
+ name.c_str(), types[type],
+ values.size(), period, fname.c_str(), is_looping ? "" : " (looping)");
+ }
+
+ bool is_periodic()
+ { return true; }
};
class CSourceFunction : public C_BaseSource {
private:
- CSourceFunction();
+ CSourceFunction();
public:
- double (*function)( double at);
+ double (*function)( double at);
- CSourceFunction( const char *id, double (*f)(double))
- : C_BaseSource (id, SRC_FUNCTION), function (f)
- {}
+ CSourceFunction( const string& id, double (*f)(double))
+ : C_BaseSource (id, SRC_FUNCTION), function (f)
+ {}
- double operator() ( double at)
- {
- return function( at);
- }
+ double operator() ( double at)
+ {
+ return function( at);
+ }
};
@@ -132,40 +132,45 @@ extern const char * const distribution_names[];
class CSourceNoise : public C_BaseSource {
private:
- CSourceNoise();
+ CSourceNoise();
public:
- double _min, _max, _sigma;
- enum TDistribution {
- SOURCE_RANDDIST_UNIFORM,
- SOURCE_RANDDIST_GAUSSIAN,
- };
- TDistribution dist_type;
- gsl_rng *rng;
-
- CSourceNoise( const char *id, double in_min = 0., double in_max = 1.,
- TDistribution type = SOURCE_RANDDIST_UNIFORM,
- int seed = 0);
+ double _min, _max, _sigma;
+ enum TDistribution {
+ SOURCE_RANDDIST_UNIFORM,
+ SOURCE_RANDDIST_GAUSSIAN,
+ };
+ TDistribution dist_type;
+ gsl_rng *rng;
+
+ CSourceNoise( const string& id, double in_min = 0., double in_max = 1.,
+ TDistribution = SOURCE_RANDDIST_UNIFORM,
+ int seed = 0);
~CSourceNoise();
- double operator() ( double unused) const
- {
- switch ( dist_type ) {
- case SOURCE_RANDDIST_UNIFORM: return gsl_rng_uniform( rng) * (_max - _min) + _min;
- case SOURCE_RANDDIST_GAUSSIAN: return gsl_ran_gaussian( rng, _sigma) + (_max - _min)/2;
- default: return 42.;
- }
- }
-
- void dump( FILE *strm = stdout)
- {
- fprintf( strm, "%s (%s) %s in range %g:%g (sigma = %g)\n",
- name.c_str(), __SourceTypes[type],
- distribution_names[dist_type], _min, _max, _sigma);
- }
+ double operator() ( double unused) const
+ {
+ switch ( dist_type ) {
+ case SOURCE_RANDDIST_UNIFORM: return gsl_rng_uniform( rng) * (_max - _min) + _min;
+ case SOURCE_RANDDIST_GAUSSIAN: return gsl_ran_gaussian( rng, _sigma) + (_max - _min)/2;
+ default: return 42.;
+ }
+ }
+
+ void dump( FILE *strm = stdout)
+ {
+ fprintf( strm, "%s (%s) %s in range %g:%g (sigma = %g)\n",
+ name.c_str(), types[type],
+ distribution_names[dist_type], _min, _max, _sigma);
+ }
};
}
#endif
-// EOF
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/standalone-neurons.cc b/upstream/src/libcn/standalone-neurons.cc
index ba8c751..2be61ee 100644
--- a/upstream/src/libcn/standalone-neurons.cc
+++ b/upstream/src/libcn/standalone-neurons.cc
@@ -23,23 +23,23 @@
cnrun::C_StandaloneNeuron::
-C_StandaloneNeuron( TUnitType intype, const char *inlabel,
- double x, double y, double z,
- CModel *inM, int s_mask)
+C_StandaloneNeuron( TUnitType intype, const string& inlabel,
+ double x, double y, double z,
+ CModel *inM, int s_mask)
: C_BaseNeuron( intype, inlabel, x, y, z, inM, s_mask),
- C_StandaloneAttributes( __CNUDT[intype].vno)
+ C_StandaloneAttributes( __CNUDT[intype].vno)
{
- reset_vars();
- if ( M )
- M->include_unit( this);
+ reset_vars();
+ if ( M )
+ M->include_unit( this);
}
cnrun::C_StandaloneNeuron::
~C_StandaloneNeuron()
{
- if ( M && M->verbosely > 5 )
- fprintf( stderr, " deleting standalone neuron \"%s\"\n", _label);
+ if ( M && M->options.verbosely > 5 )
+ fprintf( stderr, " deleting standalone neuron \"%s\"\n", _label);
}
@@ -50,33 +50,33 @@ cnrun::C_StandaloneNeuron::
// --------- Rhythm'n'Blues
const char* const cnrun::__CN_ParamNames_NeuronHH_r[] = {
- "a, " __CN_PU_FREQUENCY,
- "I\342\202\200, " __CN_PU_CURRENT,
- "r in F(I) = a (I-I\342\202\200)^r",
- "Externally applied DC, " __CN_PU_CURRENT,
+ "a, " __CN_PU_FREQUENCY,
+ "I\342\202\200, " __CN_PU_CURRENT,
+ "r in F(I) = a (I-I\342\202\200)^r",
+ "Externally applied DC, " __CN_PU_CURRENT,
};
const char* const cnrun::__CN_ParamSyms_NeuronHH_r[] = {
- "a",
- "I0",
- "r",
- "Idc",
+ "a",
+ "I0",
+ "r",
+ "Idc",
};
const double cnrun::__CN_Params_NeuronHH_r[] = {
- 0.185, // a,
- 0.0439, // I0,
- 0.564, // r in F(I) = a * (I-I0)^r
- 0. // Externally applied DC
+ 0.185, // a,
+ 0.0439, // I0,
+ 0.564, // r in F(I) = a * (I-I0)^r
+ 0. // Externally applied DC
};
const char* const cnrun::__CN_VarNames_NeuronHH_r[] = {
- "Spiking rate, " __CN_PU_FREQUENCY,
+ "Spiking rate, " __CN_PU_FREQUENCY,
};
const char* const cnrun::__CN_VarSyms_NeuronHH_r[] = {
- "F",
+ "F",
};
const double cnrun::__CN_Vars_NeuronHH_r[] = {
- 0. // frequency F
+ 0. // frequency F
};
@@ -84,24 +84,24 @@ double
cnrun::CNeuronHH_r::
F( vector<double>& x) const
{
- double subsq = Isyn(x) - P[_I0_] + P[_Idc_];
- if ( subsq <= 0. )
- return 0.;
- else {
- return P[_a_] * pow( subsq, P[_r_]);
- }
+ double subsq = Isyn(x) - P[_I0_] + P[_Idc_];
+ if ( subsq <= 0. )
+ return 0.;
+ else {
+ return P[_a_] * pow( subsq, P[_r_]);
+ }
}
void
cnrun::CNeuronHH_r::
preadvance()
{
- double subsq = Isyn() - P[_I0_] + P[_Idc_];
-// printf( "%s->Isyn(x) = %g,\tsubsq = %g\n", _label, Isyn(), subsq);
- if ( subsq <= 0. )
- V_next[0] = 0;
- else
- V_next[0] = P[_a_] * pow( subsq, P[_r_]);
+ double subsq = Isyn() - P[_I0_] + P[_Idc_];
+// printf( "%s->Isyn(x) = %g,\tsubsq = %g\n", _label, Isyn(), subsq);
+ if ( subsq <= 0. )
+ V_next[0] = 0;
+ else
+ V_next[0] = P[_a_] * pow( subsq, P[_r_]);
}
@@ -114,35 +114,35 @@ preadvance()
const char* const cnrun::__CN_ParamNames_OscillatorPoissonDot[] = {
- "Rate \316\273, " __CN_PU_RATE,
- "Resting potential, " __CN_PU_POTENTIAL,
- "Potential when firing, " __CN_PU_POTENTIAL,
+ "Rate \316\273, " __CN_PU_RATE,
+ "Resting potential, " __CN_PU_POTENTIAL,
+ "Potential when firing, " __CN_PU_POTENTIAL,
};
const char* const cnrun::__CN_ParamSyms_OscillatorPoissonDot[] = {
- "lambda",
- "Vrst",
- "Vfir",
+ "lambda",
+ "Vrst",
+ "Vfir",
};
const double cnrun::__CN_Params_OscillatorPoissonDot[] = {
- 0.02, // firing rate Lambda [1/ms]=[10^3 Hz]
- -60.0, // input neuron resting potential
- 20.0, // input neuron potential when firing
+ 0.02, // firing rate Lambda [1/ms]=[10^3 Hz]
+ -60.0, // input neuron resting potential
+ 20.0, // input neuron potential when firing
};
const char* const cnrun::__CN_VarNames_OscillatorPoissonDot[] = {
- "Membrane potential, " __CN_PU_POTENTIAL,
- "Spikes recently fired",
-// "Time"
+ "Membrane potential, " __CN_PU_POTENTIAL,
+ "Spikes recently fired",
+// "Time"
};
const char* const cnrun::__CN_VarSyms_OscillatorPoissonDot[] = {
- "E",
- "nspk",
-// "t"
+ "E",
+ "nspk",
+// "t"
};
const double cnrun::__CN_Vars_OscillatorPoissonDot[] = {
- -60., // = Vrst, per initialization code found in ctor
- 0,
-// 0.
+ -60., // = Vrst, per initialization code found in ctor
+ 0,
+// 0.
};
@@ -150,22 +150,22 @@ const double cnrun::__CN_Vars_OscillatorPoissonDot[] = {
inline namespace {
#define _THIRTEEN_ 13
unsigned long __factorials[_THIRTEEN_] = {
- 1,
- 1, 2, 6, 24, 120,
- 720, 5040, 40320, 362880L, 3628800L,
- 39916800L, 479001600L
+ 1,
+ 1, 2, 6, 24, 120,
+ 720, 5040, 40320, 362880L, 3628800L,
+ 39916800L, 479001600L
};
inline double
__attribute__ ((pure))
factorial( unsigned n)
{
- if ( n < _THIRTEEN_ )
- return __factorials[n];
- else {
- //cerr << n << "!" << endl;
- return __factorials[_THIRTEEN_-1] * factorial(n-_THIRTEEN_);
- }
+ if ( n < _THIRTEEN_ )
+ return __factorials[n];
+ else {
+ //cerr << n << "!" << endl;
+ return __factorials[_THIRTEEN_-1] * factorial(n-_THIRTEEN_);
+ }
}
}
@@ -173,26 +173,26 @@ void
cnrun::COscillatorDotPoisson::
possibly_fire()
{
- double lt = P[_lambda_] * M->dt(),
- dice = M->rng_sample(),
- probk = 0.;
-
- unsigned k;
- for ( k = 0; ; k++ ) {
- probk += exp( -lt) * pow( lt, (double)k) / factorial(k);
- if ( probk > dice ) {
- nspikes() = k;
- break;
- }
- }
-
- if ( k ) {
- _status |= CN_NFIRING;
- var_value(0) = P[_Vfir_];
- } else {
- _status &= ~CN_NFIRING;
- var_value(0) = P[_Vrst_];
- }
+ double lt = P[_lambda_] * M->dt(),
+ dice = M->rng_sample(),
+ probk = 0.;
+
+ unsigned k;
+ for ( k = 0; ; k++ ) {
+ probk += exp( -lt) * pow( lt, (double)k) / factorial(k);
+ if ( probk > dice ) {
+ nspikes() = k;
+ break;
+ }
+ }
+
+ if ( k ) {
+ _status |= CN_NFIRING;
+ var_value(0) = P[_Vfir_];
+ } else {
+ _status &= ~CN_NFIRING;
+ var_value(0) = P[_Vrst_];
+ }
}
@@ -201,14 +201,14 @@ void
cnrun::COscillatorDotPoisson::
do_detect_spike_or_whatever()
{
- unsigned n = n_spikes_in_last_dt();
- if ( n > 0 ) {
- for ( unsigned qc = 0; qc < n; qc++ )
- _spikelogger_agent->spike_history.push_back( model_time());
- _spikelogger_agent->_status |= CN_KL_ISSPIKINGNOW;
- _spikelogger_agent->t_last_spike_start = _spikelogger_agent->t_last_spike_end = model_time();
- } else
- _spikelogger_agent->_status &= ~CN_KL_ISSPIKINGNOW;
+ unsigned n = n_spikes_in_last_dt();
+ if ( n > 0 ) {
+ for ( unsigned qc = 0; qc < n; qc++ )
+ _spikelogger_agent->spike_history.push_back( model_time());
+ _spikelogger_agent->_status |= CN_KL_ISSPIKINGNOW;
+ _spikelogger_agent->t_last_spike_start = _spikelogger_agent->t_last_spike_end = model_time();
+ } else
+ _spikelogger_agent->_status &= ~CN_KL_ISSPIKINGNOW;
}
@@ -222,35 +222,35 @@ do_detect_spike_or_whatever()
const char* const cnrun::__CN_ParamNames_OscillatorPoisson[] = {
- "Rate \316\273, " __CN_PU_RATE,
- "Input neuron resting potential, " __CN_PU_POTENTIAL,
- "Input neuron potential when firing, " __CN_PU_POTENTIAL,
- "Spike time, " __CN_PU_TIME,
- "Spike time + refractory period, " __CN_PU_TIME,
+ "Rate \316\273, " __CN_PU_RATE,
+ "Input neuron resting potential, " __CN_PU_POTENTIAL,
+ "Input neuron potential when firing, " __CN_PU_POTENTIAL,
+ "Spike time, " __CN_PU_TIME,
+ "Spike time + refractory period, " __CN_PU_TIME,
};
const char* const cnrun::__CN_ParamSyms_OscillatorPoisson[] = {
- "lambda",
- "trel",
- "trel+trfr",
- "Vrst",
- "Vfir",
+ "lambda",
+ "trel",
+ "trel+trfr",
+ "Vrst",
+ "Vfir",
};
const double cnrun::__CN_Params_OscillatorPoisson[] = {
- 0.02, // firing rate Lambda [1/ms]=[10^3 Hz]
- 0.0, // spike time
- 0.0, // refractory period + spike time
- -60.0, // input neuron resting potential
- 20.0, // input neuron potential when firing
+ 0.02, // firing rate Lambda [1/ms]=[10^3 Hz]
+ 0.0, // spike time
+ 0.0, // refractory period + spike time
+ -60.0, // input neuron resting potential
+ 20.0, // input neuron potential when firing
};
const char* const cnrun::__CN_VarNames_OscillatorPoisson[] = {
- "Membrane potential E, " __CN_PU_POTENTIAL,
+ "Membrane potential E, " __CN_PU_POTENTIAL,
};
const char* const cnrun::__CN_VarSyms_OscillatorPoisson[] = {
- "E",
+ "E",
};
const double cnrun::__CN_Vars_OscillatorPoisson[] = {
- -60.,
+ -60.,
};
@@ -259,27 +259,27 @@ void
cnrun::COscillatorPoisson::
possibly_fire()
{
- if ( _status & CN_NFIRING )
- if ( model_time() - _spikelogger_agent->t_last_spike_start > P[_trel_] ) {
- (_status &= ~CN_NFIRING) |= CN_NREFRACT;
- _spikelogger_agent->t_last_spike_end = model_time();
- }
- if ( _status & CN_NREFRACT )
- if ( model_time() - _spikelogger_agent->t_last_spike_start > P[_trelrfr_] )
- _status &= ~CN_NREFRACT;
-
- if ( !(_status & (CN_NFIRING | CN_NREFRACT)) ) {
- double lt = P[_lambda_] * M->dt();
- if ( M->rng_sample() <= exp( -lt) * lt ) {
- _status |= CN_NFIRING;
- _spikelogger_agent->t_last_spike_start = model_time() /* + M->dt() */ ;
- }
- }
-
-// E() = next_state_E;
-// next_state_E = (_status & CN_NFIRING) ?P.n.Vfir :P.n.Vrst;
- var_value(0) = (_status & CN_NFIRING) ?P[_Vfir_] :P[_Vrst_];
-// if ( strcmp( label, "ORNa.1") == 0 ) cout << label << ": firing_started = " << t_firing_started << ", firing_ended = " << t_firing_ended << " E = " << E() << endl;
+ if ( _status & CN_NFIRING )
+ if ( model_time() - _spikelogger_agent->t_last_spike_start > P[_trel_] ) {
+ (_status &= ~CN_NFIRING) |= CN_NREFRACT;
+ _spikelogger_agent->t_last_spike_end = model_time();
+ }
+ if ( _status & CN_NREFRACT )
+ if ( model_time() - _spikelogger_agent->t_last_spike_start > P[_trelrfr_] )
+ _status &= ~CN_NREFRACT;
+
+ if ( !(_status & (CN_NFIRING | CN_NREFRACT)) ) {
+ double lt = P[_lambda_] * M->dt();
+ if ( M->rng_sample() <= exp( -lt) * lt ) {
+ _status |= CN_NFIRING;
+ _spikelogger_agent->t_last_spike_start = model_time() /* + M->dt() */ ;
+ }
+ }
+
+// E() = next_state_E;
+// next_state_E = (_status & CN_NFIRING) ?P.n.Vfir :P.n.Vrst;
+ var_value(0) = (_status & CN_NFIRING) ?P[_Vfir_] :P[_Vrst_];
+// if ( strcmp( label, "ORNa.1") == 0 ) cout << label << ": firing_started = " << t_firing_started << ", firing_ended = " << t_firing_ended << " E = " << E() << endl;
}
@@ -287,17 +287,17 @@ void
cnrun::COscillatorPoisson::
do_detect_spike_or_whatever()
{
- unsigned n = n_spikes_in_last_dt();
- if ( n > 0 ) {
- if ( !(_spikelogger_agent->_status & CN_KL_ISSPIKINGNOW) ) {
- _spikelogger_agent->spike_history.push_back( model_time());
- _spikelogger_agent->_status |= CN_KL_ISSPIKINGNOW;
- }
- } else
- if ( _spikelogger_agent->_status & CN_KL_ISSPIKINGNOW ) {
- _spikelogger_agent->_status &= ~CN_KL_ISSPIKINGNOW;
- _spikelogger_agent->t_last_spike_end = model_time();
- }
+ unsigned n = n_spikes_in_last_dt();
+ if ( n > 0 ) {
+ if ( !(_spikelogger_agent->_status & CN_KL_ISSPIKINGNOW) ) {
+ _spikelogger_agent->spike_history.push_back( model_time());
+ _spikelogger_agent->_status |= CN_KL_ISSPIKINGNOW;
+ }
+ } else
+ if ( _spikelogger_agent->_status & CN_KL_ISSPIKINGNOW ) {
+ _spikelogger_agent->_status &= ~CN_KL_ISSPIKINGNOW;
+ _spikelogger_agent->t_last_spike_end = model_time();
+ }
}
@@ -310,36 +310,36 @@ do_detect_spike_or_whatever()
// Map neurons require descrete time
const double cnrun::__CN_Params_NeuronMap[] = {
- 60.0, // 0 - Vspike: spike Amplitude factor
- 3.0002440, // 1 - alpha: "steepness / size" parameter
- -2.4663490, // 3 - gamma: "shift / excitation" parameter
- 2.64, // 2 - beta: input sensitivity
- 0.,
+ 60.0, // 0 - Vspike: spike Amplitude factor
+ 3.0002440, // 1 - alpha: "steepness / size" parameter
+ -2.4663490, // 3 - gamma: "shift / excitation" parameter
+ 2.64, // 2 - beta: input sensitivity
+ 0.,
// Old comment by TN: beta chosen such that Isyn= 10 "nA" is the threshold for spiking
};
const char* const cnrun::__CN_ParamNames_NeuronMap[] = {
- "Spike amplitude factor, " __CN_PU_POTENTIAL,
- "\"Steepness / size\" parameter \316\261",
- "\"Shift / excitation\" parameter \316\263",
- "Input sensitivity \316\262, " __CN_PU_RESISTANCE,
- "External DC, " __CN_PU_CURRENT,
+ "Spike amplitude factor, " __CN_PU_POTENTIAL,
+ "\"Steepness / size\" parameter \316\261",
+ "\"Shift / excitation\" parameter \316\263",
+ "Input sensitivity \316\262, " __CN_PU_RESISTANCE,
+ "External DC, " __CN_PU_CURRENT,
};
const char* const cnrun::__CN_ParamSyms_NeuronMap[] = {
- "Vspike",
- "alpha",
- "gamma",
- "beta",
- "Idc"
+ "Vspike",
+ "alpha",
+ "gamma",
+ "beta",
+ "Idc"
};
const double cnrun::__CN_Vars_NeuronMap[] = {
- -50, // E
+ -50, // E
};
const char* const cnrun::__CN_VarNames_NeuronMap[] = {
- "Membrane potential E, " __CN_PU_POTENTIAL
+ "Membrane potential E, " __CN_PU_POTENTIAL
};
const char* const cnrun::__CN_VarSyms_NeuronMap[] = {
- "E",
+ "E",
};
@@ -348,16 +348,16 @@ void
cnrun::CNeuronMap::
preadvance()
{
- double Vspxaxb = P[_Vspike_] * (P[_alpha_] + P[_gamma_]);
- V_next[0] =
- ( E() <= 0. )
- ? P[_Vspike_] * ( P[_alpha_] * P[_Vspike_] / (P[_Vspike_] - E() - P[_beta_] * (Isyn() + P[_Idc_]))
- + P[_gamma_] )
- : ( E() <= Vspxaxb && _E_prev <= 0.)
- ? Vspxaxb
- : -P[_Vspike_];
-
- _E_prev = E();
+ double Vspxaxb = P[_Vspike_] * (P[_alpha_] + P[_gamma_]);
+ V_next[0] =
+ ( E() <= 0. )
+ ? P[_Vspike_] * ( P[_alpha_] * P[_Vspike_] / (P[_Vspike_] - E() - P[_beta_] * (Isyn() + P[_Idc_]))
+ + P[_gamma_] )
+ : ( E() <= Vspxaxb && _E_prev <= 0.)
+ ? Vspxaxb
+ : -P[_Vspike_];
+
+ _E_prev = E();
}
@@ -377,32 +377,32 @@ preadvance()
// ----- Pulse
const char* const cnrun::__CN_ParamNames_NeuronDotPulse[] = {
- "Frequency f, " __CN_PU_FREQUENCY,
- "Resting potential Vrst, " __CN_PU_VOLTAGE,
- "Firing potential Vfir, " __CN_PU_VOLTAGE,
+ "Frequency f, " __CN_PU_FREQUENCY,
+ "Resting potential Vrst, " __CN_PU_VOLTAGE,
+ "Firing potential Vfir, " __CN_PU_VOLTAGE,
};
const char* const cnrun::__CN_ParamSyms_NeuronDotPulse[] = {
- "f",
- "Vrst",
- "Vfir",
+ "f",
+ "Vrst",
+ "Vfir",
};
const double cnrun::__CN_Params_NeuronDotPulse[] = {
- 10,
- -60,
- 20
+ 10,
+ -60,
+ 20
};
const char* const cnrun::__CN_VarNames_NeuronDotPulse[] = {
- "Membrane potential E, " __CN_PU_VOLTAGE,
- "Spikes recently fired",
+ "Membrane potential E, " __CN_PU_VOLTAGE,
+ "Spikes recently fired",
};
const char* const cnrun::__CN_VarSyms_NeuronDotPulse[] = {
- "E",
- "nspk",
+ "E",
+ "nspk",
};
const double cnrun::__CN_Vars_NeuronDotPulse[] = {
- -60., // E
- 0
+ -60., // E
+ 0
};
@@ -410,18 +410,18 @@ void
cnrun::CNeuronDotPulse::
possibly_fire()
{
- enum TParametersNeuronDotPulse { _f_, _Vrst_, _Vfir_ };
+ enum TParametersNeuronDotPulse { _f_, _Vrst_, _Vfir_ };
- spikes_fired_in_last_dt() = floor( (model_time() + M->dt()) * P[_f_]/1000)
- - floor( model_time() * P[_f_]/1000);
+ spikes_fired_in_last_dt() = floor( (model_time() + M->dt()) * P[_f_]/1000)
+ - floor( model_time() * P[_f_]/1000);
- if ( spikes_fired_in_last_dt() ) {
- _status |= CN_NFIRING;
- var_value(0) = P[_Vfir_];
- } else {
- _status &= ~CN_NFIRING;
- var_value(0) = P[_Vrst_];
- }
+ if ( spikes_fired_in_last_dt() ) {
+ _status |= CN_NFIRING;
+ var_value(0) = P[_Vfir_];
+ } else {
+ _status &= ~CN_NFIRING;
+ var_value(0) = P[_Vrst_];
+ }
}
@@ -429,12 +429,16 @@ void
cnrun::CNeuronDotPulse::
param_changed_hook()
{
- if ( P[_f_] < 0 ) {
- if ( M->verbosely > 0 )
- fprintf( stderr, "DotPulse oscillator \"%s\" got a negative parameter f: capping at 0\n", _label);
- P[_f_] = 0.;
- }
+ if ( P[_f_] < 0 ) {
+ if ( M->options.verbosely > 0 )
+ fprintf( stderr, "DotPulse oscillator \"%s\" got a negative parameter f: capping at 0\n", _label);
+ P[_f_] = 0.;
+ }
}
-
-// eof
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/standalone-neurons.hh b/upstream/src/libcn/standalone-neurons.hh
index 0465dfb..a0a8715 100644
--- a/upstream/src/libcn/standalone-neurons.hh
+++ b/upstream/src/libcn/standalone-neurons.hh
@@ -9,9 +9,11 @@
-#ifndef LIBCN_STANDALONE_NEURONS_H
-#define LIBCN_STANDALONE_NEURONS_H
+#ifndef CNRUN_LIBCN_STANDALONENEURONS_H_
+#define CNRUN_LIBCN_STANDALONENEURONS_H_
+#include "libstilton/lang.hh"
+#include "forward-decls.hh"
#include "base-neuron.hh"
#include "standalone-attr.hh"
#include "mx-attr.hh"
@@ -26,26 +28,25 @@ namespace cnrun {
class C_StandaloneNeuron
: public C_BaseNeuron, public C_StandaloneAttributes {
- private:
- C_StandaloneNeuron();
+ DELETE_DEFAULT_METHODS (C_StandaloneNeuron)
protected:
- C_StandaloneNeuron( TUnitType intype, const char *inlabel,
- double x, double y, double z,
- CModel*, int s_mask);
+ C_StandaloneNeuron( TUnitType intype, const string& inlabel,
+ double x, double y, double z,
+ CModel*, int s_mask);
public:
~C_StandaloneNeuron();
- double &var_value( size_t v) { return V[v]; }
- const double &get_var_value( size_t v) const { return V[v]; }
- void reset_vars()
- {
- memcpy( V.data(), __CNUDT[_type].stock_var_values,
- sizeof(double) * v_no());
- memcpy( V_next.data(), __CNUDT[_type].stock_var_values,
- sizeof(double) * v_no());
- }
+ double &var_value( size_t v) { return V[v]; }
+ const double &get_var_value( size_t v) const { return V[v]; }
+ void reset_vars()
+ {
+ memcpy( V.data(), __CNUDT[_type].stock_var_values,
+ sizeof(double) * v_no());
+ memcpy( V_next.data(), __CNUDT[_type].stock_var_values,
+ sizeof(double) * v_no());
+ }
};
@@ -53,40 +54,37 @@ class C_StandaloneNeuron
class C_StandaloneConductanceBasedNeuron
: public C_StandaloneNeuron {
- private:
- C_StandaloneConductanceBasedNeuron();
+ DELETE_DEFAULT_METHODS (C_StandaloneConductanceBasedNeuron)
protected:
- C_StandaloneConductanceBasedNeuron( TUnitType intype, const char *inlabel,
- double inx, double iny, double inz,
- CModel *inM, int s_mask)
- : C_StandaloneNeuron (intype, inlabel, inx, iny, inz, inM, s_mask)
- {}
+ C_StandaloneConductanceBasedNeuron( TUnitType intype, const string& inlabel,
+ double inx, double iny, double inz,
+ CModel *inM, int s_mask)
+ : C_StandaloneNeuron (intype, inlabel, inx, iny, inz, inM, s_mask)
+ {}
public:
- double E() const { return V[0]; }
- double E( vector<double>&) const { return V[0]; }
+ double E() const { return V[0]; }
+ double E( vector<double>&) const { return V[0]; }
- unsigned n_spikes_in_last_dt() const;
+ unsigned n_spikes_in_last_dt() const;
};
class C_StandaloneRateBasedNeuron
: public C_StandaloneNeuron {
- private:
- C_StandaloneRateBasedNeuron();
+ DELETE_DEFAULT_METHODS (C_StandaloneRateBasedNeuron)
protected:
- C_StandaloneRateBasedNeuron( TUnitType intype, const char *inlabel,
- double inx, double iny, double inz,
- CModel *inM, int s_mask)
- : C_StandaloneNeuron (intype, inlabel, inx, iny, inz, inM, s_mask)
- {}
+ C_StandaloneRateBasedNeuron( TUnitType intype, const string& inlabel,
+ double inx, double iny, double inz,
+ CModel *inM, int s_mask)
+ : C_StandaloneNeuron (intype, inlabel, inx, iny, inz, inM, s_mask)
+ {}
public:
-
- unsigned n_spikes_in_last_dt() const;
+ unsigned n_spikes_in_last_dt() const;
};
@@ -98,20 +96,23 @@ class C_StandaloneRateBasedNeuron
class CNeuronHH_r
: public C_StandaloneRateBasedNeuron {
+
+ DELETE_DEFAULT_METHODS(CNeuronHH_r)
+
public:
- CNeuronHH_r( const char *inlabel,
- double x, double y, double z,
- CModel *inM, int s_mask = 0)
- : C_StandaloneRateBasedNeuron( NT_HH_R, inlabel, x, y, z, inM, s_mask)
- {}
+ CNeuronHH_r( const string& inlabel,
+ double x, double y, double z,
+ CModel *inM, int s_mask = 0)
+ : C_StandaloneRateBasedNeuron( NT_HH_R, inlabel, x, y, z, inM, s_mask)
+ {}
- enum {
- _a_, _I0_, _r_, _Idc_,
- };
+ enum {
+ _a_, _I0_, _r_, _Idc_,
+ };
- double F( vector<double>&) const __attribute__ ((hot));
+ double F( vector<double>&) const __attribute__ ((hot));
- void preadvance() __attribute__ ((hot));
+ void preadvance() __attribute__ ((hot));
};
@@ -124,22 +125,26 @@ class CNeuronHH_r
class COscillatorPoisson
: public C_StandaloneConductanceBasedNeuron {
- public:
- COscillatorPoisson( const char *inlabel, double x, double y, double z, CModel *inM, int s_mask = 0)
- : C_StandaloneConductanceBasedNeuron( NT_POISSON, inlabel, x, y, z, inM, s_mask)
- {
- // need _spikelogger_agent's fields even when no spikelogging is done
- _spikelogger_agent = new SSpikeloggerService( static_cast<C_BaseNeuron*>(this),
- 0 | CN_KL_PERSIST | CN_KL_IDLE);
- }
-
- enum {
- _lambda_, _trel_, _trelrfr_, _Vrst_, _Vfir_,
- };
+ DELETE_DEFAULT_METHODS (COscillatorPoisson)
- void possibly_fire() __attribute__ ((hot));
-
- void do_detect_spike_or_whatever() __attribute__ ((hot));
+ public:
+ COscillatorPoisson( const string& inlabel,
+ double x, double y, double z,
+ CModel *inM, int s_mask = 0)
+ : C_StandaloneConductanceBasedNeuron( NT_POISSON, inlabel, x, y, z, inM, s_mask)
+ {
+ // need _spikelogger_agent's fields even when no spikelogging is done
+ _spikelogger_agent = new SSpikeloggerService( static_cast<C_BaseNeuron*>(this),
+ 0 | CN_KL_PERSIST | CN_KL_IDLE);
+ }
+
+ enum {
+ _lambda_, _trel_, _trelrfr_, _Vrst_, _Vfir_,
+ };
+
+ void possibly_fire() __attribute__ ((hot));
+
+ void do_detect_spike_or_whatever() __attribute__ ((hot));
};
@@ -154,49 +159,56 @@ class COscillatorPoisson
class COscillatorDotPoisson
: public C_StandaloneConductanceBasedNeuron {
+ DELETE_DEFAULT_METHODS (COscillatorDotPoisson)
+
public:
- COscillatorDotPoisson( const char *inlabel, double x, double y, double z, CModel *inM, int s_mask = 0)
- : C_StandaloneConductanceBasedNeuron( NT_DOTPOISSON, inlabel, x, y, z, inM, s_mask)
- {
- // need _spikelogger_agent's fields even when no spikelogging is done
- _spikelogger_agent = new SSpikeloggerService( static_cast<C_BaseNeuron*>(this),
- 0 | CN_KL_PERSIST | CN_KL_IDLE);
- }
+ COscillatorDotPoisson( const string& inlabel,
+ double x, double y, double z,
+ CModel *inM, int s_mask = 0)
+ : C_StandaloneConductanceBasedNeuron( NT_DOTPOISSON, inlabel, x, y, z, inM, s_mask)
+ {
+ // need _spikelogger_agent's fields even when no spikelogging is done
+ _spikelogger_agent = new SSpikeloggerService( static_cast<C_BaseNeuron*>(this),
+ 0 | CN_KL_PERSIST | CN_KL_IDLE);
+ }
- enum {
- _lambda_, _Vrst_, _Vfir_,
- };
+ enum {
+ _lambda_, _Vrst_, _Vfir_,
+ };
- void do_detect_spike_or_whatever() __attribute__ ((hot));
+ void do_detect_spike_or_whatever() __attribute__ ((hot));
- void possibly_fire() __attribute__ ((hot));
+ void possibly_fire() __attribute__ ((hot));
- unsigned n_spikes_in_last_dt()
- { return V[1]; }
+ unsigned n_spikes_in_last_dt()
+ { return V[1]; }
- double &nspikes()
- { return V[1]; }
+ double &nspikes()
+ { return V[1]; }
};
class CNeuronDotPulse
: public C_StandaloneConductanceBasedNeuron {
+
+ DELETE_DEFAULT_METHODS (CNeuronDotPulse)
+
public:
- CNeuronDotPulse( const char *inlabel,
- double x, double y, double z,
- CModel *inM, int s_mask = 0)
- : C_StandaloneConductanceBasedNeuron( NT_DOTPULSE, inlabel, x, y, z, inM, s_mask)
- {}
+ CNeuronDotPulse( const string& inlabel,
+ double x, double y, double z,
+ CModel *inM, int s_mask = 0)
+ : C_StandaloneConductanceBasedNeuron( NT_DOTPULSE, inlabel, x, y, z, inM, s_mask)
+ {}
- enum { _f_, _Vrst_, _Vfir_ };
+ enum { _f_, _Vrst_, _Vfir_ };
- double &spikes_fired_in_last_dt()
- { return V[1]; }
+ double &spikes_fired_in_last_dt()
+ { return V[1]; }
- void possibly_fire();
+ void possibly_fire();
- void param_changed_hook();
+ void param_changed_hook();
};
@@ -210,26 +222,32 @@ class CNeuronDotPulse
class CNeuronMap
: public C_StandaloneConductanceBasedNeuron {
+ DELETE_DEFAULT_METHODS (CNeuronMap)
+
public:
- static constexpr double fixed_dt = 0.1;
+ static const constexpr double fixed_dt = 0.1;
- CNeuronMap( const char *inlabel, double x, double y, double z, CModel *inM, int s_mask = 0);
+ CNeuronMap( const string& inlabel, double x, double y, double z,
+ CModel*, int s_mask = 0);
- enum {
- _Vspike_, _alpha_, _gamma_, _beta_, _Idc_
- };
+ enum {
+ _Vspike_, _alpha_, _gamma_, _beta_, _Idc_
+ };
- void preadvance();
- void fixate();
+ void preadvance();
+ void fixate();
private:
- double _E_prev;
+ double _E_prev;
};
-
-
}
#endif
-// EOF
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/standalone-synapses.cc b/upstream/src/libcn/standalone-synapses.cc
index f29caf2..95294d6 100644
--- a/upstream/src/libcn/standalone-synapses.cc
+++ b/upstream/src/libcn/standalone-synapses.cc
@@ -25,30 +25,30 @@
cnrun::C_StandaloneSynapse::
C_StandaloneSynapse( TUnitType intype,
- C_BaseNeuron* insource, C_BaseNeuron* intarget,
- double ing, CModel* inM, int s_mask)
+ C_BaseNeuron* insource, C_BaseNeuron* intarget,
+ double ing, CModel* inM, int s_mask)
: C_BaseSynapse (intype, insource, intarget, ing, inM, s_mask),
- C_StandaloneAttributes (__CNUDT[intype].vno)
+ C_StandaloneAttributes (__CNUDT[intype].vno)
{
- reset_vars();
- if ( M )
- M->include_unit( this);
- // else
- // _status &= ~CN_UENABLED;
+ reset_vars();
+ if ( M )
+ M->include_unit( this);
+ // else
+ // _status &= ~CN_UENABLED;
}
cnrun::C_StandaloneSynapse::
~C_StandaloneSynapse()
{
- if ( __cn_verbosely > 5 )
- fprintf( stderr, " deleting standalone synapse \"%s\"\n", _label);
+ if ( cn_verbosely > 5 )
+ fprintf( stderr, " deleting standalone synapse \"%s\"\n", _label);
}
// C_StandaloneSynapse::~C_StandaloneSynapse()
// {
-// if ( M->unregister_standalone_synapse( this) )
-// cerr << "Synapse " << label << " was forgotten by mother model\n";
+// if ( M->unregister_standalone_synapse( this) )
+// cerr << "Synapse " << label << " was forgotten by mother model\n";
// }
@@ -59,23 +59,23 @@ cnrun::C_StandaloneSynapse::
const double cnrun::__CN_Params_SynapseMap[] = {
-// 0.075,
+// 0.075,
18.94463, // Decay rate time constant
- 0.25,
- 0
+ 0.25,
+ 0
};
const char* const cnrun::__CN_ParamNames_SynapseMap[] = {
-// "Synaptic strength g, " __CN_PU_CONDUCTANCE,
- "Decay rate time constant \317\204, " __CN_PU_RATE,
- "Release quantile \316\264",
- "Reversal potential Vrev, " __CN_PU_POTENTIAL
+// "Synaptic strength g, " __CN_PU_CONDUCTANCE,
+ "Decay rate time constant \317\204, " __CN_PU_RATE,
+ "Release quantile \316\264",
+ "Reversal potential Vrev, " __CN_PU_POTENTIAL
};
const char* const cnrun::__CN_ParamSyms_SynapseMap[] = {
-// "gsyn",
- "tau",
- "delta",
- "Vrev"
+// "gsyn",
+ "tau",
+ "delta",
+ "Vrev"
};
@@ -88,17 +88,21 @@ void
cnrun::CSynapseMxMap::
update_queue()
{
- unsigned k = _source -> n_spikes_in_last_dt();
- while ( k-- )
- _kq.push_back( model_time());
-
- while ( true ) {
- if ( q() > 0 && model_time() - _kq.front() > P[_tau_] )
- _kq.erase( _kq.begin());
- else
- break;
- }
+ size_t k = _source -> n_spikes_in_last_dt();
+ while ( k-- )
+ _kq.push_back( model_time());
+
+ while ( true ) {
+ if ( q() > 0 && model_time() - _kq.front() > P[_tau_] )
+ _kq.erase( _kq.begin());
+ else
+ break;
+ }
}
-
-// eof
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/standalone-synapses.hh b/upstream/src/libcn/standalone-synapses.hh
index 6da3d3d..48d03c3 100644
--- a/upstream/src/libcn/standalone-synapses.hh
+++ b/upstream/src/libcn/standalone-synapses.hh
@@ -10,8 +10,8 @@
-#ifndef LIBCN_STANDALONE_SYNAPSES_H
-#define LIBCN_STANDALONE_SYNAPSES_H
+#ifndef CNRUN_LIBCN_STANDALONE_SYNAPSES_H_
+#define CNRUN_LIBCN_STANDALONE_SYNAPSES_H_
#include <iostream>
@@ -33,26 +33,26 @@ class C_StandaloneSynapse
: public C_BaseSynapse, public C_StandaloneAttributes {
private:
- C_StandaloneSynapse();
+ C_StandaloneSynapse();
protected:
- C_StandaloneSynapse( TUnitType intype, C_BaseNeuron *insource, C_BaseNeuron *intarget,
- double ing, CModel* inM, int s_mask = 0);
+ C_StandaloneSynapse( TUnitType, C_BaseNeuron *insource, C_BaseNeuron *intarget,
+ double ing, CModel*, int s_mask = 0);
public:
~C_StandaloneSynapse();
- double &var_value( size_t v) { return V[v]; }
- const double &get_var_value( size_t v) const { return V[v]; }
- double S() const { return V[0]; }
- double &S( vector<double>&) { return V[0]; }
-
- void reset_vars()
- {
- memcpy( V.data(), __CNUDT[_type].stock_var_values,
- sizeof(double) * v_no());
- memcpy( V_next.data(), __CNUDT[_type].stock_var_values,
- sizeof(double) * v_no());
- }
+ double &var_value( size_t v) { return V[v]; }
+ const double &get_var_value( size_t v) const { return V[v]; }
+ double S() const { return V[0]; }
+ double &S( vector<double>&) { return V[0]; }
+
+ void reset_vars()
+ {
+ memcpy( V.data(), __CNUDT[_type].stock_var_values,
+ sizeof(double) * v_no());
+ memcpy( V_next.data(), __CNUDT[_type].stock_var_values,
+ sizeof(double) * v_no());
+ }
};
@@ -63,30 +63,30 @@ class CSynapseMap
: public C_StandaloneSynapse {
private:
- CSynapseMap();
+ CSynapseMap();
public:
- static constexpr double fixed_dt = 0.1;
+ static constexpr double fixed_dt = 0.1;
- CSynapseMap( C_BaseNeuron *insource, C_BaseNeuron *intarget,
- double ing, CModel *inM, int s_mask = 0, TUnitType alt_type = YT_MAP);
+ CSynapseMap( C_BaseNeuron *insource, C_BaseNeuron *intarget,
+ double ing, CModel*, int s_mask = 0, TUnitType alt_type = YT_MAP);
- void preadvance(); // defined inline in model.h
+ void preadvance(); // defined inline in model.h
- enum {
- _tau_, _delta_, _Esyn_
- };
- double Isyn( const C_BaseNeuron &with_neuron, double g) const
- {
- return -g * S() * (with_neuron.E() - P[_Esyn_]);
- }
- double Isyn( vector<double>& unused, const C_BaseNeuron &with_neuron, double g) const
- {
- return Isyn( with_neuron, g);
- }
+ enum {
+ _tau_, _delta_, _Esyn_
+ };
+ double Isyn( const C_BaseNeuron &with_neuron, double g) const
+ {
+ return -g * S() * (with_neuron.E() - P[_Esyn_]);
+ }
+ double Isyn( vector<double>& unused, const C_BaseNeuron &with_neuron, double g) const
+ {
+ return Isyn( with_neuron, g);
+ }
protected:
- bool _source_was_spiking;
+ bool _source_was_spiking;
};
@@ -97,25 +97,30 @@ class CSynapseMxMap
: public CSynapseMap, public C_MultiplexingAttributes {
public:
- static constexpr double fixed_dt = 0.1;
+ static constexpr double fixed_dt = 0.1;
- CSynapseMxMap( C_BaseNeuron *insource, C_BaseNeuron *intarget,
- double ing, CModel *inM, int s_mask = 0)
- : CSynapseMap( insource, intarget, ing, inM, s_mask, YT_MXMAP)
- {}
+ CSynapseMxMap( C_BaseNeuron *insource, C_BaseNeuron *intarget,
+ double ing, CModel *inM, int s_mask = 0)
+ : CSynapseMap( insource, intarget, ing, inM, s_mask, YT_MXMAP)
+ {}
- enum {
- _tau_, _delta_, _Esyn_
- };
- void preadvance(); // defined inline in model.h
+ enum {
+ _tau_, _delta_, _Esyn_
+ };
+ void preadvance(); // defined inline in model.h
private:
- friend class CModel;
- void update_queue();
+ friend class CModel;
+ void update_queue();
};
}
#endif
-// EOF
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/types.cc b/upstream/src/libcn/types.cc
index e6fcfbc..91eb67e 100644
--- a/upstream/src/libcn/types.cc
+++ b/upstream/src/libcn/types.cc
@@ -30,436 +30,436 @@ cnrun::SCNDescriptor cnrun::__CNUDT[] = {
// ---------------- Neuron types
- { UT_HOSTED, // NT_HH_D
- 8+18, 4,
- __CN_Params_NeuronHH_d,
- __CN_ParamNames_NeuronHH_d,
- __CN_ParamSyms_NeuronHH_d,
- __CN_Vars_NeuronHH_d,
- __CN_VarNames_NeuronHH_d,
- __CN_VarSyms_NeuronHH_d,
- "HH",
- "HH",
- "Hodgkin-Huxley by Traub and Miles (1991)"
- },
-
- { UT_RATEBASED, // NT_HH_R
- 4, 1,
- __CN_Params_NeuronHH_r,
- __CN_ParamNames_NeuronHH_r,
- __CN_ParamSyms_NeuronHH_r,
- __CN_Vars_NeuronHH_r,
- __CN_VarNames_NeuronHH_r,
- __CN_VarSyms_NeuronHH_r,
- "HHRate",
- "HHRate",
- "Rate-based model of the Hodgkin-Huxley neuron"
- },
-
- { UT_HOSTED, // NT_HH2_D
- 11+18-1, 4,
- __CN_Params_NeuronHH2_d,
- __CN_ParamNames_NeuronHH2_d,
- __CN_ParamSyms_NeuronHH2_d,
- __CN_Vars_NeuronHH2_d,
- __CN_VarNames_NeuronHH_d,
- __CN_VarSyms_NeuronHH_d,
- "HH2",
- "HH2",
- "Hodgkin-Huxley by Traub & Miles w/ K leakage"
- },
-
- { UT_RATEBASED | UT__STUB, // NT_HH2_R
- 0, 0,
- NULL, NULL, NULL,
- NULL, NULL, NULL,
- "HH2Rate",
- "HH2Rate",
- "Rate-based model of the Hodgkin-Huxley by Traub & Miles"
- },
+ { UT_HOSTED, // NT_HH_D
+ 8+18, 4,
+ __CN_Params_NeuronHH_d,
+ __CN_ParamNames_NeuronHH_d,
+ __CN_ParamSyms_NeuronHH_d,
+ __CN_Vars_NeuronHH_d,
+ __CN_VarNames_NeuronHH_d,
+ __CN_VarSyms_NeuronHH_d,
+ "HH",
+ "HH",
+ "Hodgkin-Huxley by Traub and Miles (1991)"
+ },
+
+ { UT_RATEBASED, // NT_HH_R
+ 4, 1,
+ __CN_Params_NeuronHH_r,
+ __CN_ParamNames_NeuronHH_r,
+ __CN_ParamSyms_NeuronHH_r,
+ __CN_Vars_NeuronHH_r,
+ __CN_VarNames_NeuronHH_r,
+ __CN_VarSyms_NeuronHH_r,
+ "HHRate",
+ "HHRate",
+ "Rate-based model of the Hodgkin-Huxley neuron"
+ },
+
+ { UT_HOSTED, // NT_HH2_D
+ 11+18-1, 4,
+ __CN_Params_NeuronHH2_d,
+ __CN_ParamNames_NeuronHH2_d,
+ __CN_ParamSyms_NeuronHH2_d,
+ __CN_Vars_NeuronHH2_d,
+ __CN_VarNames_NeuronHH_d,
+ __CN_VarSyms_NeuronHH_d,
+ "HH2",
+ "HH2",
+ "Hodgkin-Huxley by Traub & Miles w/ K leakage"
+ },
+
+ { UT_RATEBASED | UT__STUB, // NT_HH2_R
+ 0, 0,
+ NULL, NULL, NULL,
+ NULL, NULL, NULL,
+ "HH2Rate",
+ "HH2Rate",
+ "Rate-based model of the Hodgkin-Huxley by Traub & Miles"
+ },
//#ifdef CN_WANT_MORE_NEURONS
- { UT_HOSTED, // NT_EC_D
- 14, 6,
- __CN_Params_NeuronEC_d,
- __CN_ParamNames_NeuronEC_d,
- __CN_ParamSyms_NeuronEC_d,
- __CN_Vars_NeuronEC_d,
- __CN_VarNames_NeuronEC_d,
- __CN_VarSyms_NeuronEC_d,
- "EC",
- "EC",
- "Entorhinal Cortex neuron"
- },
-
- { UT_HOSTED, // NT_ECA_D
- 11, 7,
- __CN_Params_NeuronECA_d,
- __CN_ParamNames_NeuronECA_d,
- __CN_ParamSyms_NeuronECA_d,
- __CN_Vars_NeuronECA_d,
- __CN_VarNames_NeuronECA_d,
- __CN_VarSyms_NeuronECA_d,
- "ECA",
- "ECA",
- "Entorhinal Cortex (A) neuron"
- },
+ { UT_HOSTED, // NT_EC_D
+ 14, 6,
+ __CN_Params_NeuronEC_d,
+ __CN_ParamNames_NeuronEC_d,
+ __CN_ParamSyms_NeuronEC_d,
+ __CN_Vars_NeuronEC_d,
+ __CN_VarNames_NeuronEC_d,
+ __CN_VarSyms_NeuronEC_d,
+ "EC",
+ "EC",
+ "Entorhinal Cortex neuron"
+ },
+
+ { UT_HOSTED, // NT_ECA_D
+ 11, 7,
+ __CN_Params_NeuronECA_d,
+ __CN_ParamNames_NeuronECA_d,
+ __CN_ParamSyms_NeuronECA_d,
+ __CN_Vars_NeuronECA_d,
+ __CN_VarNames_NeuronECA_d,
+ __CN_VarSyms_NeuronECA_d,
+ "ECA",
+ "ECA",
+ "Entorhinal Cortex (A) neuron"
+ },
//#endif
- { UT_OSCILLATOR | UT_DOT, // NT_POISSONDOT
- 3, 2,
- __CN_Params_OscillatorPoissonDot,
- __CN_ParamNames_OscillatorPoissonDot,
- __CN_ParamSyms_OscillatorPoissonDot,
- __CN_Vars_OscillatorPoissonDot,
- __CN_VarNames_OscillatorPoissonDot,
- __CN_VarSyms_OscillatorPoissonDot,
- "DotPoisson",
- "DotPoisson",
- "Duration-less spike Poisson oscillator"
- },
-
- { UT_OSCILLATOR, // NT_POISSON
- 5, 1,
- __CN_Params_OscillatorPoisson,
- __CN_ParamNames_OscillatorPoisson,
- __CN_ParamSyms_OscillatorPoisson,
- __CN_Vars_OscillatorPoisson,
- __CN_VarNames_OscillatorPoisson,
- __CN_VarSyms_OscillatorPoisson,
- "Poisson",
- "Poisson",
- "Poisson oscillator"
- },
+        { UT_OSCILLATOR | UT_DOT,  // NT_DOTPOISSON
+ 3, 2,
+ __CN_Params_OscillatorPoissonDot,
+ __CN_ParamNames_OscillatorPoissonDot,
+ __CN_ParamSyms_OscillatorPoissonDot,
+ __CN_Vars_OscillatorPoissonDot,
+ __CN_VarNames_OscillatorPoissonDot,
+ __CN_VarSyms_OscillatorPoissonDot,
+ "DotPoisson",
+ "DotPoisson",
+ "Duration-less spike Poisson oscillator"
+ },
+
+ { UT_OSCILLATOR, // NT_POISSON
+ 5, 1,
+ __CN_Params_OscillatorPoisson,
+ __CN_ParamNames_OscillatorPoisson,
+ __CN_ParamSyms_OscillatorPoisson,
+ __CN_Vars_OscillatorPoisson,
+ __CN_VarNames_OscillatorPoisson,
+ __CN_VarSyms_OscillatorPoisson,
+ "Poisson",
+ "Poisson",
+ "Poisson oscillator"
+ },
/*
- { UT_HOSTED | UT_OSCILLATOR, // NT_LV
- 1, 2,
- __CN_Params_OscillatorLV,
- __CN_ParamNames_OscillatorLV,
- __CN_ParamSyms_OscillatorLV,
- __CN_Vars_OscillatorLV,
- __CN_VarNames_OscillatorLV,
- __CN_VarSyms_OscillatorLV,
- "LV",
- "LV",
- "Lotka-Volterra oscillator"
- },
+ { UT_HOSTED | UT_OSCILLATOR, // NT_LV
+ 1, 2,
+ __CN_Params_OscillatorLV,
+ __CN_ParamNames_OscillatorLV,
+ __CN_ParamSyms_OscillatorLV,
+ __CN_Vars_OscillatorLV,
+ __CN_VarNames_OscillatorLV,
+ __CN_VarSyms_OscillatorLV,
+ "LV",
+ "LV",
+ "Lotka-Volterra oscillator"
+ },
*/
- { UT_HOSTED | UT_OSCILLATOR, // NT_COLPITTS,
- 4, 3,
- __CN_Params_OscillatorColpitts,
- __CN_ParamNames_OscillatorColpitts,
- __CN_ParamSyms_OscillatorColpitts,
- __CN_Vars_OscillatorColpitts,
- __CN_VarNames_OscillatorColpitts,
- __CN_VarSyms_OscillatorColpitts,
- "Colpitts",
- "Colpitts",
- "Colpitts oscillator"
- },
-
- { UT_HOSTED | UT_OSCILLATOR, // NT_VDPOL,
- 2, 2,
- __CN_Params_OscillatorVdPol,
- __CN_ParamNames_OscillatorVdPol,
- __CN_ParamSyms_OscillatorVdPol,
- __CN_Vars_OscillatorVdPol,
- __CN_VarNames_OscillatorVdPol,
- __CN_VarSyms_OscillatorVdPol,
- "VdPol",
- "VdPol",
- "Van der Pol oscillator"
- },
-
- { UT_OSCILLATOR | UT_DOT, // NT_DOTPULSE
- 3, 2,
- __CN_Params_NeuronDotPulse,
- __CN_ParamNames_NeuronDotPulse,
- __CN_ParamSyms_NeuronDotPulse,
- __CN_Vars_NeuronDotPulse,
- __CN_VarNames_NeuronDotPulse,
- __CN_VarSyms_NeuronDotPulse,
- "DotPulse",
- "DotPulse",
- "Dot Pulse generator"
- },
-
- { UT_DDTSET, // NT_MAP
- 5, 1,
- __CN_Params_NeuronMap,
- __CN_ParamNames_NeuronMap,
- __CN_ParamSyms_NeuronMap,
- __CN_Vars_NeuronMap,
- __CN_VarNames_NeuronMap,
- __CN_VarSyms_NeuronMap,
- "NMap",
- "NMap",
- "Map neuron"
- },
+ { UT_HOSTED | UT_OSCILLATOR, // NT_COLPITTS,
+ 4, 3,
+ __CN_Params_OscillatorColpitts,
+ __CN_ParamNames_OscillatorColpitts,
+ __CN_ParamSyms_OscillatorColpitts,
+ __CN_Vars_OscillatorColpitts,
+ __CN_VarNames_OscillatorColpitts,
+ __CN_VarSyms_OscillatorColpitts,
+ "Colpitts",
+ "Colpitts",
+ "Colpitts oscillator"
+ },
+
+ { UT_HOSTED | UT_OSCILLATOR, // NT_VDPOL,
+ 2, 2,
+ __CN_Params_OscillatorVdPol,
+ __CN_ParamNames_OscillatorVdPol,
+ __CN_ParamSyms_OscillatorVdPol,
+ __CN_Vars_OscillatorVdPol,
+ __CN_VarNames_OscillatorVdPol,
+ __CN_VarSyms_OscillatorVdPol,
+ "VdPol",
+ "VdPol",
+ "Van der Pol oscillator"
+ },
+
+ { UT_OSCILLATOR | UT_DOT, // NT_DOTPULSE
+ 3, 2,
+ __CN_Params_NeuronDotPulse,
+ __CN_ParamNames_NeuronDotPulse,
+ __CN_ParamSyms_NeuronDotPulse,
+ __CN_Vars_NeuronDotPulse,
+ __CN_VarNames_NeuronDotPulse,
+ __CN_VarSyms_NeuronDotPulse,
+ "DotPulse",
+ "DotPulse",
+ "Dot Pulse generator"
+ },
+
+ { UT_DDTSET, // NT_MAP
+ 5, 1,
+ __CN_Params_NeuronMap,
+ __CN_ParamNames_NeuronMap,
+ __CN_ParamSyms_NeuronMap,
+ __CN_Vars_NeuronMap,
+ __CN_VarNames_NeuronMap,
+ __CN_VarSyms_NeuronMap,
+ "NMap",
+ "NMap",
+ "Map neuron"
+ },
// ---------------- Synapse types
// a proper synapse (of eg AB type) will be selected based on whether
// its source/target is rate-based or discrete
- { UT_HOSTED, // YT_AB_DD
- 5, 1,
- __CN_Params_SynapseAB_dd,
- __CN_ParamNames_SynapseAB_dd,
- __CN_ParamSyms_SynapseAB_dd,
- __CN_Vars_SynapseAB,
- __CN_VarNames_SynapseAB,
- __CN_VarSyms_SynapseAB,
- "AB",
- "AB_pp",
- "Alpha-Beta synapse (Destexhe, Mainen, Sejnowsky, 1994)"
- },
-
- { UT_HOSTED | UT_TGTISRATE | UT__STUB, // YT_AB_DR
- 5, 1,
- NULL, NULL, NULL,
- NULL, NULL, NULL,
- "AB",
- "AB_pt",
- "Alpha-Beta synapse (phasic->tonic)"
- },
-
- { UT_HOSTED | UT_SRCISRATE | UT__STUB, // YT_AB_RD
- 5, 1,
- NULL, NULL, NULL,
- NULL, NULL, NULL,
- "AB",
- "AB_tp",
- "Alpha-Beta synapse (tonic->phasic)"
- },
-
- { UT_HOSTED | UT_RATEBASED, // YT_AB_RR
- 4, 1,
- __CN_Params_SynapseAB_rr,
- __CN_ParamNames_SynapseAB_rr,
- __CN_ParamSyms_SynapseAB_rr,
- __CN_Vars_SynapseAB,
- __CN_VarNames_SynapseAB,
- __CN_VarSyms_SynapseAB,
- "AB",
- "AB_tt",
- "Alpha-Beta synapse (tonic->tonic)"
- },
-
- { UT_HOSTED | UT_MULTIPLEXING, // YT_MXAB_DD, inheriting all parameters except alpha, and variables from YT_AB
- 5, 1,
- __CN_Params_SynapseMxAB_dd,
- __CN_ParamNames_SynapseAB_dd,
- __CN_ParamSyms_SynapseAB_dd,
- __CN_Vars_SynapseAB,
- __CN_VarNames_SynapseAB,
- __CN_VarSyms_SynapseAB,
- "AB",
- "AB_Mx_pp",
- "Multiplexing Alpha-Beta synapse for use with durationless units as source (phasic->phasic)"
- },
-
- { UT_HOSTED | UT_TGTISRATE | UT_MULTIPLEXING, // YT_MXAB_DR
- 5, 1,
- __CN_Params_SynapseMxAB_dr,
- __CN_ParamNames_SynapseAB_dr,
- __CN_ParamSyms_SynapseAB_dr,
- __CN_Vars_SynapseAB,
- __CN_VarNames_SynapseAB,
- __CN_VarSyms_SynapseAB,
- "AB",
- "AB_Mx_pt",
- "Multiplexing Alpha-Beta synapse for use with durationless units as source (phasic->tonic)"
- },
-
-
- { UT_HOSTED, // YT_ABMINS_DD
- 5, 1,
- __CN_Params_SynapseABMinus_dd,
- __CN_ParamNames_SynapseAB_dd,
- __CN_ParamSyms_SynapseAB_dd,
- __CN_Vars_SynapseAB,
- __CN_VarNames_SynapseAB,
- __CN_VarSyms_SynapseAB,
- "ABMinus",
- "ABMinus_pp",
- "Alpha-Beta synapse w/out (1-S) term"
- },
-
- { UT_HOSTED | UT_TGTISRATE | UT__STUB, // YT_ABMINS_DR
- 5, 1,
- NULL, NULL, NULL,
- __CN_Vars_SynapseAB,
- __CN_VarNames_SynapseAB,
- __CN_VarSyms_SynapseAB,
- "ABMinus",
- "ABMinus_pt",
- "Alpha-Beta synapse w/out (1-S) term (phasic->tonic)"
- },
-
- { UT_HOSTED | UT_SRCISRATE | UT__STUB, // YT_ABMINS_RD
- 5, 1,
- NULL, NULL, NULL,
- __CN_Vars_SynapseAB,
- __CN_VarNames_SynapseAB,
- __CN_VarSyms_SynapseAB,
- "ABMinus",
- "ABMinus_tp",
- "Alpha-Beta synapse w/out (1-S) term (tonic->phasic)"
- },
-
- { UT_HOSTED | UT_SRCISRATE | UT_TGTISRATE | UT__STUB, // YT_ABMINS_RR
- 5, 1,
- NULL, NULL, NULL,
- __CN_Vars_SynapseAB,
- __CN_VarNames_SynapseAB,
- __CN_VarSyms_SynapseAB,
- "ABMinus",
- "ABMinus_tt",
- "Alpha-Beta synapse w/out (1-S) term (tonic->tonic)"
- },
-
- { UT_HOSTED | UT_MULTIPLEXING | UT__STUB, // YT_MXABMINUS_DD
- 5, 1,
- NULL, NULL, NULL,
- __CN_Vars_SynapseAB,
- __CN_VarNames_SynapseAB,
- __CN_VarSyms_SynapseAB,
- "ABMinus",
- "ABMinus_Mx_pp",
- "Multiplexing Alpha-Beta w/out (1-S) synapse for use with durationless units as source (phasic->phasic)"
- },
-
- { UT_HOSTED | UT_TGTISRATE | UT_MULTIPLEXING | UT__STUB, // YT_MXABMINUS_DR
- 5, 1,
- NULL, NULL, NULL,
- __CN_Vars_SynapseAB,
- __CN_VarNames_SynapseAB,
- __CN_VarSyms_SynapseAB,
- "ABMinus",
- "ABMinus_Mx_pt",
- "Multiplexing Alpha-Beta w/out (1-S) synapse for use with durationless units as source (phasic->tonic)"
- },
-
-
- { UT_HOSTED, // YT_RALL_DD
- 3, 2,
- __CN_Params_SynapseRall_dd,
- __CN_ParamNames_SynapseRall_dd,
- __CN_ParamSyms_SynapseRall_dd,
- __CN_Vars_SynapseRall,
- __CN_VarNames_SynapseRall,
- __CN_VarSyms_SynapseRall,
- "Rall",
- "Rall_pp",
- "Rall synapse (Rall, 1967)"
- },
-
- { UT_HOSTED | UT_TGTISRATE | UT__STUB, // YT_RALL_DR
- 3, 2,
- NULL, NULL, NULL,
- __CN_Vars_SynapseRall,
- __CN_VarNames_SynapseRall,
- __CN_VarSyms_SynapseRall,
- "Rall",
- "Rall_pt",
- "Rall synapse (Rall, 1967) (phasic->tonic)"
- },
-
- { UT_HOSTED | UT_SRCISRATE | UT__STUB, // YT_RALL_RD
- 3, 2,
- NULL, NULL, NULL,
- __CN_Vars_SynapseRall,
- __CN_VarNames_SynapseRall,
- __CN_VarSyms_SynapseRall,
- "Rall",
- "Rall_tp",
- "Rall synapse (tonic->phasic)"
- },
-
- { UT_HOSTED | UT_SRCISRATE | UT_TGTISRATE | UT__STUB, // YT_RALL_RR
- 3, 2,
- NULL, NULL, NULL,
- __CN_Vars_SynapseRall,
- __CN_VarNames_SynapseRall,
- __CN_VarSyms_SynapseRall,
- "Rall",
- "Rall_tt",
- "Rall synapse (tonic->tonic)"
- },
-
- { UT_HOSTED | UT_MULTIPLEXING | UT__STUB, // YT_MXRALL_DD
- 3, 2,
- NULL, NULL, NULL,
- __CN_Vars_SynapseRall,
- __CN_VarNames_SynapseRall,
- __CN_VarSyms_SynapseRall,
- "Rall",
- "Rall_Mx_pp",
- "Rall synapse for use with durationless units as source (phasic->phasic)"
- },
-
- { UT_HOSTED | UT_TGTISRATE | UT_MULTIPLEXING | UT__STUB, // YT_MXRALL_DR
- 3, 2,
- NULL, NULL, NULL,
- __CN_Vars_SynapseRall,
- __CN_VarNames_SynapseRall,
- __CN_VarSyms_SynapseRall,
- "Rall",
- "Rall_Mx_pt",
- "Rall synapse for use with durationless units as source (phasic->tonic)"
- },
-
-
- { UT_DDTSET, // YT_MAP
- 3, 1,
- __CN_Params_SynapseMap,
- __CN_ParamNames_SynapseMap,
- __CN_ParamSyms_SynapseMap,
- __CN_Vars_SynapseAB,
- __CN_VarNames_SynapseAB,
- __CN_VarSyms_SynapseAB,
- "Map",
- "Map",
- "Map synapse"
- },
-
- { UT_DDTSET | UT_MULTIPLEXING, // YT_MXMAP
- 3, 1,
- __CN_Params_SynapseMap,
- __CN_ParamNames_SynapseMap,
- __CN_ParamSyms_SynapseMap,
- __CN_Vars_SynapseAB,
- __CN_VarNames_SynapseAB,
- __CN_VarSyms_SynapseAB,
- "Map",
- "Map_Mx",
- "Multiplexing Map synapse"
- },
+ { UT_HOSTED, // YT_AB_DD
+ 5, 1,
+ __CN_Params_SynapseAB_dd,
+ __CN_ParamNames_SynapseAB_dd,
+ __CN_ParamSyms_SynapseAB_dd,
+ __CN_Vars_SynapseAB,
+ __CN_VarNames_SynapseAB,
+ __CN_VarSyms_SynapseAB,
+ "AB",
+ "AB_pp",
+ "Alpha-Beta synapse (Destexhe, Mainen, Sejnowsky, 1994)"
+ },
+
+ { UT_HOSTED | UT_TGTISRATE | UT__STUB, // YT_AB_DR
+ 5, 1,
+ NULL, NULL, NULL,
+ NULL, NULL, NULL,
+ "AB",
+ "AB_pt",
+ "Alpha-Beta synapse (phasic->tonic)"
+ },
+
+ { UT_HOSTED | UT_SRCISRATE | UT__STUB, // YT_AB_RD
+ 5, 1,
+ NULL, NULL, NULL,
+ NULL, NULL, NULL,
+ "AB",
+ "AB_tp",
+ "Alpha-Beta synapse (tonic->phasic)"
+ },
+
+ { UT_HOSTED | UT_RATEBASED, // YT_AB_RR
+ 4, 1,
+ __CN_Params_SynapseAB_rr,
+ __CN_ParamNames_SynapseAB_rr,
+ __CN_ParamSyms_SynapseAB_rr,
+ __CN_Vars_SynapseAB,
+ __CN_VarNames_SynapseAB,
+ __CN_VarSyms_SynapseAB,
+ "AB",
+ "AB_tt",
+ "Alpha-Beta synapse (tonic->tonic)"
+ },
+
+ { UT_HOSTED | UT_MULTIPLEXING, // YT_MXAB_DD, inheriting all parameters except alpha, and variables from YT_AB
+ 5, 1,
+ __CN_Params_SynapseMxAB_dd,
+ __CN_ParamNames_SynapseAB_dd,
+ __CN_ParamSyms_SynapseAB_dd,
+ __CN_Vars_SynapseAB,
+ __CN_VarNames_SynapseAB,
+ __CN_VarSyms_SynapseAB,
+ "AB",
+ "AB_Mx_pp",
+ "Multiplexing Alpha-Beta synapse for use with durationless units as source (phasic->phasic)"
+ },
+
+ { UT_HOSTED | UT_TGTISRATE | UT_MULTIPLEXING, // YT_MXAB_DR
+ 5, 1,
+ __CN_Params_SynapseMxAB_dr,
+ __CN_ParamNames_SynapseAB_dr,
+ __CN_ParamSyms_SynapseAB_dr,
+ __CN_Vars_SynapseAB,
+ __CN_VarNames_SynapseAB,
+ __CN_VarSyms_SynapseAB,
+ "AB",
+ "AB_Mx_pt",
+ "Multiplexing Alpha-Beta synapse for use with durationless units as source (phasic->tonic)"
+ },
+
+
+        { UT_HOSTED,  // YT_ABMINUS_DD
+ 5, 1,
+ __CN_Params_SynapseABMinus_dd,
+ __CN_ParamNames_SynapseAB_dd,
+ __CN_ParamSyms_SynapseAB_dd,
+ __CN_Vars_SynapseAB,
+ __CN_VarNames_SynapseAB,
+ __CN_VarSyms_SynapseAB,
+ "ABMinus",
+ "ABMinus_pp",
+ "Alpha-Beta synapse w/out (1-S) term"
+ },
+
+        { UT_HOSTED | UT_TGTISRATE | UT__STUB,  // YT_ABMINUS_DR
+ 5, 1,
+ NULL, NULL, NULL,
+ __CN_Vars_SynapseAB,
+ __CN_VarNames_SynapseAB,
+ __CN_VarSyms_SynapseAB,
+ "ABMinus",
+ "ABMinus_pt",
+ "Alpha-Beta synapse w/out (1-S) term (phasic->tonic)"
+ },
+
+        { UT_HOSTED | UT_SRCISRATE | UT__STUB,  // YT_ABMINUS_RD
+ 5, 1,
+ NULL, NULL, NULL,
+ __CN_Vars_SynapseAB,
+ __CN_VarNames_SynapseAB,
+ __CN_VarSyms_SynapseAB,
+ "ABMinus",
+ "ABMinus_tp",
+ "Alpha-Beta synapse w/out (1-S) term (tonic->phasic)"
+ },
+
+        { UT_HOSTED | UT_SRCISRATE | UT_TGTISRATE | UT__STUB,  // YT_ABMINUS_RR
+ 5, 1,
+ NULL, NULL, NULL,
+ __CN_Vars_SynapseAB,
+ __CN_VarNames_SynapseAB,
+ __CN_VarSyms_SynapseAB,
+ "ABMinus",
+ "ABMinus_tt",
+ "Alpha-Beta synapse w/out (1-S) term (tonic->tonic)"
+ },
+
+ { UT_HOSTED | UT_MULTIPLEXING | UT__STUB, // YT_MXABMINUS_DD
+ 5, 1,
+ NULL, NULL, NULL,
+ __CN_Vars_SynapseAB,
+ __CN_VarNames_SynapseAB,
+ __CN_VarSyms_SynapseAB,
+ "ABMinus",
+ "ABMinus_Mx_pp",
+ "Multiplexing Alpha-Beta w/out (1-S) synapse for use with durationless units as source (phasic->phasic)"
+ },
+
+ { UT_HOSTED | UT_TGTISRATE | UT_MULTIPLEXING | UT__STUB, // YT_MXABMINUS_DR
+ 5, 1,
+ NULL, NULL, NULL,
+ __CN_Vars_SynapseAB,
+ __CN_VarNames_SynapseAB,
+ __CN_VarSyms_SynapseAB,
+ "ABMinus",
+ "ABMinus_Mx_pt",
+ "Multiplexing Alpha-Beta w/out (1-S) synapse for use with durationless units as source (phasic->tonic)"
+ },
+
+
+ { UT_HOSTED, // YT_RALL_DD
+ 3, 2,
+ __CN_Params_SynapseRall_dd,
+ __CN_ParamNames_SynapseRall_dd,
+ __CN_ParamSyms_SynapseRall_dd,
+ __CN_Vars_SynapseRall,
+ __CN_VarNames_SynapseRall,
+ __CN_VarSyms_SynapseRall,
+ "Rall",
+ "Rall_pp",
+ "Rall synapse (Rall, 1967)"
+ },
+
+ { UT_HOSTED | UT_TGTISRATE | UT__STUB, // YT_RALL_DR
+ 3, 2,
+ NULL, NULL, NULL,
+ __CN_Vars_SynapseRall,
+ __CN_VarNames_SynapseRall,
+ __CN_VarSyms_SynapseRall,
+ "Rall",
+ "Rall_pt",
+ "Rall synapse (Rall, 1967) (phasic->tonic)"
+ },
+
+ { UT_HOSTED | UT_SRCISRATE | UT__STUB, // YT_RALL_RD
+ 3, 2,
+ NULL, NULL, NULL,
+ __CN_Vars_SynapseRall,
+ __CN_VarNames_SynapseRall,
+ __CN_VarSyms_SynapseRall,
+ "Rall",
+ "Rall_tp",
+ "Rall synapse (tonic->phasic)"
+ },
+
+ { UT_HOSTED | UT_SRCISRATE | UT_TGTISRATE | UT__STUB, // YT_RALL_RR
+ 3, 2,
+ NULL, NULL, NULL,
+ __CN_Vars_SynapseRall,
+ __CN_VarNames_SynapseRall,
+ __CN_VarSyms_SynapseRall,
+ "Rall",
+ "Rall_tt",
+ "Rall synapse (tonic->tonic)"
+ },
+
+ { UT_HOSTED | UT_MULTIPLEXING | UT__STUB, // YT_MXRALL_DD
+ 3, 2,
+ NULL, NULL, NULL,
+ __CN_Vars_SynapseRall,
+ __CN_VarNames_SynapseRall,
+ __CN_VarSyms_SynapseRall,
+ "Rall",
+ "Rall_Mx_pp",
+ "Rall synapse for use with durationless units as source (phasic->phasic)"
+ },
+
+ { UT_HOSTED | UT_TGTISRATE | UT_MULTIPLEXING | UT__STUB, // YT_MXRALL_DR
+ 3, 2,
+ NULL, NULL, NULL,
+ __CN_Vars_SynapseRall,
+ __CN_VarNames_SynapseRall,
+ __CN_VarSyms_SynapseRall,
+ "Rall",
+ "Rall_Mx_pt",
+ "Rall synapse for use with durationless units as source (phasic->tonic)"
+ },
+
+
+ { UT_DDTSET, // YT_MAP
+ 3, 1,
+ __CN_Params_SynapseMap,
+ __CN_ParamNames_SynapseMap,
+ __CN_ParamSyms_SynapseMap,
+ __CN_Vars_SynapseAB,
+ __CN_VarNames_SynapseAB,
+ __CN_VarSyms_SynapseAB,
+ "Map",
+ "Map",
+ "Map synapse"
+ },
+
+ { UT_DDTSET | UT_MULTIPLEXING, // YT_MXMAP
+ 3, 1,
+ __CN_Params_SynapseMap,
+ __CN_ParamNames_SynapseMap,
+ __CN_ParamSyms_SynapseMap,
+ __CN_Vars_SynapseAB,
+ __CN_VarNames_SynapseAB,
+ __CN_VarSyms_SynapseAB,
+ "Map",
+ "Map_Mx",
+ "Multiplexing Map synapse"
+ },
};
cnrun::TUnitType
cnrun::
-unit_family_by_string( const char *id)
+unit_family_by_string( const string& id)
{
- for ( int i = NT_FIRST; i <= YT_LAST; i++ )
- if ( strcmp( id, __CNUDT[i].family ) == 0 )
- return (TUnitType)i;
- return NT_VOID;
+ for ( int i = NT_FIRST; i <= YT_LAST; ++i )
+ if ( id == __CNUDT[i].family )
+ return (TUnitType)i;
+ return NT_VOID;
}
cnrun::TUnitType
cnrun::
-unit_species_by_string( const char *id)
+unit_species_by_string( const string& id)
{
- for ( int i = NT_FIRST; i <= YT_LAST; i++ )
- if ( strcmp( id, __CNUDT[i].species ) == 0 )
- return (TUnitType)i;
- return NT_VOID;
+ for ( int i = NT_FIRST; i <= YT_LAST; ++i )
+ if ( id == __CNUDT[i].species )
+ return (TUnitType)i;
+ return NT_VOID;
}
@@ -469,53 +469,61 @@ void
cnrun::
cnmodel_dump_available_units()
{
- size_t u, p;
- cout << "\n===== Neurons:\n";
- for ( u = NT_FIRST; u <= NT_LAST; u++ ) {
- SCNDescriptor &U = __CNUDT[u];
- if ( U.traits & UT__STUB )
- continue;
- printf( "--- [%s]: %s\nParameters:\n",
- U.species, U.description);
- for ( p = 0; p < U.pno; p++ ) {
- printf( "%4zu: %-5s\t= %s %s\n",
- p, U.stock_param_syms[p],
- cnrun::str::double_dot_aligned_s( U.stock_param_values[p], 4, 8).c_str(),
- U.stock_param_names[p]);
- }
- printf( "Variables:\n");
- for ( p = 0; p < U.vno; p++ ) {
- printf( "%4zu: %-5s\t= %s %s\n",
- p, U.stock_var_syms[p],
- cnrun::str::double_dot_aligned_s( U.stock_var_values[p], 4, 8).c_str(),
- U.stock_var_names[p]);
- }
- cout << endl;
- }
- cout << "\n===== Synapses:\n";
- for ( u = YT_FIRST; u <= YT_LAST; u++ ) {
- SCNDescriptor &U = __CNUDT[u];
- if ( U.traits & UT__STUB )
- continue;
- printf( "--- [%s]: %s\nParameters:\n",
- U.species, U.description);
- for ( p = 0; p < U.pno; p++ ) {
- printf( "%4zu: %-5s\t= %s %s\n",
- p, U.stock_param_syms[p],
- cnrun::str::double_dot_aligned_s( U.stock_param_values[p], 4, 8).c_str(),
- U.stock_param_names[p]);
- }
- cout << "Variables:\n";
- for ( p = 0; p < U.vno; p++ ) {
- printf( "%4zu: %-5s\t= %s %s\n",
- p, U.stock_var_syms[p],
- cnrun::str::double_dot_aligned_s( U.stock_var_values[p], 4, 8).c_str(),
- U.stock_var_names[p]);
- }
- cout << endl;
- }
- cout << endl;
+ using cnrun::stilton::str::double_dot_aligned_s;
+
+ size_t u, p;
+ cout << "\n===== Neurons:\n";
+ for ( u = NT_FIRST; u <= NT_LAST; ++u ) {
+ SCNDescriptor &U = __CNUDT[u];
+ if ( U.traits & UT__STUB )
+ continue;
+ printf( "--- [%s]: %s\nParameters:\n",
+ U.species, U.description);
+ for ( p = 0; p < U.pno; ++p ) {
+ printf( "%4zu: %-5s\t= %s %s\n",
+ p, U.stock_param_syms[p],
+ double_dot_aligned_s(
+ U.stock_param_values[p], 4, 8).c_str(),
+ U.stock_param_names[p]);
+ }
+ printf( "Variables:\n");
+ for ( p = 0; p < U.vno; ++p ) {
+ printf( "%4zu: %-5s\t= %s %s\n",
+ p, U.stock_var_syms[p],
+ double_dot_aligned_s( U.stock_var_values[p], 4, 8).c_str(),
+ U.stock_var_names[p]);
+ }
+ cout << endl;
+ }
+ cout << "\n===== Synapses:\n";
+ for ( u = YT_FIRST; u <= YT_LAST; ++u ) {
+ SCNDescriptor &U = __CNUDT[u];
+ if ( U.traits & UT__STUB )
+ continue;
+ printf( "--- [%s]: %s\nParameters:\n",
+ U.species, U.description);
+ for ( p = 0; p < U.pno; ++p ) {
+ printf( "%4zu: %-5s\t= %s %s\n",
+ p, U.stock_param_syms[p],
+ double_dot_aligned_s(
+ U.stock_param_values[p], 4, 8).c_str(),
+ U.stock_param_names[p]);
+ }
+ cout << "Variables:\n";
+ for ( p = 0; p < U.vno; ++p ) {
+ printf( "%4zu: %-5s\t= %s %s\n",
+ p, U.stock_var_syms[p],
+ double_dot_aligned_s( U.stock_var_values[p], 4, 8).c_str(),
+ U.stock_var_names[p]);
+ }
+ cout << endl;
+ }
+ cout << endl;
}
-
-// eof
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libcn/types.hh b/upstream/src/libcn/types.hh
index dcbab3e..1d0300e 100644
--- a/upstream/src/libcn/types.hh
+++ b/upstream/src/libcn/types.hh
@@ -12,8 +12,8 @@
//#define CN_WANT_MORE_NEURONS
-#ifndef LIBCN_TYPES_H
-#define LIBCN_TYPES_H
+#ifndef CNRUN_LIBCN_TYPES_H_
+#define CNRUN_LIBCN_TYPES_H_
#if HAVE_CONFIG_H && !defined(VERSION)
# include "config.h"
@@ -25,50 +25,50 @@ namespace cnrun {
enum TUnitType {
// neuron types
- NT_VOID = -1,
+ NT_VOID = -1,
- NT_HH_D,
- NT_HH_R,
- NT_HH2_D,
- NT_HH2_R,
+ NT_HH_D,
+ NT_HH_R,
+ NT_HH2_D,
+ NT_HH2_R,
//#ifdef CN_WANT_MORE_NEURONS
- NT_EC_D,
- NT_ECA_D,
+ NT_EC_D,
+ NT_ECA_D,
//#endif
- NT_DOTPOISSON,
- NT_POISSON,
+ NT_DOTPOISSON,
+ NT_POISSON,
//#ifdef CN_WANT_MORE_NEURONS
-// NT_LV,
- NT_COLPITTS,
- NT_VDPOL,
+// NT_LV,
+ NT_COLPITTS,
+ NT_VDPOL,
//#endif
- NT_DOTPULSE,
- NT_MAP,
+ NT_DOTPULSE,
+ NT_MAP,
// synapse types
- YT_AB_DD,
- YT_AB_DR,
- YT_AB_RD,
- YT_AB_RR,
- YT_MXAB_DD,
- YT_MXAB_DR,
-
- YT_ABMINUS_DD,
- YT_ABMINUS_DR,
- YT_ABMINUS_RD,
- YT_ABMINUS_RR,
- YT_MXABMINUS_DD,
- YT_MXABMINUS_DR,
-
- YT_RALL_DD,
- YT_RALL_DR,
- YT_RALL_RD,
- YT_RALL_RR,
- YT_MXRALL_DD,
- YT_MXRALL_DR,
-
- YT_MAP,
- YT_MXMAP,
+ YT_AB_DD,
+ YT_AB_DR,
+ YT_AB_RD,
+ YT_AB_RR,
+ YT_MXAB_DD,
+ YT_MXAB_DR,
+
+ YT_ABMINUS_DD,
+ YT_ABMINUS_DR,
+ YT_ABMINUS_RD,
+ YT_ABMINUS_RR,
+ YT_MXABMINUS_DD,
+ YT_MXABMINUS_DR,
+
+ YT_RALL_DD,
+ YT_RALL_DR,
+ YT_RALL_RD,
+ YT_RALL_RR,
+ YT_MXRALL_DD,
+ YT_MXRALL_DR,
+
+ YT_MAP,
+ YT_MXMAP,
};
#define NT_FIRST NT_HH_D
@@ -79,64 +79,65 @@ enum TUnitType {
// traits, used to ensure units being connected are compatible
-#define UT_HOSTED (1 << 0)
-#define UT_DDTSET (1 << 1)
-#define UT_OSCILLATOR (1 << 2)
-#define UT_RATEBASED (1 << 3)
-#define UT_SRCISRATE UT_RATEBASED
-#define UT_TGTISRATE (1 << 4)
-#define UT_DOT (1 << 5)
-#define UT_MULTIPLEXING UT_DOT
-#define UT__STUB (1 << 15)
+#define UT_HOSTED (1 << 0)
+#define UT_DDTSET (1 << 1)
+#define UT_OSCILLATOR (1 << 2)
+#define UT_RATEBASED (1 << 3)
+#define UT_SRCISRATE UT_RATEBASED
+#define UT_TGTISRATE (1 << 4)
+#define UT_DOT (1 << 5)
+#define UT_MULTIPLEXING UT_DOT
+#define UT__STUB (1 << 15)
struct SCNDescriptor {
- int traits;
- unsigned short
- pno, vno;
- const double *const stock_param_values;
- const char *const *stock_param_names;
- const char *const *stock_param_syms;
- const double *const stock_var_values;
- const char *const *stock_var_names;
- const char *const *stock_var_syms;
- const char *family, *species;
- const char *description;
+ int traits;
+ unsigned short
+ pno, vno;
+ const double *const stock_param_values;
+ const char *const *stock_param_names;
+ const char *const *stock_param_syms;
+ const double *const stock_var_values;
+ const char *const *stock_var_names;
+ const char *const *stock_var_syms;
+ const char *family,
+ *species;
+ const char *description;
};
-TUnitType unit_family_by_string( const char*) __attribute__ ((pure));
-TUnitType unit_species_by_string( const char*) __attribute__ ((pure));
+TUnitType unit_family_by_string( const string&) __attribute__ ((pure));
+TUnitType unit_species_by_string( const string&) __attribute__ ((pure));
inline bool
-unit_species_is_valid( const char *id)
+unit_species_is_valid( const string& id)
{
- return unit_species_by_string(id) != NT_VOID;
+ return unit_species_by_string(id) != NT_VOID;
}
inline bool
-unit_species_is_neuron( const char *id)
+unit_species_is_neuron( const string& id)
{
- TUnitType t = unit_species_by_string(id);
- return t >= NT_FIRST && t <= NT_LAST;
+ TUnitType t = unit_species_by_string(id);
+ return t >= NT_FIRST && t <= NT_LAST;
}
inline bool
-unit_species_is_synapse( const char *id)
+unit_species_is_synapse( const string& id)
{
- TUnitType t = unit_species_by_string(id);
- return t >= YT_FIRST && t <= YT_LAST;
+ TUnitType t = unit_species_by_string(id);
+ return t >= YT_FIRST && t <= YT_LAST;
}
inline bool
-unit_family_is_neuron( const char *id)
+unit_family_is_neuron( const string& id)
{
- TUnitType t = unit_family_by_string(id);
- return t >= NT_FIRST && t <= NT_LAST;
+ TUnitType t = unit_family_by_string(id);
+ return t >= NT_FIRST && t <= NT_LAST;
}
inline bool
-unit_family_is_synapse( const char *id)
+unit_family_is_synapse( const string& id)
{
- TUnitType t = unit_family_by_string(id);
- return t >= YT_FIRST && t <= YT_LAST;
+ TUnitType t = unit_family_by_string(id);
+ return t >= YT_FIRST && t <= YT_LAST;
}
extern SCNDescriptor __CNUDT[];
@@ -277,4 +278,9 @@ extern const char* const __CN_ParamSyms_SynapseMap[];
}
#endif
-// EOF
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libstilton/containers.hh b/upstream/src/libstilton/containers.hh
index eb3bfb6..95c108f 100644
--- a/upstream/src/libstilton/containers.hh
+++ b/upstream/src/libstilton/containers.hh
@@ -9,13 +9,14 @@
* License: GPL
*/
-#ifndef _CNRUN_LIBSTILTON_CONTAINERS_H
-#define _CNRUN_LIBSTILTON_CONTAINERS_H
+#ifndef CNRUN_LIBSTILTON_CONTAINERS_H_
+#define CNRUN_LIBSTILTON_CONTAINERS_H_
#include <list>
#include <forward_list>
#include <vector>
#include <map>
+#include <algorithm>
#if HAVE_CONFIG_H && !defined(VERSION)
# include "config.h"
@@ -30,21 +31,21 @@ template <typename T>
bool
member( const T& x, const list<T>& v)
{
- return any( v.begin(), v.end(), x);
+ return find( v.begin(), v.end(), x) != v.end();
}
template <typename T>
bool
member( const T& x, const forward_list<T>& v)
{
- return any( v.begin(), v.end(), x);
+ return find( v.begin(), v.end(), x) != v.end();
}
template <typename T>
bool
member( const T& x, const vector<T>& v)
{
- return any( v.begin(), v.end(), x);
+ return find( v.begin(), v.end(), x) != v.end();
}
template <typename K, typename V>
diff --git a/upstream/src/libstilton/exprparser.cc b/upstream/src/libstilton/exprparser.cc
index fc7d9f6..e694e4a 100644
--- a/upstream/src/libstilton/exprparser.cc
+++ b/upstream/src/libstilton/exprparser.cc
@@ -24,277 +24,288 @@
using namespace std;
-const char* const cnrun::__exparse_error_strings[] = {
- "",
- "Missing operand",
- "Unbalanced parentheses",
- "Unparsable value or missing operator",
- "Unary operator used as binary",
- "Undefined variable",
- "Non-lvalue in assignment",
- "varlist is NULL"
+const char* const
+cnrun::stilton::CExpression::error_strings[] = {
+ "",
+ "Missing operand",
+ "Unbalanced parentheses",
+ "Unparsable value or missing operator",
+ "Unary operator used as binary",
+ "Undefined variable",
+ "Non-lvalue in assignment",
+ "varlist is NULL",
+ 0,
};
enum TOperator {
- OP_VOID = -1,
- OP_NEG,
- OP_UNARYMINUS,
- OP_MULT, OP_DIV,
- OP_ADD, OP_SUBTRACT,
- OP_LT, OP_GT,
- OP_ASSIGN,
-
- OP_LE, OP_GE, OP_EQ,
+ OP_VOID = -1,
+ OP_NEG,
+ OP_UNARYMINUS,
+ OP_MULT,
+ OP_DIV,
+ OP_ADD,
+ OP_SUBTRACT,
+ OP_LT,
+ OP_GT,
+ OP_ASSIGN,
+ OP_LE,
+ OP_GE,
+ OP_EQ,
};
struct SOp {
- char literal[4];
- int prio;
- bool assoc_ltr,
- is_binary;
+ char literal[4];
+ int prio;
+ bool assoc_ltr,
+ is_binary;
- SOp( const char *l, int p, bool a, bool b)
- : prio (p), assoc_ltr (a), is_binary (b)
- { strncpy( literal, l, 3); }
+ SOp( const char *l, int p, bool a, bool b)
+ : prio (p), assoc_ltr (a), is_binary (b)
+ { strncpy( literal, l, 3); }
- bool isat( const char *where)
- { return (strncmp( where, literal, strlen( literal)) == 0); }
+ bool isat( const char *where) const
+ { return (strncmp( where, literal, strlen( literal)) == 0); }
};
-#define n_ops 12
inline namespace {
-array<SOp, n_ops> Ops = {
- {
- SOp("!", 1, false, false),
- SOp("-", 1, false, false),
- SOp("*", 3, true, true), SOp("/", 3, true, true),
- SOp("+", 5, true, true), SOp("-", 5, true, true),
- SOp("<", 7, true, true), SOp(">", 7, true, true),
- SOp("=", 9, false, true),
-
- SOp("<=", 7, true, true), SOp(">=", 7, true, true), SOp("==", 7, true, true)
- }
+const array<SOp, 12> Ops {
+ {
+ SOp("!", 1, false, false),
+ SOp("-", 1, false, false),
+ SOp("*", 3, true, true),
+ SOp("/", 3, true, true),
+ SOp("+", 5, true, true),
+ SOp("-", 5, true, true),
+ SOp("<", 7, true, true),
+ SOp(">", 7, true, true),
+ SOp("=", 9, false, true),
+ SOp("<=", 7, true, true),
+ SOp(">=", 7, true, true),
+ SOp("==", 7, true, true)
+ }
};
+
} // inline namespace
-cnrun::TExprParserError
-cnrun::CExpression::
-_do_parse( const char *str, double& parsed, list<SVariable> *varlist)
+cnrun::stilton::TExprParserError
+cnrun::stilton::CExpression::
+_do_parse( const string& str, double& parsed,
+ list<cnrun::stilton::SVariable> *varlist)
{
- if ( !str ) {
- parsed = 0;
- return status = EXPARSE_OK;
- }
+ if ( str.empty() ) {
+ parsed = 0;
+ return status = EXPARSE_OK;
+ }
- parsed = NAN;
- _var = "";
+ parsed = NAN;
+ _var = "";
- string workbuf( str);
- char *p = &workbuf[0];
+ string workbuf = str;
+ char *p = &workbuf[0];
- p += strspn( p, " \t");
- if ( !*p ) {
- parsed = 0;
- return status = EXPARSE_EMPTY;
- }
+ p += strspn( p, " \t");
+ if ( !*p ) {
+ parsed = 0;
+ return status = EXPARSE_EMPTY;
+ }
- char *expr1 = p,
- *expr2 = nullptr;
- TExprParserError subexpr_retval;
+ char *expr1 = p,
+ *expr2 = nullptr;
+ TExprParserError subexpr_retval;
// determine subexpressions, if any, at top level
- int level = 0;
- char *tl_op_at = nullptr;
- TOperator
- tl_op = OP_VOID;
- bool last_token_was_operator = true;
-
-// cerr << "\nPARSE \"" << p << "\"\n";
- while ( *p ) {
- if ( *p == eol_comment_delim ) {
- *p = '\0';
- break;
- }
- if ( *p == '(' ) level++;
- else if ( *p == ')' ) level--;
-
- if ( level < 0 )
- return status = EXPARSE_UNBALANCED;
- if ( level > 0 || isspace( *p) )
- goto end_detect;
-
- // detect exponent (e-4)
- if ( strncasecmp( p, "e-", 2) == 0 ) {
- p++;
- goto end_detect;
- }
- // serve the case of unary -: part one
- if ( *p == '-' && last_token_was_operator ) {
- char *pp = p;
- while ( pp > &workbuf[0] && !isspace(*pp) ) pp--; // pp++;
-// cerr << " (checking \"" << pp << "\"";
- char *tp;
- if ( strtod( pp, &tp) )
- ;
- if ( tp > p ) { // we have indeed read a number
-// cerr << " parsed a number up to \"" << tp<< "\")\n";
- p = tp - 1;
- last_token_was_operator = false;
- goto end_detect;
- }
-// cerr << " not a number)\n";
- }
-
- int o;
- for ( o = n_ops-1; o >= 0; o-- ) // check for multibyte operators first (those are at end)
- if ( Ops[o].isat( p) ) {
- char *pp = p;
- p += strlen( Ops[o].literal) - 1; // anticipate general p++
-
- if ( o == OP_SUBTRACT && last_token_was_operator ) {
-// cerr << "override\n";
- o = OP_UNARYMINUS;
- } else
- if ( !last_token_was_operator && !Ops[o].is_binary ) {
-// cerr << " ...at \"" << pp << "\" with op " << Ops[o].literal << endl;
- if ( !silent ) fprintf( stderr, "Unary %s used after an operand\n", Ops[o].literal);
- return status = EXPARSE_UNASSOC;
- }
-
- if ( tl_op == OP_VOID ||
- (Ops[o].assoc_ltr && Ops[tl_op].prio <= Ops[o].prio) ||
- (!Ops[o].assoc_ltr && Ops[tl_op].prio < Ops[o].prio) ) {
-// cerr << "current tlop: " << Ops[o].literal << endl;
- tl_op_at = pp;
- tl_op = (TOperator)o;
- }
- last_token_was_operator = true;
- goto end_detect;
- }
-
- last_token_was_operator = false;
-
- end_detect:
- p++;
- }
-// cerr << "tlop is " << Ops[tl_op].literal << endl;
-
- if ( level > 0 ) {
- if ( !silent ) fprintf( stderr, "Expression lacks some `)''\n");
- return status = EXPARSE_UNBALANCED;
- }
-
- list<SVariable>::iterator V;
-
- if ( tl_op != OP_VOID ) {
- *tl_op_at = '\0';
- expr2 = tl_op_at + strlen( Ops[tl_op].literal);
- double opd1, opd2;
-
-// cerr << "parsing [" << expr1 << "] "<< Ops[tl_op].literal << " [" << expr2 << "]\n";
-
- // second subexpr must always be good
- subexpr_retval = _do_parse( expr2, opd2, varlist);
- if ( subexpr_retval )
- return status = subexpr_retval;
-
- // first subexpr must be empty, but only in the case of OP_NEG
- subexpr_retval = _do_parse( expr1, opd1, varlist);
-
- switch ( subexpr_retval ) {
- case EXPARSE_OK:
- break;
- case EXPARSE_EMPTY:
- if ( !Ops[tl_op].is_binary ) {
-// cerr << "was a unary op\n";
- break;
- } else
- return subexpr_retval;
- case EXPARSE_UNDEFVAR:
- if ( tl_op == OP_ASSIGN )
- break;
- else {
- // have it reported here (in deeper _do_parse where it is flagged), we don't know yet
- // if an undefined var is going to be defined
- if ( !silent ) fprintf( stderr, "Undefined variable `%s'\n", strtok( expr1, " \t"));
- return status = subexpr_retval;
- }
- break;
- default:
- return subexpr_retval;
- }
-
- switch ( tl_op ) {
- case OP_VOID: break;
- case OP_UNARYMINUS: parsed = -opd2; break;
- case OP_ADD: parsed = opd1 + opd2; break;
- case OP_SUBTRACT: parsed = opd1 - opd2; break;
- case OP_MULT: parsed = opd1 * opd2; break;
- case OP_DIV: parsed = opd1 / opd2; break;
- case OP_LT: parsed = opd1 < opd2; break;
- case OP_LE: parsed = opd1 <= opd2; break;
- case OP_GT: parsed = opd1 > opd2; break;
- case OP_GE: parsed = opd1 >= opd2; break;
- case OP_EQ: parsed = opd1 == opd2; break;
- case OP_NEG: parsed = !opd2; break;
- case OP_ASSIGN:
- if ( !varlist ) {
- if ( !silent ) fprintf( stderr, "Variable assignment reqires a user varlist\n");
- return status = EXPARSE_VARLISTNULL;
- }
- if ( _var == "" ) {
- if ( !silent ) fprintf( stderr, "Non-lvalue in assignment\n");
- return status = EXPARSE_NONLVAL;
- }
- parsed = opd2;
- for ( V = varlist->begin(); V != varlist->end(); V++ )
- if ( strcmp( V->name, _var.c_str()) == 0 ) { // _var has been cached by a previous call to _do_parse
- V->value = opd2;
- toplevel_op = tl_op;
- return status = EXPARSE_OK;
- }
- varlist->push_back( SVariable( _var.c_str(), opd2));
- break;
- }
- toplevel_op = tl_op;
- return status = EXPARSE_OK;
- }
+ int level = 0;
+ char *tl_op_at = nullptr;
+ TOperator
+ tl_op = OP_VOID;
+ bool last_token_was_operator = true;
+
+ while ( *p ) {
+ if ( *p == eol_comment_delim ) {
+ *p = '\0';
+ break;
+ }
+ if ( *p == '(' ) ++level;
+ else if ( *p == ')' ) --level;
+
+ if ( level < 0 )
+ return status = EXPARSE_UNBALANCED;
+ if ( level > 0 || isspace( *p) )
+ goto end_detect;
+
+ // detect exponent (e-4)
+ if ( strncasecmp( p, "e-", 2) == 0 ) {
+ p++;
+ goto end_detect;
+ }
+ // serve the case of unary -: part one
+ if ( *p == '-' && last_token_was_operator ) {
+ char *pp = p;
+ while ( pp > &workbuf[0] && !isspace(*pp) )
+ --pp;
+ char *tp;
+ if ( strtod( pp, &tp) )
+ ;
+ if ( tp > p ) { // we have indeed read a number
+ p = tp - 1;
+ last_token_was_operator = false;
+ goto end_detect;
+ }
+ }
+
+ int o;
+ for ( o = Ops.size()-1; o >= 0; --o ) // check for multibyte operators first (those are at end)
+ if ( Ops[o].isat( p) ) {
+ char *pp = p;
+ p += strlen( Ops[o].literal) - 1; // anticipate general p++
+
+ if ( o == OP_SUBTRACT && last_token_was_operator ) {
+ o = OP_UNARYMINUS;
+ } else
+ if ( !last_token_was_operator && !Ops[o].is_binary ) {
+ if ( !silent ) fprintf( stderr, "Unary %s used after an operand\n", Ops[o].literal);
+ return status = EXPARSE_UNASSOC;
+ }
+
+ if ( tl_op == OP_VOID ||
+ (Ops[o].assoc_ltr && Ops[tl_op].prio <= Ops[o].prio) ||
+ (!Ops[o].assoc_ltr && Ops[tl_op].prio < Ops[o].prio) ) {
+// cerr << "current tlop: " << Ops[o].literal << endl;
+ tl_op_at = pp;
+ tl_op = (TOperator)o;
+ }
+ last_token_was_operator = true;
+ goto end_detect;
+ }
+
+ last_token_was_operator = false;
+
+ end_detect:
+ ++p;
+ }
+
+ if ( level > 0 ) {
+                if ( !silent ) fprintf( stderr, "Expression lacks some `)'\n");
+ return status = EXPARSE_UNBALANCED;
+ }
+
+ list<cnrun::stilton::SVariable>::iterator V;
+
+ if ( tl_op != OP_VOID ) {
+ *tl_op_at = '\0';
+ expr2 = tl_op_at + strlen( Ops[tl_op].literal);
+ double opd1, opd2;
+
+// cerr << "parsing [" << expr1 << "] "<< Ops[tl_op].literal << " [" << expr2 << "]\n";
+
+ // second subexpr must always be good
+ subexpr_retval = _do_parse( expr2, opd2, varlist);
+ if ( subexpr_retval )
+ return status = subexpr_retval;
+
+        // the first subexpr may legitimately be empty, but only when the operator is unary (OP_NEG, OP_UNARYMINUS)
+ subexpr_retval = _do_parse( expr1, opd1, varlist);
+
+ switch ( subexpr_retval ) {
+ case EXPARSE_OK:
+ break;
+ case EXPARSE_EMPTY:
+ if ( !Ops[tl_op].is_binary ) {
+// cerr << "was a unary op\n";
+ break;
+ } else
+ return subexpr_retval;
+ case EXPARSE_UNDEFVAR:
+ if ( tl_op == OP_ASSIGN )
+ break;
+ else {
+                        // report the error at this level, not in the deeper _do_parse where it was flagged:
+                        // only here do we know whether the undefined var is about to be defined by an assignment
+ if ( !silent ) fprintf( stderr, "Undefined variable `%s'\n", strtok( expr1, " \t"));
+ return status = subexpr_retval;
+ }
+ break;
+ default:
+ return subexpr_retval;
+ }
+
+ switch ( tl_op ) {
+ case OP_VOID: break;
+ case OP_UNARYMINUS: parsed = -opd2; break;
+ case OP_ADD: parsed = opd1 + opd2; break;
+ case OP_SUBTRACT: parsed = opd1 - opd2; break;
+ case OP_MULT: parsed = opd1 * opd2; break;
+ case OP_DIV: parsed = opd1 / opd2; break;
+ case OP_LT: parsed = opd1 < opd2; break;
+ case OP_LE: parsed = opd1 <= opd2; break;
+ case OP_GT: parsed = opd1 > opd2; break;
+ case OP_GE: parsed = opd1 >= opd2; break;
+ case OP_EQ: parsed = opd1 == opd2; break;
+ case OP_NEG: parsed = !opd2; break;
+ case OP_ASSIGN:
+ if ( !varlist ) {
+ if ( !silent )
+                                fprintf( stderr, "Variable assignment requires a user varlist\n");
+ return status = EXPARSE_VARLISTNULL;
+ }
+ if ( _var == "" ) {
+ if ( !silent )
+ fprintf( stderr, "Non-lvalue in assignment\n");
+ return status = EXPARSE_NONLVAL;
+ }
+ parsed = opd2;
+ for ( V = varlist->begin(); V != varlist->end(); V++ )
+ if ( strcmp( V->name, _var.c_str()) == 0 ) { // _var has been cached by a previous call to _do_parse
+ V->value = opd2;
+ toplevel_op = tl_op;
+ return status = EXPARSE_OK;
+ }
+ varlist->push_back( SVariable( _var.c_str(), opd2));
+ break;
+ }
+ toplevel_op = tl_op;
+ return status = EXPARSE_OK;
+ }
// single expression, possibly in parentheses
- if ( *expr1 == '(' ) {
- *strrchr( ++expr1, ')') = '\0'; // parentheses have been checked in the by-char parser loop above
- return _do_parse( expr1, parsed, varlist);
- }
+ if ( *expr1 == '(' ) {
+ *strrchr( ++expr1, ')') = '\0'; // parentheses have been checked in the by-char parser loop above
+ return _do_parse( expr1, parsed, varlist);
+ }
// bare expression
- expr1 = strtok( expr1, " \t");
- char *tailp;
- parsed = strtod( expr1, &tailp);
- if ( tailp == nullptr || strspn( tailp, " \t\n\r;") == strlen( tailp) ) // digits followed by whitespace
- return status = EXPARSE_OK;
-
- if ( tailp == expr1 && varlist ) { // no digits at front: check if that's a variable
- for ( V = varlist->begin(); V != varlist->end(); V++ ) {
- if ( strcmp( V->name, expr1) == 0 ) {
- parsed = V->value;
- _var = V->name;
- return status = EXPARSE_OK;
- }
- }
- _var = expr1; // possibly to be assigned in caller; parsed remains NAN
- return status = EXPARSE_UNDEFVAR;
- }
+ expr1 = strtok( expr1, " \t");
+ char *tailp;
+ parsed = strtod( expr1, &tailp);
+ if ( tailp == nullptr || strspn( tailp, " \t\n\r;") == strlen( tailp) ) // digits followed by whitespace
+ return status = EXPARSE_OK;
+
+ if ( tailp == expr1 && varlist ) { // no digits at front: check if that's a variable
+ for ( V = varlist->begin(); V != varlist->end(); V++ ) {
+ if ( strcmp( V->name, expr1) == 0 ) {
+ parsed = V->value;
+ _var = V->name;
+ return status = EXPARSE_OK;
+ }
+ }
+ _var = expr1; // possibly to be assigned in caller; parsed remains NAN
+ return status = EXPARSE_UNDEFVAR;
+ }
// some digits followed by rubbish
- return status = EXPARSE_BAD;
+ return status = EXPARSE_BAD;
}
-
-// EOF
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libstilton/exprparser.hh b/upstream/src/libstilton/exprparser.hh
index 9620d88..ea2de3c 100644
--- a/upstream/src/libstilton/exprparser.hh
+++ b/upstream/src/libstilton/exprparser.hh
@@ -8,8 +8,8 @@
* An expression parser
*/
-#ifndef CNAUX_EXPRPARSER_H
-#define CNAUX_EXPRPARSER_H
+#ifndef CNRUN_LIBSTILTON_EXPRPARSER_H_
+#define CNRUN_LIBSTILTON_EXPRPARSER_H_
#include <cmath>
#include <cstring>
@@ -20,80 +20,97 @@
# include "config.h"
#endif
-namespace cnrun {
-
using namespace std;
+namespace cnrun {
+namespace stilton {
+
enum TExprParserError {
- EXPARSE_OK = 0,
- EXPARSE_EMPTY,
- EXPARSE_UNBALANCED,
- EXPARSE_BAD,
- EXPARSE_UNASSOC,
- EXPARSE_UNDEFVAR,
- EXPARSE_NONLVAL,
- EXPARSE_VARLISTNULL
+ EXPARSE_OK = 0,
+ EXPARSE_EMPTY,
+ EXPARSE_UNBALANCED,
+ EXPARSE_BAD,
+ EXPARSE_UNASSOC,
+ EXPARSE_UNDEFVAR,
+ EXPARSE_NONLVAL,
+ EXPARSE_VARLISTNULL
};
-#define STILTON_MAX_VAR_LEN 32
+#define STILTON_MAX_VAR_LEN 32
struct SVariable {
- char name[STILTON_MAX_VAR_LEN];
- double value;
- SVariable( const char *inname, double invalue = NAN)
- : value (invalue)
- {
- strncpy( name, inname, STILTON_MAX_VAR_LEN-1);
- }
- bool operator== ( const SVariable &rv) const
- {
- return strcmp( name, rv.name /*, STILTON_MAX_VAR_LEN */ ) == 0;
- }
- bool operator< ( const SVariable &rv) const
- {
- return strcmp( name, rv.name /*, STILTON_MAX_VAR_LEN */ ) == -1;
- }
+ char name[STILTON_MAX_VAR_LEN];
+ double value;
+ SVariable( const char *inname, double invalue = NAN)
+ : value (invalue)
+ {
+ strncpy( name, inname, STILTON_MAX_VAR_LEN-1);
+ }
+ bool operator== ( const SVariable &rv) const
+ {
+ return strcmp( name, rv.name /*, STILTON_MAX_VAR_LEN */ ) == 0;
+ }
+ bool operator< ( const SVariable &rv) const
+ {
+ return strcmp( name, rv.name /*, STILTON_MAX_VAR_LEN */ ) == -1;
+ }
};
-extern const char *const __exparse_error_strings[];
class CExpression {
public:
- TExprParserError status;
-
- CExpression()
- : status (EXPARSE_OK), silent (false), eol_comment_delim ('#'), toplevel_op (' '), _parsed_value (NAN)
- {}
- const char *error_string() const
- { return __exparse_error_strings[status]; }
-
- double operator() ( const char *str, list<SVariable> *varlist = nullptr)
- { return ( _do_parse( str, _parsed_value, varlist) == EXPARSE_OK )
- ? _parsed_value : NAN; }
- int operator() ( const char *str, double& parsed, list<SVariable> *varlist = nullptr)
- { _do_parse( str, parsed, varlist);
- return status; }
-
- bool silent;
- char eol_comment_delim;
- char toplevel_op;
-
- const char *status_s() const
- { return __exparse_error_strings[status]; }
+ TExprParserError status;
+
+ CExpression()
+ : status (EXPARSE_OK),
+ silent (false),
+ eol_comment_delim ('#'),
+ toplevel_op (' '),
+ _parsed_value (NAN)
+ {}
+ const char *error_string() const
+ {
+ return error_strings[status];
+ }
+
+ double operator() ( const char *str, list<SVariable> *varlist = nullptr)
+ {
+ return ( _do_parse( str, _parsed_value, varlist) == EXPARSE_OK )
+ ? _parsed_value : NAN;
+ }
+ int operator() ( const char *str, double& parsed, list<SVariable> *varlist = nullptr)
+ {
+ _do_parse( str, parsed, varlist);
+ return status;
+ }
+
+ bool silent;
+ char eol_comment_delim;
+ char toplevel_op;
+
+ static const char *const error_strings[];
+ const char *status_s() const
+ {
+ return error_strings[status];
+ }
private:
- double _parsed_value;
- string _var;
-// string _source_str;
- TExprParserError _do_parse( const char *str, double& parsed, list<SVariable>*);
+ double _parsed_value;
+ string _var;
+ TExprParserError _do_parse( const string& str, double& parsed, list<SVariable>*);
};
-
+}
}
#endif
-// EOF
+// Local Variables:
+// Mode: c++
+// indent-tabs-mode: nil
+// tab-width: 8
+// c-basic-offset: 8
+// End:
diff --git a/upstream/src/libstilton/lang.hh b/upstream/src/libstilton/lang.hh
index eb941ec..4e5ddbb 100644
--- a/upstream/src/libstilton/lang.hh
+++ b/upstream/src/libstilton/lang.hh
@@ -9,8 +9,8 @@
* License: GPL
*/
-#ifndef _CNRUN_LIBSTILTON_LANG_H
-#define _CNRUN_LIBSTILTON_LANG_H
+#ifndef CNRUN_LIBSTILTON_LANG_H_
+#define CNRUN_LIBSTILTON_LANG_H_
#if HAVE_CONFIG_H && !defined(VERSION)
# include "config.h"
@@ -23,7 +23,8 @@
using namespace std;
-namespace agh {
+namespace cnrun {
+namespace stilton {
// for functions to suppress some possibly benign exceptions:
enum class TThrowOption {
@@ -75,7 +76,8 @@ inline int dbl_cmp( double x, double y) // optional precision maybe?
#define FABUF printf( __FILE__ ":%d (%s): %s\n", __LINE__, __FUNCTION__, __buf__);
#define FAFA printf( __FILE__ ":%d (%s): fafa\n", __LINE__, __FUNCTION__);
-} // namespace agh
+}
+}
#endif
diff --git a/upstream/src/libstilton/libcommon.cc b/upstream/src/libstilton/libcommon.cc
index d1344b5..3f0d4ff 100644
--- a/upstream/src/libstilton/libcommon.cc
+++ b/upstream/src/libstilton/libcommon.cc
@@ -31,7 +31,7 @@ using namespace std;
string
-cnrun::str::
+cnrun::stilton::str::
svasprintf( const char* fmt, va_list ap)
{
char *_;
@@ -45,7 +45,7 @@ svasprintf( const char* fmt, va_list ap)
string
-cnrun::str::
+cnrun::stilton::str::
sasprintf( const char* fmt, ...)
{
char *_;
@@ -63,7 +63,7 @@ sasprintf( const char* fmt, ...)
string
-cnrun::str::
+cnrun::stilton::str::
trim( const string& r0)
{
string r (r0);
@@ -78,7 +78,7 @@ trim( const string& r0)
}
string
-cnrun::str::
+cnrun::stilton::str::
pad( const string& r0, size_t to)
{
string r (to, ' ');
@@ -89,7 +89,7 @@ pad( const string& r0, size_t to)
list<string>
-cnrun::str::
+cnrun::stilton::str::
tokens_trimmed( const string& s_, const char* sep)
{
string s {s_};
@@ -104,7 +104,7 @@ tokens_trimmed( const string& s_, const char* sep)
}
list<string>
-cnrun::str::
+cnrun::stilton::str::
tokens( const string& s_, const char* sep)
{
string s {s_};
@@ -122,7 +122,7 @@ tokens( const string& s_, const char* sep)
void
-cnrun::str::
+cnrun::stilton::str::
decompose_double( double value, double *mantissa, int *exponent)
{
char buf[32];
@@ -135,7 +135,7 @@ decompose_double( double value, double *mantissa, int *exponent)
string&
-cnrun::str::
+cnrun::stilton::str::
homedir2tilda( string& inplace)
{
const char *home = getenv("HOME");
@@ -146,7 +146,7 @@ homedir2tilda( string& inplace)
}
string
-cnrun::str::
+cnrun::stilton::str::
homedir2tilda( const string& v)
{
string inplace (v);
@@ -158,7 +158,7 @@ homedir2tilda( const string& v)
}
string&
-cnrun::str::
+cnrun::stilton::str::
tilda2homedir( string& inplace)
{
const char *home = getenv("HOME");
@@ -171,7 +171,7 @@ tilda2homedir( string& inplace)
}
string
-cnrun::str::
+cnrun::stilton::str::
tilda2homedir( const string& v)
{
string inplace (v);
@@ -187,10 +187,10 @@ tilda2homedir( const string& v)
string
-cnrun::str::
+cnrun::stilton::str::
dhms( double seconds, int dd)
{
- bool positive = seconds >= 0.;
+ bool positive = seconds >= 0.;
if ( not positive )
seconds = -seconds;
@@ -200,8 +200,8 @@ dhms( double seconds, int dd)
d = (int)seconds/60/60/24 % (60*60*24);
double f = seconds - floor(seconds);
- using cnrun::str::sasprintf;
- string f_ = ( dd == 0 )
+ using cnrun::stilton::str::sasprintf;
+ string f_ = ( dd == 0 )
? ""
: sasprintf( ".%0*d", dd, (int)(f*pow(10, dd)));
return ( d > 0 )
@@ -214,7 +214,7 @@ dhms( double seconds, int dd)
}
string
-cnrun::str::
+cnrun::stilton::str::
dhms_colon( double seconds, int dd)
{
bool positive = seconds >= 0.;
@@ -227,8 +227,8 @@ dhms_colon( double seconds, int dd)
d = (int)seconds/60/60/24 % (60*60*24);
double f = seconds - floor(seconds);
- using cnrun::str::sasprintf;
- string f_ = ( dd == 0 )
+ using cnrun::stilton::str::sasprintf;
+ string f_ = ( dd == 0 )
? ""
: sasprintf( ".%0*d", dd, (int)(f*pow(10, dd)));
@@ -254,7 +254,7 @@ n_frac_digits( double v)
}
string
-cnrun::str::
+cnrun::stilton::str::
double_dot_aligned_s( double val, int int_width, int frac_width)
{
char buf[40];
diff --git a/upstream/src/libstilton/string.hh b/upstream/src/libstilton/string.hh
index 38e6bca..b4c3688 100644
--- a/upstream/src/libstilton/string.hh
+++ b/upstream/src/libstilton/string.hh
@@ -25,6 +25,7 @@
using namespace std;
namespace cnrun {
+namespace stilton {
namespace str {
enum class TStrCmpCaseOption {
@@ -107,6 +108,7 @@ double_dot_aligned_s( double val, int int_width = 8, int frac_width = 8);
}
}
+}
#endif
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/cnrun.git
More information about the debian-med-commit
mailing list